dm raid: add write_mostly parameter
author Jonathan Brassow <jbrassow@redhat.com>
Tue, 2 Aug 2011 11:32:07 +0000 (12:32 +0100)
committer Alasdair G Kergon <agk@redhat.com>
Tue, 2 Aug 2011 11:32:07 +0000 (12:32 +0100)
Add the write_mostly parameter to RAID1 dm-raid tables.

This allows the user to set the WriteMostly flag on a RAID1 device so that
it is normally avoided for read I/O.
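
For example, a two-device RAID1 table that marks the second mirror leg
(index 1) as write-mostly could look like the line below. This is an
illustrative sketch in the style of the existing dm-raid.txt example tables:
the length, the device numbers and the use of '-' for "no metadata device"
are placeholders, and the leading raid parameter is the mandatory chunk
size, given as 0 here since RAID1 does not stripe.

    0 1960893648 raid \
            raid1 3 0 write_mostly 1 \
            2 - 8:17 - 8:33

The parameter count of 3 covers the chunk size plus the two words of
"write_mostly 1"; drive indices are zero-based, so index 1 names the second
of the two data devices.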

Signed-off-by: Jonathan Brassow <jbrassow@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Documentation/device-mapper/dm-raid.txt
drivers/md/dm-raid.c

index be4419a307816b5a21e92454948c1c7978cf5e24..a7d1c4abc92793e79f5cadae0cb9ad3ae368d3aa 100644 (file)
@@ -50,6 +50,7 @@ The target is named "raid" and it accepts the following parameters:
 
        [min_recovery_rate <kB/sec/disk>]  Throttle RAID initialization
        [max_recovery_rate <kB/sec/disk>]  Throttle RAID initialization
+       [write_mostly <idx>]               Drive index is write-mostly
        [max_write_behind <sectors>]       See '-write-behind=' (man mdadm)
        [stripe_cache <sectors>]           Stripe cache size (higher RAIDs only)
        [region_size <sectors>]
@@ -87,9 +88,10 @@ Example tables
         5 - 8:17 - 8:33 - 8:49 - 8:65 - 8:81
 
 'dmsetup table' displays the table used to construct the mapping.
-The optional parameters will always be printed in the order listed
+The optional parameters are always printed in the order listed
 above with "sync" or "nosync" always output ahead of the other
 arguments, regardless of the order used when originally loading the table.
+Arguments that can be repeated are ordered by value.
 
 'dmsetup status' yields information on the state and health of the
 array.
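
To illustrate the ordering rule added above: if a four-device RAID1 table
were loaded with "write_mostly 3" given before "write_mostly 1", a
hypothetical 'dmsetup table' read-back would still list the repeated
argument in ascending index order, roughly as follows (all other values are
illustrative):

    0 1960893648 raid raid1 5 0 write_mostly 1 write_mostly 3 4 - 8:17 - 8:33 - 8:49 - 8:65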
index a8a1915a450d204a9e1b9400b21d39c8547a56e4..88143a0303d2ac768095caeb3c8c81549dd1a81b 100644 (file)
@@ -308,6 +308,7 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size)
  *    [daemon_sleep <ms>]              Time between bitmap daemon work to clear bits
  *    [min_recovery_rate <kB/sec/disk>]        Throttle RAID initialization
  *    [max_recovery_rate <kB/sec/disk>]        Throttle RAID initialization
+ *    [write_mostly <idx>]             Indicate a write mostly drive via index
  *    [max_write_behind <sectors>]     See '-write-behind=' (man mdadm)
  *    [stripe_cache <sectors>]         Stripe cache size for higher RAIDs
  *    [region_size <sectors>]           Defines granularity of bitmap
@@ -376,7 +377,21 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
                        clear_bit(In_sync, &rs->dev[value].rdev.flags);
                        rs->dev[value].rdev.recovery_offset = 0;
                        rs->print_flags |= DMPF_REBUILD;
+               } else if (!strcasecmp(key, "write_mostly")) {
+                       if (rs->raid_type->level != 1) {
+                               rs->ti->error = "write_mostly option is only valid for RAID1";
+                               return -EINVAL;
+                       }
+                       if (value > rs->md.raid_disks) {
+                               rs->ti->error = "Invalid write_mostly drive index given";
+                               return -EINVAL;
+                       }
+                       set_bit(WriteMostly, &rs->dev[value].rdev.flags);
                } else if (!strcasecmp(key, "max_write_behind")) {
+                       if (rs->raid_type->level != 1) {
+                               rs->ti->error = "max_write_behind option is only valid for RAID1";
+                               return -EINVAL;
+                       }
                        rs->print_flags |= DMPF_MAX_WRITE_BEHIND;
 
                        /*
@@ -621,11 +636,15 @@ static int raid_status(struct dm_target *ti, status_type_t type,
                break;
        case STATUSTYPE_TABLE:
                /* The string you would use to construct this array */
-               for (i = 0; i < rs->md.raid_disks; i++)
+               for (i = 0; i < rs->md.raid_disks; i++) {
                        if ((rs->print_flags & DMPF_REBUILD) &&
                            rs->dev[i].data_dev &&
                            !test_bit(In_sync, &rs->dev[i].rdev.flags))
                                raid_param_cnt += 2; /* for rebuilds */
+                       if (rs->dev[i].data_dev &&
+                           test_bit(WriteMostly, &rs->dev[i].rdev.flags))
+                               raid_param_cnt += 2;
+               }
 
                raid_param_cnt += (hweight64(rs->print_flags & ~DMPF_REBUILD) * 2);
                if (rs->print_flags & (DMPF_SYNC | DMPF_NOSYNC))
@@ -656,6 +675,11 @@ static int raid_status(struct dm_target *ti, status_type_t type,
                if (rs->print_flags & DMPF_MAX_RECOVERY_RATE)
                        DMEMIT(" max_recovery_rate %d", rs->md.sync_speed_max);
 
+               for (i = 0; i < rs->md.raid_disks; i++)
+                       if (rs->dev[i].data_dev &&
+                           test_bit(WriteMostly, &rs->dev[i].rdev.flags))
+                               DMEMIT(" write_mostly %u", i);
+
                if (rs->print_flags & DMPF_MAX_WRITE_BEHIND)
                        DMEMIT(" max_write_behind %lu",
                               rs->md.bitmap_info.max_write_behind);