Commit f72ffdd6 authored by NeilBrown

md: remove unwanted white space from md.c

My editor shows much of this is RED.
Signed-off-by: NeilBrown <neilb@suse.de>
Parent ac05f256
......@@ -10,10 +10,10 @@
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
You should have received a copy of the GNU General Public License
(for example /usr/src/linux/COPYING); if not, write to the Free
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/blkdev.h>
......@@ -25,7 +25,7 @@
#include "linear.h"
/*
* find which device holds a particular offset
* find which device holds a particular offset
*/
static inline struct dev_info *which_dev(struct mddev *mddev, sector_t sector)
{
......@@ -355,7 +355,6 @@ static void linear_status (struct seq_file *seq, struct mddev *mddev)
seq_printf(seq, " %dk rounding", mddev->chunk_sectors / 2);
}
static struct md_personality linear_personality =
{
.name = "linear",
......@@ -379,7 +378,6 @@ static void linear_exit (void)
unregister_md_personality (&linear_personality);
}
module_init(linear_init);
module_exit(linear_exit);
MODULE_LICENSE("GPL");
......
/*
md.c : Multiple Devices driver for Linux
Copyright (C) 1998, 1999, 2000 Ingo Molnar
Copyright (C) 1998, 1999, 2000 Ingo Molnar
completely rewritten, based on the MD driver code from Marc Zyngier
......@@ -218,7 +218,6 @@ static void md_new_event_inintr(struct mddev *mddev)
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);
/*
* iterates through all used mddevs in the system.
* We take care to grab the all_mddevs_lock whenever navigating
......@@ -228,7 +227,7 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
*/
#define for_each_mddev(_mddev,_tmp) \
\
for (({ spin_lock(&all_mddevs_lock); \
for (({ spin_lock(&all_mddevs_lock); \
_tmp = all_mddevs.next; \
_mddev = NULL;}); \
({ if (_tmp != &all_mddevs) \
......@@ -241,7 +240,6 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
_tmp = _tmp->next;}) \
)
/* Rather than calling directly into the personality make_request function,
* IO requests come here first so that we can check if the device is
* being suspended pending a reconfiguration.
......@@ -488,7 +486,7 @@ void mddev_init(struct mddev *mddev)
}
EXPORT_SYMBOL_GPL(mddev_init);
static struct mddev * mddev_find(dev_t unit)
static struct mddev *mddev_find(dev_t unit)
{
struct mddev *mddev, *new = NULL;
......@@ -530,7 +528,7 @@ static struct mddev * mddev_find(dev_t unit)
kfree(new);
return NULL;
}
is_free = 1;
list_for_each_entry(mddev, &all_mddevs, all_mddevs)
if (mddev->unit == dev) {
......@@ -562,7 +560,7 @@ static struct mddev * mddev_find(dev_t unit)
goto retry;
}
static inline int __must_check mddev_lock(struct mddev * mddev)
static inline int __must_check mddev_lock(struct mddev *mddev)
{
return mutex_lock_interruptible(&mddev->reconfig_mutex);
}
......@@ -570,7 +568,7 @@ static inline int __must_check mddev_lock(struct mddev * mddev)
/* Sometimes we need to take the lock in a situation where
* failure due to interrupts is not acceptable.
*/
static inline void mddev_lock_nointr(struct mddev * mddev)
static inline void mddev_lock_nointr(struct mddev *mddev)
{
mutex_lock(&mddev->reconfig_mutex);
}
......@@ -580,14 +578,14 @@ static inline int mddev_is_locked(struct mddev *mddev)
return mutex_is_locked(&mddev->reconfig_mutex);
}
static inline int mddev_trylock(struct mddev * mddev)
static inline int mddev_trylock(struct mddev *mddev)
{
return mutex_trylock(&mddev->reconfig_mutex);
}
static struct attribute_group md_redundancy_group;
static void mddev_unlock(struct mddev * mddev)
static void mddev_unlock(struct mddev *mddev)
{
if (mddev->to_remove) {
/* These cannot be removed under reconfig_mutex as
......@@ -682,7 +680,7 @@ static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
return MD_NEW_SIZE_SECTORS(num_sectors);
}
static int alloc_disk_sb(struct md_rdev * rdev)
static int alloc_disk_sb(struct md_rdev *rdev)
{
if (rdev->sb_page)
MD_BUG();
......@@ -783,7 +781,7 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
}
EXPORT_SYMBOL_GPL(sync_page_io);
static int read_disk_sb(struct md_rdev * rdev, int size)
static int read_disk_sb(struct md_rdev *rdev, int size)
{
char b[BDEVNAME_SIZE];
if (!rdev->sb_page) {
......@@ -793,7 +791,6 @@ static int read_disk_sb(struct md_rdev * rdev, int size)
if (rdev->sb_loaded)
return 0;
if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, true))
goto fail;
rdev->sb_loaded = 1;
......@@ -807,7 +804,7 @@ static int read_disk_sb(struct md_rdev * rdev, int size)
static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
return sb1->set_uuid0 == sb2->set_uuid0 &&
return sb1->set_uuid0 == sb2->set_uuid0 &&
sb1->set_uuid1 == sb2->set_uuid1 &&
sb1->set_uuid2 == sb2->set_uuid2 &&
sb1->set_uuid3 == sb2->set_uuid3;
......@@ -843,14 +840,13 @@ static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
return ret;
}
static u32 md_csum_fold(u32 csum)
{
csum = (csum & 0xffff) + (csum >> 16);
return (csum & 0xffff) + (csum >> 16);
}
static unsigned int calc_sb_csum(mdp_super_t * sb)
static unsigned int calc_sb_csum(mdp_super_t *sb)
{
u64 newcsum = 0;
u32 *sb32 = (u32*)sb;
......@@ -864,7 +860,6 @@ static unsigned int calc_sb_csum(mdp_super_t * sb)
newcsum += sb32[i];
csum = (newcsum & 0xffffffff) + (newcsum>>32);
#ifdef CONFIG_ALPHA
/* This used to use csum_partial, which was wrong for several
* reasons including that different results are returned on
......@@ -881,7 +876,6 @@ static unsigned int calc_sb_csum(mdp_super_t * sb)
return csum;
}
/*
* Handle superblock details.
* We want to be able to handle multiple superblock formats
......@@ -947,7 +941,7 @@ int md_check_no_bitmap(struct mddev *mddev)
EXPORT_SYMBOL(md_check_no_bitmap);
/*
* load_super for 0.90.0
* load_super for 0.90.0
*/
static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
{
......@@ -1026,7 +1020,7 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
ev2 = md_event(refsb);
if (ev1 > ev2)
ret = 1;
else
else
ret = 0;
}
rdev->sectors = rdev->sb_start;
......@@ -1100,7 +1094,7 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
if (sb->state & (1<<MD_SB_CLEAN))
mddev->recovery_cp = MaxSector;
else {
if (sb->events_hi == sb->cp_events_hi &&
if (sb->events_hi == sb->cp_events_hi &&
sb->events_lo == sb->cp_events_lo) {
mddev->recovery_cp = sb->recovery_cp;
} else
......@@ -1128,7 +1122,7 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
++ev1;
if (sb->disks[rdev->desc_nr].state & (
(1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
if (ev1 < mddev->events)
if (ev1 < mddev->events)
return -EINVAL;
} else if (mddev->bitmap) {
/* if adding to array with a bitmap, then we can accept an
......@@ -1179,7 +1173,6 @@ static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
struct md_rdev *rdev2;
int next_spare = mddev->raid_disks;
/* make rdev->sb match mddev data..
*
* 1/ zero out disks
......@@ -1348,7 +1341,7 @@ super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset)
* version 1 superblock
*/
static __le32 calc_sb_1_csum(struct mdp_superblock_1 * sb)
static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb)
{
__le32 disk_csum;
u32 csum;
......@@ -1412,7 +1405,6 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
ret = read_disk_sb(rdev, 4096);
if (ret) return ret;
sb = page_address(rdev->sb_page);
if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
......@@ -1799,7 +1791,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
for (i=0; i<max_dev;i++)
sb->dev_roles[i] = cpu_to_le16(0xfffe);
rdev_for_each(rdev2, mddev) {
i = rdev2->desc_nr;
if (test_bit(Faulty, &rdev2->flags))
......@@ -2015,7 +2007,7 @@ void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
}
EXPORT_SYMBOL(md_integrity_add_rdev);
static int bind_rdev_to_array(struct md_rdev * rdev, struct mddev * mddev)
static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
{
char b[BDEVNAME_SIZE];
struct kobject *ko;
......@@ -2105,7 +2097,7 @@ static void md_delayed_delete(struct work_struct *ws)
kobject_put(&rdev->kobj);
}
static void unbind_rdev_from_array(struct md_rdev * rdev)
static void unbind_rdev_from_array(struct md_rdev *rdev)
{
char b[BDEVNAME_SIZE];
if (!rdev->mddev) {
......@@ -2163,7 +2155,7 @@ static void unlock_rdev(struct md_rdev *rdev)
void md_autodetect_dev(dev_t dev);
static void export_rdev(struct md_rdev * rdev)
static void export_rdev(struct md_rdev *rdev)
{
char b[BDEVNAME_SIZE];
printk(KERN_INFO "md: export_rdev(%s)\n",
......@@ -2179,7 +2171,7 @@ static void export_rdev(struct md_rdev * rdev)
kobject_put(&rdev->kobj);
}
static void kick_rdev_from_array(struct md_rdev * rdev)
static void kick_rdev_from_array(struct md_rdev *rdev)
{
unbind_rdev_from_array(rdev);
export_rdev(rdev);
......@@ -2208,7 +2200,7 @@ static void print_sb_90(mdp_super_t *sb)
{
int i;
printk(KERN_INFO
printk(KERN_INFO
"md: SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
sb->major_version, sb->minor_version, sb->patch_version,
sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
......@@ -2283,9 +2275,9 @@ static void print_rdev(struct md_rdev *rdev, int major_version)
{
char b[BDEVNAME_SIZE];
printk(KERN_INFO "md: rdev %s, Sect:%08llu F:%d S:%d DN:%u\n",
bdevname(rdev->bdev, b), (unsigned long long)rdev->sectors,
test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
rdev->desc_nr);
bdevname(rdev->bdev, b), (unsigned long long)rdev->sectors,
test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
rdev->desc_nr);
if (rdev->sb_loaded) {
printk(KERN_INFO "md: rdev superblock (MJ:%d):\n", major_version);
switch (major_version) {
......@@ -2328,8 +2320,7 @@ static void md_print_devices(void)
printk("\n");
}
static void sync_sbs(struct mddev * mddev, int nospares)
static void sync_sbs(struct mddev *mddev, int nospares)
{
/* Update each superblock (in-memory image), but
* if we are allowed to, skip spares which already
......@@ -2352,7 +2343,7 @@ static void sync_sbs(struct mddev * mddev, int nospares)
}
}
static void md_update_sb(struct mddev * mddev, int force_change)
static void md_update_sb(struct mddev *mddev, int force_change)
{
struct md_rdev *rdev;
int sync_req;
......@@ -2373,7 +2364,7 @@ static void md_update_sb(struct mddev * mddev, int force_change)
mddev->curr_resync_completed > rdev->recovery_offset)
rdev->recovery_offset = mddev->curr_resync_completed;
}
}
if (!mddev->persistent) {
clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
clear_bit(MD_CHANGE_DEVS, &mddev->flags);
......@@ -2812,7 +2803,6 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len)
return len;
}
static struct rdev_sysfs_entry rdev_slot =
__ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
......@@ -3009,7 +2999,6 @@ rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
static struct rdev_sysfs_entry rdev_size =
__ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
static ssize_t recovery_start_show(struct md_rdev *rdev, char *page)
{
unsigned long long recovery_start = rdev->recovery_offset;
......@@ -3045,7 +3034,6 @@ static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_
static struct rdev_sysfs_entry rdev_recovery_start =
__ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);
static ssize_t
badblocks_show(struct badblocks *bb, char *page, int unack);
static ssize_t
......@@ -3066,7 +3054,6 @@ static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len)
static struct rdev_sysfs_entry rdev_bad_blocks =
__ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store);
static ssize_t ubb_show(struct md_rdev *rdev, char *page)
{
return badblocks_show(&rdev->badblocks, page, 1);
......@@ -3223,7 +3210,7 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
if (!size) {
printk(KERN_WARNING
printk(KERN_WARNING
"md: %s has zero or unknown size, marking faulty!\n",
bdevname(rdev->bdev,b));
err = -EINVAL;
......@@ -3242,7 +3229,7 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
goto abort_free;
}
if (err < 0) {
printk(KERN_WARNING
printk(KERN_WARNING
"md: could not read %s's sb, not importing!\n",
bdevname(rdev->bdev,b));
goto abort_free;
......@@ -3263,8 +3250,7 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
* Check a full RAID array for plausibility
*/
static void analyze_sbs(struct mddev * mddev)
static void analyze_sbs(struct mddev *mddev)
{
int i;
struct md_rdev *rdev, *freshest, *tmp;
......@@ -3282,12 +3268,11 @@ static void analyze_sbs(struct mddev * mddev)
default:
printk( KERN_ERR \
"md: fatal superblock inconsistency in %s"
" -- removing from array\n",
" -- removing from array\n",
bdevname(rdev->bdev,b));
kick_rdev_from_array(rdev);
}
super_types[mddev->major_version].
validate_super(mddev, freshest);
......@@ -3326,7 +3311,7 @@ static void analyze_sbs(struct mddev * mddev)
/* Read a fixed-point number.
* Numbers in sysfs attributes should be in "standard" units where
* possible, so time should be in seconds.
* However we internally use a a much smaller unit such as
* However we internally use a a much smaller unit such as
* milliseconds or jiffies.
* This function takes a decimal number with a possible fractional
* component, and produces an integer which is the result of
......@@ -3363,7 +3348,6 @@ int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
return 0;
}
static void md_safemode_timeout(unsigned long data);
static ssize_t
......@@ -3506,7 +3490,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
/* Looks like we have a winner */
mddev_suspend(mddev);
mddev->pers->stop(mddev);
if (mddev->pers->sync_request == NULL &&
pers->sync_request != NULL) {
/* need to add the md_redundancy_group */
......@@ -3515,7 +3499,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
"md: cannot register extra attributes for %s\n",
mdname(mddev));
mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
}
}
if (mddev->pers->sync_request != NULL &&
pers->sync_request == NULL) {
/* need to remove the md_redundancy_group */
......@@ -3593,7 +3577,6 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
static struct md_sysfs_entry md_level =
__ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
static ssize_t
layout_show(struct mddev *mddev, char *page)
{
......@@ -3636,7 +3619,6 @@ layout_store(struct mddev *mddev, const char *buf, size_t len)
static struct md_sysfs_entry md_layout =
__ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
static ssize_t
raid_disks_show(struct mddev *mddev, char *page)
{
......@@ -3841,9 +3823,9 @@ array_state_show(struct mddev *mddev, char *page)
return sprintf(page, "%s\n", array_states[st]);
}
static int do_md_stop(struct mddev * mddev, int ro, struct block_device *bdev);
static int md_set_readonly(struct mddev * mddev, struct block_device *bdev);
static int do_md_run(struct mddev * mddev);
static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev);
static int md_set_readonly(struct mddev *mddev, struct block_device *bdev);
static int do_md_run(struct mddev *mddev);
static int restart_array(struct mddev *mddev);
static ssize_t
......@@ -3994,7 +3976,6 @@ new_dev_store(struct mddev *mddev, const char *buf, size_t len)
minor != MINOR(dev))
return -EOVERFLOW;
if (mddev->persistent) {
rdev = md_import_device(dev, mddev->major_version,
mddev->minor_version);
......@@ -4090,7 +4071,6 @@ size_store(struct mddev *mddev, const char *buf, size_t len)
static struct md_sysfs_entry md_size =
__ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
/* Metadata version.
* This is one of
* 'none' for arrays with no metadata (good luck...)
......@@ -4472,7 +4452,7 @@ suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
unsigned long long new = simple_strtoull(buf, &e, 10);
unsigned long long old = mddev->suspend_lo;
if (mddev->pers == NULL ||
if (mddev->pers == NULL ||
mddev->pers->quiesce == NULL)
return -EINVAL;
if (buf == e || (*e && *e != '\n'))
......@@ -4492,7 +4472,6 @@ suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
static struct md_sysfs_entry md_suspend_lo =
__ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
static ssize_t
suspend_hi_show(struct mddev *mddev, char *page)
{
......@@ -4680,7 +4659,6 @@ static struct attribute_group md_redundancy_group = {
.attrs = md_redundancy_attrs,
};
static ssize_t
md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
......@@ -5093,7 +5071,7 @@ int md_run(struct mddev *mddev)
} else if (mddev->ro == 2) /* auto-readonly not meaningful */
mddev->ro = 0;
atomic_set(&mddev->writes_pending,0);
atomic_set(&mddev->writes_pending,0);
atomic_set(&mddev->max_corr_read_errors,
MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
mddev->safemode = 0;
......@@ -5107,9 +5085,9 @@ int md_run(struct mddev *mddev)
if (rdev->raid_disk >= 0)
if (sysfs_link_rdev(mddev, rdev))
/* failure here is OK */;
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
if (mddev->flags & MD_UPDATE_SB_FLAGS)
md_update_sb(mddev, 0);
......@@ -5321,7 +5299,7 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
* 0 - completely stop and dis-assemble array
* 2 - stop but do not disassemble array
*/
static int do_md_stop(struct mddev * mddev, int mode,
static int do_md_stop(struct mddev *mddev, int mode,
struct block_device *bdev)
{
struct gendisk *disk = mddev->gendisk;
......@@ -5494,12 +5472,12 @@ static void autorun_devices(int part)
"md: cannot allocate memory for md drive.\n");
break;
}
if (mddev_lock(mddev))
if (mddev_lock(mddev))
printk(KERN_WARNING "md: %s locked, cannot run\n",
mdname(mddev));
else if (mddev->raid_disks || mddev->major_version
|| !list_empty(&mddev->disks)) {
printk(KERN_WARNING
printk(KERN_WARNING
"md: %s already running, cannot run %s\n",
mdname(mddev), bdevname(rdev0->bdev,b));
mddev_unlock(mddev);
......@@ -5527,7 +5505,7 @@ static void autorun_devices(int part)
}
#endif /* !MODULE */
static int get_version(void __user * arg)
static int get_version(void __user *arg)
{
mdu_version_t ver;
......@@ -5541,7 +5519,7 @@ static int get_version(void __user * arg)
return 0;
}
static int get_array_info(struct mddev * mddev, void __user * arg)
static int get_array_info(struct mddev *mddev, void __user *arg)
{
mdu_array_info_t info;
int nr,working,insync,failed,spare;
......@@ -5556,7 +5534,7 @@ static int get_array_info(struct mddev * mddev, void __user * arg)
else {
working++;
if (test_bit(In_sync, &rdev->flags))
insync++;
insync++;
else
spare++;
}
......@@ -5596,7 +5574,7 @@ static int get_array_info(struct mddev * mddev, void __user * arg)
return 0;
}
static int get_bitmap_file(struct mddev * mddev, void __user * arg)
static int get_bitmap_file(struct mddev *mddev, void __user * arg)
{
mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
char *ptr, *buf = NULL;
......@@ -5634,7 +5612,7 @@ static int get_bitmap_file(struct mddev * mddev, void __user * arg)
return err;
}
static int get_disk_info(struct mddev * mddev, void __user * arg)
static int get_disk_info(struct mddev *mddev, void __user * arg)
{
mdu_disk_info_t info;
struct md_rdev *rdev;
......@@ -5670,7 +5648,7 @@ static int get_disk_info(struct mddev * mddev, void __user * arg)
return 0;
}
static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info)
static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
{
char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
struct md_rdev *rdev;
......@@ -5684,7 +5662,7 @@ static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info)
/* expecting a device which has a superblock */
rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
if (IS_ERR(rdev)) {
printk(KERN_WARNING
printk(KERN_WARNING
"md: md_import_device returned %ld\n",
PTR_ERR(rdev));
return PTR_ERR(rdev);
......@@ -5696,9 +5674,9 @@ static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info)
err = super_types[mddev->major_version]
.load_super(rdev, rdev0, mddev->minor_version);
if (err < 0) {
printk(KERN_WARNING
printk(KERN_WARNING
"md: %s has different UUID to %s\n",
bdevname(rdev->bdev,b),
bdevname(rdev->bdev,b),
bdevname(rdev0->bdev,b2));
export_rdev(rdev);
return -EINVAL;
......@@ -5718,7 +5696,7 @@ static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info)
if (mddev->pers) {
int err;
if (!mddev->pers->hot_add_disk) {
printk(KERN_WARNING
printk(KERN_WARNING
"%s: personality does not support diskops!\n",
mdname(mddev));
return -EINVAL;
......@@ -5729,7 +5707,7 @@ static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info)
else
rdev = md_import_device(dev, -1, -1);
if (IS_ERR(rdev)) {
printk(KERN_WARNING
printk(KERN_WARNING
"md: md_import_device returned %ld\n",
PTR_ERR(rdev));
return PTR_ERR(rdev);
......@@ -5803,7 +5781,7 @@ static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info)
int err;
rdev = md_import_device(dev, -1, 0);
if (IS_ERR(rdev)) {
printk(KERN_WARNING
printk(KERN_WARNING
"md: error, md_import_device() returned %ld\n",
PTR_ERR(rdev));
return PTR_ERR(rdev);
......@@ -5838,7 +5816,7 @@ static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info)
return 0;
}
static int hot_remove_disk(struct mddev * mddev, dev_t dev)
static int hot_remove_disk(struct mddev *mddev, dev_t dev)
{
char b[BDEVNAME_SIZE];
struct md_rdev *rdev;
......@@ -5864,7 +5842,7 @@ static int hot_remove_disk(struct mddev * mddev, dev_t dev)
return -EBUSY;
}
static int hot_add_disk(struct mddev * mddev, dev_t dev)
static int hot_add_disk(struct mddev *mddev, dev_t dev)
{
char b[BDEVNAME_SIZE];
int err;
......@@ -5880,7 +5858,7 @@ static int hot_add_disk(struct mddev * mddev, dev_t dev)
return -EINVAL;
}
if (!mddev->pers->hot_add_disk) {
printk(KERN_WARNING
printk(KERN_WARNING
"%s: personality does not support diskops!\n",
mdname(mddev));
return -EINVAL;
......@@ -5888,7 +5866,7 @@ static int hot_add_disk(struct mddev * mddev, dev_t dev)
rdev = md_import_device(dev, -1, 0);
if (IS_ERR(rdev)) {
printk(KERN_WARNING
printk(KERN_WARNING
"md: error, md_import_device() returned %ld\n",
PTR_ERR(rdev));
return -EINVAL;
......@@ -5902,7 +5880,7 @@ static int hot_add_disk(struct mddev * mddev, dev_t dev)
rdev->sectors = rdev->sb_start;
if (test_bit(Faulty, &rdev->flags)) {
printk(KERN_WARNING
printk(KERN_WARNING
"md: can not hot-add faulty %s disk to %s!\n",
bdevname(rdev->bdev,b), mdname(mddev));
err = -EINVAL;
......@@ -5950,7 +5928,6 @@ static int set_bitmap_file(struct mddev *mddev, int fd)
/* we should be able to change the bitmap.. */
}
if (fd >= 0) {
struct inode *inode;
if (mddev->bitmap)
......@@ -6021,7 +5998,7 @@ static int set_bitmap_file(struct mddev *mddev, int fd)
* The minor and patch _version numbers are also kept incase the
* super_block handler wishes to interpret them.
*/
static int set_array_info(struct mddev * mddev, mdu_array_info_t *info)
static int set_array_info(struct mddev *mddev, mdu_array_info_t *info)
{
if (info->raid_disks == 0) {
......@@ -6030,7 +6007,7 @@ static int set_array_info(struct mddev * mddev, mdu_array_info_t *info)
info->major_version >= ARRAY_SIZE(super_types) ||
super_types[info->major_version].name == NULL) {
/* maybe try to auto-load a module? */
printk(KERN_INFO
printk(KERN_INFO
"md: superblock version %d not known\n",
info->major_version);
return -EINVAL;
......@@ -6178,7 +6155,6 @@ static int update_raid_disks(struct mddev *mddev, int raid_disks)
return rv;
}
/*
* update_array_info is used to change the configuration of an
* on-line array.
......@@ -6447,7 +6423,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
}
err = mddev_lock(mddev);
if (err) {
printk(KERN_INFO
printk(KERN_INFO
"md: ioctl lock interrupted, reason %d, cmd %d\n",
err, cmd);
goto abort;
......@@ -6708,7 +6684,7 @@ static int md_open(struct block_device *bdev, fmode_t mode)
static void md_release(struct gendisk *disk, fmode_t mode)
{
struct mddev *mddev = disk->private_data;
struct mddev *mddev = disk->private_data;
BUG_ON(!mddev);
atomic_dec(&mddev->openers);
......@@ -6743,7 +6719,7 @@ static const struct block_device_operations md_fops =
.revalidate_disk= md_revalidate,
};
static int md_thread(void * arg)
static int md_thread(void *arg)
{
struct md_thread *thread = arg;
......@@ -6880,8 +6856,7 @@ static void status_unused(struct seq_file *seq)
seq_printf(seq, "\n");
}
static void status_resync(struct seq_file *seq, struct mddev * mddev)
static void status_resync(struct seq_file *seq, struct mddev *mddev)
{
sector_t max_sectors, resync, res;
unsigned long dt, db;
......@@ -7003,7 +6978,7 @@ static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct list_head *tmp;
struct mddev *next_mddev, *mddev = v;
++*pos;
if (v == (void*)2)
return NULL;
......@@ -7018,7 +6993,7 @@ static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
else {
next_mddev = (void*)2;
*pos = 0x10000;
}
}
spin_unlock(&all_mddevs_lock);
if (v != (void*)1)
......@@ -7114,7 +7089,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
if (mddev->pers) {
mddev->pers->status(seq, mddev);
seq_printf(seq, "\n ");
seq_printf(seq, "\n ");
if (mddev->pers->sync_request) {
if (mddev->curr_resync > 2) {
status_resync(seq, mddev);
......@@ -7132,7 +7107,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, "\n");
}
mddev_unlock(mddev);
return 0;
}
......@@ -7205,7 +7180,7 @@ int unregister_md_personality(struct md_personality *p)
static int is_mddev_idle(struct mddev *mddev, int init)
{
struct md_rdev * rdev;
struct md_rdev *rdev;
int idle;
int curr_events;
......@@ -7260,7 +7235,6 @@ void md_done_sync(struct mddev *mddev, int blocks, int ok)
}
}
/* md_write_start(mddev, bi)
* If we need to update some array metadata (e.g. 'active' flag
* in superblock) before writing, schedule a superblock update
......@@ -8637,7 +8611,6 @@ void md_autodetect_dev(dev_t dev)
}
}
static void autostart_arrays(int part)
{
struct md_rdev *rdev;
......
/*
md.h : kernel internal structure of the Linux MD driver
Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
You should have received a copy of the GNU General Public License
(for example /usr/src/linux/COPYING); if not, write to the Free
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _MD_MD_H
......@@ -56,7 +56,7 @@ struct md_rdev {
__u64 sb_events;
sector_t data_offset; /* start of data in array */
sector_t new_data_offset;/* only relevant while reshaping */
sector_t sb_start; /* offset of the super block (in 512byte sectors) */
sector_t sb_start; /* offset of the super block (in 512byte sectors) */
int sb_size; /* bytes in the superblock */
int preferred_minor; /* autorun support */
......@@ -239,7 +239,7 @@ struct mddev {
minor_version,
patch_version;
int persistent;
int external; /* metadata is
int external; /* metadata is
* managed externally */
char metadata_type[17]; /* externally set*/
int chunk_sectors;
......@@ -248,7 +248,7 @@ struct mddev {
char clevel[16];
int raid_disks;
int max_disks;
sector_t dev_sectors; /* used size of
sector_t dev_sectors; /* used size of
* component devices */
sector_t array_sectors; /* exported array size */
int external_size; /* size managed
......@@ -312,7 +312,7 @@ struct mddev {
int parallel_resync;
int ok_start_degraded;
/* recovery/resync flags
/* recovery/resync flags
* NEEDED: we might need to start a resync/recover
* RUNNING: a thread is running, or about to be started
* SYNC: actually doing a resync, not a recovery
......@@ -392,20 +392,20 @@ struct mddev {
unsigned int safemode; /* if set, update "clean" superblock
* when no writes pending.
*/
*/
unsigned int safemode_delay;
struct timer_list safemode_timer;
atomic_t writes_pending;
atomic_t writes_pending;
struct request_queue *queue; /* for plugging ... */
struct bitmap *bitmap; /* the bitmap for the device */
struct bitmap *bitmap; /* the bitmap for the device */
struct {
struct file *file; /* the bitmap file */
loff_t offset; /* offset from superblock of
* start of bitmap. May be
* negative, but not '0'
* For external metadata, offset
* from start of device.
* from start of device.
*/
unsigned long space; /* space available at this offset */
loff_t default_offset; /* this is the offset to use when
......@@ -421,7 +421,7 @@ struct mddev {
int external;
} bitmap_info;
atomic_t max_corr_read_errors; /* max read retries */
atomic_t max_corr_read_errors; /* max read retries */
struct list_head all_mddevs;
struct attribute_group *to_remove;
......@@ -439,7 +439,6 @@ struct mddev {
void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
};
static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
{
int faulty = test_bit(Faulty, &rdev->flags);
......@@ -449,7 +448,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
{
atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
}
struct md_personality
......@@ -463,7 +462,7 @@ struct md_personality
int (*stop)(struct mddev *mddev);
void (*status)(struct seq_file *seq, struct mddev *mddev);
/* error_handler must set ->faulty and clear ->in_sync
* if appropriate, and should abort recovery if needed
* if appropriate, and should abort recovery if needed
*/
void (*error_handler)(struct mddev *mddev, struct md_rdev *rdev);
int (*hot_add_disk) (struct mddev *mddev, struct md_rdev *rdev);
......@@ -493,7 +492,6 @@ struct md_personality
void *(*takeover) (struct mddev *mddev);
};
struct md_sysfs_entry {
struct attribute attr;
ssize_t (*show)(struct mddev *, char *);
......@@ -560,7 +558,7 @@ struct md_thread {
void (*run) (struct md_thread *thread);
struct mddev *mddev;
wait_queue_head_t wqueue;
unsigned long flags;
unsigned long flags;
struct task_struct *tsk;
unsigned long timeout;
void *private;
......@@ -594,7 +592,7 @@ extern void md_flush_request(struct mddev *mddev, struct bio *bio);
extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
sector_t sector, int size, struct page *page);
extern void md_super_wait(struct mddev *mddev);
extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
struct page *page, int rw, bool metadata_op);
extern void md_do_sync(struct md_thread *thread);
extern void md_new_event(struct mddev *mddev);
......
......@@ -31,13 +31,12 @@
#define NR_RESERVED_BUFS 32
static int multipath_map (struct mpconf *conf)
{
int i, disks = conf->raid_disks;
/*
* Later we do read balancing on the read side
* Later we do read balancing on the read side
* now we use the first available disk.
*/
......@@ -68,7 +67,6 @@ static void multipath_reschedule_retry (struct multipath_bh *mp_bh)
md_wakeup_thread(mddev->thread);
}
/*
* multipath_end_bh_io() is called when we have finished servicing a multipathed
* operation and are ready to return a success/failure code to the buffer
......@@ -98,8 +96,8 @@ static void multipath_end_request(struct bio *bio, int error)
*/
char b[BDEVNAME_SIZE];
md_error (mp_bh->mddev, rdev);
printk(KERN_ERR "multipath: %s: rescheduling sector %llu\n",
bdevname(rdev->bdev,b),
printk(KERN_ERR "multipath: %s: rescheduling sector %llu\n",
bdevname(rdev->bdev,b),
(unsigned long long)bio->bi_iter.bi_sector);
multipath_reschedule_retry(mp_bh);
} else
......@@ -145,12 +143,12 @@ static void multipath_status (struct seq_file *seq, struct mddev *mddev)
{
struct mpconf *conf = mddev->private;
int i;
seq_printf (seq, " [%d/%d] [", conf->raid_disks,
conf->raid_disks - mddev->degraded);
for (i = 0; i < conf->raid_disks; i++)
seq_printf (seq, "%s",
conf->multipaths[i].rdev &&
conf->multipaths[i].rdev &&
test_bit(In_sync, &conf->multipaths[i].rdev->flags) ? "U" : "_");
seq_printf (seq, "]");
}
......@@ -195,7 +193,7 @@ static void multipath_error (struct mddev *mddev, struct md_rdev *rdev)
* first check if this is a queued request for a device
* which has just failed.
*/
printk(KERN_ALERT
printk(KERN_ALERT
"multipath: only one IO path left and IO error.\n");
/* leave it active... it's all we have */
return;
......@@ -242,7 +240,6 @@ static void print_multipath_conf (struct mpconf *conf)
}
}
static int multipath_add_disk(struct mddev *mddev, struct md_rdev *rdev)
{
struct mpconf *conf = mddev->private;
......@@ -325,8 +322,6 @@ static int multipath_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
return err;
}
/*
* This is a kernel thread which:
*
......@@ -356,7 +351,7 @@ static void multipathd(struct md_thread *thread)
bio = &mp_bh->bio;
bio->bi_iter.bi_sector = mp_bh->master_bio->bi_iter.bi_sector;
if ((mp_bh->path = multipath_map (conf))<0) {
printk(KERN_ALERT "multipath: %s: unrecoverable IO read"
" error for block %llu\n",
......@@ -414,7 +409,7 @@ static int multipath_run (struct mddev *mddev)
conf = kzalloc(sizeof(struct mpconf), GFP_KERNEL);
mddev->private = conf;
if (!conf) {
printk(KERN_ERR
printk(KERN_ERR
"multipath: couldn't allocate memory for %s\n",
mdname(mddev));
goto out;
......@@ -423,7 +418,7 @@ static int multipath_run (struct mddev *mddev)
conf->multipaths = kzalloc(sizeof(struct multipath_info)*mddev->raid_disks,
GFP_KERNEL);
if (!conf->multipaths) {
printk(KERN_ERR
printk(KERN_ERR
"multipath: couldn't allocate memory for %s\n",
mdname(mddev));
goto out_free_conf;
......@@ -469,7 +464,7 @@ static int multipath_run (struct mddev *mddev)
conf->pool = mempool_create_kmalloc_pool(NR_RESERVED_BUFS,
sizeof(struct multipath_bh));
if (conf->pool == NULL) {
printk(KERN_ERR
printk(KERN_ERR
"multipath: couldn't allocate memory for %s\n",
mdname(mddev));
goto out_free_conf;
......@@ -485,7 +480,7 @@ static int multipath_run (struct mddev *mddev)
}
}
printk(KERN_INFO
printk(KERN_INFO
"multipath: array %s active with %d out of %d IO paths\n",
mdname(mddev), conf->raid_disks - mddev->degraded,
mddev->raid_disks);
......@@ -512,7 +507,6 @@ static int multipath_run (struct mddev *mddev)
return -EIO;
}
static int multipath_stop (struct mddev *mddev)
{
struct mpconf *conf = mddev->private;
......
/*
raid0.c : Multiple Devices driver for Linux
Copyright (C) 1994-96 Marc ZYNGIER
Copyright (C) 1994-96 Marc ZYNGIER
<zyngier@ufr-info-p7.ibp.fr> or
<maz@gloups.fdn.fr>
Copyright (C) 1999, 2000 Ingo Molnar, Red Hat
Copyright (C) 1999, 2000 Ingo Molnar, Red Hat
RAID-0 management functions.
......@@ -12,10 +11,10 @@
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
You should have received a copy of the GNU General Public License
(for example /usr/src/linux/COPYING); if not, write to the Free
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/blkdev.h>
......
......@@ -494,7 +494,6 @@ static void raid1_end_write_request(struct bio *bio, int error)
bio_put(to_put);
}
/*
* This routine returns the disk from which the requested read should
* be done. There is a per-array 'next expected sequential IO' sector
......@@ -1001,8 +1000,7 @@ static void unfreeze_array(struct r1conf *conf)
spin_unlock_irq(&conf->resync_lock);
}
/* duplicate the data pages for behind I/O
/* duplicate the data pages for behind I/O
*/
static void alloc_behind_pages(struct bio *bio, struct r1bio *r1_bio)
{
......@@ -1471,7 +1469,6 @@ static void status(struct seq_file *seq, struct mddev *mddev)
seq_printf(seq, "]");
}
static void error(struct mddev *mddev, struct md_rdev *rdev)
{
char b[BDEVNAME_SIZE];
......@@ -1565,7 +1562,7 @@ static int raid1_spare_active(struct mddev *mddev)
unsigned long flags;
/*
* Find all failed disks within the RAID1 configuration
* Find all failed disks within the RAID1 configuration
* and mark them readable.
* Called under mddev lock, so rcu protection not needed.
*/
......@@ -1606,7 +1603,6 @@ static int raid1_spare_active(struct mddev *mddev)
return count;
}
static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
{
struct r1conf *conf = mddev->private;
......@@ -1735,7 +1731,6 @@ static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
return err;
}
static void end_sync_read(struct bio *bio, int error)
{
struct r1bio *r1_bio = bio->bi_private;
......@@ -2457,7 +2452,6 @@ static void raid1d(struct md_thread *thread)
blk_finish_plug(&plug);
}
static int init_resync(struct r1conf *conf)
{
int buffs;
......@@ -2946,9 +2940,9 @@ static int run(struct mddev *mddev)
printk(KERN_NOTICE "md/raid1:%s: not clean"
" -- starting background reconstruction\n",
mdname(mddev));
printk(KERN_INFO
printk(KERN_INFO
"md/raid1:%s: active with %d out of %d mirrors\n",
mdname(mddev), mddev->raid_disks - mddev->degraded,
mdname(mddev), mddev->raid_disks - mddev->degraded,
mddev->raid_disks);
/*
......
......@@ -90,7 +90,6 @@ struct r1conf {
*/
int recovery_disabled;
/* poolinfo contains information about the content of the
* mempools - it changes when the array grows or shrinks
*/
......@@ -103,7 +102,6 @@ struct r1conf {
*/
struct page *tmppage;
/* When taking over an array from a different personality, we store
* the new thread here until we fully activate the array.
*/
......
......@@ -366,7 +366,6 @@ static void raid10_end_read_request(struct bio *bio, int error)
struct md_rdev *rdev;
struct r10conf *conf = r10_bio->mddev->private;
slot = r10_bio->read_slot;
dev = r10_bio->devs[slot].devnum;
rdev = r10_bio->devs[slot].rdev;
......@@ -1559,7 +1558,6 @@ static void make_request(struct mddev *mddev, struct bio *bio)
md_write_start(mddev, bio);
do {
/*
......@@ -1782,7 +1780,6 @@ static int raid10_spare_active(struct mddev *mddev)
return count;
}
static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
{
struct r10conf *conf = mddev->private;
......@@ -1929,7 +1926,6 @@ static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
return err;
}
static void end_sync_read(struct bio *bio, int error)
{
struct r10bio *r10_bio = bio->bi_private;
......@@ -2295,7 +2291,6 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
}
}
/*
* Used by fix_read_error() to decay the per rdev read_errors.
* We halve the read error count for every hour that has elapsed
......@@ -2852,7 +2847,6 @@ static void raid10d(struct md_thread *thread)
blk_finish_plug(&plug);
}
static int init_resync(struct r10conf *conf)
{
int buffs;
......@@ -3776,7 +3770,6 @@ static int run(struct mddev *mddev)
blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
}
if (md_integrity_register(mddev))
goto out_free_conf;
......@@ -4577,7 +4570,6 @@ static void end_reshape(struct r10conf *conf)
conf->fullsync = 0;
}
static int handle_reshape_read_error(struct mddev *mddev,
struct r10bio *r10_bio)
{
......
......@@ -463,7 +463,6 @@ static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh)
hlist_add_head(&sh->hash, hp);
}
/* find an idle stripe, make sure it is unhashed, and return it. */
static struct stripe_head *get_free_stripe(struct r5conf *conf, int hash)
{
......@@ -540,7 +539,6 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
stripe_set_idx(sector, conf, previous, sh);
sh->state = 0;
for (i = sh->disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
......@@ -1348,7 +1346,6 @@ ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
}
}
static void ops_complete_prexor(void *stripe_head_ref)
{
struct stripe_head *sh = stripe_head_ref;
......@@ -2417,7 +2414,6 @@ static sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
return new_sector;
}
static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
{
struct r5conf *conf = sh->raid_conf;
......@@ -2435,7 +2431,6 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
sector_t r_sector;
struct stripe_head sh2;
chunk_offset = sector_div(new_sector, sectors_per_chunk);
stripe = new_sector;
......@@ -2539,7 +2534,6 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
return r_sector;
}
static void
schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
int rcw, int expand)
......@@ -3011,7 +3005,6 @@ static void handle_stripe_fill(struct stripe_head *sh,
set_bit(STRIPE_HANDLE, &sh->state);
}
/* handle_stripe_clean_event
* any written block on an uptodate or failed drive can be returned.
* Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
......@@ -3302,7 +3295,6 @@ static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
}
}
static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
struct stripe_head_state *s,
int disks)
......@@ -3937,7 +3929,6 @@ static void handle_stripe(struct stripe_head *sh)
}
}
/* Finish reconstruct operations initiated by the expansion process */
if (sh->reconstruct_state == reconstruct_state_result) {
struct stripe_head *sh_src
......@@ -4135,7 +4126,6 @@ static int raid5_mergeable_bvec(struct request_queue *q,
return max;
}
static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
{
sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev);
......@@ -4165,7 +4155,6 @@ static void add_bio_to_retry(struct bio *bi,struct r5conf *conf)
md_wakeup_thread(conf->mddev->thread);
}
static struct bio *remove_bio_from_retry(struct r5conf *conf)
{
struct bio *bi;
......@@ -4189,7 +4178,6 @@ static struct bio *remove_bio_from_retry(struct r5conf *conf)
return bi;
}
/*
* The "raid5_align_endio" should check if the read succeeded and if it
* did, call bio_endio on the original bio (having bio_put the new bio
......@@ -4222,7 +4210,6 @@ static void raid5_align_endio(struct bio *bi, int error)
return;
}
pr_debug("raid5_align_endio : io error...handing IO for a retry\n");
add_bio_to_retry(raid_bi, conf);
......@@ -4247,7 +4234,6 @@ static int bio_fits_rdev(struct bio *bi)
return 1;
}
static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
{
struct r5conf *conf = mddev->private;
......@@ -5444,7 +5430,6 @@ raid5_skip_copy = __ATTR(skip_copy, S_IRUGO | S_IWUSR,
raid5_show_skip_copy,
raid5_store_skip_copy);
static ssize_t
stripe_cache_active_show(struct mddev *mddev, char *page)
{
......@@ -5896,7 +5881,6 @@ static struct r5conf *setup_conf(struct mddev *mddev)
return ERR_PTR(-ENOMEM);
}
static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded)
{
switch (algo) {
......@@ -5909,7 +5893,7 @@ static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded
return 1;
break;
case ALGORITHM_PARITY_0_6:
if (raid_disk == 0 ||
if (raid_disk == 0 ||
raid_disk == raid_disks - 1)
return 1;
break;
......@@ -6163,7 +6147,6 @@ static int run(struct mddev *mddev)
"reshape");
}
/* Ok, everything is just fine now */
if (mddev->to_remove == &raid5_attrs_group)
mddev->to_remove = NULL;
......@@ -6812,7 +6795,6 @@ static void raid5_quiesce(struct mddev *mddev, int state)
}
}
static void *raid45_takeover_raid0(struct mddev *mddev, int level)
{
struct r0conf *raid0_conf = mddev->private;
......@@ -6839,7 +6821,6 @@ static void *raid45_takeover_raid0(struct mddev *mddev, int level)
return setup_conf(mddev);
}
static void *raid5_takeover_raid1(struct mddev *mddev)
{
int chunksect;
......@@ -6900,7 +6881,6 @@ static void *raid5_takeover_raid6(struct mddev *mddev)
return setup_conf(mddev);
}
static int raid5_check_reshape(struct mddev *mddev)
{
/* For a 2-drive array, the layout and chunk size can be changed
......@@ -7049,7 +7029,6 @@ static void *raid6_takeover(struct mddev *mddev)
return setup_conf(mddev);
}
static struct md_personality raid6_personality =
{
.name = "raid6",
......
......@@ -155,7 +155,7 @@
*/
/*
* Operations state - intermediate states that are visible outside of
* Operations state - intermediate states that are visible outside of
* STRIPE_ACTIVE.
* In general _idle indicates nothing is running, _run indicates a data
* processing operation is active, and _result means the data processing result
......@@ -364,7 +364,6 @@ enum {
* HANDLE gets cleared if stripe_handle leaves nothing locked.
*/
struct disk_info {
struct md_rdev *rdev, *replacement;
};
......@@ -528,7 +527,6 @@ struct r5conf {
#define ALGORITHM_ROTATING_N_RESTART 9 /* DDF PRL=6 RLQ=2 */
#define ALGORITHM_ROTATING_N_CONTINUE 10 /*DDF PRL=6 RLQ=3 */
/* For every RAID5 algorithm we define a RAID6 algorithm
* with exactly the same layout for data and parity, and
* with the Q block always on the last device (N-1).
......