未验证 提交 401a3d64 编写于 作者: O openeuler-ci-bot 提交者: Gitee

!484 Backport CVEs and bugfixes

Merge Pull Request from: @zhangjialin11 
 
Pull new CVEs:
CVE-2023-1074

scsi bugfixes from Zhong Jinghua
ring-buffer bugfix from Zheng Yejian
block bugfixes from Yu Kuai
fs bugfixes from Zhihao Cheng, Long Li and Zhang Yi
 
 
Link: https://gitee.com/openeuler/kernel/pulls/484 

Reviewed-by: Zheng Zengkai <zhengzengkai@huawei.com> 
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com> 
...@@ -1374,15 +1374,18 @@ void blk_account_io_start(struct request *rq) ...@@ -1374,15 +1374,18 @@ void blk_account_io_start(struct request *rq)
} }
static unsigned long __part_start_io_acct(struct hd_struct *part, static unsigned long __part_start_io_acct(struct hd_struct *part,
unsigned int sectors, unsigned int op) unsigned int sectors, unsigned int op,
bool precise)
{ {
const int sgrp = op_stat_group(op); const int sgrp = op_stat_group(op);
unsigned long now = READ_ONCE(jiffies); unsigned long now = READ_ONCE(jiffies);
part_stat_lock(); part_stat_lock();
update_io_ticks(part, now, false); update_io_ticks(part, now, false);
part_stat_inc(part, ios[sgrp]); if (!precise) {
part_stat_add(part, sectors[sgrp], sectors); part_stat_inc(part, ios[sgrp]);
part_stat_add(part, sectors[sgrp], sectors);
}
part_stat_local_inc(part, in_flight[op_is_write(op)]); part_stat_local_inc(part, in_flight[op_is_write(op)]);
part_stat_unlock(); part_stat_unlock();
...@@ -1394,19 +1397,21 @@ unsigned long part_start_io_acct(struct gendisk *disk, struct hd_struct **part, ...@@ -1394,19 +1397,21 @@ unsigned long part_start_io_acct(struct gendisk *disk, struct hd_struct **part,
{ {
*part = disk_map_sector_rcu(disk, bio->bi_iter.bi_sector); *part = disk_map_sector_rcu(disk, bio->bi_iter.bi_sector);
return __part_start_io_acct(*part, bio_sectors(bio), bio_op(bio)); return __part_start_io_acct(*part, bio_sectors(bio), bio_op(bio),
false);
} }
EXPORT_SYMBOL_GPL(part_start_io_acct); EXPORT_SYMBOL_GPL(part_start_io_acct);
unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors, unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
unsigned int op) unsigned int op)
{ {
return __part_start_io_acct(&disk->part0, sectors, op); return __part_start_io_acct(&disk->part0, sectors, op, false);
} }
EXPORT_SYMBOL(disk_start_io_acct); EXPORT_SYMBOL(disk_start_io_acct);
static void __part_end_io_acct(struct hd_struct *part, unsigned int op, static void __part_end_io_acct(struct hd_struct *part, unsigned int sectors,
unsigned long start_time) unsigned int op, unsigned long start_time,
bool precise)
{ {
const int sgrp = op_stat_group(op); const int sgrp = op_stat_group(op);
unsigned long now = READ_ONCE(jiffies); unsigned long now = READ_ONCE(jiffies);
...@@ -1414,6 +1419,10 @@ static void __part_end_io_acct(struct hd_struct *part, unsigned int op, ...@@ -1414,6 +1419,10 @@ static void __part_end_io_acct(struct hd_struct *part, unsigned int op,
part_stat_lock(); part_stat_lock();
update_io_ticks(part, now, true); update_io_ticks(part, now, true);
if (precise) {
part_stat_inc(part, ios[sgrp]);
part_stat_add(part, sectors[sgrp], sectors);
}
part_stat_add(part, nsecs[sgrp], jiffies_to_nsecs(duration)); part_stat_add(part, nsecs[sgrp], jiffies_to_nsecs(duration));
part_stat_local_dec(part, in_flight[op_is_write(op)]); part_stat_local_dec(part, in_flight[op_is_write(op)]);
part_stat_unlock(); part_stat_unlock();
...@@ -1422,7 +1431,7 @@ static void __part_end_io_acct(struct hd_struct *part, unsigned int op, ...@@ -1422,7 +1431,7 @@ static void __part_end_io_acct(struct hd_struct *part, unsigned int op,
void part_end_io_acct(struct hd_struct *part, struct bio *bio, void part_end_io_acct(struct hd_struct *part, struct bio *bio,
unsigned long start_time) unsigned long start_time)
{ {
__part_end_io_acct(part, bio_op(bio), start_time); __part_end_io_acct(part, 0, bio_op(bio), start_time, false);
hd_struct_put(part); hd_struct_put(part);
} }
EXPORT_SYMBOL_GPL(part_end_io_acct); EXPORT_SYMBOL_GPL(part_end_io_acct);
...@@ -1430,10 +1439,42 @@ EXPORT_SYMBOL_GPL(part_end_io_acct); ...@@ -1430,10 +1439,42 @@ EXPORT_SYMBOL_GPL(part_end_io_acct);
void disk_end_io_acct(struct gendisk *disk, unsigned int op, void disk_end_io_acct(struct gendisk *disk, unsigned int op,
unsigned long start_time) unsigned long start_time)
{ {
__part_end_io_acct(&disk->part0, op, start_time); __part_end_io_acct(&disk->part0, 0, op, start_time, false);
} }
EXPORT_SYMBOL(disk_end_io_acct); EXPORT_SYMBOL(disk_end_io_acct);
unsigned long part_start_precise_io_acct(struct gendisk *disk,
struct hd_struct **part,
struct bio *bio)
{
*part = disk_map_sector_rcu(disk, bio->bi_iter.bi_sector);
return __part_start_io_acct(*part, 0, bio_op(bio), true);
}
EXPORT_SYMBOL_GPL(part_start_precise_io_acct);
unsigned long disk_start_precise_io_acct(struct gendisk *disk, unsigned int op)
{
return __part_start_io_acct(&disk->part0, 0, op, true);
}
EXPORT_SYMBOL(disk_start_precise_io_acct);
void part_end_precise_io_acct(struct hd_struct *part, struct bio *bio,
unsigned long start_time)
{
__part_end_io_acct(part, bio_sectors(bio), bio_op(bio), start_time,
true);
hd_struct_put(part);
}
EXPORT_SYMBOL_GPL(part_end_precise_io_acct);
void disk_end_precise_io_acct(struct gendisk *disk, unsigned int sectors,
unsigned int op, unsigned long start_time)
{
__part_end_io_acct(&disk->part0, sectors, op, start_time, true);
}
EXPORT_SYMBOL(disk_end_precise_io_acct);
/* /*
* Steal bios from a request and add them to a bio list. * Steal bios from a request and add them to a bio list.
* The request must not have been partially completed before. * The request must not have been partially completed before.
......
...@@ -301,7 +301,7 @@ static void call_bio_endio(struct r1bio *r1_bio) ...@@ -301,7 +301,7 @@ static void call_bio_endio(struct r1bio *r1_bio)
bio->bi_status = BLK_STS_IOERR; bio->bi_status = BLK_STS_IOERR;
if (blk_queue_io_stat(bio->bi_disk->queue)) if (blk_queue_io_stat(bio->bi_disk->queue))
bio_end_io_acct(bio, r1_bio->start_time); bio_end_precise_io_acct(bio, r1_bio->start_time);
bio_endio(bio); bio_endio(bio);
} }
...@@ -1295,7 +1295,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio, ...@@ -1295,7 +1295,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
r1_bio->read_disk = rdisk; r1_bio->read_disk = rdisk;
if (!r1bio_existed && blk_queue_io_stat(bio->bi_disk->queue)) if (!r1bio_existed && blk_queue_io_stat(bio->bi_disk->queue))
r1_bio->start_time = bio_start_io_acct(bio); r1_bio->start_time = bio_start_precise_io_acct(bio);
read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set); read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set);
...@@ -1487,7 +1487,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, ...@@ -1487,7 +1487,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
} }
if (blk_queue_io_stat(bio->bi_disk->queue)) if (blk_queue_io_stat(bio->bi_disk->queue))
r1_bio->start_time = bio_start_io_acct(bio); r1_bio->start_time = bio_start_precise_io_acct(bio);
atomic_set(&r1_bio->remaining, 1); atomic_set(&r1_bio->remaining, 1);
atomic_set(&r1_bio->behind_remaining, 0); atomic_set(&r1_bio->behind_remaining, 0);
......
...@@ -298,7 +298,7 @@ static void raid_end_bio_io(struct r10bio *r10_bio) ...@@ -298,7 +298,7 @@ static void raid_end_bio_io(struct r10bio *r10_bio)
bio->bi_status = BLK_STS_IOERR; bio->bi_status = BLK_STS_IOERR;
if (blk_queue_io_stat(bio->bi_disk->queue)) if (blk_queue_io_stat(bio->bi_disk->queue))
bio_end_io_acct(bio, r10_bio->start_time); bio_end_precise_io_acct(bio, r10_bio->start_time);
bio_endio(bio); bio_endio(bio);
/* /*
* Wake up any possible resync thread that waits for the device * Wake up any possible resync thread that waits for the device
...@@ -1188,7 +1188,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio, ...@@ -1188,7 +1188,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
slot = r10_bio->read_slot; slot = r10_bio->read_slot;
if (!handle_error && blk_queue_io_stat(bio->bi_disk->queue)) if (!handle_error && blk_queue_io_stat(bio->bi_disk->queue))
r10_bio->start_time = bio_start_io_acct(bio); r10_bio->start_time = bio_start_precise_io_acct(bio);
read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set); read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set);
r10_bio->devs[slot].bio = read_bio; r10_bio->devs[slot].bio = read_bio;
...@@ -1473,7 +1473,7 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio, ...@@ -1473,7 +1473,7 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
} }
if (blk_queue_io_stat(bio->bi_disk->queue)) if (blk_queue_io_stat(bio->bi_disk->queue))
r10_bio->start_time = bio_start_io_acct(bio); r10_bio->start_time = bio_start_precise_io_acct(bio);
atomic_set(&r10_bio->remaining, 1); atomic_set(&r10_bio->remaining, 1);
md_bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0); md_bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);
...@@ -3624,6 +3624,20 @@ static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new) ...@@ -3624,6 +3624,20 @@ static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
return nc*fc; return nc*fc;
} }
static void raid10_free_conf(struct r10conf *conf)
{
if (!conf)
return;
mempool_exit(&conf->r10bio_pool);
kfree(conf->mirrors);
kfree(conf->mirrors_old);
kfree(conf->mirrors_new);
safe_put_page(conf->tmppage);
bioset_exit(&conf->bio_split);
kfree(conf);
}
static struct r10conf *setup_conf(struct mddev *mddev) static struct r10conf *setup_conf(struct mddev *mddev)
{ {
struct r10conf *conf = NULL; struct r10conf *conf = NULL;
...@@ -3706,13 +3720,7 @@ static struct r10conf *setup_conf(struct mddev *mddev) ...@@ -3706,13 +3720,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
return conf; return conf;
out: out:
if (conf) { raid10_free_conf(conf);
mempool_exit(&conf->r10bio_pool);
kfree(conf->mirrors);
safe_put_page(conf->tmppage);
bioset_exit(&conf->bio_split);
kfree(conf);
}
return ERR_PTR(err); return ERR_PTR(err);
} }
...@@ -3918,10 +3926,7 @@ static int raid10_run(struct mddev *mddev) ...@@ -3918,10 +3926,7 @@ static int raid10_run(struct mddev *mddev)
out_free_conf: out_free_conf:
md_unregister_thread(&mddev->thread); md_unregister_thread(&mddev->thread);
mempool_exit(&conf->r10bio_pool); raid10_free_conf(conf);
safe_put_page(conf->tmppage);
kfree(conf->mirrors);
kfree(conf);
mddev->private = NULL; mddev->private = NULL;
out: out:
return -EIO; return -EIO;
...@@ -3929,15 +3934,7 @@ static int raid10_run(struct mddev *mddev) ...@@ -3929,15 +3934,7 @@ static int raid10_run(struct mddev *mddev)
static void raid10_free(struct mddev *mddev, void *priv) static void raid10_free(struct mddev *mddev, void *priv)
{ {
struct r10conf *conf = priv; raid10_free_conf(priv);
mempool_exit(&conf->r10bio_pool);
safe_put_page(conf->tmppage);
kfree(conf->mirrors);
kfree(conf->mirrors_old);
kfree(conf->mirrors_new);
bioset_exit(&conf->bio_split);
kfree(conf);
} }
static void raid10_quiesce(struct mddev *mddev, int quiesce) static void raid10_quiesce(struct mddev *mddev, int quiesce)
......
...@@ -783,7 +783,7 @@ static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost, ...@@ -783,7 +783,7 @@ static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost,
enum iscsi_host_param param, char *buf) enum iscsi_host_param param, char *buf)
{ {
struct iscsi_sw_tcp_host *tcp_sw_host = iscsi_host_priv(shost); struct iscsi_sw_tcp_host *tcp_sw_host = iscsi_host_priv(shost);
struct iscsi_session *session = tcp_sw_host->session; struct iscsi_session *session;
struct iscsi_conn *conn; struct iscsi_conn *conn;
struct iscsi_tcp_conn *tcp_conn; struct iscsi_tcp_conn *tcp_conn;
struct iscsi_sw_tcp_conn *tcp_sw_conn; struct iscsi_sw_tcp_conn *tcp_sw_conn;
...@@ -793,6 +793,7 @@ static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost, ...@@ -793,6 +793,7 @@ static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost,
switch (param) { switch (param) {
case ISCSI_HOST_PARAM_IPADDRESS: case ISCSI_HOST_PARAM_IPADDRESS:
session = tcp_sw_host->session;
if (!session) if (!session)
return -ENOTCONN; return -ENOTCONN;
...@@ -889,11 +890,13 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max, ...@@ -889,11 +890,13 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
if (!cls_session) if (!cls_session)
goto remove_host; goto remove_host;
session = cls_session->dd_data; session = cls_session->dd_data;
tcp_sw_host = iscsi_host_priv(shost);
tcp_sw_host->session = session;
if (iscsi_tcp_r2tpool_alloc(session)) if (iscsi_tcp_r2tpool_alloc(session))
goto remove_session; goto remove_session;
/* We are now fully setup so expose the session to sysfs. */
tcp_sw_host = iscsi_host_priv(shost);
tcp_sw_host->session = session;
return cls_session; return cls_session;
remove_session: remove_session:
...@@ -913,10 +916,17 @@ static void iscsi_sw_tcp_session_destroy(struct iscsi_cls_session *cls_session) ...@@ -913,10 +916,17 @@ static void iscsi_sw_tcp_session_destroy(struct iscsi_cls_session *cls_session)
if (WARN_ON_ONCE(session->leadconn)) if (WARN_ON_ONCE(session->leadconn))
return; return;
iscsi_session_remove(cls_session);
/*
* Our get_host_param needs to access the session, so remove the
* host from sysfs before freeing the session to make sure userspace
* is no longer accessing the callout.
*/
iscsi_host_remove(shost);
iscsi_tcp_r2tpool_free(cls_session->dd_data); iscsi_tcp_r2tpool_free(cls_session->dd_data);
iscsi_session_teardown(cls_session);
iscsi_host_remove(shost); iscsi_session_free(cls_session);
iscsi_host_free(shost); iscsi_host_free(shost);
} }
......
...@@ -2983,20 +2983,34 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost, ...@@ -2983,20 +2983,34 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
} }
EXPORT_SYMBOL_GPL(iscsi_session_setup); EXPORT_SYMBOL_GPL(iscsi_session_setup);
/*
* iscsi_session_remove - Remove session from iSCSI class.
*/
void iscsi_session_remove(struct iscsi_cls_session *cls_session)
{
struct iscsi_session *session = cls_session->dd_data;
struct Scsi_Host *shost = session->host;
iscsi_remove_session(cls_session);
/*
* host removal only has to wait for its children to be removed from
* sysfs, and iscsi_tcp needs to do iscsi_host_remove before freeing
* the session, so drop the session count here.
*/
iscsi_host_dec_session_cnt(shost);
}
EXPORT_SYMBOL_GPL(iscsi_session_remove);
/** /**
* iscsi_session_teardown - destroy session, host, and cls_session * iscsi_session_free - Free iscsi session and its resources
* @cls_session: iscsi session * @cls_session: iscsi session
*/ */
void iscsi_session_teardown(struct iscsi_cls_session *cls_session) void iscsi_session_free(struct iscsi_cls_session *cls_session)
{ {
struct iscsi_session *session = cls_session->dd_data; struct iscsi_session *session = cls_session->dd_data;
struct module *owner = cls_session->transport->owner; struct module *owner = cls_session->transport->owner;
struct Scsi_Host *shost = session->host;
iscsi_pool_free(&session->cmdpool); iscsi_pool_free(&session->cmdpool);
iscsi_remove_session(cls_session);
kfree(session->password); kfree(session->password);
kfree(session->password_in); kfree(session->password_in);
kfree(session->username); kfree(session->username);
...@@ -3012,10 +3026,19 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session) ...@@ -3012,10 +3026,19 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
kfree(session->discovery_parent_type); kfree(session->discovery_parent_type);
iscsi_free_session(cls_session); iscsi_free_session(cls_session);
iscsi_host_dec_session_cnt(shost);
module_put(owner); module_put(owner);
} }
EXPORT_SYMBOL_GPL(iscsi_session_free);
/**
* iscsi_session_teardown - destroy session and cls_session
* @cls_session: iscsi session
*/
void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
{
iscsi_session_remove(cls_session);
iscsi_session_free(cls_session);
}
EXPORT_SYMBOL_GPL(iscsi_session_teardown); EXPORT_SYMBOL_GPL(iscsi_session_teardown);
/** /**
......
...@@ -1503,6 +1503,40 @@ void scsi_remove_device(struct scsi_device *sdev) ...@@ -1503,6 +1503,40 @@ void scsi_remove_device(struct scsi_device *sdev)
} }
EXPORT_SYMBOL(scsi_remove_device); EXPORT_SYMBOL(scsi_remove_device);
/* Cancel the inflight async probe for scsi_device */
static void __scsi_kill_devices(struct scsi_target *starget)
{
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
struct scsi_device *sdev, *to_put = NULL;
unsigned long flags;
spin_lock_irqsave(shost->host_lock, flags);
list_for_each_entry(sdev, &shost->__devices, siblings) {
if (sdev->channel != starget->channel ||
sdev->id != starget->id)
continue;
if ((sdev->sdev_state != SDEV_DEL &&
sdev->sdev_state != SDEV_CANCEL) || !sdev->is_visible)
continue;
if (!kobject_get_unless_zero(&sdev->sdev_gendev.kobj))
continue;
spin_unlock_irqrestore(shost->host_lock, flags);
if (to_put)
put_device(&to_put->sdev_gendev);
device_lock(&sdev->sdev_gendev);
kill_device(&sdev->sdev_gendev);
device_unlock(&sdev->sdev_gendev);
to_put = sdev;
spin_lock_irqsave(shost->host_lock, flags);
}
spin_unlock_irqrestore(shost->host_lock, flags);
if (to_put)
put_device(&to_put->sdev_gendev);
}
static void __scsi_remove_target(struct scsi_target *starget) static void __scsi_remove_target(struct scsi_target *starget)
{ {
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
...@@ -1532,6 +1566,8 @@ static void __scsi_remove_target(struct scsi_target *starget) ...@@ -1532,6 +1566,8 @@ static void __scsi_remove_target(struct scsi_target *starget)
goto restart; goto restart;
} }
spin_unlock_irqrestore(shost->host_lock, flags); spin_unlock_irqrestore(shost->host_lock, flags);
__scsi_kill_devices(starget);
} }
/** /**
...@@ -1556,7 +1592,16 @@ void scsi_remove_target(struct device *dev) ...@@ -1556,7 +1592,16 @@ void scsi_remove_target(struct device *dev)
starget->state == STARGET_CREATED_REMOVE) starget->state == STARGET_CREATED_REMOVE)
continue; continue;
if (starget->dev.parent == dev || &starget->dev == dev) { if (starget->dev.parent == dev || &starget->dev == dev) {
kref_get(&starget->reap_ref); /*
* If the reference count is already zero, skip
* this target. Calling kref_get_unless_zero() if
* the reference count is zero is safe because
* scsi_target_destroy() will wait until the host
* lock has been released before freeing starget.
*/
if (!kref_get_unless_zero(&starget->reap_ref))
continue;
if (starget->state == STARGET_CREATED) if (starget->state == STARGET_CREATED)
starget->state = STARGET_CREATED_REMOVE; starget->state = STARGET_CREATED_REMOVE;
else else
......
...@@ -303,6 +303,22 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb, ...@@ -303,6 +303,22 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
return desc; return desc;
} }
static ext4_fsblk_t ext4_valid_block_bitmap_padding(struct super_block *sb,
ext4_group_t block_group,
struct buffer_head *bh)
{
ext4_grpblk_t next_zero_bit;
unsigned long bitmap_size = sb->s_blocksize * 8;
unsigned int offset = num_clusters_in_group(sb, block_group);
if (bitmap_size <= offset)
return 0;
next_zero_bit = ext4_find_next_zero_bit(bh->b_data, bitmap_size, offset);
return (next_zero_bit < bitmap_size ? next_zero_bit : 0);
}
/* /*
* Return the block number which was discovered to be invalid, or 0 if * Return the block number which was discovered to be invalid, or 0 if
* the block bitmap is valid. * the block bitmap is valid.
...@@ -401,6 +417,15 @@ static int ext4_validate_block_bitmap(struct super_block *sb, ...@@ -401,6 +417,15 @@ static int ext4_validate_block_bitmap(struct super_block *sb,
EXT4_GROUP_INFO_BBITMAP_CORRUPT); EXT4_GROUP_INFO_BBITMAP_CORRUPT);
return -EFSCORRUPTED; return -EFSCORRUPTED;
} }
blk = ext4_valid_block_bitmap_padding(sb, block_group, bh);
if (unlikely(blk != 0)) {
ext4_unlock_group(sb, block_group);
ext4_error(sb, "bg %u: block %llu: padding at end of block bitmap is not set",
block_group, blk);
ext4_mark_group_bitmap_corrupted(sb, block_group,
EXT4_GROUP_INFO_BBITMAP_CORRUPT);
return -EFSCORRUPTED;
}
set_buffer_verified(bh); set_buffer_verified(bh);
verified: verified:
ext4_unlock_group(sb, block_group); ext4_unlock_group(sb, block_group);
......
...@@ -1462,6 +1462,7 @@ struct ext4_sb_info { ...@@ -1462,6 +1462,7 @@ struct ext4_sb_info {
unsigned int s_mount_opt2; unsigned int s_mount_opt2;
unsigned long s_mount_flags; unsigned long s_mount_flags;
unsigned int s_def_mount_opt; unsigned int s_def_mount_opt;
unsigned int s_def_mount_opt2;
ext4_fsblk_t s_sb_block; ext4_fsblk_t s_sb_block;
atomic64_t s_resv_clusters; atomic64_t s_resv_clusters;
kuid_t s_resuid; kuid_t s_resuid;
......
...@@ -2586,7 +2586,7 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb, ...@@ -2586,7 +2586,7 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
{ {
struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_super_block *es = sbi->s_es; struct ext4_super_block *es = sbi->s_es;
int def_errors, def_mount_opt = sbi->s_def_mount_opt; int def_errors;
const struct mount_opts *m; const struct mount_opts *m;
char sep = nodefs ? '\n' : ','; char sep = nodefs ? '\n' : ',';
...@@ -2598,15 +2598,28 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb, ...@@ -2598,15 +2598,28 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
for (m = ext4_mount_opts; m->token != Opt_err; m++) { for (m = ext4_mount_opts; m->token != Opt_err; m++) {
int want_set = m->flags & MOPT_SET; int want_set = m->flags & MOPT_SET;
int opt_2 = m->flags & MOPT_2;
unsigned int mount_opt, def_mount_opt;
if (((m->flags & (MOPT_SET|MOPT_CLEAR)) == 0) || if (((m->flags & (MOPT_SET|MOPT_CLEAR)) == 0) ||
(m->flags & MOPT_CLEAR_ERR) || m->flags & MOPT_SKIP) (m->flags & MOPT_CLEAR_ERR) || m->flags & MOPT_SKIP)
continue; continue;
if (!nodefs && !(m->mount_opt & (sbi->s_mount_opt ^ def_mount_opt)))
continue; /* skip if same as the default */ if (opt_2) {
mount_opt = sbi->s_mount_opt2;
def_mount_opt = sbi->s_def_mount_opt2;
} else {
mount_opt = sbi->s_mount_opt;
def_mount_opt = sbi->s_def_mount_opt;
}
/* skip if same as the default */
if (!nodefs && !(m->mount_opt & (mount_opt ^ def_mount_opt)))
continue;
/* select Opt_noFoo vs Opt_Foo */
if ((want_set && if ((want_set &&
(sbi->s_mount_opt & m->mount_opt) != m->mount_opt) || (mount_opt & m->mount_opt) != m->mount_opt) ||
(!want_set && (sbi->s_mount_opt & m->mount_opt))) (!want_set && (mount_opt & m->mount_opt)))
continue; /* select Opt_noFoo vs Opt_Foo */ continue;
SEQ_OPTS_PRINT("%s", token2str(m->token)); SEQ_OPTS_PRINT("%s", token2str(m->token));
} }
...@@ -2636,7 +2649,7 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb, ...@@ -2636,7 +2649,7 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
if (nodefs || sbi->s_stripe) if (nodefs || sbi->s_stripe)
SEQ_OPTS_PRINT("stripe=%lu", sbi->s_stripe); SEQ_OPTS_PRINT("stripe=%lu", sbi->s_stripe);
if (nodefs || EXT4_MOUNT_DATA_FLAGS & if (nodefs || EXT4_MOUNT_DATA_FLAGS &
(sbi->s_mount_opt ^ def_mount_opt)) { (sbi->s_mount_opt ^ sbi->s_def_mount_opt)) {
if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
SEQ_OPTS_PUTS("data=journal"); SEQ_OPTS_PUTS("data=journal");
else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
...@@ -4341,6 +4354,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) ...@@ -4341,6 +4354,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
kfree(s_mount_opts); kfree(s_mount_opts);
} }
sbi->s_def_mount_opt = sbi->s_mount_opt; sbi->s_def_mount_opt = sbi->s_mount_opt;
sbi->s_def_mount_opt2 = sbi->s_mount_opt2;
if (!parse_options((char *) data, sb, &journal_devnum, if (!parse_options((char *) data, sb, &journal_devnum,
&journal_ioprio, 0)) &journal_ioprio, 0))
goto failed_mount; goto failed_mount;
......
...@@ -885,7 +885,7 @@ xfs_reclaim_inode( ...@@ -885,7 +885,7 @@ xfs_reclaim_inode(
if (XFS_FORCED_SHUTDOWN(ip->i_mount)) { if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
xfs_iunpin_wait(ip); xfs_iunpin_wait(ip);
xfs_iflush_abort(ip); xfs_iflush_shutdown_abort(ip);
goto reclaim; goto reclaim;
} }
if (xfs_ipincount(ip)) if (xfs_ipincount(ip))
......
...@@ -3725,7 +3725,7 @@ xfs_iflush_cluster( ...@@ -3725,7 +3725,7 @@ xfs_iflush_cluster(
/* /*
* We must use the safe variant here as on shutdown xfs_iflush_abort() * We must use the safe variant here as on shutdown xfs_iflush_abort()
* can remove itself from the list. * will remove itself from the list.
*/ */
list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) { list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) {
iip = (struct xfs_inode_log_item *)lip; iip = (struct xfs_inode_log_item *)lip;
......
...@@ -517,10 +517,17 @@ xfs_inode_item_push( ...@@ -517,10 +517,17 @@ xfs_inode_item_push(
uint rval = XFS_ITEM_SUCCESS; uint rval = XFS_ITEM_SUCCESS;
int error; int error;
ASSERT(iip->ili_item.li_buf); if (!bp || (ip->i_flags & XFS_ISTALE)) {
/*
* Inode item/buffer is being being aborted due to cluster
* buffer deletion. Trigger a log force to have that operation
* completed and items removed from the AIL before the next push
* attempt.
*/
return XFS_ITEM_PINNED;
}
if (xfs_ipincount(ip) > 0 || xfs_buf_ispinned(bp) || if (xfs_ipincount(ip) > 0 || xfs_buf_ispinned(bp))
(ip->i_flags & XFS_ISTALE))
return XFS_ITEM_PINNED; return XFS_ITEM_PINNED;
if (xfs_iflags_test(ip, XFS_IFLUSHING)) if (xfs_iflags_test(ip, XFS_IFLUSHING))
...@@ -796,46 +803,143 @@ xfs_buf_inode_io_fail( ...@@ -796,46 +803,143 @@ xfs_buf_inode_io_fail(
} }
/* /*
* This is the inode flushing abort routine. It is called when * Clear the inode logging fields so no more flushes are attempted. If we are
* the filesystem is shutting down to clean up the inode state. It is * on a buffer list, it is now safe to remove it because the buffer is
* responsible for removing the inode item from the AIL if it has not been * guaranteed to be locked. The caller will drop the reference to the buffer
* re-logged and clearing the inode's flush state. * the log item held.
*/
static void
xfs_iflush_abort_clean(
struct xfs_inode_log_item *iip)
{
iip->ili_last_fields = 0;
iip->ili_fields = 0;
iip->ili_fsync_fields = 0;
iip->ili_flush_lsn = 0;
iip->ili_item.li_buf = NULL;
list_del_init(&iip->ili_item.li_bio_list);
}
/*
* Abort flushing the inode from a context holding the cluster buffer locked.
*
* This is the normal runtime method of aborting writeback of an inode that is
* attached to a cluster buffer. It occurs when the inode and the backing
* cluster buffer have been freed (i.e. inode is XFS_ISTALE), or when cluster
* flushing or buffer IO completion encounters a log shutdown situation.
*
* If we need to abort inode writeback and we don't already hold the buffer
* locked, call xfs_iflush_shutdown_abort() instead as this should only ever be
* necessary in a shutdown situation.
*/ */
void void
xfs_iflush_abort( xfs_iflush_abort(
struct xfs_inode *ip) struct xfs_inode *ip)
{ {
struct xfs_inode_log_item *iip = ip->i_itemp; struct xfs_inode_log_item *iip = ip->i_itemp;
struct xfs_buf *bp = NULL; struct xfs_buf *bp;
if (iip) { if (!iip) {
/* /* clean inode, nothing to do */
* Clear the failed bit before removing the item from the AIL so xfs_iflags_clear(ip, XFS_IFLUSHING);
* xfs_trans_ail_delete() doesn't try to clear and release the return;
* buffer attached to the log item before we are done with it. }
*/
clear_bit(XFS_LI_FAILED, &iip->ili_item.li_flags); /*
xfs_trans_ail_delete(&iip->ili_item, 0); * Remove the inode item from the AIL before we clear its internal
* state. Whilst the inode is in the AIL, it should have a valid buffer
* pointer for push operations to access - it is only safe to remove the
* inode from the buffer once it has been removed from the AIL.
*
* We also clear the failed bit before removing the item from the AIL
* as xfs_trans_ail_delete()->xfs_clear_li_failed() will release buffer
* references the inode item owns and needs to hold until we've fully
* aborted the inode log item and detached it from the buffer.
*/
clear_bit(XFS_LI_FAILED, &iip->ili_item.li_flags);
xfs_trans_ail_delete(&iip->ili_item, 0);
/*
* Grab the inode buffer so can we release the reference the inode log
* item holds on it.
*/
spin_lock(&iip->ili_lock);
bp = iip->ili_item.li_buf;
xfs_iflush_abort_clean(iip);
spin_unlock(&iip->ili_lock);
xfs_iflags_clear(ip, XFS_IFLUSHING);
if (bp)
xfs_buf_rele(bp);
}
/*
* Abort an inode flush in the case of a shutdown filesystem. This can be called
* from anywhere with just an inode reference and does not require holding the
* inode cluster buffer locked. If the inode is attached to a cluster buffer,
* it will grab and lock it safely, then abort the inode flush.
*/
void
xfs_iflush_shutdown_abort(
struct xfs_inode *ip)
{
struct xfs_inode_log_item *iip = ip->i_itemp;
struct xfs_buf *bp;
if (!iip) {
/* clean inode, nothing to do */
xfs_iflags_clear(ip, XFS_IFLUSHING);
return;
}
spin_lock(&iip->ili_lock);
bp = iip->ili_item.li_buf;
if (!bp) {
spin_unlock(&iip->ili_lock);
xfs_iflush_abort(ip);
return;
}
/*
* We have to take a reference to the buffer so that it doesn't get
* freed when we drop the ili_lock and then wait to lock the buffer.
* We'll clean up the extra reference after we pick up the ili_lock
* again.
*/
xfs_buf_hold(bp);
spin_unlock(&iip->ili_lock);
xfs_buf_lock(bp);
spin_lock(&iip->ili_lock);
if (!iip->ili_item.li_buf) {
/* /*
* Clear the inode logging fields so no more flushes are * Raced with another removal, hold the only reference
* attempted. * to bp now. Inode should not be in the AIL now, so just clean
* up and return;
*/ */
spin_lock(&iip->ili_lock); ASSERT(list_empty(&iip->ili_item.li_bio_list));
iip->ili_last_fields = 0; ASSERT(!test_bit(XFS_LI_IN_AIL, &iip->ili_item.li_flags));
iip->ili_fields = 0; xfs_iflush_abort_clean(iip);
iip->ili_fsync_fields = 0;
iip->ili_flush_lsn = 0;
bp = iip->ili_item.li_buf;
iip->ili_item.li_buf = NULL;
list_del_init(&iip->ili_item.li_bio_list);
spin_unlock(&iip->ili_lock); spin_unlock(&iip->ili_lock);
xfs_iflags_clear(ip, XFS_IFLUSHING);
xfs_buf_relse(bp);
return;
} }
xfs_iflags_clear(ip, XFS_IFLUSHING);
if (bp) /*
xfs_buf_rele(bp); * Got two references to bp. The first will get dropped by
* xfs_iflush_abort() when the item is removed from the buffer list, but
* we can't drop our reference until _abort() returns because we have to
* unlock the buffer as well. Hence we abort and then unlock and release
* our reference to the buffer.
*/
ASSERT(iip->ili_item.li_buf == bp);
spin_unlock(&iip->ili_lock);
xfs_iflush_abort(ip);
xfs_buf_relse(bp);
} }
/* /*
* convert an xfs_inode_log_format struct from the old 32 bit version * convert an xfs_inode_log_format struct from the old 32 bit version
* (which can have different field alignments) to the native 64 bit version * (which can have different field alignments) to the native 64 bit version
......
...@@ -44,6 +44,7 @@ static inline int xfs_inode_clean(struct xfs_inode *ip) ...@@ -44,6 +44,7 @@ static inline int xfs_inode_clean(struct xfs_inode *ip)
extern void xfs_inode_item_init(struct xfs_inode *, struct xfs_mount *); extern void xfs_inode_item_init(struct xfs_inode *, struct xfs_mount *);
extern void xfs_inode_item_destroy(struct xfs_inode *); extern void xfs_inode_item_destroy(struct xfs_inode *);
extern void xfs_iflush_abort(struct xfs_inode *); extern void xfs_iflush_abort(struct xfs_inode *);
extern void xfs_iflush_shutdown_abort(struct xfs_inode *);
extern int xfs_inode_item_format_convert(xfs_log_iovec_t *, extern int xfs_inode_item_format_convert(xfs_log_iovec_t *,
struct xfs_inode_log_format *); struct xfs_inode_log_format *);
......
...@@ -2027,6 +2027,27 @@ static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time) ...@@ -2027,6 +2027,27 @@ static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time)
return disk_end_io_acct(bio->bi_disk, bio_op(bio), start_time); return disk_end_io_acct(bio->bi_disk, bio_op(bio), start_time);
} }
unsigned long disk_start_precise_io_acct(struct gendisk *disk, unsigned int op);
void disk_end_precise_io_acct(struct gendisk *disk, unsigned int sectors,
unsigned int op, unsigned long start_time);
unsigned long part_start_precise_io_acct(struct gendisk *disk,
struct hd_struct **part,
struct bio *bio);
void part_end_precise_io_acct(struct hd_struct *part, struct bio *bio,
unsigned long start_time);
static inline unsigned long bio_start_precise_io_acct(struct bio *bio)
{
return disk_start_precise_io_acct(bio->bi_disk, bio_op(bio));
}
static inline void bio_end_precise_io_acct(struct bio *bio,
unsigned long start_time)
{
return disk_end_precise_io_acct(bio->bi_disk, bio_sectors(bio),
bio_op(bio), start_time);
}
int bdev_read_only(struct block_device *bdev); int bdev_read_only(struct block_device *bdev);
int set_blocksize(struct block_device *bdev, int size); int set_blocksize(struct block_device *bdev, int size);
......
...@@ -404,6 +404,8 @@ extern int iscsi_host_get_max_scsi_cmds(struct Scsi_Host *shost, ...@@ -404,6 +404,8 @@ extern int iscsi_host_get_max_scsi_cmds(struct Scsi_Host *shost,
extern struct iscsi_cls_session * extern struct iscsi_cls_session *
iscsi_session_setup(struct iscsi_transport *, struct Scsi_Host *shost, iscsi_session_setup(struct iscsi_transport *, struct Scsi_Host *shost,
uint16_t, int, int, uint32_t, unsigned int); uint16_t, int, int, uint32_t, unsigned int);
void iscsi_session_remove(struct iscsi_cls_session *cls_session);
void iscsi_session_free(struct iscsi_cls_session *cls_session);
extern void iscsi_session_teardown(struct iscsi_cls_session *); extern void iscsi_session_teardown(struct iscsi_cls_session *);
extern void iscsi_session_recovery_timedout(struct iscsi_cls_session *); extern void iscsi_session_recovery_timedout(struct iscsi_cls_session *);
extern int iscsi_set_param(struct iscsi_cls_conn *cls_conn, extern int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
......
...@@ -2469,6 +2469,9 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer, ...@@ -2469,6 +2469,9 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
/* Mark the rest of the page with padding */ /* Mark the rest of the page with padding */
rb_event_set_padding(event); rb_event_set_padding(event);
/* Make sure the padding is visible before the write update */
smp_wmb();
/* Set the write back to the previous setting */ /* Set the write back to the previous setting */
local_sub(length, &tail_page->write); local_sub(length, &tail_page->write);
return; return;
...@@ -2480,6 +2483,9 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer, ...@@ -2480,6 +2483,9 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
/* time delta must be non zero */ /* time delta must be non zero */
event->time_delta = 1; event->time_delta = 1;
/* Make sure the padding is visible before the tail_page->write update */
smp_wmb();
/* Set write to end of buffer */ /* Set write to end of buffer */
length = (tail + length) - BUF_PAGE_SIZE; length = (tail + length) - BUF_PAGE_SIZE;
local_sub(length, &tail_page->write); local_sub(length, &tail_page->write);
...@@ -4294,6 +4300,33 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) ...@@ -4294,6 +4300,33 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
arch_spin_unlock(&cpu_buffer->lock); arch_spin_unlock(&cpu_buffer->lock);
local_irq_restore(flags); local_irq_restore(flags);
/*
* The writer has preempt disable, wait for it. But not forever
* Although, 1 second is pretty much "forever"
*/
#define USECS_WAIT 1000000
for (nr_loops = 0; nr_loops < USECS_WAIT; nr_loops++) {
/* If the write is past the end of page, a writer is still updating it */
if (likely(!reader || rb_page_write(reader) <= BUF_PAGE_SIZE))
break;
udelay(1);
/* Get the latest version of the reader write value */
smp_rmb();
}
/* The writer is not moving forward? Something is wrong */
if (RB_WARN_ON(cpu_buffer, nr_loops == USECS_WAIT))
reader = NULL;
/*
* Make sure we see any padding after the write update
* (see rb_reset_tail())
*/
smp_rmb();
return reader; return reader;
} }
......
...@@ -73,6 +73,12 @@ int sctp_bind_addr_copy(struct net *net, struct sctp_bind_addr *dest, ...@@ -73,6 +73,12 @@ int sctp_bind_addr_copy(struct net *net, struct sctp_bind_addr *dest,
} }
} }
/* If somehow no addresses were found that can be used with this
* scope, it's an error.
*/
if (list_empty(&dest->address_list))
error = -ENETUNREACH;
out: out:
if (error) if (error)
sctp_bind_addr_clean(dest); sctp_bind_addr_clean(dest);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册