Commit e1777d09 authored by Damien Le Moal, committed by Jens Axboe

null_blk: Fix scheduling in atomic with zoned mode

Commit aa1c09cb ("null_blk: Fix locking in zoned mode") changed
zone locking to using the potentially sleeping wait_on_bit_io()
function. This is acceptable when memory backing is enabled as the
device queue is in that case marked as blocking, but this triggers a
scheduling while in atomic context with memory backing disabled.

Fix this by relying solely on the device zone spinlock for zone
information protection without temporarily releasing this lock around
null_process_cmd() execution in null_zone_write(). This is OK to do
since when memory backing is disabled, command processing does not
block and the memory backing lock nullb->lock is unused. This solution
avoids the overhead of having to mark a zoned null_blk device queue as
blocking when memory backing is unused.

This patch also adds comments to the zone locking code to explain the
unusual locking scheme.

Fixes: aa1c09cb ("null_blk: Fix locking in zoned mode")
Reported-by: kernel test robot <lkp@intel.com>
Signed-off-by: Damien Le Moal <damien.lemoal@wdc.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: stable@vger.kernel.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Parent 7ae7a8de
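
Before the diff, here is a condensed sketch of the zone locking scheme that results from this patch, assembled from the hunks below for readability. It is an illustrative summary, not the complete driver code.

/*
 * Illustrative summary of the locking scheme after this patch: the zone
 * spinlock is always taken, and the per-zone bit lock is layered on top of
 * it only when memory backing is enabled, since only that configuration may
 * sleep during command processing (its queue is marked BLK_MQ_F_BLOCKING).
 */
static inline void null_lock_zone(struct nullb_device *dev, unsigned int zno)
{
        if (dev->memory_backed)
                wait_on_bit_lock_io(dev->zone_locks, zno, TASK_UNINTERRUPTIBLE);
        spin_lock_irq(&dev->zone_lock);
}

static inline void null_unlock_zone(struct nullb_device *dev, unsigned int zno)
{
        spin_unlock_irq(&dev->zone_lock);

        if (dev->memory_backed)
                clear_and_wake_up_bit(zno, dev->zone_locks);
}

With memory backing disabled, command processing never sleeps while holding dev->zone_lock, which is what removes the scheduling-while-atomic condition described above.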
@@ -47,7 +47,7 @@ struct nullb_device {
         unsigned int nr_zones_closed;
         struct blk_zone *zones;
         sector_t zone_size_sects;
-        spinlock_t zone_dev_lock;
+        spinlock_t zone_lock;
         unsigned long *zone_locks;
 
         unsigned long size; /* device size in MB */
@@ -46,12 +46,21 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
         if (!dev->zones)
                 return -ENOMEM;
 
-        spin_lock_init(&dev->zone_dev_lock);
-        dev->zone_locks = bitmap_zalloc(dev->nr_zones, GFP_KERNEL);
-        if (!dev->zone_locks) {
-                kvfree(dev->zones);
-                return -ENOMEM;
+        /*
+         * With memory backing, the zone_lock spinlock needs to be temporarily
+         * released to avoid scheduling in atomic context. To guarantee zone
+         * information protection, use a bitmap to lock zones with
+         * wait_on_bit_lock_io(). Sleeping on the lock is OK as memory backing
+         * implies that the queue is marked with BLK_MQ_F_BLOCKING.
+         */
+        spin_lock_init(&dev->zone_lock);
+        if (dev->memory_backed) {
+                dev->zone_locks = bitmap_zalloc(dev->nr_zones, GFP_KERNEL);
+                if (!dev->zone_locks) {
+                        kvfree(dev->zones);
+                        return -ENOMEM;
+                }
         }
 
         if (dev->zone_nr_conv >= dev->nr_zones) {
                 dev->zone_nr_conv = dev->nr_zones - 1;
@@ -137,11 +146,16 @@ void null_free_zoned_dev(struct nullb_device *dev)
 
 static inline void null_lock_zone(struct nullb_device *dev, unsigned int zno)
 {
-        wait_on_bit_lock_io(dev->zone_locks, zno, TASK_UNINTERRUPTIBLE);
+        if (dev->memory_backed)
+                wait_on_bit_lock_io(dev->zone_locks, zno, TASK_UNINTERRUPTIBLE);
+        spin_lock_irq(&dev->zone_lock);
 }
 
 static inline void null_unlock_zone(struct nullb_device *dev, unsigned int zno)
 {
-        clear_and_wake_up_bit(zno, dev->zone_locks);
+        spin_unlock_irq(&dev->zone_lock);
+
+        if (dev->memory_backed)
+                clear_and_wake_up_bit(zno, dev->zone_locks);
 }
 
@@ -322,7 +336,6 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
                 return null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
 
         null_lock_zone(dev, zno);
-        spin_lock(&dev->zone_dev_lock);
 
         switch (zone->cond) {
         case BLK_ZONE_COND_FULL:
@@ -375,9 +388,17 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
         if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
                 zone->cond = BLK_ZONE_COND_IMP_OPEN;
 
-        spin_unlock(&dev->zone_dev_lock);
+        /*
+         * Memory backing allocation may sleep: release the zone_lock spinlock
+         * to avoid scheduling in atomic context. Zone operation atomicity is
+         * still guaranteed through the zone_locks bitmap.
+         */
+        if (dev->memory_backed)
+                spin_unlock_irq(&dev->zone_lock);
         ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
-        spin_lock(&dev->zone_dev_lock);
+        if (dev->memory_backed)
+                spin_lock_irq(&dev->zone_lock);
+
         if (ret != BLK_STS_OK)
                 goto unlock;
 
@@ -392,7 +413,6 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
         ret = BLK_STS_OK;
 
 unlock:
-        spin_unlock(&dev->zone_dev_lock);
         null_unlock_zone(dev, zno);
 
         return ret;
@@ -516,9 +536,7 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
                         null_lock_zone(dev, i);
                         zone = &dev->zones[i];
                         if (zone->cond != BLK_ZONE_COND_EMPTY) {
-                                spin_lock(&dev->zone_dev_lock);
                                 null_reset_zone(dev, zone);
-                                spin_unlock(&dev->zone_dev_lock);
                                 trace_nullb_zone_op(cmd, i, zone->cond);
                         }
                         null_unlock_zone(dev, i);
@@ -530,7 +548,6 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
         zone = &dev->zones[zone_no];
 
         null_lock_zone(dev, zone_no);
-        spin_lock(&dev->zone_dev_lock);
 
         switch (op) {
         case REQ_OP_ZONE_RESET:
@@ -550,8 +567,6 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
                 break;
         }
 
-        spin_unlock(&dev->zone_dev_lock);
-
         if (ret == BLK_STS_OK)
                 trace_nullb_zone_op(cmd, zone_no, zone->cond);
 
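
The null_zone_write() changes are spread across several hunks above; the following condensed sketch shows how the write path composes with the locking helpers. The function name and parameter list are simplified for illustration and do not match the real null_zone_write() signature, and the zone state handling is elided.

/* Hypothetical, condensed view of the write-path locking after this patch. */
static blk_status_t sketch_zone_write(struct nullb_device *dev,
                                      struct nullb_cmd *cmd, unsigned int zno,
                                      sector_t sector, unsigned int nr_sectors)
{
        blk_status_t ret;

        null_lock_zone(dev, zno);       /* bit lock (if memory backed) + spinlock */

        /* ... zone condition checks and write pointer validation ... */

        /*
         * null_process_cmd() may allocate backing pages and sleep, so the
         * spinlock is dropped around it only when memory backing is enabled;
         * the per-zone bit lock still keeps the zone update atomic.
         */
        if (dev->memory_backed)
                spin_unlock_irq(&dev->zone_lock);
        ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
        if (dev->memory_backed)
                spin_lock_irq(&dev->zone_lock);

        /* ... on success, advance zone->wp and update zone->cond ... */

        null_unlock_zone(dev, zno);
        return ret;
}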