/*
 * Functions related to generic helpers functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

/*
 * Completion callback for discard bios issued by blkdev_issue_discard().
 *
 * Records the outcome on the bio's own flag word: -EOPNOTSUPP is latched
 * via BIO_EOPNOTSUPP, and any error clears BIO_UPTODATE so the submitter
 * can translate the flags back into an errno after waiting.  If the
 * submitter parked a completion in bi_private, wake it, then drop the
 * callback's reference to the bio.
 */
static void blkdev_discard_end_io(struct bio *bio, int err)
{
	if (err == -EOPNOTSUPP)
		set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
	if (err)
		clear_bit(BIO_UPTODATE, &bio->bi_flags);

	if (bio->bi_private)
		complete(bio->bi_private);

	bio_put(bio);
}

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_IFL_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.  Discards are
 *    issued synchronously, one bio at a time, each capped at the queue's
 *    granularity-aligned maximum.  Returns 0 on success, -ENXIO if the
 *    device has no queue, -EOPNOTSUPP if (secure) discard is unsupported,
 *    -ENOMEM on allocation failure, or -EIO on I/O error.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	int type = REQ_WRITE | REQ_DISCARD;
	unsigned int max_discard_sectors;
	struct bio *bio;
	int ret = 0;

	if (!q)
		return -ENXIO;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	/*
	 * Ensure that max_discard_sectors is of the proper
	 * granularity (and capped so the byte size fits bi_size).
	 */
	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
	if (q->limits.discard_granularity) {
		unsigned int disc_sects = q->limits.discard_granularity >> 9;

		max_discard_sectors &= ~(disc_sects - 1);
	}

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secdiscard(q))
			return -EOPNOTSUPP;
		type |= REQ_SECURE;
	}

	while (nr_sects && !ret) {
		bio = bio_alloc(gfp_mask, 1);
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_sector = sector;
		bio->bi_end_io = blkdev_discard_end_io;
		bio->bi_bdev = bdev;
		bio->bi_private = &wait;

		if (nr_sects > max_discard_sectors) {
			bio->bi_size = max_discard_sectors << 9;
			nr_sects -= max_discard_sectors;
			sector += max_discard_sectors;
		} else {
			bio->bi_size = nr_sects << 9;
			nr_sects = 0;
		}

		/* Hold an extra ref so the bio outlives its end_io's bio_put. */
		bio_get(bio);
		submit_bio(type, bio);

		wait_for_completion(&wait);

		if (bio_flagged(bio, BIO_EOPNOTSUPP))
			ret = -EOPNOTSUPP;
		else if (!bio_flagged(bio, BIO_UPTODATE))
			ret = -EIO;
		bio_put(bio);
	}

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);

/*
 * Shared state for a batch of in-flight bios: the submitter initialises
 * one on its stack and points each bio's bi_private at it, then waits
 * until 'done' reaches the number of bios issued.
 */
struct bio_batch
{
	atomic_t 		done;	/* count of completed bios */
	unsigned long 		flags;	/* BIO_UPTODATE / BIO_EOPNOTSUPP result bits */
	struct completion 	*wait;	/* signalled once per completed bio */
	bio_end_io_t		*end_io;	/* optional per-bio completion hook */
};

static void bio_batch_end_io(struct bio *bio, int err)
{
	struct bio_batch *bb = bio->bi_private;
118

119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147
	if (err) {
		if (err == -EOPNOTSUPP)
			set_bit(BIO_EOPNOTSUPP, &bb->flags);
		else
			clear_bit(BIO_UPTODATE, &bb->flags);
	}
	if (bb) {
		if (bb->end_io)
			bb->end_io(bio, err);
		atomic_inc(&bb->done);
		complete(bb->wait);
	}
	bio_put(bio);
}

/**
 * blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *  Generate and issue a number of bios with zero-filled pages (all built
 *  from ZERO_PAGE(0)), then wait for them all to complete.  Returns 0 on
 *  success, -ENOMEM on allocation failure, -EIO if any bio failed, or
 *  -EOPNOTSUPP if the device rejected a request.
 */

int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			sector_t nr_sects, gfp_t gfp_mask)
{
	int ret;
	struct bio *bio;
	struct bio_batch bb;
	unsigned int sz, issued = 0;
	DECLARE_COMPLETION_ONSTACK(wait);

	atomic_set(&bb.done, 0);
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;
	bb.end_io = NULL;

submit:
	ret = 0;
	while (nr_sects != 0) {
		bio = bio_alloc(gfp_mask,
				min(nr_sects, (sector_t)BIO_MAX_PAGES));
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_sector = sector;
		bio->bi_bdev   = bdev;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_private = &bb;

		/* Fill the bio with zero pages, a page-worth of sectors at a time. */
		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE >> 9 , nr_sects);
			if (sz == 0)
				/* bio has maximum size possible */
				break;
			ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
			nr_sects -= ret >> 9;
			sector += ret >> 9;
			if (ret < (sz << 9))
				/* bio is full; submit it and start a new one */
				break;
		}
		ret = 0;
		issued++;
		submit_bio(WRITE, bio);
	}

	/* Wait for bios in-flight */
	while (issued != atomic_read(&bb.done))
		wait_for_completion(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		/* One of bios in the batch was completed with error.*/
		ret = -EIO;

	if (ret)
		goto out;

	if (test_bit(BIO_EOPNOTSUPP, &bb.flags)) {
		ret = -EOPNOTSUPP;
		goto out;
	}
	/* bio_add_page() hit the bio's size limit; issue another batch. */
	if (nr_sects != 0)
		goto submit;
out:
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);