/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

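/*
 * Check for an alignment gap at the queue's virt boundary between the
 * previous and the current iovec: if the previous segment does not end,
 * or the current one does not start, on a boundary-aligned address, the
 * buffer cannot be mapped directly and has to be copied instead.
 */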
static bool iovec_gap_to_prv(struct request_queue *q,
			     struct iovec *prv, struct iovec *cur)
{
	unsigned long prev_end;

	if (!queue_virt_boundary(q))
		return false;

	if (prv->iov_base == NULL && prv->iov_len == 0)
		/* prv is not set - don't check */
		return false;

	prev_end = (unsigned long)(prv->iov_base + prv->iov_len);

	return (((unsigned long)cur->iov_base & queue_virt_boundary(q)) ||
		prev_end & queue_virt_boundary(q));
}

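/*
 * Append a bio to a request: start the request's bio list if it is
 * empty, otherwise back-merge onto the tail when the queue's segment
 * limits allow it.
 */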
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio)
{
	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!ll_back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;

		rq->__data_len += bio->bi_iter.bi_size;
	}
	return 0;
}

static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

static int __blk_rq_map_user_iov(struct request *rq,
		struct rq_map_data *map_data, struct iov_iter *iter,
		gfp_t gfp_mask, bool copy)
{
	struct request_queue *q = rq->q;
	struct bio *bio, *orig_bio;
	int ret;

	if (copy)
		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
	else
		bio = bio_map_user_iov(q, iter, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (map_data && map_data->null_mapped)
		bio_set_flag(bio, BIO_NULL_MAPPED);

	iov_iter_advance(iter, bio->bi_iter.bi_size);
	if (map_data)
		map_data->offset += bio->bi_iter.bi_size;

	orig_bio = bio;
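	/*
	 * blk_queue_bounce() below may swap in a bounce bio; the error
	 * path must unmap the original bio, hence the pointer saved above.
	 */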
	blk_queue_bounce(q, &bio);

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later so we have to get a ref to prevent it from being freed
	 */
	bio_get(bio);

	ret = blk_rq_append_bio(q, rq, bio);
	if (ret) {
		bio_endio(bio);
		__blk_rq_unmap_user(orig_bio);
		bio_put(bio);
		return ret;
	}

	return 0;
}

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:   pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	struct iovec iov, prv = {.iov_base = NULL, .iov_len = 0};
	bool copy = (q->dma_pad_mask & iter->count) || map_data;
	struct bio *bio = NULL;
	struct iov_iter i;
	int ret;

	if (!iter || !iter->count)
		return -EINVAL;

	iov_for_each(iov, i, *iter) {
		unsigned long uaddr = (unsigned long) iov.iov_base;

		if (!iov.iov_len)
			return -EINVAL;

		/*
		 * Keep going so we check length of all segments
		 */
		if ((uaddr & queue_dma_alignment(q)) ||
		    iovec_gap_to_prv(q, &prv, &iov))
			copy = true;

		prv.iov_base = iov.iov_base;
		prv.iov_len = iov.iov_len;
	}

	i = *iter;
	do {
		ret = __blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
		if (ret)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
	} while (iov_iter_count(&i));

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;
	return 0;

unmap_rq:
	__blk_rq_unmap_user(bio);
	rq->bio = NULL;
	return -EINVAL;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
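
/*
 * Usage sketch (illustrative only, not part of this file), modelled on
 * the SG_IO path in drivers/scsi/sg.c: build an iov_iter from a
 * user-supplied iovec array and hand it to blk_rq_map_user_iov().
 * Assumes <linux/slab.h> for kfree(); error handling is abbreviated.
 */
static int example_map_user_iov(struct request_queue *q, struct request *rq,
				const struct iovec __user *uvecs,
				unsigned int nr_segs)
{
	struct iovec *iov = NULL;
	struct iov_iter i;
	ssize_t bytes;
	int ret;

	/* Copy the iovec array in from user space and wrap it. */
	bytes = import_iovec(rq_data_dir(rq), uvecs, nr_segs, 0, &iov, &i);
	if (bytes < 0)
		return bytes;

	/* Map (zero copy) or bounce-copy the user pages into rq->bio. */
	ret = blk_rq_map_user_iov(q, rq, NULL, &i, GFP_KERNEL);

	kfree(iov);	/* import_iovec() allocated this for us */
	return ret;
}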

int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	       start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
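
/*
 * Usage sketch (illustrative only), in the style of the SCSI ioctl
 * passthrough path: map a flat user buffer with blk_rq_map_user(),
 * execute the request, then unmap with the original rq->bio as the
 * description above requires. Command setup and error handling are
 * abbreviated.
 */
static int example_map_and_unmap_user(struct request_queue *q,
				      void __user *ubuf, unsigned long len)
{
	struct request *rq;
	struct bio *bio;
	int ret;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	rq->cmd_type = REQ_TYPE_BLOCK_PC;	/* passthrough request */

	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (ret)
		goto out;

	bio = rq->bio;	/* completion may change rq->bio, keep the original */
	blk_execute_rq(q, NULL, rq, 0);
	ret = blk_rq_unmap_user(bio);
out:
	blk_put_request(rq);
	return ret;
}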

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of user data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	int do_copy = 0;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (!reading)
		bio->bi_rw |= REQ_WRITE;

	if (do_copy)
		rq->cmd_flags |= REQ_COPY_USER;

	ret = blk_rq_append_bio(q, rq, bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(bio);
		return ret;
	}

	blk_queue_bounce(q, &rq->bio);
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
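
/*
 * Usage sketch (illustrative only), modelled on callers such as
 * scsi_execute(): attach a kernel buffer to a passthrough request. A
 * stack buffer would trip the object_is_on_stack() check above and be
 * copied, so heap or static buffers are preferred for zero copy.
 */
static int example_map_kern(struct request_queue *q, void *kbuf,
			    unsigned int len)
{
	struct request *rq;
	int ret;

	rq = blk_get_request(q, WRITE, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	rq->cmd_type = REQ_TYPE_BLOCK_PC;	/* passthrough request */

	ret = blk_rq_map_kern(q, rq, kbuf, len, GFP_KERNEL);
	if (!ret)
		blk_execute_rq(q, NULL, rq, 0);

	blk_put_request(rq);
	return ret;
}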