/*
 *  linux/drivers/mmc/card/queue.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"

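/* Maximum size, in bytes, of the bounce buffer used for single-segment hosts */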
#define MMC_QUEUE_BOUNCESZ	65536

/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue *mq = q->queuedata;

	/*
	 * We only like normal block requests and discards.
	 */
	if (req->cmd_type != REQ_TYPE_FS && req_op(req) != REQ_OP_DISCARD &&
	    req_op(req) != REQ_OP_SECURE_ERASE) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
		return BLKPREP_KILL;

	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}

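/*
 * Worker thread that services the card's request queue: it fetches
 * requests, hands them to mq->issue_fn(), and swaps the current and
 * previous request slots so a new request can be prepared while the
 * previous one completes.
 */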
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);
		mq->mqrq_cur->req = req;
		spin_unlock_irq(q->queue_lock);

		if (req || mq->mqrq_prev->req) {
			set_current_state(TASK_RUNNING);
			mq->issue_fn(mq, req);
			cond_resched();
			if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
				mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
				continue; /* fetch again */
			}

			/*
			 * Current request becomes previous request
			 * and vice versa.
			 * In case of special requests, current request
			 * has been finished. Do not assign it to previous
			 * request.
			 */
			if (mmc_req_is_special(req))
				mq->mqrq_cur->req = NULL;

			mq->mqrq_prev->brq.mrq.data = NULL;
			mq->mqrq_prev->req = NULL;
			swap(mq->mqrq_prev, mq->mqrq_cur);
		} else {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
		}
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request_fn(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	unsigned long flags;
	struct mmc_context_info *cntx;

	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->cmd_flags |= REQ_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}

	cntx = &mq->card->host->context_info;
	if (!mq->mqrq_cur->req && mq->mqrq_prev->req) {
		/*
		 * A new request arrived while the MMC thread may still be
		 * blocked waiting for the previous request to complete,
		 * with no current request fetched.
		 */
		spin_lock_irqsave(&cntx->lock, flags);
		if (cntx->is_waiting_last_req) {
			cntx->is_new_req = true;
			wake_up_interruptible(&cntx->wait);
		}
		spin_unlock_irqrestore(&cntx->lock, flags);
	} else if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
		wake_up_process(mq->thread);
}

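/*
 * Allocate and initialise a scatterlist of @sg_len entries; on failure
 * *err is set to -ENOMEM and NULL is returned.
 */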
static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
{
	struct scatterlist *sg;

	sg = kmalloc(sizeof(struct scatterlist) * sg_len, GFP_KERNEL);
	if (!sg)
		*err = -ENOMEM;
	else {
		*err = 0;
		sg_init_table(sg, sg_len);
	}

	return sg;
}

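/*
 * Derive the block-layer discard limits (max discard sectors, granularity,
 * secure-erase capability) from the card's erase parameters.
 */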
static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	blk_queue_max_discard_sectors(q, max_discard);
	if (card->erased_byte == 0 && !mmc_can_discard(card))
		q->limits.discard_zeroes_data = 1;
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;
	if (mmc_can_secure_erase_trim(card))
		queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request_fn, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->mqrq_cur = mqrq_cur;
	mq->mqrq_prev = mqrq_prev;
	mq->queue->queuedata = mq;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
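	/*
	 * Hosts that can only handle a single segment per request get a
	 * contiguous bounce buffer (at most MMC_QUEUE_BOUNCESZ bytes) so
	 * that larger, scattered requests can still be issued in one go.
	 */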
	if (host->max_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512) {
			mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mqrq_cur->bounce_buf) {
				pr_warn("%s: unable to allocate bounce cur buffer\n",
					mmc_card_name(card));
			} else {
				mqrq_prev->bounce_buf =
						kmalloc(bouncesz, GFP_KERNEL);
				if (!mqrq_prev->bounce_buf) {
					pr_warn("%s: unable to allocate bounce prev buffer\n",
						mmc_card_name(card));
					kfree(mqrq_cur->bounce_buf);
					mqrq_cur->bounce_buf = NULL;
				}
			}
		}

		if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mqrq_cur->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_cur->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;
		}
	}
#endif

	if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;

		mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;
	}

	sema_init(&mq->thread_sem, 1);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
		host->index, subname ? subname : "");

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;
	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;

 cleanup_queue:
	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;
	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;
	kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	blk_cleanup_queue(mq->queue);
	return ret;
}

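/**
 * mmc_cleanup_queue - shut down and free an MMC request queue
 * @mq: MMC queue to clean up
 *
 * Resume the queue if it was suspended, stop the worker thread, fail
 * any remaining requests, and release the per-slot buffers.
 */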
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;
	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
	struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue: once queuedata is cleared, mmc_request_fn() fails everything with -EIO */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;

	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;

	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;

	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;

	kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

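/*
 * Allocate the packed-command bookkeeping for both request slots; both
 * allocations must succeed, otherwise the whole init fails with -ENOMEM.
 */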
int mmc_packed_init(struct mmc_queue *mq, struct mmc_card *card)
{
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
	int ret = 0;


	mqrq_cur->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
	if (!mqrq_cur->packed) {
		pr_warn("%s: unable to allocate packed cmd for mqrq_cur\n",
			mmc_card_name(card));
		ret = -ENOMEM;
		goto out;
	}

	mqrq_prev->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
	if (!mqrq_prev->packed) {
		pr_warn("%s: unable to allocate packed cmd for mqrq_prev\n",
			mmc_card_name(card));
		kfree(mqrq_cur->packed);
		mqrq_cur->packed = NULL;
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&mqrq_cur->packed->list);
	INIT_LIST_HEAD(&mqrq_prev->packed->list);

out:
	return ret;
}

void mmc_packed_clean(struct mmc_queue *mq)
{
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

	kfree(mqrq_cur->packed);
	mqrq_cur->packed = NULL;
	kfree(mqrq_prev->packed);
	mqrq_prev->packed = NULL;
}

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests.  This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

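/*
 * Map a packed command onto @sg.  For packed writes the packed command
 * header is mapped first, followed by the payload of each request on the
 * packed list; returns the number of scatterlist entries used.
 */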
static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
					    struct mmc_packed *packed,
					    struct scatterlist *sg,
					    enum mmc_packed_type cmd_type)
{
	struct scatterlist *__sg = sg;
	unsigned int sg_len = 0;
	struct request *req;

	if (mmc_packed_wr(cmd_type)) {
		unsigned int hdr_sz = mmc_large_sector(mq->card) ? 4096 : 512;
		unsigned int max_seg_sz = queue_max_segment_size(mq->queue);
		unsigned int len, remain, offset = 0;
		u8 *buf = (u8 *)packed->cmd_hdr;

		remain = hdr_sz;
		do {
			len = min(remain, max_seg_sz);
			sg_set_buf(__sg, buf + offset, len);
			offset += len;
			remain -= len;
			sg_unmark_end(__sg++);
			sg_len++;
		} while (remain);
	}

	list_for_each_entry(req, &packed->list, queuelist) {
		sg_len += blk_rq_map_sg(mq->queue, req, __sg);
		__sg = sg + (sg_len - 1);
		sg_unmark_end(__sg++);
	}
	sg_mark_end(sg + (sg_len - 1));
	return sg_len;
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	enum mmc_packed_type cmd_type;
	int i;

	cmd_type = mqrq->cmd_type;

	if (!mqrq->bounce_buf) {
		if (mmc_packed_cmd(cmd_type))
			return mmc_queue_packed_map_sg(mq, mqrq->packed,
						       mqrq->sg, cmd_type);
		else
			return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
	}

	BUG_ON(!mqrq->bounce_sg);

	if (mmc_packed_cmd(cmd_type))
		sg_len = mmc_queue_packed_map_sg(mq, mqrq->packed,
						 mqrq->bounce_sg, cmd_type);
	else
		sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);

	mqrq->bounce_sg_len = sg_len;

	buflen = 0;
	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);

	return 1;
}

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != WRITE)
		return;

	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != READ)
		return;

	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}