/*
 *  linux/drivers/mmc/card/queue.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"

#define MMC_QUEUE_BOUNCESZ	65536

/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue *mq = q->queuedata;

	/*
	 * We only like normal block requests and discards.
	 */
	if (req->cmd_type != REQ_TYPE_FS && req_op(req) != REQ_OP_DISCARD) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
		return BLKPREP_KILL;

	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}

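/*
 * Worker thread: pulls requests off the block layer queue and hands
 * them to the issue function.  The current and previous request slots
 * are swapped after each issue so that a new request can be prepared
 * while the previous one is still completing on the host.
 */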
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);
		mq->mqrq_cur->req = req;
		spin_unlock_irq(q->queue_lock);

		if (req || mq->mqrq_prev->req) {
			set_current_state(TASK_RUNNING);
			mq->issue_fn(mq, req);
			cond_resched();
			if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
				mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
				continue; /* fetch again */
			}

			/*
			 * Current request becomes previous request
			 * and vice versa.
			 * In case of special requests, current request
			 * has been finished. Do not assign it to previous
			 * request.
			 */
			if (mmc_req_is_special(req))
				mq->mqrq_cur->req = NULL;

			mq->mqrq_prev->brq.mrq.data = NULL;
			mq->mqrq_prev->req = NULL;
			swap(mq->mqrq_prev, mq->mqrq_cur);
		} else {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
		}
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request_fn(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	unsigned long flags;
	struct mmc_context_info *cntx;

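	/*
	 * The queue is being torn down (queuedata was cleared in
	 * mmc_cleanup_queue), so fail any remaining requests.
	 */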
	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->cmd_flags |= REQ_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}

	cntx = &mq->card->host->context_info;
	if (!mq->mqrq_cur->req && mq->mqrq_prev->req) {
		/*
		 * A new request has arrived while the MMC thread may still
		 * be blocked waiting for the previous request to complete,
		 * and no current request has been fetched.
		 */
		spin_lock_irqsave(&cntx->lock, flags);
		if (cntx->is_waiting_last_req) {
			cntx->is_new_req = true;
			wake_up_interruptible(&cntx->wait);
		}
		spin_unlock_irqrestore(&cntx->lock, flags);
	} else if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
		wake_up_process(mq->thread);
}

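/*
 * Allocate and initialise a scatterlist of sg_len entries.
 * *err is set to 0 on success or -ENOMEM on allocation failure.
 */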
static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
{
	struct scatterlist *sg;

	sg = kmalloc(sizeof(struct scatterlist) * sg_len, GFP_KERNEL);
	if (!sg)
		*err = -ENOMEM;
	else {
		*err = 0;
		sg_init_table(sg, sg_len);
	}

	return sg;
}

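/*
 * Advertise the card's discard/erase capabilities to the block layer:
 * maximum discard size, discard granularity derived from the preferred
 * erase size, and secure discard support where available.
 */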
static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	blk_queue_max_discard_sectors(q, max_discard);
	if (card->erased_byte == 0 && !mmc_can_discard(card))
		q->limits.discard_zeroes_data = 1;
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;
	if (mmc_can_secure_erase_trim(card))
		queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request_fn, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->mqrq_cur = mqrq_cur;
	mq->mqrq_prev = mqrq_prev;
	mq->queue->queuedata = mq;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

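	/*
	 * Hosts limited to a single segment per request may use a bounce
	 * buffer (up to MMC_QUEUE_BOUNCESZ bytes) so that larger requests
	 * can still be transferred in one command.
	 */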
#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512) {
			mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mqrq_cur->bounce_buf) {
				pr_warn("%s: unable to allocate bounce cur buffer\n",
					mmc_card_name(card));
			} else {
				mqrq_prev->bounce_buf =
						kmalloc(bouncesz, GFP_KERNEL);
				if (!mqrq_prev->bounce_buf) {
					pr_warn("%s: unable to allocate bounce prev buffer\n",
						mmc_card_name(card));
					kfree(mqrq_cur->bounce_buf);
					mqrq_cur->bounce_buf = NULL;
				}
			}
		}

		if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mqrq_cur->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_cur->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;
		}
	}
#endif

	if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;


		mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;
	}

	sema_init(&mq->thread_sem, 1);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
		host->index, subname ? subname : "");

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;
	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;

 cleanup_queue:
	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;
	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;
	kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	blk_cleanup_queue(mq->queue);
	return ret;
}

void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;
	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
	struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;

	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;

	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;

	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;

	kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

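/*
 * Allocate the packed command descriptors used when several requests
 * are combined into a single packed transfer.
 */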
int mmc_packed_init(struct mmc_queue *mq, struct mmc_card *card)
{
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
	int ret = 0;


	mqrq_cur->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
	if (!mqrq_cur->packed) {
		pr_warn("%s: unable to allocate packed cmd for mqrq_cur\n",
			mmc_card_name(card));
		ret = -ENOMEM;
		goto out;
	}

	mqrq_prev->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
	if (!mqrq_prev->packed) {
		pr_warn("%s: unable to allocate packed cmd for mqrq_prev\n",
			mmc_card_name(card));
		kfree(mqrq_cur->packed);
		mqrq_cur->packed = NULL;
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&mqrq_cur->packed->list);
	INIT_LIST_HEAD(&mqrq_prev->packed->list);

out:
	return ret;
}

void mmc_packed_clean(struct mmc_queue *mq)
{
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

	kfree(mqrq_cur->packed);
	mqrq_cur->packed = NULL;
	kfree(mqrq_prev->packed);
	mqrq_prev->packed = NULL;
}

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests.  This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

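/*
 * Map a packed command onto a scatterlist.  For packed writes the
 * packed command header is mapped first, followed by the data of each
 * request on the packed list.
 */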
static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
					    struct mmc_packed *packed,
					    struct scatterlist *sg,
					    enum mmc_packed_type cmd_type)
{
	struct scatterlist *__sg = sg;
	unsigned int sg_len = 0;
	struct request *req;

	if (mmc_packed_wr(cmd_type)) {
		unsigned int hdr_sz = mmc_large_sector(mq->card) ? 4096 : 512;
		unsigned int max_seg_sz = queue_max_segment_size(mq->queue);
		unsigned int len, remain, offset = 0;
		u8 *buf = (u8 *)packed->cmd_hdr;

		remain = hdr_sz;
		do {
			len = min(remain, max_seg_sz);
			sg_set_buf(__sg, buf + offset, len);
			offset += len;
			remain -= len;
			sg_unmark_end(__sg++);
			sg_len++;
		} while (remain);
	}

	list_for_each_entry(req, &packed->list, queuelist) {
		sg_len += blk_rq_map_sg(mq->queue, req, __sg);
		__sg = sg + (sg_len - 1);
		sg_unmark_end(__sg++);
	}
	sg_mark_end(sg + (sg_len - 1));
	return sg_len;
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	enum mmc_packed_type cmd_type;
	int i;

	cmd_type = mqrq->cmd_type;

	if (!mqrq->bounce_buf) {
		if (mmc_packed_cmd(cmd_type))
			return mmc_queue_packed_map_sg(mq, mqrq->packed,
						       mqrq->sg, cmd_type);
		else
			return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
	}

	BUG_ON(!mqrq->bounce_sg);

	if (mmc_packed_cmd(cmd_type))
		sg_len = mmc_queue_packed_map_sg(mq, mqrq->packed,
						 mqrq->bounce_sg, cmd_type);
	else
		sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);

	mqrq->bounce_sg_len = sg_len;

	buflen = 0;
	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);

	return 1;
}

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != WRITE)
		return;

	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != READ)
		return;

	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}