/*
 *  linux/drivers/mmc/card/queue.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"

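/*
 * Size in bytes of the bounce buffer used when the host controller is
 * limited to a single segment; clamped to the host's real limits in
 * mmc_init_queue().
 */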
#define MMC_QUEUE_BOUNCESZ	65536

/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue *mq = q->queuedata;

	/*
	 * We only like normal block requests and discards.
	 */
	if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	if (mq && mmc_card_removed(mq->card))
		return BLKPREP_KILL;

	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}

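/*
 * Per-card worker thread. Requests are double-buffered between
 * mqrq_cur and mqrq_prev so the next request can be prepared while the
 * previous one is still in flight on the host.
 */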
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

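	/*
	 * Allow use of emergency memory reserves: this thread may be
	 * needed to make progress with writeback to the card when the
	 * system is short on memory.
	 */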
	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;
		struct mmc_queue_req *tmp;
		unsigned int cmd_flags = 0;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);
		mq->mqrq_cur->req = req;
		spin_unlock_irq(q->queue_lock);

		if (req || mq->mqrq_prev->req) {
			set_current_state(TASK_RUNNING);
			cmd_flags = req ? req->cmd_flags : 0;
			mq->issue_fn(mq, req);
			if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
				mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
				continue; /* fetch again */
			}

			/*
			 * Current request becomes previous request
			 * and vice versa.
			 * In case of special requests, current request
			 * has been finished. Do not assign it to previous
			 * request.
			 */
			if (cmd_flags & MMC_REQ_SPECIAL_MASK)
				mq->mqrq_cur->req = NULL;

			mq->mqrq_prev->brq.mrq.data = NULL;
			mq->mqrq_prev->req = NULL;
			tmp = mq->mqrq_prev;
			mq->mqrq_prev = mq->mqrq_cur;
			mq->mqrq_cur = tmp;
		} else {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
		}
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request_fn(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	unsigned long flags;
	struct mmc_context_info *cntx;

	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->cmd_flags |= REQ_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}

	cntx = &mq->card->host->context_info;
	if (!mq->mqrq_cur->req && mq->mqrq_prev->req) {
		/*
		 * New MMC request arrived when MMC thread may be
		 * blocked on the previous request to be complete
		 * with no current request fetched
		 */
		spin_lock_irqsave(&cntx->lock, flags);
		if (cntx->is_waiting_last_req) {
			cntx->is_new_req = true;
			wake_up_interruptible(&cntx->wait);
		}
		spin_unlock_irqrestore(&cntx->lock, flags);
	} else if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
		wake_up_process(mq->thread);
}

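/*
 * Allocate and initialise a scatterlist of sg_len entries. On failure,
 * NULL is returned and *err is set to -ENOMEM.
 */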
static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
{
	struct scatterlist *sg;

	sg = kmalloc(sizeof(struct scatterlist) * sg_len, GFP_KERNEL);
	if (!sg)
		*err = -ENOMEM;
	else {
		*err = 0;
		sg_init_table(sg, sg_len);
	}

	return sg;
}

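/*
 * Advertise the card's discard/erase/trim capabilities to the block
 * layer.
 */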
static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	q->limits.max_discard_sectors = max_discard;
	if (card->erased_byte == 0 && !mmc_can_discard(card))
		q->limits.discard_zeroes_data = 1;
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;
	if (mmc_can_secure_erase_trim(card))
		queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request_fn, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->mqrq_cur = mqrq_cur;
	mq->mqrq_prev = mqrq_prev;
	mq->queue->queuedata = mq;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

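	/*
	 * When the host controller can only handle a single segment per
	 * request, use one contiguous bounce buffer so reasonably large
	 * transfers are still possible; mmc_queue_bounce_pre/post() copy
	 * the data between the request pages and the buffer.
	 */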
#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512) {
			mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mqrq_cur->bounce_buf) {
				pr_warn("%s: unable to allocate bounce cur buffer\n",
					mmc_card_name(card));
			}
			mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mqrq_prev->bounce_buf) {
				pr_warn("%s: unable to allocate bounce prev buffer\n",
					mmc_card_name(card));
				kfree(mqrq_cur->bounce_buf);
				mqrq_cur->bounce_buf = NULL;
			}
		}

		if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mqrq_cur->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_cur->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;
		}
	}
#endif

	if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;
		mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;
	}

	sema_init(&mq->thread_sem, 1);
	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
		host->index, subname ? subname : "");

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;
	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;
 cleanup_queue:
	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;
	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;
	kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	blk_cleanup_queue(mq->queue);
	return ret;
}

void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;
	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
	struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;
	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;

	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;
	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;

	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;

	kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);
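
/*
 * Typical usage from the MMC block driver (sketch only; the real call
 * sites live in card/block.c):
 *
 *	ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
 *	if (ret)
 *		goto err;
 *	md->queue.issue_fn = mmc_blk_issue_rq;
 *	...
 *	mmc_cleanup_queue(&md->queue);
 */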

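/*
 * Packed command support: allocate the bookkeeping needed to combine
 * several requests into a single packed transfer, for both request
 * slots.
 */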
int mmc_packed_init(struct mmc_queue *mq, struct mmc_card *card)
{
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
	int ret = 0;


	mqrq_cur->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
	if (!mqrq_cur->packed) {
		pr_warn("%s: unable to allocate packed cmd for mqrq_cur\n",
			mmc_card_name(card));
		ret = -ENOMEM;
		goto out;
	}

	mqrq_prev->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
	if (!mqrq_prev->packed) {
		pr_warn("%s: unable to allocate packed cmd for mqrq_prev\n",
			mmc_card_name(card));
		kfree(mqrq_cur->packed);
		mqrq_cur->packed = NULL;
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&mqrq_cur->packed->list);
	INIT_LIST_HEAD(&mqrq_prev->packed->list);

out:
	return ret;
}

void mmc_packed_clean(struct mmc_queue *mq)
{
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

	kfree(mqrq_cur->packed);
	mqrq_cur->packed = NULL;
	kfree(mqrq_prev->packed);
	mqrq_prev->packed = NULL;
}

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests.  This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

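/*
 * Map a packed command onto a scatterlist. For packed writes the
 * packed command header occupies the leading entries (split to honour
 * the queue's maximum segment size), followed by the payload of every
 * request on the packed list.
 */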
static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
					    struct mmc_packed *packed,
					    struct scatterlist *sg,
					    enum mmc_packed_type cmd_type)
{
	struct scatterlist *__sg = sg;
	unsigned int sg_len = 0;
	struct request *req;

	if (mmc_packed_wr(cmd_type)) {
		unsigned int hdr_sz = mmc_large_sector(mq->card) ? 4096 : 512;
		unsigned int max_seg_sz = queue_max_segment_size(mq->queue);
		unsigned int len, remain, offset = 0;
		u8 *buf = (u8 *)packed->cmd_hdr;

		remain = hdr_sz;
		do {
			len = min(remain, max_seg_sz);
			sg_set_buf(__sg, buf + offset, len);
			offset += len;
			remain -= len;
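			/* the header must not terminate the sg list here:
			 * clear the end-of-list marker (bit 0x02) */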
			(__sg++)->page_link &= ~0x02;
			sg_len++;
		} while (remain);
	}

	list_for_each_entry(req, &packed->list, queuelist) {
		sg_len += blk_rq_map_sg(mq->queue, req, __sg);
		__sg = sg + (sg_len - 1);
		(__sg++)->page_link &= ~0x02;
	}
	sg_mark_end(sg + (sg_len - 1));
	return sg_len;
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	enum mmc_packed_type cmd_type;
	int i;

	cmd_type = mqrq->cmd_type;

	if (!mqrq->bounce_buf) {
		if (mmc_packed_cmd(cmd_type))
			return mmc_queue_packed_map_sg(mq, mqrq->packed,
						       mqrq->sg, cmd_type);
		else
			return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
	}

	BUG_ON(!mqrq->bounce_sg);

	if (mmc_packed_cmd(cmd_type))
		sg_len = mmc_queue_packed_map_sg(mq, mqrq->packed,
						 mqrq->bounce_sg, cmd_type);
	else
		sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
	mqrq->bounce_sg_len = sg_len;
	buflen = 0;
	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;
	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);

	return 1;
}

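/*
 * Expected calling sequence on the issuing side (sketch, assuming the
 * caller fills a struct mmc_blk_request as card/block.c does):
 *
 *	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
 *	mmc_queue_bounce_pre(mqrq);
 *	mmc_wait_for_req(card->host, &brq->mrq);
 *	mmc_queue_bounce_post(mqrq);
 */
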
/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != WRITE)
		return;

	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != READ)
		return;

	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}