/*
 *  linux/drivers/mmc/card/queue.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"

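/* Default bounce buffer size; trimmed to the host's limits in mmc_init_queue(). */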
#define MMC_QUEUE_BOUNCESZ	65536

/*
 * Prepare a MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue *mq = q->queuedata;

	/*
	 * We only like normal block requests and discards.
	 */
	if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
		return BLKPREP_KILL;

	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}

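/*
 * Per-queue worker thread: fetches requests from the block layer and
 * hands them to mq->issue_fn().  Two request slots (mqrq_cur/mqrq_prev)
 * are kept so a new request can be prepared while the previous one is
 * still being handled by the host.
 */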
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

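	/*
	 * Run with PF_MEMALLOC so this thread can keep making progress
	 * under memory pressure; writeback of dirty pages may depend on
	 * it issuing the I/O.
	 */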
	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;
		unsigned int cmd_flags = 0;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);
		mq->mqrq_cur->req = req;
		spin_unlock_irq(q->queue_lock);

		if (req || mq->mqrq_prev->req) {
			set_current_state(TASK_RUNNING);
			cmd_flags = req ? req->cmd_flags : 0;
			mq->issue_fn(mq, req);
			cond_resched();
			if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
				mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
				continue; /* fetch again */
			}

			/*
			 * Current request becomes previous request
			 * and vice versa.
			 * In case of special requests, current request
			 * has been finished. Do not assign it to previous
			 * request.
			 */
			if (cmd_flags & MMC_REQ_SPECIAL_MASK)
				mq->mqrq_cur->req = NULL;

			mq->mqrq_prev->brq.mrq.data = NULL;
			mq->mqrq_prev->req = NULL;
			swap(mq->mqrq_prev, mq->mqrq_cur);
		} else {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
		}
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request_fn(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	unsigned long flags;
	struct mmc_context_info *cntx;

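	/*
	 * The queue is being torn down (mmc_cleanup_queue() has cleared
	 * queuedata); error out any requests that still trickle in.
	 */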
	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->cmd_flags |= REQ_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}

	cntx = &mq->card->host->context_info;
	if (!mq->mqrq_cur->req && mq->mqrq_prev->req) {
		/*
		 * New MMC request arrived when MMC thread may be
		 * blocked on the previous request to be complete
		 * with no current request fetched
		 */
		spin_lock_irqsave(&cntx->lock, flags);
		if (cntx->is_waiting_last_req) {
			cntx->is_new_req = true;
			wake_up_interruptible(&cntx->wait);
		}
		spin_unlock_irqrestore(&cntx->lock, flags);
	} else if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
		wake_up_process(mq->thread);
}

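/*
 * Allocate and initialise a scatterlist with sg_len entries; on failure
 * NULL is returned and *err is set to -ENOMEM.
 */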
static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
{
	struct scatterlist *sg;

	sg = kmalloc(sizeof(struct scatterlist)*sg_len, GFP_KERNEL);
	if (!sg)
		*err = -ENOMEM;
	else {
		*err = 0;
		sg_init_table(sg, sg_len);
	}

	return sg;
}

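/*
 * Advertise the card's discard capabilities to the block layer: maximum
 * discard size, discard granularity and, when supported, secure discard.
 */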
static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	q->limits.max_discard_sectors = max_discard;
	if (card->erased_byte == 0 && !mmc_can_discard(card))
		q->limits.discard_zeroes_data = 1;
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;
	if (mmc_can_secure_erase_trim(card))
		queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request_fn, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->mqrq_cur = mqrq_cur;
	mq->mqrq_prev = mqrq_prev;
	mq->queue->queuedata = mq;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

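	/*
	 * Hosts limited to a single segment get a contiguous bounce
	 * buffer: request data is copied through it (see
	 * mmc_queue_bounce_pre/post) so the transfer can be issued as
	 * one segment.
	 */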
#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512) {
			mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mqrq_cur->bounce_buf) {
				pr_warn("%s: unable to allocate bounce cur buffer\n",
					mmc_card_name(card));
			} else {
				mqrq_prev->bounce_buf =
						kmalloc(bouncesz, GFP_KERNEL);
				if (!mqrq_prev->bounce_buf) {
					pr_warn("%s: unable to allocate bounce prev buffer\n",
						mmc_card_name(card));
					kfree(mqrq_cur->bounce_buf);
					mqrq_cur->bounce_buf = NULL;
				}
			}
		}

		if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mqrq_cur->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_cur->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;
		}
	}
#endif

	if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;

		mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;
	}

	sema_init(&mq->thread_sem, 1);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
		host->index, subname ? subname : "");

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;
	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;

 cleanup_queue:
	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;
	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;
	kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	blk_cleanup_queue(mq->queue);
	return ret;
}

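/**
 * mmc_cleanup_queue - tear down an MMC request queue
 * @mq: MMC queue to clean up
 *
 * Stop the worker thread, drain any remaining requests and free the
 * scatterlists and bounce buffers owned by the queue.
 */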
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;
	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
	struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;

	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;

	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;

	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;

	kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

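/*
 * Allocate per-slot state for packed commands, which allow several
 * read/write requests to be issued to the card as a single packed
 * transfer (see mmc_queue_packed_map_sg() for how the header and data
 * are mapped).
 */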
int mmc_packed_init(struct mmc_queue *mq, struct mmc_card *card)
{
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
	int ret = 0;


	mqrq_cur->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
	if (!mqrq_cur->packed) {
		pr_warn("%s: unable to allocate packed cmd for mqrq_cur\n",
			mmc_card_name(card));
		ret = -ENOMEM;
		goto out;
	}

	mqrq_prev->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
	if (!mqrq_prev->packed) {
		pr_warn("%s: unable to allocate packed cmd for mqrq_prev\n",
			mmc_card_name(card));
		kfree(mqrq_cur->packed);
		mqrq_cur->packed = NULL;
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&mqrq_cur->packed->list);
	INIT_LIST_HEAD(&mqrq_prev->packed->list);

out:
	return ret;
}

void mmc_packed_clean(struct mmc_queue *mq)
{
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

	kfree(mqrq_cur->packed);
	mqrq_cur->packed = NULL;
	kfree(mqrq_prev->packed);
	mqrq_prev->packed = NULL;
}

/**
 * mmc_queue_suspend - suspend a MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests.  This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

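/*
 * Map a packed command onto a scatterlist.  For packed writes the packed
 * command header is mapped first, followed by the data of each request on
 * the packed list; the number of sg entries used is returned.
 */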
static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
					    struct mmc_packed *packed,
					    struct scatterlist *sg,
					    enum mmc_packed_type cmd_type)
{
	struct scatterlist *__sg = sg;
	unsigned int sg_len = 0;
	struct request *req;

	if (mmc_packed_wr(cmd_type)) {
		unsigned int hdr_sz = mmc_large_sector(mq->card) ? 4096 : 512;
		unsigned int max_seg_sz = queue_max_segment_size(mq->queue);
		unsigned int len, remain, offset = 0;
		u8 *buf = (u8 *)packed->cmd_hdr;

		remain = hdr_sz;
		do {
			len = min(remain, max_seg_sz);
			sg_set_buf(__sg, buf + offset, len);
			offset += len;
			remain -= len;
			(__sg++)->page_link &= ~0x02;
			sg_len++;
		} while (remain);
	}

	list_for_each_entry(req, &packed->list, queuelist) {
		sg_len += blk_rq_map_sg(mq->queue, req, __sg);
		__sg = sg + (sg_len - 1);
		(__sg++)->page_link &= ~0x02;
	}
	sg_mark_end(sg + (sg_len - 1));
	return sg_len;
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	enum mmc_packed_type cmd_type;
	int i;

	cmd_type = mqrq->cmd_type;

	if (!mqrq->bounce_buf) {
		if (mmc_packed_cmd(cmd_type))
			return mmc_queue_packed_map_sg(mq, mqrq->packed,
						       mqrq->sg, cmd_type);
		else
			return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
	}

	BUG_ON(!mqrq->bounce_sg);

	if (mmc_packed_cmd(cmd_type))
		sg_len = mmc_queue_packed_map_sg(mq, mqrq->packed,
						 mqrq->bounce_sg, cmd_type);
	else
		sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);

	mqrq->bounce_sg_len = sg_len;

	buflen = 0;
	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);

	return 1;
}

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != WRITE)
		return;

	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != READ)
		return;

	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}