/*
 *  linux/drivers/mmc/card/queue.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"

#define MMC_QUEUE_BOUNCESZ	65536

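/*
 * Flag bit in mq->flags: the queue is suspended and the worker thread
 * must stay parked until mmc_queue_resume().
 */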
#define MMC_QUEUE_SUSPENDED	(1 << 0)

/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue *mq = q->queuedata;

	/*
	 * We only like normal block requests and discards.
	 */
	if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	if (mq && mmc_card_removed(mq->card))
		return BLKPREP_KILL;

	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}

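/*
 * The queue worker thread ("mmcqd"): repeatedly fetches requests from
 * the block layer queue and hands them to mq->issue_fn().
 */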
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

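	/*
	 * Writeback to this device may be the only way to reclaim
	 * memory, so the worker must be able to dip into the emergency
	 * reserves rather than block on reclaim itself.
	 */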
	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;
		struct mmc_queue_req *tmp;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);
		mq->mqrq_cur->req = req;
		spin_unlock_irq(q->queue_lock);

		if (req || mq->mqrq_prev->req) {
			set_current_state(TASK_RUNNING);
			mq->issue_fn(mq, req);
		} else {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
		}

		/*
		 * Current request becomes previous request and vice
		 * versa; swapping the slots lets issue_fn() begin
		 * preparing a new request while the previous one may
		 * still be in flight.
		 */
		mq->mqrq_prev->brq.mrq.data = NULL;
		mq->mqrq_prev->req = NULL;
		tmp = mq->mqrq_prev;
		mq->mqrq_prev = mq->mqrq_cur;
		mq->mqrq_cur = tmp;
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request_fn(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;

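	/*
	 * The block layer can still run this function after
	 * mmc_cleanup_queue() has cleared queuedata, so fail any
	 * remaining requests cleanly instead of dereferencing NULL.
	 */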
	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->cmd_flags |= REQ_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}

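	/*
	 * Only kick the thread when it is idle; if either slot holds a
	 * request the thread is already active and will fetch the new
	 * request itself.
	 */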
	if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
		wake_up_process(mq->thread);
}

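/*
 * Allocate and initialise a scatterlist with sg_len entries. On failure
 * *err is set to -ENOMEM and NULL is returned; on success *err is 0.
 */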
static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
{
	struct scatterlist *sg;

	sg = kmalloc(sizeof(struct scatterlist) * sg_len, GFP_KERNEL);
	if (!sg)
		*err = -ENOMEM;
	else {
		*err = 0;
		sg_init_table(sg, sg_len);
	}

	return sg;
}

static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	q->limits.max_discard_sectors = max_discard;
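	/*
	 * An erase sets the memory to card->erased_byte; if that is
	 * zero (and we will really erase rather than discard), the
	 * discarded region is guaranteed to read back as zeroes.
	 */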
	if (card->erased_byte == 0 && !mmc_can_discard(card))
		q->limits.discard_zeroes_data = 1;
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;
	if (mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))
		queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request_fn, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->mqrq_cur = mqrq_cur;
	mq->mqrq_prev = mqrq_prev;
	mq->queue->queuedata = mq;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
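	/*
	 * Hosts limited to a single segment cannot take scatter-gather
	 * lists, so requests for them are staged through one contiguous
	 * bounce buffer of up to MMC_QUEUE_BOUNCESZ bytes.
	 */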
	if (host->max_segs == 1) {
		unsigned int bouncesz;

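		/*
		 * Start from the 64KiB default and clamp it to what the
		 * host can actually accept in a single request.
		 */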
		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512) {
			mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mqrq_cur->bounce_buf) {
				pr_warning("%s: unable to allocate bounce cur buffer\n",
					mmc_card_name(card));
			}
			mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mqrq_prev->bounce_buf) {
				pr_warning("%s: unable to allocate bounce prev buffer\n",
					mmc_card_name(card));
				kfree(mqrq_cur->bounce_buf);
				mqrq_cur->bounce_buf = NULL;
			}
		}

		if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mqrq_cur->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_cur->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;
		}
	}
#endif

	if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;

		mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;
	}

	sema_init(&mq->thread_sem, 1);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
		host->index, subname ? subname : "");

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;
	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;
 cleanup_queue:
	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;
	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;
	kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	blk_cleanup_queue(mq->queue);
	return ret;
}

void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;
	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
	struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;
	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;

	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;

	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;

	kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);
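
/*
 * Usage sketch (assumed caller-side code, not taken verbatim from the
 * mmc block driver): a user of this API pairs the two calls above
 * roughly like this, supplying its own issue_fn:
 *
 *	ret = mmc_init_queue(&md->queue, card, &md->lock, NULL);
 *	if (ret)
 *		goto out;
 *	md->queue.issue_fn = mmc_blk_issue_rq;
 *	...
 *	mmc_cleanup_queue(&md->queue);
 */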

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests.  This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		/* Wait for the worker to go idle, then keep it parked. */
		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}
/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	int i;
	if (!mqrq->bounce_buf)
		return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
	BUG_ON(!mqrq->bounce_sg);
	sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
	mqrq->bounce_sg_len = sg_len;
	buflen = 0;
	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;
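	/*
	 * Collapse the request into a single sg entry covering the
	 * bounce buffer; the data itself is copied in or out by
	 * mmc_queue_bounce_pre()/mmc_queue_bounce_post().
	 */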
	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);

	return 1;
}

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != WRITE)
		return;

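	/*
	 * mqrq->sg was collapsed to a single entry by
	 * mmc_queue_map_sg(), so sg[0].length is the total transfer
	 * size.
	 */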
	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != READ)
		return;

	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}