/*
 *  linux/drivers/mmc/card/queue.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"

#define MMC_QUEUE_BOUNCESZ	65536

#define MMC_QUEUE_SUSPENDED	(1 << 0)

/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	/*
	 * We only like normal block requests.
	 */
	if (req->cmd_type != REQ_TYPE_FS) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}

static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

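	/*
	 * Let this thread dip into the memory reserves: writes issued via
	 * this queue may be needed to free memory, so the worker must not
	 * itself block on allocation.
	 */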
	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
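		/*
		 * Mark ourselves sleeping before looking for work, so that a
		 * wake_up_process() from mmc_request() between the fetch
		 * below and schedule() is not lost.
		 */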
		set_current_state(TASK_INTERRUPTIBLE);
		if (!blk_queue_plugged(q))
			req = blk_fetch_request(q);
		mq->req = req;
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
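			/*
			 * Nothing to do: release thread_sem so the queue can
			 * be suspended while we sleep, and take it back
			 * before touching the queue again.
			 */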
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
			continue;
		}
		set_current_state(TASK_RUNNING);

		mq->issue_fn(mq, req);
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;

	if (!mq) {
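		/*
		 * The queue is being torn down: mmc_cleanup_queue() has
		 * cleared queuedata, so quietly fail whatever still arrives.
		 */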
		while ((req = blk_fetch_request(q)) != NULL) {
			req->cmd_flags |= REQ_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}

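	/*
	 * Only wake the worker when it is idle; a busy thread fetches the
	 * next request itself once the current one completes.
	 */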
	if (!mq->req)
		wake_up_process(mq->thread);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->queue->queuedata = mq;
	mq->req = NULL;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
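	/* Barriers are honoured by draining the queue; no flush is sent */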
	blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN);
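	/* Flash media: tell the I/O scheduler not to optimise for seeks */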
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
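	/*
	 * A host that can only take a single hardware segment per request
	 * gets a contiguous bounce buffer, so larger requests can still be
	 * issued by copying the data through it.
	 */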
	if (host->max_hw_segs == 1) {
		unsigned int bouncesz;

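		/*
		 * Clamp the buffer to the host's request-size, segment-size
		 * and block-count limits.
		 */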
		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512) {
			mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mq->bounce_buf) {
				printk(KERN_WARNING "%s: unable to "
					"allocate bounce buffer\n",
					mmc_card_name(card));
			}
		}

		if (mq->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

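			/*
			 * The host sees one scatterlist entry spanning the
			 * bounce buffer; bounce_sg below keeps the request's
			 * real fragments for copying in and out.
			 */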
			mq->sg = kmalloc(sizeof(struct scatterlist),
				GFP_KERNEL);
			if (!mq->sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->sg, 1);

			mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
				bouncesz / 512, GFP_KERNEL);
			if (!mq->bounce_sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->bounce_sg, bouncesz / 512);
		}
	}
#endif

	if (!mq->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_hw_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mq->sg = kmalloc(sizeof(struct scatterlist) *
			host->max_phys_segs, GFP_KERNEL);
		if (!mq->sg) {
			ret = -ENOMEM;
			goto cleanup_queue;
		}
		sg_init_table(mq->sg, host->max_phys_segs);
	}

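	/*
	 * thread_sem gates the worker thread: mmc_queue_suspend() holds it
	 * to keep the thread quiescent while the host is suspended.
	 */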
	init_MUTEX(&mq->thread_sem);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	if (mq->bounce_sg)
		kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;
 cleanup_queue:
	if (mq->sg)
		kfree(mq->sg);
	mq->sg = NULL;
	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;
	blk_cleanup_queue(mq->queue);
	return ret;
}

void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	if (mq->bounce_sg)
		kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;

	kfree(mq->sg);
	mq->sg = NULL;

	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests.  This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	int i;

	if (!mq->bounce_buf)
		return blk_rq_map_sg(mq->queue, mq->req, mq->sg);

	BUG_ON(!mq->bounce_sg);

	sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg);

	mq->bounce_sg_len = sg_len;

	buflen = 0;
	for_each_sg(mq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

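	/*
	 * Hand the host a single segment covering all of the data; the
	 * actual scatter/gather is done by mmc_queue_bounce_pre/post().
	 */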
	sg_init_one(mq->sg, mq->bounce_buf, buflen);

	return 1;
}

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue *mq)
{
	unsigned long flags;

	if (!mq->bounce_buf)
		return;

	if (rq_data_dir(mq->req) != WRITE)
		return;

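	/*
	 * sg_copy_to_buffer() maps pages with kmap_atomic(); interrupts
	 * stay off so the atomic kmap slots cannot be reused from IRQ
	 * context mid-copy.
	 */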
	local_irq_save(flags);
	sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len,
		mq->bounce_buf, mq->sg[0].length);
	local_irq_restore(flags);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue *mq)
{
	unsigned long flags;

	if (!mq->bounce_buf)
		return;

	if (rq_data_dir(mq->req) != READ)
		return;

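	/* Same atomic-kmap constraint as in mmc_queue_bounce_pre() */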
	local_irq_save(flags);
	sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len,
		mq->bounce_buf, mq->sg[0].length);
	local_irq_restore(flags);
}