/*
 *  linux/drivers/mmc/card/queue.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"

#define MMC_QUEUE_BOUNCESZ	65536

#define MMC_QUEUE_SUSPENDED	(1 << 0)

/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	/*
	 * We only like normal block requests.
	 */
	if (!blk_fs_request(req)) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}

static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

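	/*
	 * This thread sits in the block writeout path for the card, so let
	 * it dip into the emergency memory reserves (PF_MEMALLOC) rather
	 * than deadlock waiting for memory it is supposed to help free.
	 */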
	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

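		/*
		 * Pick up the next request under the queue lock; if the
		 * queue is still plugged the block layer is batching, so
		 * leave it alone for now.
		 */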
		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		if (!blk_queue_plugged(q))
			req = elv_next_request(q);
		mq->req = req;
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
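			/*
			 * Nothing to do: release thread_sem so the queue can
			 * be suspended while we sleep, then wait to be woken
			 * when new requests arrive.
			 */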
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
			continue;
		}
		set_current_state(TASK_RUNNING);

		mq->issue_fn(mq, req);
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	int ret;

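	/*
	 * mmc_cleanup_queue() clears queuedata when the card goes away, so
	 * a NULL mq means we simply fail whatever is left with -EIO.
	 */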
	if (!mq) {
		printk(KERN_ERR "MMC: killing requests for dead queue\n");
		while ((req = elv_next_request(q)) != NULL) {
			do {
				ret = __blk_end_request(req, -EIO,
							blk_rq_cur_bytes(req));
			} while (ret);
		}
		return;
	}

	if (!mq->req)
		wake_up_process(mq->thread);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->queue->queuedata = mq;
	mq->req = NULL;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN, NULL);

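	/*
	 * Hosts that can only handle a single segment per request get a
	 * contiguous bounce buffer, so reasonably sized requests can still
	 * be built up and copied through it in one piece.
	 */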
#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_hw_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512) {
			mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mq->bounce_buf) {
				printk(KERN_WARNING "%s: unable to "
					"allocate bounce buffer\n",
					mmc_card_name(card));
			}
		}

		if (mq->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
			blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

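			/*
			 * mq->sg gets a single entry describing the bounce
			 * buffer; mq->bounce_sg holds the request's real
			 * segments for the copy in and out.
			 */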
			mq->sg = kmalloc(sizeof(struct scatterlist),
				GFP_KERNEL);
			if (!mq->sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->sg, 1);

			mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
				bouncesz / 512, GFP_KERNEL);
			if (!mq->bounce_sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->bounce_sg, bouncesz / 512);
		}
	}
#endif

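	/*
	 * No bounce buffer: expose the host's real DMA and segment limits
	 * to the block layer.
	 */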
	if (!mq->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
		blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mq->sg = kmalloc(sizeof(struct scatterlist) *
			host->max_phys_segs, GFP_KERNEL);
		if (!mq->sg) {
			ret = -ENOMEM;
			goto cleanup_queue;
		}
		sg_init_table(mq->sg, host->max_phys_segs);
	}

	init_MUTEX(&mq->thread_sem);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
free_bounce_sg:
	if (mq->bounce_sg)
		kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;
cleanup_queue:
	if (mq->sg)
		kfree(mq->sg);
	mq->sg = NULL;
	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;
	blk_cleanup_queue(mq->queue);
	return ret;
}

void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	/* Mark that we should start throwing out stragglers */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	spin_unlock_irqrestore(q->queue_lock, flags);

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	if (mq->bounce_sg)
		kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;

	kfree(mq->sg);
	mq->sg = NULL;

	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;

	blk_cleanup_queue(mq->queue);

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests.  This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

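		/*
		 * Taking thread_sem guarantees the worker thread has
		 * finished issuing its current request and is parked.
		 */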
		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	int i;

	if (!mq->bounce_buf)
		return blk_rq_map_sg(mq->queue, mq->req, mq->sg);

	BUG_ON(!mq->bounce_sg);

	sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg);

	mq->bounce_sg_len = sg_len;

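	/*
	 * Hand the host a single sg entry covering as much of the bounce
	 * buffer as this request actually uses.
	 */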
	buflen = 0;
	for_each_sg(mq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

	sg_init_one(mq->sg, mq->bounce_buf, buflen);

	return 1;
}

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue *mq)
{
	unsigned long flags;

	if (!mq->bounce_buf)
		return;

	if (rq_data_dir(mq->req) != WRITE)
		return;

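	/*
	 * Copy the scattered request data into the contiguous bounce
	 * buffer; sg_copy_to_buffer() uses kmap_atomic() internally, so
	 * the copy runs with interrupts disabled.
	 */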
	local_irq_save(flags);
	sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len,
		mq->bounce_buf, mq->sg[0].length);
	local_irq_restore(flags);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue *mq)
{
	unsigned long flags;

	if (!mq->bounce_buf)
		return;

	if (rq_data_dir(mq->req) != READ)
		return;

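	/*
	 * Copy the data that was read into the bounce buffer back out to
	 * the request's scattered pages.
	 */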
	local_irq_save(flags);
	sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len,
		mq->bounce_buf, mq->sg[0].length);
	local_irq_restore(flags);
}