/*
 *  linux/drivers/mmc/card/queue.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"

#define MMC_QUEUE_BOUNCESZ	65536

#define MMC_QUEUE_SUSPENDED	(1 << 0)

/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	/*
	 * We only like normal block requests.
	 */
	if (!blk_fs_request(req) && !blk_pc_request(req)) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}

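/*
 * Per-queue worker thread: pulls requests off the block queue and hands
 * them to mq->issue_fn().  thread_sem is released while the thread sleeps
 * so that mmc_queue_suspend() can wait for it to go idle; the loop exits
 * when kthread_stop() is called.
 */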
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		if (!blk_queue_plugged(q))
			req = elv_next_request(q);
		mq->req = req;
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
			continue;
		}
		set_current_state(TASK_RUNNING);

		mq->issue_fn(mq, req);
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	int ret;

	if (!mq) {
		printk(KERN_ERR "MMC: killing requests for dead queue\n");
		while ((req = elv_next_request(q)) != NULL) {
			do {
				ret = end_that_request_chunk(req, 0,
					req->current_nr_sectors << 9);
			} while (ret);
		}
		return;
	}

	if (!mq->req)
		wake_up_process(mq->thread);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;

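	/*
	 * Use the host's DMA mask as the block-layer bounce limit; when no
	 * mask is set, fall back to bouncing highmem pages (BLK_BOUNCE_HIGH).
	 */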
	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->queue->queuedata = mq;
	mq->req = NULL;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);

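	/*
	 * Hosts that can only handle a single segment per transfer get a
	 * contiguous bounce buffer (MMC_QUEUE_BOUNCESZ at most, further
	 * capped by the host's request and segment limits) so that
	 * scattered requests can still be issued in one go.
	 */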
#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_hw_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;

		mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
		if (!mq->bounce_buf) {
			printk(KERN_WARNING "%s: unable to allocate "
				"bounce buffer\n", mmc_card_name(card));
		} else {
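			/*
			 * Size the queue so that no request ever exceeds
			 * the bounce buffer; the block layer limits are
			 * expressed in 512-byte sectors.
			 */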
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
			blk_queue_max_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
			blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mq->sg = kmalloc(sizeof(struct scatterlist),
				GFP_KERNEL);
			if (!mq->sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->sg, 1);

			mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
				bouncesz / 512, GFP_KERNEL);
			if (!mq->bounce_sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->bounce_sg, bouncesz / 512);
		}
	}
#endif

	if (!mq->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_sectors(mq->queue, host->max_req_size / 512);
		blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
		blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mq->sg = kmalloc(sizeof(struct scatterlist) *
			host->max_phys_segs, GFP_KERNEL);
		if (!mq->sg) {
			ret = -ENOMEM;
			goto cleanup_queue;
		}
		sg_init_table(mq->sg, host->max_phys_segs);
	}

	init_MUTEX(&mq->thread_sem);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
 	if (mq->bounce_sg)
 		kfree(mq->bounce_sg);
 	mq->bounce_sg = NULL;
 cleanup_queue:
 	if (mq->sg)
		kfree(mq->sg);
	mq->sg = NULL;
	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;
	blk_cleanup_queue(mq->queue);
	return ret;
}

void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	/* Mark that we should start throwing out stragglers */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	spin_unlock_irqrestore(q->queue_lock, flags);

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

 	if (mq->bounce_sg)
 		kfree(mq->bounce_sg);
 	mq->bounce_sg = NULL;

	kfree(mq->sg);
	mq->sg = NULL;

	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;

	blk_cleanup_queue(mq->queue);

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests.  This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

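/*
 * Copy data between two scatterlists, walking both lists and copying
 * min-sized chunks until the source list is exhausted.
 */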
static void copy_sg(struct scatterlist *dst, unsigned int dst_len,
	struct scatterlist *src, unsigned int src_len)
{
	unsigned int chunk;
	char *dst_buf, *src_buf;
	unsigned int dst_size, src_size;

	dst_buf = NULL;
	src_buf = NULL;
	dst_size = 0;
	src_size = 0;

	while (src_len) {
		BUG_ON(dst_len == 0);

		if (dst_size == 0) {
			dst_buf = sg_virt(dst);
			dst_size = dst->length;
		}

		if (src_size == 0) {
			src_buf = sg_virt(src);
			src_size = src->length;
		}

		chunk = min(dst_size, src_size);

		memcpy(dst_buf, src_buf, chunk);

		dst_buf += chunk;
		src_buf += chunk;
		dst_size -= chunk;
		src_size -= chunk;

		if (dst_size == 0) {
			dst++;
			dst_len--;
		}

		if (src_size == 0) {
			src++;
			src_len--;
		}
	}
}

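/*
 * Map the current request into the queue's scatterlist.  Without a bounce
 * buffer this is a plain blk_rq_map_sg(); with one, the request is mapped
 * into bounce_sg and then presented to the host as a single segment
 * covering the bounce buffer.
 */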
unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
{
	unsigned int sg_len;

	if (!mq->bounce_buf)
		return blk_rq_map_sg(mq->queue, mq->req, mq->sg);

	BUG_ON(!mq->bounce_sg);

	sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg);

	mq->bounce_sg_len = sg_len;

	/*
	 * Shortcut in the event we only get a single entry.
	 */
	if (sg_len == 1) {
		memcpy(mq->sg, mq->bounce_sg, sizeof(struct scatterlist));
		return 1;
	}

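	/*
	 * Several entries: point mq->sg at the bounce buffer and grow its
	 * length to cover everything that was mapped into bounce_sg.
	 */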
	sg_init_one(mq->sg, mq->bounce_buf, 0);

	while (sg_len) {
		mq->sg[0].length += mq->bounce_sg[sg_len - 1].length;
		sg_len--;
	}

	return 1;
}

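/*
 * Stage write data into the bounce buffer before the host starts the
 * transfer.
 */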
void mmc_queue_bounce_pre(struct mmc_queue *mq)
{
	if (!mq->bounce_buf)
		return;

	if (mq->bounce_sg_len == 1)
		return;
	if (rq_data_dir(mq->req) != WRITE)
		return;

	copy_sg(mq->sg, 1, mq->bounce_sg, mq->bounce_sg_len);
}

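/*
 * Copy read data back out of the bounce buffer into the request's
 * original scatterlist once the transfer has completed.
 */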
void mmc_queue_bounce_post(struct mmc_queue *mq)
{
	if (!mq->bounce_buf)
		return;

	if (mq->bounce_sg_len == 1)
		return;
	if (rq_data_dir(mq->req) != READ)
		return;

	copy_sg(mq->bounce_sg, mq->bounce_sg_len, mq->sg, 1);
}