block.c 20.3 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4
/*
 * Block driver for media (i.e., flash cards)
 *
 * Copyright 2002 Hewlett-Packard Company
5
 * Copyright 2005-2008 Pierre Ossman
L
Linus Torvalds 已提交
6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25
 *
 * Use consistent with the GNU GPL is permitted,
 * provided that this copyright notice is
 * preserved in its entirety in all copies and derived works.
 *
 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
 * FITNESS FOR ANY PARTICULAR PURPOSE.
 *
 * Many thanks to Alessandro Rubini and Jonathan Corbet!
 *
 * Author:  Andrew Christian
 *          28 May 2002
 */
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/kernel.h>
#include <linux/fs.h>
26
#include <linux/slab.h>
L
Linus Torvalds 已提交
27 28 29 30
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
31
#include <linux/mutex.h>
32
#include <linux/scatterlist.h>
33
#include <linux/string_helpers.h>
L
Linus Torvalds 已提交
34 35

#include <linux/mmc/card.h>
P
Pierre Ossman 已提交
36
#include <linux/mmc/host.h>
P
Pierre Ossman 已提交
37 38
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
L
Linus Torvalds 已提交
39 40 41 42

#include <asm/system.h>
#include <asm/uaccess.h>

43
#include "queue.h"
L
Linus Torvalds 已提交
44

45
MODULE_ALIAS("mmc:block");
46 47 48 49 50
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "mmcblk."

A
Andrei Warkentin 已提交
51 52 53 54
/*
 * A card supports reliable writes when it is an MMC and either
 * advertises the enhanced reliable-write mode in EXT_CSD or reports a
 * non-zero legacy reliable sector count.
 */
#define REL_WRITES_SUPPORTED(card) (mmc_card_mmc((card)) &&	\
    (((card)->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||	\
     ((card)->ext_csd.rel_sectors)))

55
static DEFINE_MUTEX(block_mutex);
56

L
Linus Torvalds 已提交
57
/*
58 59
 * The defaults come from config options but can be overriden by module
 * or bootarg options.
L
Linus Torvalds 已提交
60
 */
61
static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;
62

63 64 65 66 67 68 69 70
/*
 * We've only got one major, so number of mmcblk devices is
 * limited to 256 / number of minors per device.
 */
static int max_devices;

/* 256 minors, so at most 256 separate devices */
static DECLARE_BITMAP(dev_use, 256);
L
Linus Torvalds 已提交
71 72 73 74 75 76 77 78 79 80

/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
	spinlock_t	lock;
	struct gendisk	*disk;
	struct mmc_queue queue;

	unsigned int	usage;
81
	unsigned int	read_only;
L
Linus Torvalds 已提交
82 83
};

84
static DEFINE_MUTEX(open_lock);
L
Linus Torvalds 已提交
85

86 87 88
module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");

L
Linus Torvalds 已提交
89 90 91 92
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
	struct mmc_blk_data *md;

93
	mutex_lock(&open_lock);
L
Linus Torvalds 已提交
94 95 96 97 98
	md = disk->private_data;
	if (md && md->usage == 0)
		md = NULL;
	if (md)
		md->usage++;
99
	mutex_unlock(&open_lock);
L
Linus Torvalds 已提交
100 101 102 103 104 105

	return md;
}

static void mmc_blk_put(struct mmc_blk_data *md)
{
106
	mutex_lock(&open_lock);
L
Linus Torvalds 已提交
107 108
	md->usage--;
	if (md->usage == 0) {
109
		int devmaj = MAJOR(disk_devt(md->disk));
110
		int devidx = MINOR(disk_devt(md->disk)) / perdev_minors;
111 112

		if (!devmaj)
113
			devidx = md->disk->first_minor / perdev_minors;
114

A
Adrian Hunter 已提交
115 116
		blk_cleanup_queue(md->queue.queue);

117 118
		__clear_bit(devidx, dev_use);

L
Linus Torvalds 已提交
119 120 121
		put_disk(md->disk);
		kfree(md);
	}
122
	mutex_unlock(&open_lock);
L
Linus Torvalds 已提交
123 124
}

A
Al Viro 已提交
125
/*
 * Block device open: take a reference on the slot data and refuse
 * write opens of read-only media with -EROFS.
 */
static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
	int ret = -ENXIO;

	mutex_lock(&block_mutex);
	if (md) {
		ret = 0;
		/*
		 * usage == 2 means this is the first opener (the driver's
		 * own reference plus ours): recheck for a media change.
		 */
		if (md->usage == 2)
			check_disk_change(bdev);

		if ((mode & FMODE_WRITE) && md->read_only) {
			mmc_blk_put(md);
			ret = -EROFS;
		}
	}
	mutex_unlock(&block_mutex);

	return ret;
}

A
Al Viro 已提交
146
/*
 * Block device release: drop the reference taken in mmc_blk_open().
 */
static int mmc_blk_release(struct gendisk *disk, fmode_t mode)
{
	struct mmc_blk_data *md = disk->private_data;

	mutex_lock(&block_mutex);
	mmc_blk_put(md);
	mutex_unlock(&block_mutex);

	return 0;
}

static int
157
mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
L
Linus Torvalds 已提交
158
{
159 160 161 162
	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
	geo->heads = 4;
	geo->sectors = 16;
	return 0;
L
Linus Torvalds 已提交
163 164
}

165
static const struct block_device_operations mmc_bdops = {
A
Al Viro 已提交
166 167
	.open			= mmc_blk_open,
	.release		= mmc_blk_release,
168
	.getgeo			= mmc_blk_getgeo,
L
Linus Torvalds 已提交
169 170 171 172 173 174 175 176 177 178
	.owner			= THIS_MODULE,
};

/*
 * Everything needed to submit one block-layer request to the MMC core:
 * the request container, the data command, an optional stop command
 * and the data-transfer descriptor.
 */
struct mmc_blk_request {
	struct mmc_request	mrq;
	struct mmc_command	cmd;
	struct mmc_command	stop;
	struct mmc_data		data;
};

179 180 181
static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
{
	int err;
B
Ben Dooks 已提交
182 183
	u32 result;
	__be32 *blocks;
184 185 186 187 188 189 190 191 192 193 194 195

	struct mmc_request mrq;
	struct mmc_command cmd;
	struct mmc_data data;
	unsigned int timeout_us;

	struct scatterlist sg;

	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = MMC_APP_CMD;
	cmd.arg = card->rca << 16;
D
David Brownell 已提交
196
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
197 198

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
D
David Brownell 已提交
199 200 201
	if (err)
		return (u32)-1;
	if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
202 203 204 205 206 207
		return (u32)-1;

	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
	cmd.arg = 0;
D
David Brownell 已提交
208
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234

	memset(&data, 0, sizeof(struct mmc_data));

	data.timeout_ns = card->csd.tacc_ns * 100;
	data.timeout_clks = card->csd.tacc_clks * 100;

	timeout_us = data.timeout_ns / 1000;
	timeout_us += data.timeout_clks * 1000 /
		(card->host->ios.clock / 1000);

	if (timeout_us > 100000) {
		data.timeout_ns = 100000000;
		data.timeout_clks = 0;
	}

	data.blksz = 4;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	memset(&mrq, 0, sizeof(struct mmc_request));

	mrq.cmd = &cmd;
	mrq.data = &data;

B
Ben Dooks 已提交
235 236 237 238 239
	blocks = kmalloc(4, GFP_KERNEL);
	if (!blocks)
		return (u32)-1;

	sg_init_one(&sg, blocks, 4);
240 241 242

	mmc_wait_for_req(card->host, &mrq);

B
Ben Dooks 已提交
243 244 245
	result = ntohl(*blocks);
	kfree(blocks);

P
Pierre Ossman 已提交
246
	if (cmd.error || data.error)
B
Ben Dooks 已提交
247
		result = (u32)-1;
248

B
Ben Dooks 已提交
249
	return result;
250 251
}

252 253 254 255 256 257 258 259 260 261 262 263
static u32 get_card_status(struct mmc_card *card, struct request *req)
{
	struct mmc_command cmd;
	int err;

	memset(&cmd, 0, sizeof(struct mmc_command));
	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err)
264
		printk(KERN_ERR "%s: error %d sending status command",
265 266 267 268
		       req->rq_disk->disk_name, err);
	return cmd.resp[0];
}

A
Adrian Hunter 已提交
269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297
/*
 * Service a REQ_DISCARD request by erasing (or trimming, when the card
 * supports it) the covered sector range.  Returns 1 on success and 0 on
 * failure, after completing the block-layer request either way.
 */
static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr, arg;
	int err = 0;

	if (!mmc_can_erase(card)) {
		err = -EOPNOTSUPP;
		goto out;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	/* Prefer TRIM (sector granularity) over a full erase-group erase. */
	if (mmc_can_trim(card))
		arg = MMC_TRIM_ARG;
	else
		arg = MMC_ERASE_ARG;

	err = mmc_erase(card, from, nr, arg);
out:
	spin_lock_irq(&md->lock);
	__blk_end_request(req, err, blk_rq_bytes(req));
	spin_unlock_irq(&md->lock);

	return err ? 0 : 1;
}

298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329
/*
 * Service a secure discard (REQ_DISCARD | REQ_SECURE) by issuing a
 * secure erase or, for ranges not aligned to the erase group, the
 * two-phase secure trim sequence.  Returns 1 on success, 0 on failure.
 */
static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
				       struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr, arg;
	int err = 0;

	if (!mmc_can_secure_erase_trim(card)) {
		err = -EOPNOTSUPP;
		goto out;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	/* Unaligned ranges need secure trim; aligned ones can be erased. */
	if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
		arg = MMC_SECURE_TRIM1_ARG;
	else
		arg = MMC_SECURE_ERASE_ARG;

	err = mmc_erase(card, from, nr, arg);
	/* Secure trim is two steps: mark (TRIM1) then purge (TRIM2). */
	if (!err && arg == MMC_SECURE_TRIM1_ARG)
		err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
out:
	spin_lock_irq(&md->lock);
	__blk_end_request(req, err, blk_rq_bytes(req));
	spin_unlock_irq(&md->lock);

	return err ? 0 : 1;
}

A
Andrei Warkentin 已提交
330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380
/*
 * Complete a REQ_FLUSH request without doing any work.  Always
 * returns 1 (success).
 */
static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;

	/*
	 * No-op, only service this because we need REQ_FUA for reliable
	 * writes.
	 */
	spin_lock_irq(&md->lock);
	__blk_end_request_all(req, 0);
	spin_unlock_irq(&md->lock);

	return 1;
}

/*
 * Reformat current write as a reliable write, supporting
 * both legacy and the enhanced reliable write MMC cards.
 * In each transfer we'll handle only as much as a single
 * reliable write can handle, thus finish the request in
 * partial completions.
 *
 * Returns 0 on success, or the error from SET_BLOCK_COUNT.
 */
static inline int mmc_apply_rel_rw(struct mmc_blk_request *brq,
				   struct mmc_card *card,
				   struct request *req)
{
	int err;
	struct mmc_command set_count;

	if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
		/* Legacy mode imposes restrictions on transfers. */
		/* Unaligned start: transfer a single block only. */
		if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
			brq->data.blocks = 1;

		/* Clamp to exactly rel_sectors, or fall back to one block. */
		if (brq->data.blocks > card->ext_csd.rel_sectors)
			brq->data.blocks = card->ext_csd.rel_sectors;
		else if (brq->data.blocks < card->ext_csd.rel_sectors)
			brq->data.blocks = 1;
	}

	memset(&set_count, 0, sizeof(struct mmc_command));
	set_count.opcode = MMC_SET_BLOCK_COUNT;
	/* Bit 31 requests a reliable write for the following transfer. */
	set_count.arg = brq->data.blocks | (1 << 31);
	set_count.flags = MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &set_count, 0);
	if (err)
		printk(KERN_ERR "%s: error %d SET_BLOCK_COUNT\n",
		       req->rq_disk->disk_name, err);
	return err;
}

A
Adrian Hunter 已提交
381
static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
L
Linus Torvalds 已提交
382 383 384
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
385
	struct mmc_blk_request brq;
386
	int ret = 1, disable_multi = 0;
L
Linus Torvalds 已提交
387

A
Andrei Warkentin 已提交
388 389 390 391 392 393 394 395 396
	/*
	 * Reliable writes are used to implement Forced Unit Access and
	 * REQ_META accesses, and are supported only on MMCs.
	 */
	bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
			  (req->cmd_flags & REQ_META)) &&
		(rq_data_dir(req) == WRITE) &&
		REL_WRITES_SUPPORTED(card);

L
Linus Torvalds 已提交
397 398
	do {
		struct mmc_command cmd;
399
		u32 readcmd, writecmd, status = 0;
L
Linus Torvalds 已提交
400 401 402 403 404

		memset(&brq, 0, sizeof(struct mmc_blk_request));
		brq.mrq.cmd = &brq.cmd;
		brq.mrq.data = &brq.data;

405
		brq.cmd.arg = blk_rq_pos(req);
406 407
		if (!mmc_card_blockaddr(card))
			brq.cmd.arg <<= 9;
D
David Brownell 已提交
408
		brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
409
		brq.data.blksz = 512;
L
Linus Torvalds 已提交
410 411
		brq.stop.opcode = MMC_STOP_TRANSMISSION;
		brq.stop.arg = 0;
D
David Brownell 已提交
412
		brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
413
		brq.data.blocks = blk_rq_sectors(req);
L
Linus Torvalds 已提交
414

415 416 417 418 419 420 421 422
		/*
		 * The block layer doesn't support all sector count
		 * restrictions, so we need to be prepared for too big
		 * requests.
		 */
		if (brq.data.blocks > card->host->max_blk_count)
			brq.data.blocks = card->host->max_blk_count;

423 424 425 426 427 428 429 430
		/*
		 * After a read error, we redo the request one sector at a time
		 * in order to accurately determine which sectors can be read
		 * successfully.
		 */
		if (disable_multi && brq.data.blocks > 1)
			brq.data.blocks = 1;

A
Andrei Warkentin 已提交
431
		if (brq.data.blocks > 1 || do_rel_wr) {
D
David Brownell 已提交
432
			/* SPI multiblock writes terminate using a special
A
Andrei Warkentin 已提交
433 434 435
			 * token, not a STOP_TRANSMISSION request. Reliable
			 * writes use SET_BLOCK_COUNT and do not use a
			 * STOP_TRANSMISSION request either.
D
David Brownell 已提交
436
			 */
A
Andrei Warkentin 已提交
437 438
			if ((!mmc_host_is_spi(card->host) && !do_rel_wr) ||
			    rq_data_dir(req) == READ)
D
David Brownell 已提交
439
				brq.mrq.stop = &brq.stop;
440 441
			readcmd = MMC_READ_MULTIPLE_BLOCK;
			writecmd = MMC_WRITE_MULTIPLE_BLOCK;
R
Russell King 已提交
442 443
		} else {
			brq.mrq.stop = NULL;
444 445 446 447 448 449 450 451 452
			readcmd = MMC_READ_SINGLE_BLOCK;
			writecmd = MMC_WRITE_BLOCK;
		}
		if (rq_data_dir(req) == READ) {
			brq.cmd.opcode = readcmd;
			brq.data.flags |= MMC_DATA_READ;
		} else {
			brq.cmd.opcode = writecmd;
			brq.data.flags |= MMC_DATA_WRITE;
R
Russell King 已提交
453
		}
L
Linus Torvalds 已提交
454

A
Andrei Warkentin 已提交
455 456 457
		if (do_rel_wr && mmc_apply_rel_rw(&brq, card, req))
			goto cmd_err;

458 459
		mmc_set_data_timeout(&brq.data, card);

L
Linus Torvalds 已提交
460
		brq.data.sg = mq->sg;
461 462
		brq.data.sg_len = mmc_queue_map_sg(mq);

463 464 465 466
		/*
		 * Adjust the sg list so it is the same size as the
		 * request.
		 */
467
		if (brq.data.blocks != blk_rq_sectors(req)) {
468 469 470 471 472 473 474 475 476 477 478 479 480 481
			int i, data_size = brq.data.blocks << 9;
			struct scatterlist *sg;

			for_each_sg(brq.data.sg, sg, brq.data.sg_len, i) {
				data_size -= sg->length;
				if (data_size <= 0) {
					sg->length += data_size;
					i++;
					break;
				}
			}
			brq.data.sg_len = i;
		}

482
		mmc_queue_bounce_pre(mq);
L
Linus Torvalds 已提交
483 484

		mmc_wait_for_req(card->host, &brq.mrq);
485 486 487

		mmc_queue_bounce_post(mq);

488 489 490 491 492
		/*
		 * Check for errors here, but don't jump to cmd_err
		 * until later as we need to wait for the card to leave
		 * programming mode even when things go wrong.
		 */
493 494 495 496 497 498 499 500
		if (brq.cmd.error || brq.data.error || brq.stop.error) {
			if (brq.data.blocks > 1 && rq_data_dir(req) == READ) {
				/* Redo read one sector at a time */
				printk(KERN_WARNING "%s: retrying using single "
				       "block read\n", req->rq_disk->disk_name);
				disable_multi = 1;
				continue;
			}
501
			status = get_card_status(card, req);
502
		}
503

L
Linus Torvalds 已提交
504
		if (brq.cmd.error) {
505 506 507 508
			printk(KERN_ERR "%s: error %d sending read/write "
			       "command, response %#x, card status %#x\n",
			       req->rq_disk->disk_name, brq.cmd.error,
			       brq.cmd.resp[0], status);
L
Linus Torvalds 已提交
509 510 511
		}

		if (brq.data.error) {
512 513 514 515 516 517
			if (brq.data.error == -ETIMEDOUT && brq.mrq.stop)
				/* 'Stop' response contains card status */
				status = brq.mrq.stop->resp[0];
			printk(KERN_ERR "%s: error %d transferring data,"
			       " sector %u, nr %u, card status %#x\n",
			       req->rq_disk->disk_name, brq.data.error,
518 519
			       (unsigned)blk_rq_pos(req),
			       (unsigned)blk_rq_sectors(req), status);
L
Linus Torvalds 已提交
520 521 522
		}

		if (brq.stop.error) {
523 524 525 526
			printk(KERN_ERR "%s: error %d sending stop command, "
			       "response %#x, card status %#x\n",
			       req->rq_disk->disk_name, brq.stop.error,
			       brq.stop.resp[0], status);
L
Linus Torvalds 已提交
527 528
		}

D
David Brownell 已提交
529
		if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
530 531 532 533 534 535 536 537 538 539 540 541
			do {
				int err;

				cmd.opcode = MMC_SEND_STATUS;
				cmd.arg = card->rca << 16;
				cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
				err = mmc_wait_for_cmd(card->host, &cmd, 5);
				if (err) {
					printk(KERN_ERR "%s: error %d requesting status\n",
					       req->rq_disk->disk_name, err);
					goto cmd_err;
				}
542 543 544 545 546 547 548
				/*
				 * Some cards mishandle the status bits,
				 * so make sure to check both the busy
				 * indication and the card state.
				 */
			} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
				(R1_CURRENT_STATE(cmd.resp[0]) == 7));
L
Linus Torvalds 已提交
549 550

#if 0
551 552 553 554 555
			if (cmd.resp[0] & ~0x00000900)
				printk(KERN_ERR "%s: status = %08x\n",
				       req->rq_disk->disk_name, cmd.resp[0]);
			if (mmc_decode_status(cmd.resp))
				goto cmd_err;
L
Linus Torvalds 已提交
556
#endif
557
		}
L
Linus Torvalds 已提交
558

559 560 561 562 563 564 565 566 567 568 569 570
		if (brq.cmd.error || brq.stop.error || brq.data.error) {
			if (rq_data_dir(req) == READ) {
				/*
				 * After an error, we redo I/O one sector at a
				 * time, so we only reach here after trying to
				 * read a single sector.
				 */
				spin_lock_irq(&md->lock);
				ret = __blk_end_request(req, -EIO, brq.data.blksz);
				spin_unlock_irq(&md->lock);
				continue;
			}
571
			goto cmd_err;
572
		}
573

L
Linus Torvalds 已提交
574 575 576 577
		/*
		 * A block was successfully transferred.
		 */
		spin_lock_irq(&md->lock);
578
		ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
L
Linus Torvalds 已提交
579 580 581 582 583 584
		spin_unlock_irq(&md->lock);
	} while (ret);

	return 1;

 cmd_err:
585 586 587 588 589
 	/*
 	 * If this is an SD card and we're writing, we can first
 	 * mark the known good sectors as ok.
 	 *
	 * If the card is not SD, we can still ok written sectors
590 591
	 * as reported by the controller (which might be less than
	 * the real number of written sectors, but never more).
L
Linus Torvalds 已提交
592
	 */
593 594
	if (mmc_card_sd(card)) {
		u32 blocks;
595

596 597
		blocks = mmc_sd_num_wr_blocks(card);
		if (blocks != (u32)-1) {
598
			spin_lock_irq(&md->lock);
599
			ret = __blk_end_request(req, 0, blocks << 9);
600 601
			spin_unlock_irq(&md->lock);
		}
602 603 604 605
	} else {
		spin_lock_irq(&md->lock);
		ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
		spin_unlock_irq(&md->lock);
606 607
	}

L
Linus Torvalds 已提交
608
	spin_lock_irq(&md->lock);
609 610
	while (ret)
		ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
L
Linus Torvalds 已提交
611 612 613 614 615
	spin_unlock_irq(&md->lock);

	return 0;
}

A
Adrian Hunter 已提交
616 617
static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
618 619 620 621 622 623
	int ret;
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;

	mmc_claim_host(card->host);

624 625
	if (req->cmd_flags & REQ_DISCARD) {
		if (req->cmd_flags & REQ_SECURE)
626
			ret = mmc_blk_issue_secdiscard_rq(mq, req);
627
		else
628
			ret = mmc_blk_issue_discard_rq(mq, req);
A
Andrei Warkentin 已提交
629
	} else if (req->cmd_flags & REQ_FLUSH) {
630
		ret = mmc_blk_issue_flush(mq, req);
631
	} else {
632
		ret = mmc_blk_issue_rw_rq(mq, req);
633
	}
634 635 636

	mmc_release_host(card->host);
	return ret;
A
Adrian Hunter 已提交
637
}
L
Linus Torvalds 已提交
638

639 640 641 642 643 644
/*
 * A card is treated as read-only when its write-protect state is set
 * or it does not support the block write command class.
 */
static inline int mmc_blk_readonly(struct mmc_card *card)
{
	return mmc_card_readonly(card) ||
	       !(card->csd.cmdclass & CCC_BLOCK_WRITE);
}

L
Linus Torvalds 已提交
645 646 647 648 649
static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
{
	struct mmc_blk_data *md;
	int devidx, ret;

650 651
	devidx = find_first_zero_bit(dev_use, max_devices);
	if (devidx >= max_devices)
L
Linus Torvalds 已提交
652 653 654
		return ERR_PTR(-ENOSPC);
	__set_bit(devidx, dev_use);

655
	md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
656 657 658 659
	if (!md) {
		ret = -ENOMEM;
		goto out;
	}
L
Linus Torvalds 已提交
660 661


662 663 664 665 666
	/*
	 * Set the read-only status based on the supported commands
	 * and the write protect switch.
	 */
	md->read_only = mmc_blk_readonly(card);
L
Linus Torvalds 已提交
667

668
	md->disk = alloc_disk(perdev_minors);
669 670 671 672
	if (md->disk == NULL) {
		ret = -ENOMEM;
		goto err_kfree;
	}
L
Linus Torvalds 已提交
673

674 675
	spin_lock_init(&md->lock);
	md->usage = 1;
L
Linus Torvalds 已提交
676

677 678 679
	ret = mmc_init_queue(&md->queue, card, &md->lock);
	if (ret)
		goto err_putdisk;
L
Linus Torvalds 已提交
680

681 682
	md->queue.issue_fn = mmc_blk_issue_rq;
	md->queue.data = md;
683

684
	md->disk->major	= MMC_BLOCK_MAJOR;
685
	md->disk->first_minor = devidx * perdev_minors;
686 687 688 689
	md->disk->fops = &mmc_bdops;
	md->disk->private_data = md;
	md->disk->queue = md->queue.queue;
	md->disk->driverfs_dev = &card->dev;
690
	set_disk_ro(md->disk, md->read_only);
A
Andrei Warkentin 已提交
691 692
	if (REL_WRITES_SUPPORTED(card))
		blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
693 694 695 696 697 698 699 700 701 702 703 704 705

	/*
	 * As discussed on lkml, GENHD_FL_REMOVABLE should:
	 *
	 * - be set for removable media with permanent block devices
	 * - be unset for removable block devices with permanent media
	 *
	 * Since MMC block devices clearly fall under the second
	 * case, we do not set GENHD_FL_REMOVABLE.  Userspace
	 * should use the block device creation/destruction hotplug
	 * messages to tell when the card is present.
	 */

J
JiebingLi 已提交
706 707
	snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
		"mmcblk%d", devidx);
708

709
	blk_queue_logical_block_size(md->queue.queue, 512);
710

P
Pierre Ossman 已提交
711 712 713 714 715 716 717 718 719 720 721 722 723 724
	if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
		/*
		 * The EXT_CSD sector count is in number or 512 byte
		 * sectors.
		 */
		set_capacity(md->disk, card->ext_csd.sectors);
	} else {
		/*
		 * The CSD capacity field is in units of read_blkbits.
		 * set_capacity takes units of 512 bytes.
		 */
		set_capacity(md->disk,
			card->csd.capacity << (card->csd.read_blkbits - 9));
	}
L
Linus Torvalds 已提交
725
	return md;
726 727 728 729 730 731 732

 err_putdisk:
	put_disk(md->disk);
 err_kfree:
	kfree(md);
 out:
	return ERR_PTR(ret);
L
Linus Torvalds 已提交
733 734 735 736 737 738 739
}

static int
mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
{
	int err;

P
Pierre Ossman 已提交
740
	mmc_claim_host(card->host);
741
	err = mmc_set_blocklen(card, 512);
P
Pierre Ossman 已提交
742
	mmc_release_host(card->host);
L
Linus Torvalds 已提交
743 744

	if (err) {
745 746
		printk(KERN_ERR "%s: unable to set block size to 512: %d\n",
			md->disk->disk_name, err);
L
Linus Torvalds 已提交
747 748 749 750 751 752 753 754 755 756
		return -EINVAL;
	}

	return 0;
}

static int mmc_blk_probe(struct mmc_card *card)
{
	struct mmc_blk_data *md;
	int err;
757 758
	char cap_str[10];

759 760 761 762
	/*
	 * Check that the card supports the command class(es) we need.
	 */
	if (!(card->csd.cmdclass & CCC_BLOCK_READ))
L
Linus Torvalds 已提交
763 764 765 766 767 768 769 770 771 772
		return -ENODEV;

	md = mmc_blk_alloc(card);
	if (IS_ERR(md))
		return PTR_ERR(md);

	err = mmc_blk_set_blksize(md, card);
	if (err)
		goto out;

773
	string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2,
774 775
			cap_str, sizeof(cap_str));
	printk(KERN_INFO "%s: %s %s %s %s\n",
L
Linus Torvalds 已提交
776
		md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
777
		cap_str, md->read_only ? "(ro)" : "");
L
Linus Torvalds 已提交
778 779 780 781 782 783

	mmc_set_drvdata(card, md);
	add_disk(md->disk);
	return 0;

 out:
784
	mmc_cleanup_queue(&md->queue);
L
Linus Torvalds 已提交
785 786 787 788 789 790 791 792 793 794
	mmc_blk_put(md);

	return err;
}

static void mmc_blk_remove(struct mmc_card *card)
{
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	if (md) {
795
		/* Stop new requests from getting into the queue */
L
Linus Torvalds 已提交
796 797
		del_gendisk(md->disk);

798 799
		/* Then flush out any already in there */
		mmc_cleanup_queue(&md->queue);
L
Linus Torvalds 已提交
800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843

		mmc_blk_put(md);
	}
	mmc_set_drvdata(card, NULL);
}

#ifdef CONFIG_PM
/* Suspend: stop the request queue so no new I/O reaches the card. */
static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state)
{
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	if (md) {
		mmc_queue_suspend(&md->queue);
	}
	return 0;
}

/* Resume: re-program the block size and restart the request queue. */
static int mmc_blk_resume(struct mmc_card *card)
{
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	if (md) {
		mmc_blk_set_blksize(md, card);
		mmc_queue_resume(&md->queue);
	}
	return 0;
}
#else
#define	mmc_blk_suspend	NULL
#define mmc_blk_resume	NULL
#endif

/* MMC bus driver registration for the block device layer. */
static struct mmc_driver mmc_driver = {
	.drv		= {
		.name	= "mmcblk",
	},
	.probe		= mmc_blk_probe,
	.remove		= mmc_blk_remove,
	.suspend	= mmc_blk_suspend,
	.resume		= mmc_blk_resume,
};

/*
 * Module init: validate the perdev_minors parameter, register the
 * block major and the MMC bus driver.
 *
 * Fix: perdev_minors comes from a module/boot parameter and was never
 * range-checked; a value of 0 caused a division by zero computing
 * max_devices, and values over 256 made max_devices zero.
 */
static int __init mmc_blk_init(void)
{
	int res;

	if (perdev_minors < 1 || perdev_minors > 256) {
		printk(KERN_ERR "mmcblk: invalid perdev_minors %d\n",
		       perdev_minors);
		return -EINVAL;
	}

	if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
		pr_info("mmcblk: using %d minors per device\n", perdev_minors);

	max_devices = 256 / perdev_minors;

	res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
	if (res)
		goto out;

	res = mmc_register_driver(&mmc_driver);
	if (res)
		goto out2;

	return 0;
 out2:
	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
 out:
	return res;
}

/* Module exit: unregister in the reverse order of mmc_blk_init(). */
static void __exit mmc_blk_exit(void)
{
	mmc_unregister_driver(&mmc_driver);
	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
}

module_init(mmc_blk_init);
module_exit(mmc_blk_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");