io-cmd-bdev.c
/*
 * NVMe I/O command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/blkdev.h>
#include <linux/module.h>
#include "nvmet.h"

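/*
 * Open the namespace's backing block device and cache its size and logical
 * block size.  The error message is suppressed for -ENOTBLK, which the
 * caller may use to fall back to another namespace backend.
 */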
int nvmet_bdev_ns_enable(struct nvmet_ns *ns)
{
	int ret;

	ns->bdev = blkdev_get_by_path(ns->device_path,
			FMODE_READ | FMODE_WRITE, NULL);
	if (IS_ERR(ns->bdev)) {
		ret = PTR_ERR(ns->bdev);
		if (ret != -ENOTBLK) {
			pr_err("failed to open block device %s: (%ld)\n",
					ns->device_path, PTR_ERR(ns->bdev));
		}
		ns->bdev = NULL;
		return ret;
	}
	ns->size = i_size_read(ns->bdev->bd_inode);
	ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));
	return 0;
}

void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
{
	if (ns->bdev) {
		blkdev_put(ns->bdev, FMODE_WRITE | FMODE_READ);
		ns->bdev = NULL;
	}
}

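/*
 * Shared bio completion handler: report the NVMe status to the core and
 * free the bio unless it is the request's embedded inline bio.
 */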
static void nvmet_bio_done(struct bio *bio)
{
	struct nvmet_req *req = bio->bi_private;

	nvmet_req_complete(req,
		bio->bi_status ? NVME_SC_INTERNAL | NVME_SC_DNR : 0);

	if (bio != &req->b.inline_bio)
		bio_put(bio);
}

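/*
 * Translate an NVMe read/write command into one or more chained bios built
 * from the request's scatterlist and submit them to the backing device.
 */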
static void nvmet_bdev_execute_rw(struct nvmet_req *req)
{
	int sg_cnt = req->sg_cnt;
	struct bio *bio = &req->b.inline_bio;
	struct scatterlist *sg;
	sector_t sector;
	blk_qc_t cookie;
	int op, op_flags = 0, i;

	if (!req->sg_cnt) {
		nvmet_req_complete(req, 0);
		return;
	}

	if (req->cmd->rw.opcode == nvme_cmd_write) {
		op = REQ_OP_WRITE;
		op_flags = REQ_SYNC | REQ_IDLE;
		if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
			op_flags |= REQ_FUA;
	} else {
		op = REQ_OP_READ;
	}

	/* Convert the command's starting LBA into a 512-byte sector offset. */
	sector = le64_to_cpu(req->cmd->rw.slba);
	sector <<= (req->ns->blksize_shift - 9);

	bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
	bio_set_dev(bio, req->ns->bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_private = req;
	bio->bi_end_io = nvmet_bio_done;
	bio_set_op_attrs(bio, op, op_flags);

	/* Map the scatterlist; chain and submit a new bio whenever one fills up. */
	for_each_sg(req->sg, sg, req->sg_cnt, i) {
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
			struct bio *prev = bio;

			bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
			bio_set_dev(bio, req->ns->bdev);
			bio->bi_iter.bi_sector = sector;
			bio_set_op_attrs(bio, op, op_flags);

			bio_chain(bio, prev);
			submit_bio(prev);
		}

		sector += sg->length >> 9;
		sg_cnt--;
	}

	cookie = submit_bio(bio);

	/* Opportunistically poll the backing queue for the bio just submitted. */
	blk_poll(bdev_get_queue(req->ns->bdev), cookie, true);
}

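/* Implement the NVMe Flush command as an empty preflush bio. */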
static void nvmet_bdev_execute_flush(struct nvmet_req *req)
{
	struct bio *bio = &req->b.inline_bio;

	bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
	bio_set_dev(bio, req->ns->bdev);
	bio->bi_private = req;
	bio->bi_end_io = nvmet_bio_done;
	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

	submit_bio(bio);
}

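/* Synchronous flush helper that returns an NVMe status code. */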
u16 nvmet_bdev_flush(struct nvmet_req *req)
{
	if (blkdev_issue_flush(req->ns->bdev, GFP_KERNEL, NULL))
		return NVME_SC_INTERNAL | NVME_SC_DNR;
	return 0;
}

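/*
 * Queue a discard for a single DSM range, converting namespace blocks into
 * 512-byte sectors.  Bios are accumulated in *bio so the caller can submit
 * the whole chain at once.
 */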
static u16 nvmet_bdev_discard_range(struct nvmet_ns *ns,
		struct nvme_dsm_range *range, struct bio **bio)
{
	int ret;

	ret = __blkdev_issue_discard(ns->bdev,
			le64_to_cpu(range->slba) << (ns->blksize_shift - 9),
			le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
			GFP_KERNEL, 0, bio);
	if (ret && ret != -EOPNOTSUPP)
		return NVME_SC_INTERNAL | NVME_SC_DNR;
	return 0;
}

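/*
 * Walk the DSM range list in the command payload (dsm.nr is 0-based) and
 * queue a discard for each range, then submit the resulting bio chain.
 */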
static void nvmet_bdev_execute_discard(struct nvmet_req *req)
{
	struct nvme_dsm_range range;
	struct bio *bio = NULL;
	int i;
	u16 status;

	for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
		status = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
				sizeof(range));
		if (status)
			break;

		status = nvmet_bdev_discard_range(req->ns, &range, &bio);
		if (status)
			break;
	}

	if (bio) {
		bio->bi_private = req;
		bio->bi_end_io = nvmet_bio_done;
		if (status) {
			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
		} else {
			submit_bio(bio);
		}
	} else {
		nvmet_req_complete(req, status);
	}
}

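/*
 * Only the Deallocate (AD) attribute is implemented; other attributes are
 * completed successfully without any action.
 */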
static void nvmet_bdev_execute_dsm(struct nvmet_req *req)
{
	switch (le32_to_cpu(req->cmd->dsm.attributes)) {
	case NVME_DSMGMT_AD:
		nvmet_bdev_execute_discard(req);
		return;
	case NVME_DSMGMT_IDR:
	case NVME_DSMGMT_IDW:
	default:
		/* Not supported yet */
		nvmet_req_complete(req, 0);
		return;
	}
}

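/*
 * Implement Write Zeroes via __blkdev_issue_zeroout(), converting the
 * starting LBA and 0-based block count into 512-byte sectors.
 */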
static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
{
	struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
	struct bio *bio = NULL;
	u16 status = NVME_SC_SUCCESS;
	sector_t sector;
	sector_t nr_sector;

	sector = le64_to_cpu(write_zeroes->slba) <<
		(req->ns->blksize_shift - 9);
	nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
		(req->ns->blksize_shift - 9));

	if (__blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector,
				GFP_KERNEL, &bio, 0))
		status = NVME_SC_INTERNAL | NVME_SC_DNR;

	if (bio) {
		bio->bi_private = req;
		bio->bi_end_io = nvmet_bio_done;
		submit_bio(bio);
	} else {
		nvmet_req_complete(req, status);
	}
}

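/*
 * Set up the execute handler and expected data transfer length for an I/O
 * command targeting a block-device backed namespace.
 */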
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	switch (cmd->common.opcode) {
	case nvme_cmd_read:
	case nvme_cmd_write:
		req->execute = nvmet_bdev_execute_rw;
		req->data_len = nvmet_rw_len(req);
		return 0;
	case nvme_cmd_flush:
		req->execute = nvmet_bdev_execute_flush;
		req->data_len = 0;
		return 0;
	case nvme_cmd_dsm:
		req->execute = nvmet_bdev_execute_dsm;
		req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
			sizeof(struct nvme_dsm_range);
		return 0;
	case nvme_cmd_write_zeroes:
		req->execute = nvmet_bdev_execute_write_zeroes;
		req->data_len = 0;
		return 0;
	default:
		pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
		       req->sq->qid);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
}