/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "nvme.h"

struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, unsigned int flags)
{
	/* Opcode bit 0 set means the command transfers data to the device. */
	bool write = cmd->common.opcode & 1;
	struct request *req;

	req = blk_mq_alloc_request(q, write, flags);
	if (IS_ERR(req))
		return req;

	req->cmd_type = REQ_TYPE_DRV_PRIV;
	req->cmd_flags |= REQ_FAILFAST_DRIVER;
	req->__data_len = 0;
	req->__sector = (sector_t) -1;
	req->bio = req->biotail = NULL;

	req->cmd = (unsigned char *)cmd;
	req->cmd_len = sizeof(struct nvme_command);
	req->special = (void *)0;

	return req;
}

/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code.
 */
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, unsigned bufflen, u32 *result, unsigned timeout)
{
	struct request *req;
	int ret;

	req = nvme_alloc_request(q, cmd, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;

	if (buffer && bufflen) {
		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
		if (ret)
			goto out;
	}

	blk_execute_rq(req->q, NULL, req, 0);
	if (result)
		*result = (u32)(uintptr_t)req->special;
	ret = req->errors;
 out:
	blk_mq_free_request(req);
	return ret;
}

int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, unsigned bufflen)
{
	return __nvme_submit_sync_cmd(q, cmd, buffer, bufflen, NULL, 0);
}
int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen, u32 *result,
		unsigned timeout)
{
	struct bio *bio = NULL;
	struct request *req;
	int ret;

	req = nvme_alloc_request(q, cmd, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;

	if (ubuffer && bufflen) {
		ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
				GFP_KERNEL);
		if (ret)
			goto out;
		bio = req->bio;
	}

	blk_execute_rq(req->q, NULL, req, 0);
	if (bio)
		blk_rq_unmap_user(bio);
	if (result)
		*result = (u32)(uintptr_t)req->special;
	ret = req->errors;
 out:
	blk_mq_free_request(req);
	return ret;
}

int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = cpu_to_le32(1);

	*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ctrl));
	if (error)
		kfree(*id);
	return error;
}

int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
		struct nvme_id_ns **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);

	*id = kmalloc(sizeof(struct nvme_id_ns), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ns));
	if (error)
		kfree(*id);
	return error;
}

int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
		dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.nsid = cpu_to_le32(nsid);
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);

	return __nvme_submit_sync_cmd(dev->admin_q, &c, NULL, 0, result, 0);
}

int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
		dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_set_features;
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	return __nvme_submit_sync_cmd(dev->admin_q, &c, NULL, 0, result, 0);
}

int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log)
{
	struct nvme_command c = { };
	int error;

	/* cdw10: 0's based dword count in bits 27:16, log page ID in bits 7:0 */
	c.common.opcode = nvme_admin_get_log_page;
	c.common.nsid = cpu_to_le32(0xFFFFFFFF);
	c.common.cdw10[0] = cpu_to_le32(
			(((sizeof(struct nvme_smart_log) / 4) - 1) << 16) |
			 NVME_LOG_SMART);

	*log = kmalloc(sizeof(struct nvme_smart_log), GFP_KERNEL);
	if (!*log)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *log,
			sizeof(struct nvme_smart_log));
	if (error)
		kfree(*log);
	return error;
}
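
/*
 * Illustrative sketch only, not part of the driver above: a minimal example
 * of how a hypothetical caller might use the Get Features helper, assuming a
 * valid struct nvme_ctrl with a live admin queue.  The function name
 * nvme_example_read_temp_thresh and its arguments are made up for
 * illustration; NVME_FEAT_TEMP_THRESH is the standard feature identifier
 * from <linux/nvme.h>.  No data buffer is needed because the feature value
 * is returned in the completion's command-specific dword (via *result).
 */
static int __maybe_unused nvme_example_read_temp_thresh(struct nvme_ctrl *ctrl,
		u32 *thresh)
{
	/* nsid 0 and a NULL PRP: this feature carries no data transfer. */
	return nvme_get_features(ctrl, NVME_FEAT_TEMP_THRESH, 0, 0, thresh);
}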