/*******************************************************************************
 * Vhost kernel TCM fabric driver for virtio SCSI initiators
 *
 * (C) Copyright 2010-2013 Datera, Inc.
 * (C) Copyright 2010-2012 IBM Corp.
 *
 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
 *
 * Authors: Nicholas A. Bellinger <nab@daterainc.com>
 *          Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 ****************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
#include <linux/vhost.h>
#include <linux/virtio_scsi.h>
#include <linux/llist.h>
#include <linux/bitmap.h>
#include <linux/percpu_ida.h>

#include "vhost.h"

/* Driver version and fixed sizing limits for preallocated per-cmd resources. */
#define VHOST_SCSI_VERSION  "v0.1"
#define VHOST_SCSI_NAMELEN 256
#define VHOST_SCSI_MAX_CDB_SIZE 32
#define VHOST_SCSI_DEFAULT_TAGS 256
#define VHOST_SCSI_PREALLOC_SGLS 2048
#define VHOST_SCSI_PREALLOC_UPAGES 2048
#define VHOST_SCSI_PREALLOC_PROT_SGLS 512

/* Tracks requests in flight; comp is signalled when kref drops to zero. */
struct vhost_scsi_inflight {
	/* Wait for the flush operation to finish */
	struct completion comp;
	/* Refcount for the inflight reqs */
	struct kref kref;
};

69
struct vhost_scsi_cmd {
M
Michael S. Tsirkin 已提交
70 71 72 73
	/* Descriptor from vhost_get_vq_desc() for virt_queue segment */
	int tvc_vq_desc;
	/* virtio-scsi initiator task attribute */
	int tvc_task_attr;
74 75
	/* virtio-scsi response incoming iovecs */
	int tvc_in_iovs;
M
Michael S. Tsirkin 已提交
76 77 78 79 80 81 82 83
	/* virtio-scsi initiator data direction */
	enum dma_data_direction tvc_data_direction;
	/* Expected data transfer length from virtio-scsi header */
	u32 tvc_exp_data_len;
	/* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
	u64 tvc_tag;
	/* The number of scatterlists associated with this cmd */
	u32 tvc_sgl_count;
84
	u32 tvc_prot_sgl_count;
85
	/* Saved unpacked SCSI LUN for vhost_scsi_submission_work() */
M
Michael S. Tsirkin 已提交
86 87 88
	u32 tvc_lun;
	/* Pointer to the SGL formatted memory from virtio-scsi */
	struct scatterlist *tvc_sgl;
89
	struct scatterlist *tvc_prot_sgl;
90
	struct page **tvc_upages;
91 92
	/* Pointer to response header iovec */
	struct iovec *tvc_resp_iov;
M
Michael S. Tsirkin 已提交
93 94 95 96 97
	/* Pointer to vhost_scsi for our device */
	struct vhost_scsi *tvc_vhost;
	/* Pointer to vhost_virtqueue for the cmd */
	struct vhost_virtqueue *tvc_vq;
	/* Pointer to vhost nexus memory */
98
	struct vhost_scsi_nexus *tvc_nexus;
M
Michael S. Tsirkin 已提交
99 100
	/* The TCM I/O descriptor that is accessed via container_of() */
	struct se_cmd tvc_se_cmd;
101
	/* work item used for cmwq dispatch to vhost_scsi_submission_work() */
M
Michael S. Tsirkin 已提交
102 103
	struct work_struct work;
	/* Copy of the incoming SCSI command descriptor block (CDB) */
104
	unsigned char tvc_cdb[VHOST_SCSI_MAX_CDB_SIZE];
M
Michael S. Tsirkin 已提交
105 106 107 108 109 110 111 112
	/* Sense buffer that will be mapped into outgoing status */
	unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
	/* Completed commands list, serviced from vhost worker thread */
	struct llist_node tvc_completion_list;
	/* Used to track inflight cmd */
	struct vhost_scsi_inflight *inflight;
};

/* I_T nexus between the vhost initiator and a TCM endpoint. */
struct vhost_scsi_nexus {
	/* Pointer to TCM session for I_T Nexus */
	struct se_session *tvn_se_sess;
};

118
struct vhost_scsi_nacl {
M
Michael S. Tsirkin 已提交
119 120 121
	/* Binary World Wide unique Port Name for Vhost Initiator port */
	u64 iport_wwpn;
	/* ASCII formatted WWPN for Sas Initiator port */
122 123
	char iport_name[VHOST_SCSI_NAMELEN];
	/* Returned by vhost_scsi_make_nodeacl() */
M
Michael S. Tsirkin 已提交
124 125 126
	struct se_node_acl se_node_acl;
};

127
struct vhost_scsi_tpg {
M
Michael S. Tsirkin 已提交
128 129 130 131 132 133
	/* Vhost port target portal group tag for TCM */
	u16 tport_tpgt;
	/* Used to track number of TPG Port/Lun Links wrt to explict I_T Nexus shutdown */
	int tv_tpg_port_count;
	/* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
	int tv_tpg_vhost_count;
134 135
	/* Used for enabling T10-PI with legacy devices */
	int tv_fabric_prot_type;
136
	/* list for vhost_scsi_list */
M
Michael S. Tsirkin 已提交
137 138 139 140
	struct list_head tv_tpg_list;
	/* Used to protect access for tpg_nexus */
	struct mutex tv_tpg_mutex;
	/* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
141 142 143 144
	struct vhost_scsi_nexus *tpg_nexus;
	/* Pointer back to vhost_scsi_tport */
	struct vhost_scsi_tport *tport;
	/* Returned by vhost_scsi_make_tpg() */
M
Michael S. Tsirkin 已提交
145 146 147 148 149
	struct se_portal_group se_tpg;
	/* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
	struct vhost_scsi *vhost_scsi;
};

150
struct vhost_scsi_tport {
M
Michael S. Tsirkin 已提交
151 152 153 154 155
	/* SCSI protocol the tport is providing */
	u8 tport_proto_id;
	/* Binary World Wide unique Port Name for Vhost Target port */
	u64 tport_wwpn;
	/* ASCII formatted WWPN for Vhost Target port */
156 157
	char tport_name[VHOST_SCSI_NAMELEN];
	/* Returned by vhost_scsi_make_tport() */
M
Michael S. Tsirkin 已提交
158 159 160
	struct se_wwn tport_wwn;
};

161
struct vhost_scsi_evt {
M
Michael S. Tsirkin 已提交
162 163 164 165 166
	/* event to be sent to guest */
	struct virtio_scsi_event event;
	/* event list, serviced from vhost worker thread */
	struct llist_node list;
};
/* Fixed virtqueue layout: 0 = control, 1 = event, 2.. = request I/O queues. */
enum {
	VHOST_SCSI_VQ_CTL = 0,
	VHOST_SCSI_VQ_EVT = 1,
	VHOST_SCSI_VQ_IO = 2,
};

174
/* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */
175
enum {
176
	VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
177 178 179
					       (1ULL << VIRTIO_SCSI_F_T10_PI) |
					       (1ULL << VIRTIO_F_ANY_LAYOUT) |
					       (1ULL << VIRTIO_F_VERSION_1)
180 181
};

/* Upper bounds on targets, virtqueues, and queued guest events. */
#define VHOST_SCSI_MAX_TARGET	256
#define VHOST_SCSI_MAX_VQ	128
#define VHOST_SCSI_MAX_EVENT	128

186 187
struct vhost_scsi_virtqueue {
	struct vhost_virtqueue vq;
188 189 190 191 192
	/*
	 * Reference counting for inflight reqs, used for flush operation. At
	 * each time, one reference tracks new commands submitted, while we
	 * wait for another one to reach 0.
	 */
193
	struct vhost_scsi_inflight inflights[2];
194 195 196 197
	/*
	 * Indicate current inflight in use, protected by vq->mutex.
	 * Writers must also take dev mutex and flush under it.
	 */
198
	int inflight_idx;
199 200
};

201
struct vhost_scsi {
A
Asias He 已提交
202
	/* Protected by vhost_scsi->dev.mutex */
203
	struct vhost_scsi_tpg **vs_tpg;
A
Asias He 已提交
204 205
	char vs_vhost_wwpn[TRANSPORT_IQN_LEN];

206
	struct vhost_dev dev;
207
	struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];
208 209

	struct vhost_work vs_completion_work; /* cmd completion work item */
210
	struct llist_head vs_completion_list; /* cmd completion queue */
211 212 213 214 215 216

	struct vhost_work vs_event_work; /* evt injection work item */
	struct llist_head vs_event_list; /* evt injection queue */

	bool vs_events_missed; /* any missed events, protected by vq->mutex */
	int vs_events_nr; /* num of pending events, protected by vq->mutex */
217 218
};

219
static struct target_core_fabric_ops vhost_scsi_ops;
220
static struct workqueue_struct *vhost_scsi_workqueue;
221

222 223 224
/* Global spinlock to protect vhost_scsi TPG list for vhost IOCTL access */
static DEFINE_MUTEX(vhost_scsi_mutex);
static LIST_HEAD(vhost_scsi_list);
225

226
static int iov_num_pages(void __user *iov_base, size_t iov_len)
A
Asias He 已提交
227
{
228 229
	return (PAGE_ALIGN((unsigned long)iov_base + iov_len) -
	       ((unsigned long)iov_base & PAGE_MASK)) >> PAGE_SHIFT;
A
Asias He 已提交
230 231
}

232
static void vhost_scsi_done_inflight(struct kref *kref)
233 234 235 236 237 238 239
{
	struct vhost_scsi_inflight *inflight;

	inflight = container_of(kref, struct vhost_scsi_inflight, kref);
	complete(&inflight->comp);
}

240
static void vhost_scsi_init_inflight(struct vhost_scsi *vs,
241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267
				    struct vhost_scsi_inflight *old_inflight[])
{
	struct vhost_scsi_inflight *new_inflight;
	struct vhost_virtqueue *vq;
	int idx, i;

	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
		vq = &vs->vqs[i].vq;

		mutex_lock(&vq->mutex);

		/* store old infight */
		idx = vs->vqs[i].inflight_idx;
		if (old_inflight)
			old_inflight[i] = &vs->vqs[i].inflights[idx];

		/* setup new infight */
		vs->vqs[i].inflight_idx = idx ^ 1;
		new_inflight = &vs->vqs[i].inflights[idx ^ 1];
		kref_init(&new_inflight->kref);
		init_completion(&new_inflight->comp);

		mutex_unlock(&vq->mutex);
	}
}

static struct vhost_scsi_inflight *
268
vhost_scsi_get_inflight(struct vhost_virtqueue *vq)
269 270 271 272 273 274 275 276 277 278 279
{
	struct vhost_scsi_inflight *inflight;
	struct vhost_scsi_virtqueue *svq;

	svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
	inflight = &svq->inflights[svq->inflight_idx];
	kref_get(&inflight->kref);

	return inflight;
}

280
static void vhost_scsi_put_inflight(struct vhost_scsi_inflight *inflight)
281
{
282
	kref_put(&inflight->kref, vhost_scsi_done_inflight);
283 284
}

/* TPG attribute callback that unconditionally reports true. */
static int vhost_scsi_check_true(struct se_portal_group *se_tpg)
{
	return 1;
}

/* TPG attribute callback that unconditionally reports false. */
static int vhost_scsi_check_false(struct se_portal_group *se_tpg)
{
	return 0;
}

/* Fabric name reported to the TCM configfs layer. */
static char *vhost_scsi_get_fabric_name(void)
{
	return "vhost";
}

300
static u8 vhost_scsi_get_fabric_proto_ident(struct se_portal_group *se_tpg)
301
{
302 303 304
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	struct vhost_scsi_tport *tport = tpg->tport;
305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321

	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return sas_get_fabric_proto_ident(se_tpg);
	case SCSI_PROTOCOL_FCP:
		return fc_get_fabric_proto_ident(se_tpg);
	case SCSI_PROTOCOL_ISCSI:
		return iscsi_get_fabric_proto_ident(se_tpg);
	default:
		pr_err("Unknown tport_proto_id: 0x%02x, using"
			" SAS emulation\n", tport->tport_proto_id);
		break;
	}

	return sas_get_fabric_proto_ident(se_tpg);
}

322
static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg)
323
{
324 325 326
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	struct vhost_scsi_tport *tport = tpg->tport;
327 328 329 330

	return &tport->tport_name[0];
}

331
static u16 vhost_scsi_get_tpgt(struct se_portal_group *se_tpg)
332
{
333 334
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
335 336 337
	return tpg->tport_tpgt;
}

338
static u32 vhost_scsi_get_default_depth(struct se_portal_group *se_tpg)
339 340 341 342
{
	return 1;
}

343
static u32
344
vhost_scsi_get_pr_transport_id(struct se_portal_group *se_tpg,
345 346 347 348
			      struct se_node_acl *se_nacl,
			      struct t10_pr_registration *pr_reg,
			      int *format_code,
			      unsigned char *buf)
349
{
350 351 352
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	struct vhost_scsi_tport *tport = tpg->tport;
353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373

	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
					format_code, buf);
	case SCSI_PROTOCOL_FCP:
		return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
					format_code, buf);
	case SCSI_PROTOCOL_ISCSI:
		return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
					format_code, buf);
	default:
		pr_err("Unknown tport_proto_id: 0x%02x, using"
			" SAS emulation\n", tport->tport_proto_id);
		break;
	}

	return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
			format_code, buf);
}

374
static u32
375
vhost_scsi_get_pr_transport_id_len(struct se_portal_group *se_tpg,
376 377 378
				  struct se_node_acl *se_nacl,
				  struct t10_pr_registration *pr_reg,
				  int *format_code)
379
{
380 381 382
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	struct vhost_scsi_tport *tport = tpg->tport;
383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403

	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
					format_code);
	case SCSI_PROTOCOL_FCP:
		return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
					format_code);
	case SCSI_PROTOCOL_ISCSI:
		return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
					format_code);
	default:
		pr_err("Unknown tport_proto_id: 0x%02x, using"
			" SAS emulation\n", tport->tport_proto_id);
		break;
	}

	return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
			format_code);
}

404
static char *
405
vhost_scsi_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
406 407 408
				    const char *buf,
				    u32 *out_tid_len,
				    char **port_nexus_ptr)
409
{
410 411 412
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	struct vhost_scsi_tport *tport = tpg->tport;
413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433

	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
					port_nexus_ptr);
	case SCSI_PROTOCOL_FCP:
		return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
					port_nexus_ptr);
	case SCSI_PROTOCOL_ISCSI:
		return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
					port_nexus_ptr);
	default:
		pr_err("Unknown tport_proto_id: 0x%02x, using"
			" SAS emulation\n", tport->tport_proto_id);
		break;
	}

	return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
			port_nexus_ptr);
}

434 435 436 437 438 439 440 441
static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);

	return tpg->tv_fabric_prot_type;
}

442
static struct se_node_acl *
443
vhost_scsi_alloc_fabric_acl(struct se_portal_group *se_tpg)
444
{
445
	struct vhost_scsi_nacl *nacl;
446

447
	nacl = kzalloc(sizeof(struct vhost_scsi_nacl), GFP_KERNEL);
448
	if (!nacl) {
449
		pr_err("Unable to allocate struct vhost_scsi_nacl\n");
450 451 452 453 454 455
		return NULL;
	}

	return &nacl->se_node_acl;
}

456
static void
457
vhost_scsi_release_fabric_acl(struct se_portal_group *se_tpg,
458
			     struct se_node_acl *se_nacl)
459
{
460 461
	struct vhost_scsi_nacl *nacl = container_of(se_nacl,
			struct vhost_scsi_nacl, se_node_acl);
462 463 464
	kfree(nacl);
}

465
static u32 vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg)
466 467 468 469
{
	return 1;
}

470
static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
471
{
472 473
	struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
				struct vhost_scsi_cmd, tvc_se_cmd);
474
	struct se_session *se_sess = tv_cmd->tvc_nexus->tvn_se_sess;
475
	int i;
476 477 478 479

	if (tv_cmd->tvc_sgl_count) {
		for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
			put_page(sg_page(&tv_cmd->tvc_sgl[i]));
480
	}
481 482 483 484
	if (tv_cmd->tvc_prot_sgl_count) {
		for (i = 0; i < tv_cmd->tvc_prot_sgl_count; i++)
			put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
	}
485

486
	vhost_scsi_put_inflight(tv_cmd->inflight);
487
	percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
488 489
}

/* No fabric-specific session shutdown work is required. */
static int vhost_scsi_shutdown_session(struct se_session *se_sess)
{
	return 0;
}

/* No fabric-specific session close work is required. */
static void vhost_scsi_close_session(struct se_session *se_sess)
{
	return;
}

500
static u32 vhost_scsi_sess_get_index(struct se_session *se_sess)
501 502 503 504
{
	return 0;
}

/* Data for WRITEs is already mapped, so execute the command immediately. */
static int vhost_scsi_write_pending(struct se_cmd *se_cmd)
{
	/* Go ahead and process the write immediately */
	target_execute_cmd(se_cmd);
	return 0;
}

/* Write-pending status; nothing outstanding for this fabric. */
static int vhost_scsi_write_pending_status(struct se_cmd *se_cmd)
{
	return 0;
}

/* No default node attributes to configure for this fabric. */
static void vhost_scsi_set_default_node_attrs(struct se_node_acl *nacl)
{
	return;
}

522
static u32 vhost_scsi_get_task_tag(struct se_cmd *se_cmd)
523 524 525 526
{
	return 0;
}

/* Command state lookup; not used by this fabric. */
static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd)
{
	return 0;
}

532
static void vhost_scsi_complete_cmd(struct vhost_scsi_cmd *cmd)
533
{
534
	struct vhost_scsi *vs = cmd->tvc_vhost;
535

536
	llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
537 538 539

	vhost_work_queue(&vs->dev, &vs->vs_completion_work);
}
540

541
static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
542
{
543 544
	struct vhost_scsi_cmd *cmd = container_of(se_cmd,
				struct vhost_scsi_cmd, tvc_se_cmd);
545
	vhost_scsi_complete_cmd(cmd);
546 547 548
	return 0;
}

549
static int vhost_scsi_queue_status(struct se_cmd *se_cmd)
550
{
551 552
	struct vhost_scsi_cmd *cmd = container_of(se_cmd,
				struct vhost_scsi_cmd, tvc_se_cmd);
553
	vhost_scsi_complete_cmd(cmd);
554 555 556
	return 0;
}

/* Task-management responses are not delivered to the guest. */
static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd)
{
	return;
}

/* No fabric-specific handling for aborted tasks. */
static void vhost_scsi_aborted_task(struct se_cmd *se_cmd)
{
	return;
}

567
static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
568 569 570 571 572
{
	vs->vs_events_nr--;
	kfree(evt);
}

573 574
static struct vhost_scsi_evt *
vhost_scsi_allocate_evt(struct vhost_scsi *vs,
575
		       u32 event, u32 reason)
576
{
577
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
578
	struct vhost_scsi_evt *evt;
579 580 581 582 583 584 585 586

	if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
		vs->vs_events_missed = true;
		return NULL;
	}

	evt = kzalloc(sizeof(*evt), GFP_KERNEL);
	if (!evt) {
587
		vq_err(vq, "Failed to allocate vhost_scsi_evt\n");
588 589 590 591
		vs->vs_events_missed = true;
		return NULL;
	}

592 593
	evt->event.event = cpu_to_vhost32(vq, event);
	evt->event.reason = cpu_to_vhost32(vq, reason);
594 595 596 597 598
	vs->vs_events_nr++;

	return evt;
}

599
static void vhost_scsi_free_cmd(struct vhost_scsi_cmd *cmd)
600
{
601
	struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
602 603

	/* TODO locking against target/backend threads? */
604
	transport_generic_free_cmd(se_cmd, 0);
605

606
}
607

608 609 610
static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
{
	return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
611 612
}

613
static void
614
vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
615
{
616
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
617 618 619 620 621 622 623 624 625 626 627 628
	struct virtio_scsi_event *event = &evt->event;
	struct virtio_scsi_event __user *eventp;
	unsigned out, in;
	int head, ret;

	if (!vq->private_data) {
		vs->vs_events_missed = true;
		return;
	}

again:
	vhost_disable_notify(&vs->dev, vq);
629
	head = vhost_get_vq_desc(vq, vq->iov,
630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650
			ARRAY_SIZE(vq->iov), &out, &in,
			NULL, NULL);
	if (head < 0) {
		vs->vs_events_missed = true;
		return;
	}
	if (head == vq->num) {
		if (vhost_enable_notify(&vs->dev, vq))
			goto again;
		vs->vs_events_missed = true;
		return;
	}

	if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) {
		vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
				vq->iov[out].iov_len);
		vs->vs_events_missed = true;
		return;
	}

	if (vs->vs_events_missed) {
651
		event->event |= cpu_to_vhost32(vq, VIRTIO_SCSI_T_EVENTS_MISSED);
652 653 654 655 656 657 658 659
		vs->vs_events_missed = false;
	}

	eventp = vq->iov[out].iov_base;
	ret = __copy_to_user(eventp, event, sizeof(*event));
	if (!ret)
		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
	else
660
		vq_err(vq, "Faulted on vhost_scsi_send_event\n");
661 662
}

663
static void vhost_scsi_evt_work(struct vhost_work *work)
664 665 666
{
	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
					vs_event_work);
667
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
668
	struct vhost_scsi_evt *evt;
669 670 671 672 673
	struct llist_node *llnode;

	mutex_lock(&vq->mutex);
	llnode = llist_del_all(&vs->vs_event_list);
	while (llnode) {
674
		evt = llist_entry(llnode, struct vhost_scsi_evt, list);
675
		llnode = llist_next(llnode);
676 677
		vhost_scsi_do_evt_work(vs, evt);
		vhost_scsi_free_evt(vs, evt);
678 679 680 681
	}
	mutex_unlock(&vq->mutex);
}

682 683 684 685 686 687 688 689 690
/* Fill in status and signal that we are done processing this command
 *
 * This is scheduled in the vhost work queue so we are called with the owner
 * process mm and can access the vring.
 */
static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
{
	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
					vs_completion_work);
A
Asias He 已提交
691
	DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
692
	struct virtio_scsi_cmd_resp v_rsp;
693
	struct vhost_scsi_cmd *cmd;
694 695
	struct llist_node *llnode;
	struct se_cmd *se_cmd;
696
	struct iov_iter iov_iter;
A
Asias He 已提交
697
	int ret, vq;
698

A
Asias He 已提交
699
	bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
700 701
	llnode = llist_del_all(&vs->vs_completion_list);
	while (llnode) {
702
		cmd = llist_entry(llnode, struct vhost_scsi_cmd,
703 704
				     tvc_completion_list);
		llnode = llist_next(llnode);
705
		se_cmd = &cmd->tvc_se_cmd;
706 707

		pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
708
			cmd, se_cmd->residual_count, se_cmd->scsi_status);
709 710

		memset(&v_rsp, 0, sizeof(v_rsp));
711
		v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq, se_cmd->residual_count);
712 713
		/* TODO is status_qualifier field needed? */
		v_rsp.status = se_cmd->scsi_status;
714 715
		v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq,
						 se_cmd->scsi_sense_length);
716
		memcpy(v_rsp.sense, cmd->tvc_sense_buf,
717
		       se_cmd->scsi_sense_length);
718 719 720 721 722

		iov_iter_init(&iov_iter, READ, cmd->tvc_resp_iov,
			      cmd->tvc_in_iovs, sizeof(v_rsp));
		ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
		if (likely(ret == sizeof(v_rsp))) {
723
			struct vhost_scsi_virtqueue *q;
724 725
			vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
			q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
726
			vq = q - vs->vqs;
A
Asias He 已提交
727 728
			__set_bit(vq, signal);
		} else
729 730
			pr_err("Faulted on virtio_scsi_cmd_resp\n");

731
		vhost_scsi_free_cmd(cmd);
732 733
	}

A
Asias He 已提交
734 735 736
	vq = -1;
	while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
		< VHOST_SCSI_MAX_VQ)
737
		vhost_signal(&vs->dev, &vs->vqs[vq].vq);
738 739
}

740 741
static struct vhost_scsi_cmd *
vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
742 743
		   unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
		   u32 exp_data_len, int data_direction)
744
{
745 746
	struct vhost_scsi_cmd *cmd;
	struct vhost_scsi_nexus *tv_nexus;
747
	struct se_session *se_sess;
748
	struct scatterlist *sg, *prot_sg;
749
	struct page **pages;
750
	int tag;
751

752
	tv_nexus = tpg->tpg_nexus;
753
	if (!tv_nexus) {
754
		pr_err("Unable to locate active struct vhost_scsi_nexus\n");
755 756
		return ERR_PTR(-EIO);
	}
757
	se_sess = tv_nexus->tvn_se_sess;
758

759
	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
760
	if (tag < 0) {
761
		pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
762 763 764
		return ERR_PTR(-ENOMEM);
	}

765
	cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[tag];
766
	sg = cmd->tvc_sgl;
767
	prot_sg = cmd->tvc_prot_sgl;
768
	pages = cmd->tvc_upages;
769
	memset(cmd, 0, sizeof(struct vhost_scsi_cmd));
770

771
	cmd->tvc_sgl = sg;
772
	cmd->tvc_prot_sgl = prot_sg;
773
	cmd->tvc_upages = pages;
774
	cmd->tvc_se_cmd.map_tag = tag;
775 776 777
	cmd->tvc_tag = scsi_tag;
	cmd->tvc_lun = lun;
	cmd->tvc_task_attr = task_attr;
778 779 780
	cmd->tvc_exp_data_len = exp_data_len;
	cmd->tvc_data_direction = data_direction;
	cmd->tvc_nexus = tv_nexus;
781
	cmd->inflight = vhost_scsi_get_inflight(vq);
782

783
	memcpy(cmd->tvc_cdb, cdb, VHOST_SCSI_MAX_CDB_SIZE);
784

785
	return cmd;
786 787 788 789 790 791 792
}

/*
 * Map a user memory range into a scatterlist
 *
 * Returns the number of scatterlist entries used or -errno on error.
 */
793
static int
794
vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
795 796
		      void __user *ptr,
		      size_t len,
797
		      struct scatterlist *sgl,
798
		      bool write)
799
{
800 801
	unsigned int npages = 0, offset, nbytes;
	unsigned int pages_nr = iov_num_pages(ptr, len);
802
	struct scatterlist *sg = sgl;
803
	struct page **pages = cmd->tvc_upages;
804
	int ret, i;
805

806
	if (pages_nr > VHOST_SCSI_PREALLOC_UPAGES) {
807
		pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than"
808 809
		       " preallocated VHOST_SCSI_PREALLOC_UPAGES: %u\n",
			pages_nr, VHOST_SCSI_PREALLOC_UPAGES);
810 811 812
		return -ENOBUFS;
	}

813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828
	ret = get_user_pages_fast((unsigned long)ptr, pages_nr, write, pages);
	/* No pages were pinned */
	if (ret < 0)
		goto out;
	/* Less pages pinned than wanted */
	if (ret != pages_nr) {
		for (i = 0; i < ret; i++)
			put_page(pages[i]);
		ret = -EFAULT;
		goto out;
	}

	while (len > 0) {
		offset = (uintptr_t)ptr & ~PAGE_MASK;
		nbytes = min_t(unsigned int, PAGE_SIZE - offset, len);
		sg_set_page(sg, pages[npages], nbytes, offset);
829 830 831 832 833 834
		ptr += nbytes;
		len -= nbytes;
		sg++;
		npages++;
	}

835
out:
836 837 838
	return ret;
}

839
static int
840
vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
841
{
842
	int sgl_count = 0;
843

844 845 846 847 848
	if (!iter || !iter->iov) {
		pr_err("%s: iter->iov is NULL, but expected bytes: %zu"
		       " present\n", __func__, bytes);
		return -EINVAL;
	}
849

850 851 852 853 854
	sgl_count = iov_iter_npages(iter, 0xffff);
	if (sgl_count > max_sgls) {
		pr_err("%s: requested sgl_count: %d exceeds pre-allocated"
		       " max_sgls: %d\n", __func__, sgl_count, max_sgls);
		return -EINVAL;
855
	}
856 857
	return sgl_count;
}
858

859
static int
860 861 862
vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
		      struct iov_iter *iter,
		      struct scatterlist *sg, int sg_count)
863 864 865
{
	size_t off = iter->iov_offset;
	int i, ret;
866

867 868 869
	for (i = 0; i < iter->nr_segs; i++) {
		void __user *base = iter->iov[i].iov_base + off;
		size_t len = iter->iov[i].iov_len - off;
870

871
		ret = vhost_scsi_map_to_sgl(cmd, base, len, sg, write);
872
		if (ret < 0) {
873 874 875 876 877
			for (i = 0; i < sg_count; i++) {
				struct page *page = sg_page(&sg[i]);
				if (page)
					put_page(page);
			}
878 879 880
			return ret;
		}
		sg += ret;
881
		off = 0;
882 883 884 885
	}
	return 0;
}

886
static int
887
vhost_scsi_mapal(struct vhost_scsi_cmd *cmd,
888 889 890 891 892 893 894 895
		 size_t prot_bytes, struct iov_iter *prot_iter,
		 size_t data_bytes, struct iov_iter *data_iter)
{
	int sgl_count, ret;
	bool write = (cmd->tvc_data_direction == DMA_FROM_DEVICE);

	if (prot_bytes) {
		sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes,
896
						 VHOST_SCSI_PREALLOC_PROT_SGLS);
897 898 899 900 901 902 903 904 905 906 907
		if (sgl_count < 0)
			return sgl_count;

		sg_init_table(cmd->tvc_prot_sgl, sgl_count);
		cmd->tvc_prot_sgl_count = sgl_count;
		pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
			 cmd->tvc_prot_sgl, cmd->tvc_prot_sgl_count);

		ret = vhost_scsi_iov_to_sgl(cmd, write, prot_iter,
					    cmd->tvc_prot_sgl,
					    cmd->tvc_prot_sgl_count);
908 909 910 911
		if (ret < 0) {
			cmd->tvc_prot_sgl_count = 0;
			return ret;
		}
912 913
	}
	sgl_count = vhost_scsi_calc_sgls(data_iter, data_bytes,
914
					 VHOST_SCSI_PREALLOC_SGLS);
915 916 917 918 919 920 921 922 923 924 925 926 927
	if (sgl_count < 0)
		return sgl_count;

	sg_init_table(cmd->tvc_sgl, sgl_count);
	cmd->tvc_sgl_count = sgl_count;
	pr_debug("%s data_sg %p data_sgl_count %u\n", __func__,
		  cmd->tvc_sgl, cmd->tvc_sgl_count);

	ret = vhost_scsi_iov_to_sgl(cmd, write, data_iter,
				    cmd->tvc_sgl, cmd->tvc_sgl_count);
	if (ret < 0) {
		cmd->tvc_sgl_count = 0;
		return ret;
928 929 930 931
	}
	return 0;
}

932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948
/* Translate a virtio-scsi task attribute to its TCM equivalent;
 * unknown values default to TCM_SIMPLE_TAG. */
static int vhost_scsi_to_tcm_attr(int attr)
{
	switch (attr) {
	case VIRTIO_SCSI_S_SIMPLE:
		return TCM_SIMPLE_TAG;
	case VIRTIO_SCSI_S_ORDERED:
		return TCM_ORDERED_TAG;
	case VIRTIO_SCSI_S_HEAD:
		return TCM_HEAD_TAG;
	case VIRTIO_SCSI_S_ACA:
		return TCM_ACA_TAG;
	default:
		break;
	}
	return TCM_SIMPLE_TAG;
}

949
static void vhost_scsi_submission_work(struct work_struct *work)
950
{
951 952 953
	struct vhost_scsi_cmd *cmd =
		container_of(work, struct vhost_scsi_cmd, work);
	struct vhost_scsi_nexus *tv_nexus;
954
	struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
955 956
	struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;
	int rc;
957

958
	/* FIXME: BIDI operation */
959 960
	if (cmd->tvc_sgl_count) {
		sg_ptr = cmd->tvc_sgl;
961 962 963 964 965

		if (cmd->tvc_prot_sgl_count)
			sg_prot_ptr = cmd->tvc_prot_sgl;
		else
			se_cmd->prot_pto = true;
966 967 968
	} else {
		sg_ptr = NULL;
	}
969
	tv_nexus = cmd->tvc_nexus;
970 971

	rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
972 973
			cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
			cmd->tvc_lun, cmd->tvc_exp_data_len,
974 975 976 977
			vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
			cmd->tvc_data_direction, TARGET_SCF_ACK_KREF,
			sg_ptr, cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
			cmd->tvc_prot_sgl_count);
978 979
	if (rc < 0) {
		transport_send_check_condition_and_sense(se_cmd,
980
				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
981 982 983 984
		transport_generic_free_cmd(se_cmd, 0);
	}
}

985 986 987 988
static void
vhost_scsi_send_bad_target(struct vhost_scsi *vs,
			   struct vhost_virtqueue *vq,
			   int head, unsigned out)
989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003
{
	struct virtio_scsi_cmd_resp __user *resp;
	struct virtio_scsi_cmd_resp rsp;
	int ret;

	memset(&rsp, 0, sizeof(rsp));
	rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
	resp = vq->iov[out].iov_base;
	ret = __copy_to_user(resp, &rsp, sizeof(rsp));
	if (!ret)
		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
	else
		pr_err("Faulted on virtio_scsi_cmd_resp\n");
}

1004 1005
/*
 * vhost_scsi_handle_vq - drain and dispatch pending requests on one I/O vq
 *
 * Runs with vq->mutex held for the whole pass.  Bails out immediately if
 * the endpoint has not been configured (vq->private_data == NULL).  Each
 * descriptor is parsed as either a virtio_scsi_cmd_req or, when the
 * VIRTIO_SCSI_F_T10_PI feature was negotiated, a virtio_scsi_cmd_req_pi.
 * Malformed or unroutable requests are answered with
 * vhost_scsi_send_bad_target() so the ring keeps moving; valid requests
 * are turned into vhost_scsi_cmd descriptors and dispatched to
 * vhost_scsi_submission_work() on vhost_scsi_workqueue.
 */
static void
vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
{
	struct vhost_scsi_tpg **vs_tpg, *tpg;
	struct virtio_scsi_cmd_req v_req;
	struct virtio_scsi_cmd_req_pi v_req_pi;
	struct vhost_scsi_cmd *cmd;
	struct iov_iter out_iter, in_iter, prot_iter, data_iter;
	u64 tag;
	u32 exp_data_len, data_direction;
	unsigned out, in;
	int head, ret, prot_bytes;
	size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp);
	size_t out_size, in_size;
	u16 lun;
	u8 *target, *lunp, task_attr;
	bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
	void *req, *cdb;

	mutex_lock(&vq->mutex);
	/*
	 * We can handle the vq only after the endpoint is setup by calling the
	 * VHOST_SCSI_SET_ENDPOINT ioctl.
	 */
	vs_tpg = vq->private_data;
	if (!vs_tpg)
		goto out;

	vhost_disable_notify(&vs->dev, vq);

	for (;;) {
		head = vhost_get_vq_desc(vq, vq->iov,
					 ARRAY_SIZE(vq->iov), &out, &in,
					 NULL, NULL);
		pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
			 head, out, in);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new?  Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
				vhost_disable_notify(&vs->dev, vq);
				continue;
			}
			break;
		}
		/*
		 * Check for a sane response buffer so we can report early
		 * errors back to the guest.
		 */
		if (unlikely(vq->iov[out].iov_len < rsp_size)) {
			vq_err(vq, "Expecting at least virtio_scsi_cmd_resp"
				" size, got %zu bytes\n", vq->iov[out].iov_len);
			break;
		}
		/*
		 * Setup pointers and values based upon different virtio-scsi
		 * request header if T10_PI is enabled in KVM guest.
		 */
		if (t10_pi) {
			req = &v_req_pi;
			req_size = sizeof(v_req_pi);
			lunp = &v_req_pi.lun[0];
			target = &v_req_pi.lun[1];
		} else {
			req = &v_req;
			req_size = sizeof(v_req);
			lunp = &v_req.lun[0];
			target = &v_req.lun[1];
		}
		/*
		 * FIXME: Not correct for BIDI operation
		 */
		out_size = iov_length(vq->iov, out);
		in_size = iov_length(&vq->iov[out], in);

		/*
		 * Copy over the virtio-scsi request header, which for a
		 * ANY_LAYOUT enabled guest may span multiple iovecs, or a
		 * single iovec may contain both the header + outgoing
		 * WRITE payloads.
		 *
		 * copy_from_iter() will advance out_iter, so that it will
		 * point at the start of the outgoing WRITE payload, if
		 * DMA_TO_DEVICE is set.
		 */
		iov_iter_init(&out_iter, WRITE, vq->iov, out, out_size);

		ret = copy_from_iter(req, req_size, &out_iter);
		if (unlikely(ret != req_size)) {
			vq_err(vq, "Faulted on copy_from_iter\n");
			vhost_scsi_send_bad_target(vs, vq, head, out);
			continue;
		}
		/* virtio-scsi spec requires byte 0 of the lun to be 1 */
		if (unlikely(*lunp != 1)) {
			vq_err(vq, "Illegal virtio-scsi lun: %u\n", *lunp);
			vhost_scsi_send_bad_target(vs, vq, head, out);
			continue;
		}

		/* lun[1] indexes the per-endpoint target array */
		tpg = ACCESS_ONCE(vs_tpg[*target]);
		if (unlikely(!tpg)) {
			/* Target does not exist, fail the request */
			vhost_scsi_send_bad_target(vs, vq, head, out);
			continue;
		}
		/*
		 * Determine data_direction by calculating the total outgoing
		 * iovec sizes + incoming iovec sizes vs. virtio-scsi request +
		 * response headers respectively.
		 *
		 * For DMA_TO_DEVICE this is out_iter, which is already pointing
		 * to the right place.
		 *
		 * For DMA_FROM_DEVICE, the iovec will be just past the end
		 * of the virtio-scsi response header in either the same
		 * or immediately following iovec.
		 *
		 * Any associated T10_PI bytes for the outgoing / incoming
		 * payloads are included in calculation of exp_data_len here.
		 */
		prot_bytes = 0;

		if (out_size > req_size) {
			data_direction = DMA_TO_DEVICE;
			exp_data_len = out_size - req_size;
			data_iter = out_iter;
		} else if (in_size > rsp_size) {
			data_direction = DMA_FROM_DEVICE;
			exp_data_len = in_size - rsp_size;

			iov_iter_init(&in_iter, READ, &vq->iov[out], in,
				      rsp_size + exp_data_len);
			iov_iter_advance(&in_iter, rsp_size);
			data_iter = in_iter;
		} else {
			data_direction = DMA_NONE;
			exp_data_len = 0;
		}
		/*
		 * If T10_PI header + payload is present, setup prot_iter values
		 * and recalculate data_iter for vhost_scsi_mapal() mapping to
		 * host scatterlists via get_user_pages_fast().
		 */
		if (t10_pi) {
			/* PI direction must agree with the data direction */
			if (v_req_pi.pi_bytesout) {
				if (data_direction != DMA_TO_DEVICE) {
					vq_err(vq, "Received non zero pi_bytesout,"
						" but wrong data_direction\n");
					vhost_scsi_send_bad_target(vs, vq, head, out);
					continue;
				}
				prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
			} else if (v_req_pi.pi_bytesin) {
				if (data_direction != DMA_FROM_DEVICE) {
					vq_err(vq, "Received non zero pi_bytesin,"
						" but wrong data_direction\n");
					vhost_scsi_send_bad_target(vs, vq, head, out);
					continue;
				}
				prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
			}
			/*
			 * Set prot_iter to data_iter, and advance past any
			 * preceeding prot_bytes that may be present.
			 *
			 * Also fix up the exp_data_len to reflect only the
			 * actual data payload length.
			 */
			if (prot_bytes) {
				exp_data_len -= prot_bytes;
				prot_iter = data_iter;
				iov_iter_advance(&data_iter, prot_bytes);
			}
			tag = vhost64_to_cpu(vq, v_req_pi.tag);
			task_attr = v_req_pi.task_attr;
			cdb = &v_req_pi.cdb[0];
			lun = ((v_req_pi.lun[2] << 8) | v_req_pi.lun[3]) & 0x3FFF;
		} else {
			tag = vhost64_to_cpu(vq, v_req.tag);
			task_attr = v_req.task_attr;
			cdb = &v_req.cdb[0];
			lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
		}
		/*
		 * Check that the received CDB size does not exceeded our
		 * hardcoded max for vhost-scsi, then get a pre-allocated
		 * cmd descriptor for the new virtio-scsi tag.
		 *
		 * TODO what if cdb was too small for varlen cdb header?
		 */
		if (unlikely(scsi_command_size(cdb) > VHOST_SCSI_MAX_CDB_SIZE)) {
			vq_err(vq, "Received SCSI CDB with command_size: %d that"
				" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
				scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
			vhost_scsi_send_bad_target(vs, vq, head, out);
			continue;
		}
		cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr,
					 exp_data_len + prot_bytes,
					 data_direction);
		if (IS_ERR(cmd)) {
			vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
			       PTR_ERR(cmd));
			vhost_scsi_send_bad_target(vs, vq, head, out);
			continue;
		}
		cmd->tvc_vhost = vs;
		cmd->tvc_vq = vq;
		cmd->tvc_resp_iov = &vq->iov[out];
		cmd->tvc_in_iovs = in;

		pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
			 cmd->tvc_cdb[0], cmd->tvc_lun);
		pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:"
			 " %d\n", cmd, exp_data_len, prot_bytes, data_direction);

		if (data_direction != DMA_NONE) {
			ret = vhost_scsi_mapal(cmd,
					       prot_bytes, &prot_iter,
					       exp_data_len, &data_iter);
			if (unlikely(ret)) {
				vq_err(vq, "Failed to map iov to sgl\n");
				vhost_scsi_release_cmd(&cmd->tvc_se_cmd);
				vhost_scsi_send_bad_target(vs, vq, head, out);
				continue;
			}
		}
		/*
		 * Save the descriptor from vhost_get_vq_desc() to be used to
		 * complete the virtio-scsi request in TCM callback context via
		 * vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
		 */
		cmd->tvc_vq_desc = head;
		/*
		 * Dispatch cmd descriptor for cmwq execution in process
		 * context provided by vhost_scsi_workqueue.  This also ensures
		 * cmd is executed on the same kworker CPU as this vhost
		 * thread to gain positive L2 cache locality effects.
		 */
		INIT_WORK(&cmd->work, vhost_scsi_submission_work);
		queue_work(vhost_scsi_workqueue, &cmd->work);
	}
out:
	mutex_unlock(&vq->mutex);
}

/*
 * vhost_scsi_ctl_handle_kick - kick handler for the control virtqueue
 *
 * Currently a stub: control-queue requests are not processed, the kick
 * is only logged at debug level.
 */
static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
{
	pr_debug("%s: The handling func for control queue.\n", __func__);
}

1258
/*
 * vhost_scsi_send_evt - queue a virtio-scsi event for delivery to the guest
 *
 * Allocates an event descriptor (silently dropped if allocation fails —
 * vhost_scsi_allocate_evt() returning NULL), optionally encodes the
 * (tpg, lun) address into the event's LUN field, then hands it to the
 * event work via the lockless vs_event_list.
 */
static void
vhost_scsi_send_evt(struct vhost_scsi *vs,
		   struct vhost_scsi_tpg *tpg,
		   struct se_lun *lun,
		   u32 event,
		   u32 reason)
{
	struct vhost_scsi_evt *evt;

	evt = vhost_scsi_allocate_evt(vs, event, reason);
	if (!evt)
		return;

	if (tpg && lun) {
		/* TODO: share lun setup code with virtio-scsi.ko */
		/*
		 * Note: evt->event is zeroed when we allocate it and
		 * lun[4-7] need to be zero according to virtio-scsi spec.
		 */
		evt->event.lun[0] = 0x01;
		evt->event.lun[1] = tpg->tport_tpgt;
		/* 0x40 marks the flat addressing method for LUNs >= 256 */
		if (lun->unpacked_lun >= 256)
			evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40 ;
		evt->event.lun[3] = lun->unpacked_lun & 0xFF;
	}

	llist_add(&evt->list, &vs->vs_event_list);
	vhost_work_queue(&vs->dev, &vs->vs_event_work);
}

1288 1289
/*
 * vhost_scsi_evt_handle_kick - kick handler for the event virtqueue
 *
 * If the endpoint is configured and events were previously dropped
 * (vs_events_missed), emit a VIRTIO_SCSI_T_NO_EVENT so the guest learns
 * it missed notifications.
 */
static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						poll.work);
	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

	mutex_lock(&vq->mutex);
	if (!vq->private_data)
		goto out;

	if (vs->vs_events_missed)
		vhost_scsi_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
out:
	mutex_unlock(&vq->mutex);
}

/*
 * vhost_scsi_handle_kick - kick handler for the I/O virtqueues
 *
 * Recovers the virtqueue and owning vhost_scsi device from the vhost_work
 * embedded in the poll structure, then drains the queue.
 */
static void vhost_scsi_handle_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						poll.work);

	vhost_scsi_handle_vq(container_of(vq->dev, struct vhost_scsi, dev),
			     vq);
}

1313 1314
/* Flush the poll work of a single virtqueue, by index. */
static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
{
	vhost_poll_flush(&vs->vqs[index].vq.poll);
}

1318
/*
 * vhost_scsi_flush - wait for all in-flight requests to complete
 *
 * Callers must hold dev mutex.  Swaps in a fresh inflight tracker per vq,
 * drops the initial kref of each old tracker, flushes all poll/work items,
 * then blocks until every request issued before the flush has signalled
 * its completion.
 */
static void vhost_scsi_flush(struct vhost_scsi *vs)
{
	struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
	int i;

	/* Init new inflight and remember the old inflight */
	vhost_scsi_init_inflight(vs, old_inflight);

	/*
	 * The inflight->kref was initialized to 1. We decrement it here to
	 * indicate the start of the flush operation so that it will reach 0
	 * when all the reqs are finished.
	 */
	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
		kref_put(&old_inflight[i]->kref, vhost_scsi_done_inflight);

	/* Flush both the vhost poll and vhost work */
	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
		vhost_scsi_flush_vq(vs, i);
	vhost_work_flush(&vs->dev, &vs->vs_completion_work);
	vhost_work_flush(&vs->dev, &vs->vs_event_work);

	/* Wait for all reqs issued before the flush to be finished */
	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
		wait_for_completion(&old_inflight[i]->comp);
}

1346 1347
/*
 * Called from vhost_scsi_ioctl() context to walk the list of available
 * vhost_scsi_tpg with an active struct vhost_scsi_nexus
 *
 *  The lock nesting rule is:
 *    vhost_scsi_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
 */
1353 1354 1355
/*
 * vhost_scsi_set_endpoint - bind this vhost device to matching target ports
 *
 * Builds a new vs_tpg[] lookup table (indexed by tpgt) containing every
 * registered tpg whose tport WWPN matches t->vhost_wwpn and that has an
 * active nexus and no other vhost user.  A configfs dependency is taken
 * on each claimed tpg so its group cannot be removed while in use.  The
 * new table is installed as each vq's private_data.
 *
 * Returns 0 on success, -EFAULT if a ring is not accessible, -ENOMEM on
 * allocation failure, -EEXIST if a tpg is already claimed or nothing
 * matched, or the target_depend_item() error.
 */
static int
vhost_scsi_set_endpoint(struct vhost_scsi *vs,
			struct vhost_scsi_target *t)
{
	struct se_portal_group *se_tpg;
	struct vhost_scsi_tport *tv_tport;
	struct vhost_scsi_tpg *tpg;
	struct vhost_scsi_tpg **vs_tpg;
	struct vhost_virtqueue *vq;
	int index, ret, i, len;
	bool match = false;

	mutex_lock(&vhost_scsi_mutex);
	mutex_lock(&vs->dev.mutex);

	/* Verify that ring has been setup correctly. */
	for (index = 0; index < vs->dev.nvqs; ++index) {
		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
			ret = -EFAULT;
			goto out;
		}
	}

	len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
	vs_tpg = kzalloc(len, GFP_KERNEL);
	if (!vs_tpg) {
		ret = -ENOMEM;
		goto out;
	}
	/* Start from the currently-installed table, if any */
	if (vs->vs_tpg)
		memcpy(vs_tpg, vs->vs_tpg, len);

	list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) {
		mutex_lock(&tpg->tv_tpg_mutex);
		if (!tpg->tpg_nexus) {
			mutex_unlock(&tpg->tv_tpg_mutex);
			continue;
		}
		if (tpg->tv_tpg_vhost_count != 0) {
			mutex_unlock(&tpg->tv_tpg_mutex);
			continue;
		}
		tv_tport = tpg->tport;

		if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
			if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
				kfree(vs_tpg);
				mutex_unlock(&tpg->tv_tpg_mutex);
				ret = -EEXIST;
				goto out;
			}
			/*
			 * In order to ensure individual vhost-scsi configfs
			 * groups cannot be removed while in use by vhost ioctl,
			 * go ahead and take an explicit se_tpg->tpg_group.cg_item
			 * dependency now.
			 */
			se_tpg = &tpg->se_tpg;
			ret = target_depend_item(&se_tpg->tpg_group.cg_item);
			if (ret) {
				pr_warn("configfs_depend_item() failed: %d\n", ret);
				kfree(vs_tpg);
				mutex_unlock(&tpg->tv_tpg_mutex);
				goto out;
			}
			tpg->tv_tpg_vhost_count++;
			tpg->vhost_scsi = vs;
			vs_tpg[tpg->tport_tpgt] = tpg;
			smp_mb__after_atomic();
			match = true;
		}
		mutex_unlock(&tpg->tv_tpg_mutex);
	}

	if (match) {
		memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
		       sizeof(vs->vs_vhost_wwpn));
		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
			vq = &vs->vqs[i].vq;
			mutex_lock(&vq->mutex);
			vq->private_data = vs_tpg;
			vhost_init_used(vq);
			mutex_unlock(&vq->mutex);
		}
		ret = 0;
	} else {
		ret = -EEXIST;
	}

	/*
	 * Act as synchronize_rcu to make sure access to
	 * old vs->vs_tpg is finished.
	 */
	vhost_scsi_flush(vs);
	kfree(vs->vs_tpg);
	vs->vs_tpg = vs_tpg;

out:
	mutex_unlock(&vs->dev.mutex);
	mutex_unlock(&vhost_scsi_mutex);
	return ret;
}

1457 1458 1459
/*
 * vhost_scsi_clear_endpoint - detach this vhost device from its target ports
 *
 * Inverse of vhost_scsi_set_endpoint(): releases every tpg claimed under
 * t->vhost_wwpn, drops the configfs dependency taken at bind time, clears
 * each vq's private_data, and frees the vs_tpg[] lookup table after a
 * flush.  Returns 0 on success (including when no table was installed),
 * -EFAULT, -ENODEV, or -EINVAL on mismatch.
 */
static int
vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
			  struct vhost_scsi_target *t)
{
	struct se_portal_group *se_tpg;
	struct vhost_scsi_tport *tv_tport;
	struct vhost_scsi_tpg *tpg;
	struct vhost_virtqueue *vq;
	bool match = false;
	int index, ret, i;
	u8 target;

	mutex_lock(&vhost_scsi_mutex);
	mutex_lock(&vs->dev.mutex);
	/* Verify that ring has been setup correctly. */
	for (index = 0; index < vs->dev.nvqs; ++index) {
		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
			ret = -EFAULT;
			goto err_dev;
		}
	}

	/* Nothing bound: success */
	if (!vs->vs_tpg) {
		ret = 0;
		goto err_dev;
	}

	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
		target = i;
		tpg = vs->vs_tpg[target];
		if (!tpg)
			continue;

		mutex_lock(&tpg->tv_tpg_mutex);
		tv_tport = tpg->tport;
		if (!tv_tport) {
			ret = -ENODEV;
			goto err_tpg;
		}

		if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
			pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu"
				" does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
				tv_tport->tport_name, tpg->tport_tpgt,
				t->vhost_wwpn, t->vhost_tpgt);
			ret = -EINVAL;
			goto err_tpg;
		}
		tpg->tv_tpg_vhost_count--;
		tpg->vhost_scsi = NULL;
		vs->vs_tpg[target] = NULL;
		match = true;
		mutex_unlock(&tpg->tv_tpg_mutex);
		/*
		 * Release se_tpg->tpg_group.cg_item configfs dependency now
		 * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur.
		 */
		se_tpg = &tpg->se_tpg;
		target_undepend_item(&se_tpg->tpg_group.cg_item);
	}
	if (match) {
		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
			vq = &vs->vqs[i].vq;
			mutex_lock(&vq->mutex);
			vq->private_data = NULL;
			mutex_unlock(&vq->mutex);
		}
	}
	/*
	 * Act as synchronize_rcu to make sure access to
	 * old vs->vs_tpg is finished.
	 */
	vhost_scsi_flush(vs);
	kfree(vs->vs_tpg);
	vs->vs_tpg = NULL;
	WARN_ON(vs->vs_events_nr);
	mutex_unlock(&vs->dev.mutex);
	mutex_unlock(&vhost_scsi_mutex);
	return 0;

err_tpg:
	mutex_unlock(&tpg->tv_tpg_mutex);
err_dev:
	mutex_unlock(&vs->dev.mutex);
	mutex_unlock(&vhost_scsi_mutex);
	return ret;
}

1545 1546
/*
 * vhost_scsi_set_features - handle VHOST_SET_FEATURES
 *
 * Rejects feature bits outside VHOST_SCSI_FEATURES; verifies log access
 * if VHOST_F_LOG_ALL is requested; then records the acked features on
 * every virtqueue under its own mutex.
 */
static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
{
	struct vhost_virtqueue *vq;
	int i;

	if (features & ~VHOST_SCSI_FEATURES)
		return -EOPNOTSUPP;

	mutex_lock(&vs->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&vs->dev)) {
		mutex_unlock(&vs->dev.mutex);
		return -EFAULT;
	}

	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
		vq = &vs->vqs[i].vq;
		mutex_lock(&vq->mutex);
		vq->acked_features = features;
		mutex_unlock(&vq->mutex);
	}
	mutex_unlock(&vs->dev.mutex);
	return 0;
}

1570 1571
/*
 * vhost_scsi_open - /dev/vhost-scsi open(): allocate and init a device
 *
 * struct vhost_scsi is large, so kzalloc is attempted without warning
 * first and vzalloc is used as fallback.  Sets up completion/event work,
 * the control, event and I/O virtqueue kick handlers, and the initial
 * inflight tracker.
 */
static int vhost_scsi_open(struct inode *inode, struct file *f)
{
	struct vhost_scsi *vs;
	struct vhost_virtqueue **vqs;
	int r = -ENOMEM, i;

	vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
	if (!vs) {
		vs = vzalloc(sizeof(*vs));
		if (!vs)
			goto err_vs;
	}

	vqs = kmalloc(VHOST_SCSI_MAX_VQ * sizeof(*vqs), GFP_KERNEL);
	if (!vqs)
		goto err_vqs;

	vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
	vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);

	vs->vs_events_nr = 0;
	vs->vs_events_missed = false;

	vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
	vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
	vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
	for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
		vqs[i] = &vs->vqs[i].vq;
		vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
	}
	vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);

	vhost_scsi_init_inflight(vs, NULL);

	f->private_data = vs;
	return 0;

err_vqs:
	/* kvfree handles both the kzalloc and vzalloc cases */
	kvfree(vs);
err_vs:
	return r;
}

/*
 * vhost_scsi_release - /dev/vhost-scsi close(): tear everything down
 *
 * Clears the endpoint using the WWPN recorded at bind time, stops and
 * cleans the vhost device, and frees the vq array and device struct.
 */
static int vhost_scsi_release(struct inode *inode, struct file *f)
{
	struct vhost_scsi *vs = f->private_data;
	struct vhost_scsi_target t;

	mutex_lock(&vs->dev.mutex);
	memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
	mutex_unlock(&vs->dev.mutex);
	vhost_scsi_clear_endpoint(vs, &t);
	vhost_dev_stop(&vs->dev);
	vhost_dev_cleanup(&vs->dev, false);
	/* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
	vhost_scsi_flush(vs);
	kfree(vs->dev.vqs);
	kvfree(vs);
	return 0;
}

1632 1633 1634 1635
/*
 * vhost_scsi_ioctl - dispatch vhost-scsi specific and generic vhost ioctls
 *
 * Handles SET/CLEAR_ENDPOINT, ABI version query, the events-missed flag,
 * and feature negotiation; everything else falls through to the generic
 * vhost_dev_ioctl()/vhost_vring_ioctl() under the device mutex.
 */
static long
vhost_scsi_ioctl(struct file *f,
		 unsigned int ioctl,
		 unsigned long arg)
{
	struct vhost_scsi *vs = f->private_data;
	struct vhost_scsi_target backend;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	u32 __user *eventsp = argp;
	u32 events_missed;
	u64 features;
	int r, abi_version = VHOST_SCSI_ABI_VERSION;
	/* vs_events_missed is protected by the event vq's mutex */
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;

	switch (ioctl) {
	case VHOST_SCSI_SET_ENDPOINT:
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		if (backend.reserved != 0)
			return -EOPNOTSUPP;

		return vhost_scsi_set_endpoint(vs, &backend);
	case VHOST_SCSI_CLEAR_ENDPOINT:
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		if (backend.reserved != 0)
			return -EOPNOTSUPP;

		return vhost_scsi_clear_endpoint(vs, &backend);
	case VHOST_SCSI_GET_ABI_VERSION:
		if (copy_to_user(argp, &abi_version, sizeof abi_version))
			return -EFAULT;
		return 0;
	case VHOST_SCSI_SET_EVENTS_MISSED:
		if (get_user(events_missed, eventsp))
			return -EFAULT;
		mutex_lock(&vq->mutex);
		vs->vs_events_missed = events_missed;
		mutex_unlock(&vq->mutex);
		return 0;
	case VHOST_SCSI_GET_EVENTS_MISSED:
		mutex_lock(&vq->mutex);
		events_missed = vs->vs_events_missed;
		mutex_unlock(&vq->mutex);
		if (put_user(events_missed, eventsp))
			return -EFAULT;
		return 0;
	case VHOST_GET_FEATURES:
		features = VHOST_SCSI_FEATURES;
		if (copy_to_user(featurep, &features, sizeof features))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, featurep, sizeof features))
			return -EFAULT;
		return vhost_scsi_set_features(vs, features);
	default:
		mutex_lock(&vs->dev.mutex);
		r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
		/* TODO: flush backend after dev ioctl. */
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
		mutex_unlock(&vs->dev.mutex);
		return r;
	}
}

1700 1701 1702 1703 1704 1705 1706 1707
#ifdef CONFIG_COMPAT
/* 32-bit compat entry point: translate the pointer and reuse the ioctl. */
static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl,
				unsigned long arg)
{
	return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif

1708 1709 1710 1711
/* File operations for the /dev/vhost-scsi misc character device. */
static const struct file_operations vhost_scsi_fops = {
	.owner          = THIS_MODULE,
	.release        = vhost_scsi_release,
	.unlocked_ioctl = vhost_scsi_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= vhost_scsi_compat_ioctl,
#endif
	.open           = vhost_scsi_open,
	.llseek		= noop_llseek,
};

static struct miscdevice vhost_scsi_misc = {
	MISC_DYNAMIC_MINOR,
	"vhost-scsi",
	&vhost_scsi_fops,
};

/* Register /dev/vhost-scsi at module init. */
static int __init vhost_scsi_register(void)
{
	return misc_register(&vhost_scsi_misc);
}

/* Unregister /dev/vhost-scsi at module exit. */
static int vhost_scsi_deregister(void)
{
	return misc_deregister(&vhost_scsi_misc);
}

1735
static char *vhost_scsi_dump_proto_id(struct vhost_scsi_tport *tport)
1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750
{
	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return "SAS";
	case SCSI_PROTOCOL_FCP:
		return "FCP";
	case SCSI_PROTOCOL_ISCSI:
		return "iSCSI";
	default:
		break;
	}

	return "Unknown";
}

1751
/*
 * vhost_scsi_do_plug - emit a TRANSPORT_RESET hotplug/hotunplug event
 *
 * No-op when the tpg is not bound to a vhost device.  The event is only
 * sent if the guest negotiated VIRTIO_SCSI_F_HOTPLUG; reason encodes
 * rescan (plug) vs removed (unplug).
 */
static void
vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg,
		  struct se_lun *lun, bool plug)
{

	struct vhost_scsi *vs = tpg->vhost_scsi;
	struct vhost_virtqueue *vq;
	u32 reason;

	if (!vs)
		return;

	mutex_lock(&vs->dev.mutex);

	if (plug)
		reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
	else
		reason = VIRTIO_SCSI_EVT_RESET_REMOVED;

	vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	mutex_lock(&vq->mutex);
	if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
		vhost_scsi_send_evt(vs, tpg, lun,
				   VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
	mutex_unlock(&vq->mutex);
	mutex_unlock(&vs->dev.mutex);
}

1779
/* Notify the guest that a LUN appeared (rescan event). */
static void vhost_scsi_hotplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
{
	vhost_scsi_do_plug(tpg, lun, true);
}

1784
/* Notify the guest that a LUN was removed. */
static void vhost_scsi_hotunplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
{
	vhost_scsi_do_plug(tpg, lun, false);
}

1789
/*
 * vhost_scsi_port_link - target-core callback when a LUN is linked to a tpg
 *
 * Bumps the tpg port count and sends a hotplug event to any bound guest.
 * vhost_scsi_mutex is held across the hotplug per the module's lock
 * nesting rules.
 */
static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
			       struct se_lun *lun)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);

	mutex_lock(&vhost_scsi_mutex);

	mutex_lock(&tpg->tv_tpg_mutex);
	tpg->tv_tpg_port_count++;
	mutex_unlock(&tpg->tv_tpg_mutex);

	vhost_scsi_hotplug(tpg, lun);

	mutex_unlock(&vhost_scsi_mutex);

	return 0;
}

1808
/*
 * vhost_scsi_port_unlink - target-core callback when a LUN is unlinked
 *
 * Mirror of vhost_scsi_port_link(): drops the port count and sends a
 * hotunplug event to any bound guest.
 */
static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
				  struct se_lun *lun)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);

	mutex_lock(&vhost_scsi_mutex);

	mutex_lock(&tpg->tv_tpg_mutex);
	tpg->tv_tpg_port_count--;
	mutex_unlock(&tpg->tv_tpg_mutex);

	vhost_scsi_hotunplug(tpg, lun);

	mutex_unlock(&vhost_scsi_mutex);
}

1825
/*
 * vhost_scsi_make_nodeacl - configfs callback creating an initiator NodeACL
 *
 * WWN parsing is commented out, so wwpn stays 0.  Allocates the fabric
 * ACL, registers it with target core (which may free se_nacl_new when a
 * demo-mode ACL is converted), then records the WWPN on the vhost nacl.
 */
static struct se_node_acl *
vhost_scsi_make_nodeacl(struct se_portal_group *se_tpg,
		       struct config_group *group,
		       const char *name)
{
	struct se_node_acl *se_nacl, *se_nacl_new;
	struct vhost_scsi_nacl *nacl;
	u64 wwpn = 0;
	u32 nexus_depth;

	/* vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
		return ERR_PTR(-EINVAL); */
	se_nacl_new = vhost_scsi_alloc_fabric_acl(se_tpg);
	if (!se_nacl_new)
		return ERR_PTR(-ENOMEM);

	nexus_depth = 1;
	/*
	 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
	 * when converting a NodeACL from demo mode -> explict
	 */
	se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
				name, nexus_depth);
	if (IS_ERR(se_nacl)) {
		vhost_scsi_release_fabric_acl(se_tpg, se_nacl_new);
		return se_nacl;
	}
	/*
	 * Locate our struct vhost_scsi_nacl and set the FC Nport WWPN
	 */
	nacl = container_of(se_nacl, struct vhost_scsi_nacl, se_node_acl);
	nacl->iport_wwpn = wwpn;

	return se_nacl;
}

1861
/* configfs callback: remove an initiator NodeACL and free its wrapper. */
static void vhost_scsi_drop_nodeacl(struct se_node_acl *se_acl)
{
	struct vhost_scsi_nacl *nacl = container_of(se_acl,
				struct vhost_scsi_nacl, se_node_acl);
	core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
	kfree(nacl);
}

1869
/*
 * vhost_scsi_free_cmd_map_res - free per-command preallocated resources
 *
 * Releases the data/protection scatterlists and user-page arrays of every
 * command slot in the session's tag map (allocated by
 * vhost_scsi_make_nexus()).  Safe on a partially-initialized map: kfree
 * of a NULL pointer is a no-op.
 */
static void vhost_scsi_free_cmd_map_res(struct vhost_scsi_nexus *nexus,
				       struct se_session *se_sess)
{
	struct vhost_scsi_cmd *tv_cmd;
	unsigned int i;

	if (!se_sess->sess_cmd_map)
		return;

	for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
		tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];

		kfree(tv_cmd->tvc_sgl);
		kfree(tv_cmd->tvc_prot_sgl);
		kfree(tv_cmd->tvc_upages);
	}
}

1887 1888 1889 1890 1891 1892 1893 1894 1895 1896 1897 1898 1899 1900 1901 1902 1903 1904 1905 1906 1907 1908 1909 1910 1911 1912 1913 1914 1915 1916 1917 1918 1919 1920 1921 1922 1923 1924 1925
/*
 * Store handler for the tpg attrib "fabric_prot_type".
 *
 * Accepts only the values 0, 1 and 3; anything else (or a parse failure)
 * is rejected with an error.  Returns count on success.
 */
static ssize_t vhost_scsi_tpg_attrib_store_fabric_prot_type(
	struct se_portal_group *se_tpg,
	const char *page,
	size_t count)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	unsigned long val;
	int ret;

	ret = kstrtoul(page, 0, &val);
	if (ret) {
		pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
		return ret;
	}

	switch (val) {
	case 0:
	case 1:
	case 3:
		break;
	default:
		pr_err("Invalid vhost_scsi fabric_prot_type: %lu\n", val);
		return -EINVAL;
	}

	tpg->tv_fabric_prot_type = val;
	return count;
}

/* Show handler for the tpg attrib "fabric_prot_type". */
static ssize_t vhost_scsi_tpg_attrib_show_fabric_prot_type(
	struct se_portal_group *se_tpg,
	char *page)
{
	struct vhost_scsi_tpg *tpg;

	tpg = container_of(se_tpg, struct vhost_scsi_tpg, se_tpg);
	return sprintf(page, "%d\n", tpg->tv_fabric_prot_type);
}
TF_TPG_ATTRIB_ATTR(vhost_scsi, fabric_prot_type, S_IRUGO | S_IWUSR);

/* configfs attributes exposed under the tpg attrib group */
static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = {
	&vhost_scsi_tpg_attrib_fabric_prot_type.attr,
	NULL,
};

1926
/*
 * vhost_scsi_make_nexus - create the single I_T nexus for a tpg
 *
 * Allocates a session with a preallocated tag pool of
 * VHOST_SCSI_DEFAULT_TAGS vhost_scsi_cmd slots and, for each slot,
 * its data/protection scatterlists and user-page array.  On any
 * allocation or ACL failure everything is unwound via the out label.
 * Returns 0, -EEXIST if a nexus is already active, or -ENOMEM.
 */
static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
				const char *name)
{
	struct se_portal_group *se_tpg;
	struct se_session *se_sess;
	struct vhost_scsi_nexus *tv_nexus;
	struct vhost_scsi_cmd *tv_cmd;
	unsigned int i;

	mutex_lock(&tpg->tv_tpg_mutex);
	if (tpg->tpg_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_debug("tpg->tpg_nexus already exists\n");
		return -EEXIST;
	}
	se_tpg = &tpg->se_tpg;

	tv_nexus = kzalloc(sizeof(struct vhost_scsi_nexus), GFP_KERNEL);
	if (!tv_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to allocate struct vhost_scsi_nexus\n");
		return -ENOMEM;
	}
	/*
	 *  Initialize the struct se_session pointer and setup tagpool
	 *  for struct vhost_scsi_cmd descriptors
	 */
	tv_nexus->tvn_se_sess = transport_init_session_tags(
					VHOST_SCSI_DEFAULT_TAGS,
					sizeof(struct vhost_scsi_cmd),
					TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS);
	if (IS_ERR(tv_nexus->tvn_se_sess)) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		kfree(tv_nexus);
		return -ENOMEM;
	}
	se_sess = tv_nexus->tvn_se_sess;
	/* Preallocate per-command resources for every tag-pool slot */
	for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
		tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];

		tv_cmd->tvc_sgl = kzalloc(sizeof(struct scatterlist) *
					VHOST_SCSI_PREALLOC_SGLS, GFP_KERNEL);
		if (!tv_cmd->tvc_sgl) {
			mutex_unlock(&tpg->tv_tpg_mutex);
			pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
			goto out;
		}

		tv_cmd->tvc_upages = kzalloc(sizeof(struct page *) *
					VHOST_SCSI_PREALLOC_UPAGES, GFP_KERNEL);
		if (!tv_cmd->tvc_upages) {
			mutex_unlock(&tpg->tv_tpg_mutex);
			pr_err("Unable to allocate tv_cmd->tvc_upages\n");
			goto out;
		}

		tv_cmd->tvc_prot_sgl = kzalloc(sizeof(struct scatterlist) *
					VHOST_SCSI_PREALLOC_PROT_SGLS, GFP_KERNEL);
		if (!tv_cmd->tvc_prot_sgl) {
			mutex_unlock(&tpg->tv_tpg_mutex);
			pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
			goto out;
		}
	}
	/*
	 * Since we are running in 'demo mode' this call with generate a
	 * struct se_node_acl for the vhost_scsi struct se_portal_group with
	 * the SCSI Initiator port name of the passed configfs group 'name'.
	 */
	tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
				se_tpg, (unsigned char *)name);
	if (!tv_nexus->tvn_se_sess->se_node_acl) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_debug("core_tpg_check_initiator_node_acl() failed"
				" for %s\n", name);
		goto out;
	}
	/*
	 * Now register the TCM vhost virtual I_T Nexus as active.
	 */
	transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
			tv_nexus->tvn_se_sess, tv_nexus);
	tpg->tpg_nexus = tv_nexus;

	mutex_unlock(&tpg->tv_tpg_mutex);
	return 0;

out:
	vhost_scsi_free_cmd_map_res(tv_nexus, se_sess);
	transport_free_session(se_sess);
	kfree(tv_nexus);
	return -ENOMEM;
}

2020
static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
2021 2022
{
	struct se_session *se_sess;
2023
	struct vhost_scsi_nexus *tv_nexus;
2024 2025 2026 2027 2028 2029 2030 2031 2032 2033 2034 2035 2036 2037

	mutex_lock(&tpg->tv_tpg_mutex);
	tv_nexus = tpg->tpg_nexus;
	if (!tv_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}

	se_sess = tv_nexus->tvn_se_sess;
	if (!se_sess) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}

2038
	if (tpg->tv_tpg_port_count != 0) {
2039
		mutex_unlock(&tpg->tv_tpg_mutex);
2040
		pr_err("Unable to remove TCM_vhost I_T Nexus with"
2041
			" active TPG port count: %d\n",
2042 2043
			tpg->tv_tpg_port_count);
		return -EBUSY;
2044 2045
	}

2046
	if (tpg->tv_tpg_vhost_count != 0) {
2047
		mutex_unlock(&tpg->tv_tpg_mutex);
2048
		pr_err("Unable to remove TCM_vhost I_T Nexus with"
2049
			" active TPG vhost count: %d\n",
2050 2051
			tpg->tv_tpg_vhost_count);
		return -EBUSY;
2052 2053
	}

2054
	pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
2055
		" %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
2056
		tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
2057

2058
	vhost_scsi_free_cmd_map_res(tv_nexus, se_sess);
2059
	/*
2060
	 * Release the SCSI I_T Nexus to the emulated vhost Target Port
2061 2062 2063 2064 2065 2066 2067 2068 2069
	 */
	transport_deregister_session(tv_nexus->tvn_se_sess);
	tpg->tpg_nexus = NULL;
	mutex_unlock(&tpg->tv_tpg_mutex);

	kfree(tv_nexus);
	return 0;
}

2070
static ssize_t vhost_scsi_tpg_show_nexus(struct se_portal_group *se_tpg,
2071
					char *page)
2072
{
2073 2074 2075
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	struct vhost_scsi_nexus *tv_nexus;
2076 2077
	ssize_t ret;

2078 2079
	mutex_lock(&tpg->tv_tpg_mutex);
	tv_nexus = tpg->tpg_nexus;
2080
	if (!tv_nexus) {
2081
		mutex_unlock(&tpg->tv_tpg_mutex);
2082 2083 2084 2085
		return -ENODEV;
	}
	ret = snprintf(page, PAGE_SIZE, "%s\n",
			tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
2086
	mutex_unlock(&tpg->tv_tpg_mutex);
2087 2088 2089 2090

	return ret;
}

2091
/*
 * Configfs "nexus" store handler: create or destroy the TPG's I_T nexus.
 *
 * Writing "NULL" drops the active nexus via vhost_scsi_drop_nexus().
 * Otherwise @page carries the virtual initiator port WWN; its prefix
 * ("naa.", "fc." or "iqn.") must match the protocol identifier chosen
 * for the tport in vhost_scsi_make_tport().  Returns @count on success
 * or a negative errno.
 */
static ssize_t vhost_scsi_tpg_store_nexus(struct se_portal_group *se_tpg,
					 const char *page,
					 size_t count)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	struct vhost_scsi_tport *tport_wwn = tpg->tport;
	unsigned char i_port[VHOST_SCSI_NAMELEN], *ptr, *port_ptr;
	int ret;
	/*
	 * Shutdown the active I_T nexus if 'NULL' is passed..
	 */
	if (!strncmp(page, "NULL", 4)) {
		ret = vhost_scsi_drop_nexus(tpg);
		return (!ret) ? count : ret;
	}
	/*
	 * Otherwise make sure the passed virtual Initiator port WWN matches
	 * the fabric protocol_id set in vhost_scsi_make_tport(), and call
	 * vhost_scsi_make_nexus().
	 */
	if (strlen(page) >= VHOST_SCSI_NAMELEN) {
		pr_err("Emulated NAA Sas Address: %s, exceeds"
				" max: %d\n", page, VHOST_SCSI_NAMELEN);
		return -EINVAL;
	}
	snprintf(&i_port[0], VHOST_SCSI_NAMELEN, "%s", page);

	ptr = strstr(i_port, "naa.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
			pr_err("Passed SAS Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				vhost_scsi_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	ptr = strstr(i_port, "fc.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
			pr_err("Passed FCP Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				vhost_scsi_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[3]; /* Skip over "fc." */
		goto check_newline;
	}
	ptr = strstr(i_port, "iqn.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
			pr_err("Passed iSCSI Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				vhost_scsi_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	pr_err("Unable to locate prefix for emulated Initiator Port:"
			" %s\n", i_port);
	return -EINVAL;
	/*
	 * Clear any trailing newline for the NAA WWN
	 */
check_newline:
	if (i_port[strlen(i_port)-1] == '\n')
		i_port[strlen(i_port)-1] = '\0';

	ret = vhost_scsi_make_nexus(tpg, port_ptr);
	if (ret < 0)
		return ret;

	return count;
}

2169
TF_TPG_BASE_ATTR(vhost_scsi, nexus, S_IRUGO | S_IWUSR);
2170

2171 2172
static struct configfs_attribute *vhost_scsi_tpg_attrs[] = {
	&vhost_scsi_tpg_nexus.attr,
2173 2174 2175
	NULL,
};

2176
static struct se_portal_group *
2177
vhost_scsi_make_tpg(struct se_wwn *wwn,
2178 2179
		   struct config_group *group,
		   const char *name)
2180
{
2181 2182
	struct vhost_scsi_tport *tport = container_of(wwn,
			struct vhost_scsi_tport, tport_wwn);
2183

2184
	struct vhost_scsi_tpg *tpg;
2185
	u16 tpgt;
2186 2187 2188 2189
	int ret;

	if (strstr(name, "tpgt_") != name)
		return ERR_PTR(-EINVAL);
2190
	if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
2191 2192
		return ERR_PTR(-EINVAL);

2193
	tpg = kzalloc(sizeof(struct vhost_scsi_tpg), GFP_KERNEL);
2194
	if (!tpg) {
2195
		pr_err("Unable to allocate struct vhost_scsi_tpg");
2196 2197 2198 2199 2200 2201 2202
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&tpg->tv_tpg_mutex);
	INIT_LIST_HEAD(&tpg->tv_tpg_list);
	tpg->tport = tport;
	tpg->tport_tpgt = tpgt;

2203
	ret = core_tpg_register(&vhost_scsi_ops, wwn,
2204 2205 2206 2207 2208
				&tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
	if (ret < 0) {
		kfree(tpg);
		return NULL;
	}
2209 2210 2211
	mutex_lock(&vhost_scsi_mutex);
	list_add_tail(&tpg->tv_tpg_list, &vhost_scsi_list);
	mutex_unlock(&vhost_scsi_mutex);
2212 2213 2214 2215

	return &tpg->se_tpg;
}

2216
static void vhost_scsi_drop_tpg(struct se_portal_group *se_tpg)
2217
{
2218 2219
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
2220

2221
	mutex_lock(&vhost_scsi_mutex);
2222
	list_del(&tpg->tv_tpg_list);
2223
	mutex_unlock(&vhost_scsi_mutex);
2224
	/*
2225
	 * Release the virtual I_T Nexus for this vhost TPG
2226
	 */
2227
	vhost_scsi_drop_nexus(tpg);
2228 2229 2230 2231 2232 2233 2234
	/*
	 * Deregister the se_tpg from TCM..
	 */
	core_tpg_deregister(se_tpg);
	kfree(tpg);
}

2235
static struct se_wwn *
2236
vhost_scsi_make_tport(struct target_fabric_configfs *tf,
2237 2238
		     struct config_group *group,
		     const char *name)
2239
{
2240
	struct vhost_scsi_tport *tport;
2241 2242 2243 2244
	char *ptr;
	u64 wwpn = 0;
	int off = 0;

2245
	/* if (vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
2246 2247
		return ERR_PTR(-EINVAL); */

2248
	tport = kzalloc(sizeof(struct vhost_scsi_tport), GFP_KERNEL);
2249
	if (!tport) {
2250
		pr_err("Unable to allocate struct vhost_scsi_tport");
2251 2252 2253 2254 2255 2256 2257 2258 2259 2260 2261 2262 2263 2264 2265 2266 2267 2268 2269 2270 2271 2272 2273 2274 2275 2276 2277 2278 2279 2280
		return ERR_PTR(-ENOMEM);
	}
	tport->tport_wwpn = wwpn;
	/*
	 * Determine the emulated Protocol Identifier and Target Port Name
	 * based on the incoming configfs directory name.
	 */
	ptr = strstr(name, "naa.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_SAS;
		goto check_len;
	}
	ptr = strstr(name, "fc.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_FCP;
		off = 3; /* Skip over "fc." */
		goto check_len;
	}
	ptr = strstr(name, "iqn.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
		goto check_len;
	}

	pr_err("Unable to locate prefix for emulated Target Port:"
			" %s\n", name);
	kfree(tport);
	return ERR_PTR(-EINVAL);

check_len:
2281
	if (strlen(name) >= VHOST_SCSI_NAMELEN) {
2282
		pr_err("Emulated %s Address: %s, exceeds"
2283 2284
			" max: %d\n", name, vhost_scsi_dump_proto_id(tport),
			VHOST_SCSI_NAMELEN);
2285 2286 2287
		kfree(tport);
		return ERR_PTR(-EINVAL);
	}
2288
	snprintf(&tport->tport_name[0], VHOST_SCSI_NAMELEN, "%s", &name[off]);
2289 2290

	pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
2291
		" %s Address: %s\n", vhost_scsi_dump_proto_id(tport), name);
2292 2293 2294 2295

	return &tport->tport_wwn;
}

2296
static void vhost_scsi_drop_tport(struct se_wwn *wwn)
2297
{
2298 2299
	struct vhost_scsi_tport *tport = container_of(wwn,
				struct vhost_scsi_tport, tport_wwn);
2300 2301

	pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
2302
		" %s Address: %s\n", vhost_scsi_dump_proto_id(tport),
2303 2304 2305 2306 2307
		tport->tport_name);

	kfree(tport);
}

2308
static ssize_t
2309
vhost_scsi_wwn_show_attr_version(struct target_fabric_configfs *tf,
2310
				char *page)
2311 2312
{
	return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
2313
		"on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
2314 2315 2316
		utsname()->machine);
}

2317
TF_WWN_ATTR_RO(vhost_scsi, version);
2318

2319 2320
static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
	&vhost_scsi_wwn_version.attr,
2321 2322 2323
	NULL,
};

2324
static struct target_core_fabric_ops vhost_scsi_ops = {
2325 2326
	.module				= THIS_MODULE,
	.name				= "vhost",
2327 2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 2338
	.get_fabric_name		= vhost_scsi_get_fabric_name,
	.get_fabric_proto_ident		= vhost_scsi_get_fabric_proto_ident,
	.tpg_get_wwn			= vhost_scsi_get_fabric_wwn,
	.tpg_get_tag			= vhost_scsi_get_tpgt,
	.tpg_get_default_depth		= vhost_scsi_get_default_depth,
	.tpg_get_pr_transport_id	= vhost_scsi_get_pr_transport_id,
	.tpg_get_pr_transport_id_len	= vhost_scsi_get_pr_transport_id_len,
	.tpg_parse_pr_out_transport_id	= vhost_scsi_parse_pr_out_transport_id,
	.tpg_check_demo_mode		= vhost_scsi_check_true,
	.tpg_check_demo_mode_cache	= vhost_scsi_check_true,
	.tpg_check_demo_mode_write_protect = vhost_scsi_check_false,
	.tpg_check_prod_mode_write_protect = vhost_scsi_check_false,
2339
	.tpg_check_prot_fabric_only	= vhost_scsi_check_prot_fabric_only,
2340 2341 2342 2343
	.tpg_alloc_fabric_acl		= vhost_scsi_alloc_fabric_acl,
	.tpg_release_fabric_acl		= vhost_scsi_release_fabric_acl,
	.tpg_get_inst_index		= vhost_scsi_tpg_get_inst_index,
	.release_cmd			= vhost_scsi_release_cmd,
2344
	.check_stop_free		= vhost_scsi_check_stop_free,
2345 2346 2347
	.shutdown_session		= vhost_scsi_shutdown_session,
	.close_session			= vhost_scsi_close_session,
	.sess_get_index			= vhost_scsi_sess_get_index,
2348
	.sess_get_initiator_sid		= NULL,
2349 2350 2351 2352 2353 2354 2355 2356 2357
	.write_pending			= vhost_scsi_write_pending,
	.write_pending_status		= vhost_scsi_write_pending_status,
	.set_default_node_attributes	= vhost_scsi_set_default_node_attrs,
	.get_task_tag			= vhost_scsi_get_task_tag,
	.get_cmd_state			= vhost_scsi_get_cmd_state,
	.queue_data_in			= vhost_scsi_queue_data_in,
	.queue_status			= vhost_scsi_queue_status,
	.queue_tm_rsp			= vhost_scsi_queue_tm_rsp,
	.aborted_task			= vhost_scsi_aborted_task,
2358 2359 2360
	/*
	 * Setup callers for generic logic in target_core_fabric_configfs.c
	 */
2361 2362 2363 2364 2365 2366
	.fabric_make_wwn		= vhost_scsi_make_tport,
	.fabric_drop_wwn		= vhost_scsi_drop_tport,
	.fabric_make_tpg		= vhost_scsi_make_tpg,
	.fabric_drop_tpg		= vhost_scsi_drop_tpg,
	.fabric_post_link		= vhost_scsi_port_link,
	.fabric_pre_unlink		= vhost_scsi_port_unlink,
2367 2368
	.fabric_make_np			= NULL,
	.fabric_drop_np			= NULL,
2369 2370
	.fabric_make_nodeacl		= vhost_scsi_make_nodeacl,
	.fabric_drop_nodeacl		= vhost_scsi_drop_nodeacl,
2371 2372 2373 2374

	.tfc_wwn_attrs			= vhost_scsi_wwn_attrs,
	.tfc_tpg_base_attrs		= vhost_scsi_tpg_attrs,
	.tfc_tpg_attrib_attrs		= vhost_scsi_tpg_attrib_attrs,
2375 2376
};

2377
/*
 * Module init: create the dedicated workqueue, register the vhost misc
 * device, then register the fabric template with target core.  Errors
 * unwind in reverse order via goto cleanup.  (Also dropped the stray
 * semicolon after the function's closing brace.)
 */
static int __init vhost_scsi_init(void)
{
	int ret = -ENOMEM;

	pr_debug("TCM_VHOST fabric module %s on %s/%s"
		" on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
		utsname()->machine);

	/*
	 * Use our own dedicated workqueue for submitting I/O into
	 * target core to avoid contention within system_wq.
	 */
	vhost_scsi_workqueue = alloc_workqueue("vhost_scsi", 0, 0);
	if (!vhost_scsi_workqueue)
		goto out;

	ret = vhost_scsi_register();
	if (ret < 0)
		goto out_destroy_workqueue;

	ret = target_register_template(&vhost_scsi_ops);
	if (ret < 0)
		goto out_vhost_scsi_deregister;

	return 0;

out_vhost_scsi_deregister:
	vhost_scsi_deregister();
out_destroy_workqueue:
	destroy_workqueue(vhost_scsi_workqueue);
out:
	return ret;
}

2411
static void vhost_scsi_exit(void)
2412
{
2413
	target_unregister_template(&vhost_scsi_ops);
2414
	vhost_scsi_deregister();
2415
	destroy_workqueue(vhost_scsi_workqueue);
2416 2417
};

MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
MODULE_ALIAS("tcm_vhost");
MODULE_LICENSE("GPL");
module_init(vhost_scsi_init);
module_exit(vhost_scsi_exit);