f_mass_storage.c 96.9 KB
Newer Older
1
/*
2
 * f_mass_storage.c -- Mass Storage USB Composite Function
3 4
 *
 * Copyright (C) 2003-2008 Alan Stern
5
 * Copyright (C) 2009 Samsung Electronics
6
 *                    Author: Michal Nazarewicz <mina86@mina86.com>
7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
41 42 43 44 45
 * The Mass Storage Function acts as a USB Mass Storage device,
 * appearing to the host as a disk drive or as a CD-ROM drive.  In
 * addition to providing an example of a genuinely useful composite
 * function for a USB device, it also illustrates a technique of
 * double-buffering for increased throughput.
46
 *
47 48 49 50 51 52
 * For more information about MSF and in particular its module
 * parameters and sysfs interface read the
 * <Documentation/usb/mass-storage.txt> file.
 */

/*
53 54 55 56
 * MSF is configured by specifying a fsg_config structure.  It has the
 * following fields:
 *
 *	nluns		Number of LUNs function have (anywhere from 1
57
 *				to FSG_MAX_LUNS).
58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74
 *	luns		An array of LUN configuration values.  This
 *				should be filled for each LUN that
 *				function will include (ie. for "nluns"
 *				LUNs).  Each element of the array has
 *				the following fields:
 *	->filename	The path to the backing file for the LUN.
 *				Required if LUN is not marked as
 *				removable.
 *	->ro		Flag specifying access to the LUN shall be
 *				read-only.  This is implied if CD-ROM
 *				emulation is enabled as well as when
 *				it was impossible to open "filename"
 *				in R/W mode.
 *	->removable	Flag specifying that LUN shall be indicated as
 *				being removable.
 *	->cdrom		Flag specifying that LUN shall be reported as
 *				being a CD-ROM.
75 76
 *	->nofua		Flag specifying that FUA flag in SCSI WRITE(10,12)
 *				commands for this LUN shall be ignored.
77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95
 *
 *	vendor_name
 *	product_name
 *	release		Information used as a reply to INQUIRY
 *				request.  To use default set to NULL,
 *				NULL, 0xffff respectively.  The first
 *				field should be 8 and the second 16
 *				characters or less.
 *
 *	can_stall	Set to permit function to halt bulk endpoints.
 *				Disabled on some USB devices known not
 *				to work correctly.  You should set it
 *				to true.
 *
 * If "removable" is not set for a LUN then a backing file must be
 * specified.  If it is set, then NULL filename means the LUN's medium
 * is not loaded (an empty string as "filename" in the fsg_config
 * structure causes error).  The CD-ROM emulation includes a single
 * data track and no audio tracks; hence there need be only one
96
 * backing file per LUN.
97 98 99 100 101 102 103 104 105 106 107
 *
 * This function is heavily based on "File-backed Storage Gadget" by
 * Alan Stern which in turn is heavily based on "Gadget Zero" by David
 * Brownell.  The driver's SCSI command interface was based on the
 * "Information technology - Small Computer System Interface - 2"
 * document from X3T9.2 Project 375D, Revision 10L, 7-SEP-93,
 * available at <http://www.t10.org/ftp/t10/drafts/s2/s2-r10l.pdf>.
 * The single exception is opcode 0x23 (READ FORMAT CAPACITIES), which
 * was based on the "Universal Serial Bus Mass Storage Class UFI
 * Command Specification" document, Revision 1.0, December 14, 1998,
 * available at
108 109 110 111 112 113
 * <http://www.usb.org/developers/devclass_docs/usbmass-ufi10.pdf>.
 */

/*
 *				Driver Design
 *
114
 * The MSF is fairly straightforward.  There is a main kernel
115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136
 * thread that handles most of the work.  Interrupt routines field
 * callbacks from the controller driver: bulk- and interrupt-request
 * completion notifications, endpoint-0 events, and disconnect events.
 * Completion events are passed to the main thread by wakeup calls.  Many
 * ep0 requests are handled at interrupt time, but SetInterface,
 * SetConfiguration, and device reset requests are forwarded to the
 * thread in the form of "exceptions" using SIGUSR1 signals (since they
 * should interrupt any ongoing file I/O operations).
 *
 * The thread's main routine implements the standard command/data/status
 * parts of a SCSI interaction.  It and its subroutines are full of tests
 * for pending signals/exceptions -- all this polling is necessary since
 * the kernel has no setjmp/longjmp equivalents.  (Maybe this is an
 * indication that the driver really wants to be running in userspace.)
 * An important point is that so long as the thread is alive it keeps an
 * open reference to the backing file.  This will prevent unmounting
 * the backing file's underlying filesystem and could cause problems
 * during system shutdown, for example.  To prevent such problems, the
 * thread catches INT, TERM, and KILL signals and converts them into
 * an EXIT exception.
 *
 * In normal operation the main thread is started during the gadget's
137 138
 * fsg_bind() callback and stopped during fsg_unbind().  But it can
 * also exit when it receives a signal, and there's no point leaving
139
 * the gadget running when the thread is dead.  As of this moment, MSF
140 141
 * provides no way to deregister the gadget when thread dies -- maybe
 * a callback functions is needed.
142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215
 *
 * To provide maximum throughput, the driver uses a circular pipeline of
 * buffer heads (struct fsg_buffhd).  In principle the pipeline can be
 * arbitrarily long; in practice the benefits don't justify having more
 * than 2 stages (i.e., double buffering).  But it helps to think of the
 * pipeline as being a long one.  Each buffer head contains a bulk-in and
 * a bulk-out request pointer (since the buffer can be used for both
 * output and input -- directions always are given from the host's
 * point of view) as well as a pointer to the buffer and various state
 * variables.
 *
 * Use of the pipeline follows a simple protocol.  There is a variable
 * (fsg->next_buffhd_to_fill) that points to the next buffer head to use.
 * At any time that buffer head may still be in use from an earlier
 * request, so each buffer head has a state variable indicating whether
 * it is EMPTY, FULL, or BUSY.  Typical use involves waiting for the
 * buffer head to be EMPTY, filling the buffer either by file I/O or by
 * USB I/O (during which the buffer head is BUSY), and marking the buffer
 * head FULL when the I/O is complete.  Then the buffer will be emptied
 * (again possibly by USB I/O, during which it is marked BUSY) and
 * finally marked EMPTY again (possibly by a completion routine).
 *
 * A module parameter tells the driver to avoid stalling the bulk
 * endpoints wherever the transport specification allows.  This is
 * necessary for some UDCs like the SuperH, which cannot reliably clear a
 * halt on a bulk endpoint.  However, under certain circumstances the
 * Bulk-only specification requires a stall.  In such cases the driver
 * will halt the endpoint and set a flag indicating that it should clear
 * the halt in software during the next device reset.  Hopefully this
 * will permit everything to work correctly.  Furthermore, although the
 * specification allows the bulk-out endpoint to halt when the host sends
 * too much data, implementing this would cause an unavoidable race.
 * The driver will always use the "no-stall" approach for OUT transfers.
 *
 * One subtle point concerns sending status-stage responses for ep0
 * requests.  Some of these requests, such as device reset, can involve
 * interrupting an ongoing file I/O operation, which might take an
 * arbitrarily long time.  During that delay the host might give up on
 * the original ep0 request and issue a new one.  When that happens the
 * driver should not notify the host about completion of the original
 * request, as the host will no longer be waiting for it.  So the driver
 * assigns to each ep0 request a unique tag, and it keeps track of the
 * tag value of the request associated with a long-running exception
 * (device-reset, interface-change, or configuration-change).  When the
 * exception handler is finished, the status-stage response is submitted
 * only if the current ep0 request tag is equal to the exception request
 * tag.  Thus only the most recently received ep0 request will get a
 * status-stage response.
 *
 * Warning: This driver source file is too long.  It ought to be split up
 * into a header file plus about 3 separate .c files, to handle the details
 * of the Gadget, USB Mass Storage, and SCSI protocols.
 */


/* #define VERBOSE_DEBUG */
/* #define DUMP_MSGS */

#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/dcache.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kref.h>
#include <linux/kthread.h>
#include <linux/limits.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/freezer.h>
216
#include <linux/module.h>
217 218 219

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
220
#include <linux/usb/composite.h>
221 222

#include "gadget_chips.h"
223
#include "configfs.h"
224 225


226
/*------------------------------------------------------------------------*/
227

228
#define FSG_DRIVER_DESC		"Mass Storage Function"
229
#define FSG_DRIVER_VERSION	"2009/09/11"
230 231 232

static const char fsg_string_interface[] = "Mass Storage";

233
#include "storage_common.h"
234
#include "f_mass_storage.h"
235

236 237 238 239 240 241 242 243 244 245
/* Static strings, in UTF-8 (for simplicity we use only ASCII characters) */
static struct usb_string		fsg_strings[] = {
	{FSG_STRING_INTERFACE,		fsg_string_interface},
	{}	/* end-of-list terminator required by the composite framework */
};

/* String table for a single language */
static struct usb_gadget_strings	fsg_stringtab = {
	.language	= 0x0409,		/* en-us */
	.strings	= fsg_strings,
};

/* NULL-terminated array of string tables, as expected by usb_gstrings_attach() */
static struct usb_gadget_strings *fsg_strings_array[] = {
	&fsg_stringtab,
	NULL,
};

252 253
/*-------------------------------------------------------------------------*/

254
struct fsg_dev;
255 256
struct fsg_common;

257 258
/* Data shared by all the FSG instances. */
struct fsg_common {
	struct usb_gadget	*gadget;
	struct usb_composite_dev *cdev;
	/* fsg is the currently bound function instance; new_fsg is the one
	 * an exception handler should switch to (NULL means "disable"). */
	struct fsg_dev		*fsg, *new_fsg;
	wait_queue_head_t	fsg_wait;

	/* filesem protects: backing files in use */
	struct rw_semaphore	filesem;

	/* lock protects: state, all the req_busy's */
	spinlock_t		lock;

	struct usb_ep		*ep0;		/* Copy of gadget->ep0 */
	struct usb_request	*ep0req;	/* Copy of cdev->req */
	unsigned int		ep0_req_tag;	/* Tag of the most recent ep0 request */

	/* Circular pipeline of buffer heads (see the Driver Design comment) */
	struct fsg_buffhd	*next_buffhd_to_fill;
	struct fsg_buffhd	*next_buffhd_to_drain;
	struct fsg_buffhd	*buffhds;
	unsigned int		fsg_num_buffers;

	/* Current SCSI command block, copied out of the CBW */
	int			cmnd_size;
	u8			cmnd[MAX_COMMAND_SIZE];

	unsigned int		nluns;		/* Number of configured LUNs */
	unsigned int		lun;		/* LUN addressed by current command */
	struct fsg_lun		**luns;
	struct fsg_lun		*curlun;	/* LUN the current command targets */

	unsigned int		bulk_out_maxpacket;
	enum fsg_state		state;		/* For exception handling */
	unsigned int		exception_req_tag;

	/* Bookkeeping for the data phase of the current command */
	enum data_direction	data_dir;
	u32			data_size;
	u32			data_size_from_cmnd;
	u32			tag;		/* CBW tag, echoed in the CSW */
	u32			residue;	/* Bytes not transferred */
	u32			usb_amount_left;

	unsigned int		can_stall:1;	/* OK to halt bulk endpoints */
	unsigned int		free_storage_on_release:1;
	unsigned int		phase_error:1;
	unsigned int		short_packet_received:1;
	unsigned int		bad_lun_okay:1;
	unsigned int		running:1;
	unsigned int		sysfs:1;

	/* Main-thread wakeup handshake; see sleep_thread()/wakeup_thread() */
	int			thread_wakeup_needed;
	struct completion	thread_notifier;
	struct task_struct	*thread_task;

	/* Callback functions. */
	const struct fsg_operations	*ops;
	/* Gadget's private data. */
	void			*private_data;

	/*
	 * Vendor (8 chars), product (16 chars), release (4
	 * hexadecimal digits) and NUL byte
	 */
	char inquiry_string[8 + 16 + 4 + 1];

	struct kref		ref;	/* Refcount; last put frees the structure */
};

324
/* Per-USB-function state; one instance exists per bound configuration. */
struct fsg_dev {
	struct usb_function	function;
	struct usb_gadget	*gadget;	/* Copy of cdev->gadget */
	struct fsg_common	*common;	/* Shared state; may outlive this */

	u16			interface_number;

	unsigned int		bulk_in_enabled:1;
	unsigned int		bulk_out_enabled:1;

	/* Bit flags manipulated with test/set/clear_bit() */
	unsigned long		atomic_bitflags;
#define IGNORE_BULK_OUT		0

	struct usb_ep		*bulk_in;
	struct usb_ep		*bulk_out;
};
340

341 342 343 344 345 346
/*
 * Report whether common->fsg is non-NULL, i.e. whether the function is
 * currently bound.  Logs and warns (once per call site) when it is NULL,
 * identifying the caller via func/line.  Use through the fsg_is_set()
 * macro below, which fills those in automatically.
 */
static inline int __fsg_is_set(struct fsg_common *common,
			       const char *func, unsigned line)
{
	if (common->fsg)
		return 1;
	ERROR(common, "common->fsg is NULL in %s at %u\n", func, line);
	WARN_ON(1);
	return 0;
}

#define fsg_is_set(common) likely(__fsg_is_set(common, __func__, __LINE__))
352

353 354 355 356 357
/* Map a composite-framework usb_function back to its enclosing fsg_dev. */
static inline struct fsg_dev *fsg_from_func(struct usb_function *f)
{
	return container_of(f, struct fsg_dev, function);
}

typedef void (*fsg_routine_t)(struct fsg_dev *);

360
static int exception_in_progress(struct fsg_common *common)
361
{
362
	return common->state > FSG_STATE_IDLE;
363 364
}

365 366 367 368 369 370 371 372 373 374 375 376 377 378
/*
 * Record the number of bytes we actually want from the host and round the
 * USB request length up to a whole number of bulk-out max-packets, since
 * some UDCs reject requests that are not maxpacket-aligned.
 */
static void set_bulk_out_req_length(struct fsg_common *common,
				    struct fsg_buffhd *bh, unsigned int length)
{
	unsigned int	partial;

	bh->bulk_out_intended_length = length;

	partial = length % common->bulk_out_maxpacket;
	if (partial)
		length += common->bulk_out_maxpacket - partial;

	bh->outreq->length = length;
}


379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400
/*-------------------------------------------------------------------------*/

/* Halt an endpoint, logging a friendly name for the bulk endpoints. */
static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
{
	const char	*label = ep->name;

	if (ep == fsg->bulk_in)
		label = "bulk-in";
	else if (ep == fsg->bulk_out)
		label = "bulk-out";

	DBG(fsg, "%s set halt\n", label);
	return usb_ep_set_halt(ep);
}


/*-------------------------------------------------------------------------*/

/* These routines may be called in process context or in_irq */

/* Caller must hold fsg->lock */
401
/*
 * Wake the main kernel thread so it re-examines buffer/exception state.
 * Caller must hold common->lock; may be called from interrupt context.
 */
static void wakeup_thread(struct fsg_common *common)
{
	smp_wmb();	/* ensure the write of bh->state is complete */
	/* Tell the main thread that something has happened */
	common->thread_wakeup_needed = 1;
	/* thread_task may be NULL before the thread starts or after it exits */
	if (common->thread_task)
		wake_up_process(common->thread_task);
}

410
/*
 * Request that the main thread handle an exception (reset, config change,
 * disconnect, exit, ...).  Safe to call from interrupt context.
 */
static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
{
	unsigned long		flags;

	/*
	 * Do nothing if a higher-priority exception is already in progress.
	 * If a lower-or-equal priority exception is in progress, preempt it
	 * and notify the main thread by sending it a signal.
	 */
	spin_lock_irqsave(&common->lock, flags);
	if (common->state <= new_state) {
		/* Remember which ep0 request this exception belongs to, so
		 * only the most recent request gets a status-stage reply. */
		common->exception_req_tag = common->ep0_req_tag;
		common->state = new_state;
		if (common->thread_task)
			/* SIGUSR1 interrupts any file I/O the thread is doing */
			send_sig_info(SIGUSR1, SEND_SIG_FORCED,
				      common->thread_task);
	}
	spin_unlock_irqrestore(&common->lock, flags);
}


/*-------------------------------------------------------------------------*/

433
/*
 * Submit the shared ep0 request.  Returns the usb_ep_queue() result;
 * -ESHUTDOWN (controller going away) is expected and not logged.
 */
static int ep0_queue(struct fsg_common *common)
{
	int	rc;

	rc = usb_ep_queue(common->ep0, common->ep0req, GFP_ATOMIC);
	common->ep0->driver_data = common;
	if (rc != 0 && rc != -ESHUTDOWN) {
		/* We can't do much more than wait for a reset */
		WARNING(common, "error in submission: %s --> %d\n",
			common->ep0->name, rc);
	}
	return rc;
}

447

448 449
/*-------------------------------------------------------------------------*/

450
/* Completion handlers. These always run in_irq. */
451 452 453

/*
 * Bulk-in request completion handler (runs in interrupt context).
 * Marks the buffer head EMPTY and wakes the main thread.
 */
static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct fsg_common	*common = ep->driver_data;
	struct fsg_buffhd	*bh = req->context;

	/* Log only abnormal completions (error status or short transfer) */
	if (req->status || req->actual != req->length)
		DBG(common, "%s --> %d, %u/%u\n", __func__,
		    req->status, req->actual, req->length);
	if (req->status == -ECONNRESET)		/* Request was cancelled */
		usb_ep_fifo_flush(ep);

	/* Hold the lock while we update the request and buffer states */
	smp_wmb();
	spin_lock(&common->lock);
	bh->inreq_busy = 0;
	bh->state = BUF_STATE_EMPTY;
	wakeup_thread(common);
	spin_unlock(&common->lock);
}

/*
 * Bulk-out request completion handler (runs in interrupt context).
 * Marks the buffer head FULL and wakes the main thread.
 */
static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct fsg_common	*common = ep->driver_data;
	struct fsg_buffhd	*bh = req->context;

	dump_msg(common, "bulk-out", req->buf, req->actual);
	/* Short reads are compared against the intended (unpadded) length */
	if (req->status || req->actual != bh->bulk_out_intended_length)
		DBG(common, "%s --> %d, %u/%u\n", __func__,
		    req->status, req->actual, bh->bulk_out_intended_length);
	if (req->status == -ECONNRESET)		/* Request was cancelled */
		usb_ep_fifo_flush(ep);

	/* Hold the lock while we update the request and buffer states */
	smp_wmb();
	spin_lock(&common->lock);
	bh->outreq_busy = 0;
	bh->state = BUF_STATE_FULL;
	wakeup_thread(common);
	spin_unlock(&common->lock);
}

493
/*
 * Handle the two class-specific control requests defined by the USB
 * Mass Storage Bulk-Only Transport: Bulk-Only Mass Storage Reset and
 * Get Max LUN.  Anything else is rejected with -EOPNOTSUPP.
 */
static int fsg_setup(struct usb_function *f,
		     const struct usb_ctrlrequest *ctrl)
{
	struct fsg_dev		*fsg = fsg_from_func(f);
	struct usb_request	*req = fsg->common->ep0req;
	u16			w_index = le16_to_cpu(ctrl->wIndex);
	u16			w_value = le16_to_cpu(ctrl->wValue);
	u16			w_length = le16_to_cpu(ctrl->wLength);

	if (!fsg_is_set(fsg->common))
		return -EOPNOTSUPP;

	++fsg->common->ep0_req_tag;	/* Record arrival of a new request */
	req->context = NULL;
	req->length = 0;
	dump_msg(fsg, "ep0-setup", (u8 *) ctrl, sizeof(*ctrl));

	switch (ctrl->bRequest) {

	case US_BULK_RESET_REQUEST:
		if (ctrl->bRequestType !=
		    (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
			break;
		/* The BOT spec requires wValue == 0 and wLength == 0 */
		if (w_index != fsg->interface_number || w_value != 0 ||
				w_length != 0)
			return -EDOM;

		/*
		 * Raise an exception to stop the current operation
		 * and reinitialize our state.
		 */
		DBG(fsg, "bulk reset request\n");
		raise_exception(fsg->common, FSG_STATE_RESET);
		/* Status stage is deferred until the exception is handled */
		return USB_GADGET_DELAYED_STATUS;

	case US_BULK_GET_MAX_LUN:
		if (ctrl->bRequestType !=
		    (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
			break;
		if (w_index != fsg->interface_number || w_value != 0 ||
				w_length != 1)
			return -EDOM;
		VDBG(fsg, "get max LUN\n");
		/* Reply is the highest LUN index, i.e. count - 1 */
		*(u8 *)req->buf = fsg->common->nluns - 1;

		/* Respond with data/status */
		req->length = min((u16)1, w_length);
		return ep0_queue(fsg->common);
	}

	VDBG(fsg,
	     "unknown class-specific control req %02x.%02x v%04x i%04x l%u\n",
	     ctrl->bRequestType, ctrl->bRequest,
	     le16_to_cpu(ctrl->wValue), w_index, w_length);
	return -EOPNOTSUPP;
}


/*-------------------------------------------------------------------------*/

/* All the following routines run in process context */

/* Use this for bulk or interrupt transfers, not ep0 */
/*
 * Mark a buffer head BUSY and submit its request on the given endpoint.
 * On submission failure the busy/state flags are rolled back so the main
 * thread doesn't wait forever on a request that will never complete.
 * Runs in process context; use for bulk/interrupt transfers, not ep0.
 */
static void start_transfer(struct fsg_dev *fsg, struct usb_ep *ep,
			   struct usb_request *req, int *pbusy,
			   enum fsg_buffer_state *state)
{
	int	rc;

	if (ep == fsg->bulk_in)
		dump_msg(fsg, "bulk-in", req->buf, req->length);

	/* The lock orders these updates against the completion handlers */
	spin_lock_irq(&fsg->common->lock);
	*pbusy = 1;
	*state = BUF_STATE_BUSY;
	spin_unlock_irq(&fsg->common->lock);

	rc = usb_ep_queue(ep, req, GFP_KERNEL);
	if (rc == 0)
		return;  /* All good, we're done */

	*pbusy = 0;
	*state = BUF_STATE_EMPTY;

	/* We can't do much more than wait for a reset */

	/*
	 * Note: currently the net2280 driver fails zero-length
	 * submissions if DMA is enabled.
	 */
	if (rc != -ESHUTDOWN && !(rc == -EOPNOTSUPP && req->length == 0))
		WARNING(fsg, "error in submission: %s --> %d\n", ep->name, rc);
}

587 588 589 590 591 592 593 594
static bool start_in_transfer(struct fsg_common *common, struct fsg_buffhd *bh)
{
	if (!fsg_is_set(common))
		return false;
	start_transfer(common->fsg, common->fsg->bulk_in,
		       bh->inreq, &bh->inreq_busy, &bh->state);
	return true;
}
595

596 597 598 599 600 601 602 603
static bool start_out_transfer(struct fsg_common *common, struct fsg_buffhd *bh)
{
	if (!fsg_is_set(common))
		return false;
	start_transfer(common->fsg, common->fsg->bulk_out,
		       bh->outreq, &bh->outreq_busy, &bh->state);
	return true;
}
604

605
/*
 * Block the main thread until wakeup_thread() is called or a signal
 * arrives.  Returns 0 on wakeup, -EINTR on signal.  @can_freeze selects
 * whether the thread may be frozen for system suspend while waiting.
 */
static int sleep_thread(struct fsg_common *common, bool can_freeze)
{
	int	rc = 0;

	/* Wait until a signal arrives or we are woken up */
	for (;;) {
		if (can_freeze)
			try_to_freeze();
		/* State must be set before checking the wakeup conditions,
		 * or a wakeup between check and schedule() could be lost. */
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current)) {
			rc = -EINTR;
			break;
		}
		if (common->thread_wakeup_needed)
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	common->thread_wakeup_needed = 0;
	smp_rmb();	/* ensure the latest bh->state is visible */
	return rc;
}


/*-------------------------------------------------------------------------*/

631
/*
 * Handle a SCSI READ(6)/READ(10)/READ(12) command: read from the backing
 * file into pipeline buffers and queue them on the bulk-in endpoint.
 * Returns -EINTR on signal, -EIO otherwise ("no default reply" — the
 * buffers themselves carry the data; sense data reports any error).
 */
static int do_read(struct fsg_common *common)
{
	struct fsg_lun		*curlun = common->curlun;
	u32			lba;
	struct fsg_buffhd	*bh;
	int			rc;
	u32			amount_left;
	loff_t			file_offset, file_offset_tmp;
	unsigned int		amount;
	ssize_t			nread;

	/*
	 * Get the starting Logical Block Address and check that it's
	 * not too big.
	 */
	if (common->cmnd[0] == READ_6)
		/* READ(6) packs a 21-bit LBA into bytes 1-3 */
		lba = get_unaligned_be24(&common->cmnd[1]);
	else {
		lba = get_unaligned_be32(&common->cmnd[2]);

		/*
		 * We allow DPO (Disable Page Out = don't save data in the
		 * cache) and FUA (Force Unit Access = don't read from the
		 * cache), but we don't implement them.
		 */
		if ((common->cmnd[1] & ~0x18) != 0) {
			curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
			return -EINVAL;
		}
	}
	if (lba >= curlun->num_sectors) {
		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
		return -EINVAL;
	}
	file_offset = ((loff_t) lba) << curlun->blkbits;

	/* Carry out the file reads */
	amount_left = common->data_size_from_cmnd;
	if (unlikely(amount_left == 0))
		return -EIO;		/* No default reply */

	for (;;) {
		/*
		 * Figure out how much we need to read:
		 * Try to read the remaining amount.
		 * But don't read more than the buffer size.
		 * And don't try to read past the end of the file.
		 */
		amount = min(amount_left, FSG_BUFLEN);
		amount = min((loff_t)amount,
			     curlun->file_length - file_offset);

		/* Wait for the next buffer to become available */
		bh = common->next_buffhd_to_fill;
		while (bh->state != BUF_STATE_EMPTY) {
			rc = sleep_thread(common, false);
			if (rc)
				return rc;
		}

		/*
		 * If we were asked to read past the end of file,
		 * end with an empty buffer.
		 */
		if (amount == 0) {
			curlun->sense_data =
					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
			curlun->sense_data_info =
					file_offset >> curlun->blkbits;
			curlun->info_valid = 1;
			bh->inreq->length = 0;
			bh->state = BUF_STATE_FULL;
			break;
		}

		/* Perform the read */
		file_offset_tmp = file_offset;
		nread = vfs_read(curlun->filp,
				 (char __user *)bh->buf,
				 amount, &file_offset_tmp);
		VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
		      (unsigned long long)file_offset, (int)nread);
		if (signal_pending(current))
			return -EINTR;

		if (nread < 0) {
			LDBG(curlun, "error in file read: %d\n", (int)nread);
			nread = 0;
		} else if (nread < amount) {
			LDBG(curlun, "partial file read: %d/%u\n",
			     (int)nread, amount);
			/* Never hand the host a partial block */
			nread = round_down(nread, curlun->blksize);
		}
		file_offset  += nread;
		amount_left  -= nread;
		common->residue -= nread;

		/*
		 * Except at the end of the transfer, nread will be
		 * equal to the buffer size, which is divisible by the
		 * bulk-in maxpacket size.
		 */
		bh->inreq->length = nread;
		bh->state = BUF_STATE_FULL;

		/* If an error occurred, report it and its position */
		if (nread < amount) {
			curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
			curlun->sense_data_info =
					file_offset >> curlun->blkbits;
			curlun->info_valid = 1;
			break;
		}

		if (amount_left == 0)
			break;		/* No more left to read */

		/* Send this buffer and go read some more */
		bh->inreq->zero = 0;
		if (!start_in_transfer(common, bh))
			/* Don't know what to do if common->fsg is NULL */
			return -EIO;
		common->next_buffhd_to_fill = bh->next;
	}

	return -EIO;		/* No default reply */
}


/*-------------------------------------------------------------------------*/

762
/*
 * Handle a SCSI WRITE(6)/WRITE(10)/WRITE(12) command: queue bulk-out
 * requests for the host's data and write filled buffers to the backing
 * file.  FUA (WRITE(10/12) bit 3) is honored with O_SYNC output unless
 * the LUN's "nofua" flag is set.  Returns -EINTR on signal, -EIO
 * otherwise (sense data carries any error report).
 */
static int do_write(struct fsg_common *common)
{
	struct fsg_lun		*curlun = common->curlun;
	u32			lba;
	struct fsg_buffhd	*bh;
	int			get_some_more;
	u32			amount_left_to_req, amount_left_to_write;
	loff_t			usb_offset, file_offset, file_offset_tmp;
	unsigned int		amount;
	ssize_t			nwritten;
	int			rc;

	if (curlun->ro) {
		curlun->sense_data = SS_WRITE_PROTECTED;
		return -EINVAL;
	}
	spin_lock(&curlun->filp->f_lock);
	curlun->filp->f_flags &= ~O_SYNC;	/* Default is not to wait */
	spin_unlock(&curlun->filp->f_lock);

	/*
	 * Get the starting Logical Block Address and check that it's
	 * not too big
	 */
	if (common->cmnd[0] == WRITE_6)
		lba = get_unaligned_be24(&common->cmnd[1]);
	else {
		lba = get_unaligned_be32(&common->cmnd[2]);

		/*
		 * We allow DPO (Disable Page Out = don't save data in the
		 * cache) and FUA (Force Unit Access = write directly to the
		 * medium).  We don't implement DPO; we implement FUA by
		 * performing synchronous output.
		 */
		if (common->cmnd[1] & ~0x18) {
			curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
			return -EINVAL;
		}
		if (!curlun->nofua && (common->cmnd[1] & 0x08)) { /* FUA */
			spin_lock(&curlun->filp->f_lock);
			curlun->filp->f_flags |= O_SYNC;
			spin_unlock(&curlun->filp->f_lock);
		}
	}
	if (lba >= curlun->num_sectors) {
		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
		return -EINVAL;
	}

	/* Carry out the file writes */
	get_some_more = 1;
	file_offset = usb_offset = ((loff_t) lba) << curlun->blkbits;
	amount_left_to_req = common->data_size_from_cmnd;
	amount_left_to_write = common->data_size_from_cmnd;

	while (amount_left_to_write > 0) {

		/* Queue a request for more data from the host */
		bh = common->next_buffhd_to_fill;
		if (bh->state == BUF_STATE_EMPTY && get_some_more) {

			/*
			 * Figure out how much we want to get:
			 * Try to get the remaining amount,
			 * but not more than the buffer size.
			 */
			amount = min(amount_left_to_req, FSG_BUFLEN);

			/* Beyond the end of the backing file? */
			if (usb_offset >= curlun->file_length) {
				get_some_more = 0;
				curlun->sense_data =
					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
				curlun->sense_data_info =
					usb_offset >> curlun->blkbits;
				curlun->info_valid = 1;
				continue;
			}

			/* Get the next buffer */
			usb_offset += amount;
			common->usb_amount_left -= amount;
			amount_left_to_req -= amount;
			if (amount_left_to_req == 0)
				get_some_more = 0;

			/*
			 * Except at the end of the transfer, amount will be
			 * equal to the buffer size, which is divisible by
			 * the bulk-out maxpacket size.
			 */
			set_bulk_out_req_length(common, bh, amount);
			if (!start_out_transfer(common, bh))
				/* Dunno what to do if common->fsg is NULL */
				return -EIO;
			common->next_buffhd_to_fill = bh->next;
			continue;
		}

		/* Write the received data to the backing file */
		bh = common->next_buffhd_to_drain;
		if (bh->state == BUF_STATE_EMPTY && !get_some_more)
			break;			/* We stopped early */
		if (bh->state == BUF_STATE_FULL) {
			/* Pair with the smp_wmb() in the completion handler */
			smp_rmb();
			common->next_buffhd_to_drain = bh->next;
			bh->state = BUF_STATE_EMPTY;

			/* Did something go wrong with the transfer? */
			if (bh->outreq->status != 0) {
				curlun->sense_data = SS_COMMUNICATION_FAILURE;
				curlun->sense_data_info =
					file_offset >> curlun->blkbits;
				curlun->info_valid = 1;
				break;
			}

			amount = bh->outreq->actual;
			if (curlun->file_length - file_offset < amount) {
				LERROR(curlun,
				       "write %u @ %llu beyond end %llu\n",
				       amount, (unsigned long long)file_offset,
				       (unsigned long long)curlun->file_length);
				amount = curlun->file_length - file_offset;
			}

			/* Don't accept excess data.  The spec doesn't say
			 * what to do in this case.  We'll ignore the error.
			 */
			amount = min(amount, bh->bulk_out_intended_length);

			/* Don't write a partial block */
			amount = round_down(amount, curlun->blksize);
			if (amount == 0)
				goto empty_write;

			/* Perform the write */
			file_offset_tmp = file_offset;
			nwritten = vfs_write(curlun->filp,
					     (char __user *)bh->buf,
					     amount, &file_offset_tmp);
			VLDBG(curlun, "file write %u @ %llu -> %d\n", amount,
			      (unsigned long long)file_offset, (int)nwritten);
			if (signal_pending(current))
				return -EINTR;		/* Interrupted! */

			if (nwritten < 0) {
				LDBG(curlun, "error in file write: %d\n",
				     (int)nwritten);
				nwritten = 0;
			} else if (nwritten < amount) {
				LDBG(curlun, "partial file write: %d/%u\n",
				     (int)nwritten, amount);
				nwritten = round_down(nwritten, curlun->blksize);
			}
			file_offset += nwritten;
			amount_left_to_write -= nwritten;
			common->residue -= nwritten;

			/* If an error occurred, report it and its position */
			if (nwritten < amount) {
				curlun->sense_data = SS_WRITE_ERROR;
				curlun->sense_data_info =
					file_offset >> curlun->blkbits;
				curlun->info_valid = 1;
				break;
			}

 empty_write:
			/* Did the host decide to stop early? */
			if (bh->outreq->actual < bh->bulk_out_intended_length) {
				common->short_packet_received = 1;
				break;
			}
			continue;
		}

		/* Wait for something to happen */
		rc = sleep_thread(common, false);
		if (rc)
			return rc;
	}

	return -EIO;		/* No default reply */
}


/*-------------------------------------------------------------------------*/

952
static int do_synchronize_cache(struct fsg_common *common)
953
{
954
	struct fsg_lun	*curlun = common->curlun;
955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970
	int		rc;

	/* We ignore the requested LBA and write out all file's
	 * dirty data buffers. */
	rc = fsg_lun_fsync_sub(curlun);
	if (rc)
		curlun->sense_data = SS_WRITE_ERROR;
	return 0;
}


/*-------------------------------------------------------------------------*/

static void invalidate_sub(struct fsg_lun *curlun)
{
	struct file	*filp = curlun->filp;
A
Al Viro 已提交
971
	struct inode	*inode = file_inode(filp);
972 973 974
	unsigned long	rc;

	rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
975
	VLDBG(curlun, "invalidate_mapping_pages -> %ld\n", rc);
976 977
}

/*
 * Handle a SCSI VERIFY(10) command: flush dirty pages and invalidate the
 * page cache for the backing file, then read the requested blocks back
 * from storage to check that they are readable.  Read failures are
 * reported through the LUN's sense data; no data is sent to the host.
 *
 * Returns 0 on completion (even when a read error was recorded in sense
 * data), -EINVAL for a bad CDB, -EIO for a zero verification length, or
 * -EINTR if the thread was signalled.
 */
static int do_verify(struct fsg_common *common)
{
	struct fsg_lun		*curlun = common->curlun;
	u32			lba;
	u32			verification_length;
	struct fsg_buffhd	*bh = common->next_buffhd_to_fill;
	loff_t			file_offset, file_offset_tmp;
	u32			amount_left;
	unsigned int		amount;
	ssize_t			nread;

	/*
	 * Get the starting Logical Block Address and check that it's
	 * not too big.
	 */
	lba = get_unaligned_be32(&common->cmnd[2]);
	if (lba >= curlun->num_sectors) {
		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
		return -EINVAL;
	}

	/*
	 * We allow DPO (Disable Page Out = don't save data in the
	 * cache) but we don't implement it.
	 */
	if (common->cmnd[1] & ~0x10) {
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	verification_length = get_unaligned_be16(&common->cmnd[7]);
	if (unlikely(verification_length == 0))
		return -EIO;		/* No default reply */

	/* Prepare to carry out the file verify: convert blocks to bytes */
	amount_left = verification_length << curlun->blkbits;
	file_offset = ((loff_t) lba) << curlun->blkbits;

	/* Write out all the dirty buffers before invalidating them */
	fsg_lun_fsync_sub(curlun);
	if (signal_pending(current))
		return -EINTR;

	invalidate_sub(curlun);
	if (signal_pending(current))
		return -EINTR;

	/* Just try to read the requested blocks */
	while (amount_left > 0) {
		/*
		 * Figure out how much we need to read:
		 * Try to read the remaining amount, but not more than
		 * the buffer size.
		 * And don't try to read past the end of the file.
		 */
		amount = min(amount_left, FSG_BUFLEN);
		amount = min((loff_t)amount,
			     curlun->file_length - file_offset);
		if (amount == 0) {
			/* Ran off the end of the backing file */
			curlun->sense_data =
					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
			curlun->sense_data_info =
				file_offset >> curlun->blkbits;
			curlun->info_valid = 1;
			break;
		}

		/* Perform the read (pos passed via a temp; see note below) */
		file_offset_tmp = file_offset;
		nread = vfs_read(curlun->filp,
				(char __user *) bh->buf,
				amount, &file_offset_tmp);
		VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
				(unsigned long long) file_offset,
				(int) nread);
		if (signal_pending(current))
			return -EINTR;

		if (nread < 0) {
			LDBG(curlun, "error in file verify: %d\n", (int)nread);
			nread = 0;
		} else if (nread < amount) {
			LDBG(curlun, "partial file verify: %d/%u\n",
			     (int)nread, amount);
			/* Discard any trailing partial block */
			nread = round_down(nread, curlun->blksize);
		}
		if (nread == 0) {
			/* Nothing (or less than one block) could be read */
			curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
			curlun->sense_data_info =
				file_offset >> curlun->blkbits;
			curlun->info_valid = 1;
			break;
		}
		file_offset += nread;
		amount_left -= nread;
	}
	return 0;
}


/*-------------------------------------------------------------------------*/

1080
static int do_inquiry(struct fsg_common *common, struct fsg_buffhd *bh)
1081
{
1082
	struct fsg_lun *curlun = common->curlun;
1083 1084
	u8	*buf = (u8 *) bh->buf;

1085
	if (!curlun) {		/* Unsupported LUNs are okay */
1086
		common->bad_lun_okay = 1;
1087
		memset(buf, 0, 36);
1088
		buf[0] = TYPE_NO_LUN;	/* Unsupported, no device-type */
1089
		buf[4] = 31;		/* Additional length */
1090 1091 1092
		return 36;
	}

1093
	buf[0] = curlun->cdrom ? TYPE_ROM : TYPE_DISK;
1094
	buf[1] = curlun->removable ? 0x80 : 0;
1095 1096 1097 1098
	buf[2] = 2;		/* ANSI SCSI level 2 */
	buf[3] = 2;		/* SCSI-2 INQUIRY data format */
	buf[4] = 31;		/* Additional length */
	buf[5] = 0;		/* No special options */
1099 1100
	buf[6] = 0;
	buf[7] = 0;
1101
	memcpy(buf + 8, common->inquiry_string, sizeof common->inquiry_string);
1102 1103 1104
	return 36;
}

/*
 * Handle REQUEST SENSE: build an 18-byte fixed-format sense response in
 * bh->buf and clear the LUN's pending sense data (this implements
 * option a) of the SCSI-2 rules quoted below; the unit attention
 * condition itself is preserved).  Returns the reply length, 18.
 */
static int do_request_sense(struct fsg_common *common, struct fsg_buffhd *bh)
{
	struct fsg_lun	*curlun = common->curlun;
	u8		*buf = (u8 *) bh->buf;
	u32		sd, sdinfo;
	int		valid;

	/*
	 * From the SCSI-2 spec., section 7.9 (Unit attention condition):
	 *
	 * If a REQUEST SENSE command is received from an initiator
	 * with a pending unit attention condition (before the target
	 * generates the contingent allegiance condition), then the
	 * target shall either:
	 *   a) report any pending sense data and preserve the unit
	 *	attention condition on the logical unit, or,
	 *   b) report the unit attention condition, may discard any
	 *	pending sense data, and clear the unit attention
	 *	condition on the logical unit for that initiator.
	 *
	 * FSG normally uses option a); enable this code to use option b).
	 */
#if 0
	if (curlun && curlun->unit_attention_data != SS_NO_SENSE) {
		curlun->sense_data = curlun->unit_attention_data;
		curlun->unit_attention_data = SS_NO_SENSE;
	}
#endif

	if (!curlun) {		/* Unsupported LUNs are okay */
		common->bad_lun_okay = 1;
		sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
		sdinfo = 0;
		valid = 0;
	} else {
		/* Report the pending sense data, then clear it */
		sd = curlun->sense_data;
		sdinfo = curlun->sense_data_info;
		valid = curlun->info_valid << 7;
		curlun->sense_data = SS_NO_SENSE;
		curlun->sense_data_info = 0;
		curlun->info_valid = 0;
	}

	/* Fixed-format sense data (SCSI-2) */
	memset(buf, 0, 18);
	buf[0] = valid | 0x70;			/* Valid, current error */
	buf[2] = SK(sd);			/* Sense key */
	put_unaligned_be32(sdinfo, &buf[3]);	/* Sense information */
	buf[7] = 18 - 8;			/* Additional sense length */
	buf[12] = ASC(sd);			/* Additional sense code */
	buf[13] = ASCQ(sd);			/* ASC qualifier */
	return 18;
}

1158
static int do_read_capacity(struct fsg_common *common, struct fsg_buffhd *bh)
1159
{
1160 1161 1162
	struct fsg_lun	*curlun = common->curlun;
	u32		lba = get_unaligned_be32(&common->cmnd[2]);
	int		pmi = common->cmnd[8];
1163
	u8		*buf = (u8 *)bh->buf;
1164 1165 1166 1167 1168 1169 1170 1171 1172

	/* Check the PMI and LBA fields */
	if (pmi > 1 || (pmi == 0 && lba != 0)) {
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	put_unaligned_be32(curlun->num_sectors - 1, &buf[0]);
						/* Max logical block */
1173
	put_unaligned_be32(curlun->blksize, &buf[4]);/* Block length */
1174 1175 1176
	return 8;
}

1177
static int do_read_header(struct fsg_common *common, struct fsg_buffhd *bh)
1178
{
1179 1180 1181
	struct fsg_lun	*curlun = common->curlun;
	int		msf = common->cmnd[1] & 0x02;
	u32		lba = get_unaligned_be32(&common->cmnd[2]);
1182
	u8		*buf = (u8 *)bh->buf;
1183

1184
	if (common->cmnd[1] & ~0x02) {		/* Mask away MSF */
1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}
	if (lba >= curlun->num_sectors) {
		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
		return -EINVAL;
	}

	memset(buf, 0, 8);
	buf[0] = 0x01;		/* 2048 bytes of user data, rest is EC */
	store_cdrom_address(&buf[4], msf, lba);
	return 8;
}

1199
static int do_read_toc(struct fsg_common *common, struct fsg_buffhd *bh)
1200
{
1201 1202 1203
	struct fsg_lun	*curlun = common->curlun;
	int		msf = common->cmnd[1] & 0x02;
	int		start_track = common->cmnd[6];
1204
	u8		*buf = (u8 *)bh->buf;
1205

1206
	if ((common->cmnd[1] & ~0x02) != 0 ||	/* Mask away MSF */
1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225
			start_track > 1) {
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	memset(buf, 0, 20);
	buf[1] = (20-2);		/* TOC data length */
	buf[2] = 1;			/* First track number */
	buf[3] = 1;			/* Last track number */
	buf[5] = 0x16;			/* Data track, copying allowed */
	buf[6] = 0x01;			/* Only track is number 1 */
	store_cdrom_address(&buf[8], msf, 0);

	buf[13] = 0x16;			/* Lead-out track is data */
	buf[14] = 0xAA;			/* Lead-out track number */
	store_cdrom_address(&buf[16], msf, curlun->num_sectors);
	return 20;
}

/*
 * Handle MODE SENSE(6) and MODE SENSE(10): build the mode parameter
 * header plus the only supported mode page (Caching, page 0x08) in
 * bh->buf.  Returns the reply length, or -EINVAL with sense data set
 * when the CDB requests an unsupported page or saved values.
 */
static int do_mode_sense(struct fsg_common *common, struct fsg_buffhd *bh)
{
	struct fsg_lun	*curlun = common->curlun;
	int		mscmnd = common->cmnd[0];
	u8		*buf = (u8 *) bh->buf;
	u8		*buf0 = buf;	/* Start of reply; length patched in last */
	int		pc, page_code;
	int		changeable_values, all_pages;
	int		valid_page = 0;
	int		len, limit;

	if ((common->cmnd[1] & ~0x08) != 0) {	/* Mask away DBD */
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}
	pc = common->cmnd[2] >> 6;		/* Page control field */
	page_code = common->cmnd[2] & 0x3f;
	if (pc == 3) {				/* Saved values not supported */
		curlun->sense_data = SS_SAVING_PARAMETERS_NOT_SUPPORTED;
		return -EINVAL;
	}
	changeable_values = (pc == 1);
	all_pages = (page_code == 0x3f);

	/*
	 * Write the mode parameter header.  Fixed values are: default
	 * medium type, no cache control (DPOFUA), and no block descriptors.
	 * The only variable value is the WriteProtect bit.  We will fill in
	 * the mode data length later.
	 */
	memset(buf, 0, 8);
	if (mscmnd == MODE_SENSE) {
		buf[2] = (curlun->ro ? 0x80 : 0x00);		/* WP, DPOFUA */
		buf += 4;		/* 4-byte header for MODE SENSE(6) */
		limit = 255;
	} else {			/* MODE_SENSE_10 */
		buf[3] = (curlun->ro ? 0x80 : 0x00);		/* WP, DPOFUA */
		buf += 8;		/* 8-byte header for MODE SENSE(10) */
		limit = 65535;		/* Should really be FSG_BUFLEN */
	}

	/* No block descriptors */

	/*
	 * The mode pages, in numerical order.  The only page we support
	 * is the Caching page.
	 */
	if (page_code == 0x08 || all_pages) {
		valid_page = 1;
		buf[0] = 0x08;		/* Page code */
		buf[1] = 10;		/* Page length */
		memset(buf+2, 0, 10);	/* None of the fields are changeable */

		if (!changeable_values) {
			buf[2] = 0x04;	/* Write cache enable, */
					/* Read cache not disabled */
					/* No cache retention priorities */
			put_unaligned_be16(0xffff, &buf[4]);
					/* Don't disable prefetch */
					/* Minimum prefetch = 0 */
			put_unaligned_be16(0xffff, &buf[8]);
					/* Maximum prefetch */
			put_unaligned_be16(0xffff, &buf[10]);
					/* Maximum prefetch ceiling */
		}
		buf += 12;
	}

	/*
	 * Check that a valid page was requested and the mode data length
	 * isn't too long.
	 */
	len = buf - buf0;
	if (!valid_page || len > limit) {
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	/*  Store the mode data length (excludes the length field itself) */
	if (mscmnd == MODE_SENSE)
		buf0[0] = len - 1;
	else
		put_unaligned_be16(len - 2, buf0);
	return len;
}

/*
 * Handle START STOP UNIT.  Start (load) merely checks that a backing
 * file is open; stop with LoEj set closes the backing file ("ejects"
 * the medium) unless the host has locked medium removal.
 */
static int do_start_stop(struct fsg_common *common)
{
	struct fsg_lun	*curlun = common->curlun;
	int		loej, start;

	if (!curlun) {
		return -EINVAL;
	} else if (!curlun->removable) {
		curlun->sense_data = SS_INVALID_COMMAND;
		return -EINVAL;
	} else if ((common->cmnd[1] & ~0x01) != 0 || /* Mask away Immed */
		   (common->cmnd[4] & ~0x03) != 0) { /* Mask LoEj, Start */
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	loej  = common->cmnd[4] & 0x02;
	start = common->cmnd[4] & 0x01;

	/*
	 * Our emulation doesn't support mounting; the medium is
	 * available for use as soon as it is loaded.
	 */
	if (start) {
		if (!fsg_lun_is_open(curlun)) {
			curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
			return -EINVAL;
		}
		return 0;
	}

	/* Are we allowed to unload the media? */
	if (curlun->prevent_medium_removal) {
		LDBG(curlun, "unload attempt prevented\n");
		curlun->sense_data = SS_MEDIUM_REMOVAL_PREVENTED;
		return -EINVAL;
	}

	if (!loej)
		return 0;

	/*
	 * Eject: the caller holds filesem for reading, but closing the
	 * backing file requires the write lock, so drop and re-acquire.
	 * NOTE(review): assumes nothing harmful can happen in the window
	 * between up_read() and down_write() -- confirm against callers.
	 */
	up_read(&common->filesem);
	down_write(&common->filesem);
	fsg_lun_close(curlun);
	up_write(&common->filesem);
	down_read(&common->filesem);

	return 0;
}

1362
static int do_prevent_allow(struct fsg_common *common)
1363
{
1364
	struct fsg_lun	*curlun = common->curlun;
1365 1366
	int		prevent;

1367
	if (!common->curlun) {
1368
		return -EINVAL;
1369 1370
	} else if (!common->curlun->removable) {
		common->curlun->sense_data = SS_INVALID_COMMAND;
1371 1372 1373
		return -EINVAL;
	}

1374 1375
	prevent = common->cmnd[4] & 0x01;
	if ((common->cmnd[4] & ~0x01) != 0) {	/* Mask away Prevent */
1376 1377 1378 1379 1380 1381 1382 1383 1384 1385
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	if (curlun->prevent_medium_removal && !prevent)
		fsg_lun_fsync_sub(curlun);
	curlun->prevent_medium_removal = prevent;
	return 0;
}

1386
static int do_read_format_capacities(struct fsg_common *common,
1387 1388
			struct fsg_buffhd *bh)
{
1389
	struct fsg_lun	*curlun = common->curlun;
1390 1391 1392
	u8		*buf = (u8 *) bh->buf;

	buf[0] = buf[1] = buf[2] = 0;
1393
	buf[3] = 8;	/* Only the Current/Maximum Capacity Descriptor */
1394 1395 1396 1397
	buf += 4;

	put_unaligned_be32(curlun->num_sectors, &buf[0]);
						/* Number of blocks */
1398
	put_unaligned_be32(curlun->blksize, &buf[4]);/* Block length */
1399 1400 1401 1402
	buf[4] = 0x02;				/* Current capacity */
	return 12;
}

1403
static int do_mode_select(struct fsg_common *common, struct fsg_buffhd *bh)
1404
{
1405
	struct fsg_lun	*curlun = common->curlun;
1406 1407

	/* We don't support MODE SELECT */
1408 1409
	if (curlun)
		curlun->sense_data = SS_INVALID_COMMAND;
1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460
	return -EINVAL;
}


/*-------------------------------------------------------------------------*/

/* Halt the bulk-in endpoint, retrying while the hardware is busy. */
static int halt_bulk_in_endpoint(struct fsg_dev *fsg)
{
	int	ret = fsg_set_halt(fsg, fsg->bulk_in);

	if (ret == -EAGAIN)
		VDBG(fsg, "delayed bulk-in endpoint halt\n");

	while (ret != 0) {
		if (ret != -EAGAIN) {
			/* Give up on any other error, but don't propagate it */
			WARNING(fsg, "usb_ep_set_halt -> %d\n", ret);
			ret = 0;
			break;
		}

		/* Endpoint busy: back off for 100 ms, abort on a signal */
		if (msleep_interruptible(100) != 0)
			return -EINTR;
		ret = usb_ep_set_halt(fsg->bulk_in);
	}
	return ret;
}

/* Wedge the bulk-in endpoint, retrying while the hardware is busy. */
static int wedge_bulk_in_endpoint(struct fsg_dev *fsg)
{
	int	ret;

	DBG(fsg, "bulk-in set wedge\n");
	ret = usb_ep_set_wedge(fsg->bulk_in);
	if (ret == -EAGAIN)
		VDBG(fsg, "delayed bulk-in endpoint wedge\n");

	while (ret != 0) {
		if (ret != -EAGAIN) {
			/* Give up on any other error, but don't propagate it */
			WARNING(fsg, "usb_ep_set_wedge -> %d\n", ret);
			ret = 0;
			break;
		}

		/* Endpoint busy: back off for 100 ms, abort on a signal */
		if (msleep_interruptible(100) != 0)
			return -EINTR;
		ret = usb_ep_set_wedge(fsg->bulk_in);
	}
	return ret;
}

/*
 * Drain and discard excess bulk-out data the host is still sending:
 * recycle filled buffers and keep queueing new bulk-out requests until
 * usb_amount_left reaches zero and all buffers have been emptied.
 * Returns 0 on success, -EINTR if the transfer was aborted or the
 * thread interrupted, -EIO if a request could not be started.
 */
static int throw_away_data(struct fsg_common *common)
{
	struct fsg_buffhd	*bh;
	u32			amount;
	int			rc;

	for (bh = common->next_buffhd_to_drain;
	     bh->state != BUF_STATE_EMPTY || common->usb_amount_left > 0;
	     bh = common->next_buffhd_to_drain) {

		/* Throw away the data in a filled buffer */
		if (bh->state == BUF_STATE_FULL) {
			/* Pair with the barrier in the completion handler */
			smp_rmb();
			bh->state = BUF_STATE_EMPTY;
			common->next_buffhd_to_drain = bh->next;

			/* A short packet or an error ends everything */
			if (bh->outreq->actual < bh->bulk_out_intended_length ||
			    bh->outreq->status != 0) {
				raise_exception(common,
						FSG_STATE_ABORT_BULK_OUT);
				return -EINTR;
			}
			continue;
		}

		/* Try to submit another request if we need one */
		bh = common->next_buffhd_to_fill;
		if (bh->state == BUF_STATE_EMPTY
		 && common->usb_amount_left > 0) {
			amount = min(common->usb_amount_left, FSG_BUFLEN);

			/*
			 * Except at the end of the transfer, amount will be
			 * equal to the buffer size, which is divisible by
			 * the bulk-out maxpacket size.
			 */
			set_bulk_out_req_length(common, bh, amount);
			if (!start_out_transfer(common, bh))
				/* Dunno what to do if common->fsg is NULL */
				return -EIO;
			common->next_buffhd_to_fill = bh->next;
			common->usb_amount_left -= amount;
			continue;
		}

		/* Otherwise wait for something to happen */
		rc = sleep_thread(common, true);
		if (rc)
			return rc;
	}
	return 0;
}

/*
 * Complete the data phase of the current command according to the
 * expected data direction: send the final buffer to the host, drain
 * any excess data sent by the host, or stall both pipes when the
 * direction could not be determined.  Returns 0 or a negative error.
 */
static int finish_reply(struct fsg_common *common)
{
	struct fsg_buffhd	*bh = common->next_buffhd_to_fill;
	int			rc = 0;

	switch (common->data_dir) {
	case DATA_DIR_NONE:
		break;			/* Nothing to send */

	/*
	 * If we don't know whether the host wants to read or write,
	 * this must be CB or CBI with an unknown command.  We mustn't
	 * try to send or receive any data.  So stall both bulk pipes
	 * if we can and wait for a reset.
	 */
	case DATA_DIR_UNKNOWN:
		if (!common->can_stall) {
			/* Nothing */
		} else if (fsg_is_set(common)) {
			fsg_set_halt(common->fsg, common->fsg->bulk_out);
			rc = halt_bulk_in_endpoint(common->fsg);
		} else {
			/* Don't know what to do if common->fsg is NULL */
			rc = -EIO;
		}
		break;

	/* All but the last buffer of data must have already been sent */
	case DATA_DIR_TO_HOST:
		if (common->data_size == 0) {
			/* Nothing to send */

		/* Don't know what to do if common->fsg is NULL */
		} else if (!fsg_is_set(common)) {
			rc = -EIO;

		/* If there's no residue, simply send the last buffer */
		} else if (common->residue == 0) {
			bh->inreq->zero = 0;
			if (!start_in_transfer(common, bh))
				return -EIO;
			common->next_buffhd_to_fill = bh->next;

		/*
		 * For Bulk-only, mark the end of the data with a short
		 * packet.  If we are allowed to stall, halt the bulk-in
		 * endpoint.  (Note: This violates the Bulk-Only Transport
		 * specification, which requires us to pad the data if we
		 * don't halt the endpoint.  Presumably nobody will mind.)
		 */
		} else {
			bh->inreq->zero = 1;
			if (!start_in_transfer(common, bh))
				rc = -EIO;
			common->next_buffhd_to_fill = bh->next;
			if (common->can_stall)
				rc = halt_bulk_in_endpoint(common->fsg);
		}
		break;

	/*
	 * We have processed all we want from the data the host has sent.
	 * There may still be outstanding bulk-out requests.
	 */
	case DATA_DIR_FROM_HOST:
		if (common->residue == 0) {
			/* Nothing to receive */

		/* Did the host stop sending unexpectedly early? */
		} else if (common->short_packet_received) {
			raise_exception(common, FSG_STATE_ABORT_BULK_OUT);
			rc = -EINTR;

		/*
		 * We haven't processed all the incoming data.  Even though
		 * we may be allowed to stall, doing so would cause a race.
		 * The controller may already have ACK'ed all the remaining
		 * bulk-out packets, in which case the host wouldn't see a
		 * STALL.  Not realizing the endpoint was halted, it wouldn't
		 * clear the halt -- leading to problems later on.
		 */
#if 0
		} else if (common->can_stall) {
			if (fsg_is_set(common))
				fsg_set_halt(common->fsg,
					     common->fsg->bulk_out);
			raise_exception(common, FSG_STATE_ABORT_BULK_OUT);
			rc = -EINTR;
#endif

		/*
		 * We can't stall.  Read in the excess data and throw it
		 * all away.
		 */
		} else {
			rc = throw_away_data(common);
		}
		break;
	}
	return rc;
}

/*
 * Send the Bulk-Only Command Status Wrapper (CSW) for the command that
 * just finished, mapping phase errors and pending sense data onto the
 * CSW status byte.  Returns 0 on success or a negative error.
 */
static int send_status(struct fsg_common *common)
{
	struct fsg_lun		*curlun = common->curlun;
	struct fsg_buffhd	*bh;
	struct bulk_cs_wrap	*csw;
	int			rc;
	u8			status = US_BULK_STAT_OK;
	u32			sd, sdinfo = 0;

	/* Wait for the next buffer to become available */
	bh = common->next_buffhd_to_fill;
	while (bh->state != BUF_STATE_EMPTY) {
		rc = sleep_thread(common, true);
		if (rc)
			return rc;
	}

	/* Pick the sense data to report (for logging only; the host
	 * retrieves it with a later REQUEST SENSE) */
	if (curlun) {
		sd = curlun->sense_data;
		sdinfo = curlun->sense_data_info;
	} else if (common->bad_lun_okay)
		sd = SS_NO_SENSE;
	else
		sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;

	if (common->phase_error) {
		DBG(common, "sending phase-error status\n");
		status = US_BULK_STAT_PHASE;
		sd = SS_INVALID_COMMAND;
	} else if (sd != SS_NO_SENSE) {
		DBG(common, "sending command-failure status\n");
		status = US_BULK_STAT_FAIL;
		VDBG(common, "  sense data: SK x%02x, ASC x%02x, ASCQ x%02x;"
				"  info x%x\n",
				SK(sd), ASC(sd), ASCQ(sd), sdinfo);
	}

	/* Store and send the Bulk-only CSW */
	csw = (void *)bh->buf;

	csw->Signature = cpu_to_le32(US_BULK_CS_SIGN);
	csw->Tag = common->tag;		/* Echo the CBW tag back */
	csw->Residue = cpu_to_le32(common->residue);
	csw->Status = status;

	bh->inreq->length = US_BULK_CS_WRAP_LEN;
	bh->inreq->zero = 0;
	if (!start_in_transfer(common, bh))
		/* Don't know what to do if common->fsg is NULL */
		return -EIO;

	common->next_buffhd_to_fill = bh->next;
	return 0;
}


/*-------------------------------------------------------------------------*/

/*
 * Check whether the command is properly formed and whether its data size
 * and direction agree with the values we already have.
 *
 * @cmnd_size:	  the correct CDB length for this command
 * @data_dir:	  the direction the command is expected to transfer data
 * @mask:	  bitmap of CDB bytes (1 << index) allowed to be non-zero
 * @needs_medium: non-zero if the command requires a mounted medium
 * @name:	  command name, used only for debug logging
 *
 * On success returns 0 with common->residue/usb_amount_left initialized;
 * on failure returns -EINVAL, setting sense data and/or phase_error.
 */
static int check_command(struct fsg_common *common, int cmnd_size,
			 enum data_direction data_dir, unsigned int mask,
			 int needs_medium, const char *name)
{
	int			i;
	unsigned int		lun = common->cmnd[1] >> 5;	/* CDB LUN bits */
	static const char	dirletter[4] = {'u', 'o', 'i', 'n'};
	char			hdlen[20];
	struct fsg_lun		*curlun;

	hdlen[0] = 0;
	if (common->data_dir != DATA_DIR_UNKNOWN)
		sprintf(hdlen, ", H%c=%u", dirletter[(int) common->data_dir],
			common->data_size);
	VDBG(common, "SCSI command: %s;  Dc=%d, D%c=%u;  Hc=%d%s\n",
	     name, cmnd_size, dirletter[(int) data_dir],
	     common->data_size_from_cmnd, common->cmnd_size, hdlen);

	/*
	 * We can't reply at all until we know the correct data direction
	 * and size.
	 */
	if (common->data_size_from_cmnd == 0)
		data_dir = DATA_DIR_NONE;
	if (common->data_size < common->data_size_from_cmnd) {
		/*
		 * Host data size < Device data size is a phase error.
		 * Carry out the command, but only transfer as much as
		 * we are allowed.
		 */
		common->data_size_from_cmnd = common->data_size;
		common->phase_error = 1;
	}
	common->residue = common->data_size;
	common->usb_amount_left = common->data_size;

	/* Conflicting data directions is a phase error */
	if (common->data_dir != data_dir && common->data_size_from_cmnd > 0) {
		common->phase_error = 1;
		return -EINVAL;
	}

	/* Verify the length of the command itself */
	if (cmnd_size != common->cmnd_size) {

		/*
		 * Special case workaround: There are plenty of buggy SCSI
		 * implementations. Many have issues with cbw->Length
		 * field passing a wrong command size. For those cases we
		 * always try to work around the problem by using the length
		 * sent by the host side provided it is at least as large
		 * as the correct command length.
		 * Examples of such cases would be MS-Windows, which issues
		 * REQUEST SENSE with cbw->Length == 12 where it should
		 * be 6, and xbox360 issuing INQUIRY, TEST UNIT READY and
		 * REQUEST SENSE with cbw->Length == 10 where it should
		 * be 6 as well.
		 */
		if (cmnd_size <= common->cmnd_size) {
			DBG(common, "%s is buggy! Expected length %d "
			    "but we got %d\n", name,
			    cmnd_size, common->cmnd_size);
			cmnd_size = common->cmnd_size;
		} else {
			common->phase_error = 1;
			return -EINVAL;
		}
	}

	/* Check that the LUN values are consistent */
	if (common->lun != lun)
		DBG(common, "using LUN %u from CBW, not LUN %u from CDB\n",
		    common->lun, lun);

	/* Check the LUN */
	curlun = common->curlun;
	if (curlun) {
		/* Any command except REQUEST SENSE clears old sense data */
		if (common->cmnd[0] != REQUEST_SENSE) {
			curlun->sense_data = SS_NO_SENSE;
			curlun->sense_data_info = 0;
			curlun->info_valid = 0;
		}
	} else {
		common->bad_lun_okay = 0;

		/*
		 * INQUIRY and REQUEST SENSE commands are explicitly allowed
		 * to use unsupported LUNs; all others may not.
		 */
		if (common->cmnd[0] != INQUIRY &&
		    common->cmnd[0] != REQUEST_SENSE) {
			DBG(common, "unsupported LUN %u\n", common->lun);
			return -EINVAL;
		}
	}

	/*
	 * If a unit attention condition exists, only INQUIRY and
	 * REQUEST SENSE commands are allowed; anything else must fail.
	 */
	if (curlun && curlun->unit_attention_data != SS_NO_SENSE &&
	    common->cmnd[0] != INQUIRY &&
	    common->cmnd[0] != REQUEST_SENSE) {
		curlun->sense_data = curlun->unit_attention_data;
		curlun->unit_attention_data = SS_NO_SENSE;
		return -EINVAL;
	}

	/* Check that only command bytes listed in the mask are non-zero */
	common->cmnd[1] &= 0x1f;			/* Mask away the LUN */
	for (i = 1; i < cmnd_size; ++i) {
		if (common->cmnd[i] && !(mask & (1 << i))) {
			if (curlun)
				curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
			return -EINVAL;
		}
	}

	/* If the medium isn't mounted and the command needs to access
	 * it, return an error. */
	if (curlun && !fsg_lun_is_open(curlun) && needs_medium) {
		curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
		return -EINVAL;
	}

	return 0;
}

1807 1808 1809 1810 1811 1812 1813 1814 1815 1816 1817
/* wrapper of check_command for data size in blocks handling */
static int check_command_size_in_blocks(struct fsg_common *common,
		int cmnd_size, enum data_direction data_dir,
		unsigned int mask, int needs_medium, const char *name)
{
	if (common->curlun)
		common->data_size_from_cmnd <<= common->curlun->blkbits;
	return check_command(common, cmnd_size, data_dir,
			mask, needs_medium, name);
}

1818
static int do_scsi_command(struct fsg_common *common)
1819 1820 1821 1822 1823 1824 1825
{
	struct fsg_buffhd	*bh;
	int			rc;
	int			reply = -EINVAL;
	int			i;
	static char		unknown[16];

1826
	dump_cdb(common);
1827 1828

	/* Wait for the next buffer to become available for data or status */
1829 1830
	bh = common->next_buffhd_to_fill;
	common->next_buffhd_to_drain = bh;
1831
	while (bh->state != BUF_STATE_EMPTY) {
1832
		rc = sleep_thread(common, true);
1833 1834 1835
		if (rc)
			return rc;
	}
1836 1837
	common->phase_error = 0;
	common->short_packet_received = 0;
1838

1839 1840
	down_read(&common->filesem);	/* We're using the backing file */
	switch (common->cmnd[0]) {
1841

1842
	case INQUIRY:
1843 1844
		common->data_size_from_cmnd = common->cmnd[4];
		reply = check_command(common, 6, DATA_DIR_TO_HOST,
1845 1846 1847
				      (1<<4), 0,
				      "INQUIRY");
		if (reply == 0)
1848
			reply = do_inquiry(common, bh);
1849 1850
		break;

1851
	case MODE_SELECT:
1852 1853
		common->data_size_from_cmnd = common->cmnd[4];
		reply = check_command(common, 6, DATA_DIR_FROM_HOST,
1854 1855 1856
				      (1<<1) | (1<<4), 0,
				      "MODE SELECT(6)");
		if (reply == 0)
1857
			reply = do_mode_select(common, bh);
1858 1859
		break;

1860
	case MODE_SELECT_10:
1861 1862 1863
		common->data_size_from_cmnd =
			get_unaligned_be16(&common->cmnd[7]);
		reply = check_command(common, 10, DATA_DIR_FROM_HOST,
1864 1865 1866
				      (1<<1) | (3<<7), 0,
				      "MODE SELECT(10)");
		if (reply == 0)
1867
			reply = do_mode_select(common, bh);
1868 1869
		break;

1870
	case MODE_SENSE:
1871 1872
		common->data_size_from_cmnd = common->cmnd[4];
		reply = check_command(common, 6, DATA_DIR_TO_HOST,
1873 1874 1875
				      (1<<1) | (1<<2) | (1<<4), 0,
				      "MODE SENSE(6)");
		if (reply == 0)
1876
			reply = do_mode_sense(common, bh);
1877 1878
		break;

1879
	case MODE_SENSE_10:
1880 1881 1882
		common->data_size_from_cmnd =
			get_unaligned_be16(&common->cmnd[7]);
		reply = check_command(common, 10, DATA_DIR_TO_HOST,
1883 1884 1885
				      (1<<1) | (1<<2) | (3<<7), 0,
				      "MODE SENSE(10)");
		if (reply == 0)
1886
			reply = do_mode_sense(common, bh);
1887 1888
		break;

1889
	case ALLOW_MEDIUM_REMOVAL:
1890 1891
		common->data_size_from_cmnd = 0;
		reply = check_command(common, 6, DATA_DIR_NONE,
1892 1893 1894
				      (1<<4), 0,
				      "PREVENT-ALLOW MEDIUM REMOVAL");
		if (reply == 0)
1895
			reply = do_prevent_allow(common);
1896 1897
		break;

1898
	case READ_6:
1899
		i = common->cmnd[4];
1900 1901 1902
		common->data_size_from_cmnd = (i == 0) ? 256 : i;
		reply = check_command_size_in_blocks(common, 6,
				      DATA_DIR_TO_HOST,
1903 1904 1905
				      (7<<1) | (1<<4), 1,
				      "READ(6)");
		if (reply == 0)
1906
			reply = do_read(common);
1907 1908
		break;

1909
	case READ_10:
1910
		common->data_size_from_cmnd =
1911 1912 1913
				get_unaligned_be16(&common->cmnd[7]);
		reply = check_command_size_in_blocks(common, 10,
				      DATA_DIR_TO_HOST,
1914 1915 1916
				      (1<<1) | (0xf<<2) | (3<<7), 1,
				      "READ(10)");
		if (reply == 0)
1917
			reply = do_read(common);
1918 1919
		break;

1920
	case READ_12:
1921
		common->data_size_from_cmnd =
1922 1923 1924
				get_unaligned_be32(&common->cmnd[6]);
		reply = check_command_size_in_blocks(common, 12,
				      DATA_DIR_TO_HOST,
1925 1926 1927
				      (1<<1) | (0xf<<2) | (0xf<<6), 1,
				      "READ(12)");
		if (reply == 0)
1928
			reply = do_read(common);
1929 1930
		break;

1931
	case READ_CAPACITY:
1932 1933
		common->data_size_from_cmnd = 8;
		reply = check_command(common, 10, DATA_DIR_TO_HOST,
1934 1935 1936
				      (0xf<<2) | (1<<8), 1,
				      "READ CAPACITY");
		if (reply == 0)
1937
			reply = do_read_capacity(common, bh);
1938 1939
		break;

1940
	case READ_HEADER:
1941
		if (!common->curlun || !common->curlun->cdrom)
1942
			goto unknown_cmnd;
1943 1944 1945
		common->data_size_from_cmnd =
			get_unaligned_be16(&common->cmnd[7]);
		reply = check_command(common, 10, DATA_DIR_TO_HOST,
1946 1947 1948
				      (3<<7) | (0x1f<<1), 1,
				      "READ HEADER");
		if (reply == 0)
1949
			reply = do_read_header(common, bh);
1950 1951
		break;

1952
	case READ_TOC:
1953
		if (!common->curlun || !common->curlun->cdrom)
1954
			goto unknown_cmnd;
1955 1956 1957
		common->data_size_from_cmnd =
			get_unaligned_be16(&common->cmnd[7]);
		reply = check_command(common, 10, DATA_DIR_TO_HOST,
1958 1959 1960
				      (7<<6) | (1<<1), 1,
				      "READ TOC");
		if (reply == 0)
1961
			reply = do_read_toc(common, bh);
1962 1963
		break;

1964
	case READ_FORMAT_CAPACITIES:
1965 1966 1967
		common->data_size_from_cmnd =
			get_unaligned_be16(&common->cmnd[7]);
		reply = check_command(common, 10, DATA_DIR_TO_HOST,
1968 1969 1970
				      (3<<7), 1,
				      "READ FORMAT CAPACITIES");
		if (reply == 0)
1971
			reply = do_read_format_capacities(common, bh);
1972 1973
		break;

1974
	case REQUEST_SENSE:
1975 1976
		common->data_size_from_cmnd = common->cmnd[4];
		reply = check_command(common, 6, DATA_DIR_TO_HOST,
1977 1978 1979
				      (1<<4), 0,
				      "REQUEST SENSE");
		if (reply == 0)
1980
			reply = do_request_sense(common, bh);
1981 1982
		break;

1983
	case START_STOP:
1984 1985
		common->data_size_from_cmnd = 0;
		reply = check_command(common, 6, DATA_DIR_NONE,
1986 1987 1988
				      (1<<1) | (1<<4), 0,
				      "START-STOP UNIT");
		if (reply == 0)
1989
			reply = do_start_stop(common);
1990 1991
		break;

1992
	case SYNCHRONIZE_CACHE:
1993 1994
		common->data_size_from_cmnd = 0;
		reply = check_command(common, 10, DATA_DIR_NONE,
1995 1996 1997
				      (0xf<<2) | (3<<7), 1,
				      "SYNCHRONIZE CACHE");
		if (reply == 0)
1998
			reply = do_synchronize_cache(common);
1999 2000
		break;

2001
	case TEST_UNIT_READY:
2002 2003
		common->data_size_from_cmnd = 0;
		reply = check_command(common, 6, DATA_DIR_NONE,
2004 2005 2006 2007
				0, 1,
				"TEST UNIT READY");
		break;

2008 2009 2010 2011
	/*
	 * Although optional, this command is used by MS-Windows.  We
	 * support a minimal version: BytChk must be 0.
	 */
2012
	case VERIFY:
2013 2014
		common->data_size_from_cmnd = 0;
		reply = check_command(common, 10, DATA_DIR_NONE,
2015 2016 2017
				      (1<<1) | (0xf<<2) | (3<<7), 1,
				      "VERIFY");
		if (reply == 0)
2018
			reply = do_verify(common);
2019 2020
		break;

2021
	case WRITE_6:
2022
		i = common->cmnd[4];
2023 2024 2025
		common->data_size_from_cmnd = (i == 0) ? 256 : i;
		reply = check_command_size_in_blocks(common, 6,
				      DATA_DIR_FROM_HOST,
2026 2027 2028
				      (7<<1) | (1<<4), 1,
				      "WRITE(6)");
		if (reply == 0)
2029
			reply = do_write(common);
2030 2031
		break;

2032
	case WRITE_10:
2033
		common->data_size_from_cmnd =
2034 2035 2036
				get_unaligned_be16(&common->cmnd[7]);
		reply = check_command_size_in_blocks(common, 10,
				      DATA_DIR_FROM_HOST,
2037 2038 2039
				      (1<<1) | (0xf<<2) | (3<<7), 1,
				      "WRITE(10)");
		if (reply == 0)
2040
			reply = do_write(common);
2041 2042
		break;

2043
	case WRITE_12:
2044
		common->data_size_from_cmnd =
2045 2046 2047
				get_unaligned_be32(&common->cmnd[6]);
		reply = check_command_size_in_blocks(common, 12,
				      DATA_DIR_FROM_HOST,
2048 2049 2050
				      (1<<1) | (0xf<<2) | (0xf<<6), 1,
				      "WRITE(12)");
		if (reply == 0)
2051
			reply = do_write(common);
2052 2053
		break;

2054 2055
	/*
	 * Some mandatory commands that we recognize but don't implement.
2056 2057
	 * They don't mean much in this setting.  It's left as an exercise
	 * for anyone interested to implement RESERVE and RELEASE in terms
2058 2059
	 * of Posix locks.
	 */
2060 2061 2062 2063
	case FORMAT_UNIT:
	case RELEASE:
	case RESERVE:
	case SEND_DIAGNOSTIC:
2064
		/* Fall through */
2065 2066

	default:
2067
unknown_cmnd:
2068 2069 2070
		common->data_size_from_cmnd = 0;
		sprintf(unknown, "Unknown x%02x", common->cmnd[0]);
		reply = check_command(common, common->cmnd_size,
2071
				      DATA_DIR_UNKNOWN, ~0, 0, unknown);
2072
		if (reply == 0) {
2073
			common->curlun->sense_data = SS_INVALID_COMMAND;
2074 2075 2076 2077
			reply = -EINVAL;
		}
		break;
	}
2078
	up_read(&common->filesem);
2079 2080 2081 2082 2083 2084

	if (reply == -EINTR || signal_pending(current))
		return -EINTR;

	/* Set up the single reply buffer for finish_reply() */
	if (reply == -EINVAL)
2085
		reply = 0;		/* Error reply length */
2086
	if (reply >= 0 && common->data_dir == DATA_DIR_TO_HOST) {
2087
		reply = min((u32)reply, common->data_size_from_cmnd);
2088 2089
		bh->inreq->length = reply;
		bh->state = BUF_STATE_FULL;
2090
		common->residue -= reply;
2091
	}				/* Otherwise it's already set */
2092 2093 2094 2095 2096 2097 2098 2099 2100

	return 0;
}


/*-------------------------------------------------------------------------*/

static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
{
2101
	struct usb_request	*req = bh->outreq;
2102
	struct bulk_cb_wrap	*cbw = req->buf;
2103
	struct fsg_common	*common = fsg->common;
2104 2105 2106 2107 2108 2109

	/* Was this a real packet?  Should it be ignored? */
	if (req->status || test_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags))
		return -EINVAL;

	/* Is the CBW valid? */
2110
	if (req->actual != US_BULK_CB_WRAP_LEN ||
2111
			cbw->Signature != cpu_to_le32(
2112
				US_BULK_CB_SIGN)) {
2113 2114 2115 2116
		DBG(fsg, "invalid CBW: len %u sig 0x%x\n",
				req->actual,
				le32_to_cpu(cbw->Signature));

2117 2118
		/*
		 * The Bulk-only spec says we MUST stall the IN endpoint
2119 2120 2121 2122 2123 2124 2125
		 * (6.6.1), so it's unavoidable.  It also says we must
		 * retain this state until the next reset, but there's
		 * no way to tell the controller driver it should ignore
		 * Clear-Feature(HALT) requests.
		 *
		 * We aren't required to halt the OUT endpoint; instead
		 * we can simply accept and discard any data received
2126 2127
		 * until the next reset.
		 */
2128 2129 2130 2131 2132 2133
		wedge_bulk_in_endpoint(fsg);
		set_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
		return -EINVAL;
	}

	/* Is the CBW meaningful? */
2134
	if (cbw->Lun >= FSG_MAX_LUNS || cbw->Flags & ~US_BULK_FLAG_IN ||
2135 2136 2137 2138 2139
			cbw->Length <= 0 || cbw->Length > MAX_COMMAND_SIZE) {
		DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, "
				"cmdlen %u\n",
				cbw->Lun, cbw->Flags, cbw->Length);

2140 2141 2142 2143
		/*
		 * We can do anything we want here, so let's stall the
		 * bulk pipes if we are allowed to.
		 */
2144
		if (common->can_stall) {
2145 2146 2147 2148 2149 2150 2151
			fsg_set_halt(fsg, fsg->bulk_out);
			halt_bulk_in_endpoint(fsg);
		}
		return -EINVAL;
	}

	/* Save the command for later */
2152 2153
	common->cmnd_size = cbw->Length;
	memcpy(common->cmnd, cbw->CDB, common->cmnd_size);
2154
	if (cbw->Flags & US_BULK_FLAG_IN)
2155
		common->data_dir = DATA_DIR_TO_HOST;
2156
	else
2157 2158 2159 2160 2161
		common->data_dir = DATA_DIR_FROM_HOST;
	common->data_size = le32_to_cpu(cbw->DataTransferLength);
	if (common->data_size == 0)
		common->data_dir = DATA_DIR_NONE;
	common->lun = cbw->Lun;
2162
	if (common->lun < common->nluns)
2163
		common->curlun = common->luns[common->lun];
2164 2165
	else
		common->curlun = NULL;
2166
	common->tag = cbw->Tag;
2167 2168 2169
	return 0;
}

2170
static int get_next_command(struct fsg_common *common)
2171 2172 2173 2174
{
	struct fsg_buffhd	*bh;
	int			rc = 0;

2175
	/* Wait for the next buffer to become available */
2176
	bh = common->next_buffhd_to_fill;
2177
	while (bh->state != BUF_STATE_EMPTY) {
2178
		rc = sleep_thread(common, true);
2179 2180 2181
		if (rc)
			return rc;
	}
2182

2183
	/* Queue a request to read a Bulk-only CBW */
2184
	set_bulk_out_req_length(common, bh, US_BULK_CB_WRAP_LEN);
2185
	if (!start_out_transfer(common, bh))
2186 2187
		/* Don't know what to do if common->fsg is NULL */
		return -EIO;
2188

2189 2190
	/*
	 * We will drain the buffer in software, which means we
2191
	 * can reuse it for the next filling.  No need to advance
2192 2193
	 * next_buffhd_to_fill.
	 */
2194

2195 2196
	/* Wait for the CBW to arrive */
	while (bh->state != BUF_STATE_FULL) {
2197
		rc = sleep_thread(common, true);
2198 2199
		if (rc)
			return rc;
2200
	}
2201
	smp_rmb();
2202
	rc = fsg_is_set(common) ? received_cbw(common->fsg, bh) : -EIO;
2203 2204
	bh->state = BUF_STATE_EMPTY;

2205 2206 2207 2208 2209 2210
	return rc;
}


/*-------------------------------------------------------------------------*/

2211
static int alloc_request(struct fsg_common *common, struct usb_ep *ep,
2212 2213 2214 2215 2216
		struct usb_request **preq)
{
	*preq = usb_ep_alloc_request(ep, GFP_ATOMIC);
	if (*preq)
		return 0;
2217
	ERROR(common, "can't allocate request for %s\n", ep->name);
2218 2219 2220
	return -ENOMEM;
}

2221 2222
/* Reset interface setting and re-init endpoint state (toggle etc). */
static int do_set_interface(struct fsg_common *common, struct fsg_dev *new_fsg)
2223
{
2224 2225
	struct fsg_dev *fsg;
	int i, rc = 0;
2226

2227 2228
	if (common->running)
		DBG(common, "reset interface\n");
2229 2230 2231

reset:
	/* Deallocate the requests */
2232 2233
	if (common->fsg) {
		fsg = common->fsg;
2234

2235
		for (i = 0; i < common->fsg_num_buffers; ++i) {
2236
			struct fsg_buffhd *bh = &common->buffhds[i];
2237

2238 2239 2240 2241 2242 2243 2244 2245
			if (bh->inreq) {
				usb_ep_free_request(fsg->bulk_in, bh->inreq);
				bh->inreq = NULL;
			}
			if (bh->outreq) {
				usb_ep_free_request(fsg->bulk_out, bh->outreq);
				bh->outreq = NULL;
			}
2246
		}
2247 2248 2249 2250

		/* Disable the endpoints */
		if (fsg->bulk_in_enabled) {
			usb_ep_disable(fsg->bulk_in);
2251
			fsg->bulk_in->driver_data = NULL;
2252 2253 2254 2255
			fsg->bulk_in_enabled = 0;
		}
		if (fsg->bulk_out_enabled) {
			usb_ep_disable(fsg->bulk_out);
2256
			fsg->bulk_out->driver_data = NULL;
2257
			fsg->bulk_out_enabled = 0;
2258 2259
		}

2260 2261
		common->fsg = NULL;
		wake_up(&common->fsg_wait);
2262 2263
	}

2264
	common->running = 0;
2265
	if (!new_fsg || rc)
2266 2267
		return rc;

2268 2269
	common->fsg = new_fsg;
	fsg = common->fsg;
2270

2271
	/* Enable the endpoints */
2272 2273 2274 2275
	rc = config_ep_by_speed(common->gadget, &(fsg->function), fsg->bulk_in);
	if (rc)
		goto reset;
	rc = usb_ep_enable(fsg->bulk_in);
2276 2277
	if (rc)
		goto reset;
2278
	fsg->bulk_in->driver_data = common;
2279
	fsg->bulk_in_enabled = 1;
2280

2281 2282 2283 2284 2285
	rc = config_ep_by_speed(common->gadget, &(fsg->function),
				fsg->bulk_out);
	if (rc)
		goto reset;
	rc = usb_ep_enable(fsg->bulk_out);
2286 2287
	if (rc)
		goto reset;
2288
	fsg->bulk_out->driver_data = common;
2289
	fsg->bulk_out_enabled = 1;
2290
	common->bulk_out_maxpacket = usb_endpoint_maxp(fsg->bulk_out->desc);
2291 2292 2293
	clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);

	/* Allocate the requests */
2294
	for (i = 0; i < common->fsg_num_buffers; ++i) {
2295 2296 2297
		struct fsg_buffhd	*bh = &common->buffhds[i];

		rc = alloc_request(common, fsg->bulk_in, &bh->inreq);
2298
		if (rc)
2299
			goto reset;
2300
		rc = alloc_request(common, fsg->bulk_out, &bh->outreq);
2301
		if (rc)
2302
			goto reset;
2303 2304 2305 2306
		bh->inreq->buf = bh->outreq->buf = bh->buf;
		bh->inreq->context = bh->outreq->context = bh;
		bh->inreq->complete = bulk_in_complete;
		bh->outreq->complete = bulk_out_complete;
2307
	}
2308

2309 2310
	common->running = 1;
	for (i = 0; i < common->nluns; ++i)
2311 2312 2313
		if (common->luns[i])
			common->luns[i]->unit_attention_data =
				SS_RESET_OCCURRED;
2314 2315 2316 2317
	return rc;
}


2318 2319 2320 2321 2322
/****************************** ALT CONFIGS ******************************/

static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
	struct fsg_dev *fsg = fsg_from_func(f);
2323
	fsg->common->new_fsg = fsg;
2324
	raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
2325
	return USB_GADGET_DELAYED_STATUS;
2326 2327 2328 2329 2330
}

static void fsg_disable(struct usb_function *f)
{
	struct fsg_dev *fsg = fsg_from_func(f);
2331
	fsg->common->new_fsg = NULL;
2332
	raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
2333 2334 2335
}


2336 2337
/*-------------------------------------------------------------------------*/

2338
static void handle_exception(struct fsg_common *common)
2339 2340 2341 2342 2343 2344 2345 2346
{
	siginfo_t		info;
	int			i;
	struct fsg_buffhd	*bh;
	enum fsg_state		old_state;
	struct fsg_lun		*curlun;
	unsigned int		exception_req_tag;

2347 2348 2349 2350
	/*
	 * Clear the existing signals.  Anything but SIGUSR1 is converted
	 * into a high-priority EXIT exception.
	 */
2351
	for (;;) {
2352 2353
		int sig =
			dequeue_signal_lock(current, &current->blocked, &info);
2354 2355 2356
		if (!sig)
			break;
		if (sig != SIGUSR1) {
2357 2358 2359
			if (common->state < FSG_STATE_EXIT)
				DBG(common, "Main thread exiting on signal\n");
			raise_exception(common, FSG_STATE_EXIT);
2360 2361 2362 2363
		}
	}

	/* Cancel all the pending transfers */
2364
	if (likely(common->fsg)) {
2365
		for (i = 0; i < common->fsg_num_buffers; ++i) {
2366 2367 2368 2369 2370 2371
			bh = &common->buffhds[i];
			if (bh->inreq_busy)
				usb_ep_dequeue(common->fsg->bulk_in, bh->inreq);
			if (bh->outreq_busy)
				usb_ep_dequeue(common->fsg->bulk_out,
					       bh->outreq);
2372 2373
		}

2374 2375 2376
		/* Wait until everything is idle */
		for (;;) {
			int num_active = 0;
2377
			for (i = 0; i < common->fsg_num_buffers; ++i) {
2378 2379 2380 2381 2382
				bh = &common->buffhds[i];
				num_active += bh->inreq_busy + bh->outreq_busy;
			}
			if (num_active == 0)
				break;
2383
			if (sleep_thread(common, true))
2384 2385 2386 2387 2388 2389 2390 2391 2392
				return;
		}

		/* Clear out the controller's fifos */
		if (common->fsg->bulk_in_enabled)
			usb_ep_fifo_flush(common->fsg->bulk_in);
		if (common->fsg->bulk_out_enabled)
			usb_ep_fifo_flush(common->fsg->bulk_out);
	}
2393

2394 2395 2396 2397
	/*
	 * Reset the I/O buffer states and pointers, the SCSI
	 * state, and the exception.  Then invoke the handler.
	 */
2398
	spin_lock_irq(&common->lock);
2399

2400
	for (i = 0; i < common->fsg_num_buffers; ++i) {
2401
		bh = &common->buffhds[i];
2402 2403
		bh->state = BUF_STATE_EMPTY;
	}
2404 2405 2406 2407
	common->next_buffhd_to_fill = &common->buffhds[0];
	common->next_buffhd_to_drain = &common->buffhds[0];
	exception_req_tag = common->exception_req_tag;
	old_state = common->state;
2408 2409

	if (old_state == FSG_STATE_ABORT_BULK_OUT)
2410
		common->state = FSG_STATE_STATUS_PHASE;
2411
	else {
2412
		for (i = 0; i < common->nluns; ++i) {
2413 2414 2415
			curlun = common->luns[i];
			if (!curlun)
				continue;
2416
			curlun->prevent_medium_removal = 0;
2417 2418
			curlun->sense_data = SS_NO_SENSE;
			curlun->unit_attention_data = SS_NO_SENSE;
2419 2420 2421
			curlun->sense_data_info = 0;
			curlun->info_valid = 0;
		}
2422
		common->state = FSG_STATE_IDLE;
2423
	}
2424
	spin_unlock_irq(&common->lock);
2425 2426 2427 2428

	/* Carry out any extra actions required for the exception */
	switch (old_state) {
	case FSG_STATE_ABORT_BULK_OUT:
2429 2430 2431 2432 2433
		send_status(common);
		spin_lock_irq(&common->lock);
		if (common->state == FSG_STATE_STATUS_PHASE)
			common->state = FSG_STATE_IDLE;
		spin_unlock_irq(&common->lock);
2434 2435 2436
		break;

	case FSG_STATE_RESET:
2437 2438
		/*
		 * In case we were forced against our will to halt a
2439
		 * bulk endpoint, clear the halt now.  (The SuperH UDC
2440 2441
		 * requires this.)
		 */
2442 2443 2444 2445 2446
		if (!fsg_is_set(common))
			break;
		if (test_and_clear_bit(IGNORE_BULK_OUT,
				       &common->fsg->atomic_bitflags))
			usb_ep_clear_halt(common->fsg->bulk_in);
2447

2448 2449
		if (common->ep0_req_tag == exception_req_tag)
			ep0_queue(common);	/* Complete the status stage */
2450

2451 2452
		/*
		 * Technically this should go here, but it would only be
2453
		 * a waste of time.  Ditto for the INTERFACE_CHANGE and
2454 2455
		 * CONFIG_CHANGE cases.
		 */
2456
		/* for (i = 0; i < common->nluns; ++i) */
2457 2458 2459
		/*	if (common->luns[i]) */
		/*		common->luns[i]->unit_attention_data = */
		/*			SS_RESET_OCCURRED;  */
2460 2461 2462
		break;

	case FSG_STATE_CONFIG_CHANGE:
2463
		do_set_interface(common, common->new_fsg);
2464 2465
		if (common->new_fsg)
			usb_composite_setup_continue(common->cdev);
2466 2467 2468 2469
		break;

	case FSG_STATE_EXIT:
	case FSG_STATE_TERMINATED:
2470
		do_set_interface(common, NULL);		/* Free resources */
2471 2472 2473
		spin_lock_irq(&common->lock);
		common->state = FSG_STATE_TERMINATED;	/* Stop the thread */
		spin_unlock_irq(&common->lock);
2474
		break;
2475 2476 2477 2478 2479 2480 2481 2482

	case FSG_STATE_INTERFACE_CHANGE:
	case FSG_STATE_DISCONNECT:
	case FSG_STATE_COMMAND_PHASE:
	case FSG_STATE_DATA_PHASE:
	case FSG_STATE_STATUS_PHASE:
	case FSG_STATE_IDLE:
		break;
2483 2484 2485 2486 2487 2488
	}
}


/*-------------------------------------------------------------------------*/

2489
static int fsg_main_thread(void *common_)
2490
{
2491
	struct fsg_common	*common = common_;
2492

2493 2494 2495 2496
	/*
	 * Allow the thread to be killed by a signal, but set the signal mask
	 * to block everything but INT, TERM, KILL, and USR1.
	 */
2497 2498 2499 2500 2501 2502 2503 2504
	allow_signal(SIGINT);
	allow_signal(SIGTERM);
	allow_signal(SIGKILL);
	allow_signal(SIGUSR1);

	/* Allow the thread to be frozen */
	set_freezable();

2505 2506
	/*
	 * Arrange for userspace references to be interpreted as kernel
2507
	 * pointers.  That way we can pass a kernel pointer to a routine
2508 2509
	 * that expects a __user pointer and it will work okay.
	 */
2510 2511 2512
	set_fs(get_ds());

	/* The main loop */
2513 2514 2515
	while (common->state != FSG_STATE_TERMINATED) {
		if (exception_in_progress(common) || signal_pending(current)) {
			handle_exception(common);
2516 2517 2518
			continue;
		}

2519
		if (!common->running) {
2520
			sleep_thread(common, true);
2521 2522 2523
			continue;
		}

2524
		if (get_next_command(common))
2525 2526
			continue;

2527 2528 2529 2530
		spin_lock_irq(&common->lock);
		if (!exception_in_progress(common))
			common->state = FSG_STATE_DATA_PHASE;
		spin_unlock_irq(&common->lock);
2531

2532
		if (do_scsi_command(common) || finish_reply(common))
2533 2534
			continue;

2535 2536 2537 2538
		spin_lock_irq(&common->lock);
		if (!exception_in_progress(common))
			common->state = FSG_STATE_STATUS_PHASE;
		spin_unlock_irq(&common->lock);
2539

2540
		if (send_status(common))
2541 2542
			continue;

2543 2544 2545 2546
		spin_lock_irq(&common->lock);
		if (!exception_in_progress(common))
			common->state = FSG_STATE_IDLE;
		spin_unlock_irq(&common->lock);
2547
	}
2548

2549 2550 2551
	spin_lock_irq(&common->lock);
	common->thread_task = NULL;
	spin_unlock_irq(&common->lock);
2552

2553 2554
	if (!common->ops || !common->ops->thread_exits
	 || common->ops->thread_exits(common) < 0) {
2555
		struct fsg_lun **curlun_it = common->luns;
2556 2557 2558
		unsigned i = common->nluns;

		down_write(&common->filesem);
2559 2560 2561
		for (; i--; ++curlun_it) {
			struct fsg_lun *curlun = *curlun_it;
			if (!curlun || !fsg_lun_is_open(curlun))
2562 2563 2564 2565 2566 2567 2568
				continue;

			fsg_lun_close(curlun);
			curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT;
		}
		up_write(&common->filesem);
	}
2569

2570
	/* Let fsg_unbind() know the thread has exited */
2571
	complete_and_exit(&common->thread_notifier, 0);
2572 2573 2574
}


2575
/*************************** DEVICE ATTRIBUTES ***************************/
2576

2577 2578
/* sysfs: show the LUN's read-only flag. */
static ssize_t ro_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct fsg_lun		*curlun = fsg_lun_from_dev(dev);

	return fsg_show_ro(curlun, buf);
}

/* sysfs: show the LUN's nofua (ignore Force Unit Access) flag. */
static ssize_t nofua_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct fsg_lun		*curlun = fsg_lun_from_dev(dev);

	return fsg_show_nofua(curlun, buf);
}

/* sysfs: show the path of the LUN's backing file (guarded by filesem). */
static ssize_t file_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct fsg_lun		*curlun = fsg_lun_from_dev(dev);
	struct rw_semaphore	*filesem = dev_get_drvdata(dev);

	return fsg_show_file(curlun, filesem, buf);
}

/* sysfs: set the LUN's read-only flag (guarded by filesem). */
static ssize_t ro_store(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct fsg_lun		*curlun = fsg_lun_from_dev(dev);
	struct rw_semaphore	*filesem = dev_get_drvdata(dev);

	return fsg_store_ro(curlun, filesem, buf, count);
}

/* sysfs: set the LUN's nofua flag. */
static ssize_t nofua_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct fsg_lun		*curlun = fsg_lun_from_dev(dev);

	return fsg_store_nofua(curlun, buf, count);
}

/* sysfs: change the LUN's backing file (guarded by filesem). */
static ssize_t file_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct fsg_lun		*curlun = fsg_lun_from_dev(dev);
	struct rw_semaphore	*filesem = dev_get_drvdata(dev);

	return fsg_store_file(curlun, filesem, buf, count);
}

2627
static DEVICE_ATTR_RW(nofua);
/* mode will be set in fsg_lun_dev_is_visible() */
static DEVICE_ATTR(ro, 0, ro_show, ro_store);
static DEVICE_ATTR(file, 0, file_show, file_store);
2631

2632 2633 2634
/****************************** FSG COMMON ******************************/

static void fsg_common_release(struct kref *ref);
2635

2636
/* device release callback for a LUN; the LUN memory is freed elsewhere. */
static void fsg_lun_release(struct device *dev)
{
	/* Nothing needs to be done */
}

2641
void fsg_common_get(struct fsg_common *common)
2642
{
2643
	kref_get(&common->ref);
2644
}
2645
EXPORT_SYMBOL_GPL(fsg_common_get);
2646

2647
void fsg_common_put(struct fsg_common *common)
2648 2649 2650
{
	kref_put(&common->ref, fsg_common_release);
}
2651
EXPORT_SYMBOL_GPL(fsg_common_put);
2652

2653 2654 2655 2656 2657 2658 2659 2660 2661 2662
/* check if fsg_num_buffers is within a valid range (2..4) */
static inline int fsg_num_buffers_validate(unsigned int fsg_num_buffers)
{
	if (fsg_num_buffers < 2 || fsg_num_buffers > 4) {
		pr_err("fsg_num_buffers %u is out of range (%d to %d)\n",
		       fsg_num_buffers, 2, 4);
		return -EINVAL;
	}
	return 0;
}

2663
static struct fsg_common *fsg_common_setup(struct fsg_common *common)
2664 2665 2666 2667 2668 2669 2670 2671 2672 2673 2674 2675 2676 2677 2678 2679 2680 2681 2682
{
	if (!common) {
		common = kzalloc(sizeof(*common), GFP_KERNEL);
		if (!common)
			return ERR_PTR(-ENOMEM);
		common->free_storage_on_release = 1;
	} else {
		common->free_storage_on_release = 0;
	}
	init_rwsem(&common->filesem);
	spin_lock_init(&common->lock);
	kref_init(&common->ref);
	init_completion(&common->thread_notifier);
	init_waitqueue_head(&common->fsg_wait);
	common->state = FSG_STATE_TERMINATED;

	return common;
}

2683 2684 2685 2686
/* Record whether per-LUN sysfs devices should be registered. */
void fsg_common_set_sysfs(struct fsg_common *common, bool sysfs)
{
	common->sysfs = sysfs;
}
EXPORT_SYMBOL_GPL(fsg_common_set_sysfs);
2688

2689 2690 2691 2692 2693 2694 2695 2696 2697 2698 2699 2700
/* Free the data buffers of @n buffer heads, then the head array itself. */
static void _fsg_common_free_buffers(struct fsg_buffhd *buffhds, unsigned n)
{
	struct fsg_buffhd *bh;

	if (!buffhds)
		return;

	for (bh = buffhds; n; --n, ++bh)
		kfree(bh->buf);
	kfree(buffhds);
}

2701 2702 2703 2704 2705 2706 2707 2708 2709 2710 2711 2712 2713 2714 2715 2716 2717 2718 2719 2720 2721 2722 2723 2724 2725 2726 2727 2728 2729 2730 2731 2732 2733 2734 2735 2736 2737 2738 2739 2740 2741 2742
/*
 * Replace the common state's I/O buffers with @n freshly allocated buffer
 * heads linked into a cycle.  Returns 0, or a negative errno on a bad @n
 * or allocation failure (in which case the old buffers are kept).
 */
int fsg_common_set_num_buffers(struct fsg_common *common, unsigned int n)
{
	struct fsg_buffhd *buffhds;
	unsigned int i;
	int rc;

	rc = fsg_num_buffers_validate(n);
	if (rc != 0)
		return rc;

	buffhds = kcalloc(n, sizeof(*buffhds), GFP_KERNEL);
	if (!buffhds)
		return -ENOMEM;

	/* Allocate each data buffer and link the heads into a cycle. */
	for (i = 0; i < n; ++i) {
		buffhds[i].buf = kmalloc(FSG_BUFLEN, GFP_KERNEL);
		if (unlikely(!buffhds[i].buf))
			goto error_release;
		buffhds[i].next = &buffhds[(i + 1) % n];
	}

	/* Success: swap in the new ring, freeing any previous one. */
	_fsg_common_free_buffers(common->buffhds, common->fsg_num_buffers);
	common->fsg_num_buffers = n;
	common->buffhds = buffhds;

	return 0;

error_release:
	/* Unallocated "buf" pointers are NULL, so freeing them is harmless. */
	_fsg_common_free_buffers(buffhds, n);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(fsg_common_set_num_buffers);
2744

2745
void fsg_common_remove_lun(struct fsg_lun *lun)
2746
{
2747
	if (device_is_registered(&lun->dev))
2748 2749 2750 2751
		device_unregister(&lun->dev);
	fsg_lun_close(lun);
	kfree(lun);
}
2752
EXPORT_SYMBOL_GPL(fsg_common_remove_lun);
2753 2754 2755 2756 2757 2758 2759

static void _fsg_common_remove_luns(struct fsg_common *common, int n)
{
	int i;

	for (i = 0; i < n; ++i)
		if (common->luns[i]) {
2760
			fsg_common_remove_lun(common->luns[i]);
2761 2762 2763 2764 2765 2766 2767 2768
			common->luns[i] = NULL;
		}
}

/* Remove every configured LUN (the luns array itself is kept). */
void fsg_common_remove_luns(struct fsg_common *common)
{
	_fsg_common_remove_luns(common, common->nluns);
}
EXPORT_SYMBOL_GPL(fsg_common_remove_luns);
2770 2771 2772 2773 2774 2775 2776

/* Remove every LUN and free the luns pointer array as well. */
void fsg_common_free_luns(struct fsg_common *common)
{
	fsg_common_remove_luns(common);
	kfree(common->luns);
	common->luns = NULL;
}
EXPORT_SYMBOL_GPL(fsg_common_free_luns);
2778 2779 2780 2781 2782 2783 2784 2785 2786 2787 2788

int fsg_common_set_nluns(struct fsg_common *common, int nluns)
{
	struct fsg_lun **curlun;

	/* Find out how many LUNs there should be */
	if (nluns < 1 || nluns > FSG_MAX_LUNS) {
		pr_err("invalid number of LUNs: %u\n", nluns);
		return -EINVAL;
	}

2789
	curlun = kcalloc(FSG_MAX_LUNS, sizeof(*curlun), GFP_KERNEL);
2790 2791 2792 2793 2794 2795 2796 2797 2798 2799 2800
	if (unlikely(!curlun))
		return -ENOMEM;

	if (common->luns)
		fsg_common_free_luns(common);

	common->luns = curlun;
	common->nluns = nluns;

	return 0;
}
2801
EXPORT_SYMBOL_GPL(fsg_common_set_nluns);
2802

2803 2804 2805 2806 2807
/* Install the optional thread-lifecycle callbacks. */
void fsg_common_set_ops(struct fsg_common *common,
			const struct fsg_operations *ops)
{
	common->ops = ops;
}
EXPORT_SYMBOL_GPL(fsg_common_set_ops);
2809

2810 2811 2812 2813 2814
/* Free all I/O buffers and clear the pointer in the common state. */
void fsg_common_free_buffers(struct fsg_common *common)
{
	_fsg_common_free_buffers(common->buffhds, common->fsg_num_buffers);
	common->buffhds = NULL;
}
EXPORT_SYMBOL_GPL(fsg_common_free_buffers);
2816

2817 2818 2819 2820 2821 2822 2823 2824 2825 2826 2827 2828 2829 2830 2831 2832 2833 2834 2835 2836 2837 2838
/*
 * Bind the common state to a composite device: cache gadget/ep0 pointers,
 * attach the gadget strings, and decide whether stalling is usable.
 * Returns 0 or the error from usb_gstrings_attach().
 */
int fsg_common_set_cdev(struct fsg_common *common,
			 struct usb_composite_dev *cdev, bool can_stall)
{
	struct usb_string *us;

	common->gadget = cdev->gadget;
	common->ep0 = cdev->gadget->ep0;
	common->ep0req = cdev->req;
	common->cdev = cdev;

	us = usb_gstrings_attach(cdev, fsg_strings_array,
				 ARRAY_SIZE(fsg_strings));
	if (IS_ERR(us))
		return PTR_ERR(us);

	fsg_intf_desc.iInterface = us[FSG_STRING_INTERFACE].id;

	/*
	 * Some peripheral controllers are known not to be able to
	 * halt bulk endpoints correctly.  If one of them is present,
	 * disable stalls.
	 */
	common->can_stall = can_stall &&
			gadget_is_stall_supported(common->gadget);

	return 0;
}
EXPORT_SYMBOL_GPL(fsg_common_set_cdev);
2845

2846 2847 2848 2849 2850 2851
/* Per-LUN sysfs attributes; visibility/mode decided in fsg_lun_dev_is_visible() */
static struct attribute *fsg_lun_dev_attrs[] = {
	&dev_attr_ro.attr,
	&dev_attr_file.attr,
	&dev_attr_nofua.attr,
	NULL
};
2852

2853 2854 2855 2856 2857
static umode_t fsg_lun_dev_is_visible(struct kobject *kobj,
				      struct attribute *attr, int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	struct fsg_lun *lun = fsg_lun_from_dev(dev);
2858

2859 2860 2861 2862 2863 2864
	if (attr == &dev_attr_ro.attr)
		return lun->cdrom ? S_IRUGO : (S_IWUSR | S_IRUGO);
	if (attr == &dev_attr_file.attr)
		return lun->removable ? (S_IWUSR | S_IRUGO) : S_IRUGO;
	return attr->mode;
}
2865

2866 2867 2868 2869
/* Attribute group applying the per-LUN visibility rules */
static const struct attribute_group fsg_lun_dev_group = {
	.attrs = fsg_lun_dev_attrs,
	.is_visible = fsg_lun_dev_is_visible,
};
2870

2871 2872 2873 2874
/* NULL-terminated list handed to the LUN's struct device */
static const struct attribute_group *fsg_lun_dev_groups[] = {
	&fsg_lun_dev_group,
	NULL
};
2875 2876 2877 2878 2879 2880 2881 2882 2883 2884 2885 2886 2887 2888 2889 2890 2891 2892 2893 2894 2895 2896 2897 2898 2899 2900 2901 2902 2903 2904 2905 2906 2907 2908 2909 2910 2911

int fsg_common_create_lun(struct fsg_common *common, struct fsg_lun_config *cfg,
			  unsigned int id, const char *name,
			  const char **name_pfx)
{
	struct fsg_lun *lun;
	char *pathbuf, *p;
	int rc = -ENOMEM;

	if (!common->nluns || !common->luns)
		return -ENODEV;

	if (common->luns[id])
		return -EBUSY;

	if (!cfg->filename && !cfg->removable) {
		pr_err("no file given for LUN%d\n", id);
		return -EINVAL;
	}

	lun = kzalloc(sizeof(*lun), GFP_KERNEL);
	if (!lun)
		return -ENOMEM;

	lun->name_pfx = name_pfx;

	lun->cdrom = !!cfg->cdrom;
	lun->ro = cfg->cdrom || cfg->ro;
	lun->initially_ro = lun->ro;
	lun->removable = !!cfg->removable;

	if (!common->sysfs) {
		/* we DON'T own the name!*/
		lun->name = name;
	} else {
		lun->dev.release = fsg_lun_release;
		lun->dev.parent = &common->gadget->dev;
2912
		lun->dev.groups = fsg_lun_dev_groups;
2913
		dev_set_drvdata(&lun->dev, &common->filesem);
2914
		dev_set_name(&lun->dev, "%s", name);
2915 2916
		lun->name = dev_name(&lun->dev);

2917
		rc = device_register(&lun->dev);
2918 2919
		if (rc) {
			pr_info("failed to register LUN%d: %d\n", id, rc);
2920
			put_device(&lun->dev);
2921 2922 2923 2924 2925 2926 2927 2928 2929 2930 2931 2932 2933 2934 2935 2936 2937
			goto error_sysfs;
		}
	}

	common->luns[id] = lun;

	if (cfg->filename) {
		rc = fsg_lun_open(lun, cfg->filename);
		if (rc)
			goto error_lun;
	}

	pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
	p = "(no medium)";
	if (fsg_lun_is_open(lun)) {
		p = "(error)";
		if (pathbuf) {
M
Miklos Szeredi 已提交
2938
			p = file_path(lun->filp, pathbuf, PATH_MAX);
2939 2940 2941 2942 2943 2944 2945 2946 2947 2948 2949 2950 2951 2952
			if (IS_ERR(p))
				p = "(error)";
		}
	}
	pr_info("LUN: %s%s%sfile: %s\n",
	      lun->removable ? "removable " : "",
	      lun->ro ? "read only " : "",
	      lun->cdrom ? "CD-ROM " : "",
	      p);
	kfree(pathbuf);

	return 0;

error_lun:
2953
	if (device_is_registered(&lun->dev))
2954 2955 2956 2957 2958 2959 2960
		device_unregister(&lun->dev);
	fsg_lun_close(lun);
	common->luns[id] = NULL;
error_sysfs:
	kfree(lun);
	return rc;
}
2961
EXPORT_SYMBOL_GPL(fsg_common_create_lun);
2962 2963 2964 2965 2966 2967 2968 2969 2970 2971 2972 2973 2974 2975 2976 2977 2978 2979 2980 2981 2982

/*
 * Create one LUN per configured entry, named "lun0", "lun1", ...
 * On the first failure every LUN created so far is torn down again.
 */
int fsg_common_create_luns(struct fsg_common *common, struct fsg_config *cfg)
{
	char buf[8]; /* "lun" + up to 4 decimal digits + NUL */
	int ret;
	int i;

	for (i = 0; i < common->nluns; ++i) {
		snprintf(buf, sizeof(buf), "lun%d", i);
		ret = fsg_common_create_lun(common, &cfg->luns[i], i, buf,
					    NULL);
		if (ret)
			goto fail;
	}

	pr_info("Number of LUNs=%d\n", common->nluns);

	return 0;

fail:
	_fsg_common_remove_luns(common, i);
	return ret;
}
EXPORT_SYMBOL_GPL(fsg_common_create_luns);
2984

2985 2986 2987 2988 2989 2990 2991 2992 2993 2994 2995 2996 2997 2998 2999
/*
 * fsg_common_set_inquiry_string() - build the SCSI INQUIRY id string.
 * @common: function instance whose inquiry_string is filled in
 * @vn: vendor name, or NULL to use "Linux"
 * @pn: product name, or NULL to derive a default from the first LUN
 *
 * The string is formatted "%-8s%-16s%04x": 8-char vendor, 16-char
 * product and the default bcdDevice in hex.
 *
 * Fix: the old code unconditionally dereferenced (*common->luns),
 * crashing when this was called before any LUN existed (luns array or
 * luns[0] still NULL, e.g. during early configfs setup).  Fall back to
 * the "File-Stor Gadget" default in that case.
 */
void fsg_common_set_inquiry_string(struct fsg_common *common, const char *vn,
				   const char *pn)
{
	int i;

	/* Prepare inquiryString */
	i = get_default_bcdDevice();
	snprintf(common->inquiry_string, sizeof(common->inquiry_string),
		 "%-8s%-16s%04x", vn ?: "Linux",
		 /* Assume product name dependent on the first LUN */
		 pn ?: ((common->luns && common->luns[0] &&
			 common->luns[0]->cdrom)
		     ? "File-CD Gadget"
		     : "File-Stor Gadget"),
		 i);
}
EXPORT_SYMBOL_GPL(fsg_common_set_inquiry_string);
3001

3002 3003 3004 3005 3006 3007 3008 3009 3010 3011 3012 3013 3014 3015 3016 3017 3018
/*
 * Create and start the "file-storage" worker thread for @common.
 * On kthread_create() failure the state is set to FSG_STATE_TERMINATED
 * and the error is returned; common->thread_task keeps the ERR_PTR,
 * matching the original behaviour.
 */
int fsg_common_run_thread(struct fsg_common *common)
{
	common->state = FSG_STATE_IDLE;
	/* Tell the thread to start working */
	common->thread_task =
		kthread_create(fsg_main_thread, common, "file-storage");
	if (IS_ERR(common->thread_task)) {
		common->state = FSG_STATE_TERMINATED;
		return PTR_ERR(common->thread_task);
	}

	DBG(common, "I/O thread pid: %d\n", task_pid_nr(common->thread_task));

	wake_up_process(common->thread_task);

	return 0;
}
EXPORT_SYMBOL_GPL(fsg_common_run_thread);
3020 3021 3022

static void fsg_common_release(struct kref *ref)
{
3023
	struct fsg_common *common = container_of(ref, struct fsg_common, ref);
3024

3025 3026 3027 3028 3029 3030
	/* If the thread isn't already dead, tell it to exit now */
	if (common->state != FSG_STATE_TERMINATED) {
		raise_exception(common, FSG_STATE_EXIT);
		wait_for_completion(&common->thread_notifier);
	}

3031
	if (likely(common->luns)) {
3032
		struct fsg_lun **lun_it = common->luns;
3033 3034 3035
		unsigned i = common->nluns;

		/* In error recovery common->nluns may be zero. */
3036 3037 3038 3039
		for (; i; --i, ++lun_it) {
			struct fsg_lun *lun = *lun_it;
			if (!lun)
				continue;
3040
			fsg_lun_close(lun);
3041
		if (device_is_registered(&lun->dev))
3042
				device_unregister(&lun->dev);
3043
			kfree(lun);
3044
		}
3045

3046
		kfree(common->luns);
3047 3048
	}

3049
	_fsg_common_free_buffers(common->buffhds, common->fsg_num_buffers);
3050 3051 3052 3053 3054 3055 3056
	if (common->free_storage_on_release)
		kfree(common);
}


/*-------------------------------------------------------------------------*/

3057
static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
3058
{
3059 3060
	struct fsg_dev		*fsg = fsg_from_func(f);
	struct usb_gadget	*gadget = c->cdev->gadget;
3061 3062
	int			i;
	struct usb_ep		*ep;
3063 3064
	unsigned		max_burst;
	int			ret;
3065
	struct fsg_opts		*opts;
3066

3067 3068 3069 3070 3071 3072
	opts = fsg_opts_from_func_inst(f->fi);
	if (!opts->no_configfs) {
		ret = fsg_common_set_cdev(fsg->common, c->cdev,
					  fsg->common->can_stall);
		if (ret)
			return ret;
3073
		fsg_common_set_inquiry_string(fsg->common, NULL, NULL);
3074 3075 3076 3077 3078
		ret = fsg_common_run_thread(fsg->common);
		if (ret)
			return ret;
	}

3079 3080
	fsg->gadget = gadget;

3081 3082 3083
	/* New interface */
	i = usb_interface_id(c, f);
	if (i < 0)
3084
		goto fail;
3085 3086
	fsg_intf_desc.bInterfaceNumber = i;
	fsg->interface_number = i;
3087 3088 3089 3090 3091

	/* Find all the endpoints we will use */
	ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_in_desc);
	if (!ep)
		goto autoconf_fail;
3092
	ep->driver_data = fsg->common;	/* claim the endpoint */
3093 3094 3095 3096 3097
	fsg->bulk_in = ep;

	ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_out_desc);
	if (!ep)
		goto autoconf_fail;
3098
	ep->driver_data = fsg->common;	/* claim the endpoint */
3099 3100
	fsg->bulk_out = ep;

3101 3102 3103 3104 3105
	/* Assume endpoint addresses are the same for both speeds */
	fsg_hs_bulk_in_desc.bEndpointAddress =
		fsg_fs_bulk_in_desc.bEndpointAddress;
	fsg_hs_bulk_out_desc.bEndpointAddress =
		fsg_fs_bulk_out_desc.bEndpointAddress;
3106

3107 3108
	/* Calculate bMaxBurst, we know packet size is 1024 */
	max_burst = min_t(unsigned, FSG_BUFLEN / 1024, 15);
3109

3110 3111 3112
	fsg_ss_bulk_in_desc.bEndpointAddress =
		fsg_fs_bulk_in_desc.bEndpointAddress;
	fsg_ss_bulk_in_comp_desc.bMaxBurst = max_burst;
3113

3114 3115 3116
	fsg_ss_bulk_out_desc.bEndpointAddress =
		fsg_fs_bulk_out_desc.bEndpointAddress;
	fsg_ss_bulk_out_comp_desc.bMaxBurst = max_burst;
3117

3118 3119 3120 3121
	ret = usb_assign_descriptors(f, fsg_fs_function, fsg_hs_function,
			fsg_ss_function);
	if (ret)
		goto autoconf_fail;
3122

3123 3124 3125 3126
	return 0;

autoconf_fail:
	ERROR(fsg, "unable to autoconfigure all endpoints\n");
3127 3128 3129 3130 3131 3132 3133 3134
	i = -ENOTSUPP;
fail:
	/* terminate the thread */
	if (fsg->common->state != FSG_STATE_TERMINATED) {
		raise_exception(fsg->common, FSG_STATE_EXIT);
		wait_for_completion(&fsg->common->thread_notifier);
	}
	return i;
3135 3136
}

3137 3138 3139 3140 3141 3142 3143 3144 3145 3146 3147 3148 3149 3150 3151 3152
/****************************** ALLOCATE FUNCTION *************************/

/*
 * fsg_unbind() - detach the function from its configuration.  If this
 * fsg is the one currently in use, ask the worker thread to switch
 * away and wait until it has done so, then release the descriptors.
 */
static void fsg_unbind(struct usb_configuration *c, struct usb_function *f)
{
	struct fsg_dev		*fsg = fsg_from_func(f);
	struct fsg_common	*common = fsg->common;

	DBG(fsg, "unbind\n");
	if (common->fsg == fsg) {
		common->new_fsg = NULL;
		raise_exception(common, FSG_STATE_CONFIG_CHANGE);
		/* FIXME: make interruptible or killable somehow? */
		wait_event(common->fsg_wait, common->fsg != fsg);
	}

	usb_free_all_descriptors(&fsg->function);
}
3154

3155 3156 3157 3158 3159 3160 3161 3162 3163 3164 3165 3166 3167 3168 3169 3170 3171 3172 3173 3174 3175 3176 3177
/* Map a configfs item back to its containing fsg_lun_opts. */
static inline struct fsg_lun_opts *to_fsg_lun_opts(struct config_item *item)
{
	return container_of(to_config_group(item), struct fsg_lun_opts, group);
}

/* Map a configfs item back to its containing fsg_opts. */
static inline struct fsg_opts *to_fsg_opts(struct config_item *item)
{
	return container_of(to_config_group(item), struct fsg_opts,
			    func_inst.group);
}

/* Generate the show/store plumbing for struct fsg_lun_opts attributes. */
CONFIGFS_ATTR_STRUCT(fsg_lun_opts);
CONFIGFS_ATTR_OPS(fsg_lun_opts);

/* configfs release: free the per-LUN options container. */
static void fsg_lun_attr_release(struct config_item *item)
{
	struct fsg_lun_opts *lun_opts;

	lun_opts = to_fsg_lun_opts(item);
	kfree(lun_opts);
}

static struct configfs_item_operations fsg_lun_item_ops = {
3178 3179 3180
	.release		= fsg_lun_attr_release,
	.show_attribute		= fsg_lun_opts_attr_show,
	.store_attribute	= fsg_lun_opts_attr_store,
3181 3182 3183 3184 3185 3186 3187 3188 3189 3190 3191 3192 3193 3194 3195 3196 3197 3198 3199 3200 3201 3202 3203 3204 3205 3206 3207 3208 3209 3210 3211 3212 3213 3214 3215 3216 3217 3218 3219 3220 3221 3222 3223 3224 3225 3226 3227 3228 3229 3230 3231 3232 3233 3234 3235 3236 3237 3238 3239 3240 3241 3242 3243 3244 3245 3246 3247 3248 3249
};

/* "file" attribute: show or change the LUN's backing file. */
static ssize_t fsg_lun_opts_file_show(struct fsg_lun_opts *opts, char *page)
{
	struct fsg_opts *fsg_opts;

	fsg_opts = to_fsg_opts(opts->group.cg_item.ci_parent);

	return fsg_show_file(opts->lun, &fsg_opts->common->filesem, page);
}

static ssize_t fsg_lun_opts_file_store(struct fsg_lun_opts *opts,
				       const char *page, size_t len)
{
	struct fsg_opts *fsg_opts;

	fsg_opts = to_fsg_opts(opts->group.cg_item.ci_parent);

	return fsg_store_file(opts->lun, &fsg_opts->common->filesem, page, len);
}

static struct fsg_lun_opts_attribute fsg_lun_opts_file =
	__CONFIGFS_ATTR(file, S_IRUGO | S_IWUSR, fsg_lun_opts_file_show,
			fsg_lun_opts_file_store);

/* "ro" attribute: show or change the LUN's read-only flag. */
static ssize_t fsg_lun_opts_ro_show(struct fsg_lun_opts *opts, char *page)
{
	return fsg_show_ro(opts->lun, page);
}

static ssize_t fsg_lun_opts_ro_store(struct fsg_lun_opts *opts,
				       const char *page, size_t len)
{
	struct fsg_opts *fsg_opts;

	fsg_opts = to_fsg_opts(opts->group.cg_item.ci_parent);

	return fsg_store_ro(opts->lun, &fsg_opts->common->filesem, page, len);
}

static struct fsg_lun_opts_attribute fsg_lun_opts_ro =
	__CONFIGFS_ATTR(ro, S_IRUGO | S_IWUSR, fsg_lun_opts_ro_show,
			fsg_lun_opts_ro_store);

/* "removable" attribute: show or change the LUN's removable flag. */
static ssize_t fsg_lun_opts_removable_show(struct fsg_lun_opts *opts,
					   char *page)
{
	return fsg_show_removable(opts->lun, page);
}

static ssize_t fsg_lun_opts_removable_store(struct fsg_lun_opts *opts,
				       const char *page, size_t len)
{
	return fsg_store_removable(opts->lun, page, len);
}

static struct fsg_lun_opts_attribute fsg_lun_opts_removable =
	__CONFIGFS_ATTR(removable, S_IRUGO | S_IWUSR,
			fsg_lun_opts_removable_show,
			fsg_lun_opts_removable_store);

/* "cdrom" attribute: show or change the LUN's CD-ROM emulation flag. */
static ssize_t fsg_lun_opts_cdrom_show(struct fsg_lun_opts *opts, char *page)
{
	return fsg_show_cdrom(opts->lun, page);
}

static ssize_t fsg_lun_opts_cdrom_store(struct fsg_lun_opts *opts,
				       const char *page, size_t len)
{
	struct fsg_opts *fsg_opts = to_fsg_opts(opts->group.cg_item.ci_parent);

	return fsg_store_cdrom(opts->lun, &fsg_opts->common->filesem, page,
			       len);
}

static struct fsg_lun_opts_attribute fsg_lun_opts_cdrom =
	__CONFIGFS_ATTR(cdrom, S_IRUGO | S_IWUSR, fsg_lun_opts_cdrom_show,
			fsg_lun_opts_cdrom_store);

/* "nofua" attribute: show or change the LUN's no-force-unit-access flag. */
static ssize_t fsg_lun_opts_nofua_show(struct fsg_lun_opts *opts, char *page)
{
	return fsg_show_nofua(opts->lun, page);
}

static ssize_t fsg_lun_opts_nofua_store(struct fsg_lun_opts *opts,
				       const char *page, size_t len)
{
	return fsg_store_nofua(opts->lun, page, len);
}

static struct fsg_lun_opts_attribute fsg_lun_opts_nofua =
	__CONFIGFS_ATTR(nofua, S_IRUGO | S_IWUSR, fsg_lun_opts_nofua_show,
			fsg_lun_opts_nofua_store);

/* Per-LUN configfs attributes. */
static struct configfs_attribute *fsg_lun_attrs[] = {
	&fsg_lun_opts_file.attr,
	&fsg_lun_opts_ro.attr,
	&fsg_lun_opts_removable.attr,
	&fsg_lun_opts_cdrom.attr,
	&fsg_lun_opts_nofua.attr,
	NULL,
};

/* Item type of a "lun.N" configfs group. */
static struct config_item_type fsg_lun_type = {
	.ct_item_ops	= &fsg_lun_item_ops,
	.ct_attrs	= fsg_lun_attrs,
	.ct_owner	= THIS_MODULE,
};

static struct config_group *fsg_lun_make(struct config_group *group,
					 const char *name)
{
	struct fsg_lun_opts *opts;
	struct fsg_opts *fsg_opts;
	struct fsg_lun_config config;
	char *num_str;
	u8 num;
	int ret;

	num_str = strchr(name, '.');
	if (!num_str) {
		pr_err("Unable to locate . in LUN.NUMBER\n");
		return ERR_PTR(-EINVAL);
	}
	num_str++;

	ret = kstrtou8(num_str, 0, &num);
	if (ret)
		return ERR_PTR(ret);

	fsg_opts = to_fsg_opts(&group->cg_item);
	if (num >= FSG_MAX_LUNS)
3315 3316
		return ERR_PTR(-ERANGE);

3317 3318 3319 3320 3321 3322 3323 3324 3325 3326 3327 3328 3329 3330 3331 3332 3333 3334 3335 3336 3337 3338 3339 3340 3341 3342 3343 3344 3345 3346 3347 3348 3349 3350 3351 3352 3353 3354 3355 3356 3357 3358 3359 3360 3361 3362 3363 3364 3365
	mutex_lock(&fsg_opts->lock);
	if (fsg_opts->refcnt || fsg_opts->common->luns[num]) {
		ret = -EBUSY;
		goto out;
	}

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts) {
		ret = -ENOMEM;
		goto out;
	}

	memset(&config, 0, sizeof(config));
	config.removable = true;

	ret = fsg_common_create_lun(fsg_opts->common, &config, num, name,
				    (const char **)&group->cg_item.ci_name);
	if (ret) {
		kfree(opts);
		goto out;
	}
	opts->lun = fsg_opts->common->luns[num];
	opts->lun_id = num;
	mutex_unlock(&fsg_opts->lock);

	config_group_init_type_name(&opts->group, name, &fsg_lun_type);

	return &opts->group;
out:
	mutex_unlock(&fsg_opts->lock);
	return ERR_PTR(ret);
}

static void fsg_lun_drop(struct config_group *group, struct config_item *item)
{
	struct fsg_lun_opts *lun_opts;
	struct fsg_opts *fsg_opts;

	lun_opts = to_fsg_lun_opts(item);
	fsg_opts = to_fsg_opts(&group->cg_item);

	mutex_lock(&fsg_opts->lock);
	if (fsg_opts->refcnt) {
		struct config_item *gadget;

		gadget = group->cg_item.ci_parent->ci_parent;
		unregister_gadget_item(gadget);
	}

3366
	fsg_common_remove_lun(lun_opts->lun);
3367 3368 3369 3370 3371 3372 3373 3374 3375 3376 3377 3378 3379 3380 3381 3382 3383 3384
	fsg_opts->common->luns[lun_opts->lun_id] = NULL;
	lun_opts->lun_id = 0;
	mutex_unlock(&fsg_opts->lock);

	config_item_put(item);
}

/* Generate the show/store plumbing for struct fsg_opts attributes. */
CONFIGFS_ATTR_STRUCT(fsg_opts);
CONFIGFS_ATTR_OPS(fsg_opts);

/* configfs release: drop the function instance reference. */
static void fsg_attr_release(struct config_item *item)
{
	struct fsg_opts *opts = to_fsg_opts(item);

	usb_put_function_instance(&opts->func_inst);
}

static struct configfs_item_operations fsg_item_ops = {
3385 3386 3387
	.release		= fsg_attr_release,
	.show_attribute		= fsg_opts_attr_show,
	.store_attribute	= fsg_opts_attr_store,
3388 3389 3390 3391 3392 3393 3394 3395 3396 3397 3398 3399 3400 3401 3402 3403 3404
};

/* "stall" attribute: report whether bulk endpoint halt is allowed. */
static ssize_t fsg_opts_stall_show(struct fsg_opts *opts, char *page)
{
	int result;

	mutex_lock(&opts->lock);
	result = sprintf(page, "%d", opts->common->can_stall);
	mutex_unlock(&opts->lock);

	return result;
}

/*
 * "stall" attribute: change whether bulk endpoint halt is allowed.
 * The setting may not be changed while the function is in use.
 */
static ssize_t fsg_opts_stall_store(struct fsg_opts *opts, const char *page,
				    size_t len)
{
	bool stall;
	int ret;

	mutex_lock(&opts->lock);
	if (opts->refcnt) {
		ret = -EBUSY;
		goto unlock;
	}

	ret = strtobool(page, &stall);
	if (!ret) {
		opts->common->can_stall = stall;
		ret = len;
	}
unlock:
	mutex_unlock(&opts->lock);
	return ret;
}

static struct fsg_opts_attribute fsg_opts_stall =
	__CONFIGFS_ATTR(stall, S_IRUGO | S_IWUSR, fsg_opts_stall_show,
			fsg_opts_stall_store);

#ifdef CONFIG_USB_GADGET_DEBUG_FILES
static ssize_t fsg_opts_num_buffers_show(struct fsg_opts *opts, char *page)
{
	int result;

	mutex_lock(&opts->lock);
	result = sprintf(page, "%d", opts->common->fsg_num_buffers);
	mutex_unlock(&opts->lock);

	return result;
}

static ssize_t fsg_opts_num_buffers_store(struct fsg_opts *opts,
					  const char *page, size_t len)
{
	int ret;
	u8 num;

	mutex_lock(&opts->lock);
	if (opts->refcnt) {
		ret = -EBUSY;
		goto end;
	}
	ret = kstrtou8(page, 0, &num);
	if (ret)
		goto end;

	ret = fsg_num_buffers_validate(num);
	if (ret)
		goto end;

	fsg_common_set_num_buffers(opts->common, num);
	ret = len;

end:
	mutex_unlock(&opts->lock);
	return ret;
}

static struct fsg_opts_attribute fsg_opts_num_buffers =
	__CONFIGFS_ATTR(num_buffers, S_IRUGO | S_IWUSR,
			fsg_opts_num_buffers_show,
			fsg_opts_num_buffers_store);

#endif

/* Top-level configfs attributes of the mass-storage function. */
static struct configfs_attribute *fsg_attrs[] = {
	&fsg_opts_stall.attr,
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
	&fsg_opts_num_buffers.attr,
#endif
	NULL,
};

/* mkdir/rmdir of "lun.N" children creates and drops LUNs. */
static struct configfs_group_operations fsg_group_ops = {
	.make_group	= fsg_lun_make,
	.drop_item	= fsg_lun_drop,
};

/* Item type of the function's configfs directory. */
static struct config_item_type fsg_func_type = {
	.ct_item_ops	= &fsg_item_ops,
	.ct_group_ops	= &fsg_group_ops,
	.ct_attrs	= fsg_attrs,
	.ct_owner	= THIS_MODULE,
};

3495 3496 3497 3498 3499 3500 3501 3502 3503 3504 3505 3506
/* Free a function instance: drop the common reference, then the opts. */
static void fsg_free_inst(struct usb_function_instance *fi)
{
	struct fsg_opts *opts;

	opts = fsg_opts_from_func_inst(fi);
	fsg_common_put(opts->common);
	kfree(opts);
}

static struct usb_function_instance *fsg_alloc_inst(void)
{
	struct fsg_opts *opts;
3507
	struct fsg_lun_config config;
3508 3509 3510 3511 3512
	int rc;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return ERR_PTR(-ENOMEM);
3513
	mutex_init(&opts->lock);
3514
	opts->func_inst.free_func_inst = fsg_free_inst;
3515
	opts->common = fsg_common_setup(opts->common);
3516 3517 3518 3519 3520 3521 3522 3523 3524 3525 3526 3527 3528 3529 3530
	if (IS_ERR(opts->common)) {
		rc = PTR_ERR(opts->common);
		goto release_opts;
	}
	rc = fsg_common_set_nluns(opts->common, FSG_MAX_LUNS);
	if (rc)
		goto release_opts;

	rc = fsg_common_set_num_buffers(opts->common,
					CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS);
	if (rc)
		goto release_luns;

	pr_info(FSG_DRIVER_DESC ", version: " FSG_DRIVER_VERSION "\n");

3531 3532 3533 3534
	memset(&config, 0, sizeof(config));
	config.removable = true;
	rc = fsg_common_create_lun(opts->common, &config, 0, "lun.0",
			(const char **)&opts->func_inst.group.cg_item.ci_name);
3535 3536 3537
	if (rc)
		goto release_buffers;

3538 3539 3540 3541 3542 3543 3544 3545
	opts->lun0.lun = opts->common->luns[0];
	opts->lun0.lun_id = 0;
	config_group_init_type_name(&opts->lun0.group, "lun.0", &fsg_lun_type);
	opts->default_groups[0] = &opts->lun0.group;
	opts->func_inst.group.default_groups = opts->default_groups;

	config_group_init_type_name(&opts->func_inst.group, "", &fsg_func_type);

3546 3547
	return &opts->func_inst;

3548 3549
release_buffers:
	fsg_common_free_buffers(opts->common);
3550 3551 3552 3553 3554 3555 3556 3557 3558 3559
release_luns:
	kfree(opts->common->luns);
release_opts:
	kfree(opts);
	return ERR_PTR(rc);
}

static void fsg_free(struct usb_function *f)
{
	struct fsg_dev *fsg;
3560
	struct fsg_opts *opts;
3561 3562

	fsg = container_of(f, struct fsg_dev, function);
3563 3564 3565 3566 3567
	opts = container_of(f->fi, struct fsg_opts, func_inst);

	mutex_lock(&opts->lock);
	opts->refcnt--;
	mutex_unlock(&opts->lock);
3568 3569 3570 3571 3572 3573 3574 3575 3576

	kfree(fsg);
}

static struct usb_function *fsg_alloc(struct usb_function_instance *fi)
{
	struct fsg_opts *opts = fsg_opts_from_func_inst(fi);
	struct fsg_common *common = opts->common;
	struct fsg_dev *fsg;
3577
	unsigned nluns, i;
3578 3579 3580 3581 3582

	fsg = kzalloc(sizeof(*fsg), GFP_KERNEL);
	if (unlikely(!fsg))
		return ERR_PTR(-ENOMEM);

3583
	mutex_lock(&opts->lock);
3584 3585 3586 3587 3588 3589 3590 3591 3592 3593
	if (!opts->refcnt) {
		for (nluns = i = 0; i < FSG_MAX_LUNS; ++i)
			if (common->luns[i])
				nluns = i + 1;
		if (!nluns)
			pr_warn("No LUNS defined, continuing anyway\n");
		else
			common->nluns = nluns;
		pr_info("Number of LUNs=%u\n", common->nluns);
	}
3594 3595
	opts->refcnt++;
	mutex_unlock(&opts->lock);
3596

3597 3598 3599 3600 3601 3602 3603 3604 3605 3606 3607 3608 3609 3610 3611 3612 3613
	fsg->function.name	= FSG_DRIVER_DESC;
	fsg->function.bind	= fsg_bind;
	fsg->function.unbind	= fsg_unbind;
	fsg->function.setup	= fsg_setup;
	fsg->function.set_alt	= fsg_set_alt;
	fsg->function.disable	= fsg_disable;
	fsg->function.free_func	= fsg_free;

	fsg->common               = common;

	return &fsg->function;
}

/* Register the "mass_storage" usb_function with the composite framework. */
DECLARE_USB_FUNCTION_INIT(mass_storage, fsg_alloc_inst, fsg_alloc);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Nazarewicz");

3614 3615
/************************* Module parameters *************************/

3616

3617
void fsg_config_from_params(struct fsg_config *cfg,
3618 3619
		       const struct fsg_module_parameters *params,
		       unsigned int fsg_num_buffers)
3620 3621
{
	struct fsg_lun_config *lun;
3622
	unsigned i;
3623 3624

	/* Configure LUNs */
3625 3626 3627 3628
	cfg->nluns =
		min(params->luns ?: (params->file_count ?: 1u),
		    (unsigned)FSG_MAX_LUNS);
	for (i = 0, lun = cfg->luns; i < cfg->nluns; ++i, ++lun) {
3629 3630
		lun->ro = !!params->ro[i];
		lun->cdrom = !!params->cdrom[i];
3631
		lun->removable = !!params->removable[i];
3632 3633 3634
		lun->filename =
			params->file_count > i && params->file[i][0]
			? params->file[i]
3635
			: NULL;
3636 3637
	}

3638
	/* Let MSF use defaults */
3639 3640
	cfg->vendor_name = NULL;
	cfg->product_name = NULL;
3641

3642 3643
	cfg->ops = NULL;
	cfg->private_data = NULL;
3644

3645 3646
	/* Finalise */
	cfg->can_stall = params->stall;
3647
	cfg->fsg_num_buffers = fsg_num_buffers;
3648
}
3649
EXPORT_SYMBOL_GPL(fsg_config_from_params);