f_mass_storage.c 85.3 KB
Newer Older
1
/*
2
 * f_mass_storage.c -- Mass Storage USB Composite Function
3 4
 *
 * Copyright (C) 2003-2008 Alan Stern
5
 * Copyright (C) 2009 Samsung Electronics
6
 *                    Author: Michal Nazarewicz <mina86@mina86.com>
7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
41 42 43 44 45
 * The Mass Storage Function acts as a USB Mass Storage device,
 * appearing to the host as a disk drive or as a CD-ROM drive.  In
 * addition to providing an example of a genuinely useful composite
 * function for a USB device, it also illustrates a technique of
 * double-buffering for increased throughput.
46
 *
47 48 49 50 51 52
 * For more information about MSF and in particular its module
 * parameters and sysfs interface read the
 * <Documentation/usb/mass-storage.txt> file.
 */

/*
53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74
 * MSF is configured by specifying a fsg_config structure.  It has the
 * following fields:
 *
 *	nluns		Number of LUNs function have (anywhere from 1
 *				to FSG_MAX_LUNS which is 8).
 *	luns		An array of LUN configuration values.  This
 *				should be filled for each LUN that
 *				function will include (ie. for "nluns"
 *				LUNs).  Each element of the array has
 *				the following fields:
 *	->filename	The path to the backing file for the LUN.
 *				Required if LUN is not marked as
 *				removable.
 *	->ro		Flag specifying access to the LUN shall be
 *				read-only.  This is implied if CD-ROM
 *				emulation is enabled as well as when
 *				it was impossible to open "filename"
 *				in R/W mode.
 *	->removable	Flag specifying that LUN shall be indicated as
 *				being removable.
 *	->cdrom		Flag specifying that LUN shall be reported as
 *				being a CD-ROM.
75 76
 *	->nofua		Flag specifying that FUA flag in SCSI WRITE(10,12)
 *				commands for this LUN shall be ignored.
77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95
 *
 *	vendor_name
 *	product_name
 *	release		Information used as a reply to INQUIRY
 *				request.  To use default set to NULL,
 *				NULL, 0xffff respectively.  The first
 *				field should be 8 and the second 16
 *				characters or less.
 *
 *	can_stall	Set to permit function to halt bulk endpoints.
 *				Disabled on some USB devices known not
 *				to work correctly.  You should set it
 *				to true.
 *
 * If "removable" is not set for a LUN then a backing file must be
 * specified.  If it is set, then NULL filename means the LUN's medium
 * is not loaded (an empty string as "filename" in the fsg_config
 * structure causes error).  The CD-ROM emulation includes a single
 * data track and no audio tracks; hence there need be only one
96
 * backing file per LUN.
97 98 99 100 101 102 103 104 105 106 107
 *
 * This function is heavily based on "File-backed Storage Gadget" by
 * Alan Stern which in turn is heavily based on "Gadget Zero" by David
 * Brownell.  The driver's SCSI command interface was based on the
 * "Information technology - Small Computer System Interface - 2"
 * document from X3T9.2 Project 375D, Revision 10L, 7-SEP-93,
 * available at <http://www.t10.org/ftp/t10/drafts/s2/s2-r10l.pdf>.
 * The single exception is opcode 0x23 (READ FORMAT CAPACITIES), which
 * was based on the "Universal Serial Bus Mass Storage Class UFI
 * Command Specification" document, Revision 1.0, December 14, 1998,
 * available at
108 109 110 111 112 113
 * <http://www.usb.org/developers/devclass_docs/usbmass-ufi10.pdf>.
 */

/*
 *				Driver Design
 *
114
 * The MSF is fairly straightforward.  There is a main kernel
115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136
 * thread that handles most of the work.  Interrupt routines field
 * callbacks from the controller driver: bulk- and interrupt-request
 * completion notifications, endpoint-0 events, and disconnect events.
 * Completion events are passed to the main thread by wakeup calls.  Many
 * ep0 requests are handled at interrupt time, but SetInterface,
 * SetConfiguration, and device reset requests are forwarded to the
 * thread in the form of "exceptions" using SIGUSR1 signals (since they
 * should interrupt any ongoing file I/O operations).
 *
 * The thread's main routine implements the standard command/data/status
 * parts of a SCSI interaction.  It and its subroutines are full of tests
 * for pending signals/exceptions -- all this polling is necessary since
 * the kernel has no setjmp/longjmp equivalents.  (Maybe this is an
 * indication that the driver really wants to be running in userspace.)
 * An important point is that so long as the thread is alive it keeps an
 * open reference to the backing file.  This will prevent unmounting
 * the backing file's underlying filesystem and could cause problems
 * during system shutdown, for example.  To prevent such problems, the
 * thread catches INT, TERM, and KILL signals and converts them into
 * an EXIT exception.
 *
 * In normal operation the main thread is started during the gadget's
137 138
 * fsg_bind() callback and stopped during fsg_unbind().  But it can
 * also exit when it receives a signal, and there's no point leaving
139
 * the gadget running when the thread is dead.  As of this moment, MSF
140 141
 * provides no way to deregister the gadget when thread dies -- maybe
 * a callback functions is needed.
142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218
 *
 * To provide maximum throughput, the driver uses a circular pipeline of
 * buffer heads (struct fsg_buffhd).  In principle the pipeline can be
 * arbitrarily long; in practice the benefits don't justify having more
 * than 2 stages (i.e., double buffering).  But it helps to think of the
 * pipeline as being a long one.  Each buffer head contains a bulk-in and
 * a bulk-out request pointer (since the buffer can be used for both
 * output and input -- directions always are given from the host's
 * point of view) as well as a pointer to the buffer and various state
 * variables.
 *
 * Use of the pipeline follows a simple protocol.  There is a variable
 * (fsg->next_buffhd_to_fill) that points to the next buffer head to use.
 * At any time that buffer head may still be in use from an earlier
 * request, so each buffer head has a state variable indicating whether
 * it is EMPTY, FULL, or BUSY.  Typical use involves waiting for the
 * buffer head to be EMPTY, filling the buffer either by file I/O or by
 * USB I/O (during which the buffer head is BUSY), and marking the buffer
 * head FULL when the I/O is complete.  Then the buffer will be emptied
 * (again possibly by USB I/O, during which it is marked BUSY) and
 * finally marked EMPTY again (possibly by a completion routine).
 *
 * A module parameter tells the driver to avoid stalling the bulk
 * endpoints wherever the transport specification allows.  This is
 * necessary for some UDCs like the SuperH, which cannot reliably clear a
 * halt on a bulk endpoint.  However, under certain circumstances the
 * Bulk-only specification requires a stall.  In such cases the driver
 * will halt the endpoint and set a flag indicating that it should clear
 * the halt in software during the next device reset.  Hopefully this
 * will permit everything to work correctly.  Furthermore, although the
 * specification allows the bulk-out endpoint to halt when the host sends
 * too much data, implementing this would cause an unavoidable race.
 * The driver will always use the "no-stall" approach for OUT transfers.
 *
 * One subtle point concerns sending status-stage responses for ep0
 * requests.  Some of these requests, such as device reset, can involve
 * interrupting an ongoing file I/O operation, which might take an
 * arbitrarily long time.  During that delay the host might give up on
 * the original ep0 request and issue a new one.  When that happens the
 * driver should not notify the host about completion of the original
 * request, as the host will no longer be waiting for it.  So the driver
 * assigns to each ep0 request a unique tag, and it keeps track of the
 * tag value of the request associated with a long-running exception
 * (device-reset, interface-change, or configuration-change).  When the
 * exception handler is finished, the status-stage response is submitted
 * only if the current ep0 request tag is equal to the exception request
 * tag.  Thus only the most recently received ep0 request will get a
 * status-stage response.
 *
 * Warning: This driver source file is too long.  It ought to be split up
 * into a header file plus about 3 separate .c files, to handle the details
 * of the Gadget, USB Mass Storage, and SCSI protocols.
 */


/* #define VERBOSE_DEBUG */
/* #define DUMP_MSGS */

#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/dcache.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kref.h>
#include <linux/kthread.h>
#include <linux/limits.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/freezer.h>

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
219
#include <linux/usb/composite.h>
220 221 222 223

#include "gadget_chips.h"


224
/*------------------------------------------------------------------------*/
225

226
#define FSG_DRIVER_DESC		"Mass Storage Function"
227
#define FSG_DRIVER_VERSION	"2009/09/11"
228 229 230 231 232 233 234 235

static const char fsg_string_interface[] = "Mass Storage";

#include "storage_common.c"


/*-------------------------------------------------------------------------*/

236
struct fsg_dev;
237 238 239 240
struct fsg_common;

/* MSF callback functions (invoked from the mass-storage worker thread) */
struct fsg_operations {
	/*
	 * Callback function to call when thread exits.  If no
	 * callback is set or it returns a value lower than zero, MSF
	 * will force eject all LUNs it operates on (including those
	 * marked as non-removable or with prevent_medium_removal flag
	 * set).
	 */
	int (*thread_exits)(struct fsg_common *common);

	/*
	 * Called prior to ejection.  Negative return means error,
	 * zero means to continue with ejection, positive means not to
	 * eject.
	 */
	int (*pre_eject)(struct fsg_common *common,
			 struct fsg_lun *lun, int num);

	/*
	 * Called after ejection.  Negative return means error, zero
	 * or positive is just a success.
	 */
	int (*post_eject)(struct fsg_common *common,
			  struct fsg_lun *lun, int num);
};
264

265 266
/* Data shared by all the FSG instances. */
struct fsg_common {
	struct usb_gadget	*gadget;
	struct usb_composite_dev *cdev;
	struct fsg_dev		*fsg, *new_fsg;
	wait_queue_head_t	fsg_wait;

	/* filesem protects: backing files in use */
	struct rw_semaphore	filesem;

	/* lock protects: state, all the req_busy's */
	spinlock_t		lock;

	struct usb_ep		*ep0;		/* Copy of gadget->ep0 */
	struct usb_request	*ep0req;	/* Copy of cdev->req */
	unsigned int		ep0_req_tag;	/* bumped for every ep0 setup request */

	struct fsg_buffhd	*next_buffhd_to_fill;
	struct fsg_buffhd	*next_buffhd_to_drain;
	struct fsg_buffhd	*buffhds;	/* circular pipeline of buffer heads */

	int			cmnd_size;	/* size of the current SCSI command */
	u8			cmnd[MAX_COMMAND_SIZE];

	unsigned int		nluns;		/* number of entries in luns[] */
	unsigned int		lun;		/* LUN addressed by current command */
	struct fsg_lun		*luns;
	struct fsg_lun		*curlun;

	unsigned int		bulk_out_maxpacket;
	enum fsg_state		state;		/* For exception handling */
	unsigned int		exception_req_tag;

	enum data_direction	data_dir;
	u32			data_size;
	u32			data_size_from_cmnd;
	u32			tag;
	u32			residue;
	u32			usb_amount_left;

	unsigned int		can_stall:1;
	unsigned int		free_storage_on_release:1;
	unsigned int		phase_error:1;
	unsigned int		short_packet_received:1;
	unsigned int		bad_lun_okay:1;
	unsigned int		running:1;

	/* Set under ->lock; checked by sleep_thread() in the worker thread */
	int			thread_wakeup_needed;
	struct completion	thread_notifier;
	struct task_struct	*thread_task;

	/* Callback functions. */
	const struct fsg_operations	*ops;
	/* Gadget's private data. */
	void			*private_data;

	/*
	 * Vendor (8 chars), product (16 chars), release (4
	 * hexadecimal digits) and NUL byte
	 */
	char inquiry_string[8 + 16 + 4 + 1];

	struct kref		ref;
};

330 331 332 333 334 335 336
/*
 * Configuration supplied when instantiating the function; see the field
 * descriptions in the big comment at the top of this file.
 */
struct fsg_config {
	unsigned nluns;			/* number of valid entries in luns[] */
	struct fsg_lun_config {
		const char *filename;	/* backing file; NULL = medium not loaded */
		char ro;		/* LUN is read-only */
		char removable;		/* report medium as removable */
		char cdrom;		/* emulate a CD-ROM drive */
		char nofua;		/* ignore FUA bit in SCSI WRITE(10,12) */
	} luns[FSG_MAX_LUNS];

	/* Callback functions. */
	const struct fsg_operations	*ops;
	/* Gadget's private data. */
	void			*private_data;

	const char *vendor_name;		/*  8 characters or less */
	const char *product_name;		/* 16 characters or less */

	char			can_stall;	/* permit halting bulk endpoints */
};

351
/* Per-instance state: one fsg_dev for each USB function using the common. */
struct fsg_dev {
	struct usb_function	function;
	struct usb_gadget	*gadget;	/* Copy of cdev->gadget */
	struct fsg_common	*common;

	u16			interface_number;

	unsigned int		bulk_in_enabled:1;
	unsigned int		bulk_out_enabled:1;

	unsigned long		atomic_bitflags;
#define IGNORE_BULK_OUT		0

	struct usb_ep		*bulk_in;
	struct usb_ep		*bulk_out;
};
367

368 369 370 371 372 373
/*
 * Check that common->fsg is non-NULL before it is dereferenced; if it is
 * NULL, log the calling site (passed in by the fsg_is_set() wrapper) and
 * trigger a WARN.  Returns 1 when fsg is set, 0 otherwise.
 */
static inline int __fsg_is_set(struct fsg_common *common,
			       const char *func, unsigned line)
{
	if (common->fsg)
		return 1;
	ERROR(common, "common->fsg is NULL in %s at %u\n", func, line);
	WARN_ON(1);
	return 0;
}

#define fsg_is_set(common) likely(__fsg_is_set(common, __func__, __LINE__))
379

380 381 382 383 384
/* Map a composite-framework usb_function back to its enclosing fsg_dev. */
static inline struct fsg_dev *fsg_from_func(struct usb_function *f)
{
	return container_of(f, struct fsg_dev, function);
}

typedef void (*fsg_routine_t)(struct fsg_dev *);

387
static int exception_in_progress(struct fsg_common *common)
388
{
389
	return common->state > FSG_STATE_IDLE;
390 391
}

392 393 394 395 396 397 398 399 400 401 402 403 404 405
/*
 * Pad a bulk-out request up to a multiple of the endpoint maxpacket size.
 * The caller's unpadded length is remembered in bh->bulk_out_intended_length.
 */
static void set_bulk_out_req_length(struct fsg_common *common,
				    struct fsg_buffhd *bh, unsigned int length)
{
	unsigned int	maxp = common->bulk_out_maxpacket;
	unsigned int	partial;

	bh->bulk_out_intended_length = length;
	partial = length % maxp;
	if (partial)
		length += maxp - partial;
	bh->outreq->length = length;
}


406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427
/*-------------------------------------------------------------------------*/

/* Halt an endpoint, logging a friendly name for the bulk endpoints. */
static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
{
	const char	*name = ep->name;

	if (ep == fsg->bulk_in)
		name = "bulk-in";
	else if (ep == fsg->bulk_out)
		name = "bulk-out";
	DBG(fsg, "%s set halt\n", name);
	return usb_ep_set_halt(ep);
}


/*-------------------------------------------------------------------------*/

/* These routines may be called in process context or in_irq */

/* Caller must hold fsg->lock */
428
/* Tell the main thread that something has happened; caller holds common->lock. */
static void wakeup_thread(struct fsg_common *common)
{
	/* Tell the main thread that something has happened */
	common->thread_wakeup_needed = 1;
	if (common->thread_task)
		wake_up_process(common->thread_task);
}

436
/*
 * Raise an exception for the worker thread to handle (reset, config
 * change, disconnect, exit, ...).  May be called from interrupt context.
 */
static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
{
	unsigned long		flags;

	/*
	 * Do nothing if a higher-priority exception is already in progress.
	 * If a lower-or-equal priority exception is in progress, preempt it
	 * and notify the main thread by sending it a signal.
	 */
	spin_lock_irqsave(&common->lock, flags);
	if (common->state <= new_state) {
		/* Remember which ep0 request this exception belongs to */
		common->exception_req_tag = common->ep0_req_tag;
		common->state = new_state;
		if (common->thread_task)
			/* SIGUSR1 interrupts any file I/O the thread is doing */
			send_sig_info(SIGUSR1, SEND_SIG_FORCED,
				      common->thread_task);
	}
	spin_unlock_irqrestore(&common->lock, flags);
}


/*-------------------------------------------------------------------------*/

459
/* Submit the shared ep0 request; returns usb_ep_queue()'s result. */
static int ep0_queue(struct fsg_common *common)
{
	int	rc;

	rc = usb_ep_queue(common->ep0, common->ep0req, GFP_ATOMIC);
	common->ep0->driver_data = common;
	if (rc != 0 && rc != -ESHUTDOWN) {
		/* We can't do much more than wait for a reset */
		WARNING(common, "error in submission: %s --> %d\n",
			common->ep0->name, rc);
	}
	return rc;
}

473

474 475
/*-------------------------------------------------------------------------*/

476
/* Completion handlers. These always run in_irq. */
477 478 479

/* Bulk-in request completion; runs in interrupt context. */
static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct fsg_common	*common = ep->driver_data;
	struct fsg_buffhd	*bh = req->context;

	if (req->status || req->actual != req->length)
		DBG(common, "%s --> %d, %u/%u\n", __func__,
		    req->status, req->actual, req->length);
	if (req->status == -ECONNRESET)		/* Request was cancelled */
		usb_ep_fifo_flush(ep);

	/* Hold the lock while we update the request and buffer states */
	smp_wmb();
	spin_lock(&common->lock);
	bh->inreq_busy = 0;
	bh->state = BUF_STATE_EMPTY;
	wakeup_thread(common);
	spin_unlock(&common->lock);
}

/* Bulk-out request completion; runs in interrupt context. */
static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct fsg_common	*common = ep->driver_data;
	struct fsg_buffhd	*bh = req->context;

	dump_msg(common, "bulk-out", req->buf, req->actual);
	if (req->status || req->actual != bh->bulk_out_intended_length)
		DBG(common, "%s --> %d, %u/%u\n", __func__,
		    req->status, req->actual, bh->bulk_out_intended_length);
	if (req->status == -ECONNRESET)		/* Request was cancelled */
		usb_ep_fifo_flush(ep);

	/* Hold the lock while we update the request and buffer states */
	smp_wmb();
	spin_lock(&common->lock);
	bh->outreq_busy = 0;
	bh->state = BUF_STATE_FULL;
	wakeup_thread(common);
	spin_unlock(&common->lock);
}

519
/*
 * Handle class-specific ep0 requests: Bulk-Only Mass Storage Reset and
 * Get Max LUN.  Anything else is rejected with -EOPNOTSUPP.
 */
static int fsg_setup(struct usb_function *f,
		     const struct usb_ctrlrequest *ctrl)
{
	struct fsg_dev		*fsg = fsg_from_func(f);
	struct usb_request	*req = fsg->common->ep0req;
	u16			w_index = le16_to_cpu(ctrl->wIndex);
	u16			w_value = le16_to_cpu(ctrl->wValue);
	u16			w_length = le16_to_cpu(ctrl->wLength);

	if (!fsg_is_set(fsg->common))
		return -EOPNOTSUPP;

	++fsg->common->ep0_req_tag;	/* Record arrival of a new request */
	req->context = NULL;
	req->length = 0;
	dump_msg(fsg, "ep0-setup", (u8 *) ctrl, sizeof(*ctrl));

	switch (ctrl->bRequest) {

	case US_BULK_RESET_REQUEST:
		if (ctrl->bRequestType !=
		    (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
			break;
		if (w_index != fsg->interface_number || w_value != 0 ||
				w_length != 0)
			return -EDOM;

		/*
		 * Raise an exception to stop the current operation
		 * and reinitialize our state.
		 */
		DBG(fsg, "bulk reset request\n");
		raise_exception(fsg->common, FSG_STATE_RESET);
		/* Status stage is sent later, once the exception is handled */
		return DELAYED_STATUS;

	case US_BULK_GET_MAX_LUN:
		if (ctrl->bRequestType !=
		    (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
			break;
		if (w_index != fsg->interface_number || w_value != 0 ||
				w_length != 1)
			return -EDOM;
		VDBG(fsg, "get max LUN\n");
		/* Reply is the highest LUN index, i.e. nluns - 1 */
		*(u8 *)req->buf = fsg->common->nluns - 1;

		/* Respond with data/status */
		req->length = min((u16)1, w_length);
		return ep0_queue(fsg->common);
	}

	VDBG(fsg,
	     "unknown class-specific control req %02x.%02x v%04x i%04x l%u\n",
	     ctrl->bRequestType, ctrl->bRequest,
	     le16_to_cpu(ctrl->wValue), w_index, w_length);
	return -EOPNOTSUPP;
}


/*-------------------------------------------------------------------------*/

/* All the following routines run in process context */

/* Use this for bulk or interrupt transfers, not ep0 */
/* Use this for bulk or interrupt transfers, not ep0 */
static void start_transfer(struct fsg_dev *fsg, struct usb_ep *ep,
			   struct usb_request *req, int *pbusy,
			   enum fsg_buffer_state *state)
{
	int	rc;

	if (ep == fsg->bulk_in)
		dump_msg(fsg, "bulk-in", req->buf, req->length);

	/* Mark the buffer busy before queueing, under the common lock */
	spin_lock_irq(&fsg->common->lock);
	*pbusy = 1;
	*state = BUF_STATE_BUSY;
	spin_unlock_irq(&fsg->common->lock);
	rc = usb_ep_queue(ep, req, GFP_KERNEL);
	if (rc != 0) {
		*pbusy = 0;
		*state = BUF_STATE_EMPTY;

		/* We can't do much more than wait for a reset */

		/*
		 * Note: currently the net2280 driver fails zero-length
		 * submissions if DMA is enabled.
		 */
		if (rc != -ESHUTDOWN &&
		    !(rc == -EOPNOTSUPP && req->length == 0))
			WARNING(fsg, "error in submission: %s --> %d\n",
				ep->name, rc);
	}
}

613 614 615 616 617 618 619 620
static bool start_in_transfer(struct fsg_common *common, struct fsg_buffhd *bh)
{
	if (!fsg_is_set(common))
		return false;
	start_transfer(common->fsg, common->fsg->bulk_in,
		       bh->inreq, &bh->inreq_busy, &bh->state);
	return true;
}
621

622 623 624 625 626 627 628 629
static bool start_out_transfer(struct fsg_common *common, struct fsg_buffhd *bh)
{
	if (!fsg_is_set(common))
		return false;
	start_transfer(common->fsg, common->fsg->bulk_out,
		       bh->outreq, &bh->outreq_busy, &bh->state);
	return true;
}
630

631
/*
 * Block the worker thread until wakeup_thread() runs or a signal arrives.
 * Returns 0 on wakeup, -EINTR if interrupted by a signal (exception).
 */
static int sleep_thread(struct fsg_common *common)
{
	int	rc = 0;

	/* Wait until a signal arrives or we are woken up */
	for (;;) {
		try_to_freeze();
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current)) {
			rc = -EINTR;
			break;
		}
		if (common->thread_wakeup_needed)
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	common->thread_wakeup_needed = 0;
	return rc;
}


/*-------------------------------------------------------------------------*/

655
/*
 * Handle SCSI READ(6)/READ(10): read from the backing file and feed the
 * data into the bulk-in pipeline.  Returns -EIO for "no default reply"
 * (data was queued or sense data set), -EINVAL on a bad CDB, or -EINTR.
 */
static int do_read(struct fsg_common *common)
{
	struct fsg_lun		*curlun = common->curlun;
	u32			lba;
	struct fsg_buffhd	*bh;
	int			rc;
	u32			amount_left;
	loff_t			file_offset, file_offset_tmp;
	unsigned int		amount;
	ssize_t			nread;

	/*
	 * Get the starting Logical Block Address and check that it's
	 * not too big.
	 */
	if (common->cmnd[0] == READ_6)
		lba = get_unaligned_be24(&common->cmnd[1]);
	else {
		lba = get_unaligned_be32(&common->cmnd[2]);

		/*
		 * We allow DPO (Disable Page Out = don't save data in the
		 * cache) and FUA (Force Unit Access = don't read from the
		 * cache), but we don't implement them.
		 */
		if ((common->cmnd[1] & ~0x18) != 0) {
			curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
			return -EINVAL;
		}
	}
	if (lba >= curlun->num_sectors) {
		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
		return -EINVAL;
	}
	file_offset = ((loff_t) lba) << curlun->blkbits;

	/* Carry out the file reads */
	amount_left = common->data_size_from_cmnd;
	if (unlikely(amount_left == 0))
		return -EIO;		/* No default reply */

	for (;;) {
		/*
		 * Figure out how much we need to read:
		 * Try to read the remaining amount.
		 * But don't read more than the buffer size.
		 * And don't try to read past the end of the file.
		 */
		amount = min(amount_left, FSG_BUFLEN);
		amount = min((loff_t)amount,
			     curlun->file_length - file_offset);

		/* Wait for the next buffer to become available */
		bh = common->next_buffhd_to_fill;
		while (bh->state != BUF_STATE_EMPTY) {
			rc = sleep_thread(common);
			if (rc)
				return rc;
		}

		/*
		 * If we were asked to read past the end of file,
		 * end with an empty buffer.
		 */
		if (amount == 0) {
			curlun->sense_data =
					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
			curlun->sense_data_info =
					file_offset >> curlun->blkbits;
			curlun->info_valid = 1;
			bh->inreq->length = 0;
			bh->state = BUF_STATE_FULL;
			break;
		}

		/* Perform the read */
		file_offset_tmp = file_offset;
		nread = vfs_read(curlun->filp,
				 (char __user *)bh->buf,
				 amount, &file_offset_tmp);
		VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
		      (unsigned long long)file_offset, (int)nread);
		if (signal_pending(current))
			return -EINTR;

		if (nread < 0) {
			LDBG(curlun, "error in file read: %d\n", (int)nread);
			nread = 0;
		} else if (nread < amount) {
			LDBG(curlun, "partial file read: %d/%u\n",
			     (int)nread, amount);
			/* Only hand whole blocks to the host */
			nread = round_down(nread, curlun->blksize);
		}
		file_offset  += nread;
		amount_left  -= nread;
		common->residue -= nread;

		/*
		 * Except at the end of the transfer, nread will be
		 * equal to the buffer size, which is divisible by the
		 * bulk-in maxpacket size.
		 */
		bh->inreq->length = nread;
		bh->state = BUF_STATE_FULL;

		/* If an error occurred, report it and its position */
		if (nread < amount) {
			curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
			curlun->sense_data_info =
					file_offset >> curlun->blkbits;
			curlun->info_valid = 1;
			break;
		}

		if (amount_left == 0)
			break;		/* No more left to read */

		/* Send this buffer and go read some more */
		bh->inreq->zero = 0;
		if (!start_in_transfer(common, bh))
			/* Don't know what to do if common->fsg is NULL */
			return -EIO;
		common->next_buffhd_to_fill = bh->next;
	}

	return -EIO;		/* No default reply */
}


/*-------------------------------------------------------------------------*/

786
/*
 * Handle SCSI WRITE(6)/WRITE(10): drain data from the bulk-out pipeline
 * and write it to the backing file.  FUA is honoured (unless nofua) by
 * setting O_SYNC on the backing file for the duration of the command.
 * Returns -EIO for "no default reply", -EINVAL on a bad CDB or
 * write-protected LUN, or -EINTR.
 */
static int do_write(struct fsg_common *common)
{
	struct fsg_lun		*curlun = common->curlun;
	u32			lba;
	struct fsg_buffhd	*bh;
	int			get_some_more;
	u32			amount_left_to_req, amount_left_to_write;
	loff_t			usb_offset, file_offset, file_offset_tmp;
	unsigned int		amount;
	ssize_t			nwritten;
	int			rc;

	if (curlun->ro) {
		curlun->sense_data = SS_WRITE_PROTECTED;
		return -EINVAL;
	}
	spin_lock(&curlun->filp->f_lock);
	curlun->filp->f_flags &= ~O_SYNC;	/* Default is not to wait */
	spin_unlock(&curlun->filp->f_lock);

	/*
	 * Get the starting Logical Block Address and check that it's
	 * not too big
	 */
	if (common->cmnd[0] == WRITE_6)
		lba = get_unaligned_be24(&common->cmnd[1]);
	else {
		lba = get_unaligned_be32(&common->cmnd[2]);

		/*
		 * We allow DPO (Disable Page Out = don't save data in the
		 * cache) and FUA (Force Unit Access = write directly to the
		 * medium).  We don't implement DPO; we implement FUA by
		 * performing synchronous output.
		 */
		if (common->cmnd[1] & ~0x18) {
			curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
			return -EINVAL;
		}
		if (!curlun->nofua && (common->cmnd[1] & 0x08)) { /* FUA */
			spin_lock(&curlun->filp->f_lock);
			curlun->filp->f_flags |= O_SYNC;
			spin_unlock(&curlun->filp->f_lock);
		}
	}
	if (lba >= curlun->num_sectors) {
		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
		return -EINVAL;
	}

	/* Carry out the file writes */
	get_some_more = 1;
	file_offset = usb_offset = ((loff_t) lba) << curlun->blkbits;
	amount_left_to_req = common->data_size_from_cmnd;
	amount_left_to_write = common->data_size_from_cmnd;

	while (amount_left_to_write > 0) {

		/* Queue a request for more data from the host */
		bh = common->next_buffhd_to_fill;
		if (bh->state == BUF_STATE_EMPTY && get_some_more) {

			/*
			 * Figure out how much we want to get:
			 * Try to get the remaining amount,
			 * but not more than the buffer size.
			 */
			amount = min(amount_left_to_req, FSG_BUFLEN);

			/* Beyond the end of the backing file? */
			if (usb_offset >= curlun->file_length) {
				get_some_more = 0;
				curlun->sense_data =
					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
				curlun->sense_data_info =
					usb_offset >> curlun->blkbits;
				curlun->info_valid = 1;
				continue;
			}

			/* Get the next buffer */
			usb_offset += amount;
			common->usb_amount_left -= amount;
			amount_left_to_req -= amount;
			if (amount_left_to_req == 0)
				get_some_more = 0;

			/*
			 * Except at the end of the transfer, amount will be
			 * equal to the buffer size, which is divisible by
			 * the bulk-out maxpacket size.
			 */
			set_bulk_out_req_length(common, bh, amount);
			if (!start_out_transfer(common, bh))
				/* Dunno what to do if common->fsg is NULL */
				return -EIO;
			common->next_buffhd_to_fill = bh->next;
			continue;
		}

		/* Write the received data to the backing file */
		bh = common->next_buffhd_to_drain;
		if (bh->state == BUF_STATE_EMPTY && !get_some_more)
			break;			/* We stopped early */
		if (bh->state == BUF_STATE_FULL) {
			smp_rmb();
			common->next_buffhd_to_drain = bh->next;
			bh->state = BUF_STATE_EMPTY;

			/* Did something go wrong with the transfer? */
			if (bh->outreq->status != 0) {
				curlun->sense_data = SS_COMMUNICATION_FAILURE;
				curlun->sense_data_info =
					file_offset >> curlun->blkbits;
				curlun->info_valid = 1;
				break;
			}

			amount = bh->outreq->actual;
			if (curlun->file_length - file_offset < amount) {
				LERROR(curlun,
				       "write %u @ %llu beyond end %llu\n",
				       amount, (unsigned long long)file_offset,
				       (unsigned long long)curlun->file_length);
				amount = curlun->file_length - file_offset;
			}

			/* Don't accept excess data.  The spec doesn't say
			 * what to do in this case.  We'll ignore the error.
			 */
			amount = min(amount, bh->bulk_out_intended_length);

			/* Don't write a partial block */
			amount = round_down(amount, curlun->blksize);
			if (amount == 0)
				goto empty_write;

			/* Perform the write */
			file_offset_tmp = file_offset;
			nwritten = vfs_write(curlun->filp,
					     (char __user *)bh->buf,
					     amount, &file_offset_tmp);
			VLDBG(curlun, "file write %u @ %llu -> %d\n", amount,
			      (unsigned long long)file_offset, (int)nwritten);
			if (signal_pending(current))
				return -EINTR;		/* Interrupted! */

			if (nwritten < 0) {
				LDBG(curlun, "error in file write: %d\n",
				     (int)nwritten);
				nwritten = 0;
			} else if (nwritten < amount) {
				LDBG(curlun, "partial file write: %d/%u\n",
				     (int)nwritten, amount);
				/* Only count whole blocks as written */
				nwritten = round_down(nwritten, curlun->blksize);
			}
			file_offset += nwritten;
			amount_left_to_write -= nwritten;
			common->residue -= nwritten;

			/* If an error occurred, report it and its position */
			if (nwritten < amount) {
				curlun->sense_data = SS_WRITE_ERROR;
				curlun->sense_data_info =
					file_offset >> curlun->blkbits;
				curlun->info_valid = 1;
				break;
			}

 empty_write:
			/* Did the host decide to stop early? */
			if (bh->outreq->actual < bh->bulk_out_intended_length) {
				common->short_packet_received = 1;
				break;
			}
			continue;
		}

		/* Wait for something to happen */
		rc = sleep_thread(common);
		if (rc)
			return rc;
	}

	return -EIO;		/* No default reply */
}


/*-------------------------------------------------------------------------*/

976
static int do_synchronize_cache(struct fsg_common *common)
977
{
978
	struct fsg_lun	*curlun = common->curlun;
979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994
	int		rc;

	/* We ignore the requested LBA and write out all file's
	 * dirty data buffers. */
	rc = fsg_lun_fsync_sub(curlun);
	if (rc)
		curlun->sense_data = SS_WRITE_ERROR;
	return 0;
}


/*-------------------------------------------------------------------------*/

static void invalidate_sub(struct fsg_lun *curlun)
{
	struct file	*filp = curlun->filp;
A
Al Viro 已提交
995
	struct inode	*inode = file_inode(filp);
996 997 998
	unsigned long	rc;

	rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
999
	VLDBG(curlun, "invalidate_mapping_pages -> %ld\n", rc);
1000 1001
}

1002
static int do_verify(struct fsg_common *common)
1003
{
1004
	struct fsg_lun		*curlun = common->curlun;
1005 1006
	u32			lba;
	u32			verification_length;
1007
	struct fsg_buffhd	*bh = common->next_buffhd_to_fill;
1008 1009 1010 1011 1012
	loff_t			file_offset, file_offset_tmp;
	u32			amount_left;
	unsigned int		amount;
	ssize_t			nread;

1013 1014 1015 1016
	/*
	 * Get the starting Logical Block Address and check that it's
	 * not too big.
	 */
1017
	lba = get_unaligned_be32(&common->cmnd[2]);
1018 1019 1020 1021 1022
	if (lba >= curlun->num_sectors) {
		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
		return -EINVAL;
	}

1023 1024 1025 1026
	/*
	 * We allow DPO (Disable Page Out = don't save data in the
	 * cache) but we don't implement it.
	 */
1027
	if (common->cmnd[1] & ~0x10) {
1028 1029 1030 1031
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

1032
	verification_length = get_unaligned_be16(&common->cmnd[7]);
1033
	if (unlikely(verification_length == 0))
1034
		return -EIO;		/* No default reply */
1035 1036

	/* Prepare to carry out the file verify */
1037 1038
	amount_left = verification_length << curlun->blkbits;
	file_offset = ((loff_t) lba) << curlun->blkbits;
1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050

	/* Write out all the dirty buffers before invalidating them */
	fsg_lun_fsync_sub(curlun);
	if (signal_pending(current))
		return -EINTR;

	invalidate_sub(curlun);
	if (signal_pending(current))
		return -EINTR;

	/* Just try to read the requested blocks */
	while (amount_left > 0) {
1051 1052
		/*
		 * Figure out how much we need to read:
1053 1054 1055
		 * Try to read the remaining amount, but not more than
		 * the buffer size.
		 * And don't try to read past the end of the file.
1056
		 */
1057
		amount = min(amount_left, FSG_BUFLEN);
1058 1059
		amount = min((loff_t)amount,
			     curlun->file_length - file_offset);
1060 1061 1062
		if (amount == 0) {
			curlun->sense_data =
					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1063 1064
			curlun->sense_data_info =
				file_offset >> curlun->blkbits;
1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080
			curlun->info_valid = 1;
			break;
		}

		/* Perform the read */
		file_offset_tmp = file_offset;
		nread = vfs_read(curlun->filp,
				(char __user *) bh->buf,
				amount, &file_offset_tmp);
		VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
				(unsigned long long) file_offset,
				(int) nread);
		if (signal_pending(current))
			return -EINTR;

		if (nread < 0) {
1081
			LDBG(curlun, "error in file verify: %d\n", (int)nread);
1082 1083 1084
			nread = 0;
		} else if (nread < amount) {
			LDBG(curlun, "partial file verify: %d/%u\n",
1085
			     (int)nread, amount);
1086
			nread = round_down(nread, curlun->blksize);
1087 1088 1089
		}
		if (nread == 0) {
			curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
1090 1091
			curlun->sense_data_info =
				file_offset >> curlun->blkbits;
1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103
			curlun->info_valid = 1;
			break;
		}
		file_offset += nread;
		amount_left -= nread;
	}
	return 0;
}


/*-------------------------------------------------------------------------*/

1104
static int do_inquiry(struct fsg_common *common, struct fsg_buffhd *bh)
1105
{
1106
	struct fsg_lun *curlun = common->curlun;
1107 1108
	u8	*buf = (u8 *) bh->buf;

1109
	if (!curlun) {		/* Unsupported LUNs are okay */
1110
		common->bad_lun_okay = 1;
1111
		memset(buf, 0, 36);
1112 1113
		buf[0] = 0x7f;		/* Unsupported, no device-type */
		buf[4] = 31;		/* Additional length */
1114 1115 1116
		return 36;
	}

1117
	buf[0] = curlun->cdrom ? TYPE_ROM : TYPE_DISK;
1118
	buf[1] = curlun->removable ? 0x80 : 0;
1119 1120 1121 1122
	buf[2] = 2;		/* ANSI SCSI level 2 */
	buf[3] = 2;		/* SCSI-2 INQUIRY data format */
	buf[4] = 31;		/* Additional length */
	buf[5] = 0;		/* No special options */
1123 1124
	buf[6] = 0;
	buf[7] = 0;
1125
	memcpy(buf + 8, common->inquiry_string, sizeof common->inquiry_string);
1126 1127 1128
	return 36;
}

1129
static int do_request_sense(struct fsg_common *common, struct fsg_buffhd *bh)
1130
{
1131
	struct fsg_lun	*curlun = common->curlun;
1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157
	u8		*buf = (u8 *) bh->buf;
	u32		sd, sdinfo;
	int		valid;

	/*
	 * From the SCSI-2 spec., section 7.9 (Unit attention condition):
	 *
	 * If a REQUEST SENSE command is received from an initiator
	 * with a pending unit attention condition (before the target
	 * generates the contingent allegiance condition), then the
	 * target shall either:
	 *   a) report any pending sense data and preserve the unit
	 *	attention condition on the logical unit, or,
	 *   b) report the unit attention condition, may discard any
	 *	pending sense data, and clear the unit attention
	 *	condition on the logical unit for that initiator.
	 *
	 * FSG normally uses option a); enable this code to use option b).
	 */
#if 0
	if (curlun && curlun->unit_attention_data != SS_NO_SENSE) {
		curlun->sense_data = curlun->unit_attention_data;
		curlun->unit_attention_data = SS_NO_SENSE;
	}
#endif

1158
	if (!curlun) {		/* Unsupported LUNs are okay */
1159
		common->bad_lun_okay = 1;
1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172
		sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
		sdinfo = 0;
		valid = 0;
	} else {
		sd = curlun->sense_data;
		sdinfo = curlun->sense_data_info;
		valid = curlun->info_valid << 7;
		curlun->sense_data = SS_NO_SENSE;
		curlun->sense_data_info = 0;
		curlun->info_valid = 0;
	}

	memset(buf, 0, 18);
1173
	buf[0] = valid | 0x70;			/* Valid, current error */
1174 1175
	buf[2] = SK(sd);
	put_unaligned_be32(sdinfo, &buf[3]);	/* Sense information */
1176
	buf[7] = 18 - 8;			/* Additional sense length */
1177 1178 1179 1180 1181
	buf[12] = ASC(sd);
	buf[13] = ASCQ(sd);
	return 18;
}

1182
static int do_read_capacity(struct fsg_common *common, struct fsg_buffhd *bh)
1183
{
1184 1185 1186
	struct fsg_lun	*curlun = common->curlun;
	u32		lba = get_unaligned_be32(&common->cmnd[2]);
	int		pmi = common->cmnd[8];
1187
	u8		*buf = (u8 *)bh->buf;
1188 1189 1190 1191 1192 1193 1194 1195 1196

	/* Check the PMI and LBA fields */
	if (pmi > 1 || (pmi == 0 && lba != 0)) {
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	put_unaligned_be32(curlun->num_sectors - 1, &buf[0]);
						/* Max logical block */
1197
	put_unaligned_be32(curlun->blksize, &buf[4]);/* Block length */
1198 1199 1200
	return 8;
}

1201
static int do_read_header(struct fsg_common *common, struct fsg_buffhd *bh)
1202
{
1203 1204 1205
	struct fsg_lun	*curlun = common->curlun;
	int		msf = common->cmnd[1] & 0x02;
	u32		lba = get_unaligned_be32(&common->cmnd[2]);
1206
	u8		*buf = (u8 *)bh->buf;
1207

1208
	if (common->cmnd[1] & ~0x02) {		/* Mask away MSF */
1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}
	if (lba >= curlun->num_sectors) {
		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
		return -EINVAL;
	}

	memset(buf, 0, 8);
	buf[0] = 0x01;		/* 2048 bytes of user data, rest is EC */
	store_cdrom_address(&buf[4], msf, lba);
	return 8;
}

1223
static int do_read_toc(struct fsg_common *common, struct fsg_buffhd *bh)
1224
{
1225 1226 1227
	struct fsg_lun	*curlun = common->curlun;
	int		msf = common->cmnd[1] & 0x02;
	int		start_track = common->cmnd[6];
1228
	u8		*buf = (u8 *)bh->buf;
1229

1230
	if ((common->cmnd[1] & ~0x02) != 0 ||	/* Mask away MSF */
1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249
			start_track > 1) {
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	memset(buf, 0, 20);
	buf[1] = (20-2);		/* TOC data length */
	buf[2] = 1;			/* First track number */
	buf[3] = 1;			/* Last track number */
	buf[5] = 0x16;			/* Data track, copying allowed */
	buf[6] = 0x01;			/* Only track is number 1 */
	store_cdrom_address(&buf[8], msf, 0);

	buf[13] = 0x16;			/* Lead-out track is data */
	buf[14] = 0xAA;			/* Lead-out track number */
	store_cdrom_address(&buf[16], msf, curlun->num_sectors);
	return 20;
}

1250
static int do_mode_sense(struct fsg_common *common, struct fsg_buffhd *bh)
1251
{
1252 1253
	struct fsg_lun	*curlun = common->curlun;
	int		mscmnd = common->cmnd[0];
1254 1255 1256 1257 1258 1259 1260
	u8		*buf = (u8 *) bh->buf;
	u8		*buf0 = buf;
	int		pc, page_code;
	int		changeable_values, all_pages;
	int		valid_page = 0;
	int		len, limit;

1261
	if ((common->cmnd[1] & ~0x08) != 0) {	/* Mask away DBD */
1262 1263 1264
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}
1265 1266
	pc = common->cmnd[2] >> 6;
	page_code = common->cmnd[2] & 0x3f;
1267 1268 1269 1270 1271 1272 1273
	if (pc == 3) {
		curlun->sense_data = SS_SAVING_PARAMETERS_NOT_SUPPORTED;
		return -EINVAL;
	}
	changeable_values = (pc == 1);
	all_pages = (page_code == 0x3f);

1274 1275
	/*
	 * Write the mode parameter header.  Fixed values are: default
1276 1277
	 * medium type, no cache control (DPOFUA), and no block descriptors.
	 * The only variable value is the WriteProtect bit.  We will fill in
1278 1279
	 * the mode data length later.
	 */
1280
	memset(buf, 0, 8);
1281
	if (mscmnd == MODE_SENSE) {
1282
		buf[2] = (curlun->ro ? 0x80 : 0x00);		/* WP, DPOFUA */
1283 1284
		buf += 4;
		limit = 255;
1285
	} else {			/* MODE_SENSE_10 */
1286
		buf[3] = (curlun->ro ? 0x80 : 0x00);		/* WP, DPOFUA */
1287
		buf += 8;
1288
		limit = 65535;		/* Should really be FSG_BUFLEN */
1289 1290 1291 1292
	}

	/* No block descriptors */

1293 1294 1295 1296
	/*
	 * The mode pages, in numerical order.  The only page we support
	 * is the Caching page.
	 */
1297 1298
	if (page_code == 0x08 || all_pages) {
		valid_page = 1;
1299 1300 1301
		buf[0] = 0x08;		/* Page code */
		buf[1] = 10;		/* Page length */
		memset(buf+2, 0, 10);	/* None of the fields are changeable */
1302 1303

		if (!changeable_values) {
1304 1305 1306
			buf[2] = 0x04;	/* Write cache enable, */
					/* Read cache not disabled */
					/* No cache retention priorities */
1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317
			put_unaligned_be16(0xffff, &buf[4]);
					/* Don't disable prefetch */
					/* Minimum prefetch = 0 */
			put_unaligned_be16(0xffff, &buf[8]);
					/* Maximum prefetch */
			put_unaligned_be16(0xffff, &buf[10]);
					/* Maximum prefetch ceiling */
		}
		buf += 12;
	}

1318 1319 1320 1321
	/*
	 * Check that a valid page was requested and the mode data length
	 * isn't too long.
	 */
1322 1323 1324 1325 1326 1327 1328
	len = buf - buf0;
	if (!valid_page || len > limit) {
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	/*  Store the mode data length */
1329
	if (mscmnd == MODE_SENSE)
1330 1331 1332 1333 1334 1335
		buf0[0] = len - 1;
	else
		put_unaligned_be16(len - 2, buf0);
	return len;
}

1336
static int do_start_stop(struct fsg_common *common)
1337
{
1338 1339 1340 1341
	struct fsg_lun	*curlun = common->curlun;
	int		loej, start;

	if (!curlun) {
1342
		return -EINVAL;
1343 1344
	} else if (!curlun->removable) {
		curlun->sense_data = SS_INVALID_COMMAND;
1345
		return -EINVAL;
1346 1347
	} else if ((common->cmnd[1] & ~0x01) != 0 || /* Mask away Immed */
		   (common->cmnd[4] & ~0x03) != 0) { /* Mask LoEj, Start */
1348 1349 1350 1351
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

1352 1353
	loej  = common->cmnd[4] & 0x02;
	start = common->cmnd[4] & 0x01;
1354

1355 1356 1357 1358
	/*
	 * Our emulation doesn't support mounting; the medium is
	 * available for use as soon as it is loaded.
	 */
1359
	if (start) {
1360 1361 1362 1363
		if (!fsg_lun_is_open(curlun)) {
			curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
			return -EINVAL;
		}
1364
		return 0;
1365
	}
1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396

	/* Are we allowed to unload the media? */
	if (curlun->prevent_medium_removal) {
		LDBG(curlun, "unload attempt prevented\n");
		curlun->sense_data = SS_MEDIUM_REMOVAL_PREVENTED;
		return -EINVAL;
	}

	if (!loej)
		return 0;

	/* Simulate an unload/eject */
	if (common->ops && common->ops->pre_eject) {
		int r = common->ops->pre_eject(common, curlun,
					       curlun - common->luns);
		if (unlikely(r < 0))
			return r;
		else if (r)
			return 0;
	}

	up_read(&common->filesem);
	down_write(&common->filesem);
	fsg_lun_close(curlun);
	up_write(&common->filesem);
	down_read(&common->filesem);

	return common->ops && common->ops->post_eject
		? min(0, common->ops->post_eject(common, curlun,
						 curlun - common->luns))
		: 0;
1397 1398
}

1399
static int do_prevent_allow(struct fsg_common *common)
1400
{
1401
	struct fsg_lun	*curlun = common->curlun;
1402 1403
	int		prevent;

1404
	if (!common->curlun) {
1405
		return -EINVAL;
1406 1407
	} else if (!common->curlun->removable) {
		common->curlun->sense_data = SS_INVALID_COMMAND;
1408 1409 1410
		return -EINVAL;
	}

1411 1412
	prevent = common->cmnd[4] & 0x01;
	if ((common->cmnd[4] & ~0x01) != 0) {	/* Mask away Prevent */
1413 1414 1415 1416 1417 1418 1419 1420 1421 1422
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	if (curlun->prevent_medium_removal && !prevent)
		fsg_lun_fsync_sub(curlun);
	curlun->prevent_medium_removal = prevent;
	return 0;
}

1423
static int do_read_format_capacities(struct fsg_common *common,
1424 1425
			struct fsg_buffhd *bh)
{
1426
	struct fsg_lun	*curlun = common->curlun;
1427 1428 1429
	u8		*buf = (u8 *) bh->buf;

	buf[0] = buf[1] = buf[2] = 0;
1430
	buf[3] = 8;	/* Only the Current/Maximum Capacity Descriptor */
1431 1432 1433 1434
	buf += 4;

	put_unaligned_be32(curlun->num_sectors, &buf[0]);
						/* Number of blocks */
1435
	put_unaligned_be32(curlun->blksize, &buf[4]);/* Block length */
1436 1437 1438 1439
	buf[4] = 0x02;				/* Current capacity */
	return 12;
}

1440
static int do_mode_select(struct fsg_common *common, struct fsg_buffhd *bh)
1441
{
1442
	struct fsg_lun	*curlun = common->curlun;
1443 1444

	/* We don't support MODE SELECT */
1445 1446
	if (curlun)
		curlun->sense_data = SS_INVALID_COMMAND;
1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 1470 1471 1472 1473 1474 1475 1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497
	return -EINVAL;
}


/*-------------------------------------------------------------------------*/

/*
 * Halt the bulk-in endpoint, retrying every 100 ms for as long as the
 * controller returns -EAGAIN.  Any other error is logged and treated
 * as success.  Returns -EINTR if interrupted by a signal while waiting.
 */
static int halt_bulk_in_endpoint(struct fsg_dev *fsg)
{
	int	rc;

	rc = fsg_set_halt(fsg, fsg->bulk_in);
	if (rc == -EAGAIN)
		VDBG(fsg, "delayed bulk-in endpoint halt\n");
	while (rc != 0) {
		if (rc != -EAGAIN) {
			/* Unexpected failure: warn and give up quietly */
			WARNING(fsg, "usb_ep_set_halt -> %d\n", rc);
			rc = 0;
			break;
		}

		/* Wait for a short time and then try again */
		if (msleep_interruptible(100) != 0)
			return -EINTR;
		rc = usb_ep_set_halt(fsg->bulk_in);
	}
	return rc;
}

/*
 * Wedge the bulk-in endpoint (halt that the host cannot clear with
 * CLEAR_FEATURE), retrying every 100 ms while the controller returns
 * -EAGAIN.  Any other error is logged and treated as success.
 * Returns -EINTR if interrupted by a signal while waiting.
 */
static int wedge_bulk_in_endpoint(struct fsg_dev *fsg)
{
	int	rc;

	DBG(fsg, "bulk-in set wedge\n");
	rc = usb_ep_set_wedge(fsg->bulk_in);
	if (rc == -EAGAIN)
		VDBG(fsg, "delayed bulk-in endpoint wedge\n");
	while (rc != 0) {
		if (rc != -EAGAIN) {
			/* Unexpected failure: warn and give up quietly */
			WARNING(fsg, "usb_ep_set_wedge -> %d\n", rc);
			rc = 0;
			break;
		}

		/* Wait for a short time and then try again */
		if (msleep_interruptible(100) != 0)
			return -EINTR;
		rc = usb_ep_set_wedge(fsg->bulk_in);
	}
	return rc;
}

1498
static int throw_away_data(struct fsg_common *common)
1499 1500 1501 1502 1503
{
	struct fsg_buffhd	*bh;
	u32			amount;
	int			rc;

1504 1505 1506
	for (bh = common->next_buffhd_to_drain;
	     bh->state != BUF_STATE_EMPTY || common->usb_amount_left > 0;
	     bh = common->next_buffhd_to_drain) {
1507 1508 1509 1510 1511

		/* Throw away the data in a filled buffer */
		if (bh->state == BUF_STATE_FULL) {
			smp_rmb();
			bh->state = BUF_STATE_EMPTY;
1512
			common->next_buffhd_to_drain = bh->next;
1513 1514

			/* A short packet or an error ends everything */
1515
			if (bh->outreq->actual < bh->bulk_out_intended_length ||
1516
			    bh->outreq->status != 0) {
1517 1518
				raise_exception(common,
						FSG_STATE_ABORT_BULK_OUT);
1519 1520 1521 1522 1523 1524
				return -EINTR;
			}
			continue;
		}

		/* Try to submit another request if we need one */
1525 1526 1527 1528
		bh = common->next_buffhd_to_fill;
		if (bh->state == BUF_STATE_EMPTY
		 && common->usb_amount_left > 0) {
			amount = min(common->usb_amount_left, FSG_BUFLEN);
1529

1530
			/*
1531 1532
			 * Except at the end of the transfer, amount will be
			 * equal to the buffer size, which is divisible by
1533 1534
			 * the bulk-out maxpacket size.
			 */
1535
			set_bulk_out_req_length(common, bh, amount);
1536
			if (!start_out_transfer(common, bh))
1537
				/* Dunno what to do if common->fsg is NULL */
1538 1539 1540
				return -EIO;
			common->next_buffhd_to_fill = bh->next;
			common->usb_amount_left -= amount;
1541 1542 1543 1544
			continue;
		}

		/* Otherwise wait for something to happen */
1545
		rc = sleep_thread(common);
1546 1547 1548 1549 1550 1551
		if (rc)
			return rc;
	}
	return 0;
}

1552
static int finish_reply(struct fsg_common *common)
1553
{
1554
	struct fsg_buffhd	*bh = common->next_buffhd_to_fill;
1555 1556
	int			rc = 0;

1557
	switch (common->data_dir) {
1558
	case DATA_DIR_NONE:
1559
		break;			/* Nothing to send */
1560

1561 1562
	/*
	 * If we don't know whether the host wants to read or write,
1563 1564
	 * this must be CB or CBI with an unknown command.  We mustn't
	 * try to send or receive any data.  So stall both bulk pipes
1565 1566
	 * if we can and wait for a reset.
	 */
1567
	case DATA_DIR_UNKNOWN:
1568 1569 1570 1571 1572 1573 1574 1575
		if (!common->can_stall) {
			/* Nothing */
		} else if (fsg_is_set(common)) {
			fsg_set_halt(common->fsg, common->fsg->bulk_out);
			rc = halt_bulk_in_endpoint(common->fsg);
		} else {
			/* Don't know what to do if common->fsg is NULL */
			rc = -EIO;
1576 1577 1578 1579 1580
		}
		break;

	/* All but the last buffer of data must have already been sent */
	case DATA_DIR_TO_HOST:
1581
		if (common->data_size == 0) {
1582
			/* Nothing to send */
1583

1584 1585 1586 1587
		/* Don't know what to do if common->fsg is NULL */
		} else if (!fsg_is_set(common)) {
			rc = -EIO;

1588
		/* If there's no residue, simply send the last buffer */
1589
		} else if (common->residue == 0) {
1590
			bh->inreq->zero = 0;
1591
			if (!start_in_transfer(common, bh))
1592 1593
				return -EIO;
			common->next_buffhd_to_fill = bh->next;
1594

1595
		/*
1596 1597 1598 1599 1600
		 * For Bulk-only, mark the end of the data with a short
		 * packet.  If we are allowed to stall, halt the bulk-in
		 * endpoint.  (Note: This violates the Bulk-Only Transport
		 * specification, which requires us to pad the data if we
		 * don't halt the endpoint.  Presumably nobody will mind.)
1601
		 */
1602
		} else {
1603
			bh->inreq->zero = 1;
1604
			if (!start_in_transfer(common, bh))
1605 1606
				rc = -EIO;
			common->next_buffhd_to_fill = bh->next;
1607
			if (common->can_stall)
1608
				rc = halt_bulk_in_endpoint(common->fsg);
1609 1610 1611
		}
		break;

1612 1613 1614 1615
	/*
	 * We have processed all we want from the data the host has sent.
	 * There may still be outstanding bulk-out requests.
	 */
1616
	case DATA_DIR_FROM_HOST:
1617
		if (common->residue == 0) {
1618
			/* Nothing to receive */
1619 1620

		/* Did the host stop sending unexpectedly early? */
1621 1622
		} else if (common->short_packet_received) {
			raise_exception(common, FSG_STATE_ABORT_BULK_OUT);
1623 1624
			rc = -EINTR;

1625 1626
		/*
		 * We haven't processed all the incoming data.  Even though
1627 1628 1629 1630
		 * we may be allowed to stall, doing so would cause a race.
		 * The controller may already have ACK'ed all the remaining
		 * bulk-out packets, in which case the host wouldn't see a
		 * STALL.  Not realizing the endpoint was halted, it wouldn't
1631 1632
		 * clear the halt -- leading to problems later on.
		 */
1633
#if 0
1634 1635 1636 1637 1638
		} else if (common->can_stall) {
			if (fsg_is_set(common))
				fsg_set_halt(common->fsg,
					     common->fsg->bulk_out);
			raise_exception(common, FSG_STATE_ABORT_BULK_OUT);
1639 1640 1641
			rc = -EINTR;
#endif

1642 1643 1644 1645
		/*
		 * We can't stall.  Read in the excess data and throw it
		 * all away.
		 */
1646
		} else {
1647
			rc = throw_away_data(common);
1648
		}
1649 1650 1651 1652 1653
		break;
	}
	return rc;
}

1654
static int send_status(struct fsg_common *common)
1655
{
1656
	struct fsg_lun		*curlun = common->curlun;
1657
	struct fsg_buffhd	*bh;
1658
	struct bulk_cs_wrap	*csw;
1659
	int			rc;
1660
	u8			status = US_BULK_STAT_OK;
1661 1662 1663
	u32			sd, sdinfo = 0;

	/* Wait for the next buffer to become available */
1664
	bh = common->next_buffhd_to_fill;
1665
	while (bh->state != BUF_STATE_EMPTY) {
1666
		rc = sleep_thread(common);
1667 1668 1669 1670 1671 1672 1673
		if (rc)
			return rc;
	}

	if (curlun) {
		sd = curlun->sense_data;
		sdinfo = curlun->sense_data_info;
1674
	} else if (common->bad_lun_okay)
1675 1676 1677 1678
		sd = SS_NO_SENSE;
	else
		sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;

1679 1680
	if (common->phase_error) {
		DBG(common, "sending phase-error status\n");
1681
		status = US_BULK_STAT_PHASE;
1682 1683
		sd = SS_INVALID_COMMAND;
	} else if (sd != SS_NO_SENSE) {
1684
		DBG(common, "sending command-failure status\n");
1685
		status = US_BULK_STAT_FAIL;
1686
		VDBG(common, "  sense data: SK x%02x, ASC x%02x, ASCQ x%02x;"
1687 1688 1689 1690
				"  info x%x\n",
				SK(sd), ASC(sd), ASCQ(sd), sdinfo);
	}

1691
	/* Store and send the Bulk-only CSW */
1692
	csw = (void *)bh->buf;
1693

1694
	csw->Signature = cpu_to_le32(US_BULK_CS_SIGN);
1695 1696
	csw->Tag = common->tag;
	csw->Residue = cpu_to_le32(common->residue);
1697
	csw->Status = status;
1698

1699
	bh->inreq->length = US_BULK_CS_WRAP_LEN;
1700
	bh->inreq->zero = 0;
1701
	if (!start_in_transfer(common, bh))
1702 1703
		/* Don't know what to do if common->fsg is NULL */
		return -EIO;
1704

1705
	common->next_buffhd_to_fill = bh->next;
1706 1707 1708 1709 1710 1711
	return 0;
}


/*-------------------------------------------------------------------------*/

1712 1713 1714 1715
/*
 * Check whether the command is properly formed and whether its data size
 * and direction agree with the values we already have.
 */
1716
static int check_command(struct fsg_common *common, int cmnd_size,
1717 1718
			 enum data_direction data_dir, unsigned int mask,
			 int needs_medium, const char *name)
1719 1720
{
	int			i;
1721
	int			lun = common->cmnd[1] >> 5;
1722 1723 1724 1725 1726
	static const char	dirletter[4] = {'u', 'o', 'i', 'n'};
	char			hdlen[20];
	struct fsg_lun		*curlun;

	hdlen[0] = 0;
1727 1728
	if (common->data_dir != DATA_DIR_UNKNOWN)
		sprintf(hdlen, ", H%c=%u", dirletter[(int) common->data_dir],
1729
			common->data_size);
1730
	VDBG(common, "SCSI command: %s;  Dc=%d, D%c=%u;  Hc=%d%s\n",
1731
	     name, cmnd_size, dirletter[(int) data_dir],
1732
	     common->data_size_from_cmnd, common->cmnd_size, hdlen);
1733

1734 1735 1736 1737
	/*
	 * We can't reply at all until we know the correct data direction
	 * and size.
	 */
1738
	if (common->data_size_from_cmnd == 0)
1739
		data_dir = DATA_DIR_NONE;
1740
	if (common->data_size < common->data_size_from_cmnd) {
1741 1742
		/*
		 * Host data size < Device data size is a phase error.
1743
		 * Carry out the command, but only transfer as much as
1744 1745
		 * we are allowed.
		 */
1746 1747
		common->data_size_from_cmnd = common->data_size;
		common->phase_error = 1;
1748
	}
1749 1750
	common->residue = common->data_size;
	common->usb_amount_left = common->data_size;
1751 1752

	/* Conflicting data directions is a phase error */
1753
	if (common->data_dir != data_dir && common->data_size_from_cmnd > 0) {
1754
		common->phase_error = 1;
1755 1756 1757 1758
		return -EINVAL;
	}

	/* Verify the length of the command itself */
1759
	if (cmnd_size != common->cmnd_size) {
1760

1761 1762
		/*
		 * Special case workaround: There are plenty of buggy SCSI
1763 1764 1765 1766 1767 1768 1769 1770 1771 1772 1773
		 * implementations. Many have issues with cbw->Length
		 * field passing a wrong command size. For those cases we
		 * always try to work around the problem by using the length
		 * sent by the host side provided it is at least as large
		 * as the correct command length.
		 * Examples of such cases would be MS-Windows, which issues
		 * REQUEST SENSE with cbw->Length == 12 where it should
		 * be 6, and xbox360 issuing INQUIRY, TEST UNIT READY and
		 * REQUEST SENSE with cbw->Length == 10 where it should
		 * be 6 as well.
		 */
1774 1775
		if (cmnd_size <= common->cmnd_size) {
			DBG(common, "%s is buggy! Expected length %d "
1776
			    "but we got %d\n", name,
1777 1778
			    cmnd_size, common->cmnd_size);
			cmnd_size = common->cmnd_size;
1779
		} else {
1780
			common->phase_error = 1;
1781 1782 1783 1784 1785
			return -EINVAL;
		}
	}

	/* Check that the LUN values are consistent */
1786 1787 1788
	if (common->lun != lun)
		DBG(common, "using LUN %d from CBW, not LUN %d from CDB\n",
		    common->lun, lun);
1789 1790

	/* Check the LUN */
1791 1792
	curlun = common->curlun;
	if (curlun) {
1793
		if (common->cmnd[0] != REQUEST_SENSE) {
1794 1795 1796 1797 1798
			curlun->sense_data = SS_NO_SENSE;
			curlun->sense_data_info = 0;
			curlun->info_valid = 0;
		}
	} else {
1799
		common->bad_lun_okay = 0;
1800

1801 1802 1803 1804
		/*
		 * INQUIRY and REQUEST SENSE commands are explicitly allowed
		 * to use unsupported LUNs; all others may not.
		 */
1805 1806
		if (common->cmnd[0] != INQUIRY &&
		    common->cmnd[0] != REQUEST_SENSE) {
1807
			DBG(common, "unsupported LUN %d\n", common->lun);
1808 1809 1810 1811
			return -EINVAL;
		}
	}

1812 1813 1814 1815
	/*
	 * If a unit attention condition exists, only INQUIRY and
	 * REQUEST SENSE commands are allowed; anything else must fail.
	 */
1816
	if (curlun && curlun->unit_attention_data != SS_NO_SENSE &&
1817 1818
	    common->cmnd[0] != INQUIRY &&
	    common->cmnd[0] != REQUEST_SENSE) {
1819 1820 1821 1822 1823 1824
		curlun->sense_data = curlun->unit_attention_data;
		curlun->unit_attention_data = SS_NO_SENSE;
		return -EINVAL;
	}

	/* Check that only command bytes listed in the mask are non-zero */
1825
	common->cmnd[1] &= 0x1f;			/* Mask away the LUN */
1826
	for (i = 1; i < cmnd_size; ++i) {
1827
		if (common->cmnd[i] && !(mask & (1 << i))) {
1828 1829 1830 1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843
			if (curlun)
				curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
			return -EINVAL;
		}
	}

	/* If the medium isn't mounted and the command needs to access
	 * it, return an error. */
	if (curlun && !fsg_lun_is_open(curlun) && needs_medium) {
		curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
		return -EINVAL;
	}

	return 0;
}

1844 1845 1846 1847 1848 1849 1850 1851 1852 1853 1854
/* wrapper of check_command for data size in blocks handling */
static int check_command_size_in_blocks(struct fsg_common *common,
		int cmnd_size, enum data_direction data_dir,
		unsigned int mask, int needs_medium, const char *name)
{
	if (common->curlun)
		common->data_size_from_cmnd <<= common->curlun->blkbits;
	return check_command(common, cmnd_size, data_dir,
			mask, needs_medium, name);
}

1855
static int do_scsi_command(struct fsg_common *common)
1856 1857 1858 1859 1860 1861 1862
{
	struct fsg_buffhd	*bh;
	int			rc;
	int			reply = -EINVAL;
	int			i;
	static char		unknown[16];

1863
	dump_cdb(common);
1864 1865

	/* Wait for the next buffer to become available for data or status */
1866 1867
	bh = common->next_buffhd_to_fill;
	common->next_buffhd_to_drain = bh;
1868
	while (bh->state != BUF_STATE_EMPTY) {
1869
		rc = sleep_thread(common);
1870 1871 1872
		if (rc)
			return rc;
	}
1873 1874
	common->phase_error = 0;
	common->short_packet_received = 0;
1875

1876 1877
	down_read(&common->filesem);	/* We're using the backing file */
	switch (common->cmnd[0]) {
1878

1879
	case INQUIRY:
1880 1881
		common->data_size_from_cmnd = common->cmnd[4];
		reply = check_command(common, 6, DATA_DIR_TO_HOST,
1882 1883 1884
				      (1<<4), 0,
				      "INQUIRY");
		if (reply == 0)
1885
			reply = do_inquiry(common, bh);
1886 1887
		break;

1888
	case MODE_SELECT:
1889 1890
		common->data_size_from_cmnd = common->cmnd[4];
		reply = check_command(common, 6, DATA_DIR_FROM_HOST,
1891 1892 1893
				      (1<<1) | (1<<4), 0,
				      "MODE SELECT(6)");
		if (reply == 0)
1894
			reply = do_mode_select(common, bh);
1895 1896
		break;

1897
	case MODE_SELECT_10:
1898 1899 1900
		common->data_size_from_cmnd =
			get_unaligned_be16(&common->cmnd[7]);
		reply = check_command(common, 10, DATA_DIR_FROM_HOST,
1901 1902 1903
				      (1<<1) | (3<<7), 0,
				      "MODE SELECT(10)");
		if (reply == 0)
1904
			reply = do_mode_select(common, bh);
1905 1906
		break;

1907
	case MODE_SENSE:
1908 1909
		common->data_size_from_cmnd = common->cmnd[4];
		reply = check_command(common, 6, DATA_DIR_TO_HOST,
1910 1911 1912
				      (1<<1) | (1<<2) | (1<<4), 0,
				      "MODE SENSE(6)");
		if (reply == 0)
1913
			reply = do_mode_sense(common, bh);
1914 1915
		break;

1916
	case MODE_SENSE_10:
1917 1918 1919
		common->data_size_from_cmnd =
			get_unaligned_be16(&common->cmnd[7]);
		reply = check_command(common, 10, DATA_DIR_TO_HOST,
1920 1921 1922
				      (1<<1) | (1<<2) | (3<<7), 0,
				      "MODE SENSE(10)");
		if (reply == 0)
1923
			reply = do_mode_sense(common, bh);
1924 1925
		break;

1926
	case ALLOW_MEDIUM_REMOVAL:
1927 1928
		common->data_size_from_cmnd = 0;
		reply = check_command(common, 6, DATA_DIR_NONE,
1929 1930 1931
				      (1<<4), 0,
				      "PREVENT-ALLOW MEDIUM REMOVAL");
		if (reply == 0)
1932
			reply = do_prevent_allow(common);
1933 1934
		break;

1935
	case READ_6:
1936
		i = common->cmnd[4];
1937 1938 1939
		common->data_size_from_cmnd = (i == 0) ? 256 : i;
		reply = check_command_size_in_blocks(common, 6,
				      DATA_DIR_TO_HOST,
1940 1941 1942
				      (7<<1) | (1<<4), 1,
				      "READ(6)");
		if (reply == 0)
1943
			reply = do_read(common);
1944 1945
		break;

1946
	case READ_10:
1947
		common->data_size_from_cmnd =
1948 1949 1950
				get_unaligned_be16(&common->cmnd[7]);
		reply = check_command_size_in_blocks(common, 10,
				      DATA_DIR_TO_HOST,
1951 1952 1953
				      (1<<1) | (0xf<<2) | (3<<7), 1,
				      "READ(10)");
		if (reply == 0)
1954
			reply = do_read(common);
1955 1956
		break;

1957
	case READ_12:
1958
		common->data_size_from_cmnd =
1959 1960 1961
				get_unaligned_be32(&common->cmnd[6]);
		reply = check_command_size_in_blocks(common, 12,
				      DATA_DIR_TO_HOST,
1962 1963 1964
				      (1<<1) | (0xf<<2) | (0xf<<6), 1,
				      "READ(12)");
		if (reply == 0)
1965
			reply = do_read(common);
1966 1967
		break;

1968
	case READ_CAPACITY:
1969 1970
		common->data_size_from_cmnd = 8;
		reply = check_command(common, 10, DATA_DIR_TO_HOST,
1971 1972 1973
				      (0xf<<2) | (1<<8), 1,
				      "READ CAPACITY");
		if (reply == 0)
1974
			reply = do_read_capacity(common, bh);
1975 1976
		break;

1977
	case READ_HEADER:
1978
		if (!common->curlun || !common->curlun->cdrom)
1979
			goto unknown_cmnd;
1980 1981 1982
		common->data_size_from_cmnd =
			get_unaligned_be16(&common->cmnd[7]);
		reply = check_command(common, 10, DATA_DIR_TO_HOST,
1983 1984 1985
				      (3<<7) | (0x1f<<1), 1,
				      "READ HEADER");
		if (reply == 0)
1986
			reply = do_read_header(common, bh);
1987 1988
		break;

1989
	case READ_TOC:
1990
		if (!common->curlun || !common->curlun->cdrom)
1991
			goto unknown_cmnd;
1992 1993 1994
		common->data_size_from_cmnd =
			get_unaligned_be16(&common->cmnd[7]);
		reply = check_command(common, 10, DATA_DIR_TO_HOST,
1995 1996 1997
				      (7<<6) | (1<<1), 1,
				      "READ TOC");
		if (reply == 0)
1998
			reply = do_read_toc(common, bh);
1999 2000
		break;

2001
	case READ_FORMAT_CAPACITIES:
2002 2003 2004
		common->data_size_from_cmnd =
			get_unaligned_be16(&common->cmnd[7]);
		reply = check_command(common, 10, DATA_DIR_TO_HOST,
2005 2006 2007
				      (3<<7), 1,
				      "READ FORMAT CAPACITIES");
		if (reply == 0)
2008
			reply = do_read_format_capacities(common, bh);
2009 2010
		break;

2011
	case REQUEST_SENSE:
2012 2013
		common->data_size_from_cmnd = common->cmnd[4];
		reply = check_command(common, 6, DATA_DIR_TO_HOST,
2014 2015 2016
				      (1<<4), 0,
				      "REQUEST SENSE");
		if (reply == 0)
2017
			reply = do_request_sense(common, bh);
2018 2019
		break;

2020
	case START_STOP:
2021 2022
		common->data_size_from_cmnd = 0;
		reply = check_command(common, 6, DATA_DIR_NONE,
2023 2024 2025
				      (1<<1) | (1<<4), 0,
				      "START-STOP UNIT");
		if (reply == 0)
2026
			reply = do_start_stop(common);
2027 2028
		break;

2029
	case SYNCHRONIZE_CACHE:
2030 2031
		common->data_size_from_cmnd = 0;
		reply = check_command(common, 10, DATA_DIR_NONE,
2032 2033 2034
				      (0xf<<2) | (3<<7), 1,
				      "SYNCHRONIZE CACHE");
		if (reply == 0)
2035
			reply = do_synchronize_cache(common);
2036 2037
		break;

2038
	case TEST_UNIT_READY:
2039 2040
		common->data_size_from_cmnd = 0;
		reply = check_command(common, 6, DATA_DIR_NONE,
2041 2042 2043 2044
				0, 1,
				"TEST UNIT READY");
		break;

2045 2046 2047 2048
	/*
	 * Although optional, this command is used by MS-Windows.  We
	 * support a minimal version: BytChk must be 0.
	 */
2049
	case VERIFY:
2050 2051
		common->data_size_from_cmnd = 0;
		reply = check_command(common, 10, DATA_DIR_NONE,
2052 2053 2054
				      (1<<1) | (0xf<<2) | (3<<7), 1,
				      "VERIFY");
		if (reply == 0)
2055
			reply = do_verify(common);
2056 2057
		break;

2058
	case WRITE_6:
2059
		i = common->cmnd[4];
2060 2061 2062
		common->data_size_from_cmnd = (i == 0) ? 256 : i;
		reply = check_command_size_in_blocks(common, 6,
				      DATA_DIR_FROM_HOST,
2063 2064 2065
				      (7<<1) | (1<<4), 1,
				      "WRITE(6)");
		if (reply == 0)
2066
			reply = do_write(common);
2067 2068
		break;

2069
	case WRITE_10:
2070
		common->data_size_from_cmnd =
2071 2072 2073
				get_unaligned_be16(&common->cmnd[7]);
		reply = check_command_size_in_blocks(common, 10,
				      DATA_DIR_FROM_HOST,
2074 2075 2076
				      (1<<1) | (0xf<<2) | (3<<7), 1,
				      "WRITE(10)");
		if (reply == 0)
2077
			reply = do_write(common);
2078 2079
		break;

2080
	case WRITE_12:
2081
		common->data_size_from_cmnd =
2082 2083 2084
				get_unaligned_be32(&common->cmnd[6]);
		reply = check_command_size_in_blocks(common, 12,
				      DATA_DIR_FROM_HOST,
2085 2086 2087
				      (1<<1) | (0xf<<2) | (0xf<<6), 1,
				      "WRITE(12)");
		if (reply == 0)
2088
			reply = do_write(common);
2089 2090
		break;

2091 2092
	/*
	 * Some mandatory commands that we recognize but don't implement.
2093 2094
	 * They don't mean much in this setting.  It's left as an exercise
	 * for anyone interested to implement RESERVE and RELEASE in terms
2095 2096
	 * of Posix locks.
	 */
2097 2098 2099 2100
	case FORMAT_UNIT:
	case RELEASE:
	case RESERVE:
	case SEND_DIAGNOSTIC:
2101
		/* Fall through */
2102 2103

	default:
2104
unknown_cmnd:
2105 2106 2107
		common->data_size_from_cmnd = 0;
		sprintf(unknown, "Unknown x%02x", common->cmnd[0]);
		reply = check_command(common, common->cmnd_size,
2108
				      DATA_DIR_UNKNOWN, ~0, 0, unknown);
2109
		if (reply == 0) {
2110
			common->curlun->sense_data = SS_INVALID_COMMAND;
2111 2112 2113 2114
			reply = -EINVAL;
		}
		break;
	}
2115
	up_read(&common->filesem);
2116 2117 2118 2119 2120 2121

	if (reply == -EINTR || signal_pending(current))
		return -EINTR;

	/* Set up the single reply buffer for finish_reply() */
	if (reply == -EINVAL)
2122
		reply = 0;		/* Error reply length */
2123
	if (reply >= 0 && common->data_dir == DATA_DIR_TO_HOST) {
2124
		reply = min((u32)reply, common->data_size_from_cmnd);
2125 2126
		bh->inreq->length = reply;
		bh->state = BUF_STATE_FULL;
2127
		common->residue -= reply;
2128
	}				/* Otherwise it's already set */
2129 2130 2131 2132 2133 2134 2135 2136 2137

	return 0;
}


/*-------------------------------------------------------------------------*/

static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
{
2138
	struct usb_request	*req = bh->outreq;
2139
	struct bulk_cb_wrap	*cbw = req->buf;
2140
	struct fsg_common	*common = fsg->common;
2141 2142 2143 2144 2145 2146

	/* Was this a real packet?  Should it be ignored? */
	if (req->status || test_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags))
		return -EINVAL;

	/* Is the CBW valid? */
2147
	if (req->actual != US_BULK_CB_WRAP_LEN ||
2148
			cbw->Signature != cpu_to_le32(
2149
				US_BULK_CB_SIGN)) {
2150 2151 2152 2153
		DBG(fsg, "invalid CBW: len %u sig 0x%x\n",
				req->actual,
				le32_to_cpu(cbw->Signature));

2154 2155
		/*
		 * The Bulk-only spec says we MUST stall the IN endpoint
2156 2157 2158 2159 2160 2161 2162
		 * (6.6.1), so it's unavoidable.  It also says we must
		 * retain this state until the next reset, but there's
		 * no way to tell the controller driver it should ignore
		 * Clear-Feature(HALT) requests.
		 *
		 * We aren't required to halt the OUT endpoint; instead
		 * we can simply accept and discard any data received
2163 2164
		 * until the next reset.
		 */
2165 2166 2167 2168 2169 2170
		wedge_bulk_in_endpoint(fsg);
		set_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
		return -EINVAL;
	}

	/* Is the CBW meaningful? */
2171
	if (cbw->Lun >= FSG_MAX_LUNS || cbw->Flags & ~US_BULK_FLAG_IN ||
2172 2173 2174 2175 2176
			cbw->Length <= 0 || cbw->Length > MAX_COMMAND_SIZE) {
		DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, "
				"cmdlen %u\n",
				cbw->Lun, cbw->Flags, cbw->Length);

2177 2178 2179 2180
		/*
		 * We can do anything we want here, so let's stall the
		 * bulk pipes if we are allowed to.
		 */
2181
		if (common->can_stall) {
2182 2183 2184 2185 2186 2187 2188
			fsg_set_halt(fsg, fsg->bulk_out);
			halt_bulk_in_endpoint(fsg);
		}
		return -EINVAL;
	}

	/* Save the command for later */
2189 2190
	common->cmnd_size = cbw->Length;
	memcpy(common->cmnd, cbw->CDB, common->cmnd_size);
2191
	if (cbw->Flags & US_BULK_FLAG_IN)
2192
		common->data_dir = DATA_DIR_TO_HOST;
2193
	else
2194 2195 2196 2197 2198
		common->data_dir = DATA_DIR_FROM_HOST;
	common->data_size = le32_to_cpu(cbw->DataTransferLength);
	if (common->data_size == 0)
		common->data_dir = DATA_DIR_NONE;
	common->lun = cbw->Lun;
2199 2200 2201 2202
	if (common->lun >= 0 && common->lun < common->nluns)
		common->curlun = &common->luns[common->lun];
	else
		common->curlun = NULL;
2203
	common->tag = cbw->Tag;
2204 2205 2206
	return 0;
}

2207
static int get_next_command(struct fsg_common *common)
2208 2209 2210 2211
{
	struct fsg_buffhd	*bh;
	int			rc = 0;

2212
	/* Wait for the next buffer to become available */
2213
	bh = common->next_buffhd_to_fill;
2214
	while (bh->state != BUF_STATE_EMPTY) {
2215
		rc = sleep_thread(common);
2216 2217 2218
		if (rc)
			return rc;
	}
2219

2220
	/* Queue a request to read a Bulk-only CBW */
2221
	set_bulk_out_req_length(common, bh, US_BULK_CB_WRAP_LEN);
2222
	if (!start_out_transfer(common, bh))
2223 2224
		/* Don't know what to do if common->fsg is NULL */
		return -EIO;
2225

2226 2227
	/*
	 * We will drain the buffer in software, which means we
2228
	 * can reuse it for the next filling.  No need to advance
2229 2230
	 * next_buffhd_to_fill.
	 */
2231

2232 2233
	/* Wait for the CBW to arrive */
	while (bh->state != BUF_STATE_FULL) {
2234
		rc = sleep_thread(common);
2235 2236
		if (rc)
			return rc;
2237
	}
2238
	smp_rmb();
2239
	rc = fsg_is_set(common) ? received_cbw(common->fsg, bh) : -EIO;
2240 2241
	bh->state = BUF_STATE_EMPTY;

2242 2243 2244 2245 2246 2247
	return rc;
}


/*-------------------------------------------------------------------------*/

2248
static int alloc_request(struct fsg_common *common, struct usb_ep *ep,
2249 2250 2251 2252 2253
		struct usb_request **preq)
{
	*preq = usb_ep_alloc_request(ep, GFP_ATOMIC);
	if (*preq)
		return 0;
2254
	ERROR(common, "can't allocate request for %s\n", ep->name);
2255 2256 2257
	return -ENOMEM;
}

2258 2259
/* Reset interface setting and re-init endpoint state (toggle etc). */
static int do_set_interface(struct fsg_common *common, struct fsg_dev *new_fsg)
2260
{
2261 2262
	struct fsg_dev *fsg;
	int i, rc = 0;
2263

2264 2265
	if (common->running)
		DBG(common, "reset interface\n");
2266 2267 2268

reset:
	/* Deallocate the requests */
2269 2270
	if (common->fsg) {
		fsg = common->fsg;
2271

2272
		for (i = 0; i < fsg_num_buffers; ++i) {
2273
			struct fsg_buffhd *bh = &common->buffhds[i];
2274

2275 2276 2277 2278 2279 2280 2281 2282
			if (bh->inreq) {
				usb_ep_free_request(fsg->bulk_in, bh->inreq);
				bh->inreq = NULL;
			}
			if (bh->outreq) {
				usb_ep_free_request(fsg->bulk_out, bh->outreq);
				bh->outreq = NULL;
			}
2283
		}
2284 2285 2286 2287 2288 2289 2290 2291 2292

		/* Disable the endpoints */
		if (fsg->bulk_in_enabled) {
			usb_ep_disable(fsg->bulk_in);
			fsg->bulk_in_enabled = 0;
		}
		if (fsg->bulk_out_enabled) {
			usb_ep_disable(fsg->bulk_out);
			fsg->bulk_out_enabled = 0;
2293 2294
		}

2295 2296
		common->fsg = NULL;
		wake_up(&common->fsg_wait);
2297 2298
	}

2299
	common->running = 0;
2300
	if (!new_fsg || rc)
2301 2302
		return rc;

2303 2304
	common->fsg = new_fsg;
	fsg = common->fsg;
2305

2306
	/* Enable the endpoints */
2307 2308 2309 2310
	rc = config_ep_by_speed(common->gadget, &(fsg->function), fsg->bulk_in);
	if (rc)
		goto reset;
	rc = usb_ep_enable(fsg->bulk_in);
2311 2312
	if (rc)
		goto reset;
2313
	fsg->bulk_in->driver_data = common;
2314
	fsg->bulk_in_enabled = 1;
2315

2316 2317 2318 2319 2320
	rc = config_ep_by_speed(common->gadget, &(fsg->function),
				fsg->bulk_out);
	if (rc)
		goto reset;
	rc = usb_ep_enable(fsg->bulk_out);
2321 2322
	if (rc)
		goto reset;
2323
	fsg->bulk_out->driver_data = common;
2324
	fsg->bulk_out_enabled = 1;
2325
	common->bulk_out_maxpacket = usb_endpoint_maxp(fsg->bulk_out->desc);
2326 2327 2328
	clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);

	/* Allocate the requests */
2329
	for (i = 0; i < fsg_num_buffers; ++i) {
2330 2331 2332
		struct fsg_buffhd	*bh = &common->buffhds[i];

		rc = alloc_request(common, fsg->bulk_in, &bh->inreq);
2333
		if (rc)
2334
			goto reset;
2335
		rc = alloc_request(common, fsg->bulk_out, &bh->outreq);
2336
		if (rc)
2337
			goto reset;
2338 2339 2340 2341
		bh->inreq->buf = bh->outreq->buf = bh->buf;
		bh->inreq->context = bh->outreq->context = bh;
		bh->inreq->complete = bulk_in_complete;
		bh->outreq->complete = bulk_out_complete;
2342
	}
2343

2344 2345 2346
	common->running = 1;
	for (i = 0; i < common->nluns; ++i)
		common->luns[i].unit_attention_data = SS_RESET_OCCURRED;
2347 2348 2349 2350
	return rc;
}


2351 2352 2353 2354 2355
/****************************** ALT CONFIGS ******************************/

static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
	struct fsg_dev *fsg = fsg_from_func(f);
2356
	fsg->common->new_fsg = fsg;
2357
	raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
2358
	return USB_GADGET_DELAYED_STATUS;
2359 2360 2361 2362 2363
}

static void fsg_disable(struct usb_function *f)
{
	struct fsg_dev *fsg = fsg_from_func(f);
2364
	fsg->common->new_fsg = NULL;
2365
	raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
2366 2367 2368
}


2369 2370
/*-------------------------------------------------------------------------*/

2371
static void handle_exception(struct fsg_common *common)
2372 2373 2374 2375 2376 2377 2378 2379
{
	siginfo_t		info;
	int			i;
	struct fsg_buffhd	*bh;
	enum fsg_state		old_state;
	struct fsg_lun		*curlun;
	unsigned int		exception_req_tag;

2380 2381 2382 2383
	/*
	 * Clear the existing signals.  Anything but SIGUSR1 is converted
	 * into a high-priority EXIT exception.
	 */
2384
	for (;;) {
2385 2386
		int sig =
			dequeue_signal_lock(current, &current->blocked, &info);
2387 2388 2389
		if (!sig)
			break;
		if (sig != SIGUSR1) {
2390 2391 2392
			if (common->state < FSG_STATE_EXIT)
				DBG(common, "Main thread exiting on signal\n");
			raise_exception(common, FSG_STATE_EXIT);
2393 2394 2395 2396
		}
	}

	/* Cancel all the pending transfers */
2397
	if (likely(common->fsg)) {
2398
		for (i = 0; i < fsg_num_buffers; ++i) {
2399 2400 2401 2402 2403 2404
			bh = &common->buffhds[i];
			if (bh->inreq_busy)
				usb_ep_dequeue(common->fsg->bulk_in, bh->inreq);
			if (bh->outreq_busy)
				usb_ep_dequeue(common->fsg->bulk_out,
					       bh->outreq);
2405 2406
		}

2407 2408 2409
		/* Wait until everything is idle */
		for (;;) {
			int num_active = 0;
2410
			for (i = 0; i < fsg_num_buffers; ++i) {
2411 2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425
				bh = &common->buffhds[i];
				num_active += bh->inreq_busy + bh->outreq_busy;
			}
			if (num_active == 0)
				break;
			if (sleep_thread(common))
				return;
		}

		/* Clear out the controller's fifos */
		if (common->fsg->bulk_in_enabled)
			usb_ep_fifo_flush(common->fsg->bulk_in);
		if (common->fsg->bulk_out_enabled)
			usb_ep_fifo_flush(common->fsg->bulk_out);
	}
2426

2427 2428 2429 2430
	/*
	 * Reset the I/O buffer states and pointers, the SCSI
	 * state, and the exception.  Then invoke the handler.
	 */
2431
	spin_lock_irq(&common->lock);
2432

2433
	for (i = 0; i < fsg_num_buffers; ++i) {
2434
		bh = &common->buffhds[i];
2435 2436
		bh->state = BUF_STATE_EMPTY;
	}
2437 2438 2439 2440
	common->next_buffhd_to_fill = &common->buffhds[0];
	common->next_buffhd_to_drain = &common->buffhds[0];
	exception_req_tag = common->exception_req_tag;
	old_state = common->state;
2441 2442

	if (old_state == FSG_STATE_ABORT_BULK_OUT)
2443
		common->state = FSG_STATE_STATUS_PHASE;
2444
	else {
2445 2446
		for (i = 0; i < common->nluns; ++i) {
			curlun = &common->luns[i];
2447
			curlun->prevent_medium_removal = 0;
2448 2449
			curlun->sense_data = SS_NO_SENSE;
			curlun->unit_attention_data = SS_NO_SENSE;
2450 2451 2452
			curlun->sense_data_info = 0;
			curlun->info_valid = 0;
		}
2453
		common->state = FSG_STATE_IDLE;
2454
	}
2455
	spin_unlock_irq(&common->lock);
2456 2457 2458 2459

	/* Carry out any extra actions required for the exception */
	switch (old_state) {
	case FSG_STATE_ABORT_BULK_OUT:
2460 2461 2462 2463 2464
		send_status(common);
		spin_lock_irq(&common->lock);
		if (common->state == FSG_STATE_STATUS_PHASE)
			common->state = FSG_STATE_IDLE;
		spin_unlock_irq(&common->lock);
2465 2466 2467
		break;

	case FSG_STATE_RESET:
2468 2469
		/*
		 * In case we were forced against our will to halt a
2470
		 * bulk endpoint, clear the halt now.  (The SuperH UDC
2471 2472
		 * requires this.)
		 */
2473 2474 2475 2476 2477
		if (!fsg_is_set(common))
			break;
		if (test_and_clear_bit(IGNORE_BULK_OUT,
				       &common->fsg->atomic_bitflags))
			usb_ep_clear_halt(common->fsg->bulk_in);
2478

2479 2480
		if (common->ep0_req_tag == exception_req_tag)
			ep0_queue(common);	/* Complete the status stage */
2481

2482 2483
		/*
		 * Technically this should go here, but it would only be
2484
		 * a waste of time.  Ditto for the INTERFACE_CHANGE and
2485 2486
		 * CONFIG_CHANGE cases.
		 */
2487 2488
		/* for (i = 0; i < common->nluns; ++i) */
		/*	common->luns[i].unit_attention_data = */
2489
		/*		SS_RESET_OCCURRED;  */
2490 2491 2492
		break;

	case FSG_STATE_CONFIG_CHANGE:
2493
		do_set_interface(common, common->new_fsg);
2494 2495
		if (common->new_fsg)
			usb_composite_setup_continue(common->cdev);
2496 2497 2498 2499
		break;

	case FSG_STATE_EXIT:
	case FSG_STATE_TERMINATED:
2500
		do_set_interface(common, NULL);		/* Free resources */
2501 2502 2503
		spin_lock_irq(&common->lock);
		common->state = FSG_STATE_TERMINATED;	/* Stop the thread */
		spin_unlock_irq(&common->lock);
2504
		break;
2505 2506 2507 2508 2509 2510 2511 2512

	case FSG_STATE_INTERFACE_CHANGE:
	case FSG_STATE_DISCONNECT:
	case FSG_STATE_COMMAND_PHASE:
	case FSG_STATE_DATA_PHASE:
	case FSG_STATE_STATUS_PHASE:
	case FSG_STATE_IDLE:
		break;
2513 2514 2515 2516 2517 2518
	}
}


/*-------------------------------------------------------------------------*/

2519
static int fsg_main_thread(void *common_)
2520
{
2521
	struct fsg_common	*common = common_;
2522

2523 2524 2525 2526
	/*
	 * Allow the thread to be killed by a signal, but set the signal mask
	 * to block everything but INT, TERM, KILL, and USR1.
	 */
2527 2528 2529 2530 2531 2532 2533 2534
	allow_signal(SIGINT);
	allow_signal(SIGTERM);
	allow_signal(SIGKILL);
	allow_signal(SIGUSR1);

	/* Allow the thread to be frozen */
	set_freezable();

2535 2536
	/*
	 * Arrange for userspace references to be interpreted as kernel
2537
	 * pointers.  That way we can pass a kernel pointer to a routine
2538 2539
	 * that expects a __user pointer and it will work okay.
	 */
2540 2541 2542
	set_fs(get_ds());

	/* The main loop */
2543 2544 2545
	while (common->state != FSG_STATE_TERMINATED) {
		if (exception_in_progress(common) || signal_pending(current)) {
			handle_exception(common);
2546 2547 2548
			continue;
		}

2549 2550
		if (!common->running) {
			sleep_thread(common);
2551 2552 2553
			continue;
		}

2554
		if (get_next_command(common))
2555 2556
			continue;

2557 2558 2559 2560
		spin_lock_irq(&common->lock);
		if (!exception_in_progress(common))
			common->state = FSG_STATE_DATA_PHASE;
		spin_unlock_irq(&common->lock);
2561

2562
		if (do_scsi_command(common) || finish_reply(common))
2563 2564
			continue;

2565 2566 2567 2568
		spin_lock_irq(&common->lock);
		if (!exception_in_progress(common))
			common->state = FSG_STATE_STATUS_PHASE;
		spin_unlock_irq(&common->lock);
2569

2570
		if (send_status(common))
2571 2572
			continue;

2573 2574 2575 2576
		spin_lock_irq(&common->lock);
		if (!exception_in_progress(common))
			common->state = FSG_STATE_IDLE;
		spin_unlock_irq(&common->lock);
2577
	}
2578

2579 2580 2581
	spin_lock_irq(&common->lock);
	common->thread_task = NULL;
	spin_unlock_irq(&common->lock);
2582

2583 2584
	if (!common->ops || !common->ops->thread_exits
	 || common->ops->thread_exits(common) < 0) {
2585 2586 2587 2588 2589 2590 2591 2592 2593 2594 2595 2596 2597
		struct fsg_lun *curlun = common->luns;
		unsigned i = common->nluns;

		down_write(&common->filesem);
		for (; i--; ++curlun) {
			if (!fsg_lun_is_open(curlun))
				continue;

			fsg_lun_close(curlun);
			curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT;
		}
		up_write(&common->filesem);
	}
2598

2599
	/* Let fsg_unbind() know the thread has exited */
2600
	complete_and_exit(&common->thread_notifier, 0);
2601 2602 2603
}


2604
/*************************** DEVICE ATTRIBUTES ***************************/
2605

2606
static DEVICE_ATTR(ro, 0644, fsg_show_ro, fsg_store_ro);
2607
static DEVICE_ATTR(nofua, 0644, fsg_show_nofua, fsg_store_nofua);
2608
static DEVICE_ATTR(file, 0644, fsg_show_file, fsg_store_file);
2609

2610 2611 2612 2613 2614
static struct device_attribute dev_attr_ro_cdrom =
	__ATTR(ro, 0444, fsg_show_ro, NULL);
static struct device_attribute dev_attr_file_nonremovable =
	__ATTR(file, 0444, fsg_show_file, NULL);

2615

2616 2617 2618
/****************************** FSG COMMON ******************************/

static void fsg_common_release(struct kref *ref);
2619

/* Release callback for a LUN's embedded struct device */
static void fsg_lun_release(struct device *dev)
{
	/* Nothing needs to be done */
}

2625
static inline void fsg_common_get(struct fsg_common *common)
2626
{
2627
	kref_get(&common->ref);
2628 2629
}

2630 2631 2632 2633 2634 2635
static inline void fsg_common_put(struct fsg_common *common)
{
	kref_put(&common->ref, fsg_common_release);
}

static struct fsg_common *fsg_common_init(struct fsg_common *common,
2636 2637
					  struct usb_composite_dev *cdev,
					  struct fsg_config *cfg)
2638
{
2639
	struct usb_gadget *gadget = cdev->gadget;
2640 2641
	struct fsg_buffhd *bh;
	struct fsg_lun *curlun;
2642
	struct fsg_lun_config *lcfg;
2643
	int nluns, i, rc;
2644
	char *pathbuf;
2645

2646 2647 2648 2649
	rc = fsg_num_buffers_validate();
	if (rc != 0)
		return ERR_PTR(rc);

2650
	/* Find out how many LUNs there should be */
2651
	nluns = cfg->nluns;
2652 2653 2654 2655 2656 2657 2658 2659 2660 2661 2662 2663
	if (nluns < 1 || nluns > FSG_MAX_LUNS) {
		dev_err(&gadget->dev, "invalid number of LUNs: %u\n", nluns);
		return ERR_PTR(-EINVAL);
	}

	/* Allocate? */
	if (!common) {
		common = kzalloc(sizeof *common, GFP_KERNEL);
		if (!common)
			return ERR_PTR(-ENOMEM);
		common->free_storage_on_release = 1;
	} else {
2664
		memset(common, 0, sizeof *common);
2665 2666
		common->free_storage_on_release = 0;
	}
2667

2668 2669 2670 2671 2672 2673 2674 2675
	common->buffhds = kcalloc(fsg_num_buffers,
				  sizeof *(common->buffhds), GFP_KERNEL);
	if (!common->buffhds) {
		if (common->free_storage_on_release)
			kfree(common);
		return ERR_PTR(-ENOMEM);
	}

2676
	common->ops = cfg->ops;
2677 2678
	common->private_data = cfg->private_data;

2679
	common->gadget = gadget;
2680 2681
	common->ep0 = gadget->ep0;
	common->ep0req = cdev->req;
2682
	common->cdev = cdev;
2683 2684 2685 2686

	/* Maybe allocate device-global string IDs, and patch descriptors */
	if (fsg_strings[FSG_STRING_INTERFACE].id == 0) {
		rc = usb_string_id(cdev);
2687 2688
		if (unlikely(rc < 0))
			goto error_release;
2689 2690 2691
		fsg_strings[FSG_STRING_INTERFACE].id = rc;
		fsg_intf_desc.iInterface = rc;
	}
2692

2693 2694 2695 2696
	/*
	 * Create the LUNs, open their backing files, and register the
	 * LUN devices in sysfs.
	 */
2697
	curlun = kcalloc(nluns, sizeof(*curlun), GFP_KERNEL);
2698 2699 2700
	if (unlikely(!curlun)) {
		rc = -ENOMEM;
		goto error_release;
2701 2702 2703 2704 2705
	}
	common->luns = curlun;

	init_rwsem(&common->filesem);

2706 2707 2708
	for (i = 0, lcfg = cfg->luns; i < nluns; ++i, ++curlun, ++lcfg) {
		curlun->cdrom = !!lcfg->cdrom;
		curlun->ro = lcfg->cdrom || lcfg->ro;
2709
		curlun->initially_ro = curlun->ro;
2710
		curlun->removable = lcfg->removable;
2711 2712
		curlun->dev.release = fsg_lun_release;
		curlun->dev.parent = &gadget->dev;
2713
		/* curlun->dev.driver = &fsg_driver.driver; XXX */
2714
		dev_set_drvdata(&curlun->dev, &common->filesem);
2715
		dev_set_name(&curlun->dev, "lun%d", i);
2716 2717 2718 2719 2720

		rc = device_register(&curlun->dev);
		if (rc) {
			INFO(common, "failed to register LUN%d: %d\n", i, rc);
			common->nluns = i;
2721
			put_device(&curlun->dev);
2722 2723 2724
			goto error_release;
		}

2725 2726 2727 2728
		rc = device_create_file(&curlun->dev,
					curlun->cdrom
				      ? &dev_attr_ro_cdrom
				      : &dev_attr_ro);
2729 2730
		if (rc)
			goto error_luns;
2731 2732 2733 2734
		rc = device_create_file(&curlun->dev,
					curlun->removable
				      ? &dev_attr_file
				      : &dev_attr_file_nonremovable);
2735 2736 2737
		if (rc)
			goto error_luns;
		rc = device_create_file(&curlun->dev, &dev_attr_nofua);
2738 2739 2740
		if (rc)
			goto error_luns;

2741 2742
		if (lcfg->filename) {
			rc = fsg_lun_open(curlun, lcfg->filename);
2743 2744
			if (rc)
				goto error_luns;
2745
		} else if (!curlun->removable) {
2746 2747 2748 2749 2750 2751 2752 2753 2754
			ERROR(common, "no file given for LUN%d\n", i);
			rc = -EINVAL;
			goto error_luns;
		}
	}
	common->nluns = nluns;

	/* Data buffers cyclic list */
	bh = common->buffhds;
2755
	i = fsg_num_buffers;
2756
	goto buffhds_first_it;
2757 2758
	do {
		bh->next = bh + 1;
2759 2760 2761 2762 2763 2764 2765 2766
		++bh;
buffhds_first_it:
		bh->buf = kmalloc(FSG_BUFLEN, GFP_KERNEL);
		if (unlikely(!bh->buf)) {
			rc = -ENOMEM;
			goto error_release;
		}
	} while (--i);
2767 2768
	bh->next = common->buffhds;

2769
	/* Prepare inquiryString */
2770
	i = get_default_bcdDevice();
2771
	snprintf(common->inquiry_string, sizeof common->inquiry_string,
2772
		 "%-8s%-16s%04x", cfg->vendor_name ?: "Linux",
2773
		 /* Assume product name dependent on the first LUN */
2774
		 cfg->product_name ?: (common->luns->cdrom
2775
				     ? "File-Stor Gadget"
2776
				     : "File-CD Gadget"),
2777
		 i);
2778

2779 2780
	/*
	 * Some peripheral controllers are known not to be able to
2781 2782 2783
	 * halt bulk endpoints correctly.  If one of them is present,
	 * disable stalls.
	 */
2784
	common->can_stall = cfg->can_stall &&
2785
		!(gadget_is_at91(common->gadget));
2786

2787
	spin_lock_init(&common->lock);
2788
	kref_init(&common->ref);
2789 2790 2791

	/* Tell the thread to start working */
	common->thread_task =
2792
		kthread_create(fsg_main_thread, common, "file-storage");
2793 2794 2795 2796 2797
	if (IS_ERR(common->thread_task)) {
		rc = PTR_ERR(common->thread_task);
		goto error_release;
	}
	init_completion(&common->thread_notifier);
2798
	init_waitqueue_head(&common->fsg_wait);
2799

2800 2801 2802 2803 2804 2805 2806 2807 2808 2809 2810 2811 2812 2813 2814 2815 2816 2817 2818 2819 2820 2821 2822 2823 2824 2825
	/* Information */
	INFO(common, FSG_DRIVER_DESC ", version: " FSG_DRIVER_VERSION "\n");
	INFO(common, "Number of LUNs=%d\n", common->nluns);

	pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
	for (i = 0, nluns = common->nluns, curlun = common->luns;
	     i < nluns;
	     ++curlun, ++i) {
		char *p = "(no medium)";
		if (fsg_lun_is_open(curlun)) {
			p = "(error)";
			if (pathbuf) {
				p = d_path(&curlun->filp->f_path,
					   pathbuf, PATH_MAX);
				if (IS_ERR(p))
					p = "(error)";
			}
		}
		LINFO(curlun, "LUN: %s%s%sfile: %s\n",
		      curlun->removable ? "removable " : "",
		      curlun->ro ? "read only " : "",
		      curlun->cdrom ? "CD-ROM " : "",
		      p);
	}
	kfree(pathbuf);

2826 2827 2828 2829
	DBG(common, "I/O thread pid: %d\n", task_pid_nr(common->thread_task));

	wake_up_process(common->thread_task);

2830 2831 2832 2833 2834
	return common;

error_luns:
	common->nluns = i + 1;
error_release:
2835
	common->state = FSG_STATE_TERMINATED;	/* The thread is dead */
2836
	/* Call fsg_common_release() directly, ref might be not initialised. */
2837 2838 2839 2840 2841 2842
	fsg_common_release(&common->ref);
	return ERR_PTR(rc);
}

static void fsg_common_release(struct kref *ref)
{
2843
	struct fsg_common *common = container_of(ref, struct fsg_common, ref);
2844

2845 2846 2847 2848 2849 2850
	/* If the thread isn't already dead, tell it to exit now */
	if (common->state != FSG_STATE_TERMINATED) {
		raise_exception(common, FSG_STATE_EXIT);
		wait_for_completion(&common->thread_notifier);
	}

2851 2852 2853 2854 2855 2856
	if (likely(common->luns)) {
		struct fsg_lun *lun = common->luns;
		unsigned i = common->nluns;

		/* In error recovery common->nluns may be zero. */
		for (; i; --i, ++lun) {
2857
			device_remove_file(&lun->dev, &dev_attr_nofua);
2858 2859 2860 2861 2862 2863 2864 2865
			device_remove_file(&lun->dev,
					   lun->cdrom
					 ? &dev_attr_ro_cdrom
					 : &dev_attr_ro);
			device_remove_file(&lun->dev,
					   lun->removable
					 ? &dev_attr_file
					 : &dev_attr_file_nonremovable);
2866 2867 2868
			fsg_lun_close(lun);
			device_unregister(&lun->dev);
		}
2869

2870
		kfree(common->luns);
2871 2872
	}

2873 2874
	{
		struct fsg_buffhd *bh = common->buffhds;
2875
		unsigned i = fsg_num_buffers;
2876 2877 2878 2879
		do {
			kfree(bh->buf);
		} while (++bh, --i);
	}
2880

2881
	kfree(common->buffhds);
2882 2883 2884 2885 2886 2887 2888
	if (common->free_storage_on_release)
		kfree(common);
}


/*-------------------------------------------------------------------------*/

2889
static void fsg_unbind(struct usb_configuration *c, struct usb_function *f)
2890
{
2891
	struct fsg_dev		*fsg = fsg_from_func(f);
2892
	struct fsg_common	*common = fsg->common;
2893 2894

	DBG(fsg, "unbind\n");
2895 2896 2897 2898 2899 2900 2901 2902
	if (fsg->common->fsg == fsg) {
		fsg->common->new_fsg = NULL;
		raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
		/* FIXME: make interruptible or killable somehow? */
		wait_event(common->fsg_wait, common->fsg != fsg);
	}

	fsg_common_put(common);
2903
	usb_free_all_descriptors(&fsg->function);
2904
	kfree(fsg);
2905 2906
}

2907
static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
2908
{
2909 2910
	struct fsg_dev		*fsg = fsg_from_func(f);
	struct usb_gadget	*gadget = c->cdev->gadget;
2911 2912
	int			i;
	struct usb_ep		*ep;
2913 2914
	unsigned		max_burst;
	int			ret;
2915 2916 2917

	fsg->gadget = gadget;

2918 2919 2920 2921 2922 2923
	/* New interface */
	i = usb_interface_id(c, f);
	if (i < 0)
		return i;
	fsg_intf_desc.bInterfaceNumber = i;
	fsg->interface_number = i;
2924 2925 2926 2927 2928

	/* Find all the endpoints we will use */
	ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_in_desc);
	if (!ep)
		goto autoconf_fail;
2929
	ep->driver_data = fsg->common;	/* claim the endpoint */
2930 2931 2932 2933 2934
	fsg->bulk_in = ep;

	ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_out_desc);
	if (!ep)
		goto autoconf_fail;
2935
	ep->driver_data = fsg->common;	/* claim the endpoint */
2936 2937
	fsg->bulk_out = ep;

2938 2939 2940 2941 2942
	/* Assume endpoint addresses are the same for both speeds */
	fsg_hs_bulk_in_desc.bEndpointAddress =
		fsg_fs_bulk_in_desc.bEndpointAddress;
	fsg_hs_bulk_out_desc.bEndpointAddress =
		fsg_fs_bulk_out_desc.bEndpointAddress;
2943

2944 2945
	/* Calculate bMaxBurst, we know packet size is 1024 */
	max_burst = min_t(unsigned, FSG_BUFLEN / 1024, 15);
2946

2947 2948 2949
	fsg_ss_bulk_in_desc.bEndpointAddress =
		fsg_fs_bulk_in_desc.bEndpointAddress;
	fsg_ss_bulk_in_comp_desc.bMaxBurst = max_burst;
2950

2951 2952 2953
	fsg_ss_bulk_out_desc.bEndpointAddress =
		fsg_fs_bulk_out_desc.bEndpointAddress;
	fsg_ss_bulk_out_comp_desc.bMaxBurst = max_burst;
2954

2955 2956 2957 2958
	ret = usb_assign_descriptors(f, fsg_fs_function, fsg_hs_function,
			fsg_ss_function);
	if (ret)
		goto autoconf_fail;
2959

2960 2961 2962 2963
	return 0;

autoconf_fail:
	ERROR(fsg, "unable to autoconfigure all endpoints\n");
2964
	return -ENOTSUPP;
2965 2966
}

/****************************** ADD FUNCTION ******************************/

2969 2970 2971
static struct usb_gadget_strings *fsg_strings_array[] = {
	&fsg_stringtab,
	NULL,
2972 2973
};

2974 2975 2976
static int fsg_bind_config(struct usb_composite_dev *cdev,
			   struct usb_configuration *c,
			   struct fsg_common *common)
2977
{
2978 2979 2980 2981 2982 2983
	struct fsg_dev *fsg;
	int rc;

	fsg = kzalloc(sizeof *fsg, GFP_KERNEL);
	if (unlikely(!fsg))
		return -ENOMEM;
2984

2985 2986 2987 2988 2989 2990 2991 2992 2993
	fsg->function.name        = FSG_DRIVER_DESC;
	fsg->function.strings     = fsg_strings_array;
	fsg->function.bind        = fsg_bind;
	fsg->function.unbind      = fsg_unbind;
	fsg->function.setup       = fsg_setup;
	fsg->function.set_alt     = fsg_set_alt;
	fsg->function.disable     = fsg_disable;

	fsg->common               = common;
2994 2995
	/*
	 * Our caller holds a reference to common structure so we
2996 2997 2998
	 * don't have to be worry about it being freed until we return
	 * from this function.  So instead of incrementing counter now
	 * and decrement in error recovery we increment it only when
2999 3000
	 * call to usb_add_function() was successful.
	 */
3001 3002

	rc = usb_add_function(c, &fsg->function);
3003
	if (unlikely(rc))
3004 3005 3006
		kfree(fsg);
	else
		fsg_common_get(fsg->common);
3007
	return rc;
3008
}


/************************* Module parameters *************************/

struct fsg_module_parameters {
	char		*file[FSG_MAX_LUNS];
3015 3016 3017 3018
	bool		ro[FSG_MAX_LUNS];
	bool		removable[FSG_MAX_LUNS];
	bool		cdrom[FSG_MAX_LUNS];
	bool		nofua[FSG_MAX_LUNS];
3019 3020

	unsigned int	file_count, ro_count, removable_count, cdrom_count;
3021
	unsigned int	nofua_count;
3022
	unsigned int	luns;	/* nluns */
3023
	bool		stall;	/* can_stall */
3024 3025 3026 3027 3028 3029 3030 3031 3032 3033 3034 3035 3036 3037 3038 3039 3040 3041 3042 3043 3044 3045
};

/*
 * Helpers for declaring the module parameters that fill in a
 * struct fsg_module_parameters.  @prefix lets multiple instances of
 * the function coexist in one module; @params is the variable name of
 * the fsg_module_parameters instance.  All parameters are read-only
 * from sysfs (S_IRUGO).
 */
#define _FSG_MODULE_PARAM_ARRAY(prefix, params, name, type, desc)	\
	module_param_array_named(prefix ## name, params.name, type,	\
				 &prefix ## params.name ## _count,	\
				 S_IRUGO);				\
	MODULE_PARM_DESC(prefix ## name, desc)

#define _FSG_MODULE_PARAM(prefix, params, name, type, desc)		\
	module_param_named(prefix ## name, params.name, type,		\
			   S_IRUGO);					\
	MODULE_PARM_DESC(prefix ## name, desc)

/* Declare the full set of mass-storage module parameters. */
#define FSG_MODULE_PARAMETERS(prefix, params)				\
	_FSG_MODULE_PARAM_ARRAY(prefix, params, file, charp,		\
				"names of backing files or devices");	\
	_FSG_MODULE_PARAM_ARRAY(prefix, params, ro, bool,		\
				"true to force read-only");		\
	_FSG_MODULE_PARAM_ARRAY(prefix, params, removable, bool,	\
				"true to simulate removable media");	\
	_FSG_MODULE_PARAM_ARRAY(prefix, params, cdrom, bool,		\
				"true to simulate CD-ROM instead of disk"); \
	_FSG_MODULE_PARAM_ARRAY(prefix, params, nofua, bool,		\
				"true to ignore SCSI WRITE(10,12) FUA bit"); \
	_FSG_MODULE_PARAM(prefix, params, luns, uint,			\
			  "number of LUNs");				\
	_FSG_MODULE_PARAM(prefix, params, stall, bool,			\
			  "false to prevent bulk stalls")

static void
fsg_config_from_params(struct fsg_config *cfg,
		       const struct fsg_module_parameters *params)
{
	struct fsg_lun_config *lun;
3058
	unsigned i;
3059 3060

	/* Configure LUNs */
3061 3062 3063 3064
	cfg->nluns =
		min(params->luns ?: (params->file_count ?: 1u),
		    (unsigned)FSG_MAX_LUNS);
	for (i = 0, lun = cfg->luns; i < cfg->nluns; ++i, ++lun) {
3065 3066
		lun->ro = !!params->ro[i];
		lun->cdrom = !!params->cdrom[i];
3067
		lun->removable = !!params->removable[i];
3068 3069 3070 3071 3072 3073
		lun->filename =
			params->file_count > i && params->file[i][0]
			? params->file[i]
			: 0;
	}

3074
	/* Let MSF use defaults */
3075 3076 3077
	cfg->vendor_name = 0;
	cfg->product_name = 0;

3078 3079
	cfg->ops = NULL;
	cfg->private_data = NULL;
3080

3081 3082 3083 3084 3085 3086 3087 3088 3089 3090 3091 3092 3093 3094 3095 3096 3097 3098
	/* Finalise */
	cfg->can_stall = params->stall;
}

/*
 * Convenience wrapper: build an fsg_config from module parameters and
 * initialise @common with it via fsg_common_init().  Returns whatever
 * fsg_common_init() returns (the common structure or an ERR_PTR).
 * Declared __attribute__((unused)) because not every user of this file
 * calls it.
 */
static inline struct fsg_common *
fsg_common_from_params(struct fsg_common *common,
		       struct usb_composite_dev *cdev,
		       const struct fsg_module_parameters *params)
	__attribute__((unused));
static inline struct fsg_common *
fsg_common_from_params(struct fsg_common *common,
		       struct usb_composite_dev *cdev,
		       const struct fsg_module_parameters *params)
{
	struct fsg_config cfg;
	fsg_config_from_params(&cfg, params);
	return fsg_common_init(common, cdev, &cfg);
}