rbd.c 144.9 KB
Newer Older
1

2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24
/*
   rbd.c -- Export ceph rados objects as a Linux block device


   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.



25
   For usage instructions, please refer to:
26

27
                 Documentation/ABI/testing/sysfs-bus-rbd
28 29 30 31 32 33 34

 */

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
35
#include <linux/parser.h>
36
#include <linux/bsearch.h>
37 38 39 40

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
C
Christoph Hellwig 已提交
41
#include <linux/blk-mq.h>
42 43
#include <linux/fs.h>
#include <linux/blkdev.h>
44
#include <linux/slab.h>
45
#include <linux/idr.h>
I
Ilya Dryomov 已提交
46
#include <linux/workqueue.h>
47 48 49

#include "rbd_types.h"

A
Alex Elder 已提交
50 51
#define RBD_DEBUG	/* Activate rbd_assert() calls */

A
Alex Elder 已提交
52 53 54 55 56 57 58 59 60
/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define	SECTOR_SHIFT	9
#define	SECTOR_SIZE	(1ULL << SECTOR_SHIFT)

61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93
/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value returns
 * -EINVAL without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
	unsigned int counter;

	counter = (unsigned int)__atomic_add_unless(v, 1, 0);
	if (counter <= (unsigned int)INT_MAX)
		return (int)counter;

	atomic_dec(v);

	return -EINVAL;
}

/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
	int counter;

	counter = atomic_dec_return(v);
	if (counter >= 0)
		return counter;

	atomic_inc(v);

	return -EINVAL;
}

A
Alex Elder 已提交
94
#define RBD_DRV_NAME "rbd"
95

96 97
#define RBD_MINORS_PER_MAJOR		256
#define RBD_SINGLE_MAJOR_PART_SHIFT	4
98

99 100
#define RBD_MAX_PARENT_CHAIN_LEN	16

101 102 103 104
#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN	\
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

105
#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */
106 107 108

#define RBD_SNAP_HEAD_NAME	"-"

109 110
#define	BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

111 112
/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
113
#define RBD_IMAGE_ID_LEN_MAX	64
114

115
#define RBD_OBJ_PREFIX_LEN_MAX	64
A
Alex Elder 已提交
116

117 118
#define RBD_RETRY_DELAY		msecs_to_jiffies(1000)

A
Alex Elder 已提交
119 120
/* Feature bits */

A
Alex Elder 已提交
121 122 123 124
#define RBD_FEATURE_LAYERING	(1<<0)
#define RBD_FEATURE_STRIPINGV2	(1<<1)
#define RBD_FEATURES_ALL \
	    (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)
A
Alex Elder 已提交
125 126 127

/* Features supported by this (client software) implementation. */

128
#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)
A
Alex Elder 已提交
129

A
Alex Elder 已提交
130 131 132 133
/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 */
134 135 136 137 138 139
#define DEV_NAME_LEN		32

/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
140
	/* These six fields never change for a given rbd image */
141
	char *object_prefix;
142 143 144
	__u8 obj_order;
	__u8 crypt_type;
	__u8 comp_type;
145 146 147
	u64 stripe_unit;
	u64 stripe_count;
	u64 features;		/* Might be changeable someday? */
148

A
Alex Elder 已提交
149 150 151
	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
152 153
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
154 155
};

156 157 158 159
/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
A
Alex Elder 已提交
160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
180 181 182
 */
struct rbd_spec {
	u64		pool_id;
183
	const char	*pool_name;
184

185 186
	const char	*image_id;
	const char	*image_name;
187 188

	u64		snap_id;
189
	const char	*snap_name;
190 191 192 193

	struct kref	kref;
};

194
/*
A
Alex Elder 已提交
195
 * an instance of the client.  multiple devices may share an rbd client.
196 197 198 199 200 201 202
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};

A
Alex Elder 已提交
203 204 205 206 207 208 209 210
struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define	BAD_WHICH	U32_MAX		/* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);

211 212 213
enum obj_request_type {
	OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};
A
Alex Elder 已提交
214

G
Guangliang Zhao 已提交
215 216 217
enum obj_operation_type {
	OBJ_OP_WRITE,
	OBJ_OP_READ,
218
	OBJ_OP_DISCARD,
G
Guangliang Zhao 已提交
219 220
};

221 222
enum obj_req_flags {
	OBJ_REQ_DONE,		/* completion flag: not done = 0, done = 1 */
223
	OBJ_REQ_IMG_DATA,	/* object usage: standalone = 0, image = 1 */
224 225
	OBJ_REQ_KNOWN,		/* EXISTS flag valid: no = 0, yes = 1 */
	OBJ_REQ_EXISTS,		/* target exists: no = 0, yes = 1 */
226 227
};

A
Alex Elder 已提交
228 229 230 231
struct rbd_obj_request {
	const char		*object_name;
	u64			offset;		/* object start byte */
	u64			length;		/* bytes from offset */
232
	unsigned long		flags;
A
Alex Elder 已提交
233

234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258
	/*
	 * An object request associated with an image will have its
	 * img_data flag set; a standalone object request will not.
	 *
	 * A standalone object request will have which == BAD_WHICH
	 * and a null obj_request pointer.
	 *
	 * An object request initiated in support of a layered image
	 * object (to check for its existence before a write) will
	 * have which == BAD_WHICH and a non-null obj_request pointer.
	 *
	 * Finally, an object request for rbd image data will have
	 * which != BAD_WHICH, and will have a non-null img_request
	 * pointer.  The value of which will be in the range
	 * 0..(img_request->obj_request_count-1).
	 */
	union {
		struct rbd_obj_request	*obj_request;	/* STAT op */
		struct {
			struct rbd_img_request	*img_request;
			u64			img_offset;
			/* links for img_request->obj_requests list */
			struct list_head	links;
		};
	};
A
Alex Elder 已提交
259 260 261
	u32			which;		/* posn image request list */

	enum obj_request_type	type;
262 263 264 265 266 267 268
	union {
		struct bio	*bio_list;
		struct {
			struct page	**pages;
			u32		page_count;
		};
	};
269
	struct page		**copyup_pages;
270
	u32			copyup_page_count;
A
Alex Elder 已提交
271 272 273 274

	struct ceph_osd_request	*osd_req;

	u64			xferred;	/* bytes transferred */
275
	int			result;
A
Alex Elder 已提交
276 277

	rbd_obj_callback_t	callback;
278
	struct completion	completion;
A
Alex Elder 已提交
279 280 281 282

	struct kref		kref;
};

A
Alex Elder 已提交
283
enum img_req_flags {
284 285
	IMG_REQ_WRITE,		/* I/O direction: read = 0, write = 1 */
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
286
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
287
	IMG_REQ_DISCARD,	/* discard: normal = 0, discard request = 1 */
A
Alex Elder 已提交
288 289
};

A
Alex Elder 已提交
290 291 292 293
struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	u64			offset;	/* starting image byte offset */
	u64			length;	/* byte count from offset */
A
Alex Elder 已提交
294
	unsigned long		flags;
A
Alex Elder 已提交
295
	union {
296
		u64			snap_id;	/* for reads */
A
Alex Elder 已提交
297
		struct ceph_snap_context *snapc;	/* for writes */
298 299 300 301
	};
	union {
		struct request		*rq;		/* block request */
		struct rbd_obj_request	*obj_request;	/* obj req initiator */
A
Alex Elder 已提交
302
	};
303
	struct page		**copyup_pages;
304
	u32			copyup_page_count;
A
Alex Elder 已提交
305 306 307
	spinlock_t		completion_lock;/* protects next_completion */
	u32			next_completion;
	rbd_img_callback_t	callback;
308
	u64			xferred;/* aggregate bytes transferred */
309
	int			result;	/* first nonzero obj_request result */
A
Alex Elder 已提交
310 311 312 313 314 315 316 317

	u32			obj_request_count;
	struct list_head	obj_requests;	/* rbd_obj_request structs */

	struct kref		kref;
};

#define for_each_obj_request(ireq, oreq) \
318
	list_for_each_entry(oreq, &(ireq)->obj_requests, links)
A
Alex Elder 已提交
319
#define for_each_obj_request_from(ireq, oreq) \
320
	list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
A
Alex Elder 已提交
321
#define for_each_obj_request_safe(ireq, oreq, n) \
322
	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
A
Alex Elder 已提交
323

324 325 326 327 328 329
enum rbd_watch_state {
	RBD_WATCH_STATE_UNREGISTERED,
	RBD_WATCH_STATE_REGISTERED,
	RBD_WATCH_STATE_ERROR,
};

A
Alex Elder 已提交
330
struct rbd_mapping {
A
Alex Elder 已提交
331
	u64                     size;
A
Alex Elder 已提交
332
	u64                     features;
A
Alex Elder 已提交
333 334 335
	bool			read_only;
};

336 337 338 339
/*
 * a single device
 */
struct rbd_device {
A
Alex Elder 已提交
340
	int			dev_id;		/* blkdev unique id */
341 342

	int			major;		/* blkdev assigned major */
343
	int			minor;
344 345
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

346
	u32			image_format;	/* Either 1 or 2 */
347 348 349 350
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

351
	spinlock_t		lock;		/* queue, flags, open_count */
352 353

	struct rbd_image_header	header;
354
	unsigned long		flags;		/* possibly lock protected */
355
	struct rbd_spec		*spec;
356
	struct rbd_options	*opts;
357

358
	struct ceph_object_id	header_oid;
359
	struct ceph_object_locator header_oloc;
360

361
	struct ceph_file_layout	layout;		/* used for all rbd requests */
362

363 364
	struct mutex		watch_mutex;
	enum rbd_watch_state	watch_state;
365
	struct ceph_osd_linger_request *watch_handle;
366 367
	u64			watch_cookie;
	struct delayed_work	watch_dwork;
368

369 370
	struct workqueue_struct	*task_wq;

371 372
	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
373
	atomic_t		parent_ref;
374
	struct rbd_device	*parent;
375

C
Christoph Hellwig 已提交
376 377 378
	/* Block layer tags. */
	struct blk_mq_tag_set	tag_set;

379 380
	/* protects updating the header */
	struct rw_semaphore     header_rwsem;
A
Alex Elder 已提交
381 382

	struct rbd_mapping	mapping;
383 384

	struct list_head	node;
385 386 387

	/* sysfs related */
	struct device		dev;
388
	unsigned long		open_count;	/* protected by lock */
389 390
};

391 392 393 394 395 396 397
/*
 * Flag bits for rbd_dev->flags.  If atomicity is required,
 * rbd_dev->lock is used to protect access.
 *
 * Currently, only the "removing" flag (which is coupled with the
 * "open_count" field) requires atomic access.
 */
398 399
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
400
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
401 402
};

403
static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */
404

405
static LIST_HEAD(rbd_dev_list);    /* devices */
406 407
static DEFINE_SPINLOCK(rbd_dev_list_lock);

A
Alex Elder 已提交
408 409
static LIST_HEAD(rbd_client_list);		/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);
410

411 412
/* Slab caches for frequently-allocated structures */

413
static struct kmem_cache	*rbd_img_request_cache;
414
static struct kmem_cache	*rbd_obj_request_cache;
415
static struct kmem_cache	*rbd_segment_name_cache;
416

417
static int rbd_major;
418 419
static DEFINE_IDA(rbd_dev_id_ida);

420 421
static struct workqueue_struct *rbd_wq;

422 423 424 425 426 427 428 429
/*
 * Default to false for now, as single-major requires >= 0.75 version of
 * userspace rbd utility.
 */
static bool single_major = false;
module_param(single_major, bool, S_IRUGO);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: false)");

430 431
static int rbd_img_request_submit(struct rbd_img_request *img_request);

A
Alex Elder 已提交
432 433 434 435
static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);
436 437 438 439
static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
				    size_t count);
static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
				       size_t count);
440
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);
441
static void rbd_spec_put(struct rbd_spec *spec);
A
Alex Elder 已提交
442

443 444
static int rbd_dev_id_to_minor(int dev_id)
{
445
	return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
446 447 448 449
}

static int minor_to_rbd_dev_id(int minor)
{
450
	return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
451 452
}

453 454
static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
455 456
static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major);
static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major);
457 458 459 460

static struct attribute *rbd_bus_attrs[] = {
	&bus_attr_add.attr,
	&bus_attr_remove.attr,
461 462
	&bus_attr_add_single_major.attr,
	&bus_attr_remove_single_major.attr,
463
	NULL,
A
Alex Elder 已提交
464
};
465 466 467 468

static umode_t rbd_bus_is_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
469 470 471 472 473
	if (!single_major &&
	    (attr == &bus_attr_add_single_major.attr ||
	     attr == &bus_attr_remove_single_major.attr))
		return 0;

474 475 476 477 478 479 480 481
	return attr->mode;
}

static const struct attribute_group rbd_bus_group = {
	.attrs = rbd_bus_attrs,
	.is_visible = rbd_bus_is_visible,
};
__ATTRIBUTE_GROUPS(rbd_bus);
A
Alex Elder 已提交
482 483 484

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
485
	.bus_groups	= rbd_bus_groups,
A
Alex Elder 已提交
486 487 488 489 490 491 492 493 494 495 496
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =    "rbd",
	.release =      rbd_root_dev_release,
};

A
Alex Elder 已提交
497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523
static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}

A
Alex Elder 已提交
524 525 526 527 528 529 530 531 532 533 534 535
#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */
536

I
Ilya Dryomov 已提交
537
static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request);
538
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
539 540
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
A
Alex Elder 已提交
541

A
Alex Elder 已提交
542
static int rbd_dev_refresh(struct rbd_device *rbd_dev);
543
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
544
static int rbd_dev_header_info(struct rbd_device *rbd_dev);
545
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
546 547
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
548 549 550 551
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features);
552

553 554
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
A
Alex Elder 已提交
555
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
556
	bool removing = false;
557

A
Alex Elder 已提交
558
	if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
559 560
		return -EROFS;

561
	spin_lock_irq(&rbd_dev->lock);
562 563 564 565
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
566
	spin_unlock_irq(&rbd_dev->lock);
567 568 569
	if (removing)
		return -ENOENT;

A
Alex Elder 已提交
570
	(void) get_device(&rbd_dev->dev);
571

572 573 574
	return 0;
}

575
static void rbd_release(struct gendisk *disk, fmode_t mode)
576 577
{
	struct rbd_device *rbd_dev = disk->private_data;
578 579
	unsigned long open_count_before;

580
	spin_lock_irq(&rbd_dev->lock);
581
	open_count_before = rbd_dev->open_count--;
582
	spin_unlock_irq(&rbd_dev->lock);
583
	rbd_assert(open_count_before > 0);
584

A
Alex Elder 已提交
585
	put_device(&rbd_dev->dev);
586 587
}

G
Guangliang Zhao 已提交
588 589
static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
{
590
	int ret = 0;
G
Guangliang Zhao 已提交
591 592
	int val;
	bool ro;
593
	bool ro_changed = false;
G
Guangliang Zhao 已提交
594

595
	/* get_user() may sleep, so call it before taking rbd_dev->lock */
G
Guangliang Zhao 已提交
596 597 598 599 600 601 602 603
	if (get_user(val, (int __user *)(arg)))
		return -EFAULT;

	ro = val ? true : false;
	/* Snapshot doesn't allow to write*/
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
		return -EROFS;

604 605 606 607 608 609 610
	spin_lock_irq(&rbd_dev->lock);
	/* prevent others open this device */
	if (rbd_dev->open_count > 1) {
		ret = -EBUSY;
		goto out;
	}

G
Guangliang Zhao 已提交
611 612
	if (rbd_dev->mapping.read_only != ro) {
		rbd_dev->mapping.read_only = ro;
613
		ro_changed = true;
G
Guangliang Zhao 已提交
614 615
	}

616 617 618 619 620 621 622
out:
	spin_unlock_irq(&rbd_dev->lock);
	/* set_disk_ro() may sleep, so call it after releasing rbd_dev->lock */
	if (ret == 0 && ro_changed)
		set_disk_ro(rbd_dev->disk, ro ? 1 : 0);

	return ret;
G
Guangliang Zhao 已提交
623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649
}

static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	int ret = 0;

	switch (cmd) {
	case BLKROSET:
		ret = rbd_ioctl_set_ro(rbd_dev, arg);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
				unsigned int cmd, unsigned long arg)
{
	return rbd_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */

650 651 652
static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
653
	.release		= rbd_release,
G
Guangliang Zhao 已提交
654 655 656 657
	.ioctl			= rbd_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= rbd_compat_ioctl,
#endif
658 659 660
};

/*
661
 * Initialize an rbd client instance.  Success or not, this function
662
 * consumes ceph_opts.  Caller holds client_mutex.
663
 */
664
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
665 666 667 668
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

A
Alex Elder 已提交
669
	dout("%s:\n", __func__);
670 671 672 673 674 675 676
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

A
Alex Elder 已提交
677
	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
678
	if (IS_ERR(rbdc->client))
679
		goto out_rbdc;
A
Alex Elder 已提交
680
	ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */
681 682 683

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
684
		goto out_client;
685

A
Alex Elder 已提交
686
	spin_lock(&rbd_client_list_lock);
687
	list_add_tail(&rbdc->node, &rbd_client_list);
A
Alex Elder 已提交
688
	spin_unlock(&rbd_client_list_lock);
689

A
Alex Elder 已提交
690
	dout("%s: rbdc %p\n", __func__, rbdc);
691

692
	return rbdc;
693
out_client:
694
	ceph_destroy_client(rbdc->client);
695
out_rbdc:
696 697
	kfree(rbdc);
out_opt:
A
Alex Elder 已提交
698 699
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
A
Alex Elder 已提交
700 701
	dout("%s: error %d\n", __func__, ret);

V
Vasiliy Kulikov 已提交
702
	return ERR_PTR(ret);
703 704
}

705 706 707 708 709 710 711
static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}

712
/*
713 714
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
715
 */
716
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
717 718
{
	struct rbd_client *client_node;
719
	bool found = false;
720

A
Alex Elder 已提交
721
	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
722 723
		return NULL;

724 725 726
	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
727 728
			__rbd_get_client(client_node);

729 730 731 732 733 734 735
			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
736 737
}

738
/*
739
 * (Per device) rbd map options
740 741
 */
enum {
I
Ilya Dryomov 已提交
742
	Opt_queue_depth,
743 744 745 746
	Opt_last_int,
	/* int args above */
	Opt_last_string,
	/* string args above */
A
Alex Elder 已提交
747 748
	Opt_read_only,
	Opt_read_write,
749
	Opt_err
750 751
};

A
Alex Elder 已提交
752
static match_table_t rbd_opts_tokens = {
I
Ilya Dryomov 已提交
753
	{Opt_queue_depth, "queue_depth=%d"},
754 755
	/* int args above */
	/* string args above */
A
Alex Elder 已提交
756
	{Opt_read_only, "read_only"},
A
Alex Elder 已提交
757 758 759
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
760
	{Opt_err, NULL}
761 762
};

A
Alex Elder 已提交
763
struct rbd_options {
I
Ilya Dryomov 已提交
764
	int	queue_depth;
A
Alex Elder 已提交
765 766 767
	bool	read_only;
};

I
Ilya Dryomov 已提交
768
#define RBD_QUEUE_DEPTH_DEFAULT	BLKDEV_MAX_RQ
A
Alex Elder 已提交
769 770
#define RBD_READ_ONLY_DEFAULT	false

771 772
static int parse_rbd_opts_token(char *c, void *private)
{
A
Alex Elder 已提交
773
	struct rbd_options *rbd_opts = private;
774 775 776
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

A
Alex Elder 已提交
777
	token = match_token(c, rbd_opts_tokens, argstr);
778 779 780
	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
781
			pr_err("bad mount option arg (not int) at '%s'\n", c);
782 783 784 785
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
786
		dout("got string token %d val %s\n", token, argstr[0].from);
787 788 789 790 791
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
I
Ilya Dryomov 已提交
792 793 794 795 796 797 798
	case Opt_queue_depth:
		if (intval < 1) {
			pr_err("queue_depth out of range\n");
			return -EINVAL;
		}
		rbd_opts->queue_depth = intval;
		break;
A
Alex Elder 已提交
799 800 801 802 803 804
	case Opt_read_only:
		rbd_opts->read_only = true;
		break;
	case Opt_read_write:
		rbd_opts->read_only = false;
		break;
805
	default:
806 807
		/* libceph prints "bad option" msg */
		return -EINVAL;
808
	}
809

810 811 812
	return 0;
}

G
Guangliang Zhao 已提交
813 814 815 816 817 818 819
static char* obj_op_name(enum obj_operation_type op_type)
{
	switch (op_type) {
	case OBJ_OP_READ:
		return "read";
	case OBJ_OP_WRITE:
		return "write";
820 821
	case OBJ_OP_DISCARD:
		return "discard";
G
Guangliang Zhao 已提交
822 823 824 825 826
	default:
		return "???";
	}
}

827 828
/*
 * Get a ceph client with specific addr and configuration, if one does
829 830
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
831
 */
832
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
833
{
834
	struct rbd_client *rbdc;
835

836
	mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
837
	rbdc = rbd_client_find(ceph_opts);
838
	if (rbdc)	/* using an existing client */
A
Alex Elder 已提交
839
		ceph_destroy_options(ceph_opts);
840
	else
841
		rbdc = rbd_client_create(ceph_opts);
842
	mutex_unlock(&client_mutex);
843

844
	return rbdc;
845 846 847 848
}

/*
 * Destroy ceph client
A
Alex Elder 已提交
849
 *
A
Alex Elder 已提交
850
 * Caller must hold rbd_client_list_lock.
851 852 853 854 855
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

A
Alex Elder 已提交
856
	dout("%s: rbdc %p\n", __func__, rbdc);
857
	spin_lock(&rbd_client_list_lock);
858
	list_del(&rbdc->node);
859
	spin_unlock(&rbd_client_list_lock);
860 861 862 863 864 865 866 867 868

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
869
static void rbd_put_client(struct rbd_client *rbdc)
870
{
871 872
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
873 874
}

875 876 877 878 879
static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}

880 881
static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
882 883 884 885 886 887 888
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

A
Alex Elder 已提交
889 890 891 892 893 894 895 896 897 898
	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916
	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire the snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
917 918
}

919
/*
920 921
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
922
 */
A
Alex Elder 已提交
923
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
924
				 struct rbd_image_header_ondisk *ondisk)
925
{
A
Alex Elder 已提交
926
	struct rbd_image_header *header = &rbd_dev->header;
927 928 929 930 931
	bool first_time = header->object_prefix == NULL;
	struct ceph_snap_context *snapc;
	char *object_prefix = NULL;
	char *snap_names = NULL;
	u64 *snap_sizes = NULL;
932
	u32 snap_count;
933
	size_t size;
934
	int ret = -ENOMEM;
935
	u32 i;
936

937
	/* Allocate this now to avoid having to handle failure below */
A
Alex Elder 已提交
938

939 940
	if (first_time) {
		size_t len;
941

942 943 944 945 946 947 948 949
		len = strnlen(ondisk->object_prefix,
				sizeof (ondisk->object_prefix));
		object_prefix = kmalloc(len + 1, GFP_KERNEL);
		if (!object_prefix)
			return -ENOMEM;
		memcpy(object_prefix, ondisk->object_prefix, len);
		object_prefix[len] = '\0';
	}
A
Alex Elder 已提交
950

951
	/* Allocate the snapshot context and fill it in */
A
Alex Elder 已提交
952

953 954 955 956 957
	snap_count = le32_to_cpu(ondisk->snap_count);
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc)
		goto out_err;
	snapc->seq = le64_to_cpu(ondisk->snap_seq);
958
	if (snap_count) {
959
		struct rbd_image_snap_ondisk *snaps;
A
Alex Elder 已提交
960 961
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

962
		/* We'll keep a copy of the snapshot names... */
963

964 965 966 967
		if (snap_names_len > (u64)SIZE_MAX)
			goto out_2big;
		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!snap_names)
A
Alex Elder 已提交
968 969
			goto out_err;

970
		/* ...as well as the array of their sizes. */
971

972
		size = snap_count * sizeof (*header->snap_sizes);
973 974
		snap_sizes = kmalloc(size, GFP_KERNEL);
		if (!snap_sizes)
A
Alex Elder 已提交
975
			goto out_err;
976

A
Alex Elder 已提交
977
		/*
978 979 980
		 * Copy the names, and fill in each snapshot's id
		 * and size.
		 *
981
		 * Note that rbd_dev_v1_header_info() guarantees the
982
		 * ondisk buffer we're working with has
A
Alex Elder 已提交
983 984 985
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, this memcpy() is safe.
		 */
986 987 988 989 990 991
		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
		snaps = ondisk->snaps;
		for (i = 0; i < snap_count; i++) {
			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
		}
992
	}
A
Alex Elder 已提交
993

994
	/* We won't fail any more, fill in the header */
995

996 997 998 999 1000 1001 1002 1003 1004
	if (first_time) {
		header->object_prefix = object_prefix;
		header->obj_order = ondisk->options.order;
		header->crypt_type = ondisk->options.crypt_type;
		header->comp_type = ondisk->options.comp_type;
		/* The rest aren't used for format 1 images */
		header->stripe_unit = 0;
		header->stripe_count = 0;
		header->features = 0;
1005
	} else {
A
Alex Elder 已提交
1006 1007 1008
		ceph_put_snap_context(header->snapc);
		kfree(header->snap_names);
		kfree(header->snap_sizes);
1009
	}
1010

1011
	/* The remaining fields always get updated (when we refresh) */
1012

A
Alex Elder 已提交
1013
	header->image_size = le64_to_cpu(ondisk->image_size);
1014 1015 1016
	header->snapc = snapc;
	header->snap_names = snap_names;
	header->snap_sizes = snap_sizes;
1017

1018
	return 0;
1019 1020
out_2big:
	ret = -EIO;
A
Alex Elder 已提交
1021
out_err:
1022 1023 1024 1025
	kfree(snap_sizes);
	kfree(snap_names);
	ceph_put_snap_context(snapc);
	kfree(object_prefix);
1026

1027
	return ret;
1028 1029
}

1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044
static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}

1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068
/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}

/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is in kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
1069 1070 1071
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
1072
	u64 *found;
1073

1074 1075
	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);
1076

1077
	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
1078 1079
}

1080 1081
static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
1082
{
1083
	u32 which;
1084
	const char *snap_name;
1085

1086 1087
	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
1088
		return ERR_PTR(-ENOENT);
1089

1090 1091
	snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
	return snap_name ? snap_name : ERR_PTR(-ENOMEM);
1092 1093 1094 1095
}

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
1096 1097 1098
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

1099 1100 1101
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);
1102

1103
	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
1104 1105
}

1106 1107
static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
1108
{
1109 1110 1111 1112 1113
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;
1114

1115 1116 1117
		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;
1118

1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130
		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
1131 1132
}

1133 1134
static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
			u64 *snap_features)
1135
{
1136 1137 1138 1139 1140
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_features = rbd_dev->header.features;
	} else if (rbd_dev->image_format == 1) {
		*snap_features = 0;	/* No features for format 1 */
1141
	} else {
1142 1143
		u64 features = 0;
		int ret;
1144

1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155
		ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
		if (ret)
			return ret;

		*snap_features = features;
	}
	return 0;
}

static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
1156
	u64 snap_id = rbd_dev->spec->snap_id;
1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170
	u64 size = 0;
	u64 features = 0;
	int ret;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;
	ret = rbd_snap_features(rbd_dev, snap_id, &features);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	rbd_dev->mapping.features = features;

1171
	return 0;
1172 1173
}

A
Alex Elder 已提交
1174 1175 1176 1177
static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
1178 1179
}

1180 1181 1182 1183 1184 1185 1186
static void rbd_segment_name_free(const char *name)
{
	/* The explicit cast here is needed to drop the const qualifier */

	kmem_cache_free(rbd_segment_name_cache, (void *)name);
}

A
Alex Elder 已提交
1187
static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
1188
{
A
Alex Elder 已提交
1189 1190 1191
	char *name;
	u64 segment;
	int ret;
1192
	char *name_format;
1193

1194
	name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
A
Alex Elder 已提交
1195 1196 1197
	if (!name)
		return NULL;
	segment = offset >> rbd_dev->header.obj_order;
1198 1199 1200
	name_format = "%s.%012llx";
	if (rbd_dev->image_format == 2)
		name_format = "%s.%016llx";
1201
	ret = snprintf(name, CEPH_MAX_OID_NAME_LEN + 1, name_format,
A
Alex Elder 已提交
1202
			rbd_dev->header.object_prefix, segment);
1203
	if (ret < 0 || ret > CEPH_MAX_OID_NAME_LEN) {
A
Alex Elder 已提交
1204 1205
		pr_err("error formatting segment name for #%llu (%d)\n",
			segment, ret);
1206
		rbd_segment_name_free(name);
A
Alex Elder 已提交
1207 1208
		name = NULL;
	}
1209

A
Alex Elder 已提交
1210 1211
	return name;
}
1212

A
Alex Elder 已提交
1213 1214 1215
static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;
1216

A
Alex Elder 已提交
1217 1218 1219 1220 1221 1222 1223 1224 1225 1226
	return offset & (segment_size - 1);
}

static u64 rbd_segment_length(struct rbd_device *rbd_dev,
				u64 offset, u64 length)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	offset &= segment_size - 1;

A
Alex Elder 已提交
1227
	rbd_assert(length <= U64_MAX - offset);
A
Alex Elder 已提交
1228 1229 1230 1231
	if (offset + length > segment_size)
		length = segment_size - offset;

	return length;
1232 1233
}

1234 1235 1236 1237 1238 1239 1240 1241
/*
 * returns the size of an object in the image
 */
static u64 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1 << header->obj_order;
}

1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261
/*
 * bio helpers
 */

static void bio_chain_put(struct bio *chain)
{
	struct bio *tmp;

	while (chain) {
		tmp = chain;
		chain = chain->bi_next;
		bio_put(tmp);
	}
}

/*
 * zeros a bio chain, starting at specific offset
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
1262 1263
	struct bio_vec bv;
	struct bvec_iter iter;
1264 1265 1266 1267 1268
	unsigned long flags;
	void *buf;
	int pos = 0;

	while (chain) {
1269 1270
		bio_for_each_segment(bv, chain, iter) {
			if (pos + bv.bv_len > start_ofs) {
1271
				int remainder = max(start_ofs - pos, 0);
1272
				buf = bvec_kmap_irq(&bv, &flags);
1273
				memset(buf + remainder, 0,
1274 1275
				       bv.bv_len - remainder);
				flush_dcache_page(bv.bv_page);
1276
				bvec_kunmap_irq(buf, &flags);
1277
			}
1278
			pos += bv.bv_len;
1279 1280 1281 1282 1283 1284
		}

		chain = chain->bi_next;
	}
}

A
Alex Elder 已提交
1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302
/*
 * similar to zero_bio_chain(), zeros data defined by a page array,
 * starting at the given byte offset from the start of the array and
 * continuing up to the given end offset.  The pages array is
 * assumed to be big enough to hold all bytes up to the end.
 */
static void zero_pages(struct page **pages, u64 offset, u64 end)
{
	struct page **page = &pages[offset >> PAGE_SHIFT];

	rbd_assert(end > offset);
	rbd_assert(end - offset <= (u64)SIZE_MAX);
	while (offset < end) {
		size_t page_offset;
		size_t length;
		unsigned long flags;
		void *kaddr;

1303 1304
		page_offset = offset & ~PAGE_MASK;
		length = min_t(size_t, PAGE_SIZE - page_offset, end - offset);
A
Alex Elder 已提交
1305 1306 1307
		local_irq_save(flags);
		kaddr = kmap_atomic(*page);
		memset(kaddr + page_offset, 0, length);
1308
		flush_dcache_page(*page);
A
Alex Elder 已提交
1309 1310 1311 1312 1313 1314 1315 1316
		kunmap_atomic(kaddr);
		local_irq_restore(flags);

		offset += length;
		page++;
	}
}

1317
/*
A
Alex Elder 已提交
1318 1319
 * Clone a portion of a bio, starting at the given byte offset
 * and continuing for the number of bytes indicated.
1320
 */
A
Alex Elder 已提交
1321 1322 1323 1324
static struct bio *bio_clone_range(struct bio *bio_src,
					unsigned int offset,
					unsigned int len,
					gfp_t gfpmask)
1325
{
A
Alex Elder 已提交
1326 1327
	struct bio *bio;

K
Kent Overstreet 已提交
1328
	bio = bio_clone(bio_src, gfpmask);
A
Alex Elder 已提交
1329 1330
	if (!bio)
		return NULL;	/* ENOMEM */
1331

K
Kent Overstreet 已提交
1332
	bio_advance(bio, offset);
1333
	bio->bi_iter.bi_size = len;
A
Alex Elder 已提交
1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363

	return bio;
}

/*
 * Clone a portion of a bio chain, starting at the given byte offset
 * into the first bio in the source chain and continuing for the
 * number of bytes indicated.  The result is another bio chain of
 * exactly the given length, or a null pointer on error.
 *
 * The bio_src and offset parameters are both in-out.  On entry they
 * refer to the first source bio and the offset into that bio where
 * the start of data to be cloned is located.
 *
 * On return, bio_src is updated to refer to the bio in the source
 * chain that contains first un-cloned byte, and *offset will
 * contain the offset of that byte within that bio.
 */
static struct bio *bio_chain_clone_range(struct bio **bio_src,
					unsigned int *offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bi = *bio_src;
	unsigned int off = *offset;
	struct bio *chain = NULL;
	struct bio **end;

	/* Build up a chain of clone bios up to the limit */

1364
	if (!bi || off >= bi->bi_iter.bi_size || !len)
A
Alex Elder 已提交
1365
		return NULL;		/* Nothing to clone */
1366

A
Alex Elder 已提交
1367 1368 1369 1370 1371
	end = &chain;
	while (len) {
		unsigned int bi_size;
		struct bio *bio;

1372 1373
		if (!bi) {
			rbd_warn(NULL, "bio_chain exhausted with %u left", len);
A
Alex Elder 已提交
1374
			goto out_err;	/* EINVAL; ran out of bio's */
1375
		}
1376
		bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len);
A
Alex Elder 已提交
1377 1378 1379 1380 1381 1382
		bio = bio_clone_range(bi, off, bi_size, gfpmask);
		if (!bio)
			goto out_err;	/* ENOMEM */

		*end = bio;
		end = &bio->bi_next;
1383

A
Alex Elder 已提交
1384
		off += bi_size;
1385
		if (off == bi->bi_iter.bi_size) {
A
Alex Elder 已提交
1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396
			bi = bi->bi_next;
			off = 0;
		}
		len -= bi_size;
	}
	*bio_src = bi;
	*offset = off;

	return chain;
out_err:
	bio_chain_put(chain);
1397 1398 1399 1400

	return NULL;
}

1401 1402 1403 1404 1405
/*
 * The default/initial value for all object request flags is 0.  For
 * each flag, once its value is set to 1 it is never reset to 0
 * again.
 */
A
Alex Elder 已提交
1406
static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
1407
{
A
Alex Elder 已提交
1408
	if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
1409 1410
		struct rbd_device *rbd_dev;

A
Alex Elder 已提交
1411
		rbd_dev = obj_request->img_request->rbd_dev;
1412
		rbd_warn(rbd_dev, "obj_request %p already marked img_data",
1413 1414 1415 1416
			obj_request);
	}
}

A
Alex Elder 已提交
1417
static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
1418 1419
{
	smp_mb();
A
Alex Elder 已提交
1420
	return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
1421 1422
}

A
Alex Elder 已提交
1423
static void obj_request_done_set(struct rbd_obj_request *obj_request)
1424
{
A
Alex Elder 已提交
1425 1426
	if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
		struct rbd_device *rbd_dev = NULL;
1427

A
Alex Elder 已提交
1428 1429
		if (obj_request_img_data_test(obj_request))
			rbd_dev = obj_request->img_request->rbd_dev;
1430
		rbd_warn(rbd_dev, "obj_request %p already marked done",
1431 1432 1433 1434
			obj_request);
	}
}

A
Alex Elder 已提交
1435
static bool obj_request_done_test(struct rbd_obj_request *obj_request)
1436 1437
{
	smp_mb();
A
Alex Elder 已提交
1438
	return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
1439 1440
}

1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 1470 1471
/*
 * This sets the KNOWN flag after (possibly) setting the EXISTS
 * flag.  The latter is set based on the "exists" value provided.
 *
 * Note that for our purposes once an object exists it never goes
 * away again.  It's possible that the response from two existence
 * checks are separated by the creation of the target object, and
 * the first ("doesn't exist") response arrives *after* the second
 * ("does exist").  In that case we ignore the second one.
 */
static void obj_request_existence_set(struct rbd_obj_request *obj_request,
				bool exists)
{
	if (exists)
		set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
	set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
	smp_mb();
}

static bool obj_request_known_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
}

static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
}

1472 1473 1474 1475 1476 1477 1478 1479
static bool obj_request_overlaps_parent(struct rbd_obj_request *obj_request)
{
	struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;

	return obj_request->img_offset <
	    round_up(rbd_dev->parent_overlap, rbd_obj_bytes(&rbd_dev->header));
}

A
Alex Elder 已提交
1480 1481
static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
A
Alex Elder 已提交
1482 1483
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
A
Alex Elder 已提交
1484 1485 1486 1487 1488 1489 1490
	kref_get(&obj_request->kref);
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
A
Alex Elder 已提交
1491 1492
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
A
Alex Elder 已提交
1493 1494 1495
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

1496 1497 1498 1499 1500 1501 1502
static void rbd_img_request_get(struct rbd_img_request *img_request)
{
	dout("%s: img %p (was %d)\n", __func__, img_request,
	     atomic_read(&img_request->kref.refcount));
	kref_get(&img_request->kref);
}

1503 1504
static bool img_request_child_test(struct rbd_img_request *img_request);
static void rbd_parent_request_destroy(struct kref *kref);
A
Alex Elder 已提交
1505 1506 1507 1508
static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
	rbd_assert(img_request != NULL);
A
Alex Elder 已提交
1509 1510
	dout("%s: img %p (was %d)\n", __func__, img_request,
		atomic_read(&img_request->kref.refcount));
1511 1512 1513 1514
	if (img_request_child_test(img_request))
		kref_put(&img_request->kref, rbd_parent_request_destroy);
	else
		kref_put(&img_request->kref, rbd_img_request_destroy);
A
Alex Elder 已提交
1515 1516 1517 1518 1519
}

static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
1520 1521
	rbd_assert(obj_request->img_request == NULL);

1522
	/* Image request now owns object's original reference */
A
Alex Elder 已提交
1523
	obj_request->img_request = img_request;
1524
	obj_request->which = img_request->obj_request_count;
1525 1526
	rbd_assert(!obj_request_img_data_test(obj_request));
	obj_request_img_data_set(obj_request);
A
Alex Elder 已提交
1527
	rbd_assert(obj_request->which != BAD_WHICH);
1528 1529
	img_request->obj_request_count++;
	list_add_tail(&obj_request->links, &img_request->obj_requests);
A
Alex Elder 已提交
1530 1531
	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
A
Alex Elder 已提交
1532 1533 1534 1535 1536 1537
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->which != BAD_WHICH);
1538

A
Alex Elder 已提交
1539 1540
	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
A
Alex Elder 已提交
1541
	list_del(&obj_request->links);
1542 1543 1544 1545
	rbd_assert(img_request->obj_request_count > 0);
	img_request->obj_request_count--;
	rbd_assert(obj_request->which == img_request->obj_request_count);
	obj_request->which = BAD_WHICH;
1546
	rbd_assert(obj_request_img_data_test(obj_request));
A
Alex Elder 已提交
1547 1548
	rbd_assert(obj_request->img_request == img_request);
	obj_request->img_request = NULL;
1549
	obj_request->callback = NULL;
A
Alex Elder 已提交
1550 1551 1552 1553 1554 1555
	rbd_obj_request_put(obj_request);
}

static bool obj_request_type_valid(enum obj_request_type type)
{
	switch (type) {
1556
	case OBJ_REQUEST_NODATA:
A
Alex Elder 已提交
1557
	case OBJ_REQUEST_BIO:
1558
	case OBJ_REQUEST_PAGES:
A
Alex Elder 已提交
1559 1560 1561 1562 1563 1564 1565 1566 1567
		return true;
	default:
		return false;
	}
}

static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
				struct rbd_obj_request *obj_request)
{
1568
	dout("%s %p\n", __func__, obj_request);
A
Alex Elder 已提交
1569 1570 1571
	return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
}

1572 1573 1574 1575 1576 1577 1578 1579 1580
static void rbd_obj_request_end(struct rbd_obj_request *obj_request)
{
	dout("%s %p\n", __func__, obj_request);
	ceph_osdc_cancel_request(obj_request->osd_req);
}

/*
 * Wait for an object request to complete.  If interrupted, cancel the
 * underlying osd request.
1581 1582
 *
 * @timeout: in jiffies, 0 means "wait forever"
1583
 */
1584 1585
static int __rbd_obj_request_wait(struct rbd_obj_request *obj_request,
				  unsigned long timeout)
1586
{
1587
	long ret;
1588 1589

	dout("%s %p\n", __func__, obj_request);
1590 1591 1592 1593 1594 1595
	ret = wait_for_completion_interruptible_timeout(
					&obj_request->completion,
					ceph_timeout_jiffies(timeout));
	if (ret <= 0) {
		if (ret == 0)
			ret = -ETIMEDOUT;
1596
		rbd_obj_request_end(obj_request);
1597 1598
	} else {
		ret = 0;
1599 1600
	}

1601 1602 1603 1604 1605 1606 1607 1608 1609
	dout("%s %p ret %d\n", __func__, obj_request, (int)ret);
	return ret;
}

static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
{
	return __rbd_obj_request_wait(obj_request, 0);
}

A
Alex Elder 已提交
1610 1611
static void rbd_img_request_complete(struct rbd_img_request *img_request)
{
1612

A
Alex Elder 已提交
1613
	dout("%s: img %p\n", __func__, img_request);
1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629

	/*
	 * If no error occurred, compute the aggregate transfer
	 * count for the image request.  We could instead use
	 * atomic64_cmpxchg() to update it as each object request
	 * completes; not clear which way is better off hand.
	 */
	if (!img_request->result) {
		struct rbd_obj_request *obj_request;
		u64 xferred = 0;

		for_each_obj_request(img_request, obj_request)
			xferred += obj_request->xferred;
		img_request->xferred = xferred;
	}

A
Alex Elder 已提交
1630 1631 1632 1633 1634 1635
	if (img_request->callback)
		img_request->callback(img_request);
	else
		rbd_img_request_put(img_request);
}

A
Alex Elder 已提交
1636 1637 1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652
/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never change thereafter.
 */
static void img_request_write_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_WRITE, &img_request->flags);
	smp_mb();
}

static bool img_request_write_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
}

1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667
/*
 * Set the discard flag when the img_request is an discard request
 */
static void img_request_discard_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_DISCARD, &img_request->flags);
	smp_mb();
}

static bool img_request_discard_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_DISCARD, &img_request->flags) != 0;
}

1668 1669 1670 1671 1672 1673
static void img_request_child_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

1674 1675 1676 1677 1678 1679
static void img_request_child_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

1680 1681 1682 1683 1684 1685
static bool img_request_child_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
}

1686 1687 1688 1689 1690 1691
static void img_request_layered_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

1692 1693 1694 1695 1696 1697
static void img_request_layered_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

1698 1699 1700 1701 1702 1703
static bool img_request_layered_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}

1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714
static enum obj_operation_type
rbd_img_request_op_type(struct rbd_img_request *img_request)
{
	if (img_request_write_test(img_request))
		return OBJ_OP_WRITE;
	else if (img_request_discard_test(img_request))
		return OBJ_OP_DISCARD;
	else
		return OBJ_OP_READ;
}

1715 1716 1717
static void
rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
{
A
Alex Elder 已提交
1718 1719 1720
	u64 xferred = obj_request->xferred;
	u64 length = obj_request->length;

1721 1722
	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, obj_request->img_request, obj_request->result,
A
Alex Elder 已提交
1723
		xferred, length);
1724
	/*
1725 1726 1727 1728 1729 1730
	 * ENOENT means a hole in the image.  We zero-fill the entire
	 * length of the request.  A short read also implies zero-fill
	 * to the end of the request.  An error requires the whole
	 * length of the request to be reported finished with an error
	 * to the block layer.  In each case we update the xferred
	 * count to indicate the whole request was satisfied.
1731
	 */
A
Alex Elder 已提交
1732
	rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
1733
	if (obj_request->result == -ENOENT) {
A
Alex Elder 已提交
1734 1735 1736 1737
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, 0);
		else
			zero_pages(obj_request->pages, 0, length);
1738
		obj_request->result = 0;
A
Alex Elder 已提交
1739 1740 1741 1742 1743
	} else if (xferred < length && !obj_request->result) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, xferred);
		else
			zero_pages(obj_request->pages, xferred, length);
1744
	}
1745
	obj_request->xferred = length;
1746 1747 1748
	obj_request_done_set(obj_request);
}

A
Alex Elder 已提交
1749 1750
static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
{
A
Alex Elder 已提交
1751 1752
	dout("%s: obj %p cb %p\n", __func__, obj_request,
		obj_request->callback);
A
Alex Elder 已提交
1753 1754
	if (obj_request->callback)
		obj_request->callback(obj_request);
1755 1756
	else
		complete_all(&obj_request->completion);
A
Alex Elder 已提交
1757 1758
}

1759
static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
A
Alex Elder 已提交
1760
{
A
Alex Elder 已提交
1761
	struct rbd_img_request *img_request = NULL;
A
Alex Elder 已提交
1762
	struct rbd_device *rbd_dev = NULL;
A
Alex Elder 已提交
1763 1764 1765 1766 1767
	bool layered = false;

	if (obj_request_img_data_test(obj_request)) {
		img_request = obj_request->img_request;
		layered = img_request && img_request_layered_test(img_request);
A
Alex Elder 已提交
1768
		rbd_dev = img_request->rbd_dev;
A
Alex Elder 已提交
1769
	}
A
Alex Elder 已提交
1770 1771 1772 1773

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, img_request, obj_request->result,
		obj_request->xferred, obj_request->length);
A
Alex Elder 已提交
1774 1775
	if (layered && obj_request->result == -ENOENT &&
			obj_request->img_offset < rbd_dev->parent_overlap)
A
Alex Elder 已提交
1776 1777
		rbd_img_parent_read(obj_request);
	else if (img_request)
1778 1779 1780
		rbd_img_obj_request_read_callback(obj_request);
	else
		obj_request_done_set(obj_request);
A
Alex Elder 已提交
1781 1782
}

1783
static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
A
Alex Elder 已提交
1784
{
1785 1786 1787
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
A
Alex Elder 已提交
1788 1789
	 * There is no such thing as a successful short write.  Set
	 * it to our originally-requested length.
1790 1791
	 */
	obj_request->xferred = obj_request->length;
1792
	obj_request_done_set(obj_request);
A
Alex Elder 已提交
1793 1794
}

1795 1796 1797 1798 1799 1800 1801 1802 1803
static void rbd_osd_discard_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short discard.  Set
	 * it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
1804 1805 1806
	/* discarding a non-existent object is not a problem */
	if (obj_request->result == -ENOENT)
		obj_request->result = 0;
1807 1808 1809
	obj_request_done_set(obj_request);
}

A
Alex Elder 已提交
1810 1811 1812 1813
/*
 * For a simple stat call there's nothing to do.  We'll do more if
 * this is part of a write sequence for a layered image.
 */
1814
static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
A
Alex Elder 已提交
1815
{
A
Alex Elder 已提交
1816
	dout("%s: obj %p\n", __func__, obj_request);
A
Alex Elder 已提交
1817 1818 1819
	obj_request_done_set(obj_request);
}

I
Ilya Dryomov 已提交
1820 1821 1822 1823 1824 1825 1826 1827 1828 1829
static void rbd_osd_call_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);

	if (obj_request_img_data_test(obj_request))
		rbd_osd_copyup_callback(obj_request);
	else
		obj_request_done_set(obj_request);
}

1830
static void rbd_osd_req_callback(struct ceph_osd_request *osd_req)
A
Alex Elder 已提交
1831 1832 1833 1834
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;
	u16 opcode;

1835
	dout("%s: osd_req %p\n", __func__, osd_req);
A
Alex Elder 已提交
1836
	rbd_assert(osd_req == obj_request->osd_req);
A
Alex Elder 已提交
1837 1838 1839 1840 1841 1842
	if (obj_request_img_data_test(obj_request)) {
		rbd_assert(obj_request->img_request);
		rbd_assert(obj_request->which != BAD_WHICH);
	} else {
		rbd_assert(obj_request->which == BAD_WHICH);
	}
A
Alex Elder 已提交
1843

1844 1845
	if (osd_req->r_result < 0)
		obj_request->result = osd_req->r_result;
A
Alex Elder 已提交
1846

1847 1848
	/*
	 * We support a 64-bit length, but ultimately it has to be
C
Christoph Hellwig 已提交
1849 1850
	 * passed to the block layer, which just supports a 32-bit
	 * length field.
1851
	 */
1852
	obj_request->xferred = osd_req->r_ops[0].outdata_len;
A
Alex Elder 已提交
1853
	rbd_assert(obj_request->xferred < (u64)UINT_MAX);
1854

1855
	opcode = osd_req->r_ops[0].op;
A
Alex Elder 已提交
1856 1857
	switch (opcode) {
	case CEPH_OSD_OP_READ:
1858
		rbd_osd_read_callback(obj_request);
A
Alex Elder 已提交
1859
		break;
1860
	case CEPH_OSD_OP_SETALLOCHINT:
1861 1862
		rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE ||
			   osd_req->r_ops[1].op == CEPH_OSD_OP_WRITEFULL);
1863
		/* fall through */
A
Alex Elder 已提交
1864
	case CEPH_OSD_OP_WRITE:
1865
	case CEPH_OSD_OP_WRITEFULL:
1866
		rbd_osd_write_callback(obj_request);
A
Alex Elder 已提交
1867
		break;
A
Alex Elder 已提交
1868
	case CEPH_OSD_OP_STAT:
1869
		rbd_osd_stat_callback(obj_request);
A
Alex Elder 已提交
1870
		break;
1871 1872 1873 1874 1875
	case CEPH_OSD_OP_DELETE:
	case CEPH_OSD_OP_TRUNCATE:
	case CEPH_OSD_OP_ZERO:
		rbd_osd_discard_callback(obj_request);
		break;
1876
	case CEPH_OSD_OP_CALL:
I
Ilya Dryomov 已提交
1877 1878
		rbd_osd_call_callback(obj_request);
		break;
A
Alex Elder 已提交
1879
	default:
1880
		rbd_warn(NULL, "%s: unsupported op %hu",
A
Alex Elder 已提交
1881 1882 1883 1884
			obj_request->object_name, (unsigned short) opcode);
		break;
	}

1885
	if (obj_request_done_test(obj_request))
A
Alex Elder 已提交
1886 1887 1888
		rbd_obj_request_complete(obj_request);
}

1889
static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
A
Alex Elder 已提交
1890 1891
{
	struct rbd_img_request *img_request = obj_request->img_request;
1892
	struct ceph_osd_request *osd_req = obj_request->osd_req;
A
Alex Elder 已提交
1893

1894 1895
	if (img_request)
		osd_req->r_snapid = img_request->snap_id;
1896 1897 1898 1899 1900 1901
}

static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
	struct ceph_osd_request *osd_req = obj_request->osd_req;

1902 1903
	osd_req->r_mtime = CURRENT_TIME;
	osd_req->r_data_offset = obj_request->offset;
A
Alex Elder 已提交
1904 1905
}

1906 1907 1908 1909 1910 1911
/*
 * Create an osd request.  A read request has one osd op (read).
 * A write request has either one (watch) or two (hint+write) osd ops.
 * (All rbd data writes are prefixed with an allocation hint op, but
 * technically osd watch is a write request, hence this distinction.)
 */
A
Alex Elder 已提交
1912 1913
static struct ceph_osd_request *rbd_osd_req_create(
					struct rbd_device *rbd_dev,
G
Guangliang Zhao 已提交
1914
					enum obj_operation_type op_type,
1915
					unsigned int num_ops,
A
Alex Elder 已提交
1916
					struct rbd_obj_request *obj_request)
A
Alex Elder 已提交
1917 1918 1919 1920 1921
{
	struct ceph_snap_context *snapc = NULL;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;

1922 1923
	if (obj_request_img_data_test(obj_request) &&
		(op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_WRITE)) {
1924
		struct rbd_img_request *img_request = obj_request->img_request;
1925 1926 1927 1928 1929
		if (op_type == OBJ_OP_WRITE) {
			rbd_assert(img_request_write_test(img_request));
		} else {
			rbd_assert(img_request_discard_test(img_request));
		}
G
Guangliang Zhao 已提交
1930
		snapc = img_request->snapc;
A
Alex Elder 已提交
1931 1932
	}

G
Guangliang Zhao 已提交
1933
	rbd_assert(num_ops == 1 || ((op_type == OBJ_OP_WRITE) && num_ops == 2));
1934 1935

	/* Allocate and initialize the request, for the num_ops ops */
A
Alex Elder 已提交
1936 1937

	osdc = &rbd_dev->rbd_client->client->osdc;
1938
	osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
1939
					  GFP_NOIO);
A
Alex Elder 已提交
1940
	if (!osd_req)
1941
		goto fail;
A
Alex Elder 已提交
1942

1943
	if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
A
Alex Elder 已提交
1944
		osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
A
Alex Elder 已提交
1945
	else
A
Alex Elder 已提交
1946 1947 1948 1949 1950
		osd_req->r_flags = CEPH_OSD_FLAG_READ;

	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

1951
	osd_req->r_base_oloc.pool = rbd_dev->layout.pool_id;
1952 1953 1954
	if (ceph_oid_aprintf(&osd_req->r_base_oid, GFP_NOIO, "%s",
			     obj_request->object_name))
		goto fail;
A
Alex Elder 已提交
1955

1956 1957 1958
	if (ceph_osdc_alloc_messages(osd_req, GFP_NOIO))
		goto fail;

A
Alex Elder 已提交
1959
	return osd_req;
1960 1961 1962 1963

fail:
	ceph_osdc_put_request(osd_req);
	return NULL;
A
Alex Elder 已提交
1964 1965
}

1966
/*
1967 1968 1969 1970
 * Create a copyup osd request based on the information in the object
 * request supplied.  A copyup request has two or three osd ops, a
 * copyup method call, potentially a hint op, and a write or truncate
 * or zero op.
1971 1972 1973 1974 1975 1976 1977 1978 1979
 */
static struct ceph_osd_request *
rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	struct ceph_snap_context *snapc;
	struct rbd_device *rbd_dev;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;
1980
	int num_osd_ops = 3;
1981 1982 1983 1984

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;
	rbd_assert(img_request);
1985 1986
	rbd_assert(img_request_write_test(img_request) ||
			img_request_discard_test(img_request));
1987

1988 1989 1990 1991
	if (img_request_discard_test(img_request))
		num_osd_ops = 2;

	/* Allocate and initialize the request, for all the ops */
1992 1993 1994 1995

	snapc = img_request->snapc;
	rbd_dev = img_request->rbd_dev;
	osdc = &rbd_dev->rbd_client->client->osdc;
1996
	osd_req = ceph_osdc_alloc_request(osdc, snapc, num_osd_ops,
1997
						false, GFP_NOIO);
1998
	if (!osd_req)
1999
		goto fail;
2000 2001 2002 2003 2004

	osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

2005
	osd_req->r_base_oloc.pool = rbd_dev->layout.pool_id;
2006 2007 2008
	if (ceph_oid_aprintf(&osd_req->r_base_oid, GFP_NOIO, "%s",
			     obj_request->object_name))
		goto fail;
2009

2010 2011 2012
	if (ceph_osdc_alloc_messages(osd_req, GFP_NOIO))
		goto fail;

2013
	return osd_req;
2014 2015 2016 2017

fail:
	ceph_osdc_put_request(osd_req);
	return NULL;
2018 2019 2020
}


A
Alex Elder 已提交
2021 2022 2023 2024 2025 2026 2027 2028 2029 2030 2031 2032 2033 2034 2035 2036 2037 2038
static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
	ceph_osdc_put_request(osd_req);
}

/* object_name is assumed to be a non-null pointer and NUL-terminated */

static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
						u64 offset, u64 length,
						enum obj_request_type type)
{
	struct rbd_obj_request *obj_request;
	size_t size;
	char *name;

	rbd_assert(obj_request_type_valid(type));

	size = strlen(object_name) + 1;
2039
	name = kmalloc(size, GFP_NOIO);
2040
	if (!name)
A
Alex Elder 已提交
2041 2042
		return NULL;

2043
	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
2044 2045 2046 2047 2048
	if (!obj_request) {
		kfree(name);
		return NULL;
	}

A
Alex Elder 已提交
2049 2050 2051
	obj_request->object_name = memcpy(name, object_name, size);
	obj_request->offset = offset;
	obj_request->length = length;
2052
	obj_request->flags = 0;
A
Alex Elder 已提交
2053 2054 2055
	obj_request->which = BAD_WHICH;
	obj_request->type = type;
	INIT_LIST_HEAD(&obj_request->links);
2056
	init_completion(&obj_request->completion);
A
Alex Elder 已提交
2057 2058
	kref_init(&obj_request->kref);

A
Alex Elder 已提交
2059 2060 2061
	dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
		offset, length, (int)type, obj_request);

A
Alex Elder 已提交
2062 2063 2064 2065 2066 2067 2068 2069 2070
	return obj_request;
}

static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

A
Alex Elder 已提交
2071 2072
	dout("%s: obj %p\n", __func__, obj_request);

A
Alex Elder 已提交
2073 2074 2075 2076 2077 2078 2079 2080
	rbd_assert(obj_request->img_request == NULL);
	rbd_assert(obj_request->which == BAD_WHICH);

	if (obj_request->osd_req)
		rbd_osd_req_destroy(obj_request->osd_req);

	rbd_assert(obj_request_type_valid(obj_request->type));
	switch (obj_request->type) {
2081 2082
	case OBJ_REQUEST_NODATA:
		break;		/* Nothing to do */
A
Alex Elder 已提交
2083 2084 2085 2086
	case OBJ_REQUEST_BIO:
		if (obj_request->bio_list)
			bio_chain_put(obj_request->bio_list);
		break;
2087 2088 2089 2090 2091
	case OBJ_REQUEST_PAGES:
		if (obj_request->pages)
			ceph_release_page_vector(obj_request->pages,
						obj_request->page_count);
		break;
A
Alex Elder 已提交
2092 2093
	}

2094
	kfree(obj_request->object_name);
2095 2096
	obj_request->object_name = NULL;
	kmem_cache_free(rbd_obj_request_cache, obj_request);
A
Alex Elder 已提交
2097 2098
}

A
Alex Elder 已提交
2099 2100 2101 2102 2103 2104 2105 2106 2107 2108 2109
/* It's OK to call this for a device with no parent */

static void rbd_spec_put(struct rbd_spec *spec);
static void rbd_dev_unparent(struct rbd_device *rbd_dev)
{
	rbd_dev_remove_parent(rbd_dev);
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;
}

2110 2111 2112 2113 2114 2115 2116 2117 2118 2119 2120 2121 2122 2123 2124 2125 2126 2127 2128 2129 2130 2131
/*
 * Parent image reference counting is used to determine when an
 * image's parent fields can be safely torn down--after there are no
 * more in-flight requests to the parent image.  When the last
 * reference is dropped, cleaning them up is safe.
 */
static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return;

	counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
	if (counter > 0)
		return;

	/* Last reference; clean up parent data structures */

	if (!counter)
		rbd_dev_unparent(rbd_dev);
	else
2132
		rbd_warn(rbd_dev, "parent reference underflow");
2133 2134 2135 2136 2137 2138 2139 2140 2141 2142 2143 2144
}

/*
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or
 * false otherwise.
 */
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
2145
	int counter = 0;
2146 2147 2148 2149

	if (!rbd_dev->parent_spec)
		return false;

2150 2151 2152 2153
	down_read(&rbd_dev->header_rwsem);
	if (rbd_dev->parent_overlap)
		counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
	up_read(&rbd_dev->header_rwsem);
2154 2155

	if (counter < 0)
2156
		rbd_warn(rbd_dev, "parent reference overflow");
2157

2158
	return counter > 0;
2159 2160
}

A
Alex Elder 已提交
2161 2162 2163 2164 2165
/*
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 */
A
Alex Elder 已提交
2166 2167
static struct rbd_img_request *rbd_img_request_create(
					struct rbd_device *rbd_dev,
A
Alex Elder 已提交
2168
					u64 offset, u64 length,
G
Guangliang Zhao 已提交
2169
					enum obj_operation_type op_type,
2170
					struct ceph_snap_context *snapc)
A
Alex Elder 已提交
2171 2172 2173
{
	struct rbd_img_request *img_request;

2174
	img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_NOIO);
A
Alex Elder 已提交
2175 2176 2177 2178 2179 2180 2181
	if (!img_request)
		return NULL;

	img_request->rq = NULL;
	img_request->rbd_dev = rbd_dev;
	img_request->offset = offset;
	img_request->length = length;
A
Alex Elder 已提交
2182
	img_request->flags = 0;
2183 2184 2185 2186
	if (op_type == OBJ_OP_DISCARD) {
		img_request_discard_set(img_request);
		img_request->snapc = snapc;
	} else if (op_type == OBJ_OP_WRITE) {
A
Alex Elder 已提交
2187
		img_request_write_set(img_request);
2188
		img_request->snapc = snapc;
A
Alex Elder 已提交
2189
	} else {
A
Alex Elder 已提交
2190
		img_request->snap_id = rbd_dev->spec->snap_id;
A
Alex Elder 已提交
2191
	}
2192
	if (rbd_dev_parent_get(rbd_dev))
2193
		img_request_layered_set(img_request);
A
Alex Elder 已提交
2194 2195 2196
	spin_lock_init(&img_request->completion_lock);
	img_request->next_completion = 0;
	img_request->callback = NULL;
2197
	img_request->result = 0;
A
Alex Elder 已提交
2198 2199 2200 2201
	img_request->obj_request_count = 0;
	INIT_LIST_HEAD(&img_request->obj_requests);
	kref_init(&img_request->kref);

A
Alex Elder 已提交
2202
	dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
G
Guangliang Zhao 已提交
2203
		obj_op_name(op_type), offset, length, img_request);
A
Alex Elder 已提交
2204

A
Alex Elder 已提交
2205 2206 2207 2208 2209 2210 2211 2212 2213 2214 2215
	return img_request;
}

static void rbd_img_request_destroy(struct kref *kref)
{
	struct rbd_img_request *img_request;
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	img_request = container_of(kref, struct rbd_img_request, kref);

A
Alex Elder 已提交
2216 2217
	dout("%s: img %p\n", __func__, img_request);

A
Alex Elder 已提交
2218 2219
	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);
2220
	rbd_assert(img_request->obj_request_count == 0);
A
Alex Elder 已提交
2221

2222 2223 2224 2225 2226
	if (img_request_layered_test(img_request)) {
		img_request_layered_clear(img_request);
		rbd_dev_parent_put(img_request->rbd_dev);
	}

2227 2228
	if (img_request_write_test(img_request) ||
		img_request_discard_test(img_request))
2229
		ceph_put_snap_context(img_request->snapc);
A
Alex Elder 已提交
2230

2231
	kmem_cache_free(rbd_img_request_cache, img_request);
A
Alex Elder 已提交
2232 2233
}

2234 2235 2236 2237 2238 2239 2240 2241 2242 2243
static struct rbd_img_request *rbd_parent_request_create(
					struct rbd_obj_request *obj_request,
					u64 img_offset, u64 length)
{
	struct rbd_img_request *parent_request;
	struct rbd_device *rbd_dev;

	rbd_assert(obj_request->img_request);
	rbd_dev = obj_request->img_request->rbd_dev;

2244
	parent_request = rbd_img_request_create(rbd_dev->parent, img_offset,
G
Guangliang Zhao 已提交
2245
						length, OBJ_OP_READ, NULL);
2246 2247 2248 2249 2250 2251 2252 2253 2254 2255 2256 2257 2258 2259 2260 2261 2262 2263 2264 2265 2266 2267 2268 2269 2270
	if (!parent_request)
		return NULL;

	img_request_child_set(parent_request);
	rbd_obj_request_get(obj_request);
	parent_request->obj_request = obj_request;

	return parent_request;
}

static void rbd_parent_request_destroy(struct kref *kref)
{
	struct rbd_img_request *parent_request;
	struct rbd_obj_request *orig_request;

	parent_request = container_of(kref, struct rbd_img_request, kref);
	orig_request = parent_request->obj_request;

	parent_request->obj_request = NULL;
	rbd_obj_request_put(orig_request);
	img_request_child_clear(parent_request);

	rbd_img_request_destroy(kref);
}

2271 2272
static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
{
2273
	struct rbd_img_request *img_request;
2274 2275
	unsigned int xferred;
	int result;
A
Alex Elder 已提交
2276
	bool more;
2277

2278 2279 2280
	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;

2281 2282 2283 2284 2285
	rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
	xferred = (unsigned int)obj_request->xferred;
	result = obj_request->result;
	if (result) {
		struct rbd_device *rbd_dev = img_request->rbd_dev;
G
Guangliang Zhao 已提交
2286 2287
		enum obj_operation_type op_type;

2288 2289 2290 2291 2292 2293
		if (img_request_discard_test(img_request))
			op_type = OBJ_OP_DISCARD;
		else if (img_request_write_test(img_request))
			op_type = OBJ_OP_WRITE;
		else
			op_type = OBJ_OP_READ;
2294

2295
		rbd_warn(rbd_dev, "%s %llx at %llx (%llx)",
G
Guangliang Zhao 已提交
2296 2297
			obj_op_name(op_type), obj_request->length,
			obj_request->img_offset, obj_request->offset);
2298
		rbd_warn(rbd_dev, "  result %d xferred %x",
2299 2300 2301
			result, xferred);
		if (!img_request->result)
			img_request->result = result;
2302 2303 2304 2305 2306
		/*
		 * Need to end I/O on the entire obj_request worth of
		 * bytes in case of error.
		 */
		xferred = obj_request->length;
2307 2308
	}

2309 2310 2311 2312 2313 2314 2315
	/* Image object requests don't own their page array */

	if (obj_request->type == OBJ_REQUEST_PAGES) {
		obj_request->pages = NULL;
		obj_request->page_count = 0;
	}

A
Alex Elder 已提交
2316 2317 2318 2319 2320
	if (img_request_child_test(img_request)) {
		rbd_assert(img_request->obj_request != NULL);
		more = obj_request->which < img_request->obj_request_count - 1;
	} else {
		rbd_assert(img_request->rq != NULL);
C
Christoph Hellwig 已提交
2321 2322 2323 2324

		more = blk_update_request(img_request->rq, result, xferred);
		if (!more)
			__blk_mq_end_request(img_request->rq, result);
A
Alex Elder 已提交
2325 2326 2327
	}

	return more;
2328 2329
}

2330 2331 2332 2333 2334 2335
static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	u32 which = obj_request->which;
	bool more = true;

2336
	rbd_assert(obj_request_img_data_test(obj_request));
2337 2338 2339 2340 2341 2342 2343 2344 2345 2346 2347 2348 2349 2350 2351 2352 2353 2354
	img_request = obj_request->img_request;

	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
	rbd_assert(img_request != NULL);
	rbd_assert(img_request->obj_request_count > 0);
	rbd_assert(which != BAD_WHICH);
	rbd_assert(which < img_request->obj_request_count);

	spin_lock_irq(&img_request->completion_lock);
	if (which != img_request->next_completion)
		goto out;

	for_each_obj_request_from(img_request, obj_request) {
		rbd_assert(more);
		rbd_assert(which < img_request->obj_request_count);

		if (!obj_request_done_test(obj_request))
			break;
2355
		more = rbd_img_obj_end_request(obj_request);
2356 2357 2358 2359 2360 2361 2362
		which++;
	}

	rbd_assert(more ^ (which == img_request->obj_request_count));
	img_request->next_completion = which;
out:
	spin_unlock_irq(&img_request->completion_lock);
2363
	rbd_img_request_put(img_request);
2364 2365 2366 2367 2368

	if (!more)
		rbd_img_request_complete(img_request);
}

2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387
/*
 * Add individual osd ops to the given ceph_osd_request and prepare
 * them for submission. num_ops is the current number of
 * osd operations already to the object request.
 */
static void rbd_img_obj_request_fill(struct rbd_obj_request *obj_request,
				struct ceph_osd_request *osd_request,
				enum obj_operation_type op_type,
				unsigned int num_ops)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct rbd_device *rbd_dev = img_request->rbd_dev;
	u64 object_size = rbd_obj_bytes(&rbd_dev->header);
	u64 offset = obj_request->offset;
	u64 length = obj_request->length;
	u64 img_end;
	u16 opcode;

	if (op_type == OBJ_OP_DISCARD) {
2388 2389 2390
		if (!offset && length == object_size &&
		    (!img_request_layered_test(img_request) ||
		     !obj_request_overlaps_parent(obj_request))) {
2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404
			opcode = CEPH_OSD_OP_DELETE;
		} else if ((offset + length == object_size)) {
			opcode = CEPH_OSD_OP_TRUNCATE;
		} else {
			down_read(&rbd_dev->header_rwsem);
			img_end = rbd_dev->header.image_size;
			up_read(&rbd_dev->header_rwsem);

			if (obj_request->img_offset + length == img_end)
				opcode = CEPH_OSD_OP_TRUNCATE;
			else
				opcode = CEPH_OSD_OP_ZERO;
		}
	} else if (op_type == OBJ_OP_WRITE) {
2405 2406 2407 2408
		if (!offset && length == object_size)
			opcode = CEPH_OSD_OP_WRITEFULL;
		else
			opcode = CEPH_OSD_OP_WRITE;
2409 2410 2411 2412 2413 2414 2415
		osd_req_op_alloc_hint_init(osd_request, num_ops,
					object_size, object_size);
		num_ops++;
	} else {
		opcode = CEPH_OSD_OP_READ;
	}

2416
	if (opcode == CEPH_OSD_OP_DELETE)
2417
		osd_req_op_init(osd_request, num_ops, opcode, 0);
2418 2419 2420 2421
	else
		osd_req_op_extent_init(osd_request, num_ops, opcode,
				       offset, length, 0, 0);

2422 2423 2424 2425 2426 2427 2428 2429 2430 2431 2432 2433 2434 2435 2436
	if (obj_request->type == OBJ_REQUEST_BIO)
		osd_req_op_extent_osd_data_bio(osd_request, num_ops,
					obj_request->bio_list, length);
	else if (obj_request->type == OBJ_REQUEST_PAGES)
		osd_req_op_extent_osd_data_pages(osd_request, num_ops,
					obj_request->pages, length,
					offset & ~PAGE_MASK, false, false);

	/* Discards are also writes */
	if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
		rbd_osd_req_format_write(obj_request);
	else
		rbd_osd_req_format_read(obj_request);
}

2437 2438 2439 2440 2441 2442 2443 2444 2445 2446 2447
/*
 * Split up an image request into one or more object requests, each
 * to a different object.  The "type" parameter indicates whether
 * "data_desc" is the pointer to the head of a list of bio
 * structures, or the base of a page array.  In either case this
 * function assumes data_desc describes memory sufficient to hold
 * all data described by the image request.
 */
static int rbd_img_request_fill(struct rbd_img_request *img_request,
					enum obj_request_type type,
					void *data_desc)
A
Alex Elder 已提交
2448 2449 2450 2451
{
	struct rbd_device *rbd_dev = img_request->rbd_dev;
	struct rbd_obj_request *obj_request = NULL;
	struct rbd_obj_request *next_obj_request;
J
Jingoo Han 已提交
2452
	struct bio *bio_list = NULL;
2453
	unsigned int bio_offset = 0;
J
Jingoo Han 已提交
2454
	struct page **pages = NULL;
G
Guangliang Zhao 已提交
2455
	enum obj_operation_type op_type;
2456
	u64 img_offset;
A
Alex Elder 已提交
2457 2458
	u64 resid;

2459 2460
	dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
		(int)type, data_desc);
A
Alex Elder 已提交
2461

2462
	img_offset = img_request->offset;
A
Alex Elder 已提交
2463
	resid = img_request->length;
A
Alex Elder 已提交
2464
	rbd_assert(resid > 0);
2465
	op_type = rbd_img_request_op_type(img_request);
2466 2467 2468

	if (type == OBJ_REQUEST_BIO) {
		bio_list = data_desc;
2469 2470
		rbd_assert(img_offset ==
			   bio_list->bi_iter.bi_sector << SECTOR_SHIFT);
2471
	} else if (type == OBJ_REQUEST_PAGES) {
2472 2473 2474
		pages = data_desc;
	}

A
Alex Elder 已提交
2475
	while (resid) {
2476
		struct ceph_osd_request *osd_req;
A
Alex Elder 已提交
2477 2478 2479 2480
		const char *object_name;
		u64 offset;
		u64 length;

2481
		object_name = rbd_segment_name(rbd_dev, img_offset);
A
Alex Elder 已提交
2482 2483
		if (!object_name)
			goto out_unwind;
2484 2485
		offset = rbd_segment_offset(rbd_dev, img_offset);
		length = rbd_segment_length(rbd_dev, img_offset, resid);
A
Alex Elder 已提交
2486
		obj_request = rbd_obj_request_create(object_name,
2487
						offset, length, type);
2488 2489
		/* object request has its own copy of the object name */
		rbd_segment_name_free(object_name);
A
Alex Elder 已提交
2490 2491
		if (!obj_request)
			goto out_unwind;
2492

2493 2494 2495 2496 2497
		/*
		 * set obj_request->img_request before creating the
		 * osd_request so that it gets the right snapc
		 */
		rbd_img_obj_request_add(img_request, obj_request);
A
Alex Elder 已提交
2498

2499 2500 2501 2502 2503 2504 2505 2506 2507
		if (type == OBJ_REQUEST_BIO) {
			unsigned int clone_size;

			rbd_assert(length <= (u64)UINT_MAX);
			clone_size = (unsigned int)length;
			obj_request->bio_list =
					bio_chain_clone_range(&bio_list,
								&bio_offset,
								clone_size,
2508
								GFP_NOIO);
2509
			if (!obj_request->bio_list)
2510
				goto out_unwind;
2511
		} else if (type == OBJ_REQUEST_PAGES) {
2512 2513 2514 2515 2516 2517 2518 2519 2520
			unsigned int page_count;

			obj_request->pages = pages;
			page_count = (u32)calc_pages_for(offset, length);
			obj_request->page_count = page_count;
			if ((offset + length) & ~PAGE_MASK)
				page_count--;	/* more on last page */
			pages += page_count;
		}
A
Alex Elder 已提交
2521

G
Guangliang Zhao 已提交
2522 2523 2524
		osd_req = rbd_osd_req_create(rbd_dev, op_type,
					(op_type == OBJ_OP_WRITE) ? 2 : 1,
					obj_request);
2525
		if (!osd_req)
2526
			goto out_unwind;
2527

2528
		obj_request->osd_req = osd_req;
2529
		obj_request->callback = rbd_img_obj_callback;
2530
		obj_request->img_offset = img_offset;
2531

2532
		rbd_img_obj_request_fill(obj_request, osd_req, op_type, 0);
A
Alex Elder 已提交
2533

2534
		rbd_img_request_get(img_request);
A
Alex Elder 已提交
2535

2536
		img_offset += length;
A
Alex Elder 已提交
2537 2538 2539 2540 2541 2542 2543
		resid -= length;
	}

	return 0;

out_unwind:
	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
2544
		rbd_img_obj_request_del(img_request, obj_request);
A
Alex Elder 已提交
2545 2546 2547 2548

	return -ENOMEM;
}

2549
static void
I
Ilya Dryomov 已提交
2550
rbd_osd_copyup_callback(struct rbd_obj_request *obj_request)
2551 2552 2553
{
	struct rbd_img_request *img_request;
	struct rbd_device *rbd_dev;
2554
	struct page **pages;
2555 2556
	u32 page_count;

I
Ilya Dryomov 已提交
2557 2558
	dout("%s: obj %p\n", __func__, obj_request);

2559 2560
	rbd_assert(obj_request->type == OBJ_REQUEST_BIO ||
		obj_request->type == OBJ_REQUEST_NODATA);
2561 2562 2563 2564 2565 2566 2567
	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;
	rbd_assert(img_request);

	rbd_dev = img_request->rbd_dev;
	rbd_assert(rbd_dev);

2568 2569
	pages = obj_request->copyup_pages;
	rbd_assert(pages != NULL);
2570
	obj_request->copyup_pages = NULL;
2571 2572 2573 2574
	page_count = obj_request->copyup_page_count;
	rbd_assert(page_count);
	obj_request->copyup_page_count = 0;
	ceph_release_page_vector(pages, page_count);
2575 2576 2577 2578 2579 2580 2581 2582 2583 2584

	/*
	 * We want the transfer count to reflect the size of the
	 * original write request.  There is no such thing as a
	 * successful short write, so if the request was successful
	 * we can just set it to the originally-requested length.
	 */
	if (!obj_request->result)
		obj_request->xferred = obj_request->length;

I
Ilya Dryomov 已提交
2585
	obj_request_done_set(obj_request);
2586 2587
}

2588 2589 2590 2591
static void
rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
{
	struct rbd_obj_request *orig_request;
2592 2593 2594
	struct ceph_osd_request *osd_req;
	struct ceph_osd_client *osdc;
	struct rbd_device *rbd_dev;
2595
	struct page **pages;
2596
	enum obj_operation_type op_type;
2597
	u32 page_count;
2598
	int img_result;
2599
	u64 parent_length;
2600 2601 2602 2603 2604 2605 2606 2607

	rbd_assert(img_request_child_test(img_request));

	/* First get what we need from the image request */

	pages = img_request->copyup_pages;
	rbd_assert(pages != NULL);
	img_request->copyup_pages = NULL;
2608 2609 2610
	page_count = img_request->copyup_page_count;
	rbd_assert(page_count);
	img_request->copyup_page_count = 0;
2611 2612 2613

	orig_request = img_request->obj_request;
	rbd_assert(orig_request != NULL);
2614
	rbd_assert(obj_request_type_valid(orig_request->type));
2615
	img_result = img_request->result;
2616 2617
	parent_length = img_request->length;
	rbd_assert(parent_length == img_request->xferred);
2618
	rbd_img_request_put(img_request);
2619

2620 2621
	rbd_assert(orig_request->img_request);
	rbd_dev = orig_request->img_request->rbd_dev;
2622 2623
	rbd_assert(rbd_dev);

2624 2625 2626 2627 2628 2629 2630
	/*
	 * If the overlap has become 0 (most likely because the
	 * image has been flattened) we need to free the pages
	 * and re-submit the original write request.
	 */
	if (!rbd_dev->parent_overlap) {
		struct ceph_osd_client *osdc;
2631

2632 2633 2634 2635 2636 2637
		ceph_release_page_vector(pages, page_count);
		osdc = &rbd_dev->rbd_client->client->osdc;
		img_result = rbd_obj_request_submit(osdc, orig_request);
		if (!img_result)
			return;
	}
2638

2639
	if (img_result)
2640 2641
		goto out_err;

2642 2643
	/*
	 * The original osd request is of no use to use any more.
2644
	 * We need a new one that can hold the three ops in a copyup
2645 2646 2647
	 * request.  Allocate the new copyup osd request for the
	 * original request, and release the old one.
	 */
2648
	img_result = -ENOMEM;
2649 2650 2651
	osd_req = rbd_osd_req_create_copyup(orig_request);
	if (!osd_req)
		goto out_err;
2652
	rbd_osd_req_destroy(orig_request->osd_req);
2653 2654
	orig_request->osd_req = osd_req;
	orig_request->copyup_pages = pages;
2655
	orig_request->copyup_page_count = page_count;
2656

2657
	/* Initialize the copyup op */
2658

2659
	osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
2660
	osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
2661
						false, false);
2662

2663
	/* Add the other op(s) */
2664

2665 2666
	op_type = rbd_img_request_op_type(orig_request->img_request);
	rbd_img_obj_request_fill(orig_request, osd_req, op_type, 1);
2667 2668 2669 2670

	/* All set, send it off. */

	osdc = &rbd_dev->rbd_client->client->osdc;
2671 2672
	img_result = rbd_obj_request_submit(osdc, orig_request);
	if (!img_result)
2673 2674 2675 2676
		return;
out_err:
	/* Record the error code and complete the request */

2677
	orig_request->result = img_result;
2678 2679 2680
	orig_request->xferred = 0;
	obj_request_done_set(orig_request);
	rbd_obj_request_complete(orig_request);
2681 2682 2683 2684 2685 2686 2687 2688 2689 2690 2691 2692 2693 2694 2695 2696 2697 2698 2699 2700 2701 2702 2703 2704 2705 2706 2707 2708
}

/*
 * Read from the parent image the range of data that covers the
 * entire target of the given object request.  This is used for
 * satisfying a layered image write request when the target of an
 * object request from the image request does not exist.
 *
 * A page array big enough to hold the returned data is allocated
 * and supplied to rbd_img_request_fill() as the "data descriptor."
 * When the read completes, this page array will be transferred to
 * the original object request for the copyup operation.
 *
 * If an error occurs, record it as the result of the original
 * object request and mark it done so it gets completed.
 */
static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = NULL;
	struct rbd_img_request *parent_request = NULL;
	struct rbd_device *rbd_dev;
	u64 img_offset;
	u64 length;
	struct page **pages = NULL;
	u32 page_count;
	int result;

	rbd_assert(obj_request_img_data_test(obj_request));
2709
	rbd_assert(obj_request_type_valid(obj_request->type));
2710 2711 2712 2713 2714 2715 2716 2717 2718 2719 2720 2721 2722

	img_request = obj_request->img_request;
	rbd_assert(img_request != NULL);
	rbd_dev = img_request->rbd_dev;
	rbd_assert(rbd_dev->parent != NULL);

	/*
	 * Determine the byte range covered by the object in the
	 * child image to which the original request was to be sent.
	 */
	img_offset = obj_request->img_offset - obj_request->offset;
	length = (u64)1 << rbd_dev->header.obj_order;

A
Alex Elder 已提交
2723 2724 2725 2726 2727 2728 2729 2730 2731 2732
	/*
	 * There is no defined parent data beyond the parent
	 * overlap, so limit what we read at that boundary if
	 * necessary.
	 */
	if (img_offset + length > rbd_dev->parent_overlap) {
		rbd_assert(img_offset < rbd_dev->parent_overlap);
		length = rbd_dev->parent_overlap - img_offset;
	}

2733 2734 2735 2736 2737 2738 2739 2740 2741 2742 2743 2744 2745
	/*
	 * Allocate a page array big enough to receive the data read
	 * from the parent.
	 */
	page_count = (u32)calc_pages_for(0, length);
	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
	if (IS_ERR(pages)) {
		result = PTR_ERR(pages);
		pages = NULL;
		goto out_err;
	}

	result = -ENOMEM;
2746 2747
	parent_request = rbd_parent_request_create(obj_request,
						img_offset, length);
2748 2749 2750 2751 2752 2753 2754
	if (!parent_request)
		goto out_err;

	result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
	if (result)
		goto out_err;
	parent_request->copyup_pages = pages;
2755
	parent_request->copyup_page_count = page_count;
2756 2757 2758 2759 2760 2761 2762

	parent_request->callback = rbd_img_obj_parent_read_full_callback;
	result = rbd_img_request_submit(parent_request);
	if (!result)
		return 0;

	parent_request->copyup_pages = NULL;
2763
	parent_request->copyup_page_count = 0;
2764 2765 2766 2767 2768 2769 2770 2771 2772 2773 2774 2775 2776 2777
	parent_request->obj_request = NULL;
	rbd_obj_request_put(obj_request);
out_err:
	if (pages)
		ceph_release_page_vector(pages, page_count);
	if (parent_request)
		rbd_img_request_put(parent_request);
	obj_request->result = result;
	obj_request->xferred = 0;
	obj_request_done_set(obj_request);

	return result;
}

2778 2779 2780
static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_obj_request *orig_request;
2781
	struct rbd_device *rbd_dev;
2782 2783 2784 2785 2786 2787 2788 2789 2790 2791 2792
	int result;

	rbd_assert(!obj_request_img_data_test(obj_request));

	/*
	 * All we need from the object request is the original
	 * request and the result of the STAT op.  Grab those, then
	 * we're done with the request.
	 */
	orig_request = obj_request->obj_request;
	obj_request->obj_request = NULL;
2793
	rbd_obj_request_put(orig_request);
2794 2795 2796 2797 2798 2799 2800 2801 2802 2803 2804
	rbd_assert(orig_request);
	rbd_assert(orig_request->img_request);

	result = obj_request->result;
	obj_request->result = 0;

	dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
		obj_request, orig_request, result,
		obj_request->xferred, obj_request->length);
	rbd_obj_request_put(obj_request);

2805 2806 2807 2808 2809 2810 2811 2812 2813 2814 2815 2816 2817 2818
	/*
	 * If the overlap has become 0 (most likely because the
	 * image has been flattened) we need to free the pages
	 * and re-submit the original write request.
	 */
	rbd_dev = orig_request->img_request->rbd_dev;
	if (!rbd_dev->parent_overlap) {
		struct ceph_osd_client *osdc;

		osdc = &rbd_dev->rbd_client->client->osdc;
		result = rbd_obj_request_submit(osdc, orig_request);
		if (!result)
			return;
	}
2819 2820 2821 2822 2823 2824 2825 2826 2827 2828 2829 2830 2831

	/*
	 * Our only purpose here is to determine whether the object
	 * exists, and we don't want to treat the non-existence as
	 * an error.  If something else comes back, transfer the
	 * error to the original request and complete it now.
	 */
	if (!result) {
		obj_request_existence_set(orig_request, true);
	} else if (result == -ENOENT) {
		obj_request_existence_set(orig_request, false);
	} else if (result) {
		orig_request->result = result;
2832
		goto out;
2833 2834 2835 2836 2837 2838
	}

	/*
	 * Resubmit the original request now that we have recorded
	 * whether the target object exists.
	 */
2839
	orig_request->result = rbd_img_obj_request_submit(orig_request);
2840
out:
2841 2842 2843 2844 2845 2846 2847 2848 2849 2850 2851 2852 2853 2854 2855 2856 2857 2858 2859 2860 2861 2862 2863 2864 2865 2866 2867 2868 2869 2870 2871 2872 2873 2874 2875 2876 2877 2878 2879 2880 2881
	if (orig_request->result)
		rbd_obj_request_complete(orig_request);
}

static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
{
	struct rbd_obj_request *stat_request;
	struct rbd_device *rbd_dev;
	struct ceph_osd_client *osdc;
	struct page **pages = NULL;
	u32 page_count;
	size_t size;
	int ret;

	/*
	 * The response data for a STAT call consists of:
	 *     le64 length;
	 *     struct {
	 *         le32 tv_sec;
	 *         le32 tv_nsec;
	 *     } mtime;
	 */
	size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
	page_count = (u32)calc_pages_for(0, size);
	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = -ENOMEM;
	stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
							OBJ_REQUEST_PAGES);
	if (!stat_request)
		goto out;

	rbd_obj_request_get(obj_request);
	stat_request->obj_request = obj_request;
	stat_request->pages = pages;
	stat_request->page_count = page_count;

	rbd_assert(obj_request->img_request);
	rbd_dev = obj_request->img_request->rbd_dev;
G
Guangliang Zhao 已提交
2882
	stat_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
2883
						   stat_request);
2884 2885 2886 2887
	if (!stat_request->osd_req)
		goto out;
	stat_request->callback = rbd_img_obj_exists_callback;

2888
	osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT, 0);
2889 2890
	osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
					false, false);
2891
	rbd_osd_req_format_read(stat_request);
2892 2893 2894 2895 2896 2897 2898 2899 2900 2901

	osdc = &rbd_dev->rbd_client->client->osdc;
	ret = rbd_obj_request_submit(osdc, stat_request);
out:
	if (ret)
		rbd_obj_request_put(obj_request);

	return ret;
}

2902
static bool img_obj_request_simple(struct rbd_obj_request *obj_request)
2903 2904
{
	struct rbd_img_request *img_request;
A
Alex Elder 已提交
2905
	struct rbd_device *rbd_dev;
2906 2907 2908 2909 2910

	rbd_assert(obj_request_img_data_test(obj_request));

	img_request = obj_request->img_request;
	rbd_assert(img_request);
A
Alex Elder 已提交
2911
	rbd_dev = img_request->rbd_dev;
2912

2913
	/* Reads */
2914 2915
	if (!img_request_write_test(img_request) &&
	    !img_request_discard_test(img_request))
2916 2917 2918 2919 2920 2921
		return true;

	/* Non-layered writes */
	if (!img_request_layered_test(img_request))
		return true;

2922
	/*
2923 2924
	 * Layered writes outside of the parent overlap range don't
	 * share any data with the parent.
2925
	 */
2926 2927
	if (!obj_request_overlaps_parent(obj_request))
		return true;
2928

2929 2930 2931 2932 2933 2934 2935 2936
	/*
	 * Entire-object layered writes - we will overwrite whatever
	 * parent data there is anyway.
	 */
	if (!obj_request->offset &&
	    obj_request->length == rbd_obj_bytes(&rbd_dev->header))
		return true;

2937 2938 2939 2940 2941 2942 2943 2944 2945 2946 2947 2948 2949 2950
	/*
	 * If the object is known to already exist, its parent data has
	 * already been copied.
	 */
	if (obj_request_known_test(obj_request) &&
	    obj_request_exists_test(obj_request))
		return true;

	return false;
}

static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
{
	if (img_obj_request_simple(obj_request)) {
2951 2952 2953 2954 2955 2956 2957 2958 2959 2960
		struct rbd_device *rbd_dev;
		struct ceph_osd_client *osdc;

		rbd_dev = obj_request->img_request->rbd_dev;
		osdc = &rbd_dev->rbd_client->client->osdc;

		return rbd_obj_request_submit(osdc, obj_request);
	}

	/*
2961 2962 2963 2964
	 * It's a layered write.  The target object might exist but
	 * we may not know that yet.  If we know it doesn't exist,
	 * start by reading the data for the full target object from
	 * the parent so we can use it for a copyup to the target.
2965
	 */
2966
	if (obj_request_known_test(obj_request))
2967 2968 2969
		return rbd_img_obj_parent_read_full(obj_request);

	/* We don't know whether the target exists.  Go find out. */
2970 2971 2972 2973

	return rbd_img_obj_exists_submit(obj_request);
}

A
Alex Elder 已提交
2974 2975 2976
static int rbd_img_request_submit(struct rbd_img_request *img_request)
{
	struct rbd_obj_request *obj_request;
2977
	struct rbd_obj_request *next_obj_request;
2978
	int ret = 0;
A
Alex Elder 已提交
2979

A
Alex Elder 已提交
2980
	dout("%s: img %p\n", __func__, img_request);
A
Alex Elder 已提交
2981

2982 2983
	rbd_img_request_get(img_request);
	for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
2984
		ret = rbd_img_obj_request_submit(obj_request);
A
Alex Elder 已提交
2985
		if (ret)
2986
			goto out_put_ireq;
A
Alex Elder 已提交
2987 2988
	}

2989 2990 2991
out_put_ireq:
	rbd_img_request_put(img_request);
	return ret;
A
Alex Elder 已提交
2992
}
A
Alex Elder 已提交
2993 2994 2995 2996

static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
{
	struct rbd_obj_request *obj_request;
A
Alex Elder 已提交
2997 2998
	struct rbd_device *rbd_dev;
	u64 obj_end;
2999 3000
	u64 img_xferred;
	int img_result;
A
Alex Elder 已提交
3001 3002 3003

	rbd_assert(img_request_child_test(img_request));

3004 3005
	/* First get what we need from the image request and release it */

A
Alex Elder 已提交
3006
	obj_request = img_request->obj_request;
3007 3008 3009 3010 3011 3012 3013 3014 3015
	img_xferred = img_request->xferred;
	img_result = img_request->result;
	rbd_img_request_put(img_request);

	/*
	 * If the overlap has become 0 (most likely because the
	 * image has been flattened) we need to re-submit the
	 * original request.
	 */
A
Alex Elder 已提交
3016 3017
	rbd_assert(obj_request);
	rbd_assert(obj_request->img_request);
3018 3019 3020 3021 3022 3023 3024 3025 3026
	rbd_dev = obj_request->img_request->rbd_dev;
	if (!rbd_dev->parent_overlap) {
		struct ceph_osd_client *osdc;

		osdc = &rbd_dev->rbd_client->client->osdc;
		img_result = rbd_obj_request_submit(osdc, obj_request);
		if (!img_result)
			return;
	}
A
Alex Elder 已提交
3027

3028
	obj_request->result = img_result;
A
Alex Elder 已提交
3029 3030 3031 3032 3033 3034 3035 3036 3037 3038 3039 3040 3041 3042 3043 3044 3045 3046
	if (obj_request->result)
		goto out;

	/*
	 * We need to zero anything beyond the parent overlap
	 * boundary.  Since rbd_img_obj_request_read_callback()
	 * will zero anything beyond the end of a short read, an
	 * easy way to do this is to pretend the data from the
	 * parent came up short--ending at the overlap boundary.
	 */
	rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
	obj_end = obj_request->img_offset + obj_request->length;
	if (obj_end > rbd_dev->parent_overlap) {
		u64 xferred = 0;

		if (obj_request->img_offset < rbd_dev->parent_overlap)
			xferred = rbd_dev->parent_overlap -
					obj_request->img_offset;
A
Alex Elder 已提交
3047

3048
		obj_request->xferred = min(img_xferred, xferred);
A
Alex Elder 已提交
3049
	} else {
3050
		obj_request->xferred = img_xferred;
A
Alex Elder 已提交
3051 3052
	}
out:
A
Alex Elder 已提交
3053 3054 3055 3056 3057 3058 3059 3060 3061 3062 3063 3064
	rbd_img_obj_request_read_callback(obj_request);
	rbd_obj_request_complete(obj_request);
}

static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	int result;

	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request->img_request != NULL);
	rbd_assert(obj_request->result == (s32) -ENOENT);
3065
	rbd_assert(obj_request_type_valid(obj_request->type));
A
Alex Elder 已提交
3066 3067

	/* rbd_read_finish(obj_request, obj_request->length); */
3068
	img_request = rbd_parent_request_create(obj_request,
A
Alex Elder 已提交
3069
						obj_request->img_offset,
3070
						obj_request->length);
A
Alex Elder 已提交
3071 3072 3073 3074
	result = -ENOMEM;
	if (!img_request)
		goto out_err;

3075 3076 3077 3078 3079 3080
	if (obj_request->type == OBJ_REQUEST_BIO)
		result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
						obj_request->bio_list);
	else
		result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
						obj_request->pages);
A
Alex Elder 已提交
3081 3082 3083 3084 3085 3086 3087 3088 3089 3090 3091 3092 3093 3094 3095 3096
	if (result)
		goto out_err;

	img_request->callback = rbd_img_parent_read_callback;
	result = rbd_img_request_submit(img_request);
	if (result)
		goto out_err;

	return;
out_err:
	if (img_request)
		rbd_img_request_put(img_request);
	obj_request->result = result;
	obj_request->xferred = 0;
	obj_request_done_set(obj_request);
}
A
Alex Elder 已提交
3097

3098 3099
static void rbd_watch_cb(void *arg, u64 notify_id, u64 cookie,
			 u64 notifier_id, void *data, size_t data_len)
A
Alex Elder 已提交
3100
{
3101 3102
	struct rbd_device *rbd_dev = arg;
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3103
	int ret;
A
Alex Elder 已提交
3104

3105 3106
	dout("%s rbd_dev %p cookie %llu notify_id %llu\n", __func__, rbd_dev,
	     cookie, notify_id);
3107 3108 3109 3110 3111 3112 3113

	/*
	 * Until adequate refresh error handling is in place, there is
	 * not much we can do here, except warn.
	 *
	 * See http://tracker.ceph.com/issues/5040
	 */
3114 3115
	ret = rbd_dev_refresh(rbd_dev);
	if (ret)
3116
		rbd_warn(rbd_dev, "refresh failed: %d", ret);
A
Alex Elder 已提交
3117

3118 3119 3120
	ret = ceph_osdc_notify_ack(osdc, &rbd_dev->header_oid,
				   &rbd_dev->header_oloc, notify_id, cookie,
				   NULL, 0);
3121
	if (ret)
3122
		rbd_warn(rbd_dev, "notify_ack ret %d", ret);
A
Alex Elder 已提交
3123 3124
}

3125 3126
static void __rbd_unregister_watch(struct rbd_device *rbd_dev);

3127
static void rbd_watch_errcb(void *arg, u64 cookie, int err)
3128
{
3129
	struct rbd_device *rbd_dev = arg;
3130

3131
	rbd_warn(rbd_dev, "encountered watch error: %d", err);
3132

3133 3134 3135 3136
	mutex_lock(&rbd_dev->watch_mutex);
	if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED) {
		__rbd_unregister_watch(rbd_dev);
		rbd_dev->watch_state = RBD_WATCH_STATE_ERROR;
3137

3138
		queue_delayed_work(rbd_dev->task_wq, &rbd_dev->watch_dwork, 0);
3139
	}
3140
	mutex_unlock(&rbd_dev->watch_mutex);
3141 3142
}

3143
/*
3144
 * watch_mutex must be locked
3145
 */
3146
static int __rbd_register_watch(struct rbd_device *rbd_dev)
3147 3148
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3149
	struct ceph_osd_linger_request *handle;
3150

3151
	rbd_assert(!rbd_dev->watch_handle);
3152
	dout("%s rbd_dev %p\n", __func__, rbd_dev);
3153

3154 3155 3156 3157 3158
	handle = ceph_osdc_watch(osdc, &rbd_dev->header_oid,
				 &rbd_dev->header_oloc, rbd_watch_cb,
				 rbd_watch_errcb, rbd_dev);
	if (IS_ERR(handle))
		return PTR_ERR(handle);
3159

3160
	rbd_dev->watch_handle = handle;
3161 3162 3163
	return 0;
}

3164 3165 3166 3167
/*
 * watch_mutex must be locked
 */
static void __rbd_unregister_watch(struct rbd_device *rbd_dev)
3168
{
3169 3170
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	int ret;
3171

3172 3173
	rbd_assert(rbd_dev->watch_handle);
	dout("%s rbd_dev %p\n", __func__, rbd_dev);
3174

3175 3176 3177
	ret = ceph_osdc_unwatch(osdc, rbd_dev->watch_handle);
	if (ret)
		rbd_warn(rbd_dev, "failed to unwatch: %d", ret);
3178

3179
	rbd_dev->watch_handle = NULL;
3180 3181
}

3182 3183 3184 3185 3186 3187 3188 3189 3190 3191 3192 3193 3194 3195 3196 3197 3198 3199 3200
static int rbd_register_watch(struct rbd_device *rbd_dev)
{
	int ret;

	mutex_lock(&rbd_dev->watch_mutex);
	rbd_assert(rbd_dev->watch_state == RBD_WATCH_STATE_UNREGISTERED);
	ret = __rbd_register_watch(rbd_dev);
	if (ret)
		goto out;

	rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
	rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;

out:
	mutex_unlock(&rbd_dev->watch_mutex);
	return ret;
}

static void cancel_tasks_sync(struct rbd_device *rbd_dev)
3201
{
3202 3203 3204 3205 3206 3207 3208 3209 3210 3211 3212 3213 3214 3215
	dout("%s rbd_dev %p\n", __func__, rbd_dev);

	cancel_delayed_work_sync(&rbd_dev->watch_dwork);
}

static void rbd_unregister_watch(struct rbd_device *rbd_dev)
{
	cancel_tasks_sync(rbd_dev);

	mutex_lock(&rbd_dev->watch_mutex);
	if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED)
		__rbd_unregister_watch(rbd_dev);
	rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
	mutex_unlock(&rbd_dev->watch_mutex);
I
Ilya Dryomov 已提交
3216 3217

	ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
3218 3219
}

3220 3221 3222 3223 3224 3225 3226 3227 3228 3229 3230 3231 3232 3233 3234 3235 3236 3237 3238 3239 3240 3241 3242 3243 3244 3245 3246 3247 3248 3249 3250 3251 3252 3253 3254 3255
static void rbd_reregister_watch(struct work_struct *work)
{
	struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
					    struct rbd_device, watch_dwork);
	int ret;

	dout("%s rbd_dev %p\n", __func__, rbd_dev);

	mutex_lock(&rbd_dev->watch_mutex);
	if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR)
		goto fail_unlock;

	ret = __rbd_register_watch(rbd_dev);
	if (ret) {
		rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
		if (ret != -EBLACKLISTED)
			queue_delayed_work(rbd_dev->task_wq,
					   &rbd_dev->watch_dwork,
					   RBD_RETRY_DELAY);
		goto fail_unlock;
	}

	rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
	rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
	mutex_unlock(&rbd_dev->watch_mutex);

	ret = rbd_dev_refresh(rbd_dev);
	if (ret)
		rbd_warn(rbd_dev, "reregisteration refresh failed: %d", ret);

	return;

fail_unlock:
	mutex_unlock(&rbd_dev->watch_mutex);
}

3256
/*
3257 3258
 * Synchronous osd object method call.  Returns the number of bytes
 * returned in the outbound buffer, or a negative error code.
3259 3260 3261 3262 3263
 */
static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
			     const char *object_name,
			     const char *class_name,
			     const char *method_name,
3264
			     const void *outbound,
3265
			     size_t outbound_size,
3266
			     void *inbound,
3267
			     size_t inbound_size)
3268
{
3269
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3270 3271 3272 3273 3274 3275
	struct rbd_obj_request *obj_request;
	struct page **pages;
	u32 page_count;
	int ret;

	/*
3276 3277 3278 3279 3280
	 * Method calls are ultimately read operations.  The result
	 * should placed into the inbound buffer provided.  They
	 * also supply outbound data--parameters for the object
	 * method.  Currently if this is present it will be a
	 * snapshot id.
3281
	 */
3282
	page_count = (u32)calc_pages_for(0, inbound_size);
3283 3284 3285 3286 3287
	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = -ENOMEM;
3288
	obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
3289 3290 3291 3292 3293 3294 3295
							OBJ_REQUEST_PAGES);
	if (!obj_request)
		goto out;

	obj_request->pages = pages;
	obj_request->page_count = page_count;

G
Guangliang Zhao 已提交
3296
	obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
3297
						  obj_request);
3298 3299 3300
	if (!obj_request->osd_req)
		goto out;

3301
	osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
3302 3303 3304 3305 3306 3307 3308 3309 3310 3311 3312 3313 3314
					class_name, method_name);
	if (outbound_size) {
		struct ceph_pagelist *pagelist;

		pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
		if (!pagelist)
			goto out;

		ceph_pagelist_init(pagelist);
		ceph_pagelist_append(pagelist, outbound, outbound_size);
		osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
						pagelist);
	}
3315 3316
	osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
					obj_request->pages, inbound_size,
3317
					0, false, false);
3318
	rbd_osd_req_format_read(obj_request);
A
Alex Elder 已提交
3319

3320 3321 3322 3323 3324 3325 3326 3327 3328 3329
	ret = rbd_obj_request_submit(osdc, obj_request);
	if (ret)
		goto out;
	ret = rbd_obj_request_wait(obj_request);
	if (ret)
		goto out;

	ret = obj_request->result;
	if (ret < 0)
		goto out;
3330 3331 3332

	rbd_assert(obj_request->xferred < (u64)INT_MAX);
	ret = (int)obj_request->xferred;
3333
	ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
3334 3335 3336 3337 3338 3339 3340 3341 3342
out:
	if (obj_request)
		rbd_obj_request_put(obj_request);
	else
		ceph_release_page_vector(pages, page_count);

	return ret;
}

C
Christoph Hellwig 已提交
3343
static void rbd_queue_workfn(struct work_struct *work)
A
Alex Elder 已提交
3344
{
C
Christoph Hellwig 已提交
3345 3346
	struct request *rq = blk_mq_rq_from_pdu(work);
	struct rbd_device *rbd_dev = rq->q->queuedata;
I
Ilya Dryomov 已提交
3347
	struct rbd_img_request *img_request;
3348
	struct ceph_snap_context *snapc = NULL;
I
Ilya Dryomov 已提交
3349 3350
	u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
	u64 length = blk_rq_bytes(rq);
G
Guangliang Zhao 已提交
3351
	enum obj_operation_type op_type;
3352
	u64 mapping_size;
A
Alex Elder 已提交
3353 3354
	int result;

C
Christoph Hellwig 已提交
3355 3356 3357 3358 3359 3360 3361
	if (rq->cmd_type != REQ_TYPE_FS) {
		dout("%s: non-fs request type %d\n", __func__,
			(int) rq->cmd_type);
		result = -EIO;
		goto err;
	}

M
Mike Christie 已提交
3362
	if (req_op(rq) == REQ_OP_DISCARD)
3363
		op_type = OBJ_OP_DISCARD;
M
Mike Christie 已提交
3364
	else if (req_op(rq) == REQ_OP_WRITE)
G
Guangliang Zhao 已提交
3365 3366 3367 3368
		op_type = OBJ_OP_WRITE;
	else
		op_type = OBJ_OP_READ;

I
Ilya Dryomov 已提交
3369
	/* Ignore/skip any zero-length requests */
A
Alex Elder 已提交
3370

I
Ilya Dryomov 已提交
3371 3372 3373 3374 3375
	if (!length) {
		dout("%s: zero-length request\n", __func__);
		result = 0;
		goto err_rq;
	}
A
Alex Elder 已提交
3376

G
Guangliang Zhao 已提交
3377
	/* Only reads are allowed to a read-only device */
I
Ilya Dryomov 已提交
3378

G
Guangliang Zhao 已提交
3379
	if (op_type != OBJ_OP_READ) {
I
Ilya Dryomov 已提交
3380 3381 3382
		if (rbd_dev->mapping.read_only) {
			result = -EROFS;
			goto err_rq;
A
Alex Elder 已提交
3383
		}
I
Ilya Dryomov 已提交
3384 3385
		rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
	}
A
Alex Elder 已提交
3386

I
Ilya Dryomov 已提交
3387 3388 3389 3390 3391 3392 3393 3394 3395 3396 3397 3398
	/*
	 * Quit early if the mapped snapshot no longer exists.  It's
	 * still possible the snapshot will have disappeared by the
	 * time our request arrives at the osd, but there's no sense in
	 * sending it if we already know.
	 */
	if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
		dout("request for non-existent snapshot");
		rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
		result = -ENXIO;
		goto err_rq;
	}
A
Alex Elder 已提交
3399

I
Ilya Dryomov 已提交
3400 3401 3402 3403 3404 3405
	if (offset && length > U64_MAX - offset + 1) {
		rbd_warn(rbd_dev, "bad request range (%llu~%llu)", offset,
			 length);
		result = -EINVAL;
		goto err_rq;	/* Shouldn't happen */
	}
A
Alex Elder 已提交
3406

C
Christoph Hellwig 已提交
3407 3408
	blk_mq_start_request(rq);

3409 3410
	down_read(&rbd_dev->header_rwsem);
	mapping_size = rbd_dev->mapping.size;
G
Guangliang Zhao 已提交
3411
	if (op_type != OBJ_OP_READ) {
3412 3413 3414 3415 3416 3417
		snapc = rbd_dev->header.snapc;
		ceph_get_snap_context(snapc);
	}
	up_read(&rbd_dev->header_rwsem);

	if (offset + length > mapping_size) {
I
Ilya Dryomov 已提交
3418
		rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
3419
			 length, mapping_size);
I
Ilya Dryomov 已提交
3420 3421 3422
		result = -EIO;
		goto err_rq;
	}
A
Alex Elder 已提交
3423

G
Guangliang Zhao 已提交
3424
	img_request = rbd_img_request_create(rbd_dev, offset, length, op_type,
3425
					     snapc);
I
Ilya Dryomov 已提交
3426 3427 3428 3429 3430
	if (!img_request) {
		result = -ENOMEM;
		goto err_rq;
	}
	img_request->rq = rq;
3431
	snapc = NULL; /* img_request consumes a ref */
A
Alex Elder 已提交
3432

3433 3434 3435 3436 3437 3438
	if (op_type == OBJ_OP_DISCARD)
		result = rbd_img_request_fill(img_request, OBJ_REQUEST_NODATA,
					      NULL);
	else
		result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
					      rq->bio);
I
Ilya Dryomov 已提交
3439 3440
	if (result)
		goto err_img_request;
A
Alex Elder 已提交
3441

I
Ilya Dryomov 已提交
3442 3443 3444
	result = rbd_img_request_submit(img_request);
	if (result)
		goto err_img_request;
A
Alex Elder 已提交
3445

I
Ilya Dryomov 已提交
3446
	return;
A
Alex Elder 已提交
3447

I
Ilya Dryomov 已提交
3448 3449 3450 3451 3452
err_img_request:
	rbd_img_request_put(img_request);
err_rq:
	if (result)
		rbd_warn(rbd_dev, "%s %llx at %llx result %d",
G
Guangliang Zhao 已提交
3453
			 obj_op_name(op_type), length, offset, result);
3454
	ceph_put_snap_context(snapc);
C
Christoph Hellwig 已提交
3455 3456
err:
	blk_mq_end_request(rq, result);
I
Ilya Dryomov 已提交
3457
}
A
Alex Elder 已提交
3458

C
Christoph Hellwig 已提交
3459 3460
static int rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
I
Ilya Dryomov 已提交
3461
{
C
Christoph Hellwig 已提交
3462 3463
	struct request *rq = bd->rq;
	struct work_struct *work = blk_mq_rq_to_pdu(rq);
A
Alex Elder 已提交
3464

C
Christoph Hellwig 已提交
3465 3466
	queue_work(rbd_wq, work);
	return BLK_MQ_RQ_QUEUE_OK;
A
Alex Elder 已提交
3467 3468
}

3469 3470 3471 3472 3473 3474 3475
static void rbd_free_disk(struct rbd_device *rbd_dev)
{
	struct gendisk *disk = rbd_dev->disk;

	if (!disk)
		return;

3476 3477
	rbd_dev->disk = NULL;
	if (disk->flags & GENHD_FL_UP) {
3478
		del_gendisk(disk);
3479 3480
		if (disk->queue)
			blk_cleanup_queue(disk->queue);
C
Christoph Hellwig 已提交
3481
		blk_mq_free_tag_set(&rbd_dev->tag_set);
3482
	}
3483 3484 3485
	put_disk(disk);
}

3486 3487
static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
				const char *object_name,
3488
				u64 offset, u64 length, void *buf)
3489 3490

{
3491
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3492 3493 3494
	struct rbd_obj_request *obj_request;
	struct page **pages = NULL;
	u32 page_count;
3495
	size_t size;
3496 3497 3498 3499 3500
	int ret;

	page_count = (u32) calc_pages_for(offset, length);
	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
	if (IS_ERR(pages))
3501
		return PTR_ERR(pages);
3502 3503 3504

	ret = -ENOMEM;
	obj_request = rbd_obj_request_create(object_name, offset, length,
3505
							OBJ_REQUEST_PAGES);
3506 3507 3508 3509 3510 3511
	if (!obj_request)
		goto out;

	obj_request->pages = pages;
	obj_request->page_count = page_count;

G
Guangliang Zhao 已提交
3512
	obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
3513
						  obj_request);
3514 3515 3516
	if (!obj_request->osd_req)
		goto out;

3517 3518
	osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
					offset, length, 0, 0);
3519
	osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
3520
					obj_request->pages,
3521 3522 3523
					obj_request->length,
					obj_request->offset & ~PAGE_MASK,
					false, false);
3524
	rbd_osd_req_format_read(obj_request);
A
Alex Elder 已提交
3525

3526 3527 3528 3529 3530 3531 3532 3533 3534 3535
	ret = rbd_obj_request_submit(osdc, obj_request);
	if (ret)
		goto out;
	ret = rbd_obj_request_wait(obj_request);
	if (ret)
		goto out;

	ret = obj_request->result;
	if (ret < 0)
		goto out;
3536 3537 3538

	rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
	size = (size_t) obj_request->xferred;
3539
	ceph_copy_from_page_vector(pages, buf, 0, size);
3540 3541
	rbd_assert(size <= (size_t)INT_MAX);
	ret = (int)size;
3542 3543 3544 3545 3546 3547 3548 3549 3550
out:
	if (obj_request)
		rbd_obj_request_put(obj_request);
	else
		ceph_release_page_vector(pages, page_count);

	return ret;
}

3551
/*
A
Alex Elder 已提交
3552 3553 3554
 * Read the complete header for the given rbd device.  On successful
 * return, the rbd_dev->header field will contain up-to-date
 * information about the image.
3555
 */
3556
static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
3557
{
3558
	struct rbd_image_header_ondisk *ondisk = NULL;
3559
	u32 snap_count = 0;
3560 3561 3562
	u64 names_size = 0;
	u32 want_count;
	int ret;
3563

A
Alex Elder 已提交
3564
	/*
3565 3566 3567 3568 3569
	 * The complete header will include an array of its 64-bit
	 * snapshot ids, followed by the names of those snapshots as
	 * a contiguous block of NUL-terminated strings.  Note that
	 * the number of snapshots could change by the time we read
	 * it in, in which case we re-read it.
A
Alex Elder 已提交
3570
	 */
3571 3572 3573 3574 3575 3576 3577 3578 3579 3580
	do {
		size_t size;

		kfree(ondisk);

		size = sizeof (*ondisk);
		size += snap_count * sizeof (struct rbd_image_snap_ondisk);
		size += names_size;
		ondisk = kmalloc(size, GFP_KERNEL);
		if (!ondisk)
A
Alex Elder 已提交
3581
			return -ENOMEM;
3582

3583
		ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_oid.name,
3584
				       0, size, ondisk);
3585
		if (ret < 0)
A
Alex Elder 已提交
3586
			goto out;
A
Alex Elder 已提交
3587
		if ((size_t)ret < size) {
3588
			ret = -ENXIO;
A
Alex Elder 已提交
3589 3590
			rbd_warn(rbd_dev, "short header read (want %zd got %d)",
				size, ret);
A
Alex Elder 已提交
3591
			goto out;
3592 3593 3594
		}
		if (!rbd_dev_ondisk_valid(ondisk)) {
			ret = -ENXIO;
A
Alex Elder 已提交
3595
			rbd_warn(rbd_dev, "invalid header");
A
Alex Elder 已提交
3596
			goto out;
3597
		}
3598

3599 3600 3601 3602
		names_size = le64_to_cpu(ondisk->snap_names_len);
		want_count = snap_count;
		snap_count = le32_to_cpu(ondisk->snap_count);
	} while (snap_count != want_count);
A
Alex Elder 已提交
3603

A
Alex Elder 已提交
3604 3605
	ret = rbd_header_from_disk(rbd_dev, ondisk);
out:
3606 3607 3608
	kfree(ondisk);

	return ret;
3609 3610
}

3611 3612 3613 3614 3615 3616 3617 3618 3619 3620 3621 3622 3623 3624 3625 3626 3627 3628 3629
/*
 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
 * has disappeared from the (just updated) snapshot context.
 */
static void rbd_exists_validate(struct rbd_device *rbd_dev)
{
	u64 snap_id;

	if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
		return;

	snap_id = rbd_dev->spec->snap_id;
	if (snap_id == CEPH_NOSNAP)
		return;

	if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
		clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
}

3630 3631 3632 3633 3634
static void rbd_dev_update_size(struct rbd_device *rbd_dev)
{
	sector_t size;

	/*
I
Ilya Dryomov 已提交
3635 3636 3637
	 * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
	 * try to update its size.  If REMOVING is set, updating size
	 * is just useless work since the device can't be opened.
3638
	 */
I
Ilya Dryomov 已提交
3639 3640
	if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
	    !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
3641 3642 3643 3644 3645 3646 3647
		size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
		dout("setting size to %llu sectors", (unsigned long long)size);
		set_capacity(rbd_dev->disk, size);
		revalidate_disk(rbd_dev->disk);
	}
}

A
Alex Elder 已提交
3648
static int rbd_dev_refresh(struct rbd_device *rbd_dev)
A
Alex Elder 已提交
3649
{
3650
	u64 mapping_size;
A
Alex Elder 已提交
3651 3652
	int ret;

3653
	down_write(&rbd_dev->header_rwsem);
3654
	mapping_size = rbd_dev->mapping.size;
3655 3656

	ret = rbd_dev_header_info(rbd_dev);
3657
	if (ret)
3658
		goto out;
3659

3660 3661 3662 3663 3664 3665 3666
	/*
	 * If there is a parent, see if it has disappeared due to the
	 * mapped image getting flattened.
	 */
	if (rbd_dev->parent) {
		ret = rbd_dev_v2_parent_info(rbd_dev);
		if (ret)
3667
			goto out;
3668 3669
	}

3670
	if (rbd_dev->spec->snap_id == CEPH_NOSNAP) {
3671
		rbd_dev->mapping.size = rbd_dev->header.image_size;
3672 3673 3674 3675
	} else {
		/* validate mapped snapshot's EXISTS flag */
		rbd_exists_validate(rbd_dev);
	}
3676

3677
out:
3678
	up_write(&rbd_dev->header_rwsem);
3679
	if (!ret && mapping_size != rbd_dev->mapping.size)
3680
		rbd_dev_update_size(rbd_dev);
A
Alex Elder 已提交
3681

3682
	return ret;
A
Alex Elder 已提交
3683 3684
}

C
Christoph Hellwig 已提交
3685 3686 3687 3688 3689 3690 3691 3692 3693 3694 3695 3696 3697 3698 3699 3700
static int rbd_init_request(void *data, struct request *rq,
		unsigned int hctx_idx, unsigned int request_idx,
		unsigned int numa_node)
{
	struct work_struct *work = blk_mq_rq_to_pdu(rq);

	INIT_WORK(work, rbd_queue_workfn);
	return 0;
}

static struct blk_mq_ops rbd_mq_ops = {
	.queue_rq	= rbd_queue_rq,
	.map_queue	= blk_mq_map_queue,
	.init_request	= rbd_init_request,
};

3701 3702 3703 3704
static int rbd_init_disk(struct rbd_device *rbd_dev)
{
	struct gendisk *disk;
	struct request_queue *q;
A
Alex Elder 已提交
3705
	u64 segment_size;
C
Christoph Hellwig 已提交
3706
	int err;
3707 3708

	/* create gendisk info */
3709 3710 3711
	disk = alloc_disk(single_major ?
			  (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
			  RBD_MINORS_PER_MAJOR);
3712
	if (!disk)
A
Alex Elder 已提交
3713
		return -ENOMEM;
3714

A
Alex Elder 已提交
3715
	snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
A
Alex Elder 已提交
3716
		 rbd_dev->dev_id);
3717
	disk->major = rbd_dev->major;
3718
	disk->first_minor = rbd_dev->minor;
3719 3720
	if (single_major)
		disk->flags |= GENHD_FL_EXT_DEVT;
3721 3722 3723
	disk->fops = &rbd_bd_ops;
	disk->private_data = rbd_dev;

C
Christoph Hellwig 已提交
3724 3725
	memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
	rbd_dev->tag_set.ops = &rbd_mq_ops;
I
Ilya Dryomov 已提交
3726
	rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
C
Christoph Hellwig 已提交
3727
	rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
I
Ilya Dryomov 已提交
3728
	rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
C
Christoph Hellwig 已提交
3729 3730 3731 3732 3733
	rbd_dev->tag_set.nr_hw_queues = 1;
	rbd_dev->tag_set.cmd_size = sizeof(struct work_struct);

	err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
	if (err)
3734
		goto out_disk;
3735

C
Christoph Hellwig 已提交
3736 3737 3738 3739 3740 3741
	q = blk_mq_init_queue(&rbd_dev->tag_set);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto out_tag_set;
	}

3742 3743
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
	/* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
A
Alex Elder 已提交
3744

3745
	/* set io sizes to object size */
A
Alex Elder 已提交
3746 3747
	segment_size = rbd_obj_bytes(&rbd_dev->header);
	blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
I
Ilya Dryomov 已提交
3748
	q->limits.max_sectors = queue_max_hw_sectors(q);
I
Ilya Dryomov 已提交
3749
	blk_queue_max_segments(q, segment_size / SECTOR_SIZE);
A
Alex Elder 已提交
3750 3751 3752
	blk_queue_max_segment_size(q, segment_size);
	blk_queue_io_min(q, segment_size);
	blk_queue_io_opt(q, segment_size);
3753

3754 3755 3756 3757
	/* enable the discard support */
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	q->limits.discard_granularity = segment_size;
	q->limits.discard_alignment = segment_size;
3758
	blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE);
3759
	q->limits.discard_zeroes_data = 1;
3760

3761 3762 3763
	if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
		q->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;

3764 3765 3766 3767 3768 3769 3770
	disk->queue = q;

	q->queuedata = rbd_dev;

	rbd_dev->disk = disk;

	return 0;
C
Christoph Hellwig 已提交
3771 3772
out_tag_set:
	blk_mq_free_tag_set(&rbd_dev->tag_set);
3773 3774
out_disk:
	put_disk(disk);
C
Christoph Hellwig 已提交
3775
	return err;
3776 3777
}

3778 3779 3780 3781
/*
  sysfs
*/

A
Alex Elder 已提交
3782 3783 3784 3785 3786
static struct rbd_device *dev_to_rbd_dev(struct device *dev)
{
	return container_of(dev, struct rbd_device, dev);
}

3787 3788 3789
static ssize_t rbd_size_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
A
Alex Elder 已提交
3790
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3791

A
Alex Elder 已提交
3792 3793
	return sprintf(buf, "%llu\n",
		(unsigned long long)rbd_dev->mapping.size);
3794 3795
}

A
Alex Elder 已提交
3796 3797 3798 3799 3800 3801 3802 3803 3804 3805
/*
 * Note this shows the features for whatever's mapped, which is not
 * necessarily the base image.
 */
static ssize_t rbd_features_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "0x%016llx\n",
A
Alex Elder 已提交
3806
			(unsigned long long)rbd_dev->mapping.features);
A
Alex Elder 已提交
3807 3808
}

3809 3810 3811
static ssize_t rbd_major_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
A
Alex Elder 已提交
3812
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3813

A
Alex Elder 已提交
3814 3815 3816 3817
	if (rbd_dev->major)
		return sprintf(buf, "%d\n", rbd_dev->major);

	return sprintf(buf, "(none)\n");
3818 3819 3820 3821 3822 3823
}

static ssize_t rbd_minor_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
A
Alex Elder 已提交
3824

3825
	return sprintf(buf, "%d\n", rbd_dev->minor);
3826 3827 3828 3829
}

static ssize_t rbd_client_id_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
3830
{
A
Alex Elder 已提交
3831
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3832

3833
	return sprintf(buf, "client%lld\n",
3834
		       ceph_client_gid(rbd_dev->rbd_client->client));
3835 3836
}

3837 3838
static ssize_t rbd_pool_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
3839
{
A
Alex Elder 已提交
3840
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3841

3842
	return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
3843 3844
}

3845 3846 3847 3848 3849
static ssize_t rbd_pool_id_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

3850
	return sprintf(buf, "%llu\n",
A
Alex Elder 已提交
3851
			(unsigned long long) rbd_dev->spec->pool_id);
3852 3853
}

3854 3855 3856
static ssize_t rbd_name_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
A
Alex Elder 已提交
3857
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3858

A
Alex Elder 已提交
3859 3860 3861 3862
	if (rbd_dev->spec->image_name)
		return sprintf(buf, "%s\n", rbd_dev->spec->image_name);

	return sprintf(buf, "(unknown)\n");
3863 3864
}

A
Alex Elder 已提交
3865 3866 3867 3868 3869
static ssize_t rbd_image_id_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

3870
	return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
A
Alex Elder 已提交
3871 3872
}

A
Alex Elder 已提交
3873 3874 3875 3876
/*
 * Shows the name of the currently-mapped snapshot (or
 * RBD_SNAP_HEAD_NAME for the base image).
 */
3877 3878 3879 3880
static ssize_t rbd_snap_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
A
Alex Elder 已提交
3881
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3882

3883
	return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
3884 3885
}

3886
/*
3887 3888 3889
 * For a v2 image, shows the chain of parent images, separated by empty
 * lines.  For v1 images or if there is no parent, shows "(no parent
 * image)".
3890 3891
 */
static ssize_t rbd_parent_show(struct device *dev,
3892 3893
			       struct device_attribute *attr,
			       char *buf)
3894 3895
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3896
	ssize_t count = 0;
3897

3898
	if (!rbd_dev->parent)
3899 3900
		return sprintf(buf, "(no parent image)\n");

3901 3902 3903 3904 3905 3906 3907 3908 3909 3910 3911 3912 3913 3914 3915 3916
	for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
		struct rbd_spec *spec = rbd_dev->parent_spec;

		count += sprintf(&buf[count], "%s"
			    "pool_id %llu\npool_name %s\n"
			    "image_id %s\nimage_name %s\n"
			    "snap_id %llu\nsnap_name %s\n"
			    "overlap %llu\n",
			    !count ? "" : "\n", /* first? */
			    spec->pool_id, spec->pool_name,
			    spec->image_id, spec->image_name ?: "(unknown)",
			    spec->snap_id, spec->snap_name,
			    rbd_dev->parent_overlap);
	}

	return count;
3917 3918
}

3919 3920 3921 3922 3923
static ssize_t rbd_image_refresh(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t size)
{
A
Alex Elder 已提交
3924
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3925
	int ret;
3926

A
Alex Elder 已提交
3927
	ret = rbd_dev_refresh(rbd_dev);
3928
	if (ret)
3929
		return ret;
3930

3931
	return size;
3932
}
3933

3934
static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
A
Alex Elder 已提交
3935
static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
3936
static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
3937
static DEVICE_ATTR(minor, S_IRUGO, rbd_minor_show, NULL);
3938 3939
static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
3940
static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
3941
static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
A
Alex Elder 已提交
3942
static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
3943 3944
static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
3945
static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
3946 3947 3948

static struct attribute *rbd_attrs[] = {
	&dev_attr_size.attr,
A
Alex Elder 已提交
3949
	&dev_attr_features.attr,
3950
	&dev_attr_major.attr,
3951
	&dev_attr_minor.attr,
3952 3953
	&dev_attr_client_id.attr,
	&dev_attr_pool.attr,
3954
	&dev_attr_pool_id.attr,
3955
	&dev_attr_name.attr,
A
Alex Elder 已提交
3956
	&dev_attr_image_id.attr,
3957
	&dev_attr_current_snap.attr,
3958
	&dev_attr_parent.attr,
3959 3960 3961 3962 3963 3964 3965 3966 3967 3968 3969 3970 3971
	&dev_attr_refresh.attr,
	NULL
};

static struct attribute_group rbd_attr_group = {
	.attrs = rbd_attrs,
};

static const struct attribute_group *rbd_attr_groups[] = {
	&rbd_attr_group,
	NULL
};

3972
static void rbd_dev_release(struct device *dev);
3973 3974 3975 3976

static struct device_type rbd_device_type = {
	.name		= "rbd",
	.groups		= rbd_attr_groups,
3977
	.release	= rbd_dev_release,
3978 3979
};

3980 3981 3982 3983 3984 3985 3986 3987 3988 3989 3990 3991 3992 3993 3994 3995 3996 3997 3998 3999 4000
static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
{
	kref_get(&spec->kref);

	return spec;
}

static void rbd_spec_free(struct kref *kref);
static void rbd_spec_put(struct rbd_spec *spec)
{
	if (spec)
		kref_put(&spec->kref, rbd_spec_free);
}

static struct rbd_spec *rbd_spec_alloc(void)
{
	struct rbd_spec *spec;

	spec = kzalloc(sizeof (*spec), GFP_KERNEL);
	if (!spec)
		return NULL;
4001 4002 4003

	spec->pool_id = CEPH_NOPOOL;
	spec->snap_id = CEPH_NOSNAP;
4004 4005 4006 4007 4008 4009 4010 4011 4012 4013 4014 4015 4016 4017 4018 4019
	kref_init(&spec->kref);

	return spec;
}

static void rbd_spec_free(struct kref *kref)
{
	struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);

	kfree(spec->pool_name);
	kfree(spec->image_id);
	kfree(spec->image_name);
	kfree(spec->snap_name);
	kfree(spec);
}

4020
static void rbd_dev_free(struct rbd_device *rbd_dev)
4021
{
4022 4023
	WARN_ON(rbd_dev->watch_state != RBD_WATCH_STATE_UNREGISTERED);

4024
	ceph_oid_destroy(&rbd_dev->header_oid);
4025
	ceph_oloc_destroy(&rbd_dev->header_oloc);
4026

4027 4028 4029 4030
	rbd_put_client(rbd_dev->rbd_client);
	rbd_spec_put(rbd_dev->spec);
	kfree(rbd_dev->opts);
	kfree(rbd_dev);
4031 4032 4033 4034 4035 4036 4037 4038 4039 4040 4041 4042 4043
}

static void rbd_dev_release(struct device *dev)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	bool need_put = !!rbd_dev->opts;

	if (need_put) {
		destroy_workqueue(rbd_dev->task_wq);
		ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
	}

	rbd_dev_free(rbd_dev);
4044 4045 4046 4047 4048 4049 4050 4051 4052 4053

	/*
	 * This is racy, but way better than putting module outside of
	 * the release callback.  The race window is pretty small, so
	 * doing something similar to dm (dm-builtin.c) is overkill.
	 */
	if (need_put)
		module_put(THIS_MODULE);
}

4054 4055
static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc,
					   struct rbd_spec *spec)
4056 4057 4058
{
	struct rbd_device *rbd_dev;

4059
	rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL);
4060 4061 4062 4063 4064 4065 4066
	if (!rbd_dev)
		return NULL;

	spin_lock_init(&rbd_dev->lock);
	INIT_LIST_HEAD(&rbd_dev->node);
	init_rwsem(&rbd_dev->header_rwsem);

4067
	ceph_oid_init(&rbd_dev->header_oid);
4068
	ceph_oloc_init(&rbd_dev->header_oloc);
4069

4070 4071 4072 4073
	mutex_init(&rbd_dev->watch_mutex);
	rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
	INIT_DELAYED_WORK(&rbd_dev->watch_dwork, rbd_reregister_watch);

4074 4075 4076 4077 4078
	rbd_dev->dev.bus = &rbd_bus_type;
	rbd_dev->dev.type = &rbd_device_type;
	rbd_dev->dev.parent = &rbd_root_dev;
	device_initialize(&rbd_dev->dev);

4079
	rbd_dev->rbd_client = rbdc;
4080
	rbd_dev->spec = spec;
4081

4082 4083 4084 4085
	rbd_dev->layout.stripe_unit = 1 << RBD_MAX_OBJ_ORDER;
	rbd_dev->layout.stripe_count = 1;
	rbd_dev->layout.object_size = 1 << RBD_MAX_OBJ_ORDER;
	rbd_dev->layout.pool_id = spec->pool_id;
4086
	RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
4087

4088 4089 4090 4091 4092 4093 4094 4095 4096 4097 4098 4099 4100 4101 4102 4103 4104 4105 4106 4107 4108 4109 4110 4111 4112 4113 4114 4115 4116 4117
	return rbd_dev;
}

/*
 * Create a mapping rbd_dev.
 */
static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
					 struct rbd_spec *spec,
					 struct rbd_options *opts)
{
	struct rbd_device *rbd_dev;

	rbd_dev = __rbd_dev_create(rbdc, spec);
	if (!rbd_dev)
		return NULL;

	rbd_dev->opts = opts;

	/* get an id and fill in device name */
	rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0,
					 minor_to_rbd_dev_id(1 << MINORBITS),
					 GFP_KERNEL);
	if (rbd_dev->dev_id < 0)
		goto fail_rbd_dev;

	sprintf(rbd_dev->name, RBD_DRV_NAME "%d", rbd_dev->dev_id);
	rbd_dev->task_wq = alloc_ordered_workqueue("%s-tasks", WQ_MEM_RECLAIM,
						   rbd_dev->name);
	if (!rbd_dev->task_wq)
		goto fail_dev_id;
4118

4119 4120 4121 4122
	/* we have a ref from do_rbd_add() */
	__module_get(THIS_MODULE);

	dout("%s rbd_dev %p dev_id %d\n", __func__, rbd_dev, rbd_dev->dev_id);
4123
	return rbd_dev;
4124 4125 4126 4127 4128 4129

fail_dev_id:
	ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
fail_rbd_dev:
	rbd_dev_free(rbd_dev);
	return NULL;
4130 4131 4132 4133
}

static void rbd_dev_destroy(struct rbd_device *rbd_dev)
{
4134 4135
	if (rbd_dev)
		put_device(&rbd_dev->dev);
4136 4137
}

4138 4139 4140 4141 4142 4143 4144 4145 4146 4147 4148 4149 4150 4151 4152
/*
 * Get the size and object order for an image snapshot, or if
 * snap_id is CEPH_NOSNAP, gets this information for the base
 * image.
 */
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u8 *order, u64 *snap_size)
{
	__le64 snapid = cpu_to_le64(snap_id);
	int ret;
	struct {
		u8 order;
		__le64 size;
	} __attribute__ ((packed)) size_buf = { 0 };

4153
	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
4154
				"rbd", "get_size",
4155
				&snapid, sizeof (snapid),
4156
				&size_buf, sizeof (size_buf));
4157
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4158 4159
	if (ret < 0)
		return ret;
4160 4161
	if (ret < sizeof (size_buf))
		return -ERANGE;
4162

J
Josh Durgin 已提交
4163
	if (order) {
4164
		*order = size_buf.order;
J
Josh Durgin 已提交
4165 4166
		dout("  order %u", (unsigned int)*order);
	}
4167 4168
	*snap_size = le64_to_cpu(size_buf.size);

J
Josh Durgin 已提交
4169 4170
	dout("  snap_id 0x%016llx snap_size = %llu\n",
		(unsigned long long)snap_id,
4171
		(unsigned long long)*snap_size);
4172 4173 4174 4175 4176 4177 4178 4179 4180 4181 4182

	return 0;
}

static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
{
	return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
					&rbd_dev->header.obj_order,
					&rbd_dev->header.image_size);
}

4183 4184 4185 4186 4187 4188 4189 4190 4191 4192
static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
{
	void *reply_buf;
	int ret;
	void *p;

	reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
	if (!reply_buf)
		return -ENOMEM;

4193
	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
4194
				"rbd", "get_object_prefix", NULL, 0,
4195
				reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
4196
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4197 4198 4199 4200 4201
	if (ret < 0)
		goto out;

	p = reply_buf;
	rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
4202 4203
						p + ret, NULL, GFP_NOIO);
	ret = 0;
4204 4205 4206 4207 4208 4209 4210 4211 4212 4213 4214 4215 4216

	if (IS_ERR(rbd_dev->header.object_prefix)) {
		ret = PTR_ERR(rbd_dev->header.object_prefix);
		rbd_dev->header.object_prefix = NULL;
	} else {
		dout("  object_prefix = %s\n", rbd_dev->header.object_prefix);
	}
out:
	kfree(reply_buf);

	return ret;
}

4217 4218 4219 4220 4221 4222 4223
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features)
{
	__le64 snapid = cpu_to_le64(snap_id);
	struct {
		__le64 features;
		__le64 incompat;
4224
	} __attribute__ ((packed)) features_buf = { 0 };
4225
	u64 unsup;
4226 4227
	int ret;

4228
	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
4229
				"rbd", "get_features",
4230
				&snapid, sizeof (snapid),
4231
				&features_buf, sizeof (features_buf));
4232
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4233 4234
	if (ret < 0)
		return ret;
4235 4236
	if (ret < sizeof (features_buf))
		return -ERANGE;
A
Alex Elder 已提交
4237

4238 4239 4240 4241
	unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED;
	if (unsup) {
		rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
			 unsup);
A
Alex Elder 已提交
4242
		return -ENXIO;
4243
	}
A
Alex Elder 已提交
4244

4245 4246 4247
	*snap_features = le64_to_cpu(features_buf.features);

	dout("  snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
4248 4249 4250
		(unsigned long long)snap_id,
		(unsigned long long)*snap_features,
		(unsigned long long)le64_to_cpu(features_buf.incompat));
4251 4252 4253 4254 4255 4256 4257 4258 4259 4260

	return 0;
}

static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
{
	return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
						&rbd_dev->header.features);
}

4261 4262 4263 4264 4265 4266 4267 4268
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
{
	struct rbd_spec *parent_spec;
	size_t size;
	void *reply_buf = NULL;
	__le64 snapid;
	void *p;
	void *end;
A
Alex Elder 已提交
4269
	u64 pool_id;
4270
	char *image_id;
4271
	u64 snap_id;
4272 4273 4274 4275 4276 4277 4278 4279 4280 4281 4282 4283 4284 4285 4286 4287 4288
	u64 overlap;
	int ret;

	parent_spec = rbd_spec_alloc();
	if (!parent_spec)
		return -ENOMEM;

	size = sizeof (__le64) +				/* pool_id */
		sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX +	/* image_id */
		sizeof (__le64) +				/* snap_id */
		sizeof (__le64);				/* overlap */
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf) {
		ret = -ENOMEM;
		goto out_err;
	}

4289
	snapid = cpu_to_le64(rbd_dev->spec->snap_id);
4290
	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
4291
				"rbd", "get_parent",
4292
				&snapid, sizeof (snapid),
4293
				reply_buf, size);
4294
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4295 4296 4297 4298
	if (ret < 0)
		goto out_err;

	p = reply_buf;
4299 4300
	end = reply_buf + ret;
	ret = -ERANGE;
A
Alex Elder 已提交
4301
	ceph_decode_64_safe(&p, end, pool_id, out_err);
4302 4303 4304 4305 4306 4307 4308 4309 4310 4311 4312 4313 4314 4315 4316 4317 4318
	if (pool_id == CEPH_NOPOOL) {
		/*
		 * Either the parent never existed, or we have
		 * record of it but the image got flattened so it no
		 * longer has a parent.  When the parent of a
		 * layered image disappears we immediately set the
		 * overlap to 0.  The effect of this is that all new
		 * requests will be treated as if the image had no
		 * parent.
		 */
		if (rbd_dev->parent_overlap) {
			rbd_dev->parent_overlap = 0;
			rbd_dev_parent_put(rbd_dev);
			pr_info("%s: clone image has been flattened\n",
				rbd_dev->disk->disk_name);
		}

4319
		goto out;	/* No parent?  No problem. */
4320
	}
4321

4322 4323 4324
	/* The ceph file layout needs to fit pool id in 32 bits */

	ret = -EIO;
A
Alex Elder 已提交
4325
	if (pool_id > (u64)U32_MAX) {
4326
		rbd_warn(NULL, "parent pool id too large (%llu > %u)",
A
Alex Elder 已提交
4327
			(unsigned long long)pool_id, U32_MAX);
4328
		goto out_err;
A
Alex Elder 已提交
4329
	}
4330

A
Alex Elder 已提交
4331
	image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
4332 4333 4334 4335
	if (IS_ERR(image_id)) {
		ret = PTR_ERR(image_id);
		goto out_err;
	}
4336
	ceph_decode_64_safe(&p, end, snap_id, out_err);
4337 4338
	ceph_decode_64_safe(&p, end, overlap, out_err);

4339 4340 4341 4342 4343 4344 4345 4346 4347
	/*
	 * The parent won't change (except when the clone is
	 * flattened, already handled that).  So we only need to
	 * record the parent spec we have not already done so.
	 */
	if (!rbd_dev->parent_spec) {
		parent_spec->pool_id = pool_id;
		parent_spec->image_id = image_id;
		parent_spec->snap_id = snap_id;
A
Alex Elder 已提交
4348 4349
		rbd_dev->parent_spec = parent_spec;
		parent_spec = NULL;	/* rbd_dev now owns this */
4350 4351
	} else {
		kfree(image_id);
4352 4353 4354
	}

	/*
4355 4356
	 * We always update the parent overlap.  If it's zero we issue
	 * a warning, as we will proceed as if there was no parent.
4357 4358 4359
	 */
	if (!overlap) {
		if (parent_spec) {
4360 4361 4362 4363
			/* refresh, careful to warn just once */
			if (rbd_dev->parent_overlap)
				rbd_warn(rbd_dev,
				    "clone now standalone (overlap became 0)");
4364
		} else {
4365 4366
			/* initial probe */
			rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
4367
		}
A
Alex Elder 已提交
4368
	}
4369 4370
	rbd_dev->parent_overlap = overlap;

4371 4372 4373 4374 4375 4376 4377 4378 4379
out:
	ret = 0;
out_err:
	kfree(reply_buf);
	rbd_spec_put(parent_spec);

	return ret;
}

4380 4381 4382 4383 4384 4385 4386 4387 4388 4389 4390 4391 4392
static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
{
	struct {
		__le64 stripe_unit;
		__le64 stripe_count;
	} __attribute__ ((packed)) striping_info_buf = { 0 };
	size_t size = sizeof (striping_info_buf);
	void *p;
	u64 obj_size;
	u64 stripe_unit;
	u64 stripe_count;
	int ret;

4393
	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
4394
				"rbd", "get_stripe_unit_count", NULL, 0,
4395
				(char *)&striping_info_buf, size);
4396 4397 4398 4399 4400 4401 4402 4403 4404 4405 4406 4407 4408 4409 4410 4411 4412 4413 4414 4415 4416 4417 4418 4419 4420 4421 4422 4423
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < size)
		return -ERANGE;

	/*
	 * We don't actually support the "fancy striping" feature
	 * (STRIPINGV2) yet, but if the striping sizes are the
	 * defaults the behavior is the same as before.  So find
	 * out, and only fail if the image has non-default values.
	 */
	ret = -EINVAL;
	obj_size = (u64)1 << rbd_dev->header.obj_order;
	p = &striping_info_buf;
	stripe_unit = ceph_decode_64(&p);
	if (stripe_unit != obj_size) {
		rbd_warn(rbd_dev, "unsupported stripe unit "
				"(got %llu want %llu)",
				stripe_unit, obj_size);
		return -EINVAL;
	}
	stripe_count = ceph_decode_64(&p);
	if (stripe_count != 1) {
		rbd_warn(rbd_dev, "unsupported stripe count "
				"(got %llu want 1)", stripe_count);
		return -EINVAL;
	}
4424 4425
	rbd_dev->header.stripe_unit = stripe_unit;
	rbd_dev->header.stripe_count = stripe_count;
4426 4427 4428 4429

	return 0;
}

4430 4431 4432 4433 4434 4435 4436 4437 4438 4439 4440 4441 4442 4443
static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
{
	size_t image_id_size;
	char *image_id;
	void *p;
	void *end;
	size_t size;
	void *reply_buf = NULL;
	size_t len = 0;
	char *image_name = NULL;
	int ret;

	rbd_assert(!rbd_dev->spec->image_name);

A
Alex Elder 已提交
4444 4445
	len = strlen(rbd_dev->spec->image_id);
	image_id_size = sizeof (__le32) + len;
4446 4447 4448 4449 4450
	image_id = kmalloc(image_id_size, GFP_KERNEL);
	if (!image_id)
		return NULL;

	p = image_id;
4451
	end = image_id + image_id_size;
4452
	ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
4453 4454 4455 4456 4457 4458

	size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf)
		goto out;

4459
	ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
4460 4461
				"rbd", "dir_get_name",
				image_id, image_id_size,
4462
				reply_buf, size);
4463 4464 4465
	if (ret < 0)
		goto out;
	p = reply_buf;
4466 4467
	end = reply_buf + ret;

4468 4469 4470 4471 4472 4473 4474 4475 4476 4477 4478 4479
	image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
	if (IS_ERR(image_name))
		image_name = NULL;
	else
		dout("%s: name is %s len is %zd\n", __func__, image_name, len);
out:
	kfree(reply_buf);
	kfree(image_id);

	return image_name;
}

4480 4481 4482 4483 4484 4485 4486 4487 4488 4489 4490 4491 4492 4493 4494 4495 4496 4497 4498 4499 4500 4501 4502 4503 4504 4505 4506 4507 4508 4509
static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	const char *snap_name;
	u32 which = 0;

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which < snapc->num_snaps) {
		if (!strcmp(name, snap_name))
			return snapc->snaps[which];
		snap_name += strlen(snap_name) + 1;
		which++;
	}
	return CEPH_NOSNAP;
}

static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u32 which;
	bool found = false;
	u64 snap_id;

	for (which = 0; !found && which < snapc->num_snaps; which++) {
		const char *snap_name;

		snap_id = snapc->snaps[which];
		snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
4510 4511 4512 4513 4514 4515 4516
		if (IS_ERR(snap_name)) {
			/* ignore no-longer existing snapshots */
			if (PTR_ERR(snap_name) == -ENOENT)
				continue;
			else
				break;
		}
4517 4518 4519 4520 4521 4522 4523 4524 4525 4526 4527 4528 4529 4530 4531 4532 4533 4534
		found = !strcmp(name, snap_name);
		kfree(snap_name);
	}
	return found ? snap_id : CEPH_NOSNAP;
}

/*
 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
 * no snapshot by that name is found, or if an error occurs.
 */
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	if (rbd_dev->image_format == 1)
		return rbd_v1_snap_id_by_name(rbd_dev, name);

	return rbd_v2_snap_id_by_name(rbd_dev, name);
}

4535
/*
4536 4537 4538 4539 4540 4541 4542 4543 4544 4545 4546 4547 4548 4549 4550 4551 4552 4553 4554 4555 4556 4557 4558 4559 4560 4561 4562
 * An image being mapped will have everything but the snap id.
 */
static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;

	rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
	rbd_assert(spec->image_id && spec->image_name);
	rbd_assert(spec->snap_name);

	if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
		u64 snap_id;

		snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
		if (snap_id == CEPH_NOSNAP)
			return -ENOENT;

		spec->snap_id = snap_id;
	} else {
		spec->snap_id = CEPH_NOSNAP;
	}

	return 0;
}

/*
 * A parent image will have all ids but none of the names.
4563
 *
4564 4565
 * All names in an rbd spec are dynamically allocated.  It's OK if we
 * can't figure out the name for an image id.
4566
 */
4567
static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
4568
{
4569 4570 4571 4572 4573
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_spec *spec = rbd_dev->spec;
	const char *pool_name;
	const char *image_name;
	const char *snap_name;
4574 4575
	int ret;

4576 4577 4578
	rbd_assert(spec->pool_id != CEPH_NOPOOL);
	rbd_assert(spec->image_id);
	rbd_assert(spec->snap_id != CEPH_NOSNAP);
4579

4580
	/* Get the pool name; we have to make our own copy of this */
4581

4582 4583 4584
	pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
	if (!pool_name) {
		rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
4585 4586
		return -EIO;
	}
4587 4588
	pool_name = kstrdup(pool_name, GFP_KERNEL);
	if (!pool_name)
4589 4590 4591 4592
		return -ENOMEM;

	/* Fetch the image name; tolerate failure here */

4593 4594
	image_name = rbd_dev_image_name(rbd_dev);
	if (!image_name)
A
Alex Elder 已提交
4595
		rbd_warn(rbd_dev, "unable to get image name");
4596

4597
	/* Fetch the snapshot name */
4598

4599
	snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
4600 4601
	if (IS_ERR(snap_name)) {
		ret = PTR_ERR(snap_name);
4602
		goto out_err;
4603 4604 4605 4606 4607
	}

	spec->pool_name = pool_name;
	spec->image_name = image_name;
	spec->snap_name = snap_name;
4608 4609

	return 0;
4610

4611
out_err:
4612 4613
	kfree(image_name);
	kfree(pool_name);
4614 4615 4616
	return ret;
}

A
Alex Elder 已提交
4617
static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
4618 4619 4620 4621 4622 4623 4624 4625 4626 4627 4628 4629 4630 4631 4632 4633 4634 4635 4636 4637 4638 4639 4640
{
	size_t size;
	int ret;
	void *reply_buf;
	void *p;
	void *end;
	u64 seq;
	u32 snap_count;
	struct ceph_snap_context *snapc;
	u32 i;

	/*
	 * We'll need room for the seq value (maximum snapshot id),
	 * snapshot count, and array of that many snapshot ids.
	 * For now we have a fixed upper limit on the number we're
	 * prepared to receive.
	 */
	size = sizeof (__le64) + sizeof (__le32) +
			RBD_MAX_SNAP_COUNT * sizeof (__le64);
	reply_buf = kzalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return -ENOMEM;

4641
	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
4642
				"rbd", "get_snapcontext", NULL, 0,
4643
				reply_buf, size);
4644
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4645 4646 4647 4648
	if (ret < 0)
		goto out;

	p = reply_buf;
4649 4650
	end = reply_buf + ret;
	ret = -ERANGE;
4651 4652 4653 4654 4655 4656 4657 4658 4659 4660 4661 4662 4663 4664 4665 4666
	ceph_decode_64_safe(&p, end, seq, out);
	ceph_decode_32_safe(&p, end, snap_count, out);

	/*
	 * Make sure the reported number of snapshot ids wouldn't go
	 * beyond the end of our buffer.  But before checking that,
	 * make sure the computed size of the snapshot context we
	 * allocate is representable in a size_t.
	 */
	if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
				 / sizeof (u64)) {
		ret = -EINVAL;
		goto out;
	}
	if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
		goto out;
4667
	ret = 0;
4668

4669
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
4670 4671 4672 4673 4674 4675 4676 4677
	if (!snapc) {
		ret = -ENOMEM;
		goto out;
	}
	snapc->seq = seq;
	for (i = 0; i < snap_count; i++)
		snapc->snaps[i] = ceph_decode_64(&p);

4678
	ceph_put_snap_context(rbd_dev->header.snapc);
4679 4680 4681
	rbd_dev->header.snapc = snapc;

	dout("  snap context seq = %llu, snap_count = %u\n",
4682
		(unsigned long long)seq, (unsigned int)snap_count);
4683 4684 4685
out:
	kfree(reply_buf);

4686
	return ret;
4687 4688
}

4689 4690
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
A
Alex Elder 已提交
4691 4692 4693
{
	size_t size;
	void *reply_buf;
4694
	__le64 snapid;
A
Alex Elder 已提交
4695 4696 4697 4698 4699 4700 4701 4702 4703 4704
	int ret;
	void *p;
	void *end;
	char *snap_name;

	size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return ERR_PTR(-ENOMEM);

4705
	snapid = cpu_to_le64(snap_id);
4706
	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
A
Alex Elder 已提交
4707
				"rbd", "get_snapshot_name",
4708
				&snapid, sizeof (snapid),
4709
				reply_buf, size);
4710
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4711 4712
	if (ret < 0) {
		snap_name = ERR_PTR(ret);
A
Alex Elder 已提交
4713
		goto out;
4714
	}
A
Alex Elder 已提交
4715 4716

	p = reply_buf;
4717
	end = reply_buf + ret;
A
Alex Elder 已提交
4718
	snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
4719
	if (IS_ERR(snap_name))
A
Alex Elder 已提交
4720 4721
		goto out;

4722
	dout("  snap_id 0x%016llx snap_name = %s\n",
4723
		(unsigned long long)snap_id, snap_name);
A
Alex Elder 已提交
4724 4725 4726
out:
	kfree(reply_buf);

4727
	return snap_name;
A
Alex Elder 已提交
4728 4729
}

4730
static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
A
Alex Elder 已提交
4731
{
4732
	bool first_time = rbd_dev->header.object_prefix == NULL;
A
Alex Elder 已提交
4733 4734
	int ret;

4735 4736
	ret = rbd_dev_v2_image_size(rbd_dev);
	if (ret)
4737
		return ret;
4738

4739 4740 4741
	if (first_time) {
		ret = rbd_dev_v2_header_onetime(rbd_dev);
		if (ret)
4742
			return ret;
4743 4744
	}

A
Alex Elder 已提交
4745
	ret = rbd_dev_v2_snap_context(rbd_dev);
4746 4747 4748 4749
	if (ret && first_time) {
		kfree(rbd_dev->header.object_prefix);
		rbd_dev->header.object_prefix = NULL;
	}
A
Alex Elder 已提交
4750 4751 4752 4753

	return ret;
}

4754 4755 4756 4757 4758 4759 4760 4761 4762 4763
static int rbd_dev_header_info(struct rbd_device *rbd_dev)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_header_info(rbd_dev);

	return rbd_dev_v2_header_info(rbd_dev);
}

4764 4765 4766
/*
 * Skips over white space at *buf, and updates *buf to point to the
 * first found non-space character (if any). Returns the length of
A
Alex Elder 已提交
4767 4768
 * the token (string of non-white space characters) found.  Note
 * that *buf must be terminated with '\0'.
4769 4770 4771 4772 4773 4774 4775 4776 4777 4778 4779 4780 4781 4782
 */
static inline size_t next_token(const char **buf)
{
        /*
        * These are the characters that produce nonzero for
        * isspace() in the "C" and "POSIX" locales.
        */
        const char *spaces = " \f\n\r\t\v";

        *buf += strspn(*buf, spaces);	/* Find start of token */

	return strcspn(*buf, spaces);   /* Return token length */
}

A
Alex Elder 已提交
4783 4784 4785 4786 4787 4788 4789 4790 4791 4792 4793 4794 4795 4796 4797 4798 4799 4800 4801 4802 4803 4804
/*
 * Finds the next token in *buf, dynamically allocates a buffer big
 * enough to hold a copy of it, and copies the token into the new
 * buffer.  The copy is guaranteed to be terminated with '\0'.  Note
 * that a duplicate buffer is created even for a zero-length token.
 *
 * Returns a pointer to the newly-allocated duplicate, or a null
 * pointer if memory for the duplicate was not available.  If
 * the lenp argument is a non-null pointer, the length of the token
 * (not including the '\0') is returned in *lenp.
 *
 * If successful, the *buf pointer will be updated to point beyond
 * the end of the found token.
 *
 * Note: uses GFP_KERNEL for allocation.
 */
static inline char *dup_token(const char **buf, size_t *lenp)
{
	char *dup;
	size_t len;

	len = next_token(buf);
A
Alex Elder 已提交
4805
	dup = kmemdup(*buf, len + 1, GFP_KERNEL);
A
Alex Elder 已提交
4806 4807 4808 4809 4810 4811 4812 4813 4814 4815 4816
	if (!dup)
		return NULL;
	*(dup + len) = '\0';
	*buf += len;

	if (lenp)
		*lenp = len;

	return dup;
}

4817
/*
4818 4819 4820 4821
 * Parse the options provided for an "rbd add" (i.e., rbd image
 * mapping) request.  These arrive via a write to /sys/bus/rbd/add,
 * and the data written is passed here via a NUL-terminated buffer.
 * Returns 0 if successful or an error code otherwise.
A
Alex Elder 已提交
4822
 *
4823 4824 4825 4826 4827 4828 4829 4830 4831 4832 4833 4834 4835 4836 4837 4838 4839 4840 4841 4842 4843 4844 4845 4846 4847 4848 4849 4850 4851 4852 4853 4854 4855 4856
 * The information extracted from these options is recorded in
 * the other parameters which return dynamically-allocated
 * structures:
 *  ceph_opts
 *      The address of a pointer that will refer to a ceph options
 *      structure.  Caller must release the returned pointer using
 *      ceph_destroy_options() when it is no longer needed.
 *  rbd_opts
 *	Address of an rbd options pointer.  Fully initialized by
 *	this function; caller must release with kfree().
 *  spec
 *	Address of an rbd image specification pointer.  Fully
 *	initialized by this function based on parsed options.
 *	Caller must release with rbd_spec_put().
 *
 * The options passed take this form:
 *  <mon_addrs> <options> <pool_name> <image_name> [<snap_id>]
 * where:
 *  <mon_addrs>
 *      A comma-separated list of one or more monitor addresses.
 *      A monitor address is an ip address, optionally followed
 *      by a port number (separated by a colon).
 *        I.e.:  ip1[:port1][,ip2[:port2]...]
 *  <options>
 *      A comma-separated list of ceph and/or rbd options.
 *  <pool_name>
 *      The name of the rados pool containing the rbd image.
 *  <image_name>
 *      The name of the image in that pool to map.
 *  <snap_id>
 *      An optional snapshot id.  If provided, the mapping will
 *      present data from the image at the time that snapshot was
 *      created.  The image head is used if no snapshot id is
 *      provided.  Snapshot mappings are always read-only.
4857
 */
4858
static int rbd_add_parse_args(const char *buf,
4859
				struct ceph_options **ceph_opts,
4860 4861
				struct rbd_options **opts,
				struct rbd_spec **rbd_spec)
4862
{
A
Alex Elder 已提交
4863
	size_t len;
4864
	char *options;
4865
	const char *mon_addrs;
4866
	char *snap_name;
4867
	size_t mon_addrs_size;
4868
	struct rbd_spec *spec = NULL;
4869
	struct rbd_options *rbd_opts = NULL;
4870
	struct ceph_options *copts;
4871
	int ret;
4872 4873 4874

	/* The first four tokens are required */

4875
	len = next_token(&buf);
4876 4877 4878 4879
	if (!len) {
		rbd_warn(NULL, "no monitor address(es) provided");
		return -EINVAL;
	}
4880
	mon_addrs = buf;
4881
	mon_addrs_size = len + 1;
4882
	buf += len;
4883

4884
	ret = -EINVAL;
4885 4886
	options = dup_token(&buf, NULL);
	if (!options)
4887
		return -ENOMEM;
4888 4889 4890 4891
	if (!*options) {
		rbd_warn(NULL, "no options provided");
		goto out_err;
	}
4892

4893 4894
	spec = rbd_spec_alloc();
	if (!spec)
4895
		goto out_mem;
4896 4897 4898 4899

	spec->pool_name = dup_token(&buf, NULL);
	if (!spec->pool_name)
		goto out_mem;
4900 4901 4902 4903
	if (!*spec->pool_name) {
		rbd_warn(NULL, "no pool name provided");
		goto out_err;
	}
4904

A
Alex Elder 已提交
4905
	spec->image_name = dup_token(&buf, NULL);
4906
	if (!spec->image_name)
4907
		goto out_mem;
4908 4909 4910 4911
	if (!*spec->image_name) {
		rbd_warn(NULL, "no image name provided");
		goto out_err;
	}
4912

4913 4914 4915 4916
	/*
	 * Snapshot name is optional; default is to use "-"
	 * (indicating the head/no snapshot).
	 */
4917
	len = next_token(&buf);
4918
	if (!len) {
4919 4920
		buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
		len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
4921
	} else if (len > RBD_MAX_SNAP_NAME_LEN) {
4922
		ret = -ENAMETOOLONG;
4923
		goto out_err;
4924
	}
4925 4926
	snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
	if (!snap_name)
4927
		goto out_mem;
4928 4929
	*(snap_name + len) = '\0';
	spec->snap_name = snap_name;
A
Alex Elder 已提交
4930

4931
	/* Initialize all rbd options to the defaults */
4932

4933 4934 4935 4936 4937
	rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
	if (!rbd_opts)
		goto out_mem;

	rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
I
Ilya Dryomov 已提交
4938
	rbd_opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
A
Alex Elder 已提交
4939

4940
	copts = ceph_parse_options(options, mon_addrs,
4941
					mon_addrs + mon_addrs_size - 1,
4942
					parse_rbd_opts_token, rbd_opts);
4943 4944
	if (IS_ERR(copts)) {
		ret = PTR_ERR(copts);
4945 4946
		goto out_err;
	}
4947 4948 4949
	kfree(options);

	*ceph_opts = copts;
4950
	*opts = rbd_opts;
4951
	*rbd_spec = spec;
4952

4953
	return 0;
4954
out_mem:
4955
	ret = -ENOMEM;
A
Alex Elder 已提交
4956
out_err:
4957 4958
	kfree(rbd_opts);
	rbd_spec_put(spec);
4959
	kfree(options);
A
Alex Elder 已提交
4960

4961
	return ret;
4962 4963
}

4964 4965 4966 4967 4968
/*
 * Return pool id (>= 0) or a negative error code.
 */
static int rbd_add_get_pool_id(struct rbd_client *rbdc, const char *pool_name)
{
4969
	struct ceph_options *opts = rbdc->client->options;
4970 4971 4972 4973 4974 4975 4976
	u64 newest_epoch;
	int tries = 0;
	int ret;

again:
	ret = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, pool_name);
	if (ret == -ENOENT && tries++ < 1) {
4977 4978
		ret = ceph_monc_get_version(&rbdc->client->monc, "osdmap",
					    &newest_epoch);
4979 4980 4981 4982
		if (ret < 0)
			return ret;

		if (rbdc->client->osdc.osdmap->epoch < newest_epoch) {
4983
			ceph_osdc_maybe_request_map(&rbdc->client->osdc);
4984
			(void) ceph_monc_wait_osdmap(&rbdc->client->monc,
4985 4986
						     newest_epoch,
						     opts->mount_timeout);
4987 4988 4989 4990 4991 4992 4993 4994 4995 4996
			goto again;
		} else {
			/* the osdmap we have is new enough */
			return -ENOENT;
		}
	}

	return ret;
}

A
Alex Elder 已提交
4997 4998 4999 5000 5001 5002 5003 5004 5005 5006 5007 5008 5009 5010 5011 5012 5013 5014 5015 5016
/*
 * An rbd format 2 image has a unique identifier, distinct from the
 * name given to it by the user.  Internally, that identifier is
 * what's used to specify the names of objects related to the image.
 *
 * A special "rbd id" object is used to map an rbd image name to its
 * id.  If that object doesn't exist, then there is no v2 rbd image
 * with the supplied name.
 *
 * This function will record the given rbd_dev's image_id field if
 * it can be determined, and in that case will return 0.  If any
 * errors occur a negative errno will be returned and the rbd_dev's
 * image_id field will be unchanged (and should be NULL).
 */
static int rbd_dev_image_id(struct rbd_device *rbd_dev)
{
	int ret;
	size_t size;
	char *object_name;
	void *response;
5017
	char *image_id;
5018

A
Alex Elder 已提交
5019 5020 5021
	/*
	 * When probing a parent image, the image id is already
	 * known (and the image name likely is not).  There's no
5022 5023
	 * need to fetch the image id again in this case.  We
	 * do still need to set the image format though.
A
Alex Elder 已提交
5024
	 */
5025 5026 5027
	if (rbd_dev->spec->image_id) {
		rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;

A
Alex Elder 已提交
5028
		return 0;
5029
	}
A
Alex Elder 已提交
5030

A
Alex Elder 已提交
5031 5032 5033 5034
	/*
	 * First, see if the format 2 image id file exists, and if
	 * so, get the image's persistent id from it.
	 */
A
Alex Elder 已提交
5035
	size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
A
Alex Elder 已提交
5036 5037 5038
	object_name = kmalloc(size, GFP_NOIO);
	if (!object_name)
		return -ENOMEM;
5039
	sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
A
Alex Elder 已提交
5040 5041 5042 5043 5044 5045 5046 5047 5048 5049 5050
	dout("rbd id object name is %s\n", object_name);

	/* Response will be an encoded string, which includes a length */

	size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
	response = kzalloc(size, GFP_NOIO);
	if (!response) {
		ret = -ENOMEM;
		goto out;
	}

5051 5052
	/* If it doesn't exist we'll assume it's a format 1 image */

5053
	ret = rbd_obj_method_sync(rbd_dev, object_name,
5054
				"rbd", "get_id", NULL, 0,
5055
				response, RBD_IMAGE_ID_LEN_MAX);
5056
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5057 5058 5059 5060 5061
	if (ret == -ENOENT) {
		image_id = kstrdup("", GFP_KERNEL);
		ret = image_id ? 0 : -ENOMEM;
		if (!ret)
			rbd_dev->image_format = 1;
5062
	} else if (ret >= 0) {
5063 5064 5065
		void *p = response;

		image_id = ceph_extract_encoded_string(&p, p + ret,
A
Alex Elder 已提交
5066
						NULL, GFP_NOIO);
5067
		ret = PTR_ERR_OR_ZERO(image_id);
5068 5069 5070 5071 5072 5073 5074
		if (!ret)
			rbd_dev->image_format = 2;
	}

	if (!ret) {
		rbd_dev->spec->image_id = image_id;
		dout("image_id is %s\n", image_id);
A
Alex Elder 已提交
5075 5076 5077 5078 5079 5080 5081 5082
	}
out:
	kfree(response);
	kfree(object_name);

	return ret;
}

A
Alex Elder 已提交
5083 5084 5085 5086
/*
 * Undo whatever state changes are made by v1 or v2 header info
 * call.
 */
A
Alex Elder 已提交
5087 5088 5089 5090
static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
	struct rbd_image_header	*header;

5091
	rbd_dev_parent_put(rbd_dev);
A
Alex Elder 已提交
5092 5093 5094 5095

	/* Free dynamic fields from the header, then zero it out */

	header = &rbd_dev->header;
5096
	ceph_put_snap_context(header->snapc);
A
Alex Elder 已提交
5097 5098 5099 5100 5101 5102
	kfree(header->snap_sizes);
	kfree(header->snap_names);
	kfree(header->object_prefix);
	memset(header, 0, sizeof (*header));
}

5103
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
5104 5105 5106
{
	int ret;

5107
	ret = rbd_dev_v2_object_prefix(rbd_dev);
5108
	if (ret)
5109 5110
		goto out_err;

5111 5112 5113 5114
	/*
	 * Get the and check features for the image.  Currently the
	 * features are assumed to never change.
	 */
5115
	ret = rbd_dev_v2_features(rbd_dev);
5116
	if (ret)
5117
		goto out_err;
5118

5119 5120 5121 5122 5123 5124 5125
	/* If the image supports fancy striping, get its parameters */

	if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
		ret = rbd_dev_v2_striping_info(rbd_dev);
		if (ret < 0)
			goto out_err;
	}
5126
	/* No support for crypto and compression type format 2 images */
5127

A
Alex Elder 已提交
5128
	return 0;
5129
out_err:
A
Alex Elder 已提交
5130
	rbd_dev->header.features = 0;
5131 5132
	kfree(rbd_dev->header.object_prefix);
	rbd_dev->header.object_prefix = NULL;
5133 5134

	return ret;
5135 5136
}

5137 5138 5139 5140 5141 5142
/*
 * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
 * rbd_dev_image_probe() recursion depth, which means it's also the
 * length of the already discovered part of the parent chain.
 */
static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
A
Alex Elder 已提交
5143
{
5144
	struct rbd_device *parent = NULL;
5145 5146 5147 5148 5149
	int ret;

	if (!rbd_dev->parent_spec)
		return 0;

5150 5151 5152 5153 5154 5155
	if (++depth > RBD_MAX_PARENT_CHAIN_LEN) {
		pr_info("parent chain is too long (%d)\n", depth);
		ret = -EINVAL;
		goto out_err;
	}

5156
	parent = __rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec);
5157 5158
	if (!parent) {
		ret = -ENOMEM;
5159
		goto out_err;
5160 5161 5162 5163 5164 5165 5166 5167
	}

	/*
	 * Images related by parent/child relationships always share
	 * rbd_client and spec/parent_spec, so bump their refcounts.
	 */
	__rbd_get_client(rbd_dev->rbd_client);
	rbd_spec_get(rbd_dev->parent_spec);
5168

5169
	ret = rbd_dev_image_probe(parent, depth);
5170 5171
	if (ret < 0)
		goto out_err;
5172

5173
	rbd_dev->parent = parent;
5174
	atomic_set(&rbd_dev->parent_ref, 1);
5175
	return 0;
5176

5177
out_err:
5178
	rbd_dev_unparent(rbd_dev);
5179
	rbd_dev_destroy(parent);
5180 5181 5182
	return ret;
}

I
Ilya Dryomov 已提交
5183 5184 5185 5186
/*
 * rbd_dev->header_rwsem must be locked for write and will be unlocked
 * upon return.
 */
5187
static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
5188
{
A
Alex Elder 已提交
5189
	int ret;
A
Alex Elder 已提交
5190

5191
	/* Record our major and minor device numbers. */
A
Alex Elder 已提交
5192

5193 5194 5195
	if (!single_major) {
		ret = register_blkdev(0, rbd_dev->name);
		if (ret < 0)
5196
			goto err_out_unlock;
5197 5198 5199 5200 5201 5202 5203

		rbd_dev->major = ret;
		rbd_dev->minor = 0;
	} else {
		rbd_dev->major = rbd_major;
		rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
	}
A
Alex Elder 已提交
5204 5205 5206 5207 5208 5209 5210

	/* Set up the blkdev mapping. */

	ret = rbd_init_disk(rbd_dev);
	if (ret)
		goto err_out_blkdev;

5211
	ret = rbd_dev_mapping_set(rbd_dev);
A
Alex Elder 已提交
5212 5213
	if (ret)
		goto err_out_disk;
I
Ilya Dryomov 已提交
5214

5215
	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
5216
	set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);
5217

5218 5219
	dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
	ret = device_add(&rbd_dev->dev);
5220
	if (ret)
5221
		goto err_out_mapping;
A
Alex Elder 已提交
5222 5223 5224

	/* Everything's ready.  Announce the disk to the world. */

5225
	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
I
Ilya Dryomov 已提交
5226
	up_write(&rbd_dev->header_rwsem);
A
Alex Elder 已提交
5227

5228 5229 5230 5231
	spin_lock(&rbd_dev_list_lock);
	list_add_tail(&rbd_dev->node, &rbd_dev_list);
	spin_unlock(&rbd_dev_list_lock);

I
Ilya Dryomov 已提交
5232
	add_disk(rbd_dev->disk);
A
Alex Elder 已提交
5233 5234 5235 5236
	pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
		(unsigned long long) rbd_dev->mapping.size);

	return ret;
5237

5238 5239
err_out_mapping:
	rbd_dev_mapping_clear(rbd_dev);
A
Alex Elder 已提交
5240 5241 5242
err_out_disk:
	rbd_free_disk(rbd_dev);
err_out_blkdev:
5243 5244
	if (!single_major)
		unregister_blkdev(rbd_dev->major, rbd_dev->name);
I
Ilya Dryomov 已提交
5245 5246
err_out_unlock:
	up_write(&rbd_dev->header_rwsem);
A
Alex Elder 已提交
5247 5248 5249
	return ret;
}

A
Alex Elder 已提交
5250 5251 5252
static int rbd_dev_header_name(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;
5253
	int ret;
A
Alex Elder 已提交
5254 5255 5256 5257 5258

	/* Record the header object name for this rbd image. */

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

5259
	rbd_dev->header_oloc.pool = rbd_dev->layout.pool_id;
A
Alex Elder 已提交
5260
	if (rbd_dev->image_format == 1)
5261 5262
		ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
				       spec->image_name, RBD_SUFFIX);
A
Alex Elder 已提交
5263
	else
5264 5265
		ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
				       RBD_HEADER_PREFIX, spec->image_id);
A
Alex Elder 已提交
5266

5267
	return ret;
A
Alex Elder 已提交
5268 5269
}

5270 5271
static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
A
Alex Elder 已提交
5272 5273 5274 5275 5276
	rbd_dev_unprobe(rbd_dev);
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

5277 5278 5279
	rbd_dev_destroy(rbd_dev);
}

5280 5281
/*
 * Probe for the existence of the header object for the given rbd
5282 5283 5284
 * device.  If this image is the one being mapped (i.e., not a
 * parent), initiate a watch on its header object before using that
 * object to get detailed information about the rbd image.
5285
 */
5286
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
5287 5288 5289 5290
{
	int ret;

	/*
A
Alex Elder 已提交
5291 5292 5293 5294
	 * Get the id from the image id object.  Unless there's an
	 * error, rbd_dev->spec->image_id will be filled in with
	 * a dynamically-allocated string, and rbd_dev->image_format
	 * will be set to either 1 or 2.
5295 5296 5297
	 */
	ret = rbd_dev_image_id(rbd_dev);
	if (ret)
5298 5299
		return ret;

A
Alex Elder 已提交
5300 5301 5302 5303
	ret = rbd_dev_header_name(rbd_dev);
	if (ret)
		goto err_out_format;

5304
	if (!depth) {
5305
		ret = rbd_register_watch(rbd_dev);
5306 5307 5308 5309 5310
		if (ret) {
			if (ret == -ENOENT)
				pr_info("image %s/%s does not exist\n",
					rbd_dev->spec->pool_name,
					rbd_dev->spec->image_name);
5311
			goto err_out_format;
5312
		}
5313
	}
5314

5315
	ret = rbd_dev_header_info(rbd_dev);
5316
	if (ret)
5317
		goto err_out_watch;
A
Alex Elder 已提交
5318

5319 5320 5321 5322 5323 5324
	/*
	 * If this image is the one being mapped, we have pool name and
	 * id, image name and id, and snap name - need to fill snap id.
	 * Otherwise this is a parent image, identified by pool, image
	 * and snap ids - need to fill in names for those ids.
	 */
5325
	if (!depth)
5326 5327 5328
		ret = rbd_spec_fill_snap_id(rbd_dev);
	else
		ret = rbd_spec_fill_names(rbd_dev);
5329 5330 5331 5332 5333 5334
	if (ret) {
		if (ret == -ENOENT)
			pr_info("snap %s/%s@%s does not exist\n",
				rbd_dev->spec->pool_name,
				rbd_dev->spec->image_name,
				rbd_dev->spec->snap_name);
A
Alex Elder 已提交
5335
		goto err_out_probe;
5336
	}
5337

5338 5339 5340 5341 5342 5343 5344 5345 5346
	if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
		ret = rbd_dev_v2_parent_info(rbd_dev);
		if (ret)
			goto err_out_probe;

		/*
		 * Need to warn users if this image is the one being
		 * mapped and has a parent.
		 */
5347
		if (!depth && rbd_dev->parent_spec)
5348 5349 5350 5351
			rbd_warn(rbd_dev,
				 "WARNING: kernel layering is EXPERIMENTAL!");
	}

5352
	ret = rbd_dev_probe_parent(rbd_dev, depth);
A
Alex Elder 已提交
5353 5354 5355 5356
	if (ret)
		goto err_out_probe;

	dout("discovered format %u image, header name is %s\n",
5357
		rbd_dev->image_format, rbd_dev->header_oid.name);
A
Alex Elder 已提交
5358
	return 0;
5359

A
Alex Elder 已提交
5360 5361
err_out_probe:
	rbd_dev_unprobe(rbd_dev);
5362
err_out_watch:
5363
	if (!depth)
5364
		rbd_unregister_watch(rbd_dev);
A
Alex Elder 已提交
5365 5366
err_out_format:
	rbd_dev->image_format = 0;
5367 5368
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;
5369 5370 5371
	return ret;
}

5372 5373 5374
static ssize_t do_rbd_add(struct bus_type *bus,
			  const char *buf,
			  size_t count)
5375
{
5376
	struct rbd_device *rbd_dev = NULL;
5377
	struct ceph_options *ceph_opts = NULL;
5378
	struct rbd_options *rbd_opts = NULL;
5379
	struct rbd_spec *spec = NULL;
5380
	struct rbd_client *rbdc;
5381
	bool read_only;
5382
	int rc;
5383 5384 5385 5386 5387

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	/* parse add command */
5388
	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
5389
	if (rc < 0)
5390
		goto out;
5391

5392 5393 5394
	rbdc = rbd_get_client(ceph_opts);
	if (IS_ERR(rbdc)) {
		rc = PTR_ERR(rbdc);
5395
		goto err_out_args;
5396
	}
5397 5398

	/* pick the pool */
5399
	rc = rbd_add_get_pool_id(rbdc, spec->pool_name);
5400 5401 5402
	if (rc < 0) {
		if (rc == -ENOENT)
			pr_info("pool %s does not exist\n", spec->pool_name);
5403
		goto err_out_client;
5404
	}
A
Alex Elder 已提交
5405
	spec->pool_id = (u64)rc;
5406

5407
	rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
5408 5409
	if (!rbd_dev) {
		rc = -ENOMEM;
5410
		goto err_out_client;
5411
	}
5412 5413
	rbdc = NULL;		/* rbd_dev now owns this */
	spec = NULL;		/* rbd_dev now owns this */
5414
	rbd_opts = NULL;	/* rbd_dev now owns this */
5415

I
Ilya Dryomov 已提交
5416
	down_write(&rbd_dev->header_rwsem);
5417
	rc = rbd_dev_image_probe(rbd_dev, 0);
5418
	if (rc < 0)
5419
		goto err_out_rbd_dev;
5420

5421 5422
	/* If we are mapping a snapshot it must be marked read-only */

5423
	read_only = rbd_dev->opts->read_only;
5424 5425 5426 5427
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
		read_only = true;
	rbd_dev->mapping.read_only = read_only;

5428
	rc = rbd_dev_device_setup(rbd_dev);
A
Alex Elder 已提交
5429
	if (rc) {
5430
		/*
5431
		 * rbd_unregister_watch() can't be moved into
5432 5433 5434
		 * rbd_dev_image_release() without refactoring, see
		 * commit 1f3ef78861ac.
		 */
5435
		rbd_unregister_watch(rbd_dev);
A
Alex Elder 已提交
5436
		rbd_dev_image_release(rbd_dev);
5437
		goto out;
A
Alex Elder 已提交
5438 5439
	}

5440 5441 5442 5443
	rc = count;
out:
	module_put(THIS_MODULE);
	return rc;
5444

5445
err_out_rbd_dev:
I
Ilya Dryomov 已提交
5446
	up_write(&rbd_dev->header_rwsem);
5447
	rbd_dev_destroy(rbd_dev);
5448
err_out_client:
5449
	rbd_put_client(rbdc);
5450
err_out_args:
5451
	rbd_spec_put(spec);
5452
	kfree(rbd_opts);
5453
	goto out;
5454 5455
}

5456 5457 5458 5459 5460 5461 5462 5463 5464 5465 5466 5467 5468 5469 5470 5471 5472
static ssize_t rbd_add(struct bus_type *bus,
		       const char *buf,
		       size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_add(bus, buf, count);
}

static ssize_t rbd_add_single_major(struct bus_type *bus,
				    const char *buf,
				    size_t count)
{
	return do_rbd_add(bus, buf, count);
}

5473
static void rbd_dev_device_release(struct rbd_device *rbd_dev)
5474 5475
{
	rbd_free_disk(rbd_dev);
5476 5477 5478 5479 5480

	spin_lock(&rbd_dev_list_lock);
	list_del_init(&rbd_dev->node);
	spin_unlock(&rbd_dev_list_lock);

5481
	clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
5482
	device_del(&rbd_dev->dev);
A
Alex Elder 已提交
5483
	rbd_dev_mapping_clear(rbd_dev);
5484 5485
	if (!single_major)
		unregister_blkdev(rbd_dev->major, rbd_dev->name);
5486 5487
}

5488 5489
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
{
A
Alex Elder 已提交
5490
	while (rbd_dev->parent) {
5491 5492 5493 5494 5495 5496 5497 5498 5499 5500 5501 5502
		struct rbd_device *first = rbd_dev;
		struct rbd_device *second = first->parent;
		struct rbd_device *third;

		/*
		 * Follow to the parent with no grandparent and
		 * remove it.
		 */
		while (second && (third = second->parent)) {
			first = second;
			second = third;
		}
A
Alex Elder 已提交
5503
		rbd_assert(second);
5504
		rbd_dev_image_release(second);
A
Alex Elder 已提交
5505 5506 5507 5508
		first->parent = NULL;
		first->parent_overlap = 0;

		rbd_assert(first->parent_spec);
5509 5510 5511 5512 5513
		rbd_spec_put(first->parent_spec);
		first->parent_spec = NULL;
	}
}

5514 5515 5516
static ssize_t do_rbd_remove(struct bus_type *bus,
			     const char *buf,
			     size_t count)
5517 5518
{
	struct rbd_device *rbd_dev = NULL;
5519 5520
	struct list_head *tmp;
	int dev_id;
5521
	unsigned long ul;
5522
	bool already = false;
5523
	int ret;
5524

5525
	ret = kstrtoul(buf, 10, &ul);
5526 5527
	if (ret)
		return ret;
5528 5529

	/* convert to int; abort if we lost anything in the conversion */
5530 5531
	dev_id = (int)ul;
	if (dev_id != ul)
5532 5533
		return -EINVAL;

5534 5535 5536 5537 5538 5539 5540 5541
	ret = -ENOENT;
	spin_lock(&rbd_dev_list_lock);
	list_for_each(tmp, &rbd_dev_list) {
		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id == dev_id) {
			ret = 0;
			break;
		}
5542
	}
5543 5544 5545 5546 5547
	if (!ret) {
		spin_lock_irq(&rbd_dev->lock);
		if (rbd_dev->open_count)
			ret = -EBUSY;
		else
5548 5549
			already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
							&rbd_dev->flags);
5550 5551 5552
		spin_unlock_irq(&rbd_dev->lock);
	}
	spin_unlock(&rbd_dev_list_lock);
5553
	if (ret < 0 || already)
5554
		return ret;
5555

5556
	rbd_unregister_watch(rbd_dev);
5557

5558 5559 5560 5561 5562 5563
	/*
	 * Don't free anything from rbd_dev->disk until after all
	 * notifies are completely processed. Otherwise
	 * rbd_bus_del_dev() will race with rbd_watch_cb(), resulting
	 * in a potential use after free of rbd_dev->disk or rbd_dev.
	 */
5564
	rbd_dev_device_release(rbd_dev);
5565
	rbd_dev_image_release(rbd_dev);
A
Alex Elder 已提交
5566

5567
	return count;
5568 5569
}

5570 5571 5572 5573 5574 5575 5576 5577 5578 5579 5580 5581 5582 5583 5584 5585 5586
static ssize_t rbd_remove(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_remove(bus, buf, count);
}

static ssize_t rbd_remove_single_major(struct bus_type *bus,
				       const char *buf,
				       size_t count)
{
	return do_rbd_remove(bus, buf, count);
}

5587 5588
/*
 * create control files in sysfs
5589
 * /sys/bus/rbd/...
5590 5591 5592
 */
static int rbd_sysfs_init(void)
{
5593
	int ret;
5594

5595
	ret = device_register(&rbd_root_dev);
A
Alex Elder 已提交
5596
	if (ret < 0)
5597
		return ret;
5598

5599 5600 5601
	ret = bus_register(&rbd_bus_type);
	if (ret < 0)
		device_unregister(&rbd_root_dev);
5602 5603 5604 5605 5606 5607

	return ret;
}

static void rbd_sysfs_cleanup(void)
{
5608
	bus_unregister(&rbd_bus_type);
5609
	device_unregister(&rbd_root_dev);
5610 5611
}

5612 5613 5614
static int rbd_slab_init(void)
{
	rbd_assert(!rbd_img_request_cache);
G
Geliang Tang 已提交
5615
	rbd_img_request_cache = KMEM_CACHE(rbd_img_request, 0);
5616 5617 5618 5619
	if (!rbd_img_request_cache)
		return -ENOMEM;

	rbd_assert(!rbd_obj_request_cache);
G
Geliang Tang 已提交
5620
	rbd_obj_request_cache = KMEM_CACHE(rbd_obj_request, 0);
5621 5622 5623 5624 5625
	if (!rbd_obj_request_cache)
		goto out_err;

	rbd_assert(!rbd_segment_name_cache);
	rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
5626
					CEPH_MAX_OID_NAME_LEN + 1, 1, 0, NULL);
5627
	if (rbd_segment_name_cache)
5628
		return 0;
5629
out_err:
5630 5631
	kmem_cache_destroy(rbd_obj_request_cache);
	rbd_obj_request_cache = NULL;
5632

5633 5634 5635
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;

5636 5637 5638 5639 5640
	return -ENOMEM;
}

static void rbd_slab_exit(void)
{
5641 5642 5643 5644
	rbd_assert(rbd_segment_name_cache);
	kmem_cache_destroy(rbd_segment_name_cache);
	rbd_segment_name_cache = NULL;

5645 5646 5647 5648
	rbd_assert(rbd_obj_request_cache);
	kmem_cache_destroy(rbd_obj_request_cache);
	rbd_obj_request_cache = NULL;

5649 5650 5651 5652 5653
	rbd_assert(rbd_img_request_cache);
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
}

A
Alex Elder 已提交
5654
static int __init rbd_init(void)
5655 5656 5657
{
	int rc;

5658 5659 5660 5661
	if (!libceph_compatible(NULL)) {
		rbd_warn(NULL, "libceph incompatibility (quitting)");
		return -EINVAL;
	}
I
Ilya Dryomov 已提交
5662

5663
	rc = rbd_slab_init();
5664 5665
	if (rc)
		return rc;
I
Ilya Dryomov 已提交
5666

5667 5668
	/*
	 * The number of active work items is limited by the number of
I
Ilya Dryomov 已提交
5669
	 * rbd devices * queue depth, so leave @max_active at default.
5670 5671 5672 5673 5674 5675 5676
	 */
	rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
	if (!rbd_wq) {
		rc = -ENOMEM;
		goto err_out_slab;
	}

5677 5678 5679 5680
	if (single_major) {
		rbd_major = register_blkdev(0, RBD_DRV_NAME);
		if (rbd_major < 0) {
			rc = rbd_major;
5681
			goto err_out_wq;
5682 5683 5684
		}
	}

5685 5686
	rc = rbd_sysfs_init();
	if (rc)
5687 5688 5689 5690 5691 5692
		goto err_out_blkdev;

	if (single_major)
		pr_info("loaded (major %d)\n", rbd_major);
	else
		pr_info("loaded\n");
5693

I
Ilya Dryomov 已提交
5694 5695
	return 0;

5696 5697 5698
err_out_blkdev:
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
5699 5700
err_out_wq:
	destroy_workqueue(rbd_wq);
I
Ilya Dryomov 已提交
5701 5702
err_out_slab:
	rbd_slab_exit();
5703
	return rc;
5704 5705
}

A
Alex Elder 已提交
5706
static void __exit rbd_exit(void)
5707
{
I
Ilya Dryomov 已提交
5708
	ida_destroy(&rbd_dev_id_ida);
5709
	rbd_sysfs_cleanup();
5710 5711
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
5712
	destroy_workqueue(rbd_wq);
5713
	rbd_slab_exit();
5714 5715 5716 5717 5718
}

module_init(rbd_init);
module_exit(rbd_exit);

A
Alex Elder 已提交
5719
MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
5720 5721 5722 5723 5724
MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");

5725
MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
5726
MODULE_LICENSE("GPL");