/*
   rbd.c -- Export ceph rados objects as a Linux block device


   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.



   For usage instructions, please refer to:

                 Documentation/ABI/testing/sysfs-bus-rbd

 */

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/workqueue.h>

#include "rbd_types.h"

#define RBD_DEBUG	/* Activate rbd_assert() calls */

/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define	SECTOR_SHIFT	9
#define	SECTOR_SIZE	(1ULL << SECTOR_SHIFT)

/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value, returns
 * -EINVAL without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
	unsigned int counter;

	counter = (unsigned int)__atomic_add_unless(v, 1, 0);
	if (counter <= (unsigned int)INT_MAX)
		return (int)counter;

	atomic_dec(v);

	return -EINVAL;
}

/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
	int counter;

	counter = atomic_dec_return(v);
	if (counter >= 0)
		return counter;

	atomic_inc(v);

	return -EINVAL;
}
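
/*
 * Illustrative sketch (not called in this excerpt): these two helpers
 * are meant to be used as a pair around a counter such as
 * rbd_dev->parent_ref, where 0 means "gone for good":
 *
 *	if (atomic_inc_return_safe(&rbd_dev->parent_ref) > 0) {
 *		... use the counted object ...
 *		atomic_dec_return_safe(&rbd_dev->parent_ref);
 *	}
 *
 * A return of 0 or -EINVAL from the increment means the caller must
 * not proceed.
 */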

#define RBD_DRV_NAME "rbd"

#define RBD_MINORS_PER_MAJOR		256
#define RBD_SINGLE_MAJOR_PART_SHIFT	4

#define RBD_MAX_PARENT_CHAIN_LEN	16

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN	\
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

#define	BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64

/* Feature bits */

#define RBD_FEATURE_LAYERING	(1<<0)
#define RBD_FEATURE_STRIPINGV2	(1<<1)
#define RBD_FEATURES_ALL \
	    (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)

/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 */
#define DEV_NAME_LEN		32

/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These six fields never change for a given rbd image */
	char *object_prefix;
	__u8 obj_order;
	__u8 crypt_type;
	__u8 comp_type;
	u64 stripe_unit;
	u64 stripe_count;
	u64 features;		/* Might be changeable someday? */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
};

/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};

/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};

struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define	BAD_WHICH	U32_MAX		/* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);

enum obj_request_type {
	OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};

enum obj_operation_type {
	OBJ_OP_WRITE,
	OBJ_OP_READ,
	OBJ_OP_DISCARD,
};

enum obj_req_flags {
	OBJ_REQ_DONE,		/* completion flag: not done = 0, done = 1 */
	OBJ_REQ_IMG_DATA,	/* object usage: standalone = 0, image = 1 */
	OBJ_REQ_KNOWN,		/* EXISTS flag valid: no = 0, yes = 1 */
	OBJ_REQ_EXISTS,		/* target exists: no = 0, yes = 1 */
};

struct rbd_obj_request {
	const char		*object_name;
	u64			offset;		/* object start byte */
	u64			length;		/* bytes from offset */
	unsigned long		flags;

	/*
	 * An object request associated with an image will have its
	 * img_data flag set; a standalone object request will not.
	 *
	 * A standalone object request will have which == BAD_WHICH
	 * and a null obj_request pointer.
	 *
	 * An object request initiated in support of a layered image
	 * object (to check for its existence before a write) will
	 * have which == BAD_WHICH and a non-null obj_request pointer.
	 *
	 * Finally, an object request for rbd image data will have
	 * which != BAD_WHICH, and will have a non-null img_request
	 * pointer.  The value of which will be in the range
	 * 0..(img_request->obj_request_count-1).
	 */
	union {
		struct rbd_obj_request	*obj_request;	/* STAT op */
		struct {
			struct rbd_img_request	*img_request;
			u64			img_offset;
			/* links for img_request->obj_requests list */
			struct list_head	links;
		};
	};
	u32			which;		/* posn image request list */

	enum obj_request_type	type;
	union {
		struct bio	*bio_list;
		struct {
			struct page	**pages;
			u32		page_count;
		};
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;

	struct ceph_osd_request	*osd_req;

	u64			xferred;	/* bytes transferred */
	int			result;

	rbd_obj_callback_t	callback;
	struct completion	completion;

	struct kref		kref;
};

enum img_req_flags {
	IMG_REQ_WRITE,		/* I/O direction: read = 0, write = 1 */
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
	IMG_REQ_DISCARD,	/* discard: normal = 0, discard request = 1 */
};

struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	u64			offset;	/* starting image byte offset */
	u64			length;	/* byte count from offset */
	unsigned long		flags;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	union {
		struct request		*rq;		/* block request */
		struct rbd_obj_request	*obj_request;	/* obj req initiator */
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;
	spinlock_t		completion_lock;/* protects next_completion */
	u32			next_completion;
	rbd_img_callback_t	callback;
	u64			xferred;/* aggregate bytes transferred */
	int			result;	/* first nonzero obj_request result */

	u32			obj_request_count;
	struct list_head	obj_requests;	/* rbd_obj_request structs */

	struct kref		kref;
};

#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
	list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)

struct rbd_mapping {
	u64                     size;
	u64                     features;
	bool			read_only;
};

/*
 * a single device
 */
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	int			minor;
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;
	struct rbd_options	*opts;

	struct ceph_object_id	header_oid;
	struct ceph_object_locator header_oloc;

	struct ceph_file_layout	layout;		/* used for all rbd requests */

	struct ceph_osd_linger_request *watch_handle;

	struct workqueue_struct	*task_wq;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	atomic_t		parent_ref;
	struct rbd_device	*parent;

	/* Block layer tags. */
	struct blk_mq_tag_set	tag_set;

	/* protects updating the header */
	struct rw_semaphore     header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* sysfs related */
	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};

/*
 * Flag bits for rbd_dev->flags.  If atomicity is required,
 * rbd_dev->lock is used to protect access.
 *
 * Currently, only the "removing" flag (which is coupled with the
 * "open_count" field) requires atomic access.
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
};

static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);		/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;
static struct kmem_cache	*rbd_segment_name_cache;

static int rbd_major;
static DEFINE_IDA(rbd_dev_id_ida);

static struct workqueue_struct *rbd_wq;

/*
 * Default to false for now, as single-major requires >= 0.75 version of
 * userspace rbd utility.
 */
static bool single_major = false;
module_param(single_major, bool, S_IRUGO);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: false)");

static int rbd_img_request_submit(struct rbd_img_request *img_request);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);
static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
				    size_t count);
static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
				       size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);
static void rbd_spec_put(struct rbd_spec *spec);

static int rbd_dev_id_to_minor(int dev_id)
{
	return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
}

static int minor_to_rbd_dev_id(int minor)
{
	return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
}
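
/*
 * Worked example (illustrative): with RBD_SINGLE_MAJOR_PART_SHIFT == 4,
 * every device owns 16 minors for its partitions.  dev_id 2 gets first
 * minor 2 << 4 == 32, and any of minors 32..47 (e.g. the one backing
 * rbd2p3) maps back to dev_id 32 >> 4 == 2.
 */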

static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major);
static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major);

static struct attribute *rbd_bus_attrs[] = {
	&bus_attr_add.attr,
	&bus_attr_remove.attr,
	&bus_attr_add_single_major.attr,
	&bus_attr_remove_single_major.attr,
	NULL,
};

static umode_t rbd_bus_is_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
	if (!single_major &&
	    (attr == &bus_attr_add_single_major.attr ||
	     attr == &bus_attr_remove_single_major.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group rbd_bus_group = {
	.attrs = rbd_bus_attrs,
	.is_visible = rbd_bus_is_visible,
};
__ATTRIBUTE_GROUPS(rbd_bus);

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_groups	= rbd_bus_groups,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =    "rbd",
	.release =      rbd_root_dev_release,
};

static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}

#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */

static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request);
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_header_info(struct rbd_device *rbd_dev);
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features);

static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	(void) get_device(&rbd_dev->dev);

	return 0;
}

static void rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	put_device(&rbd_dev->dev);
}

static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
{
	int ret = 0;
	int val;
	bool ro;
	bool ro_changed = false;

	/* get_user() may sleep, so call it before taking rbd_dev->lock */
	if (get_user(val, (int __user *)(arg)))
		return -EFAULT;

	ro = val ? true : false;
	/* Snapshots can't be written to */
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	/* prevent others from opening this device */
	if (rbd_dev->open_count > 1) {
		ret = -EBUSY;
		goto out;
	}

	if (rbd_dev->mapping.read_only != ro) {
		rbd_dev->mapping.read_only = ro;
		ro_changed = true;
	}

out:
	spin_unlock_irq(&rbd_dev->lock);
	/* set_disk_ro() may sleep, so call it after releasing rbd_dev->lock */
	if (ret == 0 && ro_changed)
		set_disk_ro(rbd_dev->disk, ro ? 1 : 0);

	return ret;
}

static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	int ret = 0;

	switch (cmd) {
	case BLKROSET:
		ret = rbd_ioctl_set_ro(rbd_dev, arg);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
				unsigned int cmd, unsigned long arg)
{
	return rbd_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */

static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
	.ioctl			= rbd_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= rbd_compat_ioctl,
#endif
};

/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.  Caller holds client_mutex.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
	if (IS_ERR(rbdc->client))
		goto out_rbdc;
	ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_client;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;
out_client:
	ceph_destroy_client(rbdc->client);
out_rbdc:
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}

static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}

/*
 * (Per device) rbd map options
 */
enum {
	Opt_queue_depth,
	Opt_last_int,
	/* int args above */
	Opt_last_string,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	Opt_err
};

static match_table_t rbd_opts_tokens = {
	{Opt_queue_depth, "queue_depth=%d"},
	/* int args above */
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	{Opt_err, NULL}
};

struct rbd_options {
	int	queue_depth;
	bool	read_only;
};

#define RBD_QUEUE_DEPTH_DEFAULT	BLKDEV_MAX_RQ
#define RBD_READ_ONLY_DEFAULT	false

static int parse_rbd_opts_token(char *c, void *private)
{
	struct rbd_options *rbd_opts = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad mount option arg (not int) at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token, argstr[0].from);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_queue_depth:
		if (intval < 1) {
			pr_err("queue_depth out of range\n");
			return -EINVAL;
		}
		rbd_opts->queue_depth = intval;
		break;
	case Opt_read_only:
		rbd_opts->read_only = true;
		break;
	case Opt_read_write:
		rbd_opts->read_only = false;
		break;
	default:
		/* libceph prints "bad option" msg */
		return -EINVAL;
	}

	return 0;
}
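
/*
 * Illustrative example (the exact sysfs string is an assumption here):
 * a mapping request such as
 *
 *	echo "1.2.3.4:6789 name=admin,queue_depth=128,ro rbd foo -" \
 *		> /sys/bus/rbd/add
 *
 * has libceph hand each option it does not itself recognize to
 * parse_rbd_opts_token(), which above would set queue_depth to 128
 * and read_only to true.
 */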

static char* obj_op_name(enum obj_operation_type op_type)
{
	switch (op_type) {
	case OBJ_OP_READ:
		return "read";
	case OBJ_OP_WRITE:
		return "write";
	case OBJ_OP_DISCARD:
		return "discard";
	default:
		return "???";
	}
}

/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;

	mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
	rbdc = rbd_client_find(ceph_opts);
	if (rbdc)	/* using an existing client */
		ceph_destroy_options(ceph_opts);
	else
		rbdc = rbd_client_create(ceph_opts);
	mutex_unlock(&client_mutex);

	return rbdc;
}

/*
 * Destroy ceph client
 *
 * Takes rbd_client_list_lock itself to remove the client from the
 * client list.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}

static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}

static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}

/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
				 struct rbd_image_header_ondisk *ondisk)
{
	struct rbd_image_header *header = &rbd_dev->header;
	bool first_time = header->object_prefix == NULL;
	struct ceph_snap_context *snapc;
	char *object_prefix = NULL;
	char *snap_names = NULL;
	u64 *snap_sizes = NULL;
	u32 snap_count;
	size_t size;
	int ret = -ENOMEM;
	u32 i;

	/* Allocate this now to avoid having to handle failure below */

	if (first_time) {
		size_t len;

		len = strnlen(ondisk->object_prefix,
				sizeof (ondisk->object_prefix));
		object_prefix = kmalloc(len + 1, GFP_KERNEL);
		if (!object_prefix)
			return -ENOMEM;
		memcpy(object_prefix, ondisk->object_prefix, len);
		object_prefix[len] = '\0';
	}

	/* Allocate the snapshot context and fill it in */

	snap_count = le32_to_cpu(ondisk->snap_count);
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc)
		goto out_err;
	snapc->seq = le64_to_cpu(ondisk->snap_seq);
	if (snap_count) {
		struct rbd_image_snap_ondisk *snaps;
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* We'll keep a copy of the snapshot names... */

		if (snap_names_len > (u64)SIZE_MAX)
			goto out_2big;
		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!snap_names)
			goto out_err;

		/* ...as well as the array of their sizes. */

		size = snap_count * sizeof (*header->snap_sizes);
		snap_sizes = kmalloc(size, GFP_KERNEL);
		if (!snap_sizes)
			goto out_err;

		/*
		 * Copy the names, and fill in each snapshot's id
		 * and size.
		 *
		 * Note that rbd_dev_v1_header_info() guarantees the
		 * ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, so this memcpy() is safe.
		 */
		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
		snaps = ondisk->snaps;
		for (i = 0; i < snap_count; i++) {
			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
		}
	}

	/* We won't fail any more, fill in the header */

	if (first_time) {
		header->object_prefix = object_prefix;
		header->obj_order = ondisk->options.order;
		header->crypt_type = ondisk->options.crypt_type;
		header->comp_type = ondisk->options.comp_type;
		/* The rest aren't used for format 1 images */
		header->stripe_unit = 0;
		header->stripe_count = 0;
		header->features = 0;
	} else {
		ceph_put_snap_context(header->snapc);
		kfree(header->snap_names);
		kfree(header->snap_sizes);
	}

	/* The remaining fields always get updated (when we refresh) */

	header->image_size = le64_to_cpu(ondisk->image_size);
	header->snapc = snapc;
	header->snap_names = snap_names;
	header->snap_sizes = snap_sizes;

	return 0;
out_2big:
	ret = -EIO;
out_err:
	kfree(snap_sizes);
	kfree(snap_names);
	ceph_put_snap_context(snapc);
	kfree(object_prefix);

	return ret;
}
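
/*
 * Illustrative layout of the format 1 on-disk header consumed above:
 *
 *	rbd_image_header_ondisk      fixed fields, snap_count = N
 *	rbd_image_snap_ondisk[N]     one id/size pair per snapshot
 *	char[snap_names_len]         N NUL-terminated names, back to back
 *
 * which is why the names are copied from &ondisk->snaps[snap_count].
 */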

static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}

/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}

/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 *found;

	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);

	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}
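
/*
 * Worked example (illustrative): for a snapc->snaps array of
 * { 12, 7, 3 } (descending, as the osd keeps it), looking up
 * snap_id 7 yields index 1, while the absent snap_id 5 yields
 * BAD_SNAP_INDEX.
 */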

static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	u32 which;
	const char *snap_name;

	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
		return ERR_PTR(-ENOENT);

	snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
	return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}

static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;

		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
}

static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
			u64 *snap_features)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_features = rbd_dev->header.features;
	} else if (rbd_dev->image_format == 1) {
		*snap_features = 0;	/* No features for format 1 */
	} else {
		u64 features = 0;
		int ret;

		ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
		if (ret)
			return ret;

		*snap_features = features;
	}
	return 0;
}

static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	u64 snap_id = rbd_dev->spec->snap_id;
	u64 size = 0;
	u64 features = 0;
	int ret;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;
	ret = rbd_snap_features(rbd_dev, snap_id, &features);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	rbd_dev->mapping.features = features;

	return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
}

static void rbd_segment_name_free(const char *name)
{
	/* The explicit cast here is needed to drop the const qualifier */

	kmem_cache_free(rbd_segment_name_cache, (void *)name);
}

static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
{
	char *name;
	u64 segment;
	int ret;
	char *name_format;

	name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
	if (!name)
		return NULL;
	segment = offset >> rbd_dev->header.obj_order;
	name_format = "%s.%012llx";
	if (rbd_dev->image_format == 2)
		name_format = "%s.%016llx";
	ret = snprintf(name, CEPH_MAX_OID_NAME_LEN + 1, name_format,
			rbd_dev->header.object_prefix, segment);
	if (ret < 0 || ret > CEPH_MAX_OID_NAME_LEN) {
		pr_err("error formatting segment name for #%llu (%d)\n",
			segment, ret);
		rbd_segment_name_free(name);
		name = NULL;
	}

	return name;
}

static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	return offset & (segment_size - 1);
}

static u64 rbd_segment_length(struct rbd_device *rbd_dev,
				u64 offset, u64 length)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	offset &= segment_size - 1;

	rbd_assert(length <= U64_MAX - offset);
	if (offset + length > segment_size)
		length = segment_size - offset;

	return length;
}
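
/*
 * Worked example (illustrative): with the default obj_order of 22
 * (4 MiB objects), image byte offset 10 MiB falls in segment
 * 10 MiB >> 22 == 2 at offset 10 MiB & (4 MiB - 1) == 2 MiB within
 * it, and a 4 MiB request starting there is clipped by
 * rbd_segment_length() to the 2 MiB left in that segment.
 */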

/*
 * returns the size of an object in the image
 */
static u64 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1 << header->obj_order;
}

/*
 * bio helpers
 */

static void bio_chain_put(struct bio *chain)
{
	struct bio *tmp;

	while (chain) {
		tmp = chain;
		chain = chain->bi_next;
		bio_put(tmp);
	}
}

/*
 * zeros a bio chain, starting at specific offset
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned long flags;
	void *buf;
	int pos = 0;

	while (chain) {
		bio_for_each_segment(bv, chain, iter) {
			if (pos + bv.bv_len > start_ofs) {
				int remainder = max(start_ofs - pos, 0);
				buf = bvec_kmap_irq(&bv, &flags);
				memset(buf + remainder, 0,
				       bv.bv_len - remainder);
				flush_dcache_page(bv.bv_page);
				bvec_kunmap_irq(buf, &flags);
			}
			pos += bv.bv_len;
		}

		chain = chain->bi_next;
	}
}

/*
 * similar to zero_bio_chain(), zeros data defined by a page array,
 * starting at the given byte offset from the start of the array and
 * continuing up to the given end offset.  The pages array is
 * assumed to be big enough to hold all bytes up to the end.
 */
static void zero_pages(struct page **pages, u64 offset, u64 end)
{
	struct page **page = &pages[offset >> PAGE_SHIFT];

	rbd_assert(end > offset);
	rbd_assert(end - offset <= (u64)SIZE_MAX);
	while (offset < end) {
		size_t page_offset;
		size_t length;
		unsigned long flags;
		void *kaddr;

		page_offset = offset & ~PAGE_MASK;
		length = min_t(size_t, PAGE_SIZE - page_offset, end - offset);
		local_irq_save(flags);
		kaddr = kmap_atomic(*page);
		memset(kaddr + page_offset, 0, length);
		flush_dcache_page(*page);
		kunmap_atomic(kaddr);
		local_irq_restore(flags);

		offset += length;
		page++;
	}
}

/*
 * Clone a portion of a bio, starting at the given byte offset
 * and continuing for the number of bytes indicated.
 */
static struct bio *bio_clone_range(struct bio *bio_src,
					unsigned int offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bio;

	bio = bio_clone(bio_src, gfpmask);
	if (!bio)
		return NULL;	/* ENOMEM */

	bio_advance(bio, offset);
	bio->bi_iter.bi_size = len;

	return bio;
}

/*
 * Clone a portion of a bio chain, starting at the given byte offset
 * into the first bio in the source chain and continuing for the
 * number of bytes indicated.  The result is another bio chain of
 * exactly the given length, or a null pointer on error.
 *
 * The bio_src and offset parameters are both in-out.  On entry they
 * refer to the first source bio and the offset into that bio where
 * the start of data to be cloned is located.
 *
 * On return, bio_src is updated to refer to the bio in the source
 * chain that contains the first un-cloned byte, and *offset will
 * contain the offset of that byte within that bio.
 */
static struct bio *bio_chain_clone_range(struct bio **bio_src,
					unsigned int *offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bi = *bio_src;
	unsigned int off = *offset;
	struct bio *chain = NULL;
	struct bio **end;

	/* Build up a chain of clone bios up to the limit */

	if (!bi || off >= bi->bi_iter.bi_size || !len)
		return NULL;		/* Nothing to clone */

	end = &chain;
	while (len) {
		unsigned int bi_size;
		struct bio *bio;

		if (!bi) {
			rbd_warn(NULL, "bio_chain exhausted with %u left", len);
			goto out_err;	/* EINVAL; ran out of bio's */
		}
		bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len);
		bio = bio_clone_range(bi, off, bi_size, gfpmask);
		if (!bio)
			goto out_err;	/* ENOMEM */

		*end = bio;
		end = &bio->bi_next;

		off += bi_size;
		if (off == bi->bi_iter.bi_size) {
			bi = bi->bi_next;
			off = 0;
		}
		len -= bi_size;
	}
	*bio_src = bi;
	*offset = off;

	return chain;
out_err:
	bio_chain_put(chain);

	return NULL;
}
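
/*
 * Usage sketch (illustrative; variable names are hypothetical): a block
 * request spanning object boundaries is carved up by repeatedly cloning
 * one object's worth of the bio chain -- bio_ptr/offset advance on
 * their own:
 *
 *	struct bio *bio_ptr = rq_bio;
 *	unsigned int offset = 0;
 *
 *	while (left) {
 *		u64 len = rbd_segment_length(rbd_dev, img_offset, left);
 *		struct bio *clone = bio_chain_clone_range(&bio_ptr, &offset,
 *						(unsigned int)len, GFP_NOIO);
 *		... hand clone to one object request ...
 *		img_offset += len;
 *		left -= len;
 *	}
 */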

/*
 * The default/initial value for all object request flags is 0.  For
 * each flag, once its value is set to 1 it is never reset to 0
 * again.
 */
static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
		struct rbd_device *rbd_dev;

		rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked img_data",
			obj_request);
	}
}

static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
}

static void obj_request_done_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
		struct rbd_device *rbd_dev = NULL;

		if (obj_request_img_data_test(obj_request))
			rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked done",
			obj_request);
	}
}

static bool obj_request_done_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
}

/*
 * This sets the KNOWN flag after (possibly) setting the EXISTS
 * flag.  The latter is set based on the "exists" value provided.
 *
 * Note that for our purposes once an object exists it never goes
 * away again.  It's possible that the response from two existence
 * checks are separated by the creation of the target object, and
 * the first ("doesn't exist") response arrives *after* the second
 * ("does exist").  In that case we ignore the second one.
 */
static void obj_request_existence_set(struct rbd_obj_request *obj_request,
				bool exists)
{
	if (exists)
		set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
	set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
	smp_mb();
}

static bool obj_request_known_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
}

static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
}
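
/*
 * Illustrative consequence: if a "does exist" reply has already set
 * OBJ_REQ_EXISTS, a stale "doesn't exist" reply that arrives later
 * only re-sets OBJ_REQ_KNOWN; EXISTS is never cleared, so the late
 * answer is effectively ignored.
 */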

static bool obj_request_overlaps_parent(struct rbd_obj_request *obj_request)
{
	struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;

	return obj_request->img_offset <
	    round_up(rbd_dev->parent_overlap, rbd_obj_bytes(&rbd_dev->header));
}

static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_get(&obj_request->kref);
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static void rbd_img_request_get(struct rbd_img_request *img_request)
{
	dout("%s: img %p (was %d)\n", __func__, img_request,
	     atomic_read(&img_request->kref.refcount));
	kref_get(&img_request->kref);
}

static bool img_request_child_test(struct rbd_img_request *img_request);
static void rbd_parent_request_destroy(struct kref *kref);
static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
	rbd_assert(img_request != NULL);
	dout("%s: img %p (was %d)\n", __func__, img_request,
		atomic_read(&img_request->kref.refcount));
	if (img_request_child_test(img_request))
		kref_put(&img_request->kref, rbd_parent_request_destroy);
	else
		kref_put(&img_request->kref, rbd_img_request_destroy);
}

static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	/* Image request now owns object's original reference */
	obj_request->img_request = img_request;
	obj_request->which = img_request->obj_request_count;
	rbd_assert(!obj_request_img_data_test(obj_request));
	obj_request_img_data_set(obj_request);
	rbd_assert(obj_request->which != BAD_WHICH);
	img_request->obj_request_count++;
	list_add_tail(&obj_request->links, &img_request->obj_requests);
	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->which != BAD_WHICH);

	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
	list_del(&obj_request->links);
	rbd_assert(img_request->obj_request_count > 0);
	img_request->obj_request_count--;
	rbd_assert(obj_request->which == img_request->obj_request_count);
	obj_request->which = BAD_WHICH;
	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request->img_request == img_request);
	obj_request->img_request = NULL;
	obj_request->callback = NULL;
	rbd_obj_request_put(obj_request);
}

static bool obj_request_type_valid(enum obj_request_type type)
{
	switch (type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_PAGES:
		return true;
	default:
		return false;
	}
}

static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
				struct rbd_obj_request *obj_request)
{
	dout("%s %p\n", __func__, obj_request);
	return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
}

static void rbd_obj_request_end(struct rbd_obj_request *obj_request)
{
	dout("%s %p\n", __func__, obj_request);
	ceph_osdc_cancel_request(obj_request->osd_req);
}

/*
 * Wait for an object request to complete.  If interrupted, cancel the
 * underlying osd request.
 *
 * @timeout: in jiffies, 0 means "wait forever"
 */
static int __rbd_obj_request_wait(struct rbd_obj_request *obj_request,
				  unsigned long timeout)
{
	long ret;

	dout("%s %p\n", __func__, obj_request);
	ret = wait_for_completion_interruptible_timeout(
					&obj_request->completion,
					ceph_timeout_jiffies(timeout));
	if (ret <= 0) {
		if (ret == 0)
			ret = -ETIMEDOUT;
		rbd_obj_request_end(obj_request);
	} else {
		ret = 0;
	}

	dout("%s %p ret %d\n", __func__, obj_request, (int)ret);
	return ret;
}

static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
{
	return __rbd_obj_request_wait(obj_request, 0);
}
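
/*
 * Illustrative call (assumed, not from this excerpt): bounding the wait
 * at, say, 30 seconds would be
 *
 *	ret = __rbd_obj_request_wait(obj_request, 30 * HZ);
 *
 * while the 0 that rbd_obj_request_wait() passes becomes
 * MAX_SCHEDULE_TIMEOUT via ceph_timeout_jiffies(), i.e. no time limit.
 */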

static void rbd_img_request_complete(struct rbd_img_request *img_request)
{

	dout("%s: img %p\n", __func__, img_request);

	/*
	 * If no error occurred, compute the aggregate transfer
	 * count for the image request.  We could instead use
	 * atomic64_cmpxchg() to update it as each object request
	 * completes; not clear which way is better off hand.
	 */
	if (!img_request->result) {
		struct rbd_obj_request *obj_request;
		u64 xferred = 0;

		for_each_obj_request(img_request, obj_request)
			xferred += obj_request->xferred;
		img_request->xferred = xferred;
	}

	if (img_request->callback)
		img_request->callback(img_request);
	else
		rbd_img_request_put(img_request);
}

/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_write_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_WRITE, &img_request->flags);
	smp_mb();
}

static bool img_request_write_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
}

/*
 * Set the discard flag when the img_request is a discard request
 */
static void img_request_discard_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_DISCARD, &img_request->flags);
	smp_mb();
}

static bool img_request_discard_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_DISCARD, &img_request->flags) != 0;
}

static void img_request_child_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static void img_request_child_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static bool img_request_child_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
}

static void img_request_layered_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static void img_request_layered_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}

static enum obj_operation_type
rbd_img_request_op_type(struct rbd_img_request *img_request)
{
	if (img_request_write_test(img_request))
		return OBJ_OP_WRITE;
	else if (img_request_discard_test(img_request))
		return OBJ_OP_DISCARD;
	else
		return OBJ_OP_READ;
}

static void
rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
{
	u64 xferred = obj_request->xferred;
	u64 length = obj_request->length;

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, obj_request->img_request, obj_request->result,
		xferred, length);
	/*
	 * ENOENT means a hole in the image.  We zero-fill the entire
	 * length of the request.  A short read also implies zero-fill
	 * to the end of the request.  An error requires the whole
	 * length of the request to be reported finished with an error
	 * to the block layer.  In each case we update the xferred
	 * count to indicate the whole request was satisfied.
	 */
	rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
	if (obj_request->result == -ENOENT) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, 0);
		else
			zero_pages(obj_request->pages, 0, length);
		obj_request->result = 0;
	} else if (xferred < length && !obj_request->result) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, xferred);
		else
			zero_pages(obj_request->pages, xferred, length);
	}
	obj_request->xferred = length;
	obj_request_done_set(obj_request);
}

static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p cb %p\n", __func__, obj_request,
		obj_request->callback);
	if (obj_request->callback)
		obj_request->callback(obj_request);
	else
		complete_all(&obj_request->completion);
}

static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = NULL;
	struct rbd_device *rbd_dev = NULL;
	bool layered = false;

	if (obj_request_img_data_test(obj_request)) {
		img_request = obj_request->img_request;
		layered = img_request && img_request_layered_test(img_request);
		rbd_dev = img_request->rbd_dev;
	}

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, img_request, obj_request->result,
		obj_request->xferred, obj_request->length);
	if (layered && obj_request->result == -ENOENT &&
			obj_request->img_offset < rbd_dev->parent_overlap)
		rbd_img_parent_read(obj_request);
	else if (img_request)
		rbd_img_obj_request_read_callback(obj_request);
	else
		obj_request_done_set(obj_request);
}

static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short write.  Set
	 * it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
	obj_request_done_set(obj_request);
}

static void rbd_osd_discard_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short discard.  Set
	 * it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
	/* discarding a non-existent object is not a problem */
	if (obj_request->result == -ENOENT)
		obj_request->result = 0;
	obj_request_done_set(obj_request);
}

/*
 * For a simple stat call there's nothing to do.  We'll do more if
 * this is part of a write sequence for a layered image.
 */
static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}

static void rbd_osd_call_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);

	if (obj_request_img_data_test(obj_request))
		rbd_osd_copyup_callback(obj_request);
	else
		obj_request_done_set(obj_request);
}

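/*
 * Completion callback for all rbd osd requests.  Dispatches on the
 * opcode of the request's first op; once the object request is
 * marked done, it is completed.
 */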
static void rbd_osd_req_callback(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;
	u16 opcode;

	dout("%s: osd_req %p\n", __func__, osd_req);
	rbd_assert(osd_req == obj_request->osd_req);
	if (obj_request_img_data_test(obj_request)) {
		rbd_assert(obj_request->img_request);
		rbd_assert(obj_request->which != BAD_WHICH);
	} else {
		rbd_assert(obj_request->which == BAD_WHICH);
	}

	if (osd_req->r_result < 0)
		obj_request->result = osd_req->r_result;

	/*
	 * We support a 64-bit length, but ultimately it has to be
	 * passed to the block layer, which just supports a 32-bit
	 * length field.
	 */
	obj_request->xferred = osd_req->r_ops[0].outdata_len;
	rbd_assert(obj_request->xferred < (u64)UINT_MAX);

	opcode = osd_req->r_ops[0].op;
	switch (opcode) {
	case CEPH_OSD_OP_READ:
		rbd_osd_read_callback(obj_request);
		break;
	case CEPH_OSD_OP_SETALLOCHINT:
		rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE ||
			   osd_req->r_ops[1].op == CEPH_OSD_OP_WRITEFULL);
		/* fall through */
	case CEPH_OSD_OP_WRITE:
	case CEPH_OSD_OP_WRITEFULL:
		rbd_osd_write_callback(obj_request);
		break;
	case CEPH_OSD_OP_STAT:
		rbd_osd_stat_callback(obj_request);
		break;
	case CEPH_OSD_OP_DELETE:
	case CEPH_OSD_OP_TRUNCATE:
	case CEPH_OSD_OP_ZERO:
		rbd_osd_discard_callback(obj_request);
		break;
	case CEPH_OSD_OP_CALL:
		rbd_osd_call_callback(obj_request);
		break;
	default:
		rbd_warn(NULL, "%s: unsupported op %hu",
			obj_request->object_name, (unsigned short) opcode);
		break;
	}

	if (obj_request_done_test(obj_request))
		rbd_obj_request_complete(obj_request);
}

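/* Direct a read osd request at the image request's target snapshot */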
static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;

	if (img_request)
		osd_req->r_snapid = img_request->snap_id;
}

static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
	struct ceph_osd_request *osd_req = obj_request->osd_req;

	osd_req->r_mtime = CURRENT_TIME;
	osd_req->r_data_offset = obj_request->offset;
}

/*
 * Create an osd request.  A read request has one osd op (read).
 * A write request has either one (watch) or two (hint+write) osd ops.
 * (All rbd data writes are prefixed with an allocation hint op, but
 * technically osd watch is a write request, hence this distinction.)
 */
static struct ceph_osd_request *rbd_osd_req_create(
					struct rbd_device *rbd_dev,
					enum obj_operation_type op_type,
					unsigned int num_ops,
					struct rbd_obj_request *obj_request)
{
	struct ceph_snap_context *snapc = NULL;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;

	if (obj_request_img_data_test(obj_request) &&
		(op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_WRITE)) {
		struct rbd_img_request *img_request = obj_request->img_request;
		if (op_type == OBJ_OP_WRITE) {
			rbd_assert(img_request_write_test(img_request));
		} else {
			rbd_assert(img_request_discard_test(img_request));
		}
		snapc = img_request->snapc;
	}

	rbd_assert(num_ops == 1 || ((op_type == OBJ_OP_WRITE) && num_ops == 2));

	/* Allocate and initialize the request, for the num_ops ops */

	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
					  GFP_NOIO);
	if (!osd_req)
		goto fail;

	if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
		osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	else
		osd_req->r_flags = CEPH_OSD_FLAG_READ;

	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_base_oloc.pool = rbd_dev->layout.pool_id;
	if (ceph_oid_aprintf(&osd_req->r_base_oid, GFP_NOIO, "%s",
			     obj_request->object_name))
		goto fail;

	if (ceph_osdc_alloc_messages(osd_req, GFP_NOIO))
		goto fail;

	return osd_req;

fail:
	ceph_osdc_put_request(osd_req);
	return NULL;
}

/*
 * Create a copyup osd request based on the information in the object
 * request supplied.  A copyup request has two or three osd ops, a
 * copyup method call, potentially a hint op, and a write or truncate
 * or zero op.
 */
static struct ceph_osd_request *
rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	struct ceph_snap_context *snapc;
	struct rbd_device *rbd_dev;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;
	int num_osd_ops = 3;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;
	rbd_assert(img_request);
	rbd_assert(img_request_write_test(img_request) ||
			img_request_discard_test(img_request));

	if (img_request_discard_test(img_request))
		num_osd_ops = 2;

	/* Allocate and initialize the request, for all the ops */

	snapc = img_request->snapc;
	rbd_dev = img_request->rbd_dev;
	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, num_osd_ops,
						false, GFP_NOIO);
	if (!osd_req)
		goto fail;

	osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_base_oloc.pool = rbd_dev->layout.pool_id;
	if (ceph_oid_aprintf(&osd_req->r_base_oid, GFP_NOIO, "%s",
			     obj_request->object_name))
		goto fail;

	if (ceph_osdc_alloc_messages(osd_req, GFP_NOIO))
		goto fail;

	return osd_req;

fail:
	ceph_osdc_put_request(osd_req);
	return NULL;
}


static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
	ceph_osdc_put_request(osd_req);
}

/* object_name is assumed to be a non-null pointer and NUL-terminated */

static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
						u64 offset, u64 length,
						enum obj_request_type type)
{
	struct rbd_obj_request *obj_request;
	size_t size;
	char *name;

	rbd_assert(obj_request_type_valid(type));

	size = strlen(object_name) + 1;
	name = kmalloc(size, GFP_NOIO);
	if (!name)
		return NULL;

	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
	if (!obj_request) {
		kfree(name);
		return NULL;
	}

	obj_request->object_name = memcpy(name, object_name, size);
	obj_request->offset = offset;
	obj_request->length = length;
	obj_request->flags = 0;
	obj_request->which = BAD_WHICH;
	obj_request->type = type;
	INIT_LIST_HEAD(&obj_request->links);
	init_completion(&obj_request->completion);
	kref_init(&obj_request->kref);

	dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
		offset, length, (int)type, obj_request);

	return obj_request;
}

static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	dout("%s: obj %p\n", __func__, obj_request);

	rbd_assert(obj_request->img_request == NULL);
	rbd_assert(obj_request->which == BAD_WHICH);

	if (obj_request->osd_req)
		rbd_osd_req_destroy(obj_request->osd_req);

	rbd_assert(obj_request_type_valid(obj_request->type));
	switch (obj_request->type) {
	case OBJ_REQUEST_NODATA:
		break;		/* Nothing to do */
	case OBJ_REQUEST_BIO:
		if (obj_request->bio_list)
			bio_chain_put(obj_request->bio_list);
		break;
	case OBJ_REQUEST_PAGES:
		if (obj_request->pages)
			ceph_release_page_vector(obj_request->pages,
						obj_request->page_count);
		break;
	}

	kfree(obj_request->object_name);
	obj_request->object_name = NULL;
	kmem_cache_free(rbd_obj_request_cache, obj_request);
}

/* It's OK to call this for a device with no parent */

static void rbd_spec_put(struct rbd_spec *spec);
static void rbd_dev_unparent(struct rbd_device *rbd_dev)
{
	rbd_dev_remove_parent(rbd_dev);
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;
}

/*
 * Parent image reference counting is used to determine when an
 * image's parent fields can be safely torn down--after there are no
 * more in-flight requests to the parent image.  When the last
 * reference is dropped, cleaning them up is safe.
 */
static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return;

	counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
	if (counter > 0)
		return;

	/* Last reference; clean up parent data structures */

	if (!counter)
		rbd_dev_unparent(rbd_dev);
	else
		rbd_warn(rbd_dev, "parent reference underflow");
}

/*
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or
 * false otherwise.
 */
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
	int counter = 0;

	if (!rbd_dev->parent_spec)
		return false;

	down_read(&rbd_dev->header_rwsem);
	if (rbd_dev->parent_overlap)
		counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
	up_read(&rbd_dev->header_rwsem);

	if (counter < 0)
		rbd_warn(rbd_dev, "parent reference overflow");

	return counter > 0;
}

/*
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 */
static struct rbd_img_request *rbd_img_request_create(
					struct rbd_device *rbd_dev,
					u64 offset, u64 length,
					enum obj_operation_type op_type,
					struct ceph_snap_context *snapc)
{
	struct rbd_img_request *img_request;

	img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_NOIO);
	if (!img_request)
		return NULL;

	img_request->rq = NULL;
	img_request->rbd_dev = rbd_dev;
	img_request->offset = offset;
	img_request->length = length;
	img_request->flags = 0;
	if (op_type == OBJ_OP_DISCARD) {
		img_request_discard_set(img_request);
		img_request->snapc = snapc;
	} else if (op_type == OBJ_OP_WRITE) {
		img_request_write_set(img_request);
		img_request->snapc = snapc;
	} else {
		img_request->snap_id = rbd_dev->spec->snap_id;
	}
	if (rbd_dev_parent_get(rbd_dev))
		img_request_layered_set(img_request);
	spin_lock_init(&img_request->completion_lock);
	img_request->next_completion = 0;
	img_request->callback = NULL;
	img_request->result = 0;
	img_request->obj_request_count = 0;
	INIT_LIST_HEAD(&img_request->obj_requests);
	kref_init(&img_request->kref);

	dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
		obj_op_name(op_type), offset, length, img_request);

	return img_request;
}

static void rbd_img_request_destroy(struct kref *kref)
{
	struct rbd_img_request *img_request;
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	img_request = container_of(kref, struct rbd_img_request, kref);

	dout("%s: img %p\n", __func__, img_request);

	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);
	rbd_assert(img_request->obj_request_count == 0);

	if (img_request_layered_test(img_request)) {
		img_request_layered_clear(img_request);
		rbd_dev_parent_put(img_request->rbd_dev);
	}

	if (img_request_write_test(img_request) ||
		img_request_discard_test(img_request))
		ceph_put_snap_context(img_request->snapc);

	kmem_cache_free(rbd_img_request_cache, img_request);
}

static struct rbd_img_request *rbd_parent_request_create(
					struct rbd_obj_request *obj_request,
					u64 img_offset, u64 length)
{
	struct rbd_img_request *parent_request;
	struct rbd_device *rbd_dev;

	rbd_assert(obj_request->img_request);
	rbd_dev = obj_request->img_request->rbd_dev;

	parent_request = rbd_img_request_create(rbd_dev->parent, img_offset,
						length, OBJ_OP_READ, NULL);
	if (!parent_request)
		return NULL;

	img_request_child_set(parent_request);
	rbd_obj_request_get(obj_request);
	parent_request->obj_request = obj_request;

	return parent_request;
}

static void rbd_parent_request_destroy(struct kref *kref)
{
	struct rbd_img_request *parent_request;
	struct rbd_obj_request *orig_request;

	parent_request = container_of(kref, struct rbd_img_request, kref);
	orig_request = parent_request->obj_request;

	parent_request->obj_request = NULL;
	rbd_obj_request_put(orig_request);
	img_request_child_clear(parent_request);

	rbd_img_request_destroy(kref);
}

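/*
 * Finish the block-layer accounting for one object request.  On error
 * the entire object request length is reported.  Returns true if more
 * object requests in the image request remain incomplete.
 */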
static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	unsigned int xferred;
	int result;
	bool more;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;

	rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
	xferred = (unsigned int)obj_request->xferred;
	result = obj_request->result;
	if (result) {
		struct rbd_device *rbd_dev = img_request->rbd_dev;
		enum obj_operation_type op_type;

		if (img_request_discard_test(img_request))
			op_type = OBJ_OP_DISCARD;
		else if (img_request_write_test(img_request))
			op_type = OBJ_OP_WRITE;
		else
			op_type = OBJ_OP_READ;

		rbd_warn(rbd_dev, "%s %llx at %llx (%llx)",
			obj_op_name(op_type), obj_request->length,
			obj_request->img_offset, obj_request->offset);
		rbd_warn(rbd_dev, "  result %d xferred %x",
			result, xferred);
		if (!img_request->result)
			img_request->result = result;
		/*
		 * Need to end I/O on the entire obj_request worth of
		 * bytes in case of error.
		 */
		xferred = obj_request->length;
	}

	/* Image object requests don't own their page array */

	if (obj_request->type == OBJ_REQUEST_PAGES) {
		obj_request->pages = NULL;
		obj_request->page_count = 0;
	}

	if (img_request_child_test(img_request)) {
		rbd_assert(img_request->obj_request != NULL);
		more = obj_request->which < img_request->obj_request_count - 1;
	} else {
		rbd_assert(img_request->rq != NULL);

		more = blk_update_request(img_request->rq, result, xferred);
		if (!more)
			__blk_mq_end_request(img_request->rq, result);
	}

	return more;
}

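/*
 * Per-object completion callback for image requests.  Object requests
 * complete in order, so walk forward from the next expected completion
 * and finish the image request once its last object request is done.
 */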
static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	u32 which = obj_request->which;
	bool more = true;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;

	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
	rbd_assert(img_request != NULL);
	rbd_assert(img_request->obj_request_count > 0);
	rbd_assert(which != BAD_WHICH);
	rbd_assert(which < img_request->obj_request_count);

	spin_lock_irq(&img_request->completion_lock);
	if (which != img_request->next_completion)
		goto out;

	for_each_obj_request_from(img_request, obj_request) {
		rbd_assert(more);
		rbd_assert(which < img_request->obj_request_count);

		if (!obj_request_done_test(obj_request))
			break;
		more = rbd_img_obj_end_request(obj_request);
		which++;
	}

	rbd_assert(more ^ (which == img_request->obj_request_count));
	img_request->next_completion = which;
out:
	spin_unlock_irq(&img_request->completion_lock);
	rbd_img_request_put(img_request);

	if (!more)
		rbd_img_request_complete(img_request);
}

/*
 * Add individual osd ops to the given ceph_osd_request and prepare
 * them for submission. num_ops is the current number of
 * osd operations already added to the object request.
 */
static void rbd_img_obj_request_fill(struct rbd_obj_request *obj_request,
				struct ceph_osd_request *osd_request,
				enum obj_operation_type op_type,
				unsigned int num_ops)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct rbd_device *rbd_dev = img_request->rbd_dev;
	u64 object_size = rbd_obj_bytes(&rbd_dev->header);
	u64 offset = obj_request->offset;
	u64 length = obj_request->length;
	u64 img_end;
	u16 opcode;

	if (op_type == OBJ_OP_DISCARD) {
		if (!offset && length == object_size &&
		    (!img_request_layered_test(img_request) ||
		     !obj_request_overlaps_parent(obj_request))) {
			opcode = CEPH_OSD_OP_DELETE;
		} else if ((offset + length == object_size)) {
			opcode = CEPH_OSD_OP_TRUNCATE;
		} else {
			down_read(&rbd_dev->header_rwsem);
			img_end = rbd_dev->header.image_size;
			up_read(&rbd_dev->header_rwsem);

			if (obj_request->img_offset + length == img_end)
				opcode = CEPH_OSD_OP_TRUNCATE;
			else
				opcode = CEPH_OSD_OP_ZERO;
		}
	} else if (op_type == OBJ_OP_WRITE) {
		if (!offset && length == object_size)
			opcode = CEPH_OSD_OP_WRITEFULL;
		else
			opcode = CEPH_OSD_OP_WRITE;
		osd_req_op_alloc_hint_init(osd_request, num_ops,
					object_size, object_size);
		num_ops++;
	} else {
		opcode = CEPH_OSD_OP_READ;
	}

	if (opcode == CEPH_OSD_OP_DELETE)
		osd_req_op_init(osd_request, num_ops, opcode, 0);
	else
		osd_req_op_extent_init(osd_request, num_ops, opcode,
				       offset, length, 0, 0);

	if (obj_request->type == OBJ_REQUEST_BIO)
		osd_req_op_extent_osd_data_bio(osd_request, num_ops,
					obj_request->bio_list, length);
	else if (obj_request->type == OBJ_REQUEST_PAGES)
		osd_req_op_extent_osd_data_pages(osd_request, num_ops,
					obj_request->pages, length,
					offset & ~PAGE_MASK, false, false);

	/* Discards are also writes */
	if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
		rbd_osd_req_format_write(obj_request);
	else
		rbd_osd_req_format_read(obj_request);
}

/*
 * Split up an image request into one or more object requests, each
 * to a different object.  The "type" parameter indicates whether
 * "data_desc" is the pointer to the head of a list of bio
 * structures, or the base of a page array.  In either case this
 * function assumes data_desc describes memory sufficient to hold
 * all data described by the image request.
 */
static int rbd_img_request_fill(struct rbd_img_request *img_request,
					enum obj_request_type type,
					void *data_desc)
{
	struct rbd_device *rbd_dev = img_request->rbd_dev;
	struct rbd_obj_request *obj_request = NULL;
	struct rbd_obj_request *next_obj_request;
	struct bio *bio_list = NULL;
	unsigned int bio_offset = 0;
	struct page **pages = NULL;
	enum obj_operation_type op_type;
	u64 img_offset;
	u64 resid;

	dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
		(int)type, data_desc);

	img_offset = img_request->offset;
	resid = img_request->length;
	rbd_assert(resid > 0);
	op_type = rbd_img_request_op_type(img_request);

	if (type == OBJ_REQUEST_BIO) {
		bio_list = data_desc;
		rbd_assert(img_offset ==
			   bio_list->bi_iter.bi_sector << SECTOR_SHIFT);
	} else if (type == OBJ_REQUEST_PAGES) {
		pages = data_desc;
	}

	while (resid) {
		struct ceph_osd_request *osd_req;
		const char *object_name;
		u64 offset;
		u64 length;

		object_name = rbd_segment_name(rbd_dev, img_offset);
		if (!object_name)
			goto out_unwind;
		offset = rbd_segment_offset(rbd_dev, img_offset);
		length = rbd_segment_length(rbd_dev, img_offset, resid);
		obj_request = rbd_obj_request_create(object_name,
						offset, length, type);
		/* object request has its own copy of the object name */
		rbd_segment_name_free(object_name);
		if (!obj_request)
			goto out_unwind;

		/*
		 * set obj_request->img_request before creating the
		 * osd_request so that it gets the right snapc
		 */
		rbd_img_obj_request_add(img_request, obj_request);

		if (type == OBJ_REQUEST_BIO) {
			unsigned int clone_size;

			rbd_assert(length <= (u64)UINT_MAX);
			clone_size = (unsigned int)length;
			obj_request->bio_list =
					bio_chain_clone_range(&bio_list,
								&bio_offset,
								clone_size,
								GFP_NOIO);
			if (!obj_request->bio_list)
				goto out_unwind;
		} else if (type == OBJ_REQUEST_PAGES) {
			unsigned int page_count;

			obj_request->pages = pages;
			page_count = (u32)calc_pages_for(offset, length);
			obj_request->page_count = page_count;
			if ((offset + length) & ~PAGE_MASK)
				page_count--;	/* more on last page */
			pages += page_count;
		}

		osd_req = rbd_osd_req_create(rbd_dev, op_type,
					(op_type == OBJ_OP_WRITE) ? 2 : 1,
					obj_request);
		if (!osd_req)
			goto out_unwind;

		obj_request->osd_req = osd_req;
		obj_request->callback = rbd_img_obj_callback;
		obj_request->img_offset = img_offset;

		rbd_img_obj_request_fill(obj_request, osd_req, op_type, 0);

		rbd_img_request_get(img_request);

		img_offset += length;
		resid -= length;
	}

	return 0;

out_unwind:
	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);

	return -ENOMEM;
}

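/*
 * Called when the copyup method call completes.  Releases the pages
 * that held the parent data and accounts the full transfer length.
 */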
static void
rbd_osd_copyup_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	struct rbd_device *rbd_dev;
	struct page **pages;
	u32 page_count;

	dout("%s: obj %p\n", __func__, obj_request);

	rbd_assert(obj_request->type == OBJ_REQUEST_BIO ||
		obj_request->type == OBJ_REQUEST_NODATA);
	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;
	rbd_assert(img_request);

	rbd_dev = img_request->rbd_dev;
	rbd_assert(rbd_dev);

	pages = obj_request->copyup_pages;
	rbd_assert(pages != NULL);
	obj_request->copyup_pages = NULL;
	page_count = obj_request->copyup_page_count;
	rbd_assert(page_count);
	obj_request->copyup_page_count = 0;
	ceph_release_page_vector(pages, page_count);

	/*
	 * We want the transfer count to reflect the size of the
	 * original write request.  There is no such thing as a
	 * successful short write, so if the request was successful
	 * we can just set it to the originally-requested length.
	 */
	if (!obj_request->result)
		obj_request->xferred = obj_request->length;

	obj_request_done_set(obj_request);
}

static void
rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
{
	struct rbd_obj_request *orig_request;
	struct ceph_osd_request *osd_req;
	struct ceph_osd_client *osdc;
	struct rbd_device *rbd_dev;
	struct page **pages;
	enum obj_operation_type op_type;
	u32 page_count;
	int img_result;
	u64 parent_length;

	rbd_assert(img_request_child_test(img_request));

	/* First get what we need from the image request */

	pages = img_request->copyup_pages;
	rbd_assert(pages != NULL);
	img_request->copyup_pages = NULL;
	page_count = img_request->copyup_page_count;
	rbd_assert(page_count);
	img_request->copyup_page_count = 0;

	orig_request = img_request->obj_request;
	rbd_assert(orig_request != NULL);
	rbd_assert(obj_request_type_valid(orig_request->type));
	img_result = img_request->result;
	parent_length = img_request->length;
	rbd_assert(parent_length == img_request->xferred);
	rbd_img_request_put(img_request);

	rbd_assert(orig_request->img_request);
	rbd_dev = orig_request->img_request->rbd_dev;
	rbd_assert(rbd_dev);

	/*
	 * If the overlap has become 0 (most likely because the
	 * image has been flattened) we need to free the pages
	 * and re-submit the original write request.
	 */
	if (!rbd_dev->parent_overlap) {
		struct ceph_osd_client *osdc;

		ceph_release_page_vector(pages, page_count);
		osdc = &rbd_dev->rbd_client->client->osdc;
		img_result = rbd_obj_request_submit(osdc, orig_request);
		if (!img_result)
			return;
	}

	if (img_result)
		goto out_err;

	/*
	 * The original osd request is of no use to us any more.
	 * We need a new one that can hold the three ops in a copyup
	 * request.  Allocate the new copyup osd request for the
	 * original request, and release the old one.
	 */
	img_result = -ENOMEM;
	osd_req = rbd_osd_req_create_copyup(orig_request);
	if (!osd_req)
		goto out_err;
	rbd_osd_req_destroy(orig_request->osd_req);
	orig_request->osd_req = osd_req;
	orig_request->copyup_pages = pages;
	orig_request->copyup_page_count = page_count;

	/* Initialize the copyup op */

	osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
	osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
						false, false);

	/* Add the other op(s) */

	op_type = rbd_img_request_op_type(orig_request->img_request);
	rbd_img_obj_request_fill(orig_request, osd_req, op_type, 1);

	/* All set, send it off. */

	osdc = &rbd_dev->rbd_client->client->osdc;
	img_result = rbd_obj_request_submit(osdc, orig_request);
	if (!img_result)
		return;
out_err:
	/* Record the error code and complete the request */

	orig_request->result = img_result;
	orig_request->xferred = 0;
	obj_request_done_set(orig_request);
	rbd_obj_request_complete(orig_request);
}

/*
 * Read from the parent image the range of data that covers the
 * entire target of the given object request.  This is used for
 * satisfying a layered image write request when the target of an
 * object request from the image request does not exist.
 *
 * A page array big enough to hold the returned data is allocated
 * and supplied to rbd_img_request_fill() as the "data descriptor."
 * When the read completes, this page array will be transferred to
 * the original object request for the copyup operation.
 *
 * If an error occurs, record it as the result of the original
 * object request and mark it done so it gets completed.
 */
static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = NULL;
	struct rbd_img_request *parent_request = NULL;
	struct rbd_device *rbd_dev;
	u64 img_offset;
	u64 length;
	struct page **pages = NULL;
	u32 page_count;
	int result;

	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request_type_valid(obj_request->type));

	img_request = obj_request->img_request;
	rbd_assert(img_request != NULL);
	rbd_dev = img_request->rbd_dev;
	rbd_assert(rbd_dev->parent != NULL);

	/*
	 * Determine the byte range covered by the object in the
	 * child image to which the original request was to be sent.
	 */
	img_offset = obj_request->img_offset - obj_request->offset;
	length = (u64)1 << rbd_dev->header.obj_order;

	/*
	 * There is no defined parent data beyond the parent
	 * overlap, so limit what we read at that boundary if
	 * necessary.
	 */
	if (img_offset + length > rbd_dev->parent_overlap) {
		rbd_assert(img_offset < rbd_dev->parent_overlap);
		length = rbd_dev->parent_overlap - img_offset;
	}

	/*
	 * Allocate a page array big enough to receive the data read
	 * from the parent.
	 */
	page_count = (u32)calc_pages_for(0, length);
	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
	if (IS_ERR(pages)) {
		result = PTR_ERR(pages);
		pages = NULL;
		goto out_err;
	}

	result = -ENOMEM;
	parent_request = rbd_parent_request_create(obj_request,
						img_offset, length);
	if (!parent_request)
		goto out_err;

	result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
	if (result)
		goto out_err;
	parent_request->copyup_pages = pages;
	parent_request->copyup_page_count = page_count;

	parent_request->callback = rbd_img_obj_parent_read_full_callback;
	result = rbd_img_request_submit(parent_request);
	if (!result)
		return 0;

	parent_request->copyup_pages = NULL;
	parent_request->copyup_page_count = 0;
	parent_request->obj_request = NULL;
	rbd_obj_request_put(obj_request);
out_err:
	if (pages)
		ceph_release_page_vector(pages, page_count);
	if (parent_request)
		rbd_img_request_put(parent_request);
	obj_request->result = result;
	obj_request->xferred = 0;
	obj_request_done_set(obj_request);

	return result;
}

static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_obj_request *orig_request;
	struct rbd_device *rbd_dev;
	int result;

	rbd_assert(!obj_request_img_data_test(obj_request));

	/*
	 * All we need from the object request is the original
	 * request and the result of the STAT op.  Grab those, then
	 * we're done with the request.
	 */
	orig_request = obj_request->obj_request;
	obj_request->obj_request = NULL;
	rbd_obj_request_put(orig_request);
	rbd_assert(orig_request);
	rbd_assert(orig_request->img_request);

	result = obj_request->result;
	obj_request->result = 0;

	dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
		obj_request, orig_request, result,
		obj_request->xferred, obj_request->length);
	rbd_obj_request_put(obj_request);

	/*
	 * If the overlap has become 0 (most likely because the
	 * image has been flattened) we need to free the pages
	 * and re-submit the original write request.
	 */
	rbd_dev = orig_request->img_request->rbd_dev;
	if (!rbd_dev->parent_overlap) {
		struct ceph_osd_client *osdc;

		osdc = &rbd_dev->rbd_client->client->osdc;
		result = rbd_obj_request_submit(osdc, orig_request);
		if (!result)
			return;
	}

	/*
	 * Our only purpose here is to determine whether the object
	 * exists, and we don't want to treat the non-existence as
	 * an error.  If something else comes back, transfer the
	 * error to the original request and complete it now.
	 */
	if (!result) {
		obj_request_existence_set(orig_request, true);
	} else if (result == -ENOENT) {
		obj_request_existence_set(orig_request, false);
	} else if (result) {
		orig_request->result = result;
		goto out;
	}

	/*
	 * Resubmit the original request now that we have recorded
	 * whether the target object exists.
	 */
	orig_request->result = rbd_img_obj_request_submit(orig_request);
out:
	if (orig_request->result)
		rbd_obj_request_complete(orig_request);
}

static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
{
	struct rbd_obj_request *stat_request;
	struct rbd_device *rbd_dev;
	struct ceph_osd_client *osdc;
	struct page **pages = NULL;
	u32 page_count;
	size_t size;
	int ret;

	/*
	 * The response data for a STAT call consists of:
	 *     le64 length;
	 *     struct {
	 *         le32 tv_sec;
	 *         le32 tv_nsec;
	 *     } mtime;
	 */
	size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
	page_count = (u32)calc_pages_for(0, size);
	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = -ENOMEM;
	stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
							OBJ_REQUEST_PAGES);
	if (!stat_request)
		goto out;

	rbd_obj_request_get(obj_request);
	stat_request->obj_request = obj_request;
	stat_request->pages = pages;
	stat_request->page_count = page_count;

	rbd_assert(obj_request->img_request);
	rbd_dev = obj_request->img_request->rbd_dev;
	stat_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
						   stat_request);
	if (!stat_request->osd_req)
		goto out;
	stat_request->callback = rbd_img_obj_exists_callback;

	osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT, 0);
	osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
					false, false);
	rbd_osd_req_format_read(stat_request);

	osdc = &rbd_dev->rbd_client->client->osdc;
	ret = rbd_obj_request_submit(osdc, stat_request);
out:
	if (ret)
		rbd_obj_request_put(obj_request);

	return ret;
}

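/*
 * Returns true if an object request can be submitted directly to the
 * osd, i.e. no parent (copyup) handling is required first.
 */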
static bool img_obj_request_simple(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	struct rbd_device *rbd_dev;

	rbd_assert(obj_request_img_data_test(obj_request));

	img_request = obj_request->img_request;
	rbd_assert(img_request);
	rbd_dev = img_request->rbd_dev;

	/* Reads */
	if (!img_request_write_test(img_request) &&
	    !img_request_discard_test(img_request))
		return true;

	/* Non-layered writes */
	if (!img_request_layered_test(img_request))
		return true;

	/*
	 * Layered writes outside of the parent overlap range don't
	 * share any data with the parent.
	 */
	if (!obj_request_overlaps_parent(obj_request))
		return true;

	/*
	 * Entire-object layered writes - we will overwrite whatever
	 * parent data there is anyway.
	 */
	if (!obj_request->offset &&
	    obj_request->length == rbd_obj_bytes(&rbd_dev->header))
		return true;

	/*
	 * If the object is known to already exist, its parent data has
	 * already been copied.
	 */
	if (obj_request_known_test(obj_request) &&
	    obj_request_exists_test(obj_request))
		return true;

	return false;
}

static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
{
	if (img_obj_request_simple(obj_request)) {
		struct rbd_device *rbd_dev;
		struct ceph_osd_client *osdc;

		rbd_dev = obj_request->img_request->rbd_dev;
		osdc = &rbd_dev->rbd_client->client->osdc;

		return rbd_obj_request_submit(osdc, obj_request);
	}

	/*
	 * It's a layered write.  The target object might exist but
	 * we may not know that yet.  If we know it doesn't exist,
	 * start by reading the data for the full target object from
	 * the parent so we can use it for a copyup to the target.
	 */
	if (obj_request_known_test(obj_request))
		return rbd_img_obj_parent_read_full(obj_request);

	/* We don't know whether the target exists.  Go find out. */

	return rbd_img_obj_exists_submit(obj_request);
}

static int rbd_img_request_submit(struct rbd_img_request *img_request)
{
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;
	int ret = 0;

	dout("%s: img %p\n", __func__, img_request);

	rbd_img_request_get(img_request);
	for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
		ret = rbd_img_obj_request_submit(obj_request);
		if (ret)
			goto out_put_ireq;
	}

out_put_ireq:
	rbd_img_request_put(img_request);
	return ret;
}

static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
{
	struct rbd_obj_request *obj_request;
	struct rbd_device *rbd_dev;
	u64 obj_end;
	u64 img_xferred;
	int img_result;

	rbd_assert(img_request_child_test(img_request));

	/* First get what we need from the image request and release it */

	obj_request = img_request->obj_request;
	img_xferred = img_request->xferred;
	img_result = img_request->result;
	rbd_img_request_put(img_request);

	/*
	 * If the overlap has become 0 (most likely because the
	 * image has been flattened) we need to re-submit the
	 * original request.
	 */
	rbd_assert(obj_request);
	rbd_assert(obj_request->img_request);
	rbd_dev = obj_request->img_request->rbd_dev;
	if (!rbd_dev->parent_overlap) {
		struct ceph_osd_client *osdc;

		osdc = &rbd_dev->rbd_client->client->osdc;
		img_result = rbd_obj_request_submit(osdc, obj_request);
		if (!img_result)
			return;
	}

	obj_request->result = img_result;
	if (obj_request->result)
		goto out;

	/*
	 * We need to zero anything beyond the parent overlap
	 * boundary.  Since rbd_img_obj_request_read_callback()
	 * will zero anything beyond the end of a short read, an
	 * easy way to do this is to pretend the data from the
	 * parent came up short--ending at the overlap boundary.
	 */
	rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
	obj_end = obj_request->img_offset + obj_request->length;
	if (obj_end > rbd_dev->parent_overlap) {
		u64 xferred = 0;

		if (obj_request->img_offset < rbd_dev->parent_overlap)
			xferred = rbd_dev->parent_overlap -
					obj_request->img_offset;

		obj_request->xferred = min(img_xferred, xferred);
	} else {
		obj_request->xferred = img_xferred;
	}
out:
	rbd_img_obj_request_read_callback(obj_request);
	rbd_obj_request_complete(obj_request);
}

static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	int result;

	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request->img_request != NULL);
	rbd_assert(obj_request->result == (s32) -ENOENT);
	rbd_assert(obj_request_type_valid(obj_request->type));

	/* rbd_read_finish(obj_request, obj_request->length); */
	img_request = rbd_parent_request_create(obj_request,
						obj_request->img_offset,
						obj_request->length);
	result = -ENOMEM;
	if (!img_request)
		goto out_err;

	if (obj_request->type == OBJ_REQUEST_BIO)
		result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
						obj_request->bio_list);
	else
		result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
						obj_request->pages);
	if (result)
		goto out_err;

	img_request->callback = rbd_img_parent_read_callback;
	result = rbd_img_request_submit(img_request);
	if (result)
		goto out_err;

	return;
out_err:
	if (img_request)
		rbd_img_request_put(img_request);
	obj_request->result = result;
	obj_request->xferred = 0;
	obj_request_done_set(obj_request);
}

static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev);
static void __rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev);

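/* Handler for notifications on the rbd image header object */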
static void rbd_watch_cb(void *arg, u64 notify_id, u64 cookie,
			 u64 notifier_id, void *data, size_t data_len)
{
	struct rbd_device *rbd_dev = arg;
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	int ret;

	dout("%s rbd_dev %p cookie %llu notify_id %llu\n", __func__, rbd_dev,
	     cookie, notify_id);

	/*
	 * Until adequate refresh error handling is in place, there is
	 * not much we can do here, except warn.
	 *
	 * See http://tracker.ceph.com/issues/5040
	 */
	ret = rbd_dev_refresh(rbd_dev);
	if (ret)
		rbd_warn(rbd_dev, "refresh failed: %d", ret);

	ret = ceph_osdc_notify_ack(osdc, &rbd_dev->header_oid,
				   &rbd_dev->header_oloc, notify_id, cookie,
				   NULL, 0);
	if (ret)
		rbd_warn(rbd_dev, "notify_ack ret %d", ret);
}

static void rbd_watch_errcb(void *arg, u64 cookie, int err)
{
	struct rbd_device *rbd_dev = arg;
	int ret;

	rbd_warn(rbd_dev, "encountered watch error: %d", err);

	__rbd_dev_header_unwatch_sync(rbd_dev);

	ret = rbd_dev_header_watch_sync(rbd_dev);
	if (ret) {
		rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
		return;
	}

	ret = rbd_dev_refresh(rbd_dev);
	if (ret)
		rbd_warn(rbd_dev, "reregistration refresh failed: %d", ret);
}

/*
 * Initiate a watch request, synchronously.
 */
static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct ceph_osd_linger_request *handle;

	rbd_assert(!rbd_dev->watch_handle);

	handle = ceph_osdc_watch(osdc, &rbd_dev->header_oid,
				 &rbd_dev->header_oloc, rbd_watch_cb,
				 rbd_watch_errcb, rbd_dev);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	rbd_dev->watch_handle = handle;
	return 0;
}

static void __rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	int ret;

	if (!rbd_dev->watch_handle)
		return;

	ret = ceph_osdc_unwatch(osdc, rbd_dev->watch_handle);
	if (ret)
		rbd_warn(rbd_dev, "failed to unwatch: %d", ret);

	rbd_dev->watch_handle = NULL;
}

/*
 * Tear down a watch request, synchronously.
 */
static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
{
	__rbd_dev_header_unwatch_sync(rbd_dev);

	dout("%s flushing notifies\n", __func__);
	ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
}

/*
 * Synchronous osd object method call.  Returns the number of bytes
 * returned in the inbound buffer, or a negative error code.
 */
static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
			     const char *object_name,
			     const char *class_name,
			     const char *method_name,
			     const void *outbound,
			     size_t outbound_size,
			     void *inbound,
			     size_t inbound_size)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_obj_request *obj_request;
	struct page **pages;
	u32 page_count;
	int ret;

	/*
	 * Method calls are ultimately read operations.  The result
	 * should be placed into the inbound buffer provided.  They
	 * also supply outbound data--parameters for the object
	 * method.  Currently if this is present it will be a
	 * snapshot id.
	 */
	page_count = (u32)calc_pages_for(0, inbound_size);
	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = -ENOMEM;
	obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
							OBJ_REQUEST_PAGES);
	if (!obj_request)
		goto out;

	obj_request->pages = pages;
	obj_request->page_count = page_count;

	obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
						  obj_request);
	if (!obj_request->osd_req)
		goto out;

	osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
					class_name, method_name);
	if (outbound_size) {
		struct ceph_pagelist *pagelist;

		pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
		if (!pagelist)
			goto out;

		ceph_pagelist_init(pagelist);
		ceph_pagelist_append(pagelist, outbound, outbound_size);
		osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
						pagelist);
	}
	osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
					obj_request->pages, inbound_size,
					0, false, false);
	rbd_osd_req_format_read(obj_request);

	ret = rbd_obj_request_submit(osdc, obj_request);
	if (ret)
		goto out;
	ret = rbd_obj_request_wait(obj_request);
	if (ret)
		goto out;

	ret = obj_request->result;
	if (ret < 0)
		goto out;

	rbd_assert(obj_request->xferred < (u64)INT_MAX);
	ret = (int)obj_request->xferred;
	ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
out:
	if (obj_request)
		rbd_obj_request_put(obj_request);
	else
		ceph_release_page_vector(pages, page_count);

	return ret;
}

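/*
 * Process one block layer request.  Runs in workqueue (process)
 * context, so it is allowed to sleep.
 */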
static void rbd_queue_workfn(struct work_struct *work)
{
	struct request *rq = blk_mq_rq_from_pdu(work);
	struct rbd_device *rbd_dev = rq->q->queuedata;
	struct rbd_img_request *img_request;
	struct ceph_snap_context *snapc = NULL;
	u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
	u64 length = blk_rq_bytes(rq);
	enum obj_operation_type op_type;
	u64 mapping_size;
	int result;

	if (rq->cmd_type != REQ_TYPE_FS) {
		dout("%s: non-fs request type %d\n", __func__,
			(int) rq->cmd_type);
		result = -EIO;
		goto err;
	}

	if (req_op(rq) == REQ_OP_DISCARD)
		op_type = OBJ_OP_DISCARD;
	else if (req_op(rq) == REQ_OP_WRITE)
		op_type = OBJ_OP_WRITE;
	else
		op_type = OBJ_OP_READ;

	/* Ignore/skip any zero-length requests */

	if (!length) {
		dout("%s: zero-length request\n", __func__);
		result = 0;
		goto err_rq;
	}

	/* Only reads are allowed to a read-only device */

	if (op_type != OBJ_OP_READ) {
		if (rbd_dev->mapping.read_only) {
			result = -EROFS;
			goto err_rq;
		}
		rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
	}

	/*
	 * Quit early if the mapped snapshot no longer exists.  It's
	 * still possible the snapshot will have disappeared by the
	 * time our request arrives at the osd, but there's no sense in
	 * sending it if we already know.
	 */
	if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
		dout("request for non-existent snapshot");
		rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
		result = -ENXIO;
		goto err_rq;
	}

	if (offset && length > U64_MAX - offset + 1) {
		rbd_warn(rbd_dev, "bad request range (%llu~%llu)", offset,
			 length);
		result = -EINVAL;
		goto err_rq;	/* Shouldn't happen */
	}

	blk_mq_start_request(rq);

	down_read(&rbd_dev->header_rwsem);
	mapping_size = rbd_dev->mapping.size;
	if (op_type != OBJ_OP_READ) {
		snapc = rbd_dev->header.snapc;
		ceph_get_snap_context(snapc);
	}
	up_read(&rbd_dev->header_rwsem);

	if (offset + length > mapping_size) {
		rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
			 length, mapping_size);
		result = -EIO;
		goto err_rq;
	}

	img_request = rbd_img_request_create(rbd_dev, offset, length, op_type,
					     snapc);
	if (!img_request) {
		result = -ENOMEM;
		goto err_rq;
	}
	img_request->rq = rq;
	snapc = NULL; /* img_request consumes a ref */

	if (op_type == OBJ_OP_DISCARD)
		result = rbd_img_request_fill(img_request, OBJ_REQUEST_NODATA,
					      NULL);
	else
		result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
					      rq->bio);
	if (result)
		goto err_img_request;

	result = rbd_img_request_submit(img_request);
	if (result)
		goto err_img_request;

	return;

err_img_request:
	rbd_img_request_put(img_request);
err_rq:
	if (result)
		rbd_warn(rbd_dev, "%s %llx at %llx result %d",
			 obj_op_name(op_type), length, offset, result);
	ceph_put_snap_context(snapc);
err:
	blk_mq_end_request(rq, result);
}

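/* blk-mq ->queue_rq: hand the request off to the rbd workqueue */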
static int rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct work_struct *work = blk_mq_rq_to_pdu(rq);

	queue_work(rbd_wq, work);
	return BLK_MQ_RQ_QUEUE_OK;
}

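/* Tear down the gendisk, its request queue and the blk-mq tag set. */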
static void rbd_free_disk(struct rbd_device *rbd_dev)
{
	struct gendisk *disk = rbd_dev->disk;

	if (!disk)
		return;

	rbd_dev->disk = NULL;
	if (disk->flags & GENHD_FL_UP) {
		del_gendisk(disk);
		if (disk->queue)
			blk_cleanup_queue(disk->queue);
		blk_mq_free_tag_set(&rbd_dev->tag_set);
	}
	put_disk(disk);
}

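/*
 * Synchronously read @length bytes starting at @offset from the named
 * object into @buf.  Returns the number of bytes read on success, or
 * a negative errno otherwise.
 */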
static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
				const char *object_name,
				u64 offset, u64 length, void *buf)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_obj_request *obj_request;
	struct page **pages = NULL;
	u32 page_count;
	size_t size;
	int ret;

	page_count = (u32) calc_pages_for(offset, length);
	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = -ENOMEM;
	obj_request = rbd_obj_request_create(object_name, offset, length,
							OBJ_REQUEST_PAGES);
	if (!obj_request)
		goto out;

	obj_request->pages = pages;
	obj_request->page_count = page_count;

	obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
						  obj_request);
	if (!obj_request->osd_req)
		goto out;

	osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
					offset, length, 0, 0);
	osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
					obj_request->pages,
					obj_request->length,
					obj_request->offset & ~PAGE_MASK,
					false, false);
	rbd_osd_req_format_read(obj_request);

	ret = rbd_obj_request_submit(osdc, obj_request);
	if (ret)
		goto out;
	ret = rbd_obj_request_wait(obj_request);
	if (ret)
		goto out;

	ret = obj_request->result;
	if (ret < 0)
		goto out;

	rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
	size = (size_t) obj_request->xferred;
	ceph_copy_from_page_vector(pages, buf, 0, size);
	rbd_assert(size <= (size_t)INT_MAX);
	ret = (int)size;
out:
	if (obj_request)
		rbd_obj_request_put(obj_request);
	else
		ceph_release_page_vector(pages, page_count);

	return ret;
}

/*
 * Read the complete header for the given rbd device.  On successful
 * return, the rbd_dev->header field will contain up-to-date
 * information about the image.
 */
static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
{
	struct rbd_image_header_ondisk *ondisk = NULL;
	u32 snap_count = 0;
	u64 names_size = 0;
	u32 want_count;
	int ret;

	/*
	 * The complete header will include an array of its 64-bit
	 * snapshot ids, followed by the names of those snapshots as
	 * a contiguous block of NUL-terminated strings.  Note that
	 * the number of snapshots could change by the time we read
	 * it in, in which case we re-read it.
	 */
	do {
		size_t size;

		kfree(ondisk);

		size = sizeof (*ondisk);
		size += snap_count * sizeof (struct rbd_image_snap_ondisk);
		size += names_size;
		ondisk = kmalloc(size, GFP_KERNEL);
		if (!ondisk)
			return -ENOMEM;

		ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_oid.name,
				       0, size, ondisk);
		if (ret < 0)
			goto out;
		if ((size_t)ret < size) {
			ret = -ENXIO;
			rbd_warn(rbd_dev, "short header read (want %zd got %d)",
				size, ret);
			goto out;
		}
		if (!rbd_dev_ondisk_valid(ondisk)) {
			ret = -ENXIO;
			rbd_warn(rbd_dev, "invalid header");
			goto out;
		}

		names_size = le64_to_cpu(ondisk->snap_names_len);
		want_count = snap_count;
		snap_count = le32_to_cpu(ondisk->snap_count);
	} while (snap_count != want_count);

	ret = rbd_header_from_disk(rbd_dev, ondisk);
out:
	kfree(ondisk);

	return ret;
}

/*
 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
 * has disappeared from the (just updated) snapshot context.
 */
static void rbd_exists_validate(struct rbd_device *rbd_dev)
{
	u64 snap_id;

	if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
		return;

	snap_id = rbd_dev->spec->snap_id;
	if (snap_id == CEPH_NOSNAP)
		return;

	if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
		clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
}

static void rbd_dev_update_size(struct rbd_device *rbd_dev)
{
	sector_t size;

	/*
	 * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
	 * try to update its size.  If REMOVING is set, updating size
	 * is just useless work since the device can't be opened.
	 */
	if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
	    !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
		size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
		dout("setting size to %llu sectors", (unsigned long long)size);
		set_capacity(rbd_dev->disk, size);
		revalidate_disk(rbd_dev->disk);
	}
}

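/*
 * Re-read the image header (and parent info, if any) and propagate a
 * changed mapping size to the block layer.
 */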
static int rbd_dev_refresh(struct rbd_device *rbd_dev)
{
	u64 mapping_size;
	int ret;

	down_write(&rbd_dev->header_rwsem);
	mapping_size = rbd_dev->mapping.size;

	ret = rbd_dev_header_info(rbd_dev);
	if (ret)
		goto out;

	/*
	 * If there is a parent, see if it has disappeared due to the
	 * mapped image getting flattened.
	 */
	if (rbd_dev->parent) {
		ret = rbd_dev_v2_parent_info(rbd_dev);
		if (ret)
			goto out;
	}

	if (rbd_dev->spec->snap_id == CEPH_NOSNAP) {
		rbd_dev->mapping.size = rbd_dev->header.image_size;
	} else {
		/* validate mapped snapshot's EXISTS flag */
		rbd_exists_validate(rbd_dev);
	}

out:
	up_write(&rbd_dev->header_rwsem);
	if (!ret && mapping_size != rbd_dev->mapping.size)
		rbd_dev_update_size(rbd_dev);

	return ret;
}

static int rbd_init_request(void *data, struct request *rq,
		unsigned int hctx_idx, unsigned int request_idx,
		unsigned int numa_node)
{
	struct work_struct *work = blk_mq_rq_to_pdu(rq);

	INIT_WORK(work, rbd_queue_workfn);
	return 0;
}

static struct blk_mq_ops rbd_mq_ops = {
	.queue_rq	= rbd_queue_rq,
	.map_queue	= blk_mq_map_queue,
	.init_request	= rbd_init_request,
};

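/*
 * Set up the gendisk and its blk-mq request queue for the mapping,
 * with all I/O limits derived from the RBD object size.
 */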
static int rbd_init_disk(struct rbd_device *rbd_dev)
{
	struct gendisk *disk;
	struct request_queue *q;
	u64 segment_size;
	int err;

	/* create gendisk info */
	disk = alloc_disk(single_major ?
			  (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
			  RBD_MINORS_PER_MAJOR);
	if (!disk)
		return -ENOMEM;

	snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
		 rbd_dev->dev_id);
	disk->major = rbd_dev->major;
	disk->first_minor = rbd_dev->minor;
	if (single_major)
		disk->flags |= GENHD_FL_EXT_DEVT;
	disk->fops = &rbd_bd_ops;
	disk->private_data = rbd_dev;

	memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
	rbd_dev->tag_set.ops = &rbd_mq_ops;
	rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
	rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
	rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
	rbd_dev->tag_set.nr_hw_queues = 1;
	rbd_dev->tag_set.cmd_size = sizeof(struct work_struct);

	err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
	if (err)
		goto out_disk;

	q = blk_mq_init_queue(&rbd_dev->tag_set);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto out_tag_set;
	}

	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
	/* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */

	/* set io sizes to object size */
	segment_size = rbd_obj_bytes(&rbd_dev->header);
	blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
	q->limits.max_sectors = queue_max_hw_sectors(q);
	blk_queue_max_segments(q, segment_size / SECTOR_SIZE);
	blk_queue_max_segment_size(q, segment_size);
	blk_queue_io_min(q, segment_size);
	blk_queue_io_opt(q, segment_size);

	/* enable the discard support */
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	q->limits.discard_granularity = segment_size;
	q->limits.discard_alignment = segment_size;
	blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE);
	q->limits.discard_zeroes_data = 1;

	if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
		q->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;

	disk->queue = q;

	q->queuedata = rbd_dev;

	rbd_dev->disk = disk;

	return 0;
out_tag_set:
	blk_mq_free_tag_set(&rbd_dev->tag_set);
out_disk:
	put_disk(disk);
	return err;
}

/*
  sysfs
*/

static struct rbd_device *dev_to_rbd_dev(struct device *dev)
{
	return container_of(dev, struct rbd_device, dev);
}

static ssize_t rbd_size_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%llu\n",
		(unsigned long long)rbd_dev->mapping.size);
}

/*
 * Note this shows the features for whatever's mapped, which is not
 * necessarily the base image.
 */
static ssize_t rbd_features_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "0x%016llx\n",
			(unsigned long long)rbd_dev->mapping.features);
}

static ssize_t rbd_major_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	if (rbd_dev->major)
		return sprintf(buf, "%d\n", rbd_dev->major);

	return sprintf(buf, "(none)\n");
}

static ssize_t rbd_minor_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%d\n", rbd_dev->minor);
}

static ssize_t rbd_client_id_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "client%lld\n",
		       ceph_client_gid(rbd_dev->rbd_client->client));
}

static ssize_t rbd_pool_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
}

static ssize_t rbd_pool_id_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%llu\n",
			(unsigned long long) rbd_dev->spec->pool_id);
}

static ssize_t rbd_name_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	if (rbd_dev->spec->image_name)
		return sprintf(buf, "%s\n", rbd_dev->spec->image_name);

	return sprintf(buf, "(unknown)\n");
}

static ssize_t rbd_image_id_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
}

/*
 * Shows the name of the currently-mapped snapshot (or
 * RBD_SNAP_HEAD_NAME for the base image).
 */
static ssize_t rbd_snap_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
}

/*
 * For a v2 image, shows the chain of parent images, separated by empty
 * lines.  For v1 images or if there is no parent, shows "(no parent
 * image)".
 */
static ssize_t rbd_parent_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	ssize_t count = 0;

	if (!rbd_dev->parent)
		return sprintf(buf, "(no parent image)\n");

	for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
		struct rbd_spec *spec = rbd_dev->parent_spec;

		count += sprintf(&buf[count], "%s"
			    "pool_id %llu\npool_name %s\n"
			    "image_id %s\nimage_name %s\n"
			    "snap_id %llu\nsnap_name %s\n"
			    "overlap %llu\n",
			    !count ? "" : "\n", /* first? */
			    spec->pool_id, spec->pool_name,
			    spec->image_id, spec->image_name ?: "(unknown)",
			    spec->snap_id, spec->snap_name,
			    rbd_dev->parent_overlap);
	}

	return count;
}

static ssize_t rbd_image_refresh(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t size)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	int ret;

	ret = rbd_dev_refresh(rbd_dev);
	if (ret)
		return ret;

	return size;
}

static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
static DEVICE_ATTR(minor, S_IRUGO, rbd_minor_show, NULL);
static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);

static struct attribute *rbd_attrs[] = {
	&dev_attr_size.attr,
	&dev_attr_features.attr,
	&dev_attr_major.attr,
	&dev_attr_minor.attr,
	&dev_attr_client_id.attr,
	&dev_attr_pool.attr,
	&dev_attr_pool_id.attr,
	&dev_attr_name.attr,
	&dev_attr_image_id.attr,
	&dev_attr_current_snap.attr,
	&dev_attr_parent.attr,
	&dev_attr_refresh.attr,
	NULL
};

static struct attribute_group rbd_attr_group = {
	.attrs = rbd_attrs,
};

static const struct attribute_group *rbd_attr_groups[] = {
	&rbd_attr_group,
	NULL
};

static void rbd_dev_release(struct device *dev);

static struct device_type rbd_device_type = {
	.name		= "rbd",
	.groups		= rbd_attr_groups,
	.release	= rbd_dev_release,
};

static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
{
	kref_get(&spec->kref);

	return spec;
}

static void rbd_spec_free(struct kref *kref);
static void rbd_spec_put(struct rbd_spec *spec)
{
	if (spec)
		kref_put(&spec->kref, rbd_spec_free);
}

static struct rbd_spec *rbd_spec_alloc(void)
{
	struct rbd_spec *spec;

	spec = kzalloc(sizeof (*spec), GFP_KERNEL);
	if (!spec)
		return NULL;

	spec->pool_id = CEPH_NOPOOL;
	spec->snap_id = CEPH_NOSNAP;
	kref_init(&spec->kref);

	return spec;
}

static void rbd_spec_free(struct kref *kref)
{
	struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);

	kfree(spec->pool_name);
	kfree(spec->image_id);
	kfree(spec->image_name);
	kfree(spec->snap_name);
	kfree(spec);
}

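/* Free an rbd_device and drop the references it holds. */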
static void rbd_dev_free(struct rbd_device *rbd_dev)
{
	ceph_oid_destroy(&rbd_dev->header_oid);
	ceph_oloc_destroy(&rbd_dev->header_oloc);

	rbd_put_client(rbd_dev->rbd_client);
	rbd_spec_put(rbd_dev->spec);
	kfree(rbd_dev->opts);
	kfree(rbd_dev);
}

static void rbd_dev_release(struct device *dev)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	bool need_put = !!rbd_dev->opts;

	if (need_put) {
		destroy_workqueue(rbd_dev->task_wq);
		ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
	}

	rbd_dev_free(rbd_dev);

	/*
	 * This is racy, but way better than putting module outside of
	 * the release callback.  The race window is pretty small, so
	 * doing something similar to dm (dm-builtin.c) is overkill.
	 */
	if (need_put)
		module_put(THIS_MODULE);
}

static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc,
					   struct rbd_spec *spec)
{
	struct rbd_device *rbd_dev;

	rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL);
	if (!rbd_dev)
		return NULL;

	spin_lock_init(&rbd_dev->lock);
	INIT_LIST_HEAD(&rbd_dev->node);
	init_rwsem(&rbd_dev->header_rwsem);

	ceph_oid_init(&rbd_dev->header_oid);
	ceph_oloc_init(&rbd_dev->header_oloc);

	rbd_dev->dev.bus = &rbd_bus_type;
	rbd_dev->dev.type = &rbd_device_type;
	rbd_dev->dev.parent = &rbd_root_dev;
	device_initialize(&rbd_dev->dev);

	rbd_dev->rbd_client = rbdc;
	rbd_dev->spec = spec;

	rbd_dev->layout.stripe_unit = 1 << RBD_MAX_OBJ_ORDER;
	rbd_dev->layout.stripe_count = 1;
	rbd_dev->layout.object_size = 1 << RBD_MAX_OBJ_ORDER;
	rbd_dev->layout.pool_id = spec->pool_id;
	RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);

	return rbd_dev;
}

/*
 * Create a mapping rbd_dev.
 */
static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
					 struct rbd_spec *spec,
					 struct rbd_options *opts)
{
	struct rbd_device *rbd_dev;

	rbd_dev = __rbd_dev_create(rbdc, spec);
	if (!rbd_dev)
		return NULL;

	rbd_dev->opts = opts;

	/* get an id and fill in device name */
	rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0,
					 minor_to_rbd_dev_id(1 << MINORBITS),
					 GFP_KERNEL);
	if (rbd_dev->dev_id < 0)
		goto fail_rbd_dev;

	sprintf(rbd_dev->name, RBD_DRV_NAME "%d", rbd_dev->dev_id);
	rbd_dev->task_wq = alloc_ordered_workqueue("%s-tasks", WQ_MEM_RECLAIM,
						   rbd_dev->name);
	if (!rbd_dev->task_wq)
		goto fail_dev_id;

	/* we have a ref from do_rbd_add() */
	__module_get(THIS_MODULE);

	dout("%s rbd_dev %p dev_id %d\n", __func__, rbd_dev, rbd_dev->dev_id);
	return rbd_dev;

fail_dev_id:
	ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
fail_rbd_dev:
	rbd_dev_free(rbd_dev);
	return NULL;
}

static void rbd_dev_destroy(struct rbd_device *rbd_dev)
{
	if (rbd_dev)
		put_device(&rbd_dev->dev);
}

/*
 * Get the size and object order for an image snapshot, or if
 * snap_id is CEPH_NOSNAP, gets this information for the base
 * image.
 */
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u8 *order, u64 *snap_size)
{
	__le64 snapid = cpu_to_le64(snap_id);
	int ret;
	struct {
		u8 order;
		__le64 size;
	} __attribute__ ((packed)) size_buf = { 0 };

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
				"rbd", "get_size",
				&snapid, sizeof (snapid),
				&size_buf, sizeof (size_buf));
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < sizeof (size_buf))
		return -ERANGE;

	if (order) {
		*order = size_buf.order;
		dout("  order %u", (unsigned int)*order);
	}
	*snap_size = le64_to_cpu(size_buf.size);

	dout("  snap_id 0x%016llx snap_size = %llu\n",
		(unsigned long long)snap_id,
		(unsigned long long)*snap_size);

	return 0;
}

static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
{
	return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
					&rbd_dev->header.obj_order,
					&rbd_dev->header.image_size);
}

static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
{
	void *reply_buf;
	int ret;
	void *p;

	reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
	if (!reply_buf)
		return -ENOMEM;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
				"rbd", "get_object_prefix", NULL, 0,
				reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out;

	p = reply_buf;
	rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
						p + ret, NULL, GFP_NOIO);
	ret = 0;

	if (IS_ERR(rbd_dev->header.object_prefix)) {
		ret = PTR_ERR(rbd_dev->header.object_prefix);
		rbd_dev->header.object_prefix = NULL;
	} else {
		dout("  object_prefix = %s\n", rbd_dev->header.object_prefix);
	}
out:
	kfree(reply_buf);

	return ret;
}

static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features)
{
	__le64 snapid = cpu_to_le64(snap_id);
	struct {
		__le64 features;
		__le64 incompat;
	} __attribute__ ((packed)) features_buf = { 0 };
	u64 unsup;
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
				"rbd", "get_features",
				&snapid, sizeof (snapid),
				&features_buf, sizeof (features_buf));
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < sizeof (features_buf))
		return -ERANGE;

	unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED;
	if (unsup) {
		rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
			 unsup);
		return -ENXIO;
	}

	*snap_features = le64_to_cpu(features_buf.features);

	dout("  snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
		(unsigned long long)snap_id,
		(unsigned long long)*snap_features,
		(unsigned long long)le64_to_cpu(features_buf.incompat));

	return 0;
}

static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
{
	return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
						&rbd_dev->header.features);
}

static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
{
	struct rbd_spec *parent_spec;
	size_t size;
	void *reply_buf = NULL;
	__le64 snapid;
	void *p;
	void *end;
	u64 pool_id;
	char *image_id;
	u64 snap_id;
	u64 overlap;
	int ret;

	parent_spec = rbd_spec_alloc();
	if (!parent_spec)
		return -ENOMEM;

	size = sizeof (__le64) +				/* pool_id */
		sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX +	/* image_id */
		sizeof (__le64) +				/* snap_id */
		sizeof (__le64);				/* overlap */
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf) {
		ret = -ENOMEM;
		goto out_err;
	}

	snapid = cpu_to_le64(rbd_dev->spec->snap_id);
	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
				"rbd", "get_parent",
				&snapid, sizeof (snapid),
				reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out_err;

	p = reply_buf;
	end = reply_buf + ret;
	ret = -ERANGE;
	ceph_decode_64_safe(&p, end, pool_id, out_err);
	if (pool_id == CEPH_NOPOOL) {
		/*
		 * Either the parent never existed, or we have a
		 * record of it but the image got flattened so it no
		 * longer has a parent.  When the parent of a
		 * layered image disappears we immediately set the
		 * overlap to 0.  The effect of this is that all new
		 * requests will be treated as if the image had no
		 * parent.
		 */
		if (rbd_dev->parent_overlap) {
			rbd_dev->parent_overlap = 0;
			rbd_dev_parent_put(rbd_dev);
			pr_info("%s: clone image has been flattened\n",
				rbd_dev->disk->disk_name);
		}

		goto out;	/* No parent?  No problem. */
	}

	/* The ceph file layout needs to fit pool id in 32 bits */

	ret = -EIO;
	if (pool_id > (u64)U32_MAX) {
		rbd_warn(NULL, "parent pool id too large (%llu > %u)",
			(unsigned long long)pool_id, U32_MAX);
		goto out_err;
	}

	image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
	if (IS_ERR(image_id)) {
		ret = PTR_ERR(image_id);
		goto out_err;
	}
	ceph_decode_64_safe(&p, end, snap_id, out_err);
	ceph_decode_64_safe(&p, end, overlap, out_err);

	/*
	 * The parent won't change (except when the clone is
	 * flattened, already handled that).  So we only need to
	 * record the parent spec if we have not already done so.
	 */
	if (!rbd_dev->parent_spec) {
		parent_spec->pool_id = pool_id;
		parent_spec->image_id = image_id;
		parent_spec->snap_id = snap_id;
		rbd_dev->parent_spec = parent_spec;
		parent_spec = NULL;	/* rbd_dev now owns this */
	} else {
		kfree(image_id);
	}

	/*
	 * We always update the parent overlap.  If it's zero we issue
	 * a warning, as we will proceed as if there was no parent.
	 */
	if (!overlap) {
		if (parent_spec) {
			/* refresh, careful to warn just once */
			if (rbd_dev->parent_overlap)
				rbd_warn(rbd_dev,
				    "clone now standalone (overlap became 0)");
		} else {
			/* initial probe */
			rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
		}
	}
	rbd_dev->parent_overlap = overlap;

out:
	ret = 0;
out_err:
	kfree(reply_buf);
	rbd_spec_put(parent_spec);

	return ret;
}

static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
{
	struct {
		__le64 stripe_unit;
		__le64 stripe_count;
	} __attribute__ ((packed)) striping_info_buf = { 0 };
	size_t size = sizeof (striping_info_buf);
	void *p;
	u64 obj_size;
	u64 stripe_unit;
	u64 stripe_count;
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
				"rbd", "get_stripe_unit_count", NULL, 0,
				(char *)&striping_info_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < size)
		return -ERANGE;

	/*
	 * We don't actually support the "fancy striping" feature
	 * (STRIPINGV2) yet, but if the striping sizes are the
	 * defaults the behavior is the same as before.  So find
	 * out, and only fail if the image has non-default values.
	 */
	ret = -EINVAL;
	obj_size = (u64)1 << rbd_dev->header.obj_order;
	p = &striping_info_buf;
	stripe_unit = ceph_decode_64(&p);
	if (stripe_unit != obj_size) {
		rbd_warn(rbd_dev, "unsupported stripe unit "
				"(got %llu want %llu)",
				stripe_unit, obj_size);
		return -EINVAL;
	}
	stripe_count = ceph_decode_64(&p);
	if (stripe_count != 1) {
		rbd_warn(rbd_dev, "unsupported stripe count "
				"(got %llu want 1)", stripe_count);
		return -EINVAL;
	}
	rbd_dev->header.stripe_unit = stripe_unit;
	rbd_dev->header.stripe_count = stripe_count;

	return 0;
}

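/*
 * Look up the image name for rbd_dev's image id in the RBD_DIRECTORY
 * object.  Returns a dynamically-allocated name, or NULL on failure.
 */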
static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
{
	size_t image_id_size;
	char *image_id;
	void *p;
	void *end;
	size_t size;
	void *reply_buf = NULL;
	size_t len = 0;
	char *image_name = NULL;
	int ret;

	rbd_assert(!rbd_dev->spec->image_name);

	len = strlen(rbd_dev->spec->image_id);
	image_id_size = sizeof (__le32) + len;
	image_id = kmalloc(image_id_size, GFP_KERNEL);
	if (!image_id)
		return NULL;

	p = image_id;
	end = image_id + image_id_size;
	ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);

	size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf)
		goto out;

	ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
				"rbd", "dir_get_name",
				image_id, image_id_size,
				reply_buf, size);
	if (ret < 0)
		goto out;
	p = reply_buf;
	end = reply_buf + ret;

	image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
	if (IS_ERR(image_name))
		image_name = NULL;
	else
		dout("%s: name is %s len is %zd\n", __func__, image_name, len);
out:
	kfree(reply_buf);
	kfree(image_id);

	return image_name;
}

static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	const char *snap_name;
	u32 which = 0;

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which < snapc->num_snaps) {
		if (!strcmp(name, snap_name))
			return snapc->snaps[which];
		snap_name += strlen(snap_name) + 1;
		which++;
	}
	return CEPH_NOSNAP;
}

static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u32 which;
	bool found = false;
	u64 snap_id;

	for (which = 0; !found && which < snapc->num_snaps; which++) {
		const char *snap_name;

		snap_id = snapc->snaps[which];
		snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
		if (IS_ERR(snap_name)) {
			/* ignore no-longer existing snapshots */
			if (PTR_ERR(snap_name) == -ENOENT)
				continue;
			else
				break;
		}
		found = !strcmp(name, snap_name);
		kfree(snap_name);
	}
	return found ? snap_id : CEPH_NOSNAP;
}

/*
 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
 * no snapshot by that name is found, or if an error occurs.
 */
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	if (rbd_dev->image_format == 1)
		return rbd_v1_snap_id_by_name(rbd_dev, name);

	return rbd_v2_snap_id_by_name(rbd_dev, name);
}

/*
 * An image being mapped will have everything but the snap id.
 */
static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;

	rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
	rbd_assert(spec->image_id && spec->image_name);
	rbd_assert(spec->snap_name);

	if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
		u64 snap_id;

		snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
		if (snap_id == CEPH_NOSNAP)
			return -ENOENT;

		spec->snap_id = snap_id;
	} else {
		spec->snap_id = CEPH_NOSNAP;
	}

	return 0;
}

/*
 * A parent image will have all ids but none of the names.
 *
 * All names in an rbd spec are dynamically allocated.  It's OK if we
 * can't figure out the name for an image id.
 */
static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_spec *spec = rbd_dev->spec;
	const char *pool_name;
	const char *image_name;
	const char *snap_name;
	int ret;

	rbd_assert(spec->pool_id != CEPH_NOPOOL);
	rbd_assert(spec->image_id);
	rbd_assert(spec->snap_id != CEPH_NOSNAP);

	/* Get the pool name; we have to make our own copy of this */

	pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
	if (!pool_name) {
		rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
		return -EIO;
	}
	pool_name = kstrdup(pool_name, GFP_KERNEL);
	if (!pool_name)
		return -ENOMEM;

	/* Fetch the image name; tolerate failure here */

	image_name = rbd_dev_image_name(rbd_dev);
	if (!image_name)
		rbd_warn(rbd_dev, "unable to get image name");

	/* Fetch the snapshot name */

	snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
	if (IS_ERR(snap_name)) {
		ret = PTR_ERR(snap_name);
		goto out_err;
	}

	spec->pool_name = pool_name;
	spec->image_name = image_name;
	spec->snap_name = snap_name;

	return 0;

out_err:
	kfree(image_name);
	kfree(pool_name);
	return ret;
}

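/*
 * Fetch the image's snapshot context (sequence number plus the array
 * of snapshot ids) and install it in rbd_dev->header.
 */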
static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
{
	size_t size;
	int ret;
	void *reply_buf;
	void *p;
	void *end;
	u64 seq;
	u32 snap_count;
	struct ceph_snap_context *snapc;
	u32 i;

	/*
	 * We'll need room for the seq value (maximum snapshot id),
	 * snapshot count, and array of that many snapshot ids.
	 * For now we have a fixed upper limit on the number we're
	 * prepared to receive.
	 */
	size = sizeof (__le64) + sizeof (__le32) +
			RBD_MAX_SNAP_COUNT * sizeof (__le64);
	reply_buf = kzalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return -ENOMEM;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
				"rbd", "get_snapcontext", NULL, 0,
				reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out;

	p = reply_buf;
	end = reply_buf + ret;
	ret = -ERANGE;
	ceph_decode_64_safe(&p, end, seq, out);
	ceph_decode_32_safe(&p, end, snap_count, out);

	/*
	 * Make sure the reported number of snapshot ids wouldn't go
	 * beyond the end of our buffer.  But before checking that,
	 * make sure the computed size of the snapshot context we
	 * allocate is representable in a size_t.
	 */
	if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
				 / sizeof (u64)) {
		ret = -EINVAL;
		goto out;
	}
	if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
		goto out;
	ret = 0;

	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc) {
		ret = -ENOMEM;
		goto out;
	}
	snapc->seq = seq;
	for (i = 0; i < snap_count; i++)
		snapc->snaps[i] = ceph_decode_64(&p);

	ceph_put_snap_context(rbd_dev->header.snapc);
	rbd_dev->header.snapc = snapc;

	dout("  snap context seq = %llu, snap_count = %u\n",
		(unsigned long long)seq, (unsigned int)snap_count);
out:
	kfree(reply_buf);

	return ret;
}

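/*
 * Return the name of the snapshot with the given id, as reported by
 * the "get_snapshot_name" class method, or an ERR_PTR() on failure.
 */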
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	size_t size;
	void *reply_buf;
	__le64 snapid;
	int ret;
	void *p;
	void *end;
	char *snap_name;

	size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return ERR_PTR(-ENOMEM);

	snapid = cpu_to_le64(snap_id);
	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
				"rbd", "get_snapshot_name",
				&snapid, sizeof (snapid),
				reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0) {
		snap_name = ERR_PTR(ret);
		goto out;
	}

	p = reply_buf;
	end = reply_buf + ret;
	snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
	if (IS_ERR(snap_name))
		goto out;

	dout("  snap_id 0x%016llx snap_name = %s\n",
		(unsigned long long)snap_id, snap_name);
out:
	kfree(reply_buf);

	return snap_name;
}

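/*
 * Refresh the v2 header: image size, the one-time fields on the
 * first call, and the snapshot context.
 */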
static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
{
	bool first_time = rbd_dev->header.object_prefix == NULL;
	int ret;

	ret = rbd_dev_v2_image_size(rbd_dev);
	if (ret)
		return ret;

	if (first_time) {
		ret = rbd_dev_v2_header_onetime(rbd_dev);
		if (ret)
			return ret;
	}

	ret = rbd_dev_v2_snap_context(rbd_dev);
	if (ret && first_time) {
		kfree(rbd_dev->header.object_prefix);
		rbd_dev->header.object_prefix = NULL;
	}

	return ret;
}

static int rbd_dev_header_info(struct rbd_device *rbd_dev)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_header_info(rbd_dev);

	return rbd_dev_v2_header_info(rbd_dev);
}

/*
 * Skips over white space at *buf, and updates *buf to point to the
 * first found non-space character (if any). Returns the length of
 * the token (string of non-white space characters) found.  Note
 * that *buf must be terminated with '\0'.
 */
static inline size_t next_token(const char **buf)
{
	/*
	 * These are the characters that produce nonzero for
	 * isspace() in the "C" and "POSIX" locales.
	 */
	const char *spaces = " \f\n\r\t\v";

	*buf += strspn(*buf, spaces);	/* Find start of token */

	return strcspn(*buf, spaces);	/* Return token length */
}

/*
 * Finds the next token in *buf, dynamically allocates a buffer big
 * enough to hold a copy of it, and copies the token into the new
 * buffer.  The copy is guaranteed to be terminated with '\0'.  Note
 * that a duplicate buffer is created even for a zero-length token.
 *
 * Returns a pointer to the newly-allocated duplicate, or a null
 * pointer if memory for the duplicate was not available.  If
 * the lenp argument is a non-null pointer, the length of the token
 * (not including the '\0') is returned in *lenp.
 *
 * If successful, the *buf pointer will be updated to point beyond
 * the end of the found token.
 *
 * Note: uses GFP_KERNEL for allocation.
 */
static inline char *dup_token(const char **buf, size_t *lenp)
{
	char *dup;
	size_t len;

	len = next_token(buf);
	dup = kmemdup(*buf, len + 1, GFP_KERNEL);
	if (!dup)
		return NULL;
	*(dup + len) = '\0';
	*buf += len;

	if (lenp)
		*lenp = len;

	return dup;
}

/*
 * Parse the options provided for an "rbd add" (i.e., rbd image
 * mapping) request.  These arrive via a write to /sys/bus/rbd/add,
 * and the data written is passed here via a NUL-terminated buffer.
 * Returns 0 if successful or an error code otherwise.
 *
 * The information extracted from these options is recorded in
 * the other parameters which return dynamically-allocated
 * structures:
 *  ceph_opts
 *      The address of a pointer that will refer to a ceph options
 *      structure.  Caller must release the returned pointer using
 *      ceph_destroy_options() when it is no longer needed.
 *  rbd_opts
 *	Address of an rbd options pointer.  Fully initialized by
 *	this function; caller must release with kfree().
 *  spec
 *	Address of an rbd image specification pointer.  Fully
 *	initialized by this function based on parsed options.
 *	Caller must release with rbd_spec_put().
 *
 * The options passed take this form:
 *  <mon_addrs> <options> <pool_name> <image_name> [<snap_id>]
 * where:
 *  <mon_addrs>
 *      A comma-separated list of one or more monitor addresses.
 *      A monitor address is an ip address, optionally followed
 *      by a port number (separated by a colon).
 *        I.e.:  ip1[:port1][,ip2[:port2]...]
 *  <options>
 *      A comma-separated list of ceph and/or rbd options.
 *  <pool_name>
 *      The name of the rados pool containing the rbd image.
 *  <image_name>
 *      The name of the image in that pool to map.
 *  <snap_id>
 *      An optional snapshot id.  If provided, the mapping will
 *      present data from the image at the time that snapshot was
 *      created.  The image head is used if no snapshot id is
 *      provided.  Snapshot mappings are always read-only.
 */
static int rbd_add_parse_args(const char *buf,
				struct ceph_options **ceph_opts,
				struct rbd_options **opts,
				struct rbd_spec **rbd_spec)
{
	size_t len;
	char *options;
	const char *mon_addrs;
	char *snap_name;
	size_t mon_addrs_size;
	struct rbd_spec *spec = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct ceph_options *copts;
	int ret;

	/* The first four tokens are required */

	len = next_token(&buf);
	if (!len) {
		rbd_warn(NULL, "no monitor address(es) provided");
		return -EINVAL;
	}
	mon_addrs = buf;
	mon_addrs_size = len + 1;
	buf += len;

	ret = -EINVAL;
	options = dup_token(&buf, NULL);
	if (!options)
		return -ENOMEM;
	if (!*options) {
		rbd_warn(NULL, "no options provided");
		goto out_err;
	}

	spec = rbd_spec_alloc();
	if (!spec)
		goto out_mem;

	spec->pool_name = dup_token(&buf, NULL);
	if (!spec->pool_name)
		goto out_mem;
	if (!*spec->pool_name) {
		rbd_warn(NULL, "no pool name provided");
		goto out_err;
	}

	spec->image_name = dup_token(&buf, NULL);
	if (!spec->image_name)
		goto out_mem;
	if (!*spec->image_name) {
		rbd_warn(NULL, "no image name provided");
		goto out_err;
	}

	/*
	 * Snapshot name is optional; default is to use "-"
	 * (indicating the head/no snapshot).
	 */
	len = next_token(&buf);
	if (!len) {
		buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
		len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
	} else if (len > RBD_MAX_SNAP_NAME_LEN) {
		ret = -ENAMETOOLONG;
		goto out_err;
	}
	snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
	if (!snap_name)
		goto out_mem;
	*(snap_name + len) = '\0';
	spec->snap_name = snap_name;

	/* Initialize all rbd options to the defaults */

	rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
	if (!rbd_opts)
		goto out_mem;

	rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
	rbd_opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;

	copts = ceph_parse_options(options, mon_addrs,
					mon_addrs + mon_addrs_size - 1,
					parse_rbd_opts_token, rbd_opts);
	if (IS_ERR(copts)) {
		ret = PTR_ERR(copts);
		goto out_err;
	}
	kfree(options);

	*ceph_opts = copts;
	*opts = rbd_opts;
	*rbd_spec = spec;

	return 0;
out_mem:
	ret = -ENOMEM;
out_err:
	kfree(rbd_opts);
	rbd_spec_put(spec);
	kfree(options);

	return ret;
}

/*
 * Return pool id (>= 0) or a negative error code.
 */
static int rbd_add_get_pool_id(struct rbd_client *rbdc, const char *pool_name)
{
	struct ceph_options *opts = rbdc->client->options;
	u64 newest_epoch;
	int tries = 0;
	int ret;

again:
	ret = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, pool_name);
	if (ret == -ENOENT && tries++ < 1) {
		ret = ceph_monc_get_version(&rbdc->client->monc, "osdmap",
					    &newest_epoch);
		if (ret < 0)
			return ret;

		if (rbdc->client->osdc.osdmap->epoch < newest_epoch) {
			ceph_osdc_maybe_request_map(&rbdc->client->osdc);
			(void) ceph_monc_wait_osdmap(&rbdc->client->monc,
						     newest_epoch,
						     opts->mount_timeout);
			goto again;
		} else {
			/* the osdmap we have is new enough */
			return -ENOENT;
		}
	}

	return ret;
}

/*
 * An rbd format 2 image has a unique identifier, distinct from the
 * name given to it by the user.  Internally, that identifier is
 * what's used to specify the names of objects related to the image.
 *
 * A special "rbd id" object is used to map an rbd image name to its
 * id.  If that object doesn't exist, then there is no v2 rbd image
 * with the supplied name.
 *
 * This function will record the given rbd_dev's image_id field if
 * it can be determined, and in that case will return 0.  If any
 * errors occur a negative errno will be returned and the rbd_dev's
 * image_id field will be unchanged (and should be NULL).
 */
static int rbd_dev_image_id(struct rbd_device *rbd_dev)
{
	int ret;
	size_t size;
	char *object_name;
	void *response;
	char *image_id;

	/*
	 * When probing a parent image, the image id is already
	 * known (and the image name likely is not).  There's no
	 * need to fetch the image id again in this case.  We
	 * do still need to set the image format though.
	 */
	if (rbd_dev->spec->image_id) {
		rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;

		return 0;
	}

	/*
	 * First, see if the format 2 image id file exists, and if
	 * so, get the image's persistent id from it.
	 */
	size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
	object_name = kmalloc(size, GFP_NOIO);
	if (!object_name)
		return -ENOMEM;
	sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
	dout("rbd id object name is %s\n", object_name);

	/* Response will be an encoded string, which includes a length */

	size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
	response = kzalloc(size, GFP_NOIO);
	if (!response) {
		ret = -ENOMEM;
		goto out;
	}

	/* If it doesn't exist we'll assume it's a format 1 image */

	ret = rbd_obj_method_sync(rbd_dev, object_name,
				"rbd", "get_id", NULL, 0,
				response, RBD_IMAGE_ID_LEN_MAX);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret == -ENOENT) {
		image_id = kstrdup("", GFP_KERNEL);
		ret = image_id ? 0 : -ENOMEM;
		if (!ret)
			rbd_dev->image_format = 1;
	} else if (ret >= 0) {
		void *p = response;

		image_id = ceph_extract_encoded_string(&p, p + ret,
						NULL, GFP_NOIO);
		ret = PTR_ERR_OR_ZERO(image_id);
		if (!ret)
			rbd_dev->image_format = 2;
	}

	if (!ret) {
		rbd_dev->spec->image_id = image_id;
		dout("image_id is %s\n", image_id);
	}
out:
	kfree(response);
	kfree(object_name);

	return ret;
}

/*
 * Undo whatever state changes are made by v1 or v2 header info
 * call.
 */
static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
	struct rbd_image_header	*header;

	rbd_dev_parent_put(rbd_dev);

	/* Free dynamic fields from the header, then zero it out */

	header = &rbd_dev->header;
	ceph_put_snap_context(header->snapc);
	kfree(header->snap_sizes);
	kfree(header->snap_names);
	kfree(header->object_prefix);
	memset(header, 0, sizeof (*header));
}

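/*
 * Fetch the v2 header fields that are only read once at probe time:
 * object prefix, features and (if applicable) striping parameters.
 */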
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
{
	int ret;

	ret = rbd_dev_v2_object_prefix(rbd_dev);
	if (ret)
		goto out_err;

	/*
	 * Get and check the features for the image.  Currently the
	 * features are assumed to never change.
	 */
	ret = rbd_dev_v2_features(rbd_dev);
	if (ret)
		goto out_err;

	/* If the image supports fancy striping, get its parameters */

	if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
		ret = rbd_dev_v2_striping_info(rbd_dev);
		if (ret < 0)
			goto out_err;
	}
	/* No support for crypto and compression type format 2 images */

	return 0;
out_err:
	rbd_dev->header.features = 0;
	kfree(rbd_dev->header.object_prefix);
	rbd_dev->header.object_prefix = NULL;

	return ret;
}

/*
 * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
 * rbd_dev_image_probe() recursion depth, which means it's also the
 * length of the already discovered part of the parent chain.
 */
static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
{
	struct rbd_device *parent = NULL;
	int ret;

	if (!rbd_dev->parent_spec)
		return 0;

	if (++depth > RBD_MAX_PARENT_CHAIN_LEN) {
		pr_info("parent chain is too long (%d)\n", depth);
		ret = -EINVAL;
		goto out_err;
	}

	parent = __rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec);
	if (!parent) {
		ret = -ENOMEM;
		goto out_err;
	}

	/*
	 * Images related by parent/child relationships always share
	 * rbd_client and spec/parent_spec, so bump their refcounts.
	 */
	__rbd_get_client(rbd_dev->rbd_client);
	rbd_spec_get(rbd_dev->parent_spec);

	ret = rbd_dev_image_probe(parent, depth);
	if (ret < 0)
		goto out_err;

	rbd_dev->parent = parent;
	atomic_set(&rbd_dev->parent_ref, 1);
	return 0;

out_err:
	rbd_dev_unparent(rbd_dev);
	rbd_dev_destroy(parent);
	return ret;
}

/*
 * rbd_dev->header_rwsem must be locked for write and will be unlocked
 * upon return.
 */
static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
	int ret;

	/* Record our major and minor device numbers. */

	if (!single_major) {
		ret = register_blkdev(0, rbd_dev->name);
		if (ret < 0)
			goto err_out_unlock;

		rbd_dev->major = ret;
		rbd_dev->minor = 0;
	} else {
		rbd_dev->major = rbd_major;
		rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
	}

	/* Set up the blkdev mapping. */

	ret = rbd_init_disk(rbd_dev);
	if (ret)
		goto err_out_blkdev;

	ret = rbd_dev_mapping_set(rbd_dev);
	if (ret)
		goto err_out_disk;

	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
	set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);

	dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
	ret = device_add(&rbd_dev->dev);
	if (ret)
		goto err_out_mapping;

	/* Everything's ready.  Announce the disk to the world. */

	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	up_write(&rbd_dev->header_rwsem);

	spin_lock(&rbd_dev_list_lock);
	list_add_tail(&rbd_dev->node, &rbd_dev_list);
	spin_unlock(&rbd_dev_list_lock);

	add_disk(rbd_dev->disk);
	pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
		(unsigned long long) rbd_dev->mapping.size);

	return ret;

err_out_mapping:
	rbd_dev_mapping_clear(rbd_dev);
err_out_disk:
	rbd_free_disk(rbd_dev);
err_out_blkdev:
	if (!single_major)
		unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_unlock:
	up_write(&rbd_dev->header_rwsem);
	return ret;
}

static int rbd_dev_header_name(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;
	int ret;

	/* Record the header object name for this rbd image. */

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	rbd_dev->header_oloc.pool = rbd_dev->layout.pool_id;
	if (rbd_dev->image_format == 1)
		ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
				       spec->image_name, RBD_SUFFIX);
	else
		ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
				       RBD_HEADER_PREFIX, spec->image_id);

	return ret;
}

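/* Undo rbd_dev_image_probe(), dropping the image-related state. */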
static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
	rbd_dev_unprobe(rbd_dev);
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	rbd_dev_destroy(rbd_dev);
}

/*
 * Probe for the existence of the header object for the given rbd
 * device.  If this image is the one being mapped (i.e., not a
 * parent), initiate a watch on its header object before using that
 * object to get detailed information about the rbd image.
 */
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
{
	int ret;

	/*
	 * Get the id from the image id object.  Unless there's an
	 * error, rbd_dev->spec->image_id will be filled in with
	 * a dynamically-allocated string, and rbd_dev->image_format
	 * will be set to either 1 or 2.
	 */
	ret = rbd_dev_image_id(rbd_dev);
	if (ret)
		return ret;

	ret = rbd_dev_header_name(rbd_dev);
	if (ret)
		goto err_out_format;

	if (!depth) {
		ret = rbd_dev_header_watch_sync(rbd_dev);
		if (ret) {
			if (ret == -ENOENT)
				pr_info("image %s/%s does not exist\n",
					rbd_dev->spec->pool_name,
					rbd_dev->spec->image_name);
			goto err_out_format;
		}
	}

	ret = rbd_dev_header_info(rbd_dev);
	if (ret)
		goto err_out_watch;

	/*
	 * If this image is the one being mapped, we have pool name and
	 * id, image name and id, and snap name - need to fill snap id.
	 * Otherwise this is a parent image, identified by pool, image
	 * and snap ids - need to fill in names for those ids.
	 */
	if (!depth)
		ret = rbd_spec_fill_snap_id(rbd_dev);
	else
		ret = rbd_spec_fill_names(rbd_dev);
	if (ret) {
		if (ret == -ENOENT)
			pr_info("snap %s/%s@%s does not exist\n",
				rbd_dev->spec->pool_name,
				rbd_dev->spec->image_name,
				rbd_dev->spec->snap_name);
		goto err_out_probe;
	}

	if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
		ret = rbd_dev_v2_parent_info(rbd_dev);
		if (ret)
			goto err_out_probe;

		/*
		 * Need to warn users if this image is the one being
		 * mapped and has a parent.
		 */
		if (!depth && rbd_dev->parent_spec)
			rbd_warn(rbd_dev,
				 "WARNING: kernel layering is EXPERIMENTAL!");
	}

	ret = rbd_dev_probe_parent(rbd_dev, depth);
	if (ret)
		goto err_out_probe;

	dout("discovered format %u image, header name is %s\n",
		rbd_dev->image_format, rbd_dev->header_oid.name);
	return 0;

err_out_probe:
	rbd_dev_unprobe(rbd_dev);
err_out_watch:
	if (!depth)
		rbd_dev_header_unwatch_sync(rbd_dev);
err_out_format:
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;
	return ret;
}

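/*
 * Entry point for writes to /sys/bus/rbd/add (and add_single_major).
 * The buffer is expected to carry, in order, the monitor addresses,
 * the options, the pool name, the image name and an optional snapshot
 * name, as parsed by rbd_add_parse_args().  An illustrative write
 * (address and names made up):
 *
 *   $ echo "1.2.3.4:6789 name=admin rbd foo" > /sys/bus/rbd/add
 */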
static ssize_t do_rbd_add(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct ceph_options *ceph_opts = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct rbd_spec *spec = NULL;
	struct rbd_client *rbdc;
	bool read_only;
	int rc;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	/* parse add command */
	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
	if (rc < 0)
		goto out;

	rbdc = rbd_get_client(ceph_opts);
	if (IS_ERR(rbdc)) {
		rc = PTR_ERR(rbdc);
		goto err_out_args;
	}

	/* pick the pool */
	rc = rbd_add_get_pool_id(rbdc, spec->pool_name);
	if (rc < 0) {
		if (rc == -ENOENT)
			pr_info("pool %s does not exist\n", spec->pool_name);
		goto err_out_client;
	}
	spec->pool_id = (u64)rc;

	rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
	if (!rbd_dev) {
		rc = -ENOMEM;
		goto err_out_client;
	}
	rbdc = NULL;		/* rbd_dev now owns this */
	spec = NULL;		/* rbd_dev now owns this */
	rbd_opts = NULL;	/* rbd_dev now owns this */

	down_write(&rbd_dev->header_rwsem);
	rc = rbd_dev_image_probe(rbd_dev, 0);
	if (rc < 0)
		goto err_out_rbd_dev;

	/* If we are mapping a snapshot it must be marked read-only */

	read_only = rbd_dev->opts->read_only;
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
		read_only = true;
	rbd_dev->mapping.read_only = read_only;

	rc = rbd_dev_device_setup(rbd_dev);
	if (rc) {
		/*
		 * rbd_dev_header_unwatch_sync() can't be moved into
		 * rbd_dev_image_release() without refactoring, see
		 * commit 1f3ef78861ac.
		 */
		rbd_dev_header_unwatch_sync(rbd_dev);
		rbd_dev_image_release(rbd_dev);
		goto out;
	}

	rc = count;
out:
	module_put(THIS_MODULE);
	return rc;

err_out_rbd_dev:
	up_write(&rbd_dev->header_rwsem);
	rbd_dev_destroy(rbd_dev);
err_out_client:
	rbd_put_client(rbdc);
err_out_args:
	rbd_spec_put(spec);
	kfree(rbd_opts);
	goto out;
}

static ssize_t rbd_add(struct bus_type *bus,
		       const char *buf,
		       size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_add(bus, buf, count);
}

static ssize_t rbd_add_single_major(struct bus_type *bus,
				    const char *buf,
				    size_t count)
{
	return do_rbd_add(bus, buf, count);
}

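/*
 * Undo rbd_dev_device_setup(): free the gendisk, drop the device from
 * the global list and from sysfs, clear the mapping, and release the
 * privately allocated major number when single_major is not in use.
 */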
static void rbd_dev_device_release(struct rbd_device *rbd_dev)
{
	rbd_free_disk(rbd_dev);

	spin_lock(&rbd_dev_list_lock);
	list_del_init(&rbd_dev->node);
	spin_unlock(&rbd_dev_list_lock);

	clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	device_del(&rbd_dev->dev);
	rbd_dev_mapping_clear(rbd_dev);
	if (!single_major)
		unregister_blkdev(rbd_dev->major, rbd_dev->name);
}

static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
{
	while (rbd_dev->parent) {
		struct rbd_device *first = rbd_dev;
		struct rbd_device *second = first->parent;
		struct rbd_device *third;

		/*
		 * Follow to the parent with no grandparent and
		 * remove it.
		 */
		while (second && (third = second->parent)) {
			first = second;
			second = third;
		}
		rbd_assert(second);
		rbd_dev_image_release(second);
		first->parent = NULL;
		first->parent_overlap = 0;

		rbd_assert(first->parent_spec);
		rbd_spec_put(first->parent_spec);
		first->parent_spec = NULL;
	}
}
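
/*
 * Illustration (hypothetical chain): if the mapped device has parents
 * dev -> p1 -> p2, the loop above releases p2 first, then p1, so an
 * image is only torn down once all of its descendants are gone.
 */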

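/*
 * Entry point for writes to /sys/bus/rbd/remove (and
 * remove_single_major).  The buffer carries the decimal id of the
 * device to unmap; illustratively, "echo 2 > /sys/bus/rbd/remove"
 * asks for /dev/rbd2 to be torn down.
 */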
static ssize_t do_rbd_remove(struct bus_type *bus,
			     const char *buf,
			     size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct list_head *tmp;
	int dev_id;
	unsigned long ul;
	bool already = false;
	int ret;

	ret = kstrtoul(buf, 10, &ul);
	if (ret)
		return ret;

	/* convert to int; abort if we lost anything in the conversion */
	dev_id = (int)ul;
	if (dev_id != ul)
		return -EINVAL;

	ret = -ENOENT;
	spin_lock(&rbd_dev_list_lock);
	list_for_each(tmp, &rbd_dev_list) {
		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id == dev_id) {
			ret = 0;
			break;
		}
	}
	if (!ret) {
		spin_lock_irq(&rbd_dev->lock);
		if (rbd_dev->open_count)
			ret = -EBUSY;
		else
			already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
							&rbd_dev->flags);
		spin_unlock_irq(&rbd_dev->lock);
	}
	spin_unlock(&rbd_dev_list_lock);
	if (ret < 0 || already)
		return ret;

	rbd_dev_header_unwatch_sync(rbd_dev);

	/*
	 * Don't free anything from rbd_dev->disk until after all
	 * notifies are completely processed. Otherwise
	 * rbd_bus_del_dev() will race with rbd_watch_cb(), resulting
	 * in a potential use after free of rbd_dev->disk or rbd_dev.
	 */
	rbd_dev_device_release(rbd_dev);
	rbd_dev_image_release(rbd_dev);

	return count;
}

static ssize_t rbd_remove(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_remove(bus, buf, count);
}

static ssize_t rbd_remove_single_major(struct bus_type *bus,
				       const char *buf,
				       size_t count)
{
	return do_rbd_remove(bus, buf, count);
}

/*
 * create control files in sysfs
 * /sys/bus/rbd/...
 */
static int rbd_sysfs_init(void)
{
	int ret;

	ret = device_register(&rbd_root_dev);
	if (ret < 0)
		return ret;

	ret = bus_register(&rbd_bus_type);
	if (ret < 0)
		device_unregister(&rbd_root_dev);

	return ret;
}

static void rbd_sysfs_cleanup(void)
{
	bus_unregister(&rbd_bus_type);
	device_unregister(&rbd_root_dev);
}

static int rbd_slab_init(void)
{
	rbd_assert(!rbd_img_request_cache);
	rbd_img_request_cache = KMEM_CACHE(rbd_img_request, 0);
	if (!rbd_img_request_cache)
		return -ENOMEM;

	rbd_assert(!rbd_obj_request_cache);
	rbd_obj_request_cache = KMEM_CACHE(rbd_obj_request, 0);
	if (!rbd_obj_request_cache)
		goto out_err;

	rbd_assert(!rbd_segment_name_cache);
	rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
					CEPH_MAX_OID_NAME_LEN + 1, 1, 0, NULL);
	if (rbd_segment_name_cache)
		return 0;
out_err:
	kmem_cache_destroy(rbd_obj_request_cache);
	rbd_obj_request_cache = NULL;

	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;

	return -ENOMEM;
}

static void rbd_slab_exit(void)
{
	rbd_assert(rbd_segment_name_cache);
	kmem_cache_destroy(rbd_segment_name_cache);
	rbd_segment_name_cache = NULL;

	rbd_assert(rbd_obj_request_cache);
	kmem_cache_destroy(rbd_obj_request_cache);
	rbd_obj_request_cache = NULL;

	rbd_assert(rbd_img_request_cache);
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
}
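
/*
 * Sizing note: KMEM_CACHE() derives the cache name and object size
 * from the struct itself, while the segment-name cache is created by
 * hand so its objects can hold a NUL-terminated object name
 * (CEPH_MAX_OID_NAME_LEN + 1 bytes, byte-aligned).
 */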

static int __init rbd_init(void)
{
	int rc;

	if (!libceph_compatible(NULL)) {
		rbd_warn(NULL, "libceph incompatibility (quitting)");
		return -EINVAL;
	}

	rc = rbd_slab_init();
	if (rc)
		return rc;

	/*
	 * The number of active work items is limited by the number of
	 * rbd devices * queue depth, so leave @max_active at default.
	 */
	rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
	if (!rbd_wq) {
		rc = -ENOMEM;
		goto err_out_slab;
	}

	if (single_major) {
		rbd_major = register_blkdev(0, RBD_DRV_NAME);
		if (rbd_major < 0) {
			rc = rbd_major;
			goto err_out_wq;
		}
	}

	rc = rbd_sysfs_init();
	if (rc)
		goto err_out_blkdev;

	if (single_major)
		pr_info("loaded (major %d)\n", rbd_major);
	else
		pr_info("loaded\n");

	return 0;

err_out_blkdev:
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
err_out_wq:
	destroy_workqueue(rbd_wq);
err_out_slab:
	rbd_slab_exit();
	return rc;
}
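
/*
 * An illustrative load with the shared-major mode enabled (assuming
 * single_major is exposed as a bool module parameter, declared
 * earlier in this file):
 *
 *   # modprobe rbd single_major=Y
 *
 * All images then share one block major and are mapped/unmapped via
 * add_single_major/remove_single_major instead of add/remove.
 */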

static void __exit rbd_exit(void)
{
	ida_destroy(&rbd_dev_id_ida);
	rbd_sysfs_cleanup();
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
	destroy_workqueue(rbd_wq);
	rbd_slab_exit();
}

module_init(rbd_init);
module_exit(rbd_exit);

MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");

MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
MODULE_LICENSE("GPL");