rbd.c 146.1 KB
Newer Older
1

2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24
/*
   rbd.c -- Export ceph rados objects as a Linux block device


   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.



25
   For usage instructions, please refer to:
26

27
                 Documentation/ABI/testing/sysfs-bus-rbd
28 29 30 31 32 33 34

 */

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
35
#include <linux/parser.h>
36
#include <linux/bsearch.h>
37 38 39 40

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
C
Christoph Hellwig 已提交
41
#include <linux/blk-mq.h>
42 43
#include <linux/fs.h>
#include <linux/blkdev.h>
44
#include <linux/slab.h>
45
#include <linux/idr.h>
I
Ilya Dryomov 已提交
46
#include <linux/workqueue.h>
47 48 49

#include "rbd_types.h"

A
Alex Elder 已提交
50 51
#define RBD_DEBUG	/* Activate rbd_assert() calls */

A
Alex Elder 已提交
52 53 54 55 56 57 58 59 60
/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define	SECTOR_SHIFT	9
#define	SECTOR_SIZE	(1ULL << SECTOR_SHIFT)

61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93
/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value returns
 * -EINVAL without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
	unsigned int counter;

	counter = (unsigned int)__atomic_add_unless(v, 1, 0);
	if (counter <= (unsigned int)INT_MAX)
		return (int)counter;

	atomic_dec(v);

	return -EINVAL;
}

/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
	int counter;

	counter = atomic_dec_return(v);
	if (counter >= 0)
		return counter;

	atomic_inc(v);

	return -EINVAL;
}

A
Alex Elder 已提交
94
#define RBD_DRV_NAME "rbd"
95

96 97
#define RBD_MINORS_PER_MAJOR		256
#define RBD_SINGLE_MAJOR_PART_SHIFT	4
98

99 100
#define RBD_MAX_PARENT_CHAIN_LEN	16

101 102 103 104
#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN	\
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

105
#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */
106 107 108

#define RBD_SNAP_HEAD_NAME	"-"

109 110
#define	BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

111 112
/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
113
#define RBD_IMAGE_ID_LEN_MAX	64
114

115
#define RBD_OBJ_PREFIX_LEN_MAX	64
A
Alex Elder 已提交
116

A
Alex Elder 已提交
117 118
/* Feature bits */

A
Alex Elder 已提交
119 120 121 122
#define RBD_FEATURE_LAYERING	(1<<0)
#define RBD_FEATURE_STRIPINGV2	(1<<1)
#define RBD_FEATURES_ALL \
	    (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)
A
Alex Elder 已提交
123 124 125

/* Features supported by this (client software) implementation. */

126
#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)
A
Alex Elder 已提交
127

A
Alex Elder 已提交
128 129 130 131 132 133
/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 * MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big
 * enough to hold all possible device names.
 */
134
#define DEV_NAME_LEN		32
A
Alex Elder 已提交
135
#define MAX_INT_FORMAT_WIDTH	((5 * sizeof (int)) / 2 + 1)
136 137 138 139 140

/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
141
	/* These six fields never change for a given rbd image */
142
	char *object_prefix;
143 144 145
	__u8 obj_order;
	__u8 crypt_type;
	__u8 comp_type;
146 147 148
	u64 stripe_unit;
	u64 stripe_count;
	u64 features;		/* Might be changeable someday? */
149

A
Alex Elder 已提交
150 151 152
	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
153 154
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
155 156
};

157 158 159 160
/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
A
Alex Elder 已提交
161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
181 182 183
 */
struct rbd_spec {
	u64		pool_id;
184
	const char	*pool_name;
185

186 187
	const char	*image_id;
	const char	*image_name;
188 189

	u64		snap_id;
190
	const char	*snap_name;
191 192 193 194

	struct kref	kref;
};

195
/*
A
Alex Elder 已提交
196
 * an instance of the client.  multiple devices may share an rbd client.
197 198 199 200 201 202 203
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};

A
Alex Elder 已提交
204 205 206 207 208 209 210 211
struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define	BAD_WHICH	U32_MAX		/* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);

212 213 214
enum obj_request_type {
	OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};
A
Alex Elder 已提交
215

G
Guangliang Zhao 已提交
216 217 218
enum obj_operation_type {
	OBJ_OP_WRITE,
	OBJ_OP_READ,
219
	OBJ_OP_DISCARD,
G
Guangliang Zhao 已提交
220 221
};

222 223
enum obj_req_flags {
	OBJ_REQ_DONE,		/* completion flag: not done = 0, done = 1 */
224
	OBJ_REQ_IMG_DATA,	/* object usage: standalone = 0, image = 1 */
225 226
	OBJ_REQ_KNOWN,		/* EXISTS flag valid: no = 0, yes = 1 */
	OBJ_REQ_EXISTS,		/* target exists: no = 0, yes = 1 */
227 228
};

A
Alex Elder 已提交
229 230 231 232
struct rbd_obj_request {
	const char		*object_name;
	u64			offset;		/* object start byte */
	u64			length;		/* bytes from offset */
233
	unsigned long		flags;
A
Alex Elder 已提交
234

235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259
	/*
	 * An object request associated with an image will have its
	 * img_data flag set; a standalone object request will not.
	 *
	 * A standalone object request will have which == BAD_WHICH
	 * and a null obj_request pointer.
	 *
	 * An object request initiated in support of a layered image
	 * object (to check for its existence before a write) will
	 * have which == BAD_WHICH and a non-null obj_request pointer.
	 *
	 * Finally, an object request for rbd image data will have
	 * which != BAD_WHICH, and will have a non-null img_request
	 * pointer.  The value of which will be in the range
	 * 0..(img_request->obj_request_count-1).
	 */
	union {
		struct rbd_obj_request	*obj_request;	/* STAT op */
		struct {
			struct rbd_img_request	*img_request;
			u64			img_offset;
			/* links for img_request->obj_requests list */
			struct list_head	links;
		};
	};
A
Alex Elder 已提交
260 261 262
	u32			which;		/* posn image request list */

	enum obj_request_type	type;
263 264 265 266 267 268 269
	union {
		struct bio	*bio_list;
		struct {
			struct page	**pages;
			u32		page_count;
		};
	};
270
	struct page		**copyup_pages;
271
	u32			copyup_page_count;
A
Alex Elder 已提交
272 273 274 275

	struct ceph_osd_request	*osd_req;

	u64			xferred;	/* bytes transferred */
276
	int			result;
A
Alex Elder 已提交
277 278

	rbd_obj_callback_t	callback;
279
	struct completion	completion;
A
Alex Elder 已提交
280 281 282 283

	struct kref		kref;
};

A
Alex Elder 已提交
284
enum img_req_flags {
285 286
	IMG_REQ_WRITE,		/* I/O direction: read = 0, write = 1 */
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
287
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
288
	IMG_REQ_DISCARD,	/* discard: normal = 0, discard request = 1 */
A
Alex Elder 已提交
289 290
};

A
Alex Elder 已提交
291 292 293 294
struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	u64			offset;	/* starting image byte offset */
	u64			length;	/* byte count from offset */
A
Alex Elder 已提交
295
	unsigned long		flags;
A
Alex Elder 已提交
296
	union {
297
		u64			snap_id;	/* for reads */
A
Alex Elder 已提交
298
		struct ceph_snap_context *snapc;	/* for writes */
299 300 301 302
	};
	union {
		struct request		*rq;		/* block request */
		struct rbd_obj_request	*obj_request;	/* obj req initiator */
A
Alex Elder 已提交
303
	};
304
	struct page		**copyup_pages;
305
	u32			copyup_page_count;
A
Alex Elder 已提交
306 307 308
	spinlock_t		completion_lock;/* protects next_completion */
	u32			next_completion;
	rbd_img_callback_t	callback;
309
	u64			xferred;/* aggregate bytes transferred */
310
	int			result;	/* first nonzero obj_request result */
A
Alex Elder 已提交
311 312 313 314 315 316 317 318

	u32			obj_request_count;
	struct list_head	obj_requests;	/* rbd_obj_request structs */

	struct kref		kref;
};

#define for_each_obj_request(ireq, oreq) \
319
	list_for_each_entry(oreq, &(ireq)->obj_requests, links)
A
Alex Elder 已提交
320
#define for_each_obj_request_from(ireq, oreq) \
321
	list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
A
Alex Elder 已提交
322
#define for_each_obj_request_safe(ireq, oreq, n) \
323
	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
A
Alex Elder 已提交
324

A
Alex Elder 已提交
325
struct rbd_mapping {
A
Alex Elder 已提交
326
	u64                     size;
A
Alex Elder 已提交
327
	u64                     features;
A
Alex Elder 已提交
328 329 330
	bool			read_only;
};

331 332 333 334
/*
 * a single device
 */
struct rbd_device {
A
Alex Elder 已提交
335
	int			dev_id;		/* blkdev unique id */
336 337

	int			major;		/* blkdev assigned major */
338
	int			minor;
339 340
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

341
	u32			image_format;	/* Either 1 or 2 */
342 343 344 345
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

346
	spinlock_t		lock;		/* queue, flags, open_count */
347 348

	struct rbd_image_header	header;
349
	unsigned long		flags;		/* possibly lock protected */
350
	struct rbd_spec		*spec;
351
	struct rbd_options	*opts;
352

353
	char			*header_name;
354

355 356
	struct ceph_file_layout	layout;

357
	struct ceph_osd_event   *watch_event;
358
	struct rbd_obj_request	*watch_request;
359

360 361
	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
362
	atomic_t		parent_ref;
363
	struct rbd_device	*parent;
364

C
Christoph Hellwig 已提交
365 366 367
	/* Block layer tags. */
	struct blk_mq_tag_set	tag_set;

368 369
	/* protects updating the header */
	struct rw_semaphore     header_rwsem;
A
Alex Elder 已提交
370 371

	struct rbd_mapping	mapping;
372 373

	struct list_head	node;
374 375 376

	/* sysfs related */
	struct device		dev;
377
	unsigned long		open_count;	/* protected by lock */
378 379
};

380 381 382 383 384 385 386
/*
 * Flag bits for rbd_dev->flags.  If atomicity is required,
 * rbd_dev->lock is used to protect access.
 *
 * Currently, only the "removing" flag (which is coupled with the
 * "open_count" field) requires atomic access.
 */
387 388
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
389
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
390 391
};

392
static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */
393

394
static LIST_HEAD(rbd_dev_list);    /* devices */
395 396
static DEFINE_SPINLOCK(rbd_dev_list_lock);

A
Alex Elder 已提交
397 398
static LIST_HEAD(rbd_client_list);		/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);
399

400 401
/* Slab caches for frequently-allocated structures */

402
static struct kmem_cache	*rbd_img_request_cache;
403
static struct kmem_cache	*rbd_obj_request_cache;
404
static struct kmem_cache	*rbd_segment_name_cache;
405

406
static int rbd_major;
407 408
static DEFINE_IDA(rbd_dev_id_ida);

409 410
static struct workqueue_struct *rbd_wq;

411 412 413 414 415 416 417 418
/*
 * Default to false for now, as single-major requires >= 0.75 version of
 * userspace rbd utility.
 */
static bool single_major = false;
module_param(single_major, bool, S_IRUGO);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: false)");

419 420
static int rbd_img_request_submit(struct rbd_img_request *img_request);

421
static void rbd_dev_device_release(struct device *dev);
422

A
Alex Elder 已提交
423 424 425 426
static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);
427 428 429 430
static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
				    size_t count);
static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
				       size_t count);
431
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);
432
static void rbd_spec_put(struct rbd_spec *spec);
A
Alex Elder 已提交
433

434 435
static int rbd_dev_id_to_minor(int dev_id)
{
436
	return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
437 438 439 440
}

static int minor_to_rbd_dev_id(int minor)
{
441
	return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
442 443
}

444 445
static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
446 447
static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major);
static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major);
448 449 450 451

static struct attribute *rbd_bus_attrs[] = {
	&bus_attr_add.attr,
	&bus_attr_remove.attr,
452 453
	&bus_attr_add_single_major.attr,
	&bus_attr_remove_single_major.attr,
454
	NULL,
A
Alex Elder 已提交
455
};
456 457 458 459

static umode_t rbd_bus_is_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
460 461 462 463 464
	if (!single_major &&
	    (attr == &bus_attr_add_single_major.attr ||
	     attr == &bus_attr_remove_single_major.attr))
		return 0;

465 466 467 468 469 470 471 472
	return attr->mode;
}

static const struct attribute_group rbd_bus_group = {
	.attrs = rbd_bus_attrs,
	.is_visible = rbd_bus_is_visible,
};
__ATTRIBUTE_GROUPS(rbd_bus);
A
Alex Elder 已提交
473 474 475

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
476
	.bus_groups	= rbd_bus_groups,
A
Alex Elder 已提交
477 478 479 480 481 482 483 484 485 486 487
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =    "rbd",
	.release =      rbd_root_dev_release,
};

A
Alex Elder 已提交
488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514
static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}

A
Alex Elder 已提交
515 516 517 518 519 520 521 522 523 524 525 526
#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */
527

I
Ilya Dryomov 已提交
528
static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request);
529
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
530 531
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
A
Alex Elder 已提交
532

A
Alex Elder 已提交
533
static int rbd_dev_refresh(struct rbd_device *rbd_dev);
534
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
535
static int rbd_dev_header_info(struct rbd_device *rbd_dev);
536
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
537 538
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
539 540 541 542 543
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features);
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name);
544

545 546
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
A
Alex Elder 已提交
547
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
548
	bool removing = false;
549

A
Alex Elder 已提交
550
	if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
551 552
		return -EROFS;

553
	spin_lock_irq(&rbd_dev->lock);
554 555 556 557
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
558
	spin_unlock_irq(&rbd_dev->lock);
559 560 561
	if (removing)
		return -ENOENT;

A
Alex Elder 已提交
562
	(void) get_device(&rbd_dev->dev);
563

564 565 566
	return 0;
}

567
static void rbd_release(struct gendisk *disk, fmode_t mode)
568 569
{
	struct rbd_device *rbd_dev = disk->private_data;
570 571
	unsigned long open_count_before;

572
	spin_lock_irq(&rbd_dev->lock);
573
	open_count_before = rbd_dev->open_count--;
574
	spin_unlock_irq(&rbd_dev->lock);
575
	rbd_assert(open_count_before > 0);
576

A
Alex Elder 已提交
577
	put_device(&rbd_dev->dev);
578 579
}

G
Guangliang Zhao 已提交
580 581
static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
{
582
	int ret = 0;
G
Guangliang Zhao 已提交
583 584
	int val;
	bool ro;
585
	bool ro_changed = false;
G
Guangliang Zhao 已提交
586

587
	/* get_user() may sleep, so call it before taking rbd_dev->lock */
G
Guangliang Zhao 已提交
588 589 590 591 592 593 594 595
	if (get_user(val, (int __user *)(arg)))
		return -EFAULT;

	ro = val ? true : false;
	/* Snapshot doesn't allow to write*/
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
		return -EROFS;

596 597 598 599 600 601 602
	spin_lock_irq(&rbd_dev->lock);
	/* prevent others open this device */
	if (rbd_dev->open_count > 1) {
		ret = -EBUSY;
		goto out;
	}

G
Guangliang Zhao 已提交
603 604
	if (rbd_dev->mapping.read_only != ro) {
		rbd_dev->mapping.read_only = ro;
605
		ro_changed = true;
G
Guangliang Zhao 已提交
606 607
	}

608 609 610 611 612 613 614
out:
	spin_unlock_irq(&rbd_dev->lock);
	/* set_disk_ro() may sleep, so call it after releasing rbd_dev->lock */
	if (ret == 0 && ro_changed)
		set_disk_ro(rbd_dev->disk, ro ? 1 : 0);

	return ret;
G
Guangliang Zhao 已提交
615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641
}

static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	int ret = 0;

	switch (cmd) {
	case BLKROSET:
		ret = rbd_ioctl_set_ro(rbd_dev, arg);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
				unsigned int cmd, unsigned long arg)
{
	return rbd_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */

642 643 644
static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
645
	.release		= rbd_release,
G
Guangliang Zhao 已提交
646 647 648 649
	.ioctl			= rbd_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= rbd_compat_ioctl,
#endif
650 651 652
};

/*
653
 * Initialize an rbd client instance.  Success or not, this function
654
 * consumes ceph_opts.  Caller holds client_mutex.
655
 */
656
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
657 658 659 660
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

A
Alex Elder 已提交
661
	dout("%s:\n", __func__);
662 663 664 665 666 667 668
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

A
Alex Elder 已提交
669
	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
670
	if (IS_ERR(rbdc->client))
671
		goto out_rbdc;
A
Alex Elder 已提交
672
	ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */
673 674 675

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
676
		goto out_client;
677

A
Alex Elder 已提交
678
	spin_lock(&rbd_client_list_lock);
679
	list_add_tail(&rbdc->node, &rbd_client_list);
A
Alex Elder 已提交
680
	spin_unlock(&rbd_client_list_lock);
681

A
Alex Elder 已提交
682
	dout("%s: rbdc %p\n", __func__, rbdc);
683

684
	return rbdc;
685
out_client:
686
	ceph_destroy_client(rbdc->client);
687
out_rbdc:
688 689
	kfree(rbdc);
out_opt:
A
Alex Elder 已提交
690 691
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
A
Alex Elder 已提交
692 693
	dout("%s: error %d\n", __func__, ret);

V
Vasiliy Kulikov 已提交
694
	return ERR_PTR(ret);
695 696
}

697 698 699 700 701 702 703
static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}

704
/*
705 706
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
707
 */
708
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
709 710
{
	struct rbd_client *client_node;
711
	bool found = false;
712

A
Alex Elder 已提交
713
	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
714 715
		return NULL;

716 717 718
	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
719 720
			__rbd_get_client(client_node);

721 722 723 724 725 726 727
			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
728 729
}

730
/*
731
 * (Per device) rbd map options
732 733
 */
enum {
I
Ilya Dryomov 已提交
734
	Opt_queue_depth,
735 736 737 738
	Opt_last_int,
	/* int args above */
	Opt_last_string,
	/* string args above */
A
Alex Elder 已提交
739 740
	Opt_read_only,
	Opt_read_write,
741
	Opt_err
742 743
};

A
Alex Elder 已提交
744
static match_table_t rbd_opts_tokens = {
I
Ilya Dryomov 已提交
745
	{Opt_queue_depth, "queue_depth=%d"},
746 747
	/* int args above */
	/* string args above */
A
Alex Elder 已提交
748
	{Opt_read_only, "read_only"},
A
Alex Elder 已提交
749 750 751
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
752
	{Opt_err, NULL}
753 754
};

A
Alex Elder 已提交
755
struct rbd_options {
I
Ilya Dryomov 已提交
756
	int	queue_depth;
A
Alex Elder 已提交
757 758 759
	bool	read_only;
};

I
Ilya Dryomov 已提交
760
#define RBD_QUEUE_DEPTH_DEFAULT	BLKDEV_MAX_RQ
A
Alex Elder 已提交
761 762
#define RBD_READ_ONLY_DEFAULT	false

763 764
static int parse_rbd_opts_token(char *c, void *private)
{
A
Alex Elder 已提交
765
	struct rbd_options *rbd_opts = private;
766 767 768
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

A
Alex Elder 已提交
769
	token = match_token(c, rbd_opts_tokens, argstr);
770 771 772
	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
773
			pr_err("bad mount option arg (not int) at '%s'\n", c);
774 775 776 777
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
778
		dout("got string token %d val %s\n", token, argstr[0].from);
779 780 781 782 783
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
I
Ilya Dryomov 已提交
784 785 786 787 788 789 790
	case Opt_queue_depth:
		if (intval < 1) {
			pr_err("queue_depth out of range\n");
			return -EINVAL;
		}
		rbd_opts->queue_depth = intval;
		break;
A
Alex Elder 已提交
791 792 793 794 795 796
	case Opt_read_only:
		rbd_opts->read_only = true;
		break;
	case Opt_read_write:
		rbd_opts->read_only = false;
		break;
797
	default:
798 799
		/* libceph prints "bad option" msg */
		return -EINVAL;
800
	}
801

802 803 804
	return 0;
}

G
Guangliang Zhao 已提交
805 806 807 808 809 810 811
static char* obj_op_name(enum obj_operation_type op_type)
{
	switch (op_type) {
	case OBJ_OP_READ:
		return "read";
	case OBJ_OP_WRITE:
		return "write";
812 813
	case OBJ_OP_DISCARD:
		return "discard";
G
Guangliang Zhao 已提交
814 815 816 817 818
	default:
		return "???";
	}
}

819 820
/*
 * Get a ceph client with specific addr and configuration, if one does
821 822
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
823
 */
824
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
825
{
826
	struct rbd_client *rbdc;
827

828
	mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
829
	rbdc = rbd_client_find(ceph_opts);
830
	if (rbdc)	/* using an existing client */
A
Alex Elder 已提交
831
		ceph_destroy_options(ceph_opts);
832
	else
833
		rbdc = rbd_client_create(ceph_opts);
834
	mutex_unlock(&client_mutex);
835

836
	return rbdc;
837 838 839 840
}

/*
 * Destroy ceph client
A
Alex Elder 已提交
841
 *
A
Alex Elder 已提交
842
 * Caller must hold rbd_client_list_lock.
843 844 845 846 847
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

A
Alex Elder 已提交
848
	dout("%s: rbdc %p\n", __func__, rbdc);
849
	spin_lock(&rbd_client_list_lock);
850
	list_del(&rbdc->node);
851
	spin_unlock(&rbd_client_list_lock);
852 853 854 855 856 857 858 859 860

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
861
static void rbd_put_client(struct rbd_client *rbdc)
862
{
863 864
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
865 866
}

867 868 869 870 871
static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}

872 873
static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
874 875 876 877 878 879 880
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

A
Alex Elder 已提交
881 882 883 884 885 886 887 888 889 890
	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908
	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire the snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
909 910
}

911
/*
912 913
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
914
 */
A
Alex Elder 已提交
915
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
916
				 struct rbd_image_header_ondisk *ondisk)
917
{
A
Alex Elder 已提交
918
	struct rbd_image_header *header = &rbd_dev->header;
919 920 921 922 923
	bool first_time = header->object_prefix == NULL;
	struct ceph_snap_context *snapc;
	char *object_prefix = NULL;
	char *snap_names = NULL;
	u64 *snap_sizes = NULL;
924
	u32 snap_count;
925
	size_t size;
926
	int ret = -ENOMEM;
927
	u32 i;
928

929
	/* Allocate this now to avoid having to handle failure below */
A
Alex Elder 已提交
930

931 932
	if (first_time) {
		size_t len;
933

934 935 936 937 938 939 940 941
		len = strnlen(ondisk->object_prefix,
				sizeof (ondisk->object_prefix));
		object_prefix = kmalloc(len + 1, GFP_KERNEL);
		if (!object_prefix)
			return -ENOMEM;
		memcpy(object_prefix, ondisk->object_prefix, len);
		object_prefix[len] = '\0';
	}
A
Alex Elder 已提交
942

943
	/* Allocate the snapshot context and fill it in */
A
Alex Elder 已提交
944

945 946 947 948 949
	snap_count = le32_to_cpu(ondisk->snap_count);
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc)
		goto out_err;
	snapc->seq = le64_to_cpu(ondisk->snap_seq);
950
	if (snap_count) {
951
		struct rbd_image_snap_ondisk *snaps;
A
Alex Elder 已提交
952 953
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

954
		/* We'll keep a copy of the snapshot names... */
955

956 957 958 959
		if (snap_names_len > (u64)SIZE_MAX)
			goto out_2big;
		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!snap_names)
A
Alex Elder 已提交
960 961
			goto out_err;

962
		/* ...as well as the array of their sizes. */
963

964
		size = snap_count * sizeof (*header->snap_sizes);
965 966
		snap_sizes = kmalloc(size, GFP_KERNEL);
		if (!snap_sizes)
A
Alex Elder 已提交
967
			goto out_err;
968

A
Alex Elder 已提交
969
		/*
970 971 972
		 * Copy the names, and fill in each snapshot's id
		 * and size.
		 *
973
		 * Note that rbd_dev_v1_header_info() guarantees the
974
		 * ondisk buffer we're working with has
A
Alex Elder 已提交
975 976 977
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, this memcpy() is safe.
		 */
978 979 980 981 982 983
		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
		snaps = ondisk->snaps;
		for (i = 0; i < snap_count; i++) {
			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
		}
984
	}
A
Alex Elder 已提交
985

986
	/* We won't fail any more, fill in the header */
987

988 989 990 991 992 993 994 995 996
	if (first_time) {
		header->object_prefix = object_prefix;
		header->obj_order = ondisk->options.order;
		header->crypt_type = ondisk->options.crypt_type;
		header->comp_type = ondisk->options.comp_type;
		/* The rest aren't used for format 1 images */
		header->stripe_unit = 0;
		header->stripe_count = 0;
		header->features = 0;
997
	} else {
A
Alex Elder 已提交
998 999 1000
		ceph_put_snap_context(header->snapc);
		kfree(header->snap_names);
		kfree(header->snap_sizes);
1001
	}
1002

1003
	/* The remaining fields always get updated (when we refresh) */
1004

A
Alex Elder 已提交
1005
	header->image_size = le64_to_cpu(ondisk->image_size);
1006 1007 1008
	header->snapc = snapc;
	header->snap_names = snap_names;
	header->snap_sizes = snap_sizes;
1009

1010
	return 0;
1011 1012
out_2big:
	ret = -EIO;
A
Alex Elder 已提交
1013
out_err:
1014 1015 1016 1017
	kfree(snap_sizes);
	kfree(snap_names);
	ceph_put_snap_context(snapc);
	kfree(object_prefix);
1018

1019
	return ret;
1020 1021
}

1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036
static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}

1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060
/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}

/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is in kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
1061 1062 1063
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
1064
	u64 *found;
1065

1066 1067
	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);
1068

1069
	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
1070 1071
}

1072 1073
static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
1074
{
1075
	u32 which;
1076
	const char *snap_name;
1077

1078 1079
	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
1080
		return ERR_PTR(-ENOENT);
1081

1082 1083
	snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
	return snap_name ? snap_name : ERR_PTR(-ENOMEM);
1084 1085 1086 1087
}

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
1088 1089 1090
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

1091 1092 1093
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);
1094

1095
	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
1096 1097
}

1098 1099
static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
1100
{
1101 1102 1103 1104 1105
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;
1106

1107 1108 1109
		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;
1110

1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122
		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
1123 1124
}

1125 1126
static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
			u64 *snap_features)
1127
{
1128 1129 1130 1131 1132
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_features = rbd_dev->header.features;
	} else if (rbd_dev->image_format == 1) {
		*snap_features = 0;	/* No features for format 1 */
1133
	} else {
1134 1135
		u64 features = 0;
		int ret;
1136

1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147
		ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
		if (ret)
			return ret;

		*snap_features = features;
	}
	return 0;
}

static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
1148
	u64 snap_id = rbd_dev->spec->snap_id;
1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162
	u64 size = 0;
	u64 features = 0;
	int ret;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;
	ret = rbd_snap_features(rbd_dev, snap_id, &features);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	rbd_dev->mapping.features = features;

1163
	return 0;
1164 1165
}

A
Alex Elder 已提交
1166 1167 1168 1169
static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
1170 1171
}

1172 1173 1174 1175 1176 1177 1178
static void rbd_segment_name_free(const char *name)
{
	/* The explicit cast here is needed to drop the const qualifier */

	kmem_cache_free(rbd_segment_name_cache, (void *)name);
}

A
Alex Elder 已提交
1179
static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
1180
{
A
Alex Elder 已提交
1181 1182 1183
	char *name;
	u64 segment;
	int ret;
1184
	char *name_format;
1185

1186
	name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
A
Alex Elder 已提交
1187 1188 1189
	if (!name)
		return NULL;
	segment = offset >> rbd_dev->header.obj_order;
1190 1191 1192
	name_format = "%s.%012llx";
	if (rbd_dev->image_format == 2)
		name_format = "%s.%016llx";
1193
	ret = snprintf(name, CEPH_MAX_OID_NAME_LEN + 1, name_format,
A
Alex Elder 已提交
1194
			rbd_dev->header.object_prefix, segment);
1195
	if (ret < 0 || ret > CEPH_MAX_OID_NAME_LEN) {
A
Alex Elder 已提交
1196 1197
		pr_err("error formatting segment name for #%llu (%d)\n",
			segment, ret);
1198
		rbd_segment_name_free(name);
A
Alex Elder 已提交
1199 1200
		name = NULL;
	}
1201

A
Alex Elder 已提交
1202 1203
	return name;
}
1204

A
Alex Elder 已提交
1205 1206 1207
static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;
1208

A
Alex Elder 已提交
1209 1210 1211 1212 1213 1214 1215 1216 1217 1218
	return offset & (segment_size - 1);
}

static u64 rbd_segment_length(struct rbd_device *rbd_dev,
				u64 offset, u64 length)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	offset &= segment_size - 1;

A
Alex Elder 已提交
1219
	rbd_assert(length <= U64_MAX - offset);
A
Alex Elder 已提交
1220 1221 1222 1223
	if (offset + length > segment_size)
		length = segment_size - offset;

	return length;
1224 1225
}

1226 1227 1228 1229 1230 1231 1232 1233
/*
 * returns the size of an object in the image
 */
static u64 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1 << header->obj_order;
}

1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253
/*
 * bio helpers
 */

static void bio_chain_put(struct bio *chain)
{
	struct bio *tmp;

	while (chain) {
		tmp = chain;
		chain = chain->bi_next;
		bio_put(tmp);
	}
}

/*
 * zeros a bio chain, starting at specific offset
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
1254 1255
	struct bio_vec bv;
	struct bvec_iter iter;
1256 1257 1258 1259 1260
	unsigned long flags;
	void *buf;
	int pos = 0;

	while (chain) {
1261 1262
		bio_for_each_segment(bv, chain, iter) {
			if (pos + bv.bv_len > start_ofs) {
1263
				int remainder = max(start_ofs - pos, 0);
1264
				buf = bvec_kmap_irq(&bv, &flags);
1265
				memset(buf + remainder, 0,
1266 1267
				       bv.bv_len - remainder);
				flush_dcache_page(bv.bv_page);
1268
				bvec_kunmap_irq(buf, &flags);
1269
			}
1270
			pos += bv.bv_len;
1271 1272 1273 1274 1275 1276
		}

		chain = chain->bi_next;
	}
}

A
Alex Elder 已提交
1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294
/*
 * similar to zero_bio_chain(), zeros data defined by a page array,
 * starting at the given byte offset from the start of the array and
 * continuing up to the given end offset.  The pages array is
 * assumed to be big enough to hold all bytes up to the end.
 */
static void zero_pages(struct page **pages, u64 offset, u64 end)
{
	struct page **page = &pages[offset >> PAGE_SHIFT];

	rbd_assert(end > offset);
	rbd_assert(end - offset <= (u64)SIZE_MAX);
	while (offset < end) {
		size_t page_offset;
		size_t length;
		unsigned long flags;
		void *kaddr;

1295 1296
		page_offset = offset & ~PAGE_MASK;
		length = min_t(size_t, PAGE_SIZE - page_offset, end - offset);
A
Alex Elder 已提交
1297 1298 1299
		local_irq_save(flags);
		kaddr = kmap_atomic(*page);
		memset(kaddr + page_offset, 0, length);
1300
		flush_dcache_page(*page);
A
Alex Elder 已提交
1301 1302 1303 1304 1305 1306 1307 1308
		kunmap_atomic(kaddr);
		local_irq_restore(flags);

		offset += length;
		page++;
	}
}

1309
/*
A
Alex Elder 已提交
1310 1311
 * Clone a portion of a bio, starting at the given byte offset
 * and continuing for the number of bytes indicated.
1312
 */
A
Alex Elder 已提交
1313 1314 1315 1316
static struct bio *bio_clone_range(struct bio *bio_src,
					unsigned int offset,
					unsigned int len,
					gfp_t gfpmask)
1317
{
A
Alex Elder 已提交
1318 1319
	struct bio *bio;

K
Kent Overstreet 已提交
1320
	bio = bio_clone(bio_src, gfpmask);
A
Alex Elder 已提交
1321 1322
	if (!bio)
		return NULL;	/* ENOMEM */
1323

K
Kent Overstreet 已提交
1324
	bio_advance(bio, offset);
1325
	bio->bi_iter.bi_size = len;
A
Alex Elder 已提交
1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355

	return bio;
}

/*
 * Clone a portion of a bio chain, starting at the given byte offset
 * into the first bio in the source chain and continuing for the
 * number of bytes indicated.  The result is another bio chain of
 * exactly the given length, or a null pointer on error.
 *
 * The bio_src and offset parameters are both in-out.  On entry they
 * refer to the first source bio and the offset into that bio where
 * the start of data to be cloned is located.
 *
 * On return, bio_src is updated to refer to the bio in the source
 * chain that contains first un-cloned byte, and *offset will
 * contain the offset of that byte within that bio.
 */
static struct bio *bio_chain_clone_range(struct bio **bio_src,
					unsigned int *offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bi = *bio_src;
	unsigned int off = *offset;
	struct bio *chain = NULL;
	struct bio **end;

	/* Build up a chain of clone bios up to the limit */

1356
	if (!bi || off >= bi->bi_iter.bi_size || !len)
A
Alex Elder 已提交
1357
		return NULL;		/* Nothing to clone */
1358

A
Alex Elder 已提交
1359 1360 1361 1362 1363
	end = &chain;
	while (len) {
		unsigned int bi_size;
		struct bio *bio;

1364 1365
		if (!bi) {
			rbd_warn(NULL, "bio_chain exhausted with %u left", len);
A
Alex Elder 已提交
1366
			goto out_err;	/* EINVAL; ran out of bio's */
1367
		}
1368
		bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len);
A
Alex Elder 已提交
1369 1370 1371 1372 1373 1374
		bio = bio_clone_range(bi, off, bi_size, gfpmask);
		if (!bio)
			goto out_err;	/* ENOMEM */

		*end = bio;
		end = &bio->bi_next;
1375

A
Alex Elder 已提交
1376
		off += bi_size;
1377
		if (off == bi->bi_iter.bi_size) {
A
Alex Elder 已提交
1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388
			bi = bi->bi_next;
			off = 0;
		}
		len -= bi_size;
	}
	*bio_src = bi;
	*offset = off;

	return chain;
out_err:
	bio_chain_put(chain);
1389 1390 1391 1392

	return NULL;
}

1393 1394 1395 1396 1397
/*
 * The default/initial value for all object request flags is 0.  For
 * each flag, once its value is set to 1 it is never reset to 0
 * again.
 */
A
Alex Elder 已提交
1398
static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
1399
{
A
Alex Elder 已提交
1400
	if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
1401 1402
		struct rbd_device *rbd_dev;

A
Alex Elder 已提交
1403
		rbd_dev = obj_request->img_request->rbd_dev;
1404
		rbd_warn(rbd_dev, "obj_request %p already marked img_data",
1405 1406 1407 1408
			obj_request);
	}
}

A
Alex Elder 已提交
1409
static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
1410 1411
{
	smp_mb();
A
Alex Elder 已提交
1412
	return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
1413 1414
}

A
Alex Elder 已提交
1415
static void obj_request_done_set(struct rbd_obj_request *obj_request)
1416
{
A
Alex Elder 已提交
1417 1418
	if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
		struct rbd_device *rbd_dev = NULL;
1419

A
Alex Elder 已提交
1420 1421
		if (obj_request_img_data_test(obj_request))
			rbd_dev = obj_request->img_request->rbd_dev;
1422
		rbd_warn(rbd_dev, "obj_request %p already marked done",
1423 1424 1425 1426
			obj_request);
	}
}

A
Alex Elder 已提交
1427
static bool obj_request_done_test(struct rbd_obj_request *obj_request)
1428 1429
{
	smp_mb();
A
Alex Elder 已提交
1430
	return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
1431 1432
}

1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463
/*
 * This sets the KNOWN flag after (possibly) setting the EXISTS
 * flag.  The latter is set based on the "exists" value provided.
 *
 * Note that for our purposes once an object exists it never goes
 * away again.  It's possible that the response from two existence
 * checks are separated by the creation of the target object, and
 * the first ("doesn't exist") response arrives *after* the second
 * ("does exist").  In that case we ignore the second one.
 */
static void obj_request_existence_set(struct rbd_obj_request *obj_request,
				bool exists)
{
	if (exists)
		set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
	set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
	smp_mb();
}

static bool obj_request_known_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
}

static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
}

1464 1465 1466 1467 1468 1469 1470 1471
static bool obj_request_overlaps_parent(struct rbd_obj_request *obj_request)
{
	struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;

	return obj_request->img_offset <
	    round_up(rbd_dev->parent_overlap, rbd_obj_bytes(&rbd_dev->header));
}

A
Alex Elder 已提交
1472 1473
static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
A
Alex Elder 已提交
1474 1475
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
A
Alex Elder 已提交
1476 1477 1478 1479 1480 1481 1482
	kref_get(&obj_request->kref);
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
A
Alex Elder 已提交
1483 1484
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
A
Alex Elder 已提交
1485 1486 1487
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

1488 1489 1490 1491 1492 1493 1494
static void rbd_img_request_get(struct rbd_img_request *img_request)
{
	dout("%s: img %p (was %d)\n", __func__, img_request,
	     atomic_read(&img_request->kref.refcount));
	kref_get(&img_request->kref);
}

1495 1496
static bool img_request_child_test(struct rbd_img_request *img_request);
static void rbd_parent_request_destroy(struct kref *kref);
A
Alex Elder 已提交
1497 1498 1499 1500
static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
	rbd_assert(img_request != NULL);
A
Alex Elder 已提交
1501 1502
	dout("%s: img %p (was %d)\n", __func__, img_request,
		atomic_read(&img_request->kref.refcount));
1503 1504 1505 1506
	if (img_request_child_test(img_request))
		kref_put(&img_request->kref, rbd_parent_request_destroy);
	else
		kref_put(&img_request->kref, rbd_img_request_destroy);
A
Alex Elder 已提交
1507 1508 1509 1510 1511
}

static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
1512 1513
	rbd_assert(obj_request->img_request == NULL);

1514
	/* Image request now owns object's original reference */
A
Alex Elder 已提交
1515
	obj_request->img_request = img_request;
1516
	obj_request->which = img_request->obj_request_count;
1517 1518
	rbd_assert(!obj_request_img_data_test(obj_request));
	obj_request_img_data_set(obj_request);
A
Alex Elder 已提交
1519
	rbd_assert(obj_request->which != BAD_WHICH);
1520 1521
	img_request->obj_request_count++;
	list_add_tail(&obj_request->links, &img_request->obj_requests);
A
Alex Elder 已提交
1522 1523
	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
A
Alex Elder 已提交
1524 1525 1526 1527 1528 1529
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->which != BAD_WHICH);
1530

A
Alex Elder 已提交
1531 1532
	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
A
Alex Elder 已提交
1533
	list_del(&obj_request->links);
1534 1535 1536 1537
	rbd_assert(img_request->obj_request_count > 0);
	img_request->obj_request_count--;
	rbd_assert(obj_request->which == img_request->obj_request_count);
	obj_request->which = BAD_WHICH;
1538
	rbd_assert(obj_request_img_data_test(obj_request));
A
Alex Elder 已提交
1539 1540
	rbd_assert(obj_request->img_request == img_request);
	obj_request->img_request = NULL;
1541
	obj_request->callback = NULL;
A
Alex Elder 已提交
1542 1543 1544 1545 1546 1547
	rbd_obj_request_put(obj_request);
}

static bool obj_request_type_valid(enum obj_request_type type)
{
	switch (type) {
1548
	case OBJ_REQUEST_NODATA:
A
Alex Elder 已提交
1549
	case OBJ_REQUEST_BIO:
1550
	case OBJ_REQUEST_PAGES:
A
Alex Elder 已提交
1551 1552 1553 1554 1555 1556 1557 1558 1559
		return true;
	default:
		return false;
	}
}

static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
				struct rbd_obj_request *obj_request)
{
1560
	dout("%s %p\n", __func__, obj_request);
A
Alex Elder 已提交
1561 1562 1563
	return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
}

1564 1565 1566 1567 1568 1569 1570 1571 1572
static void rbd_obj_request_end(struct rbd_obj_request *obj_request)
{
	dout("%s %p\n", __func__, obj_request);
	ceph_osdc_cancel_request(obj_request->osd_req);
}

/*
 * Wait for an object request to complete.  If interrupted, cancel the
 * underlying osd request.
1573 1574
 *
 * @timeout: in jiffies, 0 means "wait forever"
1575
 */
1576 1577
static int __rbd_obj_request_wait(struct rbd_obj_request *obj_request,
				  unsigned long timeout)
1578
{
1579
	long ret;
1580 1581

	dout("%s %p\n", __func__, obj_request);
1582 1583 1584 1585 1586 1587
	ret = wait_for_completion_interruptible_timeout(
					&obj_request->completion,
					ceph_timeout_jiffies(timeout));
	if (ret <= 0) {
		if (ret == 0)
			ret = -ETIMEDOUT;
1588
		rbd_obj_request_end(obj_request);
1589 1590
	} else {
		ret = 0;
1591 1592
	}

1593 1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 1604 1605
	dout("%s %p ret %d\n", __func__, obj_request, (int)ret);
	return ret;
}

static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
{
	return __rbd_obj_request_wait(obj_request, 0);
}

static int rbd_obj_request_wait_timeout(struct rbd_obj_request *obj_request,
					unsigned long timeout)
{
	return __rbd_obj_request_wait(obj_request, timeout);
1606 1607
}

A
Alex Elder 已提交
1608 1609
static void rbd_img_request_complete(struct rbd_img_request *img_request)
{
1610

A
Alex Elder 已提交
1611
	dout("%s: img %p\n", __func__, img_request);
1612 1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627

	/*
	 * If no error occurred, compute the aggregate transfer
	 * count for the image request.  We could instead use
	 * atomic64_cmpxchg() to update it as each object request
	 * completes; not clear which way is better off hand.
	 */
	if (!img_request->result) {
		struct rbd_obj_request *obj_request;
		u64 xferred = 0;

		for_each_obj_request(img_request, obj_request)
			xferred += obj_request->xferred;
		img_request->xferred = xferred;
	}

A
Alex Elder 已提交
1628 1629 1630 1631 1632 1633
	if (img_request->callback)
		img_request->callback(img_request);
	else
		rbd_img_request_put(img_request);
}

A
Alex Elder 已提交
1634 1635 1636 1637 1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650
/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never change thereafter.
 */
static void img_request_write_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_WRITE, &img_request->flags);
	smp_mb();
}

static bool img_request_write_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
}

1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665
/*
 * Set the discard flag when the img_request is an discard request
 */
static void img_request_discard_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_DISCARD, &img_request->flags);
	smp_mb();
}

static bool img_request_discard_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_DISCARD, &img_request->flags) != 0;
}

1666 1667 1668 1669 1670 1671
static void img_request_child_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

1672 1673 1674 1675 1676 1677
static void img_request_child_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

1678 1679 1680 1681 1682 1683
static bool img_request_child_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
}

1684 1685 1686 1687 1688 1689
static void img_request_layered_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

1690 1691 1692 1693 1694 1695
static void img_request_layered_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

1696 1697 1698 1699 1700 1701
static bool img_request_layered_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}

1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712
static enum obj_operation_type
rbd_img_request_op_type(struct rbd_img_request *img_request)
{
	if (img_request_write_test(img_request))
		return OBJ_OP_WRITE;
	else if (img_request_discard_test(img_request))
		return OBJ_OP_DISCARD;
	else
		return OBJ_OP_READ;
}

1713 1714 1715
static void
rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
{
A
Alex Elder 已提交
1716 1717 1718
	u64 xferred = obj_request->xferred;
	u64 length = obj_request->length;

1719 1720
	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, obj_request->img_request, obj_request->result,
A
Alex Elder 已提交
1721
		xferred, length);
1722
	/*
1723 1724 1725 1726 1727 1728
	 * ENOENT means a hole in the image.  We zero-fill the entire
	 * length of the request.  A short read also implies zero-fill
	 * to the end of the request.  An error requires the whole
	 * length of the request to be reported finished with an error
	 * to the block layer.  In each case we update the xferred
	 * count to indicate the whole request was satisfied.
1729
	 */
A
Alex Elder 已提交
1730
	rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
1731
	if (obj_request->result == -ENOENT) {
A
Alex Elder 已提交
1732 1733 1734 1735
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, 0);
		else
			zero_pages(obj_request->pages, 0, length);
1736
		obj_request->result = 0;
A
Alex Elder 已提交
1737 1738 1739 1740 1741
	} else if (xferred < length && !obj_request->result) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, xferred);
		else
			zero_pages(obj_request->pages, xferred, length);
1742
	}
1743
	obj_request->xferred = length;
1744 1745 1746
	obj_request_done_set(obj_request);
}

A
Alex Elder 已提交
1747 1748
static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
{
A
Alex Elder 已提交
1749 1750
	dout("%s: obj %p cb %p\n", __func__, obj_request,
		obj_request->callback);
A
Alex Elder 已提交
1751 1752
	if (obj_request->callback)
		obj_request->callback(obj_request);
1753 1754
	else
		complete_all(&obj_request->completion);
A
Alex Elder 已提交
1755 1756
}

1757
static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
1758 1759 1760 1761 1762
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}

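/*
 * Handle completion of an osd read.  For a layered image, an ENOENT
 * result within the parent overlap means the data must be read from
 * the parent image instead; otherwise finish the read, zero-filling
 * holes and short reads.
 */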
static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = NULL;
	struct rbd_device *rbd_dev = NULL;
	bool layered = false;

	if (obj_request_img_data_test(obj_request)) {
		img_request = obj_request->img_request;
		layered = img_request && img_request_layered_test(img_request);
		rbd_dev = img_request->rbd_dev;
	}

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, img_request, obj_request->result,
		obj_request->xferred, obj_request->length);
	if (layered && obj_request->result == -ENOENT &&
			obj_request->img_offset < rbd_dev->parent_overlap)
		rbd_img_parent_read(obj_request);
	else if (img_request)
		rbd_img_obj_request_read_callback(obj_request);
	else
		obj_request_done_set(obj_request);
}

static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short write.  Set
	 * it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
	obj_request_done_set(obj_request);
}

static void rbd_osd_discard_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short discard.  Set
	 * it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
	/* discarding a non-existent object is not a problem */
	if (obj_request->result == -ENOENT)
		obj_request->result = 0;
	obj_request_done_set(obj_request);
}

/*
 * For a simple stat call there's nothing to do.  We'll do more if
 * this is part of a write sequence for a layered image.
 */
static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}

static void rbd_osd_call_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);

	if (obj_request_img_data_test(obj_request))
		rbd_osd_copyup_callback(obj_request);
	else
		obj_request_done_set(obj_request);
}

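/*
 * Completion callback for an osd request.  Record the result and
 * transferred byte count, then dispatch on the (first) op code to the
 * appropriate per-op handler.
 */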
static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
				struct ceph_msg *msg)
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;
	u16 opcode;

	dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
	rbd_assert(osd_req == obj_request->osd_req);
	if (obj_request_img_data_test(obj_request)) {
		rbd_assert(obj_request->img_request);
		rbd_assert(obj_request->which != BAD_WHICH);
	} else {
		rbd_assert(obj_request->which == BAD_WHICH);
	}

	if (osd_req->r_result < 0)
		obj_request->result = osd_req->r_result;

	rbd_assert(osd_req->r_num_ops <= CEPH_OSD_MAX_OP);

	/*
	 * We support a 64-bit length, but ultimately it has to be
	 * passed to the block layer, which just supports a 32-bit
	 * length field.
	 */
	obj_request->xferred = osd_req->r_reply_op_len[0];
	rbd_assert(obj_request->xferred < (u64)UINT_MAX);

	opcode = osd_req->r_ops[0].op;
	switch (opcode) {
	case CEPH_OSD_OP_READ:
		rbd_osd_read_callback(obj_request);
		break;
	case CEPH_OSD_OP_SETALLOCHINT:
		rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE ||
			   osd_req->r_ops[1].op == CEPH_OSD_OP_WRITEFULL);
		/* fall through */
	case CEPH_OSD_OP_WRITE:
	case CEPH_OSD_OP_WRITEFULL:
		rbd_osd_write_callback(obj_request);
		break;
	case CEPH_OSD_OP_STAT:
		rbd_osd_stat_callback(obj_request);
		break;
	case CEPH_OSD_OP_DELETE:
	case CEPH_OSD_OP_TRUNCATE:
	case CEPH_OSD_OP_ZERO:
		rbd_osd_discard_callback(obj_request);
		break;
	case CEPH_OSD_OP_CALL:
		rbd_osd_call_callback(obj_request);
		break;
	case CEPH_OSD_OP_NOTIFY_ACK:
	case CEPH_OSD_OP_WATCH:
		rbd_osd_trivial_callback(obj_request);
		break;
	default:
		rbd_warn(NULL, "%s: unsupported op %hu",
			obj_request->object_name, (unsigned short) opcode);
		break;
	}

	if (obj_request_done_test(obj_request))
		rbd_obj_request_complete(obj_request);
}

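/*
 * Finalize a read osd request: no snapshot context is needed, only
 * the snapshot id to read from.
 */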
static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;
	u64 snap_id;

	rbd_assert(osd_req != NULL);

	snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
	ceph_osdc_build_request(osd_req, obj_request->offset,
			NULL, snap_id, NULL);
}

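/*
 * Finalize a write osd request: supply the snapshot context and a
 * modification time.
 */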
static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;
	struct ceph_snap_context *snapc;
	struct timespec mtime = CURRENT_TIME;

	rbd_assert(osd_req != NULL);

	snapc = img_request ? img_request->snapc : NULL;
	ceph_osdc_build_request(osd_req, obj_request->offset,
			snapc, CEPH_NOSNAP, &mtime);
}

/*
 * Create an osd request.  A read request has one osd op (read).
 * A write request has either one (watch) or two (hint+write) osd ops.
 * (All rbd data writes are prefixed with an allocation hint op, but
 * technically osd watch is a write request, hence this distinction.)
 */
static struct ceph_osd_request *rbd_osd_req_create(
					struct rbd_device *rbd_dev,
					enum obj_operation_type op_type,
					unsigned int num_ops,
					struct rbd_obj_request *obj_request)
{
	struct ceph_snap_context *snapc = NULL;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;

	if (obj_request_img_data_test(obj_request) &&
		(op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_WRITE)) {
		struct rbd_img_request *img_request = obj_request->img_request;
		if (op_type == OBJ_OP_WRITE) {
			rbd_assert(img_request_write_test(img_request));
		} else {
			rbd_assert(img_request_discard_test(img_request));
		}
		snapc = img_request->snapc;
	}

	rbd_assert(num_ops == 1 || ((op_type == OBJ_OP_WRITE) && num_ops == 2));

	/* Allocate and initialize the request, for the num_ops ops */

	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
					  GFP_ATOMIC);
	if (!osd_req)
		return NULL;	/* ENOMEM */

	if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
		osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	else
		osd_req->r_flags = CEPH_OSD_FLAG_READ;

	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
	ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);

	return osd_req;
}

/*
 * Create a copyup osd request based on the information in the object
 * request supplied.  A copyup request has two or three osd ops, a
 * copyup method call, potentially a hint op, and a write or truncate
 * or zero op.
 */
static struct ceph_osd_request *
rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	struct ceph_snap_context *snapc;
	struct rbd_device *rbd_dev;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;
	int num_osd_ops = 3;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;
	rbd_assert(img_request);
	rbd_assert(img_request_write_test(img_request) ||
			img_request_discard_test(img_request));

	if (img_request_discard_test(img_request))
		num_osd_ops = 2;

	/* Allocate and initialize the request, for all the ops */

	snapc = img_request->snapc;
	rbd_dev = img_request->rbd_dev;
	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, num_osd_ops,
						false, GFP_ATOMIC);
	if (!osd_req)
		return NULL;	/* ENOMEM */

	osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
	ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);

	return osd_req;
}


static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
	ceph_osdc_put_request(osd_req);
}

/* object_name is assumed to be a non-null pointer and NUL-terminated */

static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
						u64 offset, u64 length,
						enum obj_request_type type)
{
	struct rbd_obj_request *obj_request;
	size_t size;
	char *name;

	rbd_assert(obj_request_type_valid(type));

	size = strlen(object_name) + 1;
	name = kmalloc(size, GFP_NOIO);
	if (!name)
		return NULL;

	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
	if (!obj_request) {
		kfree(name);
		return NULL;
	}

	obj_request->object_name = memcpy(name, object_name, size);
	obj_request->offset = offset;
	obj_request->length = length;
	obj_request->flags = 0;
	obj_request->which = BAD_WHICH;
	obj_request->type = type;
	INIT_LIST_HEAD(&obj_request->links);
	init_completion(&obj_request->completion);
	kref_init(&obj_request->kref);

	dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
		offset, length, (int)type, obj_request);

	return obj_request;
}

static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	dout("%s: obj %p\n", __func__, obj_request);

	rbd_assert(obj_request->img_request == NULL);
	rbd_assert(obj_request->which == BAD_WHICH);

	if (obj_request->osd_req)
		rbd_osd_req_destroy(obj_request->osd_req);

	rbd_assert(obj_request_type_valid(obj_request->type));
	switch (obj_request->type) {
	case OBJ_REQUEST_NODATA:
		break;		/* Nothing to do */
	case OBJ_REQUEST_BIO:
		if (obj_request->bio_list)
			bio_chain_put(obj_request->bio_list);
		break;
	case OBJ_REQUEST_PAGES:
		if (obj_request->pages)
			ceph_release_page_vector(obj_request->pages,
						obj_request->page_count);
		break;
	}

	kfree(obj_request->object_name);
	obj_request->object_name = NULL;
	kmem_cache_free(rbd_obj_request_cache, obj_request);
}

/* It's OK to call this for a device with no parent */

static void rbd_spec_put(struct rbd_spec *spec);
static void rbd_dev_unparent(struct rbd_device *rbd_dev)
{
	rbd_dev_remove_parent(rbd_dev);
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;
}

/*
 * Parent image reference counting is used to determine when an
 * image's parent fields can be safely torn down--after there are no
 * more in-flight requests to the parent image.  When the last
 * reference is dropped, cleaning them up is safe.
 */
static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return;

	counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
	if (counter > 0)
		return;

	/* Last reference; clean up parent data structures */

	if (!counter)
		rbd_dev_unparent(rbd_dev);
	else
2135
		rbd_warn(rbd_dev, "parent reference underflow");
2136 2137 2138 2139 2140 2141 2142 2143 2144 2145 2146 2147
}

/*
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or
 * false otherwise.
 */
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
	int counter = 0;

	if (!rbd_dev->parent_spec)
		return false;

	down_read(&rbd_dev->header_rwsem);
	if (rbd_dev->parent_overlap)
		counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
	up_read(&rbd_dev->header_rwsem);

	if (counter < 0)
		rbd_warn(rbd_dev, "parent reference overflow");

	return counter > 0;
}

/*
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 */
static struct rbd_img_request *rbd_img_request_create(
					struct rbd_device *rbd_dev,
					u64 offset, u64 length,
					enum obj_operation_type op_type,
					struct ceph_snap_context *snapc)
{
	struct rbd_img_request *img_request;

	img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_NOIO);
	if (!img_request)
		return NULL;

	img_request->rq = NULL;
	img_request->rbd_dev = rbd_dev;
	img_request->offset = offset;
	img_request->length = length;
	img_request->flags = 0;
	if (op_type == OBJ_OP_DISCARD) {
		img_request_discard_set(img_request);
		img_request->snapc = snapc;
	} else if (op_type == OBJ_OP_WRITE) {
		img_request_write_set(img_request);
		img_request->snapc = snapc;
	} else {
		img_request->snap_id = rbd_dev->spec->snap_id;
	}
	if (rbd_dev_parent_get(rbd_dev))
		img_request_layered_set(img_request);
	spin_lock_init(&img_request->completion_lock);
	img_request->next_completion = 0;
	img_request->callback = NULL;
	img_request->result = 0;
	img_request->obj_request_count = 0;
	INIT_LIST_HEAD(&img_request->obj_requests);
	kref_init(&img_request->kref);

	dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
		obj_op_name(op_type), offset, length, img_request);

	return img_request;
}

static void rbd_img_request_destroy(struct kref *kref)
{
	struct rbd_img_request *img_request;
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	img_request = container_of(kref, struct rbd_img_request, kref);

	dout("%s: img %p\n", __func__, img_request);

	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);
	rbd_assert(img_request->obj_request_count == 0);

	if (img_request_layered_test(img_request)) {
		img_request_layered_clear(img_request);
		rbd_dev_parent_put(img_request->rbd_dev);
	}

	if (img_request_write_test(img_request) ||
		img_request_discard_test(img_request))
		ceph_put_snap_context(img_request->snapc);

	kmem_cache_free(rbd_img_request_cache, img_request);
}

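/*
 * Create an image request directed at the parent image, linked back
 * to the object request on whose behalf it is issued.
 */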
static struct rbd_img_request *rbd_parent_request_create(
					struct rbd_obj_request *obj_request,
					u64 img_offset, u64 length)
{
	struct rbd_img_request *parent_request;
	struct rbd_device *rbd_dev;

	rbd_assert(obj_request->img_request);
	rbd_dev = obj_request->img_request->rbd_dev;

	parent_request = rbd_img_request_create(rbd_dev->parent, img_offset,
						length, OBJ_OP_READ, NULL);
	if (!parent_request)
		return NULL;

	img_request_child_set(parent_request);
	rbd_obj_request_get(obj_request);
	parent_request->obj_request = obj_request;

	return parent_request;
}

static void rbd_parent_request_destroy(struct kref *kref)
{
	struct rbd_img_request *parent_request;
	struct rbd_obj_request *orig_request;

	parent_request = container_of(kref, struct rbd_img_request, kref);
	orig_request = parent_request->obj_request;

	parent_request->obj_request = NULL;
	rbd_obj_request_put(orig_request);
	img_request_child_clear(parent_request);

	rbd_img_request_destroy(kref);
}

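/*
 * Account for the completion of one object request within an image
 * request, ending the corresponding portion of the block-layer
 * request.  Returns true while more of the image request remains to
 * be completed.
 */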
static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	unsigned int xferred;
	int result;
	bool more;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;

	rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
	xferred = (unsigned int)obj_request->xferred;
	result = obj_request->result;
	if (result) {
		struct rbd_device *rbd_dev = img_request->rbd_dev;
		enum obj_operation_type op_type;

		if (img_request_discard_test(img_request))
			op_type = OBJ_OP_DISCARD;
		else if (img_request_write_test(img_request))
			op_type = OBJ_OP_WRITE;
		else
			op_type = OBJ_OP_READ;

		rbd_warn(rbd_dev, "%s %llx at %llx (%llx)",
			obj_op_name(op_type), obj_request->length,
			obj_request->img_offset, obj_request->offset);
		rbd_warn(rbd_dev, "  result %d xferred %x",
			result, xferred);
		if (!img_request->result)
			img_request->result = result;
		/*
		 * Need to end I/O on the entire obj_request worth of
		 * bytes in case of error.
		 */
		xferred = obj_request->length;
	}

	/* Image object requests don't own their page array */

	if (obj_request->type == OBJ_REQUEST_PAGES) {
		obj_request->pages = NULL;
		obj_request->page_count = 0;
	}

	if (img_request_child_test(img_request)) {
		rbd_assert(img_request->obj_request != NULL);
		more = obj_request->which < img_request->obj_request_count - 1;
	} else {
		rbd_assert(img_request->rq != NULL);

		more = blk_update_request(img_request->rq, result, xferred);
		if (!more)
			__blk_mq_end_request(img_request->rq, result);
	}

	return more;
}

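/*
 * Per-object completion callback for an image request.  Object
 * requests are completed in order: starting from the next expected
 * completion, end every request that has finished until an unfinished
 * one is found.
 */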
static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	u32 which = obj_request->which;
	bool more = true;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;

	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
	rbd_assert(img_request != NULL);
	rbd_assert(img_request->obj_request_count > 0);
	rbd_assert(which != BAD_WHICH);
	rbd_assert(which < img_request->obj_request_count);

	spin_lock_irq(&img_request->completion_lock);
	if (which != img_request->next_completion)
		goto out;

	for_each_obj_request_from(img_request, obj_request) {
		rbd_assert(more);
		rbd_assert(which < img_request->obj_request_count);

		if (!obj_request_done_test(obj_request))
			break;
		more = rbd_img_obj_end_request(obj_request);
		which++;
	}

	rbd_assert(more ^ (which == img_request->obj_request_count));
	img_request->next_completion = which;
out:
	spin_unlock_irq(&img_request->completion_lock);
	rbd_img_request_put(img_request);

	if (!more)
		rbd_img_request_complete(img_request);
}

/*
 * Add individual osd ops to the given ceph_osd_request and prepare
 * them for submission. num_ops is the current number of
 * osd operations already added to the object request.
 */
static void rbd_img_obj_request_fill(struct rbd_obj_request *obj_request,
				struct ceph_osd_request *osd_request,
				enum obj_operation_type op_type,
				unsigned int num_ops)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct rbd_device *rbd_dev = img_request->rbd_dev;
	u64 object_size = rbd_obj_bytes(&rbd_dev->header);
	u64 offset = obj_request->offset;
	u64 length = obj_request->length;
	u64 img_end;
	u16 opcode;

	if (op_type == OBJ_OP_DISCARD) {
		if (!offset && length == object_size &&
		    (!img_request_layered_test(img_request) ||
		     !obj_request_overlaps_parent(obj_request))) {
			opcode = CEPH_OSD_OP_DELETE;
		} else if ((offset + length == object_size)) {
			opcode = CEPH_OSD_OP_TRUNCATE;
		} else {
			down_read(&rbd_dev->header_rwsem);
			img_end = rbd_dev->header.image_size;
			up_read(&rbd_dev->header_rwsem);

			if (obj_request->img_offset + length == img_end)
				opcode = CEPH_OSD_OP_TRUNCATE;
			else
				opcode = CEPH_OSD_OP_ZERO;
		}
	} else if (op_type == OBJ_OP_WRITE) {
		if (!offset && length == object_size)
			opcode = CEPH_OSD_OP_WRITEFULL;
		else
			opcode = CEPH_OSD_OP_WRITE;
		osd_req_op_alloc_hint_init(osd_request, num_ops,
					object_size, object_size);
		num_ops++;
	} else {
		opcode = CEPH_OSD_OP_READ;
	}

	if (opcode == CEPH_OSD_OP_DELETE)
		osd_req_op_init(osd_request, num_ops, opcode, 0);
	else
		osd_req_op_extent_init(osd_request, num_ops, opcode,
				       offset, length, 0, 0);

	if (obj_request->type == OBJ_REQUEST_BIO)
		osd_req_op_extent_osd_data_bio(osd_request, num_ops,
					obj_request->bio_list, length);
	else if (obj_request->type == OBJ_REQUEST_PAGES)
		osd_req_op_extent_osd_data_pages(osd_request, num_ops,
					obj_request->pages, length,
					offset & ~PAGE_MASK, false, false);

	/* Discards are also writes */
	if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
		rbd_osd_req_format_write(obj_request);
	else
		rbd_osd_req_format_read(obj_request);
}

/*
 * Split up an image request into one or more object requests, each
 * to a different object.  The "type" parameter indicates whether
 * "data_desc" is the pointer to the head of a list of bio
 * structures, or the base of a page array.  In either case this
 * function assumes data_desc describes memory sufficient to hold
 * all data described by the image request.
 */
static int rbd_img_request_fill(struct rbd_img_request *img_request,
					enum obj_request_type type,
					void *data_desc)
{
	struct rbd_device *rbd_dev = img_request->rbd_dev;
	struct rbd_obj_request *obj_request = NULL;
	struct rbd_obj_request *next_obj_request;
	struct bio *bio_list = NULL;
	unsigned int bio_offset = 0;
	struct page **pages = NULL;
	enum obj_operation_type op_type;
	u64 img_offset;
	u64 resid;

	dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
		(int)type, data_desc);

	img_offset = img_request->offset;
	resid = img_request->length;
	rbd_assert(resid > 0);
	op_type = rbd_img_request_op_type(img_request);

	if (type == OBJ_REQUEST_BIO) {
		bio_list = data_desc;
		rbd_assert(img_offset ==
			   bio_list->bi_iter.bi_sector << SECTOR_SHIFT);
	} else if (type == OBJ_REQUEST_PAGES) {
		pages = data_desc;
	}

	while (resid) {
		struct ceph_osd_request *osd_req;
		const char *object_name;
		u64 offset;
		u64 length;

		object_name = rbd_segment_name(rbd_dev, img_offset);
		if (!object_name)
			goto out_unwind;
		offset = rbd_segment_offset(rbd_dev, img_offset);
		length = rbd_segment_length(rbd_dev, img_offset, resid);
		obj_request = rbd_obj_request_create(object_name,
						offset, length, type);
		/* object request has its own copy of the object name */
		rbd_segment_name_free(object_name);
		if (!obj_request)
			goto out_unwind;

		/*
		 * set obj_request->img_request before creating the
		 * osd_request so that it gets the right snapc
		 */
		rbd_img_obj_request_add(img_request, obj_request);

		if (type == OBJ_REQUEST_BIO) {
			unsigned int clone_size;

			rbd_assert(length <= (u64)UINT_MAX);
			clone_size = (unsigned int)length;
			obj_request->bio_list =
					bio_chain_clone_range(&bio_list,
								&bio_offset,
								clone_size,
								GFP_ATOMIC);
			if (!obj_request->bio_list)
				goto out_unwind;
		} else if (type == OBJ_REQUEST_PAGES) {
			unsigned int page_count;

			obj_request->pages = pages;
			page_count = (u32)calc_pages_for(offset, length);
			obj_request->page_count = page_count;
			if ((offset + length) & ~PAGE_MASK)
				page_count--;	/* more on last page */
			pages += page_count;
		}

		osd_req = rbd_osd_req_create(rbd_dev, op_type,
					(op_type == OBJ_OP_WRITE) ? 2 : 1,
					obj_request);
		if (!osd_req)
			goto out_unwind;

		obj_request->osd_req = osd_req;
		obj_request->callback = rbd_img_obj_callback;
		obj_request->img_offset = img_offset;

		rbd_img_obj_request_fill(obj_request, osd_req, op_type, 0);

		rbd_img_request_get(img_request);

		img_offset += length;
		resid -= length;
	}

	return 0;

out_unwind:
	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);

	return -ENOMEM;
}

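/*
 * Completion callback for a copyup request: release the pages that
 * held the parent data and, on success, report the full
 * originally-requested transfer length.
 */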
static void
rbd_osd_copyup_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	struct rbd_device *rbd_dev;
	struct page **pages;
	u32 page_count;

	dout("%s: obj %p\n", __func__, obj_request);

	rbd_assert(obj_request->type == OBJ_REQUEST_BIO ||
		obj_request->type == OBJ_REQUEST_NODATA);
	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;
	rbd_assert(img_request);

	rbd_dev = img_request->rbd_dev;
	rbd_assert(rbd_dev);

	pages = obj_request->copyup_pages;
	rbd_assert(pages != NULL);
	obj_request->copyup_pages = NULL;
	page_count = obj_request->copyup_page_count;
	rbd_assert(page_count);
	obj_request->copyup_page_count = 0;
	ceph_release_page_vector(pages, page_count);

	/*
	 * We want the transfer count to reflect the size of the
	 * original write request.  There is no such thing as a
	 * successful short write, so if the request was successful
	 * we can just set it to the originally-requested length.
	 */
	if (!obj_request->result)
		obj_request->xferred = obj_request->length;

	obj_request_done_set(obj_request);
}

static void
rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
{
	struct rbd_obj_request *orig_request;
	struct ceph_osd_request *osd_req;
	struct ceph_osd_client *osdc;
	struct rbd_device *rbd_dev;
	struct page **pages;
	enum obj_operation_type op_type;
	u32 page_count;
	int img_result;
	u64 parent_length;

	rbd_assert(img_request_child_test(img_request));

	/* First get what we need from the image request */

	pages = img_request->copyup_pages;
	rbd_assert(pages != NULL);
	img_request->copyup_pages = NULL;
	page_count = img_request->copyup_page_count;
	rbd_assert(page_count);
	img_request->copyup_page_count = 0;

	orig_request = img_request->obj_request;
	rbd_assert(orig_request != NULL);
	rbd_assert(obj_request_type_valid(orig_request->type));
	img_result = img_request->result;
	parent_length = img_request->length;
	rbd_assert(parent_length == img_request->xferred);
	rbd_img_request_put(img_request);

	rbd_assert(orig_request->img_request);
	rbd_dev = orig_request->img_request->rbd_dev;
	rbd_assert(rbd_dev);

	/*
	 * If the overlap has become 0 (most likely because the
	 * image has been flattened) we need to free the pages
	 * and re-submit the original write request.
	 */
	if (!rbd_dev->parent_overlap) {
		struct ceph_osd_client *osdc;

		ceph_release_page_vector(pages, page_count);
		osdc = &rbd_dev->rbd_client->client->osdc;
		img_result = rbd_obj_request_submit(osdc, orig_request);
		if (!img_result)
			return;
	}

	if (img_result)
		goto out_err;

	/*
	 * The original osd request is of no use to us any more.
	 * We need a new one that can hold the three ops in a copyup
	 * request.  Allocate the new copyup osd request for the
	 * original request, and release the old one.
	 */
	img_result = -ENOMEM;
	osd_req = rbd_osd_req_create_copyup(orig_request);
	if (!osd_req)
		goto out_err;
	rbd_osd_req_destroy(orig_request->osd_req);
	orig_request->osd_req = osd_req;
	orig_request->copyup_pages = pages;
	orig_request->copyup_page_count = page_count;

	/* Initialize the copyup op */

	osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
	osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
						false, false);

	/* Add the other op(s) */

	op_type = rbd_img_request_op_type(orig_request->img_request);
	rbd_img_obj_request_fill(orig_request, osd_req, op_type, 1);

	/* All set, send it off. */

	osdc = &rbd_dev->rbd_client->client->osdc;
	img_result = rbd_obj_request_submit(osdc, orig_request);
	if (!img_result)
		return;
out_err:
	/* Record the error code and complete the request */

	orig_request->result = img_result;
	orig_request->xferred = 0;
	obj_request_done_set(orig_request);
	rbd_obj_request_complete(orig_request);
}

/*
 * Read from the parent image the range of data that covers the
 * entire target of the given object request.  This is used for
 * satisfying a layered image write request when the target of an
 * object request from the image request does not exist.
 *
 * A page array big enough to hold the returned data is allocated
 * and supplied to rbd_img_request_fill() as the "data descriptor."
 * When the read completes, this page array will be transferred to
 * the original object request for the copyup operation.
 *
 * If an error occurs, record it as the result of the original
 * object request and mark it done so it gets completed.
 */
static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = NULL;
	struct rbd_img_request *parent_request = NULL;
	struct rbd_device *rbd_dev;
	u64 img_offset;
	u64 length;
	struct page **pages = NULL;
	u32 page_count;
	int result;

	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request_type_valid(obj_request->type));

	img_request = obj_request->img_request;
	rbd_assert(img_request != NULL);
	rbd_dev = img_request->rbd_dev;
	rbd_assert(rbd_dev->parent != NULL);

	/*
	 * Determine the byte range covered by the object in the
	 * child image to which the original request was to be sent.
	 */
	img_offset = obj_request->img_offset - obj_request->offset;
	length = (u64)1 << rbd_dev->header.obj_order;

	/*
	 * There is no defined parent data beyond the parent
	 * overlap, so limit what we read at that boundary if
	 * necessary.
	 */
	if (img_offset + length > rbd_dev->parent_overlap) {
		rbd_assert(img_offset < rbd_dev->parent_overlap);
		length = rbd_dev->parent_overlap - img_offset;
	}

	/*
	 * Allocate a page array big enough to receive the data read
	 * from the parent.
	 */
	page_count = (u32)calc_pages_for(0, length);
	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
	if (IS_ERR(pages)) {
		result = PTR_ERR(pages);
		pages = NULL;
		goto out_err;
	}

	result = -ENOMEM;
	parent_request = rbd_parent_request_create(obj_request,
						img_offset, length);
	if (!parent_request)
		goto out_err;

	result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
	if (result)
		goto out_err;
	parent_request->copyup_pages = pages;
	parent_request->copyup_page_count = page_count;

	parent_request->callback = rbd_img_obj_parent_read_full_callback;
	result = rbd_img_request_submit(parent_request);
	if (!result)
		return 0;

	parent_request->copyup_pages = NULL;
	parent_request->copyup_page_count = 0;
	parent_request->obj_request = NULL;
	rbd_obj_request_put(obj_request);
out_err:
	if (pages)
		ceph_release_page_vector(pages, page_count);
	if (parent_request)
		rbd_img_request_put(parent_request);
	obj_request->result = result;
	obj_request->xferred = 0;
	obj_request_done_set(obj_request);

	return result;
}

static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_obj_request *orig_request;
	struct rbd_device *rbd_dev;
	int result;

	rbd_assert(!obj_request_img_data_test(obj_request));

	/*
	 * All we need from the object request is the original
	 * request and the result of the STAT op.  Grab those, then
	 * we're done with the request.
	 */
	orig_request = obj_request->obj_request;
	obj_request->obj_request = NULL;
	rbd_obj_request_put(orig_request);
	rbd_assert(orig_request);
	rbd_assert(orig_request->img_request);

	result = obj_request->result;
	obj_request->result = 0;

	dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
		obj_request, orig_request, result,
		obj_request->xferred, obj_request->length);
	rbd_obj_request_put(obj_request);

	/*
	 * If the overlap has become 0 (most likely because the
	 * image has been flattened) we need to free the pages
	 * and re-submit the original write request.
	 */
	rbd_dev = orig_request->img_request->rbd_dev;
	if (!rbd_dev->parent_overlap) {
		struct ceph_osd_client *osdc;

		osdc = &rbd_dev->rbd_client->client->osdc;
		result = rbd_obj_request_submit(osdc, orig_request);
		if (!result)
			return;
	}

	/*
	 * Our only purpose here is to determine whether the object
	 * exists, and we don't want to treat the non-existence as
	 * an error.  If something else comes back, transfer the
	 * error to the original request and complete it now.
	 */
	if (!result) {
		obj_request_existence_set(orig_request, true);
	} else if (result == -ENOENT) {
		obj_request_existence_set(orig_request, false);
	} else if (result) {
		orig_request->result = result;
		goto out;
	}

	/*
	 * Resubmit the original request now that we have recorded
	 * whether the target object exists.
	 */
	orig_request->result = rbd_img_obj_request_submit(orig_request);
out:
	if (orig_request->result)
		rbd_obj_request_complete(orig_request);
}

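/*
 * Issue a STAT for the target object to find out whether it exists.
 * The existence result is recorded by rbd_img_obj_exists_callback(),
 * which then resubmits the original object request.
 */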
static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
{
	struct rbd_obj_request *stat_request;
	struct rbd_device *rbd_dev;
	struct ceph_osd_client *osdc;
	struct page **pages = NULL;
	u32 page_count;
	size_t size;
	int ret;

	/*
	 * The response data for a STAT call consists of:
	 *     le64 length;
	 *     struct {
	 *         le32 tv_sec;
	 *         le32 tv_nsec;
	 *     } mtime;
	 */
	size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
	page_count = (u32)calc_pages_for(0, size);
	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = -ENOMEM;
	stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
							OBJ_REQUEST_PAGES);
	if (!stat_request)
		goto out;

	rbd_obj_request_get(obj_request);
	stat_request->obj_request = obj_request;
	stat_request->pages = pages;
	stat_request->page_count = page_count;

	rbd_assert(obj_request->img_request);
	rbd_dev = obj_request->img_request->rbd_dev;
	stat_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
						   stat_request);
	if (!stat_request->osd_req)
		goto out;
	stat_request->callback = rbd_img_obj_exists_callback;

	osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT, 0);
	osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
					false, false);
	rbd_osd_req_format_read(stat_request);

	osdc = &rbd_dev->rbd_client->client->osdc;
	ret = rbd_obj_request_submit(osdc, stat_request);
out:
	if (ret)
		rbd_obj_request_put(obj_request);

	return ret;
}

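/*
 * Return true if an image object request can be submitted directly,
 * with no need to check the target's existence or copy up data from
 * the parent image first.
 */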
static bool img_obj_request_simple(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	struct rbd_device *rbd_dev;

	rbd_assert(obj_request_img_data_test(obj_request));

	img_request = obj_request->img_request;
	rbd_assert(img_request);
	rbd_dev = img_request->rbd_dev;

	/* Reads */
	if (!img_request_write_test(img_request) &&
	    !img_request_discard_test(img_request))
		return true;

	/* Non-layered writes */
	if (!img_request_layered_test(img_request))
		return true;

	/*
	 * Layered writes outside of the parent overlap range don't
	 * share any data with the parent.
	 */
	if (!obj_request_overlaps_parent(obj_request))
		return true;

	/*
	 * Entire-object layered writes - we will overwrite whatever
	 * parent data there is anyway.
	 */
	if (!obj_request->offset &&
	    obj_request->length == rbd_obj_bytes(&rbd_dev->header))
		return true;

	/*
	 * If the object is known to already exist, its parent data has
	 * already been copied.
	 */
	if (obj_request_known_test(obj_request) &&
	    obj_request_exists_test(obj_request))
		return true;

	return false;
}

static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
{
	if (img_obj_request_simple(obj_request)) {
		struct rbd_device *rbd_dev;
		struct ceph_osd_client *osdc;

		rbd_dev = obj_request->img_request->rbd_dev;
		osdc = &rbd_dev->rbd_client->client->osdc;

		return rbd_obj_request_submit(osdc, obj_request);
	}

	/*
	 * It's a layered write.  The target object might exist but
	 * we may not know that yet.  If we know it doesn't exist,
	 * start by reading the data for the full target object from
	 * the parent so we can use it for a copyup to the target.
2968
	 */
2969
	if (obj_request_known_test(obj_request))
2970 2971 2972
		return rbd_img_obj_parent_read_full(obj_request);

	/* We don't know whether the target exists.  Go find out. */
2973 2974 2975 2976

	return rbd_img_obj_exists_submit(obj_request);
}

A
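/* Submit each object request that makes up the image request. */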
{
	struct rbd_obj_request *obj_request;
2980
	struct rbd_obj_request *next_obj_request;
A
A
2983
	for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
A

2986
		ret = rbd_img_obj_request_submit(obj_request);
A
			return ret;
	}

	return 0;
}
A
static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
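/*
 * Completion callback for a read issued to the parent image on behalf
 * of a child object request.  Transfer the result, counting only data
 * that lies within the parent overlap.
 */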
{
	struct rbd_obj_request *obj_request;
A
	u64 obj_end;
2999 3000
	u64 img_xferred;
	int img_result;
A
	rbd_assert(img_request_child_test(img_request));

3004 3005
	/* First get what we need from the image request and release it */

A
3007 3008 3009 3010 3011 3012 3013 3014 3015
	img_xferred = img_request->xferred;
	img_result = img_request->result;
	rbd_img_request_put(img_request);

	/*
	 * If the overlap has become 0 (most likely because the
	 * image has been flattened) we need to re-submit the
	 * original request.
	 */
A
	rbd_assert(obj_request->img_request);
3018 3019 3020 3021 3022 3023 3024 3025 3026
	rbd_dev = obj_request->img_request->rbd_dev;
	if (!rbd_dev->parent_overlap) {
		struct ceph_osd_client *osdc;

		osdc = &rbd_dev->rbd_client->client->osdc;
		img_result = rbd_obj_request_submit(osdc, obj_request);
		if (!img_result)
			return;
	}
A
3028
	obj_request->result = img_result;
A
		goto out;

	/*
	 * We need to zero anything beyond the parent overlap
	 * boundary.  Since rbd_img_obj_request_read_callback()
	 * will zero anything beyond the end of a short read, an
	 * easy way to do this is to pretend the data from the
	 * parent came up short--ending at the overlap boundary.
	 */
	rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
	obj_end = obj_request->img_offset + obj_request->length;
	if (obj_end > rbd_dev->parent_overlap) {
		u64 xferred = 0;

		if (obj_request->img_offset < rbd_dev->parent_overlap)
			xferred = rbd_dev->parent_overlap -
					obj_request->img_offset;
A
3048
		obj_request->xferred = min(img_xferred, xferred);
A
3050
		obj_request->xferred = img_xferred;
A
out:
A
	rbd_obj_request_complete(obj_request);
}

static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	int result;

	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request->img_request != NULL);
	rbd_assert(obj_request->result == (s32) -ENOENT);
3065
	rbd_assert(obj_request_type_valid(obj_request->type));
A
	/* rbd_read_finish(obj_request, obj_request->length); */
3068
	img_request = rbd_parent_request_create(obj_request,
A
3070
						obj_request->length);
A
	if (!img_request)
		goto out_err;

	if (obj_request->type == OBJ_REQUEST_BIO)
		result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
						obj_request->bio_list);
	else
		result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
						obj_request->pages);
	if (result)
		goto out_err;

	img_request->callback = rbd_img_parent_read_callback;
	result = rbd_img_request_submit(img_request);
	if (result)
		goto out_err;

	return;
out_err:
	if (img_request)
		rbd_img_request_put(img_request);
	obj_request->result = result;
	obj_request->xferred = 0;
	obj_request_done_set(obj_request);
}

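/* Acknowledge a notify event on the header object, synchronously. */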
static int rbd_obj_notify_ack_sync(struct rbd_device *rbd_dev, u64 notify_id)
{
	struct rbd_obj_request *obj_request;
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	int ret;

	obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
							OBJ_REQUEST_NODATA);
	if (!obj_request)
		return -ENOMEM;

	ret = -ENOMEM;
	obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
						  obj_request);
	if (!obj_request->osd_req)
		goto out;

	osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
					notify_id, 0, 0);
	rbd_osd_req_format_read(obj_request);

	ret = rbd_obj_request_submit(osdc, obj_request);
	if (ret)
		goto out;
	ret = rbd_obj_request_wait(obj_request);
out:
	rbd_obj_request_put(obj_request);

	return ret;
}

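/* Watch callback: refresh the image header and acknowledge the notify. */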
static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
{
	struct rbd_device *rbd_dev = (struct rbd_device *)data;
	int ret;

	if (!rbd_dev)
		return;

	dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
		rbd_dev->header_name, (unsigned long long)notify_id,
		(unsigned int)opcode);

	/*
	 * Until adequate refresh error handling is in place, there is
	 * not much we can do here, except warn.
	 *
	 * See http://tracker.ceph.com/issues/5040
	 */
	ret = rbd_dev_refresh(rbd_dev);
	if (ret)
		rbd_warn(rbd_dev, "refresh failed: %d", ret);

	ret = rbd_obj_notify_ack_sync(rbd_dev, notify_id);
	if (ret)
		rbd_warn(rbd_dev, "notify_ack ret %d", ret);
}

/*
 * Send a (un)watch request and wait for the ack.  Return a request
 * with a ref held on success or error.
 */
static struct rbd_obj_request *rbd_obj_watch_request_helper(
						struct rbd_device *rbd_dev,
						bool watch)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct ceph_options *opts = osdc->client->options;
	struct rbd_obj_request *obj_request;
	int ret;

	obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
					     OBJ_REQUEST_NODATA);
	if (!obj_request)
		return ERR_PTR(-ENOMEM);

	obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_WRITE, 1,
						  obj_request);
	if (!obj_request->osd_req) {
		ret = -ENOMEM;
		goto out;
	}

	osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
			      rbd_dev->watch_event->cookie, 0, watch);
	rbd_osd_req_format_write(obj_request);

	if (watch)
		ceph_osdc_set_request_linger(osdc, obj_request->osd_req);

	ret = rbd_obj_request_submit(osdc, obj_request);
	if (ret)
		goto out;

	ret = rbd_obj_request_wait_timeout(obj_request, opts->mount_timeout);
	if (ret)
		goto out;

	ret = obj_request->result;
	if (ret) {
		if (watch)
			rbd_obj_request_end(obj_request);
		goto out;
	}

	return obj_request;

out:
	rbd_obj_request_put(obj_request);
	return ERR_PTR(ret);
}

/*
 * Initiate a watch request, synchronously.
 */
static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_obj_request *obj_request;
	int ret;

	rbd_assert(!rbd_dev->watch_event);
	rbd_assert(!rbd_dev->watch_request);

	ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
				     &rbd_dev->watch_event);
	if (ret < 0)
		return ret;

	obj_request = rbd_obj_watch_request_helper(rbd_dev, true);
	if (IS_ERR(obj_request)) {
		ceph_osdc_cancel_event(rbd_dev->watch_event);
		rbd_dev->watch_event = NULL;
		return PTR_ERR(obj_request);
	}

	/*
	 * A watch request is set to linger, so the underlying osd
	 * request won't go away until we unregister it.  We retain
	 * a pointer to the object request during that time (in
	 * rbd_dev->watch_request), so we'll keep a reference to it.
	 * We'll drop that reference after we've unregistered it in
	 * rbd_dev_header_unwatch_sync().
	 */
	rbd_dev->watch_request = obj_request;

	return 0;
}

/*
 * Tear down a watch request, synchronously.
 */
static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
{
	struct rbd_obj_request *obj_request;

	rbd_assert(rbd_dev->watch_event);
	rbd_assert(rbd_dev->watch_request);

	rbd_obj_request_end(rbd_dev->watch_request);
	rbd_obj_request_put(rbd_dev->watch_request);
	rbd_dev->watch_request = NULL;

	obj_request = rbd_obj_watch_request_helper(rbd_dev, false);
	if (!IS_ERR(obj_request))
		rbd_obj_request_put(obj_request);
	else
		rbd_warn(rbd_dev, "unable to tear down watch request (%ld)",
			 PTR_ERR(obj_request));

3268 3269
	ceph_osdc_cancel_event(rbd_dev->watch_event);
	rbd_dev->watch_event = NULL;
3270 3271
}

3272
/*
3273 3274
 * Synchronous osd object method call.  Returns the number of bytes
 * returned in the outbound buffer, or a negative error code.
3275 3276 3277 3278 3279
 */
static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
			     const char *object_name,
			     const char *class_name,
			     const char *method_name,
3280
			     const void *outbound,
3281
			     size_t outbound_size,
3282
			     void *inbound,
3283
			     size_t inbound_size)
3284
{
3285
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3286 3287 3288 3289 3290 3291
	struct rbd_obj_request *obj_request;
	struct page **pages;
	u32 page_count;
	int ret;

	/*
	 * Method calls are ultimately read operations.  The result
	 * should be placed into the inbound buffer provided.  They
	 * also supply outbound data--parameters for the object
	 * method.  Currently if this is present it will be a
	 * snapshot id.
	 */
	page_count = (u32)calc_pages_for(0, inbound_size);
	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = -ENOMEM;
	obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
							OBJ_REQUEST_PAGES);
	if (!obj_request)
		goto out;

	obj_request->pages = pages;
	obj_request->page_count = page_count;

	obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
						  obj_request);
	if (!obj_request->osd_req)
		goto out;

	osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
					class_name, method_name);
	if (outbound_size) {
		struct ceph_pagelist *pagelist;

		pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
		if (!pagelist)
			goto out;

		ceph_pagelist_init(pagelist);
		ceph_pagelist_append(pagelist, outbound, outbound_size);
		osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
						pagelist);
	}
	osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
					obj_request->pages, inbound_size,
					0, false, false);
	rbd_osd_req_format_read(obj_request);

	ret = rbd_obj_request_submit(osdc, obj_request);
	if (ret)
		goto out;
	ret = rbd_obj_request_wait(obj_request);
	if (ret)
		goto out;

	ret = obj_request->result;
	if (ret < 0)
		goto out;

	rbd_assert(obj_request->xferred < (u64)INT_MAX);
	ret = (int)obj_request->xferred;
	ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
out:
	if (obj_request)
		rbd_obj_request_put(obj_request);
	else
		ceph_release_page_vector(pages, page_count);

	return ret;
}

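/*
 * Work function that services one block request: validate it, build
 * the corresponding image request and submit it to the osds.
 */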
static void rbd_queue_workfn(struct work_struct *work)
{
	struct request *rq = blk_mq_rq_from_pdu(work);
	struct rbd_device *rbd_dev = rq->q->queuedata;
	struct rbd_img_request *img_request;
	struct ceph_snap_context *snapc = NULL;
	u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
	u64 length = blk_rq_bytes(rq);
	enum obj_operation_type op_type;
	u64 mapping_size;
	int result;

	if (rq->cmd_type != REQ_TYPE_FS) {
		dout("%s: non-fs request type %d\n", __func__,
			(int) rq->cmd_type);
		result = -EIO;
		goto err;
	}

	if (rq->cmd_flags & REQ_DISCARD)
		op_type = OBJ_OP_DISCARD;
	else if (rq->cmd_flags & REQ_WRITE)
		op_type = OBJ_OP_WRITE;
	else
		op_type = OBJ_OP_READ;

	/* Ignore/skip any zero-length requests */

	if (!length) {
		dout("%s: zero-length request\n", __func__);
		result = 0;
		goto err_rq;
	}

	/* Only reads are allowed to a read-only device */
I
Ilya Dryomov 已提交
3394

G
Guangliang Zhao 已提交
3395
	if (op_type != OBJ_OP_READ) {
I
Ilya Dryomov 已提交
3396 3397 3398
		if (rbd_dev->mapping.read_only) {
			result = -EROFS;
			goto err_rq;
A
Alex Elder 已提交
3399
		}
I
Ilya Dryomov 已提交
3400 3401
		rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
	}
A
Alex Elder 已提交
3402

I
Ilya Dryomov 已提交
3403 3404 3405 3406 3407 3408 3409 3410 3411 3412 3413 3414
	/*
	 * Quit early if the mapped snapshot no longer exists.  It's
	 * still possible the snapshot will have disappeared by the
	 * time our request arrives at the osd, but there's no sense in
	 * sending it if we already know.
	 */
	if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
		dout("request for non-existent snapshot");
		rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
		result = -ENXIO;
		goto err_rq;
	}
A
Alex Elder 已提交
3415

I
Ilya Dryomov 已提交
3416 3417 3418 3419 3420 3421
	if (offset && length > U64_MAX - offset + 1) {
		rbd_warn(rbd_dev, "bad request range (%llu~%llu)", offset,
			 length);
		result = -EINVAL;
		goto err_rq;	/* Shouldn't happen */
	}
A
Alex Elder 已提交
3422

C
Christoph Hellwig 已提交
3423 3424
	blk_mq_start_request(rq);

3425 3426
	down_read(&rbd_dev->header_rwsem);
	mapping_size = rbd_dev->mapping.size;
G
Guangliang Zhao 已提交
3427
	if (op_type != OBJ_OP_READ) {
3428 3429 3430 3431 3432 3433
		snapc = rbd_dev->header.snapc;
		ceph_get_snap_context(snapc);
	}
	up_read(&rbd_dev->header_rwsem);

	if (offset + length > mapping_size) {
I
Ilya Dryomov 已提交
3434
		rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
3435
			 length, mapping_size);
I
Ilya Dryomov 已提交
3436 3437 3438
		result = -EIO;
		goto err_rq;
	}
A
Alex Elder 已提交
3439

G
Guangliang Zhao 已提交
3440
	img_request = rbd_img_request_create(rbd_dev, offset, length, op_type,
3441
					     snapc);
I
Ilya Dryomov 已提交
3442 3443 3444 3445 3446
	if (!img_request) {
		result = -ENOMEM;
		goto err_rq;
	}
	img_request->rq = rq;
A
Alex Elder 已提交
3447

3448 3449 3450 3451 3452 3453
	if (op_type == OBJ_OP_DISCARD)
		result = rbd_img_request_fill(img_request, OBJ_REQUEST_NODATA,
					      NULL);
	else
		result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
					      rq->bio);
I
Ilya Dryomov 已提交
3454 3455
	if (result)
		goto err_img_request;
A
Alex Elder 已提交
3456

I
Ilya Dryomov 已提交
3457 3458 3459
	result = rbd_img_request_submit(img_request);
	if (result)
		goto err_img_request;
A
Alex Elder 已提交
3460

I
Ilya Dryomov 已提交
3461
	return;
A
Alex Elder 已提交
3462

I
Ilya Dryomov 已提交
3463 3464 3465 3466 3467
err_img_request:
	rbd_img_request_put(img_request);
err_rq:
	if (result)
		rbd_warn(rbd_dev, "%s %llx at %llx result %d",
G
Guangliang Zhao 已提交
3468
			 obj_op_name(op_type), length, offset, result);
3469
	ceph_put_snap_context(snapc);
C
Christoph Hellwig 已提交
3470 3471
err:
	blk_mq_end_request(rq, result);
I
Ilya Dryomov 已提交
3472
}

static int rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct work_struct *work = blk_mq_rq_to_pdu(rq);

	queue_work(rbd_wq, work);
	return BLK_MQ_RQ_QUEUE_OK;
}
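
/*
 * Added commentary: the per-request payload (pdu) of each blk-mq request
 * is a work_struct, set up to run rbd_queue_workfn() in rbd_init_request()
 * below.  rbd_queue_rq() only queues that work on the rbd_wq workqueue;
 * the image request is built and submitted later, in process context.
 */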

static void rbd_free_disk(struct rbd_device *rbd_dev)
{
	struct gendisk *disk = rbd_dev->disk;

	if (!disk)
		return;

	rbd_dev->disk = NULL;
	if (disk->flags & GENHD_FL_UP) {
		del_gendisk(disk);
		if (disk->queue)
			blk_cleanup_queue(disk->queue);
		blk_mq_free_tag_set(&rbd_dev->tag_set);
	}
	put_disk(disk);
}

static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
				const char *object_name,
				u64 offset, u64 length, void *buf)

{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_obj_request *obj_request;
	struct page **pages = NULL;
	u32 page_count;
	size_t size;
	int ret;

	page_count = (u32) calc_pages_for(offset, length);
	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = -ENOMEM;
	obj_request = rbd_obj_request_create(object_name, offset, length,
							OBJ_REQUEST_PAGES);
	if (!obj_request)
		goto out;

	obj_request->pages = pages;
	obj_request->page_count = page_count;

	obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
						  obj_request);
	if (!obj_request->osd_req)
		goto out;

	osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
					offset, length, 0, 0);
	osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
					obj_request->pages,
					obj_request->length,
					obj_request->offset & ~PAGE_MASK,
					false, false);
	rbd_osd_req_format_read(obj_request);

	ret = rbd_obj_request_submit(osdc, obj_request);
	if (ret)
		goto out;
	ret = rbd_obj_request_wait(obj_request);
	if (ret)
		goto out;

	ret = obj_request->result;
	if (ret < 0)
		goto out;

	rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
	size = (size_t) obj_request->xferred;
	ceph_copy_from_page_vector(pages, buf, 0, size);
	rbd_assert(size <= (size_t)INT_MAX);
	ret = (int)size;
out:
	if (obj_request)
		rbd_obj_request_put(obj_request);
	else
		ceph_release_page_vector(pages, page_count);

	return ret;
}

/*
 * Read the complete header for the given rbd device.  On successful
 * return, the rbd_dev->header field will contain up-to-date
 * information about the image.
 */
static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
{
	struct rbd_image_header_ondisk *ondisk = NULL;
	u32 snap_count = 0;
	u64 names_size = 0;
	u32 want_count;
	int ret;

	/*
	 * The complete header will include an array of its 64-bit
	 * snapshot ids, followed by the names of those snapshots as
	 * a contiguous block of NUL-terminated strings.  Note that
	 * the number of snapshots could change by the time we read
	 * it in, in which case we re-read it.
	 */
	do {
		size_t size;

		kfree(ondisk);

		size = sizeof (*ondisk);
		size += snap_count * sizeof (struct rbd_image_snap_ondisk);
		size += names_size;
		ondisk = kmalloc(size, GFP_KERNEL);
		if (!ondisk)
			return -ENOMEM;

		ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
				       0, size, ondisk);
		if (ret < 0)
			goto out;
		if ((size_t)ret < size) {
			ret = -ENXIO;
			rbd_warn(rbd_dev, "short header read (want %zd got %d)",
				size, ret);
			goto out;
		}
		if (!rbd_dev_ondisk_valid(ondisk)) {
			ret = -ENXIO;
			rbd_warn(rbd_dev, "invalid header");
			goto out;
		}

		names_size = le64_to_cpu(ondisk->snap_names_len);
		want_count = snap_count;
		snap_count = le32_to_cpu(ondisk->snap_count);
	} while (snap_count != want_count);

	ret = rbd_header_from_disk(rbd_dev, ondisk);
out:
	kfree(ondisk);

	return ret;
}

/*
 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
 * has disappeared from the (just updated) snapshot context.
 */
static void rbd_exists_validate(struct rbd_device *rbd_dev)
{
	u64 snap_id;

	if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
		return;

	snap_id = rbd_dev->spec->snap_id;
	if (snap_id == CEPH_NOSNAP)
		return;

	if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
		clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
}

static void rbd_dev_update_size(struct rbd_device *rbd_dev)
{
	sector_t size;
	bool removing;

	/*
	 * Don't hold the lock while doing disk operations,
	 * or lock ordering will conflict with the bdev mutex via:
	 * rbd_add() -> blkdev_get() -> rbd_open()
	 */
	spin_lock_irq(&rbd_dev->lock);
	removing = test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
	spin_unlock_irq(&rbd_dev->lock);
	/*
	 * If the device is being removed, rbd_dev->disk has
	 * been destroyed, so don't try to update its size
	 */
	if (!removing) {
		size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
		dout("setting size to %llu sectors", (unsigned long long)size);
		set_capacity(rbd_dev->disk, size);
		revalidate_disk(rbd_dev->disk);
	}
}

static int rbd_dev_refresh(struct rbd_device *rbd_dev)
{
	u64 mapping_size;
	int ret;

	down_write(&rbd_dev->header_rwsem);
	mapping_size = rbd_dev->mapping.size;

	ret = rbd_dev_header_info(rbd_dev);
	if (ret)
		goto out;

	/*
	 * If there is a parent, see if it has disappeared due to the
	 * mapped image getting flattened.
	 */
	if (rbd_dev->parent) {
		ret = rbd_dev_v2_parent_info(rbd_dev);
		if (ret)
			goto out;
	}

	if (rbd_dev->spec->snap_id == CEPH_NOSNAP) {
		rbd_dev->mapping.size = rbd_dev->header.image_size;
	} else {
		/* validate mapped snapshot's EXISTS flag */
		rbd_exists_validate(rbd_dev);
	}

out:
	up_write(&rbd_dev->header_rwsem);
	if (!ret && mapping_size != rbd_dev->mapping.size)
		rbd_dev_update_size(rbd_dev);

	return ret;
}

static int rbd_init_request(void *data, struct request *rq,
		unsigned int hctx_idx, unsigned int request_idx,
		unsigned int numa_node)
{
	struct work_struct *work = blk_mq_rq_to_pdu(rq);

	INIT_WORK(work, rbd_queue_workfn);
	return 0;
}

static struct blk_mq_ops rbd_mq_ops = {
	.queue_rq	= rbd_queue_rq,
	.map_queue	= blk_mq_map_queue,
	.init_request	= rbd_init_request,
};

static int rbd_init_disk(struct rbd_device *rbd_dev)
{
	struct gendisk *disk;
	struct request_queue *q;
	u64 segment_size;
	int err;

	/* create gendisk info */
	disk = alloc_disk(single_major ?
			  (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
			  RBD_MINORS_PER_MAJOR);
	if (!disk)
		return -ENOMEM;

	snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
		 rbd_dev->dev_id);
	disk->major = rbd_dev->major;
	disk->first_minor = rbd_dev->minor;
	if (single_major)
		disk->flags |= GENHD_FL_EXT_DEVT;
	disk->fops = &rbd_bd_ops;
	disk->private_data = rbd_dev;

	memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
	rbd_dev->tag_set.ops = &rbd_mq_ops;
	rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
	rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
	rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
	rbd_dev->tag_set.nr_hw_queues = 1;
	rbd_dev->tag_set.cmd_size = sizeof(struct work_struct);

	err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
	if (err)
		goto out_disk;

	q = blk_mq_init_queue(&rbd_dev->tag_set);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto out_tag_set;
	}

	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
	/* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */

	/* set io sizes to object size */
	segment_size = rbd_obj_bytes(&rbd_dev->header);
	blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
	q->limits.max_sectors = queue_max_hw_sectors(q);
	blk_queue_max_segments(q, segment_size / SECTOR_SIZE);
	blk_queue_max_segment_size(q, segment_size);
	blk_queue_io_min(q, segment_size);
	blk_queue_io_opt(q, segment_size);

	/* enable the discard support */
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	q->limits.discard_granularity = segment_size;
	q->limits.discard_alignment = segment_size;
	blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE);
	q->limits.discard_zeroes_data = 1;

	if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
		q->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;

	disk->queue = q;

	q->queuedata = rbd_dev;

	rbd_dev->disk = disk;

	return 0;
out_tag_set:
	blk_mq_free_tag_set(&rbd_dev->tag_set);
out_disk:
	put_disk(disk);
	return err;
}
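
/*
 * Added commentary (assumes the common default of 4 MiB objects, i.e.
 * object order 22; images created with another order scale accordingly):
 * with that default, segment_size above would be 4194304 bytes, giving
 * max_hw_sectors = 4194304 / 512 = 8192 sectors and a discard granularity
 * of one full object.
 */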

/*
  sysfs
*/

static struct rbd_device *dev_to_rbd_dev(struct device *dev)
{
	return container_of(dev, struct rbd_device, dev);
}

static ssize_t rbd_size_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
A
Alex Elder 已提交
3812
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3813

A
Alex Elder 已提交
3814 3815
	return sprintf(buf, "%llu\n",
		(unsigned long long)rbd_dev->mapping.size);
3816 3817
}

A
Alex Elder 已提交
3818 3819 3820 3821 3822 3823 3824 3825 3826 3827
/*
 * Note this shows the features for whatever's mapped, which is not
 * necessarily the base image.
 */
static ssize_t rbd_features_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "0x%016llx\n",
A
Alex Elder 已提交
3828
			(unsigned long long)rbd_dev->mapping.features);
A
Alex Elder 已提交
3829 3830
}

3831 3832 3833
static ssize_t rbd_major_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
A
Alex Elder 已提交
3834
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3835

A
Alex Elder 已提交
3836 3837 3838 3839
	if (rbd_dev->major)
		return sprintf(buf, "%d\n", rbd_dev->major);

	return sprintf(buf, "(none)\n");
3840 3841 3842 3843 3844 3845
}

static ssize_t rbd_minor_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
A
Alex Elder 已提交
3846

3847
	return sprintf(buf, "%d\n", rbd_dev->minor);
3848 3849 3850 3851
}

static ssize_t rbd_client_id_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
3852
{
A
Alex Elder 已提交
3853
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3854

3855 3856
	return sprintf(buf, "client%lld\n",
			ceph_client_id(rbd_dev->rbd_client->client));
3857 3858
}

3859 3860
static ssize_t rbd_pool_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
3861
{
A
Alex Elder 已提交
3862
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3863

3864
	return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
3865 3866
}

3867 3868 3869 3870 3871
static ssize_t rbd_pool_id_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

3872
	return sprintf(buf, "%llu\n",
A
Alex Elder 已提交
3873
			(unsigned long long) rbd_dev->spec->pool_id);
3874 3875
}

3876 3877 3878
static ssize_t rbd_name_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
A
Alex Elder 已提交
3879
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3880

A
Alex Elder 已提交
3881 3882 3883 3884
	if (rbd_dev->spec->image_name)
		return sprintf(buf, "%s\n", rbd_dev->spec->image_name);

	return sprintf(buf, "(unknown)\n");
3885 3886
}

A
Alex Elder 已提交
3887 3888 3889 3890 3891
static ssize_t rbd_image_id_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

3892
	return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
A
Alex Elder 已提交
3893 3894
}

A
Alex Elder 已提交
3895 3896 3897 3898
/*
 * Shows the name of the currently-mapped snapshot (or
 * RBD_SNAP_HEAD_NAME for the base image).
 */
3899 3900 3901 3902
static ssize_t rbd_snap_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
A
Alex Elder 已提交
3903
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3904

3905
	return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
3906 3907
}

3908
/*
3909 3910 3911
 * For a v2 image, shows the chain of parent images, separated by empty
 * lines.  For v1 images or if there is no parent, shows "(no parent
 * image)".
3912 3913
 */
static ssize_t rbd_parent_show(struct device *dev,
3914 3915
			       struct device_attribute *attr,
			       char *buf)
3916 3917
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3918
	ssize_t count = 0;
3919

3920
	if (!rbd_dev->parent)
3921 3922
		return sprintf(buf, "(no parent image)\n");

3923 3924 3925 3926 3927 3928 3929 3930 3931 3932 3933 3934 3935 3936 3937 3938
	for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
		struct rbd_spec *spec = rbd_dev->parent_spec;

		count += sprintf(&buf[count], "%s"
			    "pool_id %llu\npool_name %s\n"
			    "image_id %s\nimage_name %s\n"
			    "snap_id %llu\nsnap_name %s\n"
			    "overlap %llu\n",
			    !count ? "" : "\n", /* first? */
			    spec->pool_id, spec->pool_name,
			    spec->image_id, spec->image_name ?: "(unknown)",
			    spec->snap_id, spec->snap_name,
			    rbd_dev->parent_overlap);
	}

	return count;
3939 3940
}

3941 3942 3943 3944 3945
static ssize_t rbd_image_refresh(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t size)
{
A
Alex Elder 已提交
3946
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3947
	int ret;
3948

A
Alex Elder 已提交
3949
	ret = rbd_dev_refresh(rbd_dev);
3950
	if (ret)
3951
		return ret;
3952

3953
	return size;
3954
}

static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
static DEVICE_ATTR(minor, S_IRUGO, rbd_minor_show, NULL);
static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);

static struct attribute *rbd_attrs[] = {
	&dev_attr_size.attr,
	&dev_attr_features.attr,
	&dev_attr_major.attr,
	&dev_attr_minor.attr,
	&dev_attr_client_id.attr,
	&dev_attr_pool.attr,
	&dev_attr_pool_id.attr,
	&dev_attr_name.attr,
	&dev_attr_image_id.attr,
	&dev_attr_current_snap.attr,
	&dev_attr_parent.attr,
	&dev_attr_refresh.attr,
	NULL
};

static struct attribute_group rbd_attr_group = {
	.attrs = rbd_attrs,
};

static const struct attribute_group *rbd_attr_groups[] = {
	&rbd_attr_group,
	NULL
};

static void rbd_sysfs_dev_release(struct device *dev)
{
}

static struct device_type rbd_device_type = {
	.name		= "rbd",
	.groups		= rbd_attr_groups,
	.release	= rbd_sysfs_dev_release,
};
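
/*
 * Added usage sketch (illustrative paths and values): once an image is
 * mapped, these attributes appear under /sys/bus/rbd/devices/<id>/, e.g.
 *
 *	$ cat /sys/bus/rbd/devices/0/name
 *	myimage
 *	$ cat /sys/bus/rbd/devices/0/size
 *	1073741824
 *	$ echo 1 > /sys/bus/rbd/devices/0/refresh
 *
 * Writing to "refresh" re-reads the image header via rbd_dev_refresh();
 * the remaining attributes are read-only.
 */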

static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
{
	kref_get(&spec->kref);

	return spec;
}

static void rbd_spec_free(struct kref *kref);
static void rbd_spec_put(struct rbd_spec *spec)
{
	if (spec)
		kref_put(&spec->kref, rbd_spec_free);
}

static struct rbd_spec *rbd_spec_alloc(void)
{
	struct rbd_spec *spec;

	spec = kzalloc(sizeof (*spec), GFP_KERNEL);
	if (!spec)
		return NULL;

	spec->pool_id = CEPH_NOPOOL;
	spec->snap_id = CEPH_NOSNAP;
	kref_init(&spec->kref);

	return spec;
}

static void rbd_spec_free(struct kref *kref)
{
	struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);

	kfree(spec->pool_name);
	kfree(spec->image_id);
	kfree(spec->image_name);
	kfree(spec->snap_name);
	kfree(spec);
}

static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
4045 4046
					 struct rbd_spec *spec,
					 struct rbd_options *opts)
4047 4048 4049 4050 4051 4052 4053 4054
{
	struct rbd_device *rbd_dev;

	rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
	if (!rbd_dev)
		return NULL;

	spin_lock_init(&rbd_dev->lock);
4055
	rbd_dev->flags = 0;
4056
	atomic_set(&rbd_dev->parent_ref, 0);
4057 4058 4059 4060
	INIT_LIST_HEAD(&rbd_dev->node);
	init_rwsem(&rbd_dev->header_rwsem);

	rbd_dev->rbd_client = rbdc;
4061 4062
	rbd_dev->spec = spec;
	rbd_dev->opts = opts;
4063

4064 4065 4066 4067 4068 4069 4070
	/* Initialize the layout used for all rbd requests */

	rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
	rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
	rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
	rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);

4071 4072 4073 4074 4075 4076 4077
	return rbd_dev;
}

static void rbd_dev_destroy(struct rbd_device *rbd_dev)
{
	rbd_put_client(rbd_dev->rbd_client);
	rbd_spec_put(rbd_dev->spec);
4078
	kfree(rbd_dev->opts);
4079 4080 4081
	kfree(rbd_dev);
}

4082 4083 4084 4085 4086 4087 4088 4089 4090 4091 4092 4093 4094 4095 4096
/*
 * Get the size and object order for an image snapshot, or if
 * snap_id is CEPH_NOSNAP, gets this information for the base
 * image.
 */
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u8 *order, u64 *snap_size)
{
	__le64 snapid = cpu_to_le64(snap_id);
	int ret;
	struct {
		u8 order;
		__le64 size;
	} __attribute__ ((packed)) size_buf = { 0 };

4097
	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4098
				"rbd", "get_size",
4099
				&snapid, sizeof (snapid),
4100
				&size_buf, sizeof (size_buf));
4101
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4102 4103
	if (ret < 0)
		return ret;
4104 4105
	if (ret < sizeof (size_buf))
		return -ERANGE;
4106

J
Josh Durgin 已提交
4107
	if (order) {
4108
		*order = size_buf.order;
J
Josh Durgin 已提交
4109 4110
		dout("  order %u", (unsigned int)*order);
	}
4111 4112
	*snap_size = le64_to_cpu(size_buf.size);

J
Josh Durgin 已提交
4113 4114
	dout("  snap_id 0x%016llx snap_size = %llu\n",
		(unsigned long long)snap_id,
4115
		(unsigned long long)*snap_size);
4116 4117 4118 4119 4120 4121 4122 4123 4124 4125 4126

	return 0;
}

static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
{
	return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
					&rbd_dev->header.obj_order,
					&rbd_dev->header.image_size);
}

4127 4128 4129 4130 4131 4132 4133 4134 4135 4136
static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
{
	void *reply_buf;
	int ret;
	void *p;

	reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
	if (!reply_buf)
		return -ENOMEM;

4137
	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4138
				"rbd", "get_object_prefix", NULL, 0,
4139
				reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
4140
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4141 4142 4143 4144 4145
	if (ret < 0)
		goto out;

	p = reply_buf;
	rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
4146 4147
						p + ret, NULL, GFP_NOIO);
	ret = 0;
4148 4149 4150 4151 4152 4153 4154 4155 4156 4157 4158 4159 4160

	if (IS_ERR(rbd_dev->header.object_prefix)) {
		ret = PTR_ERR(rbd_dev->header.object_prefix);
		rbd_dev->header.object_prefix = NULL;
	} else {
		dout("  object_prefix = %s\n", rbd_dev->header.object_prefix);
	}
out:
	kfree(reply_buf);

	return ret;
}

4161 4162 4163 4164 4165 4166 4167
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features)
{
	__le64 snapid = cpu_to_le64(snap_id);
	struct {
		__le64 features;
		__le64 incompat;
4168
	} __attribute__ ((packed)) features_buf = { 0 };
A
Alex Elder 已提交
4169
	u64 incompat;
4170 4171
	int ret;

4172
	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4173
				"rbd", "get_features",
4174
				&snapid, sizeof (snapid),
4175
				&features_buf, sizeof (features_buf));
4176
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4177 4178
	if (ret < 0)
		return ret;
4179 4180
	if (ret < sizeof (features_buf))
		return -ERANGE;
A
Alex Elder 已提交
4181 4182

	incompat = le64_to_cpu(features_buf.incompat);
A
Alex Elder 已提交
4183
	if (incompat & ~RBD_FEATURES_SUPPORTED)
A
Alex Elder 已提交
4184
		return -ENXIO;
A
Alex Elder 已提交
4185

4186 4187 4188
	*snap_features = le64_to_cpu(features_buf.features);

	dout("  snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
4189 4190 4191
		(unsigned long long)snap_id,
		(unsigned long long)*snap_features,
		(unsigned long long)le64_to_cpu(features_buf.incompat));
4192 4193 4194 4195 4196 4197 4198 4199 4200 4201

	return 0;
}

static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
{
	return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
						&rbd_dev->header.features);
}

4202 4203 4204 4205 4206 4207 4208 4209
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
{
	struct rbd_spec *parent_spec;
	size_t size;
	void *reply_buf = NULL;
	__le64 snapid;
	void *p;
	void *end;
A
Alex Elder 已提交
4210
	u64 pool_id;
4211
	char *image_id;
4212
	u64 snap_id;
4213 4214 4215 4216 4217 4218 4219 4220 4221 4222 4223 4224 4225 4226 4227 4228 4229
	u64 overlap;
	int ret;

	parent_spec = rbd_spec_alloc();
	if (!parent_spec)
		return -ENOMEM;

	size = sizeof (__le64) +				/* pool_id */
		sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX +	/* image_id */
		sizeof (__le64) +				/* snap_id */
		sizeof (__le64);				/* overlap */
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf) {
		ret = -ENOMEM;
		goto out_err;
	}

4230
	snapid = cpu_to_le64(rbd_dev->spec->snap_id);
4231
	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4232
				"rbd", "get_parent",
4233
				&snapid, sizeof (snapid),
4234
				reply_buf, size);
4235
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4236 4237 4238 4239
	if (ret < 0)
		goto out_err;

	p = reply_buf;
4240 4241
	end = reply_buf + ret;
	ret = -ERANGE;
A
Alex Elder 已提交
4242
	ceph_decode_64_safe(&p, end, pool_id, out_err);
4243 4244 4245 4246 4247 4248 4249 4250 4251 4252 4253 4254 4255 4256 4257 4258 4259
	if (pool_id == CEPH_NOPOOL) {
		/*
		 * Either the parent never existed, or we have
		 * record of it but the image got flattened so it no
		 * longer has a parent.  When the parent of a
		 * layered image disappears we immediately set the
		 * overlap to 0.  The effect of this is that all new
		 * requests will be treated as if the image had no
		 * parent.
		 */
		if (rbd_dev->parent_overlap) {
			rbd_dev->parent_overlap = 0;
			rbd_dev_parent_put(rbd_dev);
			pr_info("%s: clone image has been flattened\n",
				rbd_dev->disk->disk_name);
		}

4260
		goto out;	/* No parent?  No problem. */
4261
	}
4262

4263 4264 4265
	/* The ceph file layout needs to fit pool id in 32 bits */

	ret = -EIO;
A
Alex Elder 已提交
4266
	if (pool_id > (u64)U32_MAX) {
4267
		rbd_warn(NULL, "parent pool id too large (%llu > %u)",
A
Alex Elder 已提交
4268
			(unsigned long long)pool_id, U32_MAX);
4269
		goto out_err;
A
Alex Elder 已提交
4270
	}
4271

A
Alex Elder 已提交
4272
	image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
4273 4274 4275 4276
	if (IS_ERR(image_id)) {
		ret = PTR_ERR(image_id);
		goto out_err;
	}
4277
	ceph_decode_64_safe(&p, end, snap_id, out_err);
4278 4279
	ceph_decode_64_safe(&p, end, overlap, out_err);

	/*
	 * The parent won't change (except when the clone is
	 * flattened, already handled that).  So we only need to
	 * record the parent spec if we have not already done so.
	 */
	if (!rbd_dev->parent_spec) {
		parent_spec->pool_id = pool_id;
		parent_spec->image_id = image_id;
		parent_spec->snap_id = snap_id;
A
Alex Elder 已提交
4289 4290
		rbd_dev->parent_spec = parent_spec;
		parent_spec = NULL;	/* rbd_dev now owns this */
4291 4292
	} else {
		kfree(image_id);
4293 4294 4295
	}

	/*
4296 4297
	 * We always update the parent overlap.  If it's zero we issue
	 * a warning, as we will proceed as if there was no parent.
4298 4299 4300
	 */
	if (!overlap) {
		if (parent_spec) {
4301 4302 4303 4304
			/* refresh, careful to warn just once */
			if (rbd_dev->parent_overlap)
				rbd_warn(rbd_dev,
				    "clone now standalone (overlap became 0)");
4305
		} else {
4306 4307
			/* initial probe */
			rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
4308
		}
A
Alex Elder 已提交
4309
	}
4310 4311
	rbd_dev->parent_overlap = overlap;

4312 4313 4314 4315 4316 4317 4318 4319 4320
out:
	ret = 0;
out_err:
	kfree(reply_buf);
	rbd_spec_put(parent_spec);

	return ret;
}

4321 4322 4323 4324 4325 4326 4327 4328 4329 4330 4331 4332 4333 4334 4335
static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
{
	struct {
		__le64 stripe_unit;
		__le64 stripe_count;
	} __attribute__ ((packed)) striping_info_buf = { 0 };
	size_t size = sizeof (striping_info_buf);
	void *p;
	u64 obj_size;
	u64 stripe_unit;
	u64 stripe_count;
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_stripe_unit_count", NULL, 0,
4336
				(char *)&striping_info_buf, size);
4337 4338 4339 4340 4341 4342 4343 4344 4345 4346 4347 4348 4349 4350 4351 4352 4353 4354 4355 4356 4357 4358 4359 4360 4361 4362 4363 4364
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < size)
		return -ERANGE;

	/*
	 * We don't actually support the "fancy striping" feature
	 * (STRIPINGV2) yet, but if the striping sizes are the
	 * defaults the behavior is the same as before.  So find
	 * out, and only fail if the image has non-default values.
	 */
	ret = -EINVAL;
	obj_size = (u64)1 << rbd_dev->header.obj_order;
	p = &striping_info_buf;
	stripe_unit = ceph_decode_64(&p);
	if (stripe_unit != obj_size) {
		rbd_warn(rbd_dev, "unsupported stripe unit "
				"(got %llu want %llu)",
				stripe_unit, obj_size);
		return -EINVAL;
	}
	stripe_count = ceph_decode_64(&p);
	if (stripe_count != 1) {
		rbd_warn(rbd_dev, "unsupported stripe count "
				"(got %llu want 1)", stripe_count);
		return -EINVAL;
	}
4365 4366
	rbd_dev->header.stripe_unit = stripe_unit;
	rbd_dev->header.stripe_count = stripe_count;
4367 4368 4369 4370

	return 0;
}

4371 4372 4373 4374 4375 4376 4377 4378 4379 4380 4381 4382 4383 4384
static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
{
	size_t image_id_size;
	char *image_id;
	void *p;
	void *end;
	size_t size;
	void *reply_buf = NULL;
	size_t len = 0;
	char *image_name = NULL;
	int ret;

	rbd_assert(!rbd_dev->spec->image_name);

A
Alex Elder 已提交
4385 4386
	len = strlen(rbd_dev->spec->image_id);
	image_id_size = sizeof (__le32) + len;
4387 4388 4389 4390 4391
	image_id = kmalloc(image_id_size, GFP_KERNEL);
	if (!image_id)
		return NULL;

	p = image_id;
4392
	end = image_id + image_id_size;
4393
	ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
4394 4395 4396 4397 4398 4399

	size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf)
		goto out;

4400
	ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
4401 4402
				"rbd", "dir_get_name",
				image_id, image_id_size,
4403
				reply_buf, size);
4404 4405 4406
	if (ret < 0)
		goto out;
	p = reply_buf;
4407 4408
	end = reply_buf + ret;

4409 4410 4411 4412 4413 4414 4415 4416 4417 4418 4419 4420
	image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
	if (IS_ERR(image_name))
		image_name = NULL;
	else
		dout("%s: name is %s len is %zd\n", __func__, image_name, len);
out:
	kfree(reply_buf);
	kfree(image_id);

	return image_name;
}

4421 4422 4423 4424 4425 4426 4427 4428 4429 4430 4431 4432 4433 4434 4435 4436 4437 4438 4439 4440 4441 4442 4443 4444 4445 4446 4447 4448 4449 4450
static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	const char *snap_name;
	u32 which = 0;

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which < snapc->num_snaps) {
		if (!strcmp(name, snap_name))
			return snapc->snaps[which];
		snap_name += strlen(snap_name) + 1;
		which++;
	}
	return CEPH_NOSNAP;
}

static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u32 which;
	bool found = false;
	u64 snap_id;

	for (which = 0; !found && which < snapc->num_snaps; which++) {
		const char *snap_name;

		snap_id = snapc->snaps[which];
		snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
4451 4452 4453 4454 4455 4456 4457
		if (IS_ERR(snap_name)) {
			/* ignore no-longer existing snapshots */
			if (PTR_ERR(snap_name) == -ENOENT)
				continue;
			else
				break;
		}
4458 4459 4460 4461 4462 4463 4464 4465 4466 4467 4468 4469 4470 4471 4472 4473 4474 4475
		found = !strcmp(name, snap_name);
		kfree(snap_name);
	}
	return found ? snap_id : CEPH_NOSNAP;
}

/*
 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
 * no snapshot by that name is found, or if an error occurs.
 */
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	if (rbd_dev->image_format == 1)
		return rbd_v1_snap_id_by_name(rbd_dev, name);

	return rbd_v2_snap_id_by_name(rbd_dev, name);
}
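
/*
 * Added commentary: the two lookups above differ only in where snapshot
 * names come from.  A format 1 image carries every name in the header's
 * snap_names blob, so rbd_v1_snap_id_by_name() simply walks it, while
 * rbd_v2_snap_id_by_name() must fetch each candidate name from the OSD
 * via rbd_dev_v2_snap_name().
 */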

/*
4477 4478 4479 4480 4481 4482 4483 4484 4485 4486 4487 4488 4489 4490 4491 4492 4493 4494 4495 4496 4497 4498 4499 4500 4501 4502 4503
 * An image being mapped will have everything but the snap id.
 */
static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;

	rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
	rbd_assert(spec->image_id && spec->image_name);
	rbd_assert(spec->snap_name);

	if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
		u64 snap_id;

		snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
		if (snap_id == CEPH_NOSNAP)
			return -ENOENT;

		spec->snap_id = snap_id;
	} else {
		spec->snap_id = CEPH_NOSNAP;
	}

	return 0;
}

/*
 * A parent image will have all ids but none of the names.
4504
 *
4505 4506
 * All names in an rbd spec are dynamically allocated.  It's OK if we
 * can't figure out the name for an image id.
4507
 */
4508
static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
4509
{
4510 4511 4512 4513 4514
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_spec *spec = rbd_dev->spec;
	const char *pool_name;
	const char *image_name;
	const char *snap_name;
4515 4516
	int ret;

4517 4518 4519
	rbd_assert(spec->pool_id != CEPH_NOPOOL);
	rbd_assert(spec->image_id);
	rbd_assert(spec->snap_id != CEPH_NOSNAP);
4520

4521
	/* Get the pool name; we have to make our own copy of this */
4522

4523 4524 4525
	pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
	if (!pool_name) {
		rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
4526 4527
		return -EIO;
	}
4528 4529
	pool_name = kstrdup(pool_name, GFP_KERNEL);
	if (!pool_name)
4530 4531 4532 4533
		return -ENOMEM;

	/* Fetch the image name; tolerate failure here */

4534 4535
	image_name = rbd_dev_image_name(rbd_dev);
	if (!image_name)
A
Alex Elder 已提交
4536
		rbd_warn(rbd_dev, "unable to get image name");
4537

4538
	/* Fetch the snapshot name */
4539

4540
	snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
4541 4542
	if (IS_ERR(snap_name)) {
		ret = PTR_ERR(snap_name);
4543
		goto out_err;
4544 4545 4546 4547 4548
	}

	spec->pool_name = pool_name;
	spec->image_name = image_name;
	spec->snap_name = snap_name;
4549 4550

	return 0;
4551

4552
out_err:
4553 4554
	kfree(image_name);
	kfree(pool_name);
4555 4556 4557
	return ret;
}

A
Alex Elder 已提交
4558
static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
4559 4560 4561 4562 4563 4564 4565 4566 4567 4568 4569 4570 4571 4572 4573 4574 4575 4576 4577 4578 4579 4580 4581
{
	size_t size;
	int ret;
	void *reply_buf;
	void *p;
	void *end;
	u64 seq;
	u32 snap_count;
	struct ceph_snap_context *snapc;
	u32 i;

	/*
	 * We'll need room for the seq value (maximum snapshot id),
	 * snapshot count, and array of that many snapshot ids.
	 * For now we have a fixed upper limit on the number we're
	 * prepared to receive.
	 */
	size = sizeof (__le64) + sizeof (__le32) +
			RBD_MAX_SNAP_COUNT * sizeof (__le64);
	reply_buf = kzalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return -ENOMEM;

4582
	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4583
				"rbd", "get_snapcontext", NULL, 0,
4584
				reply_buf, size);
4585
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4586 4587 4588 4589
	if (ret < 0)
		goto out;

	p = reply_buf;
4590 4591
	end = reply_buf + ret;
	ret = -ERANGE;
4592 4593 4594 4595 4596 4597 4598 4599 4600 4601 4602 4603 4604 4605 4606 4607
	ceph_decode_64_safe(&p, end, seq, out);
	ceph_decode_32_safe(&p, end, snap_count, out);

	/*
	 * Make sure the reported number of snapshot ids wouldn't go
	 * beyond the end of our buffer.  But before checking that,
	 * make sure the computed size of the snapshot context we
	 * allocate is representable in a size_t.
	 */
	if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
				 / sizeof (u64)) {
		ret = -EINVAL;
		goto out;
	}
	if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
		goto out;
4608
	ret = 0;
4609

4610
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
4611 4612 4613 4614 4615 4616 4617 4618
	if (!snapc) {
		ret = -ENOMEM;
		goto out;
	}
	snapc->seq = seq;
	for (i = 0; i < snap_count; i++)
		snapc->snaps[i] = ceph_decode_64(&p);

4619
	ceph_put_snap_context(rbd_dev->header.snapc);
4620 4621 4622
	rbd_dev->header.snapc = snapc;

	dout("  snap context seq = %llu, snap_count = %u\n",
4623
		(unsigned long long)seq, (unsigned int)snap_count);
4624 4625 4626
out:
	kfree(reply_buf);

4627
	return ret;
4628 4629
}

4630 4631
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
A
Alex Elder 已提交
4632 4633 4634
{
	size_t size;
	void *reply_buf;
4635
	__le64 snapid;
A
Alex Elder 已提交
4636 4637 4638 4639 4640 4641 4642 4643 4644 4645
	int ret;
	void *p;
	void *end;
	char *snap_name;

	size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return ERR_PTR(-ENOMEM);

4646
	snapid = cpu_to_le64(snap_id);
4647
	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
A
Alex Elder 已提交
4648
				"rbd", "get_snapshot_name",
4649
				&snapid, sizeof (snapid),
4650
				reply_buf, size);
4651
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4652 4653
	if (ret < 0) {
		snap_name = ERR_PTR(ret);
A
Alex Elder 已提交
4654
		goto out;
4655
	}
A
Alex Elder 已提交
4656 4657

	p = reply_buf;
4658
	end = reply_buf + ret;
A
Alex Elder 已提交
4659
	snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
4660
	if (IS_ERR(snap_name))
A
Alex Elder 已提交
4661 4662
		goto out;

4663
	dout("  snap_id 0x%016llx snap_name = %s\n",
4664
		(unsigned long long)snap_id, snap_name);
A
Alex Elder 已提交
4665 4666 4667
out:
	kfree(reply_buf);

4668
	return snap_name;
A
Alex Elder 已提交
4669 4670
}

4671
static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
A
Alex Elder 已提交
4672
{
4673
	bool first_time = rbd_dev->header.object_prefix == NULL;
A
Alex Elder 已提交
4674 4675
	int ret;

4676 4677
	ret = rbd_dev_v2_image_size(rbd_dev);
	if (ret)
4678
		return ret;
4679

4680 4681 4682
	if (first_time) {
		ret = rbd_dev_v2_header_onetime(rbd_dev);
		if (ret)
4683
			return ret;
4684 4685
	}

A
Alex Elder 已提交
4686
	ret = rbd_dev_v2_snap_context(rbd_dev);
4687 4688 4689 4690
	if (ret && first_time) {
		kfree(rbd_dev->header.object_prefix);
		rbd_dev->header.object_prefix = NULL;
	}
A
Alex Elder 已提交
4691 4692 4693 4694

	return ret;
}

4695 4696 4697 4698 4699 4700 4701 4702 4703 4704
static int rbd_dev_header_info(struct rbd_device *rbd_dev)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_header_info(rbd_dev);

	return rbd_dev_v2_header_info(rbd_dev);
}

4705 4706 4707
static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
{
	struct device *dev;
4708
	int ret;
4709

4710
	dev = &rbd_dev->dev;
4711 4712 4713
	dev->bus = &rbd_bus_type;
	dev->type = &rbd_device_type;
	dev->parent = &rbd_root_dev;
4714
	dev->release = rbd_dev_device_release;
A
Alex Elder 已提交
4715
	dev_set_name(dev, "%d", rbd_dev->dev_id);
4716 4717 4718
	ret = device_register(dev);

	return ret;
4719 4720
}

4721 4722 4723 4724 4725
static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
{
	device_unregister(&rbd_dev->dev);
}

4726
/*
4727
 * Get a unique rbd identifier for the given new rbd_dev, and add
4728
 * the rbd_dev to the global list.
4729
 */
4730
static int rbd_dev_id_get(struct rbd_device *rbd_dev)
4731
{
4732 4733
	int new_dev_id;

4734 4735 4736
	new_dev_id = ida_simple_get(&rbd_dev_id_ida,
				    0, minor_to_rbd_dev_id(1 << MINORBITS),
				    GFP_KERNEL);
4737 4738 4739 4740
	if (new_dev_id < 0)
		return new_dev_id;

	rbd_dev->dev_id = new_dev_id;
4741 4742 4743 4744

	spin_lock(&rbd_dev_list_lock);
	list_add_tail(&rbd_dev->node, &rbd_dev_list);
	spin_unlock(&rbd_dev_list_lock);
4745

4746
	dout("rbd_dev %p given dev id %d\n", rbd_dev, rbd_dev->dev_id);
4747 4748

	return 0;
4749
}
4750

4751
/*
4752 4753
 * Remove an rbd_dev from the global list, and record that its
 * identifier is no longer in use.
4754
 */
A
Alex Elder 已提交
4755
static void rbd_dev_id_put(struct rbd_device *rbd_dev)
4756
{
4757 4758 4759
	spin_lock(&rbd_dev_list_lock);
	list_del_init(&rbd_dev->node);
	spin_unlock(&rbd_dev_list_lock);
4760

4761 4762 4763
	ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);

	dout("rbd_dev %p released dev id %d\n", rbd_dev, rbd_dev->dev_id);
4764 4765
}

/*
 * Skips over white space at *buf, and updates *buf to point to the
 * first found non-space character (if any). Returns the length of
 * the token (string of non-white space characters) found.  Note
 * that *buf must be terminated with '\0'.
 */
static inline size_t next_token(const char **buf)
{
        /*
        * These are the characters that produce nonzero for
        * isspace() in the "C" and "POSIX" locales.
        */
        const char *spaces = " \f\n\r\t\v";

        *buf += strspn(*buf, spaces);	/* Find start of token */

	return strcspn(*buf, spaces);   /* Return token length */
}
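
/*
 * Added example: given buf -> "  pool image -", next_token() advances buf
 * past the leading spaces (to "pool image -") and returns 4, the length
 * of "pool".  It does not consume the token itself; dup_token() below
 * both copies the token and advances past it.
 */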

/*
 * Finds the next token in *buf, dynamically allocates a buffer big
 * enough to hold a copy of it, and copies the token into the new
 * buffer.  The copy is guaranteed to be terminated with '\0'.  Note
 * that a duplicate buffer is created even for a zero-length token.
 *
 * Returns a pointer to the newly-allocated duplicate, or a null
 * pointer if memory for the duplicate was not available.  If
 * the lenp argument is a non-null pointer, the length of the token
 * (not including the '\0') is returned in *lenp.
 *
 * If successful, the *buf pointer will be updated to point beyond
 * the end of the found token.
 *
 * Note: uses GFP_KERNEL for allocation.
 */
static inline char *dup_token(const char **buf, size_t *lenp)
{
	char *dup;
	size_t len;

	len = next_token(buf);
A
Alex Elder 已提交
4807
	dup = kmemdup(*buf, len + 1, GFP_KERNEL);
A
Alex Elder 已提交
4808 4809 4810 4811 4812 4813 4814 4815 4816 4817 4818
	if (!dup)
		return NULL;
	*(dup + len) = '\0';
	*buf += len;

	if (lenp)
		*lenp = len;

	return dup;
}

/*
 * Parse the options provided for an "rbd add" (i.e., rbd image
 * mapping) request.  These arrive via a write to /sys/bus/rbd/add,
 * and the data written is passed here via a NUL-terminated buffer.
 * Returns 0 if successful or an error code otherwise.
 *
 * The information extracted from these options is recorded in
 * the other parameters which return dynamically-allocated
 * structures:
 *  ceph_opts
 *      The address of a pointer that will refer to a ceph options
 *      structure.  Caller must release the returned pointer using
 *      ceph_destroy_options() when it is no longer needed.
 *  rbd_opts
 *	Address of an rbd options pointer.  Fully initialized by
 *	this function; caller must release with kfree().
 *  spec
 *	Address of an rbd image specification pointer.  Fully
 *	initialized by this function based on parsed options.
 *	Caller must release with rbd_spec_put().
 *
 * The options passed take this form:
 *  <mon_addrs> <options> <pool_name> <image_name> [<snap_id>]
 * where:
 *  <mon_addrs>
 *      A comma-separated list of one or more monitor addresses.
 *      A monitor address is an ip address, optionally followed
 *      by a port number (separated by a colon).
 *        I.e.:  ip1[:port1][,ip2[:port2]...]
 *  <options>
 *      A comma-separated list of ceph and/or rbd options.
 *  <pool_name>
 *      The name of the rados pool containing the rbd image.
 *  <image_name>
 *      The name of the image in that pool to map.
 *  <snap_id>
 *      An optional snapshot id.  If provided, the mapping will
 *      present data from the image at the time that snapshot was
 *      created.  The image head is used if no snapshot id is
 *      provided.  Snapshot mappings are always read-only.
 */
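
/*
 * Added example (illustrative values only): a mapping request written to
 * /sys/bus/rbd/add in the format described above might look like
 *
 *	1.2.3.4:6789 name=admin,secret=XXXXXXXX rbd myimage -
 *
 * i.e. one monitor address, a ceph options string, the "rbd" pool, the
 * image "myimage", and "-" to map the image head rather than a snapshot.
 */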
static int rbd_add_parse_args(const char *buf,
4861
				struct ceph_options **ceph_opts,
4862 4863
				struct rbd_options **opts,
				struct rbd_spec **rbd_spec)
4864
{
A
Alex Elder 已提交
4865
	size_t len;
4866
	char *options;
4867
	const char *mon_addrs;
4868
	char *snap_name;
4869
	size_t mon_addrs_size;
4870
	struct rbd_spec *spec = NULL;
4871
	struct rbd_options *rbd_opts = NULL;
4872
	struct ceph_options *copts;
4873
	int ret;
4874 4875 4876

	/* The first four tokens are required */

4877
	len = next_token(&buf);
4878 4879 4880 4881
	if (!len) {
		rbd_warn(NULL, "no monitor address(es) provided");
		return -EINVAL;
	}
4882
	mon_addrs = buf;
4883
	mon_addrs_size = len + 1;
4884
	buf += len;
4885

4886
	ret = -EINVAL;
4887 4888
	options = dup_token(&buf, NULL);
	if (!options)
4889
		return -ENOMEM;
4890 4891 4892 4893
	if (!*options) {
		rbd_warn(NULL, "no options provided");
		goto out_err;
	}
4894

4895 4896
	spec = rbd_spec_alloc();
	if (!spec)
4897
		goto out_mem;
4898 4899 4900 4901

	spec->pool_name = dup_token(&buf, NULL);
	if (!spec->pool_name)
		goto out_mem;
4902 4903 4904 4905
	if (!*spec->pool_name) {
		rbd_warn(NULL, "no pool name provided");
		goto out_err;
	}
4906

A
Alex Elder 已提交
4907
	spec->image_name = dup_token(&buf, NULL);
4908
	if (!spec->image_name)
4909
		goto out_mem;
4910 4911 4912 4913
	if (!*spec->image_name) {
		rbd_warn(NULL, "no image name provided");
		goto out_err;
	}
4914

4915 4916 4917 4918
	/*
	 * Snapshot name is optional; default is to use "-"
	 * (indicating the head/no snapshot).
	 */
4919
	len = next_token(&buf);
4920
	if (!len) {
4921 4922
		buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
		len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
4923
	} else if (len > RBD_MAX_SNAP_NAME_LEN) {
4924
		ret = -ENAMETOOLONG;
4925
		goto out_err;
4926
	}
4927 4928
	snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
	if (!snap_name)
4929
		goto out_mem;
4930 4931
	*(snap_name + len) = '\0';
	spec->snap_name = snap_name;
A
Alex Elder 已提交
4932

4933
	/* Initialize all rbd options to the defaults */
4934

4935 4936 4937 4938 4939
	rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
	if (!rbd_opts)
		goto out_mem;

	rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
I
Ilya Dryomov 已提交
4940
	rbd_opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
A
Alex Elder 已提交
4941

4942
	copts = ceph_parse_options(options, mon_addrs,
4943
					mon_addrs + mon_addrs_size - 1,
4944
					parse_rbd_opts_token, rbd_opts);
4945 4946
	if (IS_ERR(copts)) {
		ret = PTR_ERR(copts);
4947 4948
		goto out_err;
	}
4949 4950 4951
	kfree(options);

	*ceph_opts = copts;
4952
	*opts = rbd_opts;
4953
	*rbd_spec = spec;
4954

4955
	return 0;
4956
out_mem:
4957
	ret = -ENOMEM;
A
Alex Elder 已提交
4958
out_err:
4959 4960
	kfree(rbd_opts);
	rbd_spec_put(spec);
4961
	kfree(options);
A
Alex Elder 已提交
4962

4963
	return ret;
4964 4965
}

4966 4967 4968 4969 4970
/*
 * Return pool id (>= 0) or a negative error code.
 */
static int rbd_add_get_pool_id(struct rbd_client *rbdc, const char *pool_name)
{
4971
	struct ceph_options *opts = rbdc->client->options;
4972 4973 4974 4975 4976 4977 4978 4979 4980 4981 4982 4983 4984 4985 4986
	u64 newest_epoch;
	int tries = 0;
	int ret;

again:
	ret = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, pool_name);
	if (ret == -ENOENT && tries++ < 1) {
		ret = ceph_monc_do_get_version(&rbdc->client->monc, "osdmap",
					       &newest_epoch);
		if (ret < 0)
			return ret;

		if (rbdc->client->osdc.osdmap->epoch < newest_epoch) {
			ceph_monc_request_next_osdmap(&rbdc->client->monc);
			(void) ceph_monc_wait_osdmap(&rbdc->client->monc,
4987 4988
						     newest_epoch,
						     opts->mount_timeout);
4989 4990 4991 4992 4993 4994 4995 4996 4997 4998
			goto again;
		} else {
			/* the osdmap we have is new enough */
			return -ENOENT;
		}
	}

	return ret;
}

A
Alex Elder 已提交
4999 5000 5001 5002 5003 5004 5005 5006 5007 5008 5009 5010 5011 5012 5013 5014 5015 5016 5017 5018
/*
 * An rbd format 2 image has a unique identifier, distinct from the
 * name given to it by the user.  Internally, that identifier is
 * what's used to specify the names of objects related to the image.
 *
 * A special "rbd id" object is used to map an rbd image name to its
 * id.  If that object doesn't exist, then there is no v2 rbd image
 * with the supplied name.
 *
 * This function will record the given rbd_dev's image_id field if
 * it can be determined, and in that case will return 0.  If any
 * errors occur a negative errno will be returned and the rbd_dev's
 * image_id field will be unchanged (and should be NULL).
 */
static int rbd_dev_image_id(struct rbd_device *rbd_dev)
{
	int ret;
	size_t size;
	char *object_name;
	void *response;
5019
	char *image_id;
5020

A
Alex Elder 已提交
5021 5022 5023
	/*
	 * When probing a parent image, the image id is already
	 * known (and the image name likely is not).  There's no
5024 5025
	 * need to fetch the image id again in this case.  We
	 * do still need to set the image format though.
A
Alex Elder 已提交
5026
	 */
5027 5028 5029
	if (rbd_dev->spec->image_id) {
		rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;

A
Alex Elder 已提交
5030
		return 0;
5031
	}
A
Alex Elder 已提交
5032

A
Alex Elder 已提交
5033 5034 5035 5036
	/*
	 * First, see if the format 2 image id file exists, and if
	 * so, get the image's persistent id from it.
	 */
A
Alex Elder 已提交
5037
	size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
A
Alex Elder 已提交
5038 5039 5040
	object_name = kmalloc(size, GFP_NOIO);
	if (!object_name)
		return -ENOMEM;
5041
	sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
A
Alex Elder 已提交
5042 5043 5044 5045 5046 5047 5048 5049 5050 5051 5052
	dout("rbd id object name is %s\n", object_name);

	/* Response will be an encoded string, which includes a length */

	size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
	response = kzalloc(size, GFP_NOIO);
	if (!response) {
		ret = -ENOMEM;
		goto out;
	}

5053 5054
	/* If it doesn't exist we'll assume it's a format 1 image */

5055
	ret = rbd_obj_method_sync(rbd_dev, object_name,
5056
				"rbd", "get_id", NULL, 0,
5057
				response, RBD_IMAGE_ID_LEN_MAX);
5058
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5059 5060 5061 5062 5063
	if (ret == -ENOENT) {
		image_id = kstrdup("", GFP_KERNEL);
		ret = image_id ? 0 : -ENOMEM;
		if (!ret)
			rbd_dev->image_format = 1;
5064
	} else if (ret >= 0) {
5065 5066 5067
		void *p = response;

		image_id = ceph_extract_encoded_string(&p, p + ret,
A
Alex Elder 已提交
5068
						NULL, GFP_NOIO);
5069
		ret = PTR_ERR_OR_ZERO(image_id);
5070 5071 5072 5073 5074 5075 5076
		if (!ret)
			rbd_dev->image_format = 2;
	}

	if (!ret) {
		rbd_dev->spec->image_id = image_id;
		dout("image_id is %s\n", image_id);
A
Alex Elder 已提交
5077 5078 5079 5080 5081 5082 5083 5084
	}
out:
	kfree(response);
	kfree(object_name);

	return ret;
}

/*
 * Undo whatever state changes are made by v1 or v2 header info
 * call.
 */
A
Alex Elder 已提交
5089 5090 5091 5092
static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
	struct rbd_image_header	*header;

5093
	rbd_dev_parent_put(rbd_dev);
A
Alex Elder 已提交
5094 5095 5096 5097

	/* Free dynamic fields from the header, then zero it out */

	header = &rbd_dev->header;
5098
	ceph_put_snap_context(header->snapc);
A
Alex Elder 已提交
5099 5100 5101 5102 5103 5104
	kfree(header->snap_sizes);
	kfree(header->snap_names);
	kfree(header->object_prefix);
	memset(header, 0, sizeof (*header));
}

static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
{
	int ret;

	ret = rbd_dev_v2_object_prefix(rbd_dev);
	if (ret)
		goto out_err;

	/*
	 * Get and check the features for the image.  Currently the
	 * features are assumed to never change.
	 */
	ret = rbd_dev_v2_features(rbd_dev);
	if (ret)
		goto out_err;

	/* If the image supports fancy striping, get its parameters */

	if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
		ret = rbd_dev_v2_striping_info(rbd_dev);
		if (ret < 0)
			goto out_err;
	}
	/* No support for crypto and compression type format 2 images */

	return 0;
out_err:
	rbd_dev->header.features = 0;
	kfree(rbd_dev->header.object_prefix);
	rbd_dev->header.object_prefix = NULL;

	return ret;
}

5139 5140 5141 5142 5143 5144
/*
 * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
 * rbd_dev_image_probe() recursion depth, which means it's also the
 * length of the already discovered part of the parent chain.
 */
static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
A
Alex Elder 已提交
5145
{
5146
	struct rbd_device *parent = NULL;
5147 5148 5149 5150 5151
	int ret;

	if (!rbd_dev->parent_spec)
		return 0;

5152 5153 5154 5155 5156 5157
	if (++depth > RBD_MAX_PARENT_CHAIN_LEN) {
		pr_info("parent chain is too long (%d)\n", depth);
		ret = -EINVAL;
		goto out_err;
	}

5158 5159 5160 5161
	parent = rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec,
				NULL);
	if (!parent) {
		ret = -ENOMEM;
5162
		goto out_err;
5163 5164 5165 5166 5167 5168 5169 5170
	}

	/*
	 * Images related by parent/child relationships always share
	 * rbd_client and spec/parent_spec, so bump their refcounts.
	 */
	__rbd_get_client(rbd_dev->rbd_client);
	rbd_spec_get(rbd_dev->parent_spec);
5171

5172
	ret = rbd_dev_image_probe(parent, depth);
5173 5174
	if (ret < 0)
		goto out_err;
5175

5176
	rbd_dev->parent = parent;
5177
	atomic_set(&rbd_dev->parent_ref, 1);
5178
	return 0;
5179

5180
out_err:
5181 5182
	rbd_dev_unparent(rbd_dev);
	if (parent)
5183 5184 5185 5186
		rbd_dev_destroy(parent);
	return ret;
}

static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
	int ret;

	/* Get an id and fill in device name. */

	ret = rbd_dev_id_get(rbd_dev);
	if (ret)
		return ret;

	BUILD_BUG_ON(DEV_NAME_LEN
			< sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
	sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);

	/* Record our major and minor device numbers. */

	if (!single_major) {
		ret = register_blkdev(0, rbd_dev->name);
		if (ret < 0)
			goto err_out_id;

		rbd_dev->major = ret;
		rbd_dev->minor = 0;
	} else {
		rbd_dev->major = rbd_major;
		rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
	}

	/* Set up the blkdev mapping. */

	ret = rbd_init_disk(rbd_dev);
	if (ret)
		goto err_out_blkdev;

	ret = rbd_dev_mapping_set(rbd_dev);
	if (ret)
		goto err_out_disk;

	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
	set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);

	ret = rbd_bus_add_dev(rbd_dev);
	if (ret)
		goto err_out_mapping;

	/* Everything's ready.  Announce the disk to the world. */

	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	add_disk(rbd_dev->disk);

	pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
		(unsigned long long) rbd_dev->mapping.size);

	return ret;

err_out_mapping:
	rbd_dev_mapping_clear(rbd_dev);
err_out_disk:
	rbd_free_disk(rbd_dev);
err_out_blkdev:
	if (!single_major)
		unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_id:
	rbd_dev_id_put(rbd_dev);
	rbd_dev_mapping_clear(rbd_dev);

	return ret;
}

static int rbd_dev_header_name(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;
	size_t size;

	/* Record the header object name for this rbd image. */

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	if (rbd_dev->image_format == 1)
		size = strlen(spec->image_name) + sizeof (RBD_SUFFIX);
	else
		size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id);

	rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
	if (!rbd_dev->header_name)
		return -ENOMEM;

	if (rbd_dev->image_format == 1)
		sprintf(rbd_dev->header_name, "%s%s",
			spec->image_name, RBD_SUFFIX);
	else
		sprintf(rbd_dev->header_name, "%s%s",
			RBD_HEADER_PREFIX, spec->image_id);
	return 0;
}

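/*
 * Release the state set up by rbd_dev_image_probe(): header state,
 * the header object name, the image id and format, and finally the
 * rbd_device itself.
 */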
static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
	rbd_dev_unprobe(rbd_dev);
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	rbd_dev_destroy(rbd_dev);
}

/*
 * Probe for the existence of the header object for the given rbd
 * device.  If this image is the one being mapped (i.e., not a
 * parent), initiate a watch on its header object before using that
 * object to get detailed information about the rbd image.
 */
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
{
	int ret;

	/*
	 * Get the id from the image id object.  Unless there's an
	 * error, rbd_dev->spec->image_id will be filled in with
	 * a dynamically-allocated string, and rbd_dev->image_format
	 * will be set to either 1 or 2.
	 */
	ret = rbd_dev_image_id(rbd_dev);
	if (ret)
		return ret;

	ret = rbd_dev_header_name(rbd_dev);
	if (ret)
		goto err_out_format;

	if (!depth) {
		ret = rbd_dev_header_watch_sync(rbd_dev);
		if (ret) {
			if (ret == -ENOENT)
				pr_info("image %s/%s does not exist\n",
					rbd_dev->spec->pool_name,
					rbd_dev->spec->image_name);
			goto out_header_name;
		}
	}

	ret = rbd_dev_header_info(rbd_dev);
	if (ret)
		goto err_out_watch;

	/*
	 * If this image is the one being mapped, we have pool name and
	 * id, image name and id, and snap name - need to fill snap id.
	 * Otherwise this is a parent image, identified by pool, image
	 * and snap ids - need to fill in names for those ids.
	 */
	if (!depth)
		ret = rbd_spec_fill_snap_id(rbd_dev);
	else
		ret = rbd_spec_fill_names(rbd_dev);
	if (ret) {
		if (ret == -ENOENT)
			pr_info("snap %s/%s@%s does not exist\n",
				rbd_dev->spec->pool_name,
				rbd_dev->spec->image_name,
				rbd_dev->spec->snap_name);
		goto err_out_probe;
	}

	if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
		ret = rbd_dev_v2_parent_info(rbd_dev);
		if (ret)
			goto err_out_probe;

		/*
		 * Need to warn users if this image is the one being
		 * mapped and has a parent.
		 */
		if (!depth && rbd_dev->parent_spec)
			rbd_warn(rbd_dev,
				 "WARNING: kernel layering is EXPERIMENTAL!");
	}

	ret = rbd_dev_probe_parent(rbd_dev, depth);
	if (ret)
		goto err_out_probe;

	dout("discovered format %u image, header name is %s\n",
		rbd_dev->image_format, rbd_dev->header_name);
	return 0;

err_out_probe:
	rbd_dev_unprobe(rbd_dev);
err_out_watch:
	if (!depth)
		rbd_dev_header_unwatch_sync(rbd_dev);
out_header_name:
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
err_out_format:
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;
	return ret;
}

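/*
 * Handle a write to the sysfs "add" file: parse the add command,
 * connect to the cluster, resolve the pool, probe the image and set
 * up the block device.  Returns the number of bytes consumed on
 * success or a negative errno.
 *
 * A typical invocation looks roughly like the following (illustrative
 * only; the monitor address, credentials, pool and image names are
 * examples, see Documentation/ABI/testing/sysfs-bus-rbd for the exact
 * format):
 *
 *   $ echo "1.2.3.4:6789 name=admin rbd foo" > /sys/bus/rbd/add
 */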
static ssize_t do_rbd_add(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct ceph_options *ceph_opts = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct rbd_spec *spec = NULL;
	struct rbd_client *rbdc;
	bool read_only;
	int rc = -ENOMEM;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	/* parse add command */
	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
	if (rc < 0)
		goto err_out_module;

	rbdc = rbd_get_client(ceph_opts);
	if (IS_ERR(rbdc)) {
		rc = PTR_ERR(rbdc);
		goto err_out_args;
	}

	/* pick the pool */
	rc = rbd_add_get_pool_id(rbdc, spec->pool_name);
	if (rc < 0) {
		if (rc == -ENOENT)
			pr_info("pool %s does not exist\n", spec->pool_name);
		goto err_out_client;
	}
	spec->pool_id = (u64)rc;

	/* The ceph file layout needs to fit pool id in 32 bits */

	if (spec->pool_id > (u64)U32_MAX) {
		rbd_warn(NULL, "pool id too large (%llu > %u)",
				(unsigned long long)spec->pool_id, U32_MAX);
		rc = -EIO;
		goto err_out_client;
	}

	rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
	if (!rbd_dev)
		goto err_out_client;
	rbdc = NULL;		/* rbd_dev now owns this */
	spec = NULL;		/* rbd_dev now owns this */
	rbd_opts = NULL;	/* rbd_dev now owns this */

	rc = rbd_dev_image_probe(rbd_dev, 0);
	if (rc < 0)
		goto err_out_rbd_dev;

	/* If we are mapping a snapshot it must be marked read-only */

	read_only = rbd_dev->opts->read_only;
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
		read_only = true;
	rbd_dev->mapping.read_only = read_only;

	rc = rbd_dev_device_setup(rbd_dev);
	if (rc) {
		/*
		 * rbd_dev_header_unwatch_sync() can't be moved into
		 * rbd_dev_image_release() without refactoring, see
		 * commit 1f3ef78861ac.
		 */
		rbd_dev_header_unwatch_sync(rbd_dev);
		rbd_dev_image_release(rbd_dev);
		goto err_out_module;
	}

	return count;

err_out_rbd_dev:
	rbd_dev_destroy(rbd_dev);
err_out_client:
	rbd_put_client(rbdc);
err_out_args:
	rbd_spec_put(spec);
	kfree(rbd_opts);
err_out_module:
	module_put(THIS_MODULE);

	dout("Error adding device %s\n", buf);

	return (ssize_t)rc;
}

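/*
 * sysfs "add" entry points.  When the single_major module parameter
 * is set, only the add_single_major interface may create mappings.
 */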
static ssize_t rbd_add(struct bus_type *bus,
		       const char *buf,
		       size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_add(bus, buf, count);
}

static ssize_t rbd_add_single_major(struct bus_type *bus,
				    const char *buf,
				    size_t count)
{
	return do_rbd_add(bus, buf, count);
}

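/* Undo rbd_dev_device_setup() when the device is released. */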
static void rbd_dev_device_release(struct device *dev)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	rbd_free_disk(rbd_dev);
	clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	rbd_dev_mapping_clear(rbd_dev);
	if (!single_major)
		unregister_blkdev(rbd_dev->major, rbd_dev->name);
	rbd_dev_id_put(rbd_dev);
	rbd_dev_mapping_clear(rbd_dev);
}

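/*
 * Tear down the parent chain, deepest ancestor first, releasing the
 * image and dropping the parent spec reference at each level.
 */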
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
{
	while (rbd_dev->parent) {
		struct rbd_device *first = rbd_dev;
		struct rbd_device *second = first->parent;
		struct rbd_device *third;

		/*
		 * Follow to the parent with no grandparent and
		 * remove it.
		 */
		while (second && (third = second->parent)) {
			first = second;
			second = third;
		}
		rbd_assert(second);
		rbd_dev_image_release(second);
		first->parent = NULL;
		first->parent_overlap = 0;

		rbd_assert(first->parent_spec);
		rbd_spec_put(first->parent_spec);
		first->parent_spec = NULL;
	}
}

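/*
 * Handle a write to the sysfs "remove" file: look up the device by
 * id, refuse if it is still open, mark it as being removed, then tear
 * down the watch, the block device and the image.  For example,
 * writing "0" removes the device with id 0:
 *
 *   $ echo 0 > /sys/bus/rbd/remove
 */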
static ssize_t do_rbd_remove(struct bus_type *bus,
			     const char *buf,
			     size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct list_head *tmp;
	int dev_id;
	unsigned long ul;
	bool already = false;
	int ret;

	ret = kstrtoul(buf, 10, &ul);
	if (ret)
		return ret;

	/* convert to int; abort if we lost anything in the conversion */
	dev_id = (int)ul;
	if (dev_id != ul)
		return -EINVAL;

	ret = -ENOENT;
	spin_lock(&rbd_dev_list_lock);
	list_for_each(tmp, &rbd_dev_list) {
		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id == dev_id) {
			ret = 0;
			break;
		}
	}
	if (!ret) {
		spin_lock_irq(&rbd_dev->lock);
		if (rbd_dev->open_count)
			ret = -EBUSY;
		else
			already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
							&rbd_dev->flags);
		spin_unlock_irq(&rbd_dev->lock);
	}
	spin_unlock(&rbd_dev_list_lock);
	if (ret < 0 || already)
		return ret;

	rbd_dev_header_unwatch_sync(rbd_dev);
	/*
	 * flush remaining watch callbacks - these must be complete
	 * before the osd_client is shut down
	 */
	dout("%s: flushing notifies", __func__);
	ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);

	/*
	 * Don't free anything from rbd_dev->disk until after all
	 * notifies are completely processed. Otherwise
	 * rbd_bus_del_dev() will race with rbd_watch_cb(), resulting
	 * in a potential use after free of rbd_dev->disk or rbd_dev.
	 */
	rbd_bus_del_dev(rbd_dev);
	rbd_dev_image_release(rbd_dev);
	module_put(THIS_MODULE);

	return count;
}

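/* sysfs "remove" entry points, mirroring rbd_add()/rbd_add_single_major(). */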
static ssize_t rbd_remove(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_remove(bus, buf, count);
}

static ssize_t rbd_remove_single_major(struct bus_type *bus,
				       const char *buf,
				       size_t count)
{
	return do_rbd_remove(bus, buf, count);
}

/*
 * create control files in sysfs
 * /sys/bus/rbd/...
 */
static int rbd_sysfs_init(void)
{
	int ret;

	ret = device_register(&rbd_root_dev);
	if (ret < 0)
		return ret;

	ret = bus_register(&rbd_bus_type);
	if (ret < 0)
		device_unregister(&rbd_root_dev);

	return ret;
}

static void rbd_sysfs_cleanup(void)
{
	bus_unregister(&rbd_bus_type);
	device_unregister(&rbd_root_dev);
}

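/*
 * Create the slab caches used for image requests, object requests and
 * segment names.  All three are required; on any failure the caches
 * created so far are destroyed and -ENOMEM is returned.
 */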
static int rbd_slab_init(void)
{
	rbd_assert(!rbd_img_request_cache);
	rbd_img_request_cache = kmem_cache_create("rbd_img_request",
					sizeof (struct rbd_img_request),
					__alignof__(struct rbd_img_request),
					0, NULL);
	if (!rbd_img_request_cache)
		return -ENOMEM;

	rbd_assert(!rbd_obj_request_cache);
	rbd_obj_request_cache = kmem_cache_create("rbd_obj_request",
					sizeof (struct rbd_obj_request),
					__alignof__(struct rbd_obj_request),
					0, NULL);
	if (!rbd_obj_request_cache)
		goto out_err;

	rbd_assert(!rbd_segment_name_cache);
	rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
					CEPH_MAX_OID_NAME_LEN + 1, 1, 0, NULL);
	if (rbd_segment_name_cache)
		return 0;
out_err:
	if (rbd_obj_request_cache) {
		kmem_cache_destroy(rbd_obj_request_cache);
		rbd_obj_request_cache = NULL;
	}

	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;

	return -ENOMEM;
}

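/* Destroy the slab caches created by rbd_slab_init(). */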
static void rbd_slab_exit(void)
{
	rbd_assert(rbd_segment_name_cache);
	kmem_cache_destroy(rbd_segment_name_cache);
	rbd_segment_name_cache = NULL;

	rbd_assert(rbd_obj_request_cache);
	kmem_cache_destroy(rbd_obj_request_cache);
	rbd_obj_request_cache = NULL;

	rbd_assert(rbd_img_request_cache);
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
}

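/*
 * Module init: check libceph compatibility, create the slab caches
 * and the shared workqueue, optionally register a single block major,
 * and hook the driver into sysfs.
 */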
static int __init rbd_init(void)
{
	int rc;

	if (!libceph_compatible(NULL)) {
		rbd_warn(NULL, "libceph incompatibility (quitting)");
		return -EINVAL;
	}

	rc = rbd_slab_init();
	if (rc)
		return rc;

	/*
	 * The number of active work items is limited by the number of
	 * rbd devices * queue depth, so leave @max_active at default.
	 */
	rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
	if (!rbd_wq) {
		rc = -ENOMEM;
		goto err_out_slab;
	}

	if (single_major) {
		rbd_major = register_blkdev(0, RBD_DRV_NAME);
		if (rbd_major < 0) {
			rc = rbd_major;
			goto err_out_wq;
		}
	}

	rc = rbd_sysfs_init();
	if (rc)
		goto err_out_blkdev;

	if (single_major)
		pr_info("loaded (major %d)\n", rbd_major);
	else
		pr_info("loaded\n");

	return 0;

err_out_blkdev:
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
err_out_wq:
	destroy_workqueue(rbd_wq);
err_out_slab:
	rbd_slab_exit();
	return rc;
}

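/*
 * Module exit: tear down in roughly the reverse order of rbd_init(),
 * and destroy the device id IDA.
 */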
static void __exit rbd_exit(void)
{
	ida_destroy(&rbd_dev_id_ida);
	rbd_sysfs_cleanup();
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
	destroy_workqueue(rbd_wq);
	rbd_slab_exit();
}

module_init(rbd_init);
module_exit(rbd_exit);

MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");

MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
MODULE_LICENSE("GPL");