/*
   rbd.c -- Export ceph rados objects as a Linux block device


   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.



   For usage instructions, please refer to:

                 Documentation/ABI/testing/sysfs-bus-rbd

 */

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/workqueue.h>

#include "rbd_types.h"

#define RBD_DEBUG	/* Activate rbd_assert() calls */

/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define	SECTOR_SHIFT	9
#define	SECTOR_SIZE	(1ULL << SECTOR_SHIFT)

/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value, -EINVAL is
 * returned without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
	unsigned int counter;

	counter = (unsigned int)__atomic_add_unless(v, 1, 0);
	if (counter <= (unsigned int)INT_MAX)
		return (int)counter;

	atomic_dec(v);

	return -EINVAL;
}

/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
	int counter;

	counter = atomic_dec_return(v);
	if (counter >= 0)
		return counter;

	atomic_inc(v);

	return -EINVAL;
}
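
/*
 * Usage sketch (illustrative only): these helpers suit counters such as
 * rbd_dev->parent_ref, where 0 means "no new reference may be taken" and
 * an overflow should be reported rather than wrapped:
 *
 *	counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
 *	if (counter > 0)
 *		return;				// reference taken
 *	if (counter < 0)
 *		rbd_warn(rbd_dev, "parent reference overflow");
 */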

#define RBD_DRV_NAME "rbd"

#define RBD_MINORS_PER_MAJOR		256
#define RBD_SINGLE_MAJOR_PART_SHIFT	4

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN	\
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

#define	BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64

/* Feature bits */

#define RBD_FEATURE_LAYERING	(1<<0)
#define RBD_FEATURE_STRIPINGV2	(1<<1)
#define RBD_FEATURES_ALL \
	    (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)

/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 * MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big
 * enough to hold all possible device names.
 */
#define DEV_NAME_LEN		32
#define MAX_INT_FORMAT_WIDTH	((5 * sizeof (int)) / 2 + 1)

/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These six fields never change for a given rbd image */
	char *object_prefix;
	__u8 obj_order;
	__u8 crypt_type;
	__u8 comp_type;
	u64 stripe_unit;
	u64 stripe_count;
	u64 features;		/* Might be changeable someday? */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
};

/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};

/*
 * An instance of the client.  Multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};

struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define	BAD_WHICH	U32_MAX		/* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);

enum obj_request_type {
	OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};

enum obj_operation_type {
	OBJ_OP_WRITE,
	OBJ_OP_READ,
	OBJ_OP_DISCARD,
};

enum obj_req_flags {
	OBJ_REQ_DONE,		/* completion flag: not done = 0, done = 1 */
	OBJ_REQ_IMG_DATA,	/* object usage: standalone = 0, image = 1 */
	OBJ_REQ_KNOWN,		/* EXISTS flag valid: no = 0, yes = 1 */
	OBJ_REQ_EXISTS,		/* target exists: no = 0, yes = 1 */
};

struct rbd_obj_request {
	const char		*object_name;
	u64			offset;		/* object start byte */
	u64			length;		/* bytes from offset */
	unsigned long		flags;

	/*
	 * An object request associated with an image will have its
	 * img_data flag set; a standalone object request will not.
	 *
	 * A standalone object request will have which == BAD_WHICH
	 * and a null obj_request pointer.
	 *
	 * An object request initiated in support of a layered image
	 * object (to check for its existence before a write) will
	 * have which == BAD_WHICH and a non-null obj_request pointer.
	 *
	 * Finally, an object request for rbd image data will have
	 * which != BAD_WHICH, and will have a non-null img_request
	 * pointer.  The value of which will be in the range
	 * 0..(img_request->obj_request_count-1).
	 */
	union {
		struct rbd_obj_request	*obj_request;	/* STAT op */
		struct {
			struct rbd_img_request	*img_request;
			u64			img_offset;
			/* links for img_request->obj_requests list */
			struct list_head	links;
		};
	};
	u32			which;		/* posn image request list */

	enum obj_request_type	type;
	union {
		struct bio	*bio_list;
		struct {
			struct page	**pages;
			u32		page_count;
		};
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;

	struct ceph_osd_request	*osd_req;

	u64			xferred;	/* bytes transferred */
	int			result;

	rbd_obj_callback_t	callback;
	struct completion	completion;

	struct kref		kref;
};

enum img_req_flags {
	IMG_REQ_WRITE,		/* I/O direction: read = 0, write = 1 */
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
	IMG_REQ_DISCARD,	/* discard: normal = 0, discard request = 1 */
};

struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	u64			offset;	/* starting image byte offset */
	u64			length;	/* byte count from offset */
	unsigned long		flags;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	union {
		struct request		*rq;		/* block request */
		struct rbd_obj_request	*obj_request;	/* obj req initiator */
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;
	spinlock_t		completion_lock;/* protects next_completion */
	u32			next_completion;
	rbd_img_callback_t	callback;
	u64			xferred;/* aggregate bytes transferred */
	int			result;	/* first nonzero obj_request result */

	u32			obj_request_count;
	struct list_head	obj_requests;	/* rbd_obj_request structs */

	struct kref		kref;
};

#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
	list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)

struct rbd_mapping {
	u64                     size;
	u64                     features;
	bool			read_only;
};

/*
 * a single device
 */
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	int			minor;
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	struct list_head	rq_queue;	/* incoming rq queue */
	spinlock_t		lock;		/* queue, flags, open_count */
	struct workqueue_struct	*rq_wq;
	struct work_struct	rq_work;

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;

	char			*header_name;

	struct ceph_file_layout	layout;

	struct ceph_osd_event   *watch_event;
	struct rbd_obj_request	*watch_request;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	atomic_t		parent_ref;
	struct rbd_device	*parent;

	/* protects updating the header */
	struct rw_semaphore     header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* sysfs related */
	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};

/*
 * Flag bits for rbd_dev->flags.  If atomicity is required,
 * rbd_dev->lock is used to protect access.
 *
 * Currently, only the "removing" flag (which is coupled with the
 * "open_count" field) requires atomic access.
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
};

static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);		/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;
static struct kmem_cache	*rbd_segment_name_cache;

static int rbd_major;
static DEFINE_IDA(rbd_dev_id_ida);

/*
 * Default to false for now, as single-major requires >= 0.75 version of
 * userspace rbd utility.
 */
static bool single_major = false;
module_param(single_major, bool, S_IRUGO);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: false)");

static int rbd_img_request_submit(struct rbd_img_request *img_request);

static void rbd_dev_device_release(struct device *dev);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);
static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
				    size_t count);
static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
				       size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping);
static void rbd_spec_put(struct rbd_spec *spec);

static int rbd_dev_id_to_minor(int dev_id)
{
	return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
}

static int minor_to_rbd_dev_id(int minor)
{
	return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
}
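
/*
 * Worked example (sketch): with RBD_SINGLE_MAJOR_PART_SHIFT == 4 each
 * device owns 16 minors for its partitions, so dev_id 3 maps to first
 * minor 3 << 4 == 48, and any minor in 48..63 maps back to dev_id 3.
 */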

static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major);
static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major);

static struct attribute *rbd_bus_attrs[] = {
	&bus_attr_add.attr,
	&bus_attr_remove.attr,
	&bus_attr_add_single_major.attr,
	&bus_attr_remove_single_major.attr,
	NULL,
};

static umode_t rbd_bus_is_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
	if (!single_major &&
	    (attr == &bus_attr_add_single_major.attr ||
	     attr == &bus_attr_remove_single_major.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group rbd_bus_group = {
	.attrs = rbd_bus_attrs,
	.is_visible = rbd_bus_is_visible,
};
__ATTRIBUTE_GROUPS(rbd_bus);

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_groups	= rbd_bus_groups,
};
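
/*
 * Illustrative flow (assumed userspace usage; see
 * Documentation/ABI/testing/sysfs-bus-rbd for the authoritative format):
 * writing a line such as "1.2.3.4:6789 name=admin,secret=<key> rbd foo -"
 * to /sys/bus/rbd/add (or add_single_major) invokes rbd_add(), which
 * parses monitor addresses, options, pool, image and snapshot names.
 */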

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =    "rbd",
	.release =      rbd_root_dev_release,
};

static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}

#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */

static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_header_info(struct rbd_device *rbd_dev);
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features);
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name);

static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	(void) get_device(&rbd_dev->dev);

	return 0;
}

static void rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	put_device(&rbd_dev->dev);
}

static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
{
	int ret = 0;
	int val;
	bool ro;
	bool ro_changed = false;

	/* get_user() may sleep, so call it before taking rbd_dev->lock */
	if (get_user(val, (int __user *)(arg)))
		return -EFAULT;

	ro = val ? true : false;
	/* Snapshots do not allow writes */
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
		return -EROFS;

589 590 591 592 593 594 595
	spin_lock_irq(&rbd_dev->lock);
	/* prevent others from opening this device */
	if (rbd_dev->open_count > 1) {
		ret = -EBUSY;
		goto out;
	}

	if (rbd_dev->mapping.read_only != ro) {
		rbd_dev->mapping.read_only = ro;
		ro_changed = true;
	}

out:
	spin_unlock_irq(&rbd_dev->lock);
	/* set_disk_ro() may sleep, so call it after releasing rbd_dev->lock */
	if (ret == 0 && ro_changed)
		set_disk_ro(rbd_dev->disk, ro ? 1 : 0);

	return ret;
}

static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	int ret = 0;

	switch (cmd) {
	case BLKROSET:
		ret = rbd_ioctl_set_ro(rbd_dev, arg);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
				unsigned int cmd, unsigned long arg)
{
	return rbd_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */

static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
	.ioctl			= rbd_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= rbd_compat_ioctl,
#endif
};

/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.  Caller holds client_mutex.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
	if (IS_ERR(rbdc->client))
		goto out_rbdc;
	ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_client;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;
out_client:
	ceph_destroy_client(rbdc->client);
out_rbdc:
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}

static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}

/*
 * mount options
 */
enum {
	Opt_last_int,
	/* int args above */
	Opt_last_string,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	/* Boolean args above */
	Opt_last_bool,
};

static match_table_t rbd_opts_tokens = {
	/* int args above */
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	/* Boolean args above */
	{-1, NULL}
};

struct rbd_options {
	bool	read_only;
};

#define RBD_READ_ONLY_DEFAULT	false

static int parse_rbd_opts_token(char *c, void *private)
{
	struct rbd_options *rbd_opts = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < 0)
		return -EINVAL;

	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad mount option arg (not int) "
			       "at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token,
		     argstr[0].from);
	} else if (token > Opt_last_string && token < Opt_last_bool) {
		dout("got Boolean token %d\n", token);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_read_only:
		rbd_opts->read_only = true;
		break;
	case Opt_read_write:
		rbd_opts->read_only = false;
		break;
	default:
		rbd_assert(false);
		break;
	}
	return 0;
}
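
/*
 * Example (sketch): an option string such as "read_only" (or "ro"/"rw")
 * supplied with the map request is handed to this callback one token at
 * a time by ceph_parse_options(), so "ro" sets rbd_opts->read_only and
 * "rw" clears it.
 */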

static char* obj_op_name(enum obj_operation_type op_type)
{
	switch (op_type) {
	case OBJ_OP_READ:
		return "read";
	case OBJ_OP_WRITE:
		return "write";
	case OBJ_OP_DISCARD:
		return "discard";
	default:
		return "???";
	}
}

/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;

	mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
	rbdc = rbd_client_find(ceph_opts);
	if (rbdc)	/* using an existing client */
		ceph_destroy_options(ceph_opts);
	else
		rbdc = rbd_client_create(ceph_opts);
	mutex_unlock(&client_mutex);

	return rbdc;
}

/*
 * Destroy ceph client
 *
 * Caller must hold rbd_client_list_lock.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}

static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}

static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire the snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}

/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
				 struct rbd_image_header_ondisk *ondisk)
{
	struct rbd_image_header *header = &rbd_dev->header;
	bool first_time = header->object_prefix == NULL;
	struct ceph_snap_context *snapc;
	char *object_prefix = NULL;
	char *snap_names = NULL;
	u64 *snap_sizes = NULL;
	u32 snap_count;
	size_t size;
	int ret = -ENOMEM;
	u32 i;

	/* Allocate this now to avoid having to handle failure below */

	if (first_time) {
		size_t len;

		len = strnlen(ondisk->object_prefix,
				sizeof (ondisk->object_prefix));
		object_prefix = kmalloc(len + 1, GFP_KERNEL);
		if (!object_prefix)
			return -ENOMEM;
		memcpy(object_prefix, ondisk->object_prefix, len);
		object_prefix[len] = '\0';
	}

	/* Allocate the snapshot context and fill it in */

	snap_count = le32_to_cpu(ondisk->snap_count);
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc)
		goto out_err;
	snapc->seq = le64_to_cpu(ondisk->snap_seq);
	if (snap_count) {
		struct rbd_image_snap_ondisk *snaps;
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* We'll keep a copy of the snapshot names... */

		if (snap_names_len > (u64)SIZE_MAX)
			goto out_2big;
		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!snap_names)
			goto out_err;

		/* ...as well as the array of their sizes. */

		size = snap_count * sizeof (*header->snap_sizes);
		snap_sizes = kmalloc(size, GFP_KERNEL);
		if (!snap_sizes)
			goto out_err;

		/*
		 * Copy the names, and fill in each snapshot's id
		 * and size.
		 *
		 * Note that rbd_dev_v1_header_info() guarantees the
		 * ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, this memcpy() is safe.
		 */
		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
		snaps = ondisk->snaps;
		for (i = 0; i < snap_count; i++) {
			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
		}
	}

	/* We won't fail any more, fill in the header */

	if (first_time) {
		header->object_prefix = object_prefix;
		header->obj_order = ondisk->options.order;
		header->crypt_type = ondisk->options.crypt_type;
		header->comp_type = ondisk->options.comp_type;
		/* The rest aren't used for format 1 images */
		header->stripe_unit = 0;
		header->stripe_count = 0;
		header->features = 0;
	} else {
		ceph_put_snap_context(header->snapc);
		kfree(header->snap_names);
		kfree(header->snap_sizes);
	}

	/* The remaining fields always get updated (when we refresh) */

	header->image_size = le64_to_cpu(ondisk->image_size);
	header->snapc = snapc;
	header->snap_names = snap_names;
	header->snap_sizes = snap_sizes;

	return 0;
out_2big:
	ret = -EIO;
out_err:
	kfree(snap_sizes);
	kfree(snap_names);
	ceph_put_snap_context(snapc);
	kfree(object_prefix);

	return ret;
}

static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}

/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}

/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 *found;

	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);

	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}
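
/*
 * Example (sketch): the osd keeps snapc->snaps in descending order, so
 * with snaps = { 14, 9, 3 } a lookup of snap_id 9 yields index 1 while
 * a lookup of 10 yields BAD_SNAP_INDEX.
 */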

static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	u32 which;
	const char *snap_name;

	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
		return ERR_PTR(-ENOENT);

	snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
	return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}

static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;

		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
}

static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
			u64 *snap_features)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_features = rbd_dev->header.features;
	} else if (rbd_dev->image_format == 1) {
		*snap_features = 0;	/* No features for format 1 */
	} else {
		u64 features = 0;
		int ret;

		ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
		if (ret)
			return ret;

		*snap_features = features;
	}
	return 0;
}

static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	u64 snap_id = rbd_dev->spec->snap_id;
	u64 size = 0;
	u64 features = 0;
	int ret;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;
	ret = rbd_snap_features(rbd_dev, snap_id, &features);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	rbd_dev->mapping.features = features;

	return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
}

static void rbd_segment_name_free(const char *name)
{
	/* The explicit cast here is needed to drop the const qualifier */

	kmem_cache_free(rbd_segment_name_cache, (void *)name);
}

static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
{
	char *name;
	u64 segment;
	int ret;
	char *name_format;

	name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
	if (!name)
		return NULL;
	segment = offset >> rbd_dev->header.obj_order;
	name_format = "%s.%012llx";
	if (rbd_dev->image_format == 2)
		name_format = "%s.%016llx";
	ret = snprintf(name, CEPH_MAX_OID_NAME_LEN + 1, name_format,
			rbd_dev->header.object_prefix, segment);
	if (ret < 0 || ret > CEPH_MAX_OID_NAME_LEN) {
		pr_err("error formatting segment name for #%llu (%d)\n",
			segment, ret);
		rbd_segment_name_free(name);
		name = NULL;
	}

	return name;
}

static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	return offset & (segment_size - 1);
}

static u64 rbd_segment_length(struct rbd_device *rbd_dev,
				u64 offset, u64 length)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	offset &= segment_size - 1;

	rbd_assert(length <= U64_MAX - offset);
	if (offset + length > segment_size)
		length = segment_size - offset;

	return length;
}
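
/*
 * Worked example (sketch, assuming obj_order == 22, i.e. 4 MiB objects):
 * image offset 10 MiB falls in segment 2 at offset 2 MiB within the
 * object, and a 4 MiB request starting there is clipped to 2 MiB so it
 * does not cross the object boundary.  For a format 2 image the object
 * would be named "<object_prefix>.0000000000000002".
 */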

/*
 * returns the size of an object in the image
 */
static u64 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1 << header->obj_order;
}

/*
 * bio helpers
 */

static void bio_chain_put(struct bio *chain)
{
	struct bio *tmp;

	while (chain) {
		tmp = chain;
		chain = chain->bi_next;
		bio_put(tmp);
	}
}

/*
 * zeros a bio chain, starting at a specific offset
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned long flags;
	void *buf;
	int pos = 0;

	while (chain) {
		bio_for_each_segment(bv, chain, iter) {
			if (pos + bv.bv_len > start_ofs) {
				int remainder = max(start_ofs - pos, 0);
				buf = bvec_kmap_irq(&bv, &flags);
				memset(buf + remainder, 0,
				       bv.bv_len - remainder);
				flush_dcache_page(bv.bv_page);
				bvec_kunmap_irq(buf, &flags);
			}
			pos += bv.bv_len;
		}

		chain = chain->bi_next;
	}
}

/*
 * similar to zero_bio_chain(), zeros data defined by a page array,
 * starting at the given byte offset from the start of the array and
 * continuing up to the given end offset.  The pages array is
 * assumed to be big enough to hold all bytes up to the end.
 */
static void zero_pages(struct page **pages, u64 offset, u64 end)
{
	struct page **page = &pages[offset >> PAGE_SHIFT];

	rbd_assert(end > offset);
	rbd_assert(end - offset <= (u64)SIZE_MAX);
	while (offset < end) {
		size_t page_offset;
		size_t length;
		unsigned long flags;
		void *kaddr;

		page_offset = offset & ~PAGE_MASK;
		length = min_t(size_t, PAGE_SIZE - page_offset, end - offset);
		local_irq_save(flags);
		kaddr = kmap_atomic(*page);
		memset(kaddr + page_offset, 0, length);
		flush_dcache_page(*page);
		kunmap_atomic(kaddr);
		local_irq_restore(flags);

		offset += length;
		page++;
	}
}

/*
 * Clone a portion of a bio, starting at the given byte offset
 * and continuing for the number of bytes indicated.
 */
static struct bio *bio_clone_range(struct bio *bio_src,
					unsigned int offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bio;

	bio = bio_clone(bio_src, gfpmask);
	if (!bio)
		return NULL;	/* ENOMEM */

	bio_advance(bio, offset);
	bio->bi_iter.bi_size = len;

	return bio;
}

/*
 * Clone a portion of a bio chain, starting at the given byte offset
 * into the first bio in the source chain and continuing for the
 * number of bytes indicated.  The result is another bio chain of
 * exactly the given length, or a null pointer on error.
 *
 * The bio_src and offset parameters are both in-out.  On entry they
 * refer to the first source bio and the offset into that bio where
 * the start of data to be cloned is located.
 *
 * On return, bio_src is updated to refer to the bio in the source
 * chain that contains first un-cloned byte, and *offset will
 * contain the offset of that byte within that bio.
 */
static struct bio *bio_chain_clone_range(struct bio **bio_src,
					unsigned int *offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bi = *bio_src;
	unsigned int off = *offset;
	struct bio *chain = NULL;
	struct bio **end;

	/* Build up a chain of clone bios up to the limit */

	if (!bi || off >= bi->bi_iter.bi_size || !len)
		return NULL;		/* Nothing to clone */

	end = &chain;
	while (len) {
		unsigned int bi_size;
		struct bio *bio;

		if (!bi) {
			rbd_warn(NULL, "bio_chain exhausted with %u left", len);
			goto out_err;	/* EINVAL; ran out of bio's */
		}
		bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len);
		bio = bio_clone_range(bi, off, bi_size, gfpmask);
		if (!bio)
			goto out_err;	/* ENOMEM */

		*end = bio;
		end = &bio->bi_next;

		off += bi_size;
		if (off == bi->bi_iter.bi_size) {
			bi = bi->bi_next;
			off = 0;
		}
		len -= bi_size;
	}
	*bio_src = bi;
	*offset = off;

	return chain;
out_err:
	bio_chain_put(chain);

	return NULL;
}

/*
 * The default/initial value for all object request flags is 0.  For
 * each flag, once its value is set to 1 it is never reset to 0
 * again.
 */
static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
		struct rbd_device *rbd_dev;

		rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked img_data",
			obj_request);
	}
}

static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
}

static void obj_request_done_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
		struct rbd_device *rbd_dev = NULL;

		if (obj_request_img_data_test(obj_request))
			rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked done",
			obj_request);
	}
}

static bool obj_request_done_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
}

/*
 * This sets the KNOWN flag after (possibly) setting the EXISTS
 * flag.  The latter is set based on the "exists" value provided.
 *
 * Note that for our purposes once an object exists it never goes
 * away again.  It's possible that the responses from two existence
 * checks are separated by the creation of the target object, and
 * the first ("doesn't exist") response arrives *after* the second
 * ("does exist").  In that case we ignore the second one.
 */
static void obj_request_existence_set(struct rbd_obj_request *obj_request,
				bool exists)
{
	if (exists)
		set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
	set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
	smp_mb();
}

static bool obj_request_known_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
}

static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
}

static bool obj_request_overlaps_parent(struct rbd_obj_request *obj_request)
{
	struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;

	return obj_request->img_offset <
	    round_up(rbd_dev->parent_overlap, rbd_obj_bytes(&rbd_dev->header));
}

static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_get(&obj_request->kref);
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static void rbd_img_request_get(struct rbd_img_request *img_request)
{
	dout("%s: img %p (was %d)\n", __func__, img_request,
	     atomic_read(&img_request->kref.refcount));
	kref_get(&img_request->kref);
}

static bool img_request_child_test(struct rbd_img_request *img_request);
static void rbd_parent_request_destroy(struct kref *kref);
static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
	rbd_assert(img_request != NULL);
	dout("%s: img %p (was %d)\n", __func__, img_request,
		atomic_read(&img_request->kref.refcount));
	if (img_request_child_test(img_request))
		kref_put(&img_request->kref, rbd_parent_request_destroy);
	else
		kref_put(&img_request->kref, rbd_img_request_destroy);
}

static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	/* Image request now owns object's original reference */
	obj_request->img_request = img_request;
	obj_request->which = img_request->obj_request_count;
	rbd_assert(!obj_request_img_data_test(obj_request));
	obj_request_img_data_set(obj_request);
	rbd_assert(obj_request->which != BAD_WHICH);
	img_request->obj_request_count++;
	list_add_tail(&obj_request->links, &img_request->obj_requests);
	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->which != BAD_WHICH);

	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
	list_del(&obj_request->links);
	rbd_assert(img_request->obj_request_count > 0);
	img_request->obj_request_count--;
	rbd_assert(obj_request->which == img_request->obj_request_count);
	obj_request->which = BAD_WHICH;
	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request->img_request == img_request);
	obj_request->img_request = NULL;
	obj_request->callback = NULL;
	rbd_obj_request_put(obj_request);
}

static bool obj_request_type_valid(enum obj_request_type type)
{
	switch (type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_PAGES:
		return true;
	default:
		return false;
	}
}

static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
				struct rbd_obj_request *obj_request)
{
	dout("%s %p\n", __func__, obj_request);
	return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
}

static void rbd_obj_request_end(struct rbd_obj_request *obj_request)
{
	dout("%s %p\n", __func__, obj_request);
	ceph_osdc_cancel_request(obj_request->osd_req);
}

/*
 * Wait for an object request to complete.  If interrupted, cancel the
 * underlying osd request.
 */
static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
{
	int ret;

	dout("%s %p\n", __func__, obj_request);

	ret = wait_for_completion_interruptible(&obj_request->completion);
	if (ret < 0) {
		dout("%s %p interrupted\n", __func__, obj_request);
		rbd_obj_request_end(obj_request);
		return ret;
	}

	dout("%s %p done\n", __func__, obj_request);
	return 0;
}

static void rbd_img_request_complete(struct rbd_img_request *img_request)
{

	dout("%s: img %p\n", __func__, img_request);

	/*
	 * If no error occurred, compute the aggregate transfer
	 * count for the image request.  We could instead use
	 * atomic64_cmpxchg() to update it as each object request
	 * completes; not clear which way is better off hand.
	 */
	if (!img_request->result) {
		struct rbd_obj_request *obj_request;
		u64 xferred = 0;

		for_each_obj_request(img_request, obj_request)
			xferred += obj_request->xferred;
		img_request->xferred = xferred;
	}

	if (img_request->callback)
		img_request->callback(img_request);
	else
		rbd_img_request_put(img_request);
}

/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never change thereafter.
 */
static void img_request_write_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_WRITE, &img_request->flags);
	smp_mb();
}

static bool img_request_write_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
}

/*
 * Set the discard flag when the img_request is a discard request
 */
static void img_request_discard_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_DISCARD, &img_request->flags);
	smp_mb();
}

static bool img_request_discard_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_DISCARD, &img_request->flags) != 0;
}

static void img_request_child_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static void img_request_child_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static bool img_request_child_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
}

static void img_request_layered_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static void img_request_layered_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}

static void
rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
{
	u64 xferred = obj_request->xferred;
	u64 length = obj_request->length;

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, obj_request->img_request, obj_request->result,
		xferred, length);
	/*
	 * ENOENT means a hole in the image.  We zero-fill the entire
	 * length of the request.  A short read also implies zero-fill
	 * to the end of the request.  An error requires the whole
	 * length of the request to be reported finished with an error
	 * to the block layer.  In each case we update the xferred
	 * count to indicate the whole request was satisfied.
	 */
	rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
	if (obj_request->result == -ENOENT) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, 0);
		else
			zero_pages(obj_request->pages, 0, length);
		obj_request->result = 0;
	} else if (xferred < length && !obj_request->result) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, xferred);
		else
			zero_pages(obj_request->pages, xferred, length);
	}
	obj_request->xferred = length;
	obj_request_done_set(obj_request);
}

static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p cb %p\n", __func__, obj_request,
		obj_request->callback);
	if (obj_request->callback)
		obj_request->callback(obj_request);
	else
		complete_all(&obj_request->completion);
}

static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}

static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = NULL;
	struct rbd_device *rbd_dev = NULL;
	bool layered = false;

	if (obj_request_img_data_test(obj_request)) {
		img_request = obj_request->img_request;
		layered = img_request && img_request_layered_test(img_request);
		rbd_dev = img_request->rbd_dev;
	}

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, img_request, obj_request->result,
		obj_request->xferred, obj_request->length);
	if (layered && obj_request->result == -ENOENT &&
			obj_request->img_offset < rbd_dev->parent_overlap)
		rbd_img_parent_read(obj_request);
	else if (img_request)
		rbd_img_obj_request_read_callback(obj_request);
	else
		obj_request_done_set(obj_request);
}

static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
A
Alex Elder 已提交
1754 1755
	 * There is no such thing as a successful short write.  Set
	 * it to our originally-requested length.
1756 1757
	 */
	obj_request->xferred = obj_request->length;
1758
	obj_request_done_set(obj_request);
A
Alex Elder 已提交
1759 1760
}

1761 1762 1763 1764 1765 1766 1767 1768 1769
static void rbd_osd_discard_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short discard.  Set
	 * it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
1770 1771 1772
	/* discarding a non-existent object is not a problem */
	if (obj_request->result == -ENOENT)
		obj_request->result = 0;
1773 1774 1775
	obj_request_done_set(obj_request);
}

A
Alex Elder 已提交
1776 1777 1778 1779
/*
 * For a simple stat call there's nothing to do.  We'll do more if
 * this is part of a write sequence for a layered image.
 */
1780
static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
A
Alex Elder 已提交
1781
{
A
Alex Elder 已提交
1782
	dout("%s: obj %p\n", __func__, obj_request);
A
Alex Elder 已提交
1783 1784 1785
	obj_request_done_set(obj_request);
}

static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
				struct ceph_msg *msg)
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;
	u16 opcode;

	dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
	rbd_assert(osd_req == obj_request->osd_req);
	if (obj_request_img_data_test(obj_request)) {
		rbd_assert(obj_request->img_request);
		rbd_assert(obj_request->which != BAD_WHICH);
	} else {
		rbd_assert(obj_request->which == BAD_WHICH);
	}

	if (osd_req->r_result < 0)
		obj_request->result = osd_req->r_result;

	rbd_assert(osd_req->r_num_ops <= CEPH_OSD_MAX_OP);

	/*
	 * We support a 64-bit length, but ultimately it has to be
	 * passed to blk_end_request(), which takes an unsigned int.
	 */
	obj_request->xferred = osd_req->r_reply_op_len[0];
	rbd_assert(obj_request->xferred < (u64)UINT_MAX);

	opcode = osd_req->r_ops[0].op;
	switch (opcode) {
	case CEPH_OSD_OP_READ:
		rbd_osd_read_callback(obj_request);
		break;
	case CEPH_OSD_OP_SETALLOCHINT:
		rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE);
		/* fall through */
	case CEPH_OSD_OP_WRITE:
		rbd_osd_write_callback(obj_request);
		break;
	case CEPH_OSD_OP_STAT:
		rbd_osd_stat_callback(obj_request);
		break;
	case CEPH_OSD_OP_DELETE:
	case CEPH_OSD_OP_TRUNCATE:
	case CEPH_OSD_OP_ZERO:
		rbd_osd_discard_callback(obj_request);
		break;
	case CEPH_OSD_OP_CALL:
	case CEPH_OSD_OP_NOTIFY_ACK:
	case CEPH_OSD_OP_WATCH:
		rbd_osd_trivial_callback(obj_request);
		break;
	default:
		rbd_warn(NULL, "%s: unsupported op %hu",
			obj_request->object_name, (unsigned short) opcode);
		break;
	}

	if (obj_request_done_test(obj_request))
		rbd_obj_request_complete(obj_request);
}

static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;
	u64 snap_id;

	rbd_assert(osd_req != NULL);

	snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
	ceph_osdc_build_request(osd_req, obj_request->offset,
			NULL, snap_id, NULL);
}

static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;
	struct ceph_snap_context *snapc;
	struct timespec mtime = CURRENT_TIME;

	rbd_assert(osd_req != NULL);

	snapc = img_request ? img_request->snapc : NULL;
	ceph_osdc_build_request(osd_req, obj_request->offset,
			snapc, CEPH_NOSNAP, &mtime);
}

/*
 * Create an osd request.  A read request has one osd op (read).
 * A write request has either one (watch) or two (hint+write) osd ops.
 * (All rbd data writes are prefixed with an allocation hint op, but
 * technically osd watch is a write request, hence this distinction.)
 */
static struct ceph_osd_request *rbd_osd_req_create(
					struct rbd_device *rbd_dev,
					enum obj_operation_type op_type,
					unsigned int num_ops,
					struct rbd_obj_request *obj_request)
{
	struct ceph_snap_context *snapc = NULL;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;

	if (obj_request_img_data_test(obj_request) &&
		(op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_WRITE)) {
		struct rbd_img_request *img_request = obj_request->img_request;
		if (op_type == OBJ_OP_WRITE) {
			rbd_assert(img_request_write_test(img_request));
		} else {
			rbd_assert(img_request_discard_test(img_request));
		}
		snapc = img_request->snapc;
	}

	rbd_assert(num_ops == 1 || ((op_type == OBJ_OP_WRITE) && num_ops == 2));

	/* Allocate and initialize the request, for the num_ops ops */

	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
					  GFP_ATOMIC);
	if (!osd_req)
		return NULL;	/* ENOMEM */

	if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
		osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	else
		osd_req->r_flags = CEPH_OSD_FLAG_READ;

	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
	ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);

	return osd_req;
}

/*
 * Create a copyup osd request based on the information in the
 * object request supplied.  A copyup request has three osd ops,
 * a copyup method call, a hint op, and a write op.
 */
static struct ceph_osd_request *
rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	struct ceph_snap_context *snapc;
	struct rbd_device *rbd_dev;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;
	rbd_assert(img_request);
	rbd_assert(img_request_write_test(img_request));

	/* Allocate and initialize the request, for the three ops */

	snapc = img_request->snapc;
	rbd_dev = img_request->rbd_dev;
	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, 3, false, GFP_ATOMIC);
	if (!osd_req)
		return NULL;	/* ENOMEM */

	osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
	ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);

	return osd_req;
}

static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
	ceph_osdc_put_request(osd_req);
}

/* object_name is assumed to be a non-null pointer and NUL-terminated */

static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
						u64 offset, u64 length,
						enum obj_request_type type)
{
	struct rbd_obj_request *obj_request;
	size_t size;
	char *name;

	rbd_assert(obj_request_type_valid(type));

	size = strlen(object_name) + 1;
	name = kmalloc(size, GFP_KERNEL);
	if (!name)
		return NULL;

	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_KERNEL);
	if (!obj_request) {
		kfree(name);
		return NULL;
	}

	obj_request->object_name = memcpy(name, object_name, size);
	obj_request->offset = offset;
	obj_request->length = length;
	obj_request->flags = 0;
	obj_request->which = BAD_WHICH;
	obj_request->type = type;
	INIT_LIST_HEAD(&obj_request->links);
	init_completion(&obj_request->completion);
	kref_init(&obj_request->kref);

	dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
		offset, length, (int)type, obj_request);

	return obj_request;
}

static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	dout("%s: obj %p\n", __func__, obj_request);

	rbd_assert(obj_request->img_request == NULL);
	rbd_assert(obj_request->which == BAD_WHICH);

	if (obj_request->osd_req)
		rbd_osd_req_destroy(obj_request->osd_req);

	rbd_assert(obj_request_type_valid(obj_request->type));
	switch (obj_request->type) {
	case OBJ_REQUEST_NODATA:
		break;		/* Nothing to do */
	case OBJ_REQUEST_BIO:
		if (obj_request->bio_list)
			bio_chain_put(obj_request->bio_list);
		break;
	case OBJ_REQUEST_PAGES:
		if (obj_request->pages)
			ceph_release_page_vector(obj_request->pages,
						obj_request->page_count);
		break;
	}

	kfree(obj_request->object_name);
	obj_request->object_name = NULL;
	kmem_cache_free(rbd_obj_request_cache, obj_request);
}

/* It's OK to call this for a device with no parent */

static void rbd_spec_put(struct rbd_spec *spec);
static void rbd_dev_unparent(struct rbd_device *rbd_dev)
{
	rbd_dev_remove_parent(rbd_dev);
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;
}

/*
 * Parent image reference counting is used to determine when an
 * image's parent fields can be safely torn down--after there are no
 * more in-flight requests to the parent image.  When the last
 * reference is dropped, cleaning them up is safe.
 */
static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return;

	counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
	if (counter > 0)
		return;

	/* Last reference; clean up parent data structures */

	if (!counter)
		rbd_dev_unparent(rbd_dev);
	else
		rbd_warn(rbd_dev, "parent reference underflow");
}

/*
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * We must get the reference before checking for the overlap to
 * coordinate properly with zeroing the parent overlap in
 * rbd_dev_v2_parent_info() when an image gets flattened.  We
 * drop it again if there is no overlap.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or
 * false otherwise.
 */
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return false;

	counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
	if (counter > 0 && rbd_dev->parent_overlap)
		return true;

	/* Image was flattened, but parent is not yet torn down */

	if (counter < 0)
		rbd_warn(rbd_dev, "parent reference overflow");

	return false;
}

/*
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 */
static struct rbd_img_request *rbd_img_request_create(
					struct rbd_device *rbd_dev,
					u64 offset, u64 length,
					enum obj_operation_type op_type,
					struct ceph_snap_context *snapc)
{
	struct rbd_img_request *img_request;

	img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_NOIO);
	if (!img_request)
		return NULL;

	img_request->rq = NULL;
	img_request->rbd_dev = rbd_dev;
	img_request->offset = offset;
	img_request->length = length;
	img_request->flags = 0;
	if (op_type == OBJ_OP_DISCARD) {
		img_request_discard_set(img_request);
		img_request->snapc = snapc;
	} else if (op_type == OBJ_OP_WRITE) {
		img_request_write_set(img_request);
		img_request->snapc = snapc;
	} else {
		img_request->snap_id = rbd_dev->spec->snap_id;
	}
	if (rbd_dev_parent_get(rbd_dev))
		img_request_layered_set(img_request);
	spin_lock_init(&img_request->completion_lock);
	img_request->next_completion = 0;
	img_request->callback = NULL;
	img_request->result = 0;
	img_request->obj_request_count = 0;
	INIT_LIST_HEAD(&img_request->obj_requests);
	kref_init(&img_request->kref);

	dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
		obj_op_name(op_type), offset, length, img_request);

	return img_request;
}

static void rbd_img_request_destroy(struct kref *kref)
{
	struct rbd_img_request *img_request;
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	img_request = container_of(kref, struct rbd_img_request, kref);

	dout("%s: img %p\n", __func__, img_request);

	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);
	rbd_assert(img_request->obj_request_count == 0);

	if (img_request_layered_test(img_request)) {
		img_request_layered_clear(img_request);
		rbd_dev_parent_put(img_request->rbd_dev);
	}

	if (img_request_write_test(img_request) ||
		img_request_discard_test(img_request))
		ceph_put_snap_context(img_request->snapc);

	kmem_cache_free(rbd_img_request_cache, img_request);
}

static struct rbd_img_request *rbd_parent_request_create(
					struct rbd_obj_request *obj_request,
					u64 img_offset, u64 length)
{
	struct rbd_img_request *parent_request;
	struct rbd_device *rbd_dev;

	rbd_assert(obj_request->img_request);
	rbd_dev = obj_request->img_request->rbd_dev;

	parent_request = rbd_img_request_create(rbd_dev->parent, img_offset,
						length, OBJ_OP_READ, NULL);
	if (!parent_request)
		return NULL;

	img_request_child_set(parent_request);
	rbd_obj_request_get(obj_request);
	parent_request->obj_request = obj_request;

	return parent_request;
}

static void rbd_parent_request_destroy(struct kref *kref)
{
	struct rbd_img_request *parent_request;
	struct rbd_obj_request *orig_request;

	parent_request = container_of(kref, struct rbd_img_request, kref);
	orig_request = parent_request->obj_request;

	parent_request->obj_request = NULL;
	rbd_obj_request_put(orig_request);
	img_request_child_clear(parent_request);

	rbd_img_request_destroy(kref);
}

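/*
 * Complete one object request within an image request: record any
 * error in the image request and, for a block-layer request, pass
 * the transferred byte count to blk_end_request().  Returns true if
 * more object requests remain to be completed for the image request.
 */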
static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	unsigned int xferred;
	int result;
	bool more;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;

	rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
	xferred = (unsigned int)obj_request->xferred;
	result = obj_request->result;
	if (result) {
		struct rbd_device *rbd_dev = img_request->rbd_dev;
		enum obj_operation_type op_type;

		if (img_request_discard_test(img_request))
			op_type = OBJ_OP_DISCARD;
		else if (img_request_write_test(img_request))
			op_type = OBJ_OP_WRITE;
		else
			op_type = OBJ_OP_READ;

		rbd_warn(rbd_dev, "%s %llx at %llx (%llx)",
			obj_op_name(op_type), obj_request->length,
			obj_request->img_offset, obj_request->offset);
		rbd_warn(rbd_dev, "  result %d xferred %x",
			result, xferred);
		if (!img_request->result)
			img_request->result = result;
	}

	/* Image object requests don't own their page array */

	if (obj_request->type == OBJ_REQUEST_PAGES) {
		obj_request->pages = NULL;
		obj_request->page_count = 0;
	}

	if (img_request_child_test(img_request)) {
		rbd_assert(img_request->obj_request != NULL);
		more = obj_request->which < img_request->obj_request_count - 1;
	} else {
		rbd_assert(img_request->rq != NULL);
		more = blk_end_request(img_request->rq, result, xferred);
	}

	return more;
}

static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	u32 which = obj_request->which;
	bool more = true;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;

	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
	rbd_assert(img_request != NULL);
	rbd_assert(img_request->obj_request_count > 0);
	rbd_assert(which != BAD_WHICH);
	rbd_assert(which < img_request->obj_request_count);

	spin_lock_irq(&img_request->completion_lock);
	if (which != img_request->next_completion)
		goto out;

	for_each_obj_request_from(img_request, obj_request) {
		rbd_assert(more);
		rbd_assert(which < img_request->obj_request_count);

		if (!obj_request_done_test(obj_request))
			break;
		more = rbd_img_obj_end_request(obj_request);
		which++;
	}

	rbd_assert(more ^ (which == img_request->obj_request_count));
	img_request->next_completion = which;
out:
	spin_unlock_irq(&img_request->completion_lock);
	rbd_img_request_put(img_request);

	if (!more)
		rbd_img_request_complete(img_request);
}

/*
 * Split up an image request into one or more object requests, each
 * to a different object.  The "type" parameter indicates whether
 * "data_desc" is the pointer to the head of a list of bio
 * structures, or the base of a page array.  In either case this
 * function assumes data_desc describes memory sufficient to hold
 * all data described by the image request.
 */
static int rbd_img_request_fill(struct rbd_img_request *img_request,
					enum obj_request_type type,
					void *data_desc)
{
	struct rbd_device *rbd_dev = img_request->rbd_dev;
	struct rbd_obj_request *obj_request = NULL;
	struct rbd_obj_request *next_obj_request;
	struct bio *bio_list = NULL;
	unsigned int bio_offset = 0;
	struct page **pages = NULL;
	enum obj_operation_type op_type;
	u64 object_size = rbd_obj_bytes(&rbd_dev->header);
	u64 img_offset;
	u64 img_end;
	u64 resid;
	u16 opcode;

	dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
		(int)type, data_desc);

	img_offset = img_request->offset;
	resid = img_request->length;
	rbd_assert(resid > 0);

	if (type == OBJ_REQUEST_BIO) {
		bio_list = data_desc;
		rbd_assert(img_offset ==
			   bio_list->bi_iter.bi_sector << SECTOR_SHIFT);
	} else if (type == OBJ_REQUEST_PAGES) {
		pages = data_desc;
	}

	while (resid) {
		struct ceph_osd_request *osd_req;
		const char *object_name;
		u64 offset;
		u64 length;
		unsigned int which = 0;

		object_name = rbd_segment_name(rbd_dev, img_offset);
		if (!object_name)
			goto out_unwind;
		offset = rbd_segment_offset(rbd_dev, img_offset);
		length = rbd_segment_length(rbd_dev, img_offset, resid);
		obj_request = rbd_obj_request_create(object_name,
						offset, length, type);
		/* object request has its own copy of the object name */
		rbd_segment_name_free(object_name);
		if (!obj_request)
			goto out_unwind;

		/*
		 * set obj_request->img_request before creating the
		 * osd_request so that it gets the right snapc
		 */
		rbd_img_obj_request_add(img_request, obj_request);

		if (type == OBJ_REQUEST_BIO) {
			unsigned int clone_size;

			rbd_assert(length <= (u64)UINT_MAX);
			clone_size = (unsigned int)length;
			obj_request->bio_list =
					bio_chain_clone_range(&bio_list,
								&bio_offset,
								clone_size,
								GFP_ATOMIC);
			if (!obj_request->bio_list)
				goto out_unwind;
		} else if (type == OBJ_REQUEST_PAGES) {
			unsigned int page_count;

			obj_request->pages = pages;
			page_count = (u32)calc_pages_for(offset, length);
			obj_request->page_count = page_count;
			if ((offset + length) & ~PAGE_MASK)
				page_count--;	/* more on last page */
			pages += page_count;
		}

		if (img_request_discard_test(img_request)) {
			op_type = OBJ_OP_DISCARD;
			if (!offset && (length == object_size)
				&& (!img_request_layered_test(img_request) ||
					(rbd_dev->parent_overlap <=
						obj_request->img_offset))) {
				opcode = CEPH_OSD_OP_DELETE;
			} else if ((offset + length == object_size)) {
				opcode = CEPH_OSD_OP_TRUNCATE;
			} else {
				down_read(&rbd_dev->header_rwsem);
				img_end = rbd_dev->header.image_size;
				up_read(&rbd_dev->header_rwsem);

				if (obj_request->img_offset + length == img_end)
					opcode = CEPH_OSD_OP_TRUNCATE;
				else
					opcode = CEPH_OSD_OP_ZERO;
			}
		} else if (img_request_write_test(img_request)) {
			op_type = OBJ_OP_WRITE;
			opcode = CEPH_OSD_OP_WRITE;
		} else {
			op_type = OBJ_OP_READ;
			opcode = CEPH_OSD_OP_READ;
		}

		osd_req = rbd_osd_req_create(rbd_dev, op_type,
					(op_type == OBJ_OP_WRITE) ? 2 : 1,
					obj_request);
		if (!osd_req)
			goto out_unwind;
		obj_request->osd_req = osd_req;
		obj_request->callback = rbd_img_obj_callback;
		rbd_img_request_get(img_request);

		if (op_type == OBJ_OP_WRITE) {
			osd_req_op_alloc_hint_init(osd_req, which,
					     rbd_obj_bytes(&rbd_dev->header),
					     rbd_obj_bytes(&rbd_dev->header));
			which++;
		}

		osd_req_op_extent_init(osd_req, which, opcode, offset, length,
				       0, 0);
		if (type == OBJ_REQUEST_BIO)
			osd_req_op_extent_osd_data_bio(osd_req, which,
					obj_request->bio_list, length);
		else if (type == OBJ_REQUEST_PAGES)
			osd_req_op_extent_osd_data_pages(osd_req, which,
					obj_request->pages, length,
					offset & ~PAGE_MASK, false, false);

		/* Discards are also writes */
		if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
			rbd_osd_req_format_write(obj_request);
		else
			rbd_osd_req_format_read(obj_request);

		obj_request->img_offset = img_offset;

		img_offset += length;
		resid -= length;
	}

	return 0;

out_unwind:
	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);

	return -ENOMEM;
}

static void
rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	struct rbd_device *rbd_dev;
	struct page **pages;
	u32 page_count;

	rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;
	rbd_assert(img_request);

	rbd_dev = img_request->rbd_dev;
	rbd_assert(rbd_dev);

	pages = obj_request->copyup_pages;
	rbd_assert(pages != NULL);
	obj_request->copyup_pages = NULL;
	page_count = obj_request->copyup_page_count;
	rbd_assert(page_count);
	obj_request->copyup_page_count = 0;
	ceph_release_page_vector(pages, page_count);

	/*
	 * We want the transfer count to reflect the size of the
	 * original write request.  There is no such thing as a
	 * successful short write, so if the request was successful
	 * we can just set it to the originally-requested length.
	 */
	if (!obj_request->result)
		obj_request->xferred = obj_request->length;

	/* Finish up with the normal image object callback */

	rbd_img_obj_callback(obj_request);
}

static void
rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
{
	struct rbd_obj_request *orig_request;
	struct ceph_osd_request *osd_req;
	struct ceph_osd_client *osdc;
	struct rbd_device *rbd_dev;
	struct page **pages;
	u32 page_count;
	int img_result;
	u64 parent_length;
	u64 offset;
	u64 length;

	rbd_assert(img_request_child_test(img_request));

	/* First get what we need from the image request */

	pages = img_request->copyup_pages;
	rbd_assert(pages != NULL);
	img_request->copyup_pages = NULL;
	page_count = img_request->copyup_page_count;
	rbd_assert(page_count);
	img_request->copyup_page_count = 0;

	orig_request = img_request->obj_request;
	rbd_assert(orig_request != NULL);
	rbd_assert(obj_request_type_valid(orig_request->type));
	img_result = img_request->result;
	parent_length = img_request->length;
	rbd_assert(parent_length == img_request->xferred);
	rbd_img_request_put(img_request);

	rbd_assert(orig_request->img_request);
	rbd_dev = orig_request->img_request->rbd_dev;
	rbd_assert(rbd_dev);

	/*
	 * If the overlap has become 0 (most likely because the
	 * image has been flattened) we need to free the pages
	 * and re-submit the original write request.
	 */
	if (!rbd_dev->parent_overlap) {
		struct ceph_osd_client *osdc;

		ceph_release_page_vector(pages, page_count);
		osdc = &rbd_dev->rbd_client->client->osdc;
		img_result = rbd_obj_request_submit(osdc, orig_request);
		if (!img_result)
			return;
	}

	if (img_result)
		goto out_err;

	/*
	 * The original osd request is of no use to us any more.
	 * We need a new one that can hold the three ops in a copyup
	 * request.  Allocate the new copyup osd request for the
	 * original request, and release the old one.
	 */
	img_result = -ENOMEM;
	osd_req = rbd_osd_req_create_copyup(orig_request);
	if (!osd_req)
		goto out_err;
	rbd_osd_req_destroy(orig_request->osd_req);
	orig_request->osd_req = osd_req;
	orig_request->copyup_pages = pages;
	orig_request->copyup_page_count = page_count;

	/* Initialize the copyup op */

	osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
	osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
						false, false);

	/* Then the hint op */

	osd_req_op_alloc_hint_init(osd_req, 1, rbd_obj_bytes(&rbd_dev->header),
				   rbd_obj_bytes(&rbd_dev->header));

	/* And the original write request op */

	offset = orig_request->offset;
	length = orig_request->length;
	osd_req_op_extent_init(osd_req, 2, CEPH_OSD_OP_WRITE,
					offset, length, 0, 0);
	if (orig_request->type == OBJ_REQUEST_BIO)
		osd_req_op_extent_osd_data_bio(osd_req, 2,
					orig_request->bio_list, length);
	else
		osd_req_op_extent_osd_data_pages(osd_req, 2,
					orig_request->pages, length,
					offset & ~PAGE_MASK, false, false);

	rbd_osd_req_format_write(orig_request);

	/* All set, send it off. */

	orig_request->callback = rbd_img_obj_copyup_callback;
	osdc = &rbd_dev->rbd_client->client->osdc;
	img_result = rbd_obj_request_submit(osdc, orig_request);
	if (!img_result)
		return;
out_err:
	/* Record the error code and complete the request */

	orig_request->result = img_result;
	orig_request->xferred = 0;
	obj_request_done_set(orig_request);
	rbd_obj_request_complete(orig_request);
}

/*
 * Read from the parent image the range of data that covers the
 * entire target of the given object request.  This is used for
 * satisfying a layered image write request when the target of an
 * object request from the image request does not exist.
 *
 * A page array big enough to hold the returned data is allocated
 * and supplied to rbd_img_request_fill() as the "data descriptor."
 * When the read completes, this page array will be transferred to
 * the original object request for the copyup operation.
 *
 * If an error occurs, record it as the result of the original
 * object request and mark it done so it gets completed.
 */
static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = NULL;
	struct rbd_img_request *parent_request = NULL;
	struct rbd_device *rbd_dev;
	u64 img_offset;
	u64 length;
	struct page **pages = NULL;
	u32 page_count;
	int result;

	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request_type_valid(obj_request->type));

	img_request = obj_request->img_request;
	rbd_assert(img_request != NULL);
	rbd_dev = img_request->rbd_dev;
	rbd_assert(rbd_dev->parent != NULL);

	/*
	 * Determine the byte range covered by the object in the
	 * child image to which the original request was to be sent.
	 */
	img_offset = obj_request->img_offset - obj_request->offset;
	length = (u64)1 << rbd_dev->header.obj_order;

	/*
	 * There is no defined parent data beyond the parent
	 * overlap, so limit what we read at that boundary if
	 * necessary.
	 */
	if (img_offset + length > rbd_dev->parent_overlap) {
		rbd_assert(img_offset < rbd_dev->parent_overlap);
		length = rbd_dev->parent_overlap - img_offset;
	}

	/*
	 * Allocate a page array big enough to receive the data read
	 * from the parent.
	 */
	page_count = (u32)calc_pages_for(0, length);
	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
	if (IS_ERR(pages)) {
		result = PTR_ERR(pages);
		pages = NULL;
		goto out_err;
	}

	result = -ENOMEM;
	parent_request = rbd_parent_request_create(obj_request,
						img_offset, length);
	if (!parent_request)
		goto out_err;

	result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
	if (result)
		goto out_err;
	parent_request->copyup_pages = pages;
	parent_request->copyup_page_count = page_count;

	parent_request->callback = rbd_img_obj_parent_read_full_callback;
	result = rbd_img_request_submit(parent_request);
	if (!result)
		return 0;

	parent_request->copyup_pages = NULL;
	parent_request->copyup_page_count = 0;
	parent_request->obj_request = NULL;
	rbd_obj_request_put(obj_request);
out_err:
	if (pages)
		ceph_release_page_vector(pages, page_count);
	if (parent_request)
		rbd_img_request_put(parent_request);
	obj_request->result = result;
	obj_request->xferred = 0;
	obj_request_done_set(obj_request);

	return result;
}

static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_obj_request *orig_request;
	struct rbd_device *rbd_dev;
	int result;

	rbd_assert(!obj_request_img_data_test(obj_request));

	/*
	 * All we need from the object request is the original
	 * request and the result of the STAT op.  Grab those, then
	 * we're done with the request.
	 */
	orig_request = obj_request->obj_request;
	obj_request->obj_request = NULL;
	rbd_obj_request_put(orig_request);
	rbd_assert(orig_request);
	rbd_assert(orig_request->img_request);

	result = obj_request->result;
	obj_request->result = 0;

	dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
		obj_request, orig_request, result,
		obj_request->xferred, obj_request->length);
	rbd_obj_request_put(obj_request);

	/*
	 * If the overlap has become 0 (most likely because the
	 * image has been flattened) we need to free the pages
	 * and re-submit the original write request.
	 */
	rbd_dev = orig_request->img_request->rbd_dev;
	if (!rbd_dev->parent_overlap) {
		struct ceph_osd_client *osdc;

		osdc = &rbd_dev->rbd_client->client->osdc;
		result = rbd_obj_request_submit(osdc, orig_request);
		if (!result)
			return;
	}

	/*
	 * Our only purpose here is to determine whether the object
	 * exists, and we don't want to treat the non-existence as
	 * an error.  If something else comes back, transfer the
	 * error to the original request and complete it now.
	 */
	if (!result) {
		obj_request_existence_set(orig_request, true);
	} else if (result == -ENOENT) {
		obj_request_existence_set(orig_request, false);
	} else if (result) {
		orig_request->result = result;
		goto out;
	}

	/*
	 * Resubmit the original request now that we have recorded
	 * whether the target object exists.
	 */
	orig_request->result = rbd_img_obj_request_submit(orig_request);
out:
	if (orig_request->result)
		rbd_obj_request_complete(orig_request);
}

static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
{
	struct rbd_obj_request *stat_request;
	struct rbd_device *rbd_dev;
	struct ceph_osd_client *osdc;
	struct page **pages = NULL;
	u32 page_count;
	size_t size;
	int ret;

	/*
	 * The response data for a STAT call consists of:
	 *     le64 length;
	 *     struct {
	 *         le32 tv_sec;
	 *         le32 tv_nsec;
	 *     } mtime;
	 */
	size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
	page_count = (u32)calc_pages_for(0, size);
	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = -ENOMEM;
	stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
							OBJ_REQUEST_PAGES);
	if (!stat_request)
		goto out;

	rbd_obj_request_get(obj_request);
	stat_request->obj_request = obj_request;
	stat_request->pages = pages;
	stat_request->page_count = page_count;

	rbd_assert(obj_request->img_request);
	rbd_dev = obj_request->img_request->rbd_dev;
	stat_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
						   stat_request);
	if (!stat_request->osd_req)
		goto out;
	stat_request->callback = rbd_img_obj_exists_callback;

	osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT);
	osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
					false, false);
	rbd_osd_req_format_read(stat_request);

	osdc = &rbd_dev->rbd_client->client->osdc;
	ret = rbd_obj_request_submit(osdc, stat_request);
out:
	if (ret)
		rbd_obj_request_put(obj_request);

	return ret;
}

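/*
 * Return true if this object request can be submitted directly to the
 * osd, false if it needs the layered-write (copyup) machinery first.
 */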
static bool img_obj_request_simple(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	struct rbd_device *rbd_dev;

	rbd_assert(obj_request_img_data_test(obj_request));

	img_request = obj_request->img_request;
	rbd_assert(img_request);
	rbd_dev = img_request->rbd_dev;

	/* Reads */
	if (!img_request_write_test(img_request) &&
	    !img_request_discard_test(img_request))
		return true;

	/* Non-layered writes */
	if (!img_request_layered_test(img_request))
		return true;

	/*
	 * Layered writes outside of the parent overlap range don't
	 * share any data with the parent.
	 */
	if (!obj_request_overlaps_parent(obj_request))
		return true;

	/*
	 * Entire-object layered writes - we will overwrite whatever
	 * parent data there is anyway.
	 */
	if (!obj_request->offset &&
	    obj_request->length == rbd_obj_bytes(&rbd_dev->header))
		return true;

	/*
	 * If the object is known to already exist, its parent data has
	 * already been copied.
	 */
	if (obj_request_known_test(obj_request) &&
	    obj_request_exists_test(obj_request))
		return true;

	return false;
}

static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
{
	if (img_obj_request_simple(obj_request)) {
		struct rbd_device *rbd_dev;
		struct ceph_osd_client *osdc;

		rbd_dev = obj_request->img_request->rbd_dev;
		osdc = &rbd_dev->rbd_client->client->osdc;

		return rbd_obj_request_submit(osdc, obj_request);
	}

	/*
	 * It's a layered write.  The target object might exist but
	 * we may not know that yet.  If we know it doesn't exist,
	 * start by reading the data for the full target object from
	 * the parent so we can use it for a copyup to the target.
	 */
	if (obj_request_known_test(obj_request))
		return rbd_img_obj_parent_read_full(obj_request);

	/* We don't know whether the target exists.  Go find out. */

	return rbd_img_obj_exists_submit(obj_request);
}

static int rbd_img_request_submit(struct rbd_img_request *img_request)
{
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	dout("%s: img %p\n", __func__, img_request);
	for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
		int ret;

		ret = rbd_img_obj_request_submit(obj_request);
		if (ret)
			return ret;
	}

	return 0;
}

static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
{
	struct rbd_obj_request *obj_request;
	struct rbd_device *rbd_dev;
	u64 obj_end;
	u64 img_xferred;
	int img_result;

	rbd_assert(img_request_child_test(img_request));

	/* First get what we need from the image request and release it */

	obj_request = img_request->obj_request;
	img_xferred = img_request->xferred;
	img_result = img_request->result;
	rbd_img_request_put(img_request);

	/*
	 * If the overlap has become 0 (most likely because the
	 * image has been flattened) we need to re-submit the
	 * original request.
	 */
	rbd_assert(obj_request);
	rbd_assert(obj_request->img_request);
	rbd_dev = obj_request->img_request->rbd_dev;
	if (!rbd_dev->parent_overlap) {
		struct ceph_osd_client *osdc;

		osdc = &rbd_dev->rbd_client->client->osdc;
		img_result = rbd_obj_request_submit(osdc, obj_request);
		if (!img_result)
			return;
	}

	obj_request->result = img_result;
	if (obj_request->result)
		goto out;

	/*
	 * We need to zero anything beyond the parent overlap
	 * boundary.  Since rbd_img_obj_request_read_callback()
	 * will zero anything beyond the end of a short read, an
	 * easy way to do this is to pretend the data from the
	 * parent came up short--ending at the overlap boundary.
	 */
	rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
	obj_end = obj_request->img_offset + obj_request->length;
	if (obj_end > rbd_dev->parent_overlap) {
		u64 xferred = 0;

		if (obj_request->img_offset < rbd_dev->parent_overlap)
			xferred = rbd_dev->parent_overlap -
					obj_request->img_offset;

		obj_request->xferred = min(img_xferred, xferred);
	} else {
		obj_request->xferred = img_xferred;
	}
out:
	rbd_img_obj_request_read_callback(obj_request);
	rbd_obj_request_complete(obj_request);
}

static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	int result;

	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request->img_request != NULL);
	rbd_assert(obj_request->result == (s32) -ENOENT);
	rbd_assert(obj_request_type_valid(obj_request->type));

	/* rbd_read_finish(obj_request, obj_request->length); */
	img_request = rbd_parent_request_create(obj_request,
						obj_request->img_offset,
						obj_request->length);
	result = -ENOMEM;
	if (!img_request)
		goto out_err;

	if (obj_request->type == OBJ_REQUEST_BIO)
		result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
						obj_request->bio_list);
	else
		result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
						obj_request->pages);
	if (result)
		goto out_err;

	img_request->callback = rbd_img_parent_read_callback;
	result = rbd_img_request_submit(img_request);
	if (result)
		goto out_err;

	return;
out_err:
	if (img_request)
		rbd_img_request_put(img_request);
	obj_request->result = result;
	obj_request->xferred = 0;
	obj_request_done_set(obj_request);
}

static int rbd_obj_notify_ack_sync(struct rbd_device *rbd_dev, u64 notify_id)
{
	struct rbd_obj_request *obj_request;
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	int ret;

	obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
							OBJ_REQUEST_NODATA);
	if (!obj_request)
		return -ENOMEM;

	ret = -ENOMEM;
	obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
						  obj_request);
	if (!obj_request->osd_req)
		goto out;

	osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
					notify_id, 0, 0);
	rbd_osd_req_format_read(obj_request);

	ret = rbd_obj_request_submit(osdc, obj_request);
	if (ret)
		goto out;
	ret = rbd_obj_request_wait(obj_request);
out:
	rbd_obj_request_put(obj_request);

	return ret;
}

static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
{
	struct rbd_device *rbd_dev = (struct rbd_device *)data;
	int ret;

	if (!rbd_dev)
		return;

	dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
		rbd_dev->header_name, (unsigned long long)notify_id,
		(unsigned int)opcode);

	/*
	 * Until adequate refresh error handling is in place, there is
	 * not much we can do here, except warn.
	 *
	 * See http://tracker.ceph.com/issues/5040
	 */
	ret = rbd_dev_refresh(rbd_dev);
	if (ret)
		rbd_warn(rbd_dev, "refresh failed: %d", ret);

	ret = rbd_obj_notify_ack_sync(rbd_dev, notify_id);
	if (ret)
		rbd_warn(rbd_dev, "notify_ack ret %d", ret);
}

/*
 * Send a (un)watch request and wait for the ack.  Return a request
 * with a ref held on success or error.
 */
static struct rbd_obj_request *rbd_obj_watch_request_helper(
						struct rbd_device *rbd_dev,
						bool watch)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_obj_request *obj_request;
	int ret;

	obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
					     OBJ_REQUEST_NODATA);
	if (!obj_request)
		return ERR_PTR(-ENOMEM);

	obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_WRITE, 1,
						  obj_request);
	if (!obj_request->osd_req) {
		ret = -ENOMEM;
		goto out;
	}

	osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
			      rbd_dev->watch_event->cookie, 0, watch);
	rbd_osd_req_format_write(obj_request);

	if (watch)
		ceph_osdc_set_request_linger(osdc, obj_request->osd_req);

	ret = rbd_obj_request_submit(osdc, obj_request);
	if (ret)
		goto out;

	ret = rbd_obj_request_wait(obj_request);
	if (ret)
		goto out;

	ret = obj_request->result;
	if (ret) {
		if (watch)
			rbd_obj_request_end(obj_request);
		goto out;
	}

	return obj_request;

out:
	rbd_obj_request_put(obj_request);
	return ERR_PTR(ret);
}

/*
 * Initiate a watch request, synchronously.
 */
static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_obj_request *obj_request;
	int ret;

	rbd_assert(!rbd_dev->watch_event);
	rbd_assert(!rbd_dev->watch_request);

	ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
				     &rbd_dev->watch_event);
	if (ret < 0)
		return ret;

	obj_request = rbd_obj_watch_request_helper(rbd_dev, true);
	if (IS_ERR(obj_request)) {
		ceph_osdc_cancel_event(rbd_dev->watch_event);
		rbd_dev->watch_event = NULL;
		return PTR_ERR(obj_request);
	}

	/*
	 * A watch request is set to linger, so the underlying osd
	 * request won't go away until we unregister it.  We retain
	 * a pointer to the object request during that time (in
	 * rbd_dev->watch_request), so we'll keep a reference to it.
	 * We'll drop that reference after we've unregistered it in
	 * rbd_dev_header_unwatch_sync().
	 */
	rbd_dev->watch_request = obj_request;

	return 0;
}

/*
 * Tear down a watch request, synchronously.
 */
static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
{
	struct rbd_obj_request *obj_request;

	rbd_assert(rbd_dev->watch_event);
	rbd_assert(rbd_dev->watch_request);

	rbd_obj_request_end(rbd_dev->watch_request);
	rbd_obj_request_put(rbd_dev->watch_request);
	rbd_dev->watch_request = NULL;

	obj_request = rbd_obj_watch_request_helper(rbd_dev, false);
	if (!IS_ERR(obj_request))
		rbd_obj_request_put(obj_request);
	else
		rbd_warn(rbd_dev, "unable to tear down watch request (%ld)",
			 PTR_ERR(obj_request));

	ceph_osdc_cancel_event(rbd_dev->watch_event);
	rbd_dev->watch_event = NULL;
}

/*
 * Synchronous osd object method call.  Returns the number of bytes
 * returned in the outbound buffer, or a negative error code.
 */
static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
			     const char *object_name,
			     const char *class_name,
			     const char *method_name,
			     const void *outbound,
			     size_t outbound_size,
			     void *inbound,
			     size_t inbound_size)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_obj_request *obj_request;
	struct page **pages;
	u32 page_count;
	int ret;

	/*
	 * Method calls are ultimately read operations.  The result
	 * should be placed into the inbound buffer provided.  They
	 * also supply outbound data--parameters for the object
	 * method.  Currently if this is present it will be a
	 * snapshot id.
	 */
	page_count = (u32)calc_pages_for(0, inbound_size);
	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = -ENOMEM;
	obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
							OBJ_REQUEST_PAGES);
	if (!obj_request)
		goto out;

	obj_request->pages = pages;
	obj_request->page_count = page_count;

	obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
						  obj_request);
	if (!obj_request->osd_req)
		goto out;

	osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
					class_name, method_name);
	if (outbound_size) {
		struct ceph_pagelist *pagelist;

		pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
		if (!pagelist)
			goto out;

		ceph_pagelist_init(pagelist);
		ceph_pagelist_append(pagelist, outbound, outbound_size);
		osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
						pagelist);
	}
	osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
					obj_request->pages, inbound_size,
					0, false, false);
	rbd_osd_req_format_read(obj_request);

	ret = rbd_obj_request_submit(osdc, obj_request);
	if (ret)
		goto out;
	ret = rbd_obj_request_wait(obj_request);
	if (ret)
		goto out;

	ret = obj_request->result;
	if (ret < 0)
		goto out;

	rbd_assert(obj_request->xferred < (u64)INT_MAX);
	ret = (int)obj_request->xferred;
	ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
out:
	if (obj_request)
		rbd_obj_request_put(obj_request);
	else
		ceph_release_page_vector(pages, page_count);

	return ret;
}

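/*
 * Translate one block-layer request into an rbd image request and
 * submit it.  Runs from the request workqueue, so it is allowed to
 * block (e.g. for memory allocation or the header semaphore).
 */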
I
Ilya Dryomov 已提交
3295
static void rbd_handle_request(struct rbd_device *rbd_dev, struct request *rq)
A
Alex Elder 已提交
3296
{
I
Ilya Dryomov 已提交
3297
	struct rbd_img_request *img_request;
3298
	struct ceph_snap_context *snapc = NULL;
I
Ilya Dryomov 已提交
3299 3300
	u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
	u64 length = blk_rq_bytes(rq);
G
Guangliang Zhao 已提交
3301
	enum obj_operation_type op_type;
3302
	u64 mapping_size;
A
Alex Elder 已提交
3303 3304
	int result;

3305 3306 3307
	if (rq->cmd_flags & REQ_DISCARD)
		op_type = OBJ_OP_DISCARD;
	else if (rq->cmd_flags & REQ_WRITE)
G
Guangliang Zhao 已提交
3308 3309 3310 3311
		op_type = OBJ_OP_WRITE;
	else
		op_type = OBJ_OP_READ;

I
Ilya Dryomov 已提交
3312
	/* Ignore/skip any zero-length requests */
A
Alex Elder 已提交
3313

I
Ilya Dryomov 已提交
3314 3315 3316 3317 3318
	if (!length) {
		dout("%s: zero-length request\n", __func__);
		result = 0;
		goto err_rq;
	}
A
Alex Elder 已提交
3319

G
Guangliang Zhao 已提交
3320
	/* Only reads are allowed to a read-only device */
I
Ilya Dryomov 已提交
3321

G
Guangliang Zhao 已提交
3322
	if (op_type != OBJ_OP_READ) {
I
Ilya Dryomov 已提交
3323 3324 3325
		if (rbd_dev->mapping.read_only) {
			result = -EROFS;
			goto err_rq;
A
Alex Elder 已提交
3326
		}
I
Ilya Dryomov 已提交
3327 3328
		rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
	}
A
Alex Elder 已提交
3329

I
Ilya Dryomov 已提交
3330 3331 3332 3333 3334 3335 3336 3337 3338 3339 3340 3341
	/*
	 * Quit early if the mapped snapshot no longer exists.  It's
	 * still possible the snapshot will have disappeared by the
	 * time our request arrives at the osd, but there's no sense in
	 * sending it if we already know.
	 */
	if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
		dout("request for non-existent snapshot");
		rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
		result = -ENXIO;
		goto err_rq;
	}
A
Alex Elder 已提交
3342

I
Ilya Dryomov 已提交
3343 3344 3345 3346 3347 3348
	if (offset && length > U64_MAX - offset + 1) {
		rbd_warn(rbd_dev, "bad request range (%llu~%llu)", offset,
			 length);
		result = -EINVAL;
		goto err_rq;	/* Shouldn't happen */
	}
A
Alex Elder 已提交
3349

3350 3351
	down_read(&rbd_dev->header_rwsem);
	mapping_size = rbd_dev->mapping.size;
G
Guangliang Zhao 已提交
3352
	if (op_type != OBJ_OP_READ) {
3353 3354 3355 3356 3357 3358
		snapc = rbd_dev->header.snapc;
		ceph_get_snap_context(snapc);
	}
	up_read(&rbd_dev->header_rwsem);

	if (offset + length > mapping_size) {
I
Ilya Dryomov 已提交
3359
		rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
3360
			 length, mapping_size);
I
Ilya Dryomov 已提交
3361 3362 3363
		result = -EIO;
		goto err_rq;
	}
A
Alex Elder 已提交
3364

G
Guangliang Zhao 已提交
3365
	img_request = rbd_img_request_create(rbd_dev, offset, length, op_type,
3366
					     snapc);
I
Ilya Dryomov 已提交
3367 3368 3369 3370 3371
	if (!img_request) {
		result = -ENOMEM;
		goto err_rq;
	}
	img_request->rq = rq;
A
Alex Elder 已提交
3372

3373 3374 3375 3376 3377 3378
	if (op_type == OBJ_OP_DISCARD)
		result = rbd_img_request_fill(img_request, OBJ_REQUEST_NODATA,
					      NULL);
	else
		result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
					      rq->bio);
I
Ilya Dryomov 已提交
3379 3380
	if (result)
		goto err_img_request;
A
Alex Elder 已提交
3381

I
Ilya Dryomov 已提交
3382 3383 3384
	result = rbd_img_request_submit(img_request);
	if (result)
		goto err_img_request;
A
Alex Elder 已提交
3385

I
Ilya Dryomov 已提交
3386
	return;
A
Alex Elder 已提交
3387

I
Ilya Dryomov 已提交
3388 3389 3390 3391 3392
err_img_request:
	rbd_img_request_put(img_request);
err_rq:
	if (result)
		rbd_warn(rbd_dev, "%s %llx at %llx result %d",
			 obj_op_name(op_type), length, offset, result);
	if (snapc)
		ceph_put_snap_context(snapc);
	blk_end_request_all(rq, result);
}
static void rbd_request_workfn(struct work_struct *work)
{
	struct rbd_device *rbd_dev =
	    container_of(work, struct rbd_device, rq_work);
	struct request *rq, *next;
	LIST_HEAD(requests);
	spin_lock_irq(&rbd_dev->lock); /* rq->q->queue_lock */
	list_splice_init(&rbd_dev->rq_queue, &requests);
	spin_unlock_irq(&rbd_dev->lock);
	list_for_each_entry_safe(rq, next, &requests, queuelist) {
		list_del_init(&rq->queuelist);
		rbd_handle_request(rbd_dev, rq);
	}
}
/*
 * Called with q->queue_lock held and interrupts disabled, possibly on
 * the way to schedule().  Do not sleep here!
 */
static void rbd_request_fn(struct request_queue *q)
{
	struct rbd_device *rbd_dev = q->queuedata;
	struct request *rq;
	int queued = 0;

	rbd_assert(rbd_dev);

	while ((rq = blk_fetch_request(q))) {
		/* Ignore any non-FS requests that filter through. */
		if (rq->cmd_type != REQ_TYPE_FS) {
			dout("%s: non-fs request type %d\n", __func__,
				(int) rq->cmd_type);
			__blk_end_request_all(rq, 0);
			continue;
		}

		list_add_tail(&rq->queuelist, &rbd_dev->rq_queue);
		queued++;
	}

	if (queued)
		queue_work(rbd_dev->rq_wq, &rbd_dev->rq_work);
}
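
/*
 * Added commentary (not part of the original driver source): the
 * rbd_request_fn() above runs under q->queue_lock with interrupts
 * disabled, so it only moves requests onto rbd_dev->rq_queue and kicks
 * rbd_dev->rq_wq.  The actual image request is then built and submitted
 * from rbd_request_workfn() in process context, where blocking
 * allocations are allowed.
 */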

/*
 * a queue callback. Makes sure that we don't create a bio that spans across
 * multiple osd objects. One exception would be with a single page bios,
 * which we handle later at bio_chain_clone_range()
 */
static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
			  struct bio_vec *bvec)
{
	struct rbd_device *rbd_dev = q->queuedata;
	sector_t sector_offset;
	sector_t sectors_per_obj;
	sector_t obj_sector_offset;
	int ret;

	/*
	 * Find how far into its rbd object the starting sector of
	 * this bio falls; the partition-relative sector is first
	 * converted to an offset within the enclosing device.
	 */
	sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
	sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
	obj_sector_offset = sector_offset & (sectors_per_obj - 1);

	/*
	 * Compute the number of bytes from that offset to the end
	 * of the object.  Account for what's already used by the bio.
	 */
	ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
	if (ret > bmd->bi_size)
		ret -= bmd->bi_size;
	else
		ret = 0;

	/*
	 * Don't send back more than was asked for.  And if the bio
	 * was empty, let the whole thing through because:  "Note
	 * that a block device *must* allow a single page to be
	 * added to an empty bio."
	 */
	rbd_assert(bvec->bv_len <= PAGE_SIZE);
	if (ret > (int) bvec->bv_len || !bmd->bi_size)
		ret = (int) bvec->bv_len;

	return ret;
}
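
/*
 * Worked example (added commentary, not from the original source): with
 * the default 4 MiB objects obj_order is 22, so sectors_per_obj is
 * 1 << (22 - 9) = 8192.  A bio starting at device sector 8000 has
 * (8192 - 8000) << 9 = 98304 bytes left before its object boundary, so
 * rbd_merge_bvec() allows at most that much, less whatever the bio
 * already carries, subject to the empty-bio rule above.
 */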

static void rbd_free_disk(struct rbd_device *rbd_dev)
{
	struct gendisk *disk = rbd_dev->disk;

	if (!disk)
		return;

	rbd_dev->disk = NULL;
	if (disk->flags & GENHD_FL_UP) {
		del_gendisk(disk);
		if (disk->queue)
			blk_cleanup_queue(disk->queue);
	}
	put_disk(disk);
}

static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
				const char *object_name,
				u64 offset, u64 length, void *buf)

{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_obj_request *obj_request;
	struct page **pages = NULL;
	u32 page_count;
	size_t size;
	int ret;

	page_count = (u32) calc_pages_for(offset, length);
	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = -ENOMEM;
	obj_request = rbd_obj_request_create(object_name, offset, length,
							OBJ_REQUEST_PAGES);
	if (!obj_request)
		goto out;

	obj_request->pages = pages;
	obj_request->page_count = page_count;

	obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
						  obj_request);
	if (!obj_request->osd_req)
		goto out;

	osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
					offset, length, 0, 0);
	osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
					obj_request->pages,
					obj_request->length,
					obj_request->offset & ~PAGE_MASK,
					false, false);
	rbd_osd_req_format_read(obj_request);

	ret = rbd_obj_request_submit(osdc, obj_request);
	if (ret)
		goto out;
	ret = rbd_obj_request_wait(obj_request);
	if (ret)
		goto out;

	ret = obj_request->result;
	if (ret < 0)
		goto out;

	rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
	size = (size_t) obj_request->xferred;
	ceph_copy_from_page_vector(pages, buf, 0, size);
	rbd_assert(size <= (size_t)INT_MAX);
	ret = (int)size;
out:
	if (obj_request)
		rbd_obj_request_put(obj_request);
	else
		ceph_release_page_vector(pages, page_count);

	return ret;
}
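
/*
 * Added commentary (not from the original source): the helper above
 * issues one synchronous READ of [offset, offset + length) from the
 * named object into a temporary page vector and copies the result into
 * "buf"; rbd_dev_v1_header_info() below uses it to fetch the v1 header
 * object.
 */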

/*
 * Read the complete header for the given rbd device.  On successful
 * return, the rbd_dev->header field will contain up-to-date
 * information about the image.
 */
static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
{
	struct rbd_image_header_ondisk *ondisk = NULL;
	u32 snap_count = 0;
	u64 names_size = 0;
	u32 want_count;
	int ret;
	/*
	 * The complete header will include an array of its 64-bit
	 * snapshot ids, followed by the names of those snapshots as
	 * a contiguous block of NUL-terminated strings.  Note that
	 * the number of snapshots could change by the time we read
	 * it in, in which case we re-read it.
	 */
	do {
		size_t size;

		kfree(ondisk);

		size = sizeof (*ondisk);
		size += snap_count * sizeof (struct rbd_image_snap_ondisk);
		size += names_size;
		ondisk = kmalloc(size, GFP_KERNEL);
		if (!ondisk)
			return -ENOMEM;
		ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
				       0, size, ondisk);
		if (ret < 0)
			goto out;
		if ((size_t)ret < size) {
			ret = -ENXIO;
			rbd_warn(rbd_dev, "short header read (want %zd got %d)",
				size, ret);
			goto out;
		}
		if (!rbd_dev_ondisk_valid(ondisk)) {
			ret = -ENXIO;
			rbd_warn(rbd_dev, "invalid header");
			goto out;
		}

		names_size = le64_to_cpu(ondisk->snap_names_len);
		want_count = snap_count;
		snap_count = le32_to_cpu(ondisk->snap_count);
	} while (snap_count != want_count);
	ret = rbd_header_from_disk(rbd_dev, ondisk);
out:
	kfree(ondisk);

	return ret;
}
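
/*
 * Added commentary (not from the original source): a v1 header object
 * holds the fixed rbd_image_header_ondisk struct, an array of 64-bit
 * snapshot ids and then the packed, NUL-terminated snapshot names, so
 * the read above is retried with a larger buffer whenever the snapshot
 * count reported by the OSD no longer matches the count the buffer was
 * sized for.
 */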

/*
 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
 * has disappeared from the (just updated) snapshot context.
 */
static void rbd_exists_validate(struct rbd_device *rbd_dev)
{
	u64 snap_id;

	if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
		return;

	snap_id = rbd_dev->spec->snap_id;
	if (snap_id == CEPH_NOSNAP)
		return;

	if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
		clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
}

static void rbd_dev_update_size(struct rbd_device *rbd_dev)
{
	sector_t size;
	bool removing;

	/*
	 * Don't hold the lock while doing disk operations,
	 * or lock ordering will conflict with the bdev mutex via:
	 * rbd_add() -> blkdev_get() -> rbd_open()
	 */
	spin_lock_irq(&rbd_dev->lock);
	removing = test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
	spin_unlock_irq(&rbd_dev->lock);
	/*
	 * If the device is being removed, rbd_dev->disk has
	 * been destroyed, so don't try to update its size
	 */
	if (!removing) {
		size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
		dout("setting size to %llu sectors", (unsigned long long)size);
		set_capacity(rbd_dev->disk, size);
		revalidate_disk(rbd_dev->disk);
	}
}

A
Alex Elder 已提交
3676
static int rbd_dev_refresh(struct rbd_device *rbd_dev)
A
Alex Elder 已提交
3677
{
3678
	u64 mapping_size;
A
Alex Elder 已提交
3679 3680
	int ret;

3681
	down_write(&rbd_dev->header_rwsem);
3682
	mapping_size = rbd_dev->mapping.size;
3683 3684

	ret = rbd_dev_header_info(rbd_dev);
3685 3686
	if (ret)
		return ret;
3687

3688 3689 3690 3691 3692 3693 3694 3695 3696 3697
	/*
	 * If there is a parent, see if it has disappeared due to the
	 * mapped image getting flattened.
	 */
	if (rbd_dev->parent) {
		ret = rbd_dev_v2_parent_info(rbd_dev);
		if (ret)
			return ret;
	}

3698 3699 3700 3701 3702 3703 3704
	if (rbd_dev->spec->snap_id == CEPH_NOSNAP) {
		if (rbd_dev->mapping.size != rbd_dev->header.image_size)
			rbd_dev->mapping.size = rbd_dev->header.image_size;
	} else {
		/* validate mapped snapshot's EXISTS flag */
		rbd_exists_validate(rbd_dev);
	}
3705

3706 3707
	up_write(&rbd_dev->header_rwsem);

3708
	if (mapping_size != rbd_dev->mapping.size)
3709
		rbd_dev_update_size(rbd_dev);
A
Alex Elder 已提交
3710

3711
	return 0;
A
Alex Elder 已提交
3712 3713
}

static int rbd_init_disk(struct rbd_device *rbd_dev)
{
	struct gendisk *disk;
	struct request_queue *q;
A
Alex Elder 已提交
3718
	u64 segment_size;
3719 3720

	/* create gendisk info */
3721 3722 3723
	disk = alloc_disk(single_major ?
			  (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
			  RBD_MINORS_PER_MAJOR);
3724
	if (!disk)
A
Alex Elder 已提交
3725
		return -ENOMEM;
3726

A
Alex Elder 已提交
3727
	snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
A
Alex Elder 已提交
3728
		 rbd_dev->dev_id);
3729
	disk->major = rbd_dev->major;
3730
	disk->first_minor = rbd_dev->minor;
3731 3732
	if (single_major)
		disk->flags |= GENHD_FL_EXT_DEVT;
3733 3734 3735
	disk->fops = &rbd_bd_ops;
	disk->private_data = rbd_dev;

A
Alex Elder 已提交
3736
	q = blk_init_queue(rbd_request_fn, &rbd_dev->lock);
3737 3738
	if (!q)
		goto out_disk;
3739

A
Alex Elder 已提交
3740 3741 3742
	/* We use the default size, but let's be explicit about it. */
	blk_queue_physical_block_size(q, SECTOR_SIZE);

3743
	/* set io sizes to object size */
A
Alex Elder 已提交
3744 3745 3746 3747 3748
	segment_size = rbd_obj_bytes(&rbd_dev->header);
	blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
	blk_queue_max_segment_size(q, segment_size);
	blk_queue_io_min(q, segment_size);
	blk_queue_io_opt(q, segment_size);
3749

3750 3751 3752 3753 3754
	/* enable the discard support */
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	q->limits.discard_granularity = segment_size;
	q->limits.discard_alignment = segment_size;

	blk_queue_merge_bvec(q, rbd_merge_bvec);
	disk->queue = q;

	q->queuedata = rbd_dev;

	rbd_dev->disk = disk;

	return 0;
out_disk:
	put_disk(disk);
A
Alex Elder 已提交
3765 3766

	return -ENOMEM;
3767 3768
}

/*
  sysfs
*/

A
Alex Elder 已提交
3773 3774 3775 3776 3777
static struct rbd_device *dev_to_rbd_dev(struct device *dev)
{
	return container_of(dev, struct rbd_device, dev);
}

3778 3779 3780
static ssize_t rbd_size_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
A
Alex Elder 已提交
3781
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3782

A
Alex Elder 已提交
3783 3784
	return sprintf(buf, "%llu\n",
		(unsigned long long)rbd_dev->mapping.size);
3785 3786
}

A
Alex Elder 已提交
/*
 * Note this shows the features for whatever's mapped, which is not
 * necessarily the base image.
 */
static ssize_t rbd_features_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "0x%016llx\n",
A
Alex Elder 已提交
3797
			(unsigned long long)rbd_dev->mapping.features);
A
Alex Elder 已提交
3798 3799
}

3800 3801 3802
static ssize_t rbd_major_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
A
Alex Elder 已提交
3803
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3804

A
Alex Elder 已提交
3805 3806 3807 3808
	if (rbd_dev->major)
		return sprintf(buf, "%d\n", rbd_dev->major);

	return sprintf(buf, "(none)\n");
}

static ssize_t rbd_minor_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
A
Alex Elder 已提交
3815

3816
	return sprintf(buf, "%d\n", rbd_dev->minor);
3817 3818 3819 3820
}

static ssize_t rbd_client_id_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
3821
{
A
Alex Elder 已提交
3822
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3823

3824 3825
	return sprintf(buf, "client%lld\n",
			ceph_client_id(rbd_dev->rbd_client->client));
3826 3827
}

3828 3829
static ssize_t rbd_pool_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
3830
{
A
Alex Elder 已提交
3831
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3832

3833
	return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
3834 3835
}

static ssize_t rbd_pool_id_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

3841
	return sprintf(buf, "%llu\n",
A
Alex Elder 已提交
3842
			(unsigned long long) rbd_dev->spec->pool_id);
3843 3844
}

3845 3846 3847
static ssize_t rbd_name_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
A
Alex Elder 已提交
3848
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3849

A
Alex Elder 已提交
3850 3851 3852 3853
	if (rbd_dev->spec->image_name)
		return sprintf(buf, "%s\n", rbd_dev->spec->image_name);

	return sprintf(buf, "(unknown)\n");
3854 3855
}

A
Alex Elder 已提交
3856 3857 3858 3859 3860
static ssize_t rbd_image_id_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

3861
	return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
A
Alex Elder 已提交
3862 3863
}

A
Alex Elder 已提交
3864 3865 3866 3867
/*
 * Shows the name of the currently-mapped snapshot (or
 * RBD_SNAP_HEAD_NAME for the base image).
 */
static ssize_t rbd_snap_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
A
Alex Elder 已提交
3872
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3873

3874
	return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
3875 3876
}

3877
/*
3878 3879 3880
 * For a v2 image, shows the chain of parent images, separated by empty
 * lines.  For v1 images or if there is no parent, shows "(no parent
 * image)".
3881 3882
 */
static ssize_t rbd_parent_show(struct device *dev,
3883 3884
			       struct device_attribute *attr,
			       char *buf)
3885 3886
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3887
	ssize_t count = 0;
3888

3889
	if (!rbd_dev->parent)
3890 3891
		return sprintf(buf, "(no parent image)\n");

	for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
		struct rbd_spec *spec = rbd_dev->parent_spec;

		count += sprintf(&buf[count], "%s"
			    "pool_id %llu\npool_name %s\n"
			    "image_id %s\nimage_name %s\n"
			    "snap_id %llu\nsnap_name %s\n"
			    "overlap %llu\n",
			    !count ? "" : "\n", /* first? */
			    spec->pool_id, spec->pool_name,
			    spec->image_id, spec->image_name ?: "(unknown)",
			    spec->snap_id, spec->snap_name,
			    rbd_dev->parent_overlap);
	}

	return count;
3908 3909
}

static ssize_t rbd_image_refresh(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t size)
{
A
Alex Elder 已提交
3915
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3916
	int ret;
3917

A
Alex Elder 已提交
3918
	ret = rbd_dev_refresh(rbd_dev);
3919
	if (ret)
3920
		return ret;
3921

3922
	return size;
3923
}
3924

3925
static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
A
Alex Elder 已提交
3926
static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
3927
static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
3928
static DEVICE_ATTR(minor, S_IRUGO, rbd_minor_show, NULL);
3929 3930
static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
3931
static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
3932
static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
A
Alex Elder 已提交
3933
static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
3934 3935
static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
3936
static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
3937 3938 3939

static struct attribute *rbd_attrs[] = {
	&dev_attr_size.attr,
A
Alex Elder 已提交
3940
	&dev_attr_features.attr,
3941
	&dev_attr_major.attr,
3942
	&dev_attr_minor.attr,
3943 3944
	&dev_attr_client_id.attr,
	&dev_attr_pool.attr,
3945
	&dev_attr_pool_id.attr,
3946
	&dev_attr_name.attr,
A
Alex Elder 已提交
3947
	&dev_attr_image_id.attr,
3948
	&dev_attr_current_snap.attr,
3949
	&dev_attr_parent.attr,
	&dev_attr_refresh.attr,
	NULL
};

static struct attribute_group rbd_attr_group = {
	.attrs = rbd_attrs,
};

static const struct attribute_group *rbd_attr_groups[] = {
	&rbd_attr_group,
	NULL
};

static void rbd_sysfs_dev_release(struct device *dev)
{
}

static struct device_type rbd_device_type = {
	.name		= "rbd",
	.groups		= rbd_attr_groups,
	.release	= rbd_sysfs_dev_release,
};
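
/*
 * Added commentary (not from the original source): the attributes above
 * surface under /sys/bus/rbd/devices/<id>/ (size, features, major,
 * minor, pool, current_snap, parent, ...); writing anything to the
 * "refresh" attribute forces a header re-read via rbd_dev_refresh().
 */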

static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
{
	kref_get(&spec->kref);

	return spec;
}

static void rbd_spec_free(struct kref *kref);
static void rbd_spec_put(struct rbd_spec *spec)
{
	if (spec)
		kref_put(&spec->kref, rbd_spec_free);
}

static struct rbd_spec *rbd_spec_alloc(void)
{
	struct rbd_spec *spec;

	spec = kzalloc(sizeof (*spec), GFP_KERNEL);
	if (!spec)
		return NULL;
3994 3995 3996

	spec->pool_id = CEPH_NOPOOL;
	spec->snap_id = CEPH_NOSNAP;
	kref_init(&spec->kref);

	return spec;
}

static void rbd_spec_free(struct kref *kref)
{
	struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);

	kfree(spec->pool_name);
	kfree(spec->image_id);
	kfree(spec->image_name);
	kfree(spec->snap_name);
	kfree(spec);
}

A
Alex Elder 已提交
4013
static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
				struct rbd_spec *spec)
{
	struct rbd_device *rbd_dev;

	rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
	if (!rbd_dev)
		return NULL;

	spin_lock_init(&rbd_dev->lock);
I
Ilya Dryomov 已提交
4023 4024
	INIT_LIST_HEAD(&rbd_dev->rq_queue);
	INIT_WORK(&rbd_dev->rq_work, rbd_request_workfn);
4025
	rbd_dev->flags = 0;
4026
	atomic_set(&rbd_dev->parent_ref, 0);
4027 4028 4029 4030 4031 4032
	INIT_LIST_HEAD(&rbd_dev->node);
	init_rwsem(&rbd_dev->header_rwsem);

	rbd_dev->spec = spec;
	rbd_dev->rbd_client = rbdc;

4033 4034 4035 4036 4037 4038 4039
	/* Initialize the layout used for all rbd requests */

	rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
	rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
	rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
	rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);

	return rbd_dev;
}

static void rbd_dev_destroy(struct rbd_device *rbd_dev)
{
	rbd_put_client(rbd_dev->rbd_client);
	rbd_spec_put(rbd_dev->spec);
	kfree(rbd_dev);
}

/*
 * Get the size and object order for an image snapshot, or if
 * snap_id is CEPH_NOSNAP, gets this information for the base
 * image.
 */
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u8 *order, u64 *snap_size)
{
	__le64 snapid = cpu_to_le64(snap_id);
	int ret;
	struct {
		u8 order;
		__le64 size;
	} __attribute__ ((packed)) size_buf = { 0 };

4065
	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4066
				"rbd", "get_size",
4067
				&snapid, sizeof (snapid),
4068
				&size_buf, sizeof (size_buf));
4069
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4070 4071
	if (ret < 0)
		return ret;
4072 4073
	if (ret < sizeof (size_buf))
		return -ERANGE;
4074

J
Josh Durgin 已提交
4075
	if (order) {
4076
		*order = size_buf.order;
J
Josh Durgin 已提交
4077 4078
		dout("  order %u", (unsigned int)*order);
	}
4079 4080
	*snap_size = le64_to_cpu(size_buf.size);

J
Josh Durgin 已提交
4081 4082
	dout("  snap_id 0x%016llx snap_size = %llu\n",
		(unsigned long long)snap_id,
4083
		(unsigned long long)*snap_size);

	return 0;
}

static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
{
	return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
					&rbd_dev->header.obj_order,
					&rbd_dev->header.image_size);
}

static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
{
	void *reply_buf;
	int ret;
	void *p;

	reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
	if (!reply_buf)
		return -ENOMEM;

4105
	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4106
				"rbd", "get_object_prefix", NULL, 0,
4107
				reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
4108
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4109 4110 4111 4112 4113
	if (ret < 0)
		goto out;

	p = reply_buf;
	rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
4114 4115
						p + ret, NULL, GFP_NOIO);
	ret = 0;

	if (IS_ERR(rbd_dev->header.object_prefix)) {
		ret = PTR_ERR(rbd_dev->header.object_prefix);
		rbd_dev->header.object_prefix = NULL;
	} else {
		dout("  object_prefix = %s\n", rbd_dev->header.object_prefix);
	}
out:
	kfree(reply_buf);

	return ret;
}

static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features)
{
	__le64 snapid = cpu_to_le64(snap_id);
	struct {
		__le64 features;
		__le64 incompat;
4136
	} __attribute__ ((packed)) features_buf = { 0 };
A
Alex Elder 已提交
4137
	u64 incompat;
4138 4139
	int ret;

4140
	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4141
				"rbd", "get_features",
4142
				&snapid, sizeof (snapid),
4143
				&features_buf, sizeof (features_buf));
4144
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4145 4146
	if (ret < 0)
		return ret;
4147 4148
	if (ret < sizeof (features_buf))
		return -ERANGE;
A
Alex Elder 已提交
4149 4150

	incompat = le64_to_cpu(features_buf.incompat);
A
Alex Elder 已提交
4151
	if (incompat & ~RBD_FEATURES_SUPPORTED)
A
Alex Elder 已提交
4152
		return -ENXIO;
A
Alex Elder 已提交
4153

4154 4155 4156
	*snap_features = le64_to_cpu(features_buf.features);

	dout("  snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
4157 4158 4159
		(unsigned long long)snap_id,
		(unsigned long long)*snap_features,
		(unsigned long long)le64_to_cpu(features_buf.incompat));
4160 4161 4162 4163 4164 4165 4166 4167 4168 4169

	return 0;
}

static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
{
	return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
						&rbd_dev->header.features);
}

static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
{
	struct rbd_spec *parent_spec;
	size_t size;
	void *reply_buf = NULL;
	__le64 snapid;
	void *p;
	void *end;
A
Alex Elder 已提交
4178
	u64 pool_id;
4179
	char *image_id;
4180
	u64 snap_id;
	u64 overlap;
	int ret;

	parent_spec = rbd_spec_alloc();
	if (!parent_spec)
		return -ENOMEM;

	size = sizeof (__le64) +				/* pool_id */
		sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX +	/* image_id */
		sizeof (__le64) +				/* snap_id */
		sizeof (__le64);				/* overlap */
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf) {
		ret = -ENOMEM;
		goto out_err;
	}

4198
	snapid = cpu_to_le64(rbd_dev->spec->snap_id);
4199
	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4200
				"rbd", "get_parent",
4201
				&snapid, sizeof (snapid),
4202
				reply_buf, size);
4203
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4204 4205 4206 4207
	if (ret < 0)
		goto out_err;

	p = reply_buf;
4208 4209
	end = reply_buf + ret;
	ret = -ERANGE;
A
Alex Elder 已提交
4210
	ceph_decode_64_safe(&p, end, pool_id, out_err);
	if (pool_id == CEPH_NOPOOL) {
		/*
		 * Either the parent never existed, or we have
		 * record of it but the image got flattened so it no
		 * longer has a parent.  When the parent of a
		 * layered image disappears we immediately set the
		 * overlap to 0.  The effect of this is that all new
		 * requests will be treated as if the image had no
		 * parent.
		 */
		if (rbd_dev->parent_overlap) {
			rbd_dev->parent_overlap = 0;
			smp_mb();
			rbd_dev_parent_put(rbd_dev);
			pr_info("%s: clone image has been flattened\n",
				rbd_dev->disk->disk_name);
		}

4229
		goto out;	/* No parent?  No problem. */
4230
	}
4231

4232 4233 4234
	/* The ceph file layout needs to fit pool id in 32 bits */

	ret = -EIO;
A
Alex Elder 已提交
4235
	if (pool_id > (u64)U32_MAX) {
4236
		rbd_warn(NULL, "parent pool id too large (%llu > %u)",
A
Alex Elder 已提交
4237
			(unsigned long long)pool_id, U32_MAX);
4238
		goto out_err;
A
Alex Elder 已提交
4239
	}
4240

A
Alex Elder 已提交
4241
	image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
4242 4243 4244 4245
	if (IS_ERR(image_id)) {
		ret = PTR_ERR(image_id);
		goto out_err;
	}
4246
	ceph_decode_64_safe(&p, end, snap_id, out_err);
4247 4248
	ceph_decode_64_safe(&p, end, overlap, out_err);

4249 4250 4251 4252 4253 4254 4255 4256 4257
	/*
	 * The parent won't change (except when the clone is
	 * flattened, already handled that).  So we only need to
	 * record the parent spec if we have not already done so.
	 */
	if (!rbd_dev->parent_spec) {
		parent_spec->pool_id = pool_id;
		parent_spec->image_id = image_id;
		parent_spec->snap_id = snap_id;
A
Alex Elder 已提交
4258 4259
		rbd_dev->parent_spec = parent_spec;
		parent_spec = NULL;	/* rbd_dev now owns this */
4260 4261
	} else {
		kfree(image_id);
	}

	/*
	 * We always update the parent overlap.  If it's zero we
	 * treat it specially.
	 */
	rbd_dev->parent_overlap = overlap;
	smp_mb();
	if (!overlap) {

		/* A null parent_spec indicates it's the initial probe */

		if (parent_spec) {
			/*
			 * The overlap has become zero, so the clone
			 * must have been resized down to 0 at some
			 * point.  Treat this the same as a flatten.
			 */
			rbd_dev_parent_put(rbd_dev);
			pr_info("%s: clone image now standalone\n",
				rbd_dev->disk->disk_name);
		} else {
			/*
			 * For the initial probe, if we find the
			 * overlap is zero we just pretend there was
			 * no parent image.
			 */
4289
			rbd_warn(rbd_dev, "ignoring parent with overlap 0");
4290
		}
A
Alex Elder 已提交
4291
	}
out:
	ret = 0;
out_err:
	kfree(reply_buf);
	rbd_spec_put(parent_spec);

	return ret;
}

static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
{
	struct {
		__le64 stripe_unit;
		__le64 stripe_count;
	} __attribute__ ((packed)) striping_info_buf = { 0 };
	size_t size = sizeof (striping_info_buf);
	void *p;
	u64 obj_size;
	u64 stripe_unit;
	u64 stripe_count;
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_stripe_unit_count", NULL, 0,
4316
				(char *)&striping_info_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < size)
		return -ERANGE;

	/*
	 * We don't actually support the "fancy striping" feature
	 * (STRIPINGV2) yet, but if the striping sizes are the
	 * defaults the behavior is the same as before.  So find
	 * out, and only fail if the image has non-default values.
	 */
	ret = -EINVAL;
	obj_size = (u64)1 << rbd_dev->header.obj_order;
	p = &striping_info_buf;
	stripe_unit = ceph_decode_64(&p);
	if (stripe_unit != obj_size) {
		rbd_warn(rbd_dev, "unsupported stripe unit "
				"(got %llu want %llu)",
				stripe_unit, obj_size);
		return -EINVAL;
	}
	stripe_count = ceph_decode_64(&p);
	if (stripe_count != 1) {
		rbd_warn(rbd_dev, "unsupported stripe count "
				"(got %llu want 1)", stripe_count);
		return -EINVAL;
	}
4345 4346
	rbd_dev->header.stripe_unit = stripe_unit;
	rbd_dev->header.stripe_count = stripe_count;
4347 4348 4349 4350

	return 0;
}

static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
{
	size_t image_id_size;
	char *image_id;
	void *p;
	void *end;
	size_t size;
	void *reply_buf = NULL;
	size_t len = 0;
	char *image_name = NULL;
	int ret;

	rbd_assert(!rbd_dev->spec->image_name);

A
Alex Elder 已提交
4365 4366
	len = strlen(rbd_dev->spec->image_id);
	image_id_size = sizeof (__le32) + len;
4367 4368 4369 4370 4371
	image_id = kmalloc(image_id_size, GFP_KERNEL);
	if (!image_id)
		return NULL;

	p = image_id;
4372
	end = image_id + image_id_size;
4373
	ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
4374 4375 4376 4377 4378 4379

	size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf)
		goto out;

4380
	ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
4381 4382
				"rbd", "dir_get_name",
				image_id, image_id_size,
4383
				reply_buf, size);
4384 4385 4386
	if (ret < 0)
		goto out;
	p = reply_buf;
4387 4388
	end = reply_buf + ret;

	image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
	if (IS_ERR(image_name))
		image_name = NULL;
	else
		dout("%s: name is %s len is %zd\n", __func__, image_name, len);
out:
	kfree(reply_buf);
	kfree(image_id);

	return image_name;
}

static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	const char *snap_name;
	u32 which = 0;

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which < snapc->num_snaps) {
		if (!strcmp(name, snap_name))
			return snapc->snaps[which];
		snap_name += strlen(snap_name) + 1;
		which++;
	}
	return CEPH_NOSNAP;
}

static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u32 which;
	bool found = false;
	u64 snap_id;

	for (which = 0; !found && which < snapc->num_snaps; which++) {
		const char *snap_name;

		snap_id = snapc->snaps[which];
		snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
4431 4432 4433 4434 4435 4436 4437
		if (IS_ERR(snap_name)) {
			/* ignore no-longer existing snapshots */
			if (PTR_ERR(snap_name) == -ENOENT)
				continue;
			else
				break;
		}
		found = !strcmp(name, snap_name);
		kfree(snap_name);
	}
	return found ? snap_id : CEPH_NOSNAP;
}

/*
 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
 * no snapshot by that name is found, or if an error occurs.
 */
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	if (rbd_dev->image_format == 1)
		return rbd_v1_snap_id_by_name(rbd_dev, name);

	return rbd_v2_snap_id_by_name(rbd_dev, name);
}

4456
/*
 * An image being mapped will have everything but the snap id.
 */
static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;

	rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
	rbd_assert(spec->image_id && spec->image_name);
	rbd_assert(spec->snap_name);

	if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
		u64 snap_id;

		snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
		if (snap_id == CEPH_NOSNAP)
			return -ENOENT;

		spec->snap_id = snap_id;
	} else {
		spec->snap_id = CEPH_NOSNAP;
	}

	return 0;
}

/*
 * A parent image will have all ids but none of the names.
4484
 *
4485 4486
 * All names in an rbd spec are dynamically allocated.  It's OK if we
 * can't figure out the name for an image id.
4487
 */
4488
static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
4489
{
4490 4491 4492 4493 4494
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_spec *spec = rbd_dev->spec;
	const char *pool_name;
	const char *image_name;
	const char *snap_name;
4495 4496
	int ret;

4497 4498 4499
	rbd_assert(spec->pool_id != CEPH_NOPOOL);
	rbd_assert(spec->image_id);
	rbd_assert(spec->snap_id != CEPH_NOSNAP);
4500

4501
	/* Get the pool name; we have to make our own copy of this */
4502

4503 4504 4505
	pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
	if (!pool_name) {
		rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
4506 4507
		return -EIO;
	}
4508 4509
	pool_name = kstrdup(pool_name, GFP_KERNEL);
	if (!pool_name)
4510 4511 4512 4513
		return -ENOMEM;

	/* Fetch the image name; tolerate failure here */

4514 4515
	image_name = rbd_dev_image_name(rbd_dev);
	if (!image_name)
A
Alex Elder 已提交
4516
		rbd_warn(rbd_dev, "unable to get image name");
4517

4518
	/* Fetch the snapshot name */
4519

4520
	snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
4521 4522
	if (IS_ERR(snap_name)) {
		ret = PTR_ERR(snap_name);
4523
		goto out_err;
4524 4525 4526 4527 4528
	}

	spec->pool_name = pool_name;
	spec->image_name = image_name;
	spec->snap_name = snap_name;
4529 4530

	return 0;
4531

4532
out_err:
4533 4534
	kfree(image_name);
	kfree(pool_name);
4535 4536 4537
	return ret;
}

A
Alex Elder 已提交
4538
static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
{
	size_t size;
	int ret;
	void *reply_buf;
	void *p;
	void *end;
	u64 seq;
	u32 snap_count;
	struct ceph_snap_context *snapc;
	u32 i;

	/*
	 * We'll need room for the seq value (maximum snapshot id),
	 * snapshot count, and array of that many snapshot ids.
	 * For now we have a fixed upper limit on the number we're
	 * prepared to receive.
	 */
	size = sizeof (__le64) + sizeof (__le32) +
			RBD_MAX_SNAP_COUNT * sizeof (__le64);
	reply_buf = kzalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return -ENOMEM;

4562
	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4563
				"rbd", "get_snapcontext", NULL, 0,
4564
				reply_buf, size);
4565
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4566 4567 4568 4569
	if (ret < 0)
		goto out;

	p = reply_buf;
4570 4571
	end = reply_buf + ret;
	ret = -ERANGE;
	ceph_decode_64_safe(&p, end, seq, out);
	ceph_decode_32_safe(&p, end, snap_count, out);

	/*
	 * Make sure the reported number of snapshot ids wouldn't go
	 * beyond the end of our buffer.  But before checking that,
	 * make sure the computed size of the snapshot context we
	 * allocate is representable in a size_t.
	 */
	if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
				 / sizeof (u64)) {
		ret = -EINVAL;
		goto out;
	}
	if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
		goto out;
4588
	ret = 0;
4589

4590
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc) {
		ret = -ENOMEM;
		goto out;
	}
	snapc->seq = seq;
	for (i = 0; i < snap_count; i++)
		snapc->snaps[i] = ceph_decode_64(&p);

4599
	ceph_put_snap_context(rbd_dev->header.snapc);
4600 4601 4602
	rbd_dev->header.snapc = snapc;

	dout("  snap context seq = %llu, snap_count = %u\n",
4603
		(unsigned long long)seq, (unsigned int)snap_count);
4604 4605 4606
out:
	kfree(reply_buf);

4607
	return ret;
4608 4609
}

4610 4611
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
A
Alex Elder 已提交
4612 4613 4614
{
	size_t size;
	void *reply_buf;
4615
	__le64 snapid;
A
Alex Elder 已提交
	int ret;
	void *p;
	void *end;
	char *snap_name;

	size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return ERR_PTR(-ENOMEM);

4626
	snapid = cpu_to_le64(snap_id);
4627
	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
A
Alex Elder 已提交
4628
				"rbd", "get_snapshot_name",
4629
				&snapid, sizeof (snapid),
4630
				reply_buf, size);
4631
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4632 4633
	if (ret < 0) {
		snap_name = ERR_PTR(ret);
A
Alex Elder 已提交
4634
		goto out;
4635
	}
A
Alex Elder 已提交
4636 4637

	p = reply_buf;
4638
	end = reply_buf + ret;
A
Alex Elder 已提交
4639
	snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
4640
	if (IS_ERR(snap_name))
A
Alex Elder 已提交
4641 4642
		goto out;

4643
	dout("  snap_id 0x%016llx snap_name = %s\n",
4644
		(unsigned long long)snap_id, snap_name);
A
Alex Elder 已提交
4645 4646 4647
out:
	kfree(reply_buf);

4648
	return snap_name;
A
Alex Elder 已提交
4649 4650
}

4651
static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
A
Alex Elder 已提交
4652
{
4653
	bool first_time = rbd_dev->header.object_prefix == NULL;
A
Alex Elder 已提交
4654 4655
	int ret;

4656 4657
	ret = rbd_dev_v2_image_size(rbd_dev);
	if (ret)
4658
		return ret;
4659

4660 4661 4662
	if (first_time) {
		ret = rbd_dev_v2_header_onetime(rbd_dev);
		if (ret)
4663
			return ret;
4664 4665
	}

A
Alex Elder 已提交
4666
	ret = rbd_dev_v2_snap_context(rbd_dev);
A
Alex Elder 已提交
4667 4668 4669 4670 4671
	dout("rbd_dev_v2_snap_context returned %d\n", ret);

	return ret;
}

static int rbd_dev_header_info(struct rbd_device *rbd_dev)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_header_info(rbd_dev);

	return rbd_dev_v2_header_info(rbd_dev);
}

4682 4683 4684
static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
{
	struct device *dev;
4685
	int ret;
4686

4687
	dev = &rbd_dev->dev;
4688 4689 4690
	dev->bus = &rbd_bus_type;
	dev->type = &rbd_device_type;
	dev->parent = &rbd_root_dev;
4691
	dev->release = rbd_dev_device_release;
A
Alex Elder 已提交
4692
	dev_set_name(dev, "%d", rbd_dev->dev_id);
4693 4694 4695
	ret = device_register(dev);

	return ret;
4696 4697
}

4698 4699 4700 4701 4702
static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
{
	device_unregister(&rbd_dev->dev);
}

4703
/*
4704
 * Get a unique rbd identifier for the given new rbd_dev, and add
4705
 * the rbd_dev to the global list.
4706
 */
4707
static int rbd_dev_id_get(struct rbd_device *rbd_dev)
4708
{
4709 4710
	int new_dev_id;

4711 4712 4713
	new_dev_id = ida_simple_get(&rbd_dev_id_ida,
				    0, minor_to_rbd_dev_id(1 << MINORBITS),
				    GFP_KERNEL);
4714 4715 4716 4717
	if (new_dev_id < 0)
		return new_dev_id;

	rbd_dev->dev_id = new_dev_id;
4718 4719 4720 4721

	spin_lock(&rbd_dev_list_lock);
	list_add_tail(&rbd_dev->node, &rbd_dev_list);
	spin_unlock(&rbd_dev_list_lock);
4722

4723
	dout("rbd_dev %p given dev id %d\n", rbd_dev, rbd_dev->dev_id);
4724 4725

	return 0;
4726
}
4727

4728
/*
4729 4730
 * Remove an rbd_dev from the global list, and record that its
 * identifier is no longer in use.
4731
 */
A
Alex Elder 已提交
4732
static void rbd_dev_id_put(struct rbd_device *rbd_dev)
4733
{
4734 4735 4736
	spin_lock(&rbd_dev_list_lock);
	list_del_init(&rbd_dev->node);
	spin_unlock(&rbd_dev_list_lock);
4737

4738 4739 4740
	ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);

	dout("rbd_dev %p released dev id %d\n", rbd_dev, rbd_dev->dev_id);
4741 4742
}

4743 4744 4745
/*
 * Skips over white space at *buf, and updates *buf to point to the
 * first found non-space character (if any). Returns the length of
A
Alex Elder 已提交
4746 4747
 * the token (string of non-white space characters) found.  Note
 * that *buf must be terminated with '\0'.
 */
static inline size_t next_token(const char **buf)
{
        /*
        * These are the characters that produce nonzero for
        * isspace() in the "C" and "POSIX" locales.
        */
        const char *spaces = " \f\n\r\t\v";

        *buf += strspn(*buf, spaces);	/* Find start of token */

	return strcspn(*buf, spaces);   /* Return token length */
}

/*
 * Finds the next token in *buf, and if the provided token buffer is
 * big enough, copies the found token into it.  The result, if
A
Alex Elder 已提交
4765 4766
 * copied, is guaranteed to be terminated with '\0'.  Note that *buf
 * must be terminated with '\0' on entry.
4767 4768 4769 4770 4771
 *
 * Returns the length of the token found (not including the '\0').
 * Return value will be 0 if no token is found, and it will be >=
 * token_size if the token would not fit.
 *
A
Alex Elder 已提交
4772
 * The *buf pointer will be updated to point beyond the end of the
 * found token.  Note that this occurs even if the token buffer is
 * too small to hold it.
 */
static inline size_t copy_token(const char **buf,
				char *token,
				size_t token_size)
{
        size_t len;

	len = next_token(buf);
	if (len < token_size) {
		memcpy(token, *buf, len);
		*(token + len) = '\0';
	}
	*buf += len;

        return len;
}

A
Alex Elder 已提交
4792 4793 4794 4795 4796 4797 4798 4799 4800 4801 4802 4803 4804 4805 4806 4807 4808 4809 4810 4811 4812 4813
/*
 * Finds the next token in *buf, dynamically allocates a buffer big
 * enough to hold a copy of it, and copies the token into the new
 * buffer.  The copy is guaranteed to be terminated with '\0'.  Note
 * that a duplicate buffer is created even for a zero-length token.
 *
 * Returns a pointer to the newly-allocated duplicate, or a null
 * pointer if memory for the duplicate was not available.  If
 * the lenp argument is a non-null pointer, the length of the token
 * (not including the '\0') is returned in *lenp.
 *
 * If successful, the *buf pointer will be updated to point beyond
 * the end of the found token.
 *
 * Note: uses GFP_KERNEL for allocation.
 */
static inline char *dup_token(const char **buf, size_t *lenp)
{
	char *dup;
	size_t len;

	len = next_token(buf);
A
Alex Elder 已提交
4814
	dup = kmemdup(*buf, len + 1, GFP_KERNEL);
A
Alex Elder 已提交
4815 4816 4817 4818 4819 4820 4821 4822 4823 4824 4825
	if (!dup)
		return NULL;
	*(dup + len) = '\0';
	*buf += len;

	if (lenp)
		*lenp = len;

	return dup;
}

4826
/*
4827 4828 4829 4830
 * Parse the options provided for an "rbd add" (i.e., rbd image
 * mapping) request.  These arrive via a write to /sys/bus/rbd/add,
 * and the data written is passed here via a NUL-terminated buffer.
 * Returns 0 if successful or an error code otherwise.
A
Alex Elder 已提交
4831
 *
 * The information extracted from these options is recorded in
 * the other parameters which return dynamically-allocated
 * structures:
 *  ceph_opts
 *      The address of a pointer that will refer to a ceph options
 *      structure.  Caller must release the returned pointer using
 *      ceph_destroy_options() when it is no longer needed.
 *  rbd_opts
 *	Address of an rbd options pointer.  Fully initialized by
 *	this function; caller must release with kfree().
 *  spec
 *	Address of an rbd image specification pointer.  Fully
 *	initialized by this function based on parsed options.
 *	Caller must release with rbd_spec_put().
 *
 * The options passed take this form:
 *  <mon_addrs> <options> <pool_name> <image_name> [<snap_id>]
 * where:
 *  <mon_addrs>
 *      A comma-separated list of one or more monitor addresses.
 *      A monitor address is an ip address, optionally followed
 *      by a port number (separated by a colon).
 *        I.e.:  ip1[:port1][,ip2[:port2]...]
 *  <options>
 *      A comma-separated list of ceph and/or rbd options.
 *  <pool_name>
 *      The name of the rados pool containing the rbd image.
 *  <image_name>
 *      The name of the image in that pool to map.
 *  <snap_id>
 *      An optional snapshot id.  If provided, the mapping will
 *      present data from the image at the time that snapshot was
 *      created.  The image head is used if no snapshot id is
 *      provided.  Snapshot mappings are always read-only.
4866
 */
4867
static int rbd_add_parse_args(const char *buf,
4868
				struct ceph_options **ceph_opts,
4869 4870
				struct rbd_options **opts,
				struct rbd_spec **rbd_spec)
4871
{
A
Alex Elder 已提交
4872
	size_t len;
4873
	char *options;
4874
	const char *mon_addrs;
4875
	char *snap_name;
4876
	size_t mon_addrs_size;
4877
	struct rbd_spec *spec = NULL;
4878
	struct rbd_options *rbd_opts = NULL;
4879
	struct ceph_options *copts;
4880
	int ret;
4881 4882 4883

	/* The first four tokens are required */

4884
	len = next_token(&buf);
4885 4886 4887 4888
	if (!len) {
		rbd_warn(NULL, "no monitor address(es) provided");
		return -EINVAL;
	}
4889
	mon_addrs = buf;
4890
	mon_addrs_size = len + 1;
4891
	buf += len;
4892

4893
	ret = -EINVAL;
4894 4895
	options = dup_token(&buf, NULL);
	if (!options)
4896
		return -ENOMEM;
4897 4898 4899 4900
	if (!*options) {
		rbd_warn(NULL, "no options provided");
		goto out_err;
	}
4901

4902 4903
	spec = rbd_spec_alloc();
	if (!spec)
4904
		goto out_mem;
4905 4906 4907 4908

	spec->pool_name = dup_token(&buf, NULL);
	if (!spec->pool_name)
		goto out_mem;
4909 4910 4911 4912
	if (!*spec->pool_name) {
		rbd_warn(NULL, "no pool name provided");
		goto out_err;
	}
4913

A
Alex Elder 已提交
4914
	spec->image_name = dup_token(&buf, NULL);
4915
	if (!spec->image_name)
4916
		goto out_mem;
4917 4918 4919 4920
	if (!*spec->image_name) {
		rbd_warn(NULL, "no image name provided");
		goto out_err;
	}
4921

4922 4923 4924 4925
	/*
	 * Snapshot name is optional; default is to use "-"
	 * (indicating the head/no snapshot).
	 */
4926
	len = next_token(&buf);
4927
	if (!len) {
4928 4929
		buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
		len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
4930
	} else if (len > RBD_MAX_SNAP_NAME_LEN) {
4931
		ret = -ENAMETOOLONG;
4932
		goto out_err;
4933
	}
4934 4935
	snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
	if (!snap_name)
4936
		goto out_mem;
4937 4938
	*(snap_name + len) = '\0';
	spec->snap_name = snap_name;
A
Alex Elder 已提交
4939

4940
	/* Initialize all rbd options to the defaults */
4941

4942 4943 4944 4945 4946
	rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
	if (!rbd_opts)
		goto out_mem;

	rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
A
Alex Elder 已提交
4947

4948
	copts = ceph_parse_options(options, mon_addrs,
4949
					mon_addrs + mon_addrs_size - 1,
4950
					parse_rbd_opts_token, rbd_opts);
4951 4952
	if (IS_ERR(copts)) {
		ret = PTR_ERR(copts);
4953 4954
		goto out_err;
	}
4955 4956 4957
	kfree(options);

	*ceph_opts = copts;
4958
	*opts = rbd_opts;
4959
	*rbd_spec = spec;
4960

4961
	return 0;
4962
out_mem:
4963
	ret = -ENOMEM;
A
Alex Elder 已提交
4964
out_err:
4965 4966
	kfree(rbd_opts);
	rbd_spec_put(spec);
4967
	kfree(options);
A
Alex Elder 已提交
4968

4969
	return ret;
4970 4971
}

4972 4973 4974 4975 4976 4977 4978 4979 4980 4981 4982 4983 4984 4985 4986 4987 4988 4989 4990 4991 4992 4993 4994 4995 4996 4997 4998 4999 5000 5001 5002 5003
/*
 * Return pool id (>= 0) or a negative error code.
 */
static int rbd_add_get_pool_id(struct rbd_client *rbdc, const char *pool_name)
{
	u64 newest_epoch;
	unsigned long timeout = rbdc->client->options->mount_timeout * HZ;
	int tries = 0;
	int ret;

again:
	ret = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, pool_name);
	if (ret == -ENOENT && tries++ < 1) {
		ret = ceph_monc_do_get_version(&rbdc->client->monc, "osdmap",
					       &newest_epoch);
		if (ret < 0)
			return ret;

		if (rbdc->client->osdc.osdmap->epoch < newest_epoch) {
			ceph_monc_request_next_osdmap(&rbdc->client->monc);
			(void) ceph_monc_wait_osdmap(&rbdc->client->monc,
						     newest_epoch, timeout);
			goto again;
		} else {
			/* the osdmap we have is new enough */
			return -ENOENT;
		}
	}

	return ret;
}

A
Alex Elder 已提交
5004 5005 5006 5007 5008 5009 5010 5011 5012 5013 5014 5015 5016 5017 5018 5019 5020 5021 5022 5023
/*
 * An rbd format 2 image has a unique identifier, distinct from the
 * name given to it by the user.  Internally, that identifier is
 * what's used to specify the names of objects related to the image.
 *
 * A special "rbd id" object is used to map an rbd image name to its
 * id.  If that object doesn't exist, then there is no v2 rbd image
 * with the supplied name.
 *
 * This function will record the given rbd_dev's image_id field if
 * it can be determined, and in that case will return 0.  If any
 * errors occur a negative errno will be returned and the rbd_dev's
 * image_id field will be unchanged (and should be NULL).
 */
static int rbd_dev_image_id(struct rbd_device *rbd_dev)
{
	int ret;
	size_t size;
	char *object_name;
	void *response;
5024
	char *image_id;
5025

A
Alex Elder 已提交
5026 5027 5028
	/*
	 * When probing a parent image, the image id is already
	 * known (and the image name likely is not).  There's no
5029 5030
	 * need to fetch the image id again in this case.  We
	 * do still need to set the image format though.
A
Alex Elder 已提交
5031
	 */
5032 5033 5034
	if (rbd_dev->spec->image_id) {
		rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;

A
Alex Elder 已提交
5035
		return 0;
5036
	}
A
Alex Elder 已提交
5037

A
Alex Elder 已提交
5038 5039 5040 5041
	/*
	 * First, see if the format 2 image id file exists, and if
	 * so, get the image's persistent id from it.
	 */
A
Alex Elder 已提交
5042
	size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
A
Alex Elder 已提交
5043 5044 5045
	object_name = kmalloc(size, GFP_NOIO);
	if (!object_name)
		return -ENOMEM;
5046
	sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
A
Alex Elder 已提交
	dout("rbd id object name is %s\n", object_name);

	/* Response will be an encoded string, which includes a length */

	size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
	response = kzalloc(size, GFP_NOIO);
	if (!response) {
		ret = -ENOMEM;
		goto out;
	}

5058 5059
	/* If it doesn't exist we'll assume it's a format 1 image */

5060
	ret = rbd_obj_method_sync(rbd_dev, object_name,
5061
				"rbd", "get_id", NULL, 0,
5062
				response, RBD_IMAGE_ID_LEN_MAX);
5063
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5064 5065 5066 5067 5068
	if (ret == -ENOENT) {
		image_id = kstrdup("", GFP_KERNEL);
		ret = image_id ? 0 : -ENOMEM;
		if (!ret)
			rbd_dev->image_format = 1;
5069
	} else if (ret >= 0) {
5070 5071 5072
		void *p = response;

		image_id = ceph_extract_encoded_string(&p, p + ret,
A
Alex Elder 已提交
5073
						NULL, GFP_NOIO);
5074
		ret = PTR_ERR_OR_ZERO(image_id);
		if (!ret)
			rbd_dev->image_format = 2;
	}

	if (!ret) {
		rbd_dev->spec->image_id = image_id;
		dout("image_id is %s\n", image_id);
A
Alex Elder 已提交
	}
out:
	kfree(response);
	kfree(object_name);

	return ret;
}

A
Alex Elder 已提交
5090 5091 5092 5093
/*
 * Undo whatever state changes are made by v1 or v2 header info
 * call.
 */
A
Alex Elder 已提交
5094 5095 5096 5097
static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
	struct rbd_image_header	*header;

5098 5099 5100 5101
	/* Drop parent reference unless it's already been done (or none) */

	if (rbd_dev->parent_overlap)
		rbd_dev_parent_put(rbd_dev);
A
Alex Elder 已提交
5102 5103 5104 5105

	/* Free dynamic fields from the header, then zero it out */

	header = &rbd_dev->header;
5106
	ceph_put_snap_context(header->snapc);
A
Alex Elder 已提交
	kfree(header->snap_sizes);
	kfree(header->snap_names);
	kfree(header->object_prefix);
	memset(header, 0, sizeof (*header));
}

5113
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
5114 5115 5116
{
	int ret;

5117
	ret = rbd_dev_v2_object_prefix(rbd_dev);
5118
	if (ret)
5119 5120
		goto out_err;

5121 5122 5123 5124
	/*
	 * Get and check the features for the image.  Currently the
	 * features are assumed to never change.
	 */
5125
	ret = rbd_dev_v2_features(rbd_dev);
5126
	if (ret)
5127
		goto out_err;
5128

	/* If the image supports fancy striping, get its parameters */

	if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
		ret = rbd_dev_v2_striping_info(rbd_dev);
		if (ret < 0)
			goto out_err;
	}
5136
	/* No support for crypto and compression type format 2 images */
5137

A
Alex Elder 已提交
5138
	return 0;
5139
out_err:
A
Alex Elder 已提交
5140
	rbd_dev->header.features = 0;
5141 5142
	kfree(rbd_dev->header.object_prefix);
	rbd_dev->header.object_prefix = NULL;
5143 5144

	return ret;
5145 5146
}

5147
static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
A
Alex Elder 已提交
5148
{
5149
	struct rbd_device *parent = NULL;
	struct rbd_spec *parent_spec;
	struct rbd_client *rbdc;
	int ret;

	if (!rbd_dev->parent_spec)
		return 0;
	/*
	 * We need to pass a reference to the client and the parent
	 * spec when creating the parent rbd_dev.  Images related by
	 * parent/child relationships always share both.
	 */
	parent_spec = rbd_spec_get(rbd_dev->parent_spec);
	rbdc = __rbd_get_client(rbd_dev->rbd_client);

	ret = -ENOMEM;
	parent = rbd_dev_create(rbdc, parent_spec);
	if (!parent)
		goto out_err;

5169
	ret = rbd_dev_image_probe(parent, false);
5170 5171 5172
	if (ret < 0)
		goto out_err;
	rbd_dev->parent = parent;
5173
	atomic_set(&rbd_dev->parent_ref, 1);
5174 5175 5176 5177

	return 0;
out_err:
	if (parent) {
A
Alex Elder 已提交
5178
		rbd_dev_unparent(rbd_dev);
		kfree(rbd_dev->header_name);
		rbd_dev_destroy(parent);
	} else {
		rbd_put_client(rbdc);
		rbd_spec_put(parent_spec);
	}

	return ret;
}

5189
static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
5190
{
A
Alex Elder 已提交
5191
	int ret;
A
Alex Elder 已提交
5192

5193 5194 5195 5196 5197
	/* Get an id and fill in device name. */

	ret = rbd_dev_id_get(rbd_dev);
	if (ret)
		return ret;
A
Alex Elder 已提交
5198 5199 5200 5201 5202

	BUILD_BUG_ON(DEV_NAME_LEN
			< sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
	sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);

5203
	/* Record our major and minor device numbers. */

	if (!single_major) {
		ret = register_blkdev(0, rbd_dev->name);
		if (ret < 0)
			goto err_out_id;

		rbd_dev->major = ret;
		rbd_dev->minor = 0;
	} else {
		rbd_dev->major = rbd_major;
		rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
	}

	/* Set up the blkdev mapping. */

	ret = rbd_init_disk(rbd_dev);
	if (ret)
		goto err_out_blkdev;

	ret = rbd_dev_mapping_set(rbd_dev);
	if (ret)
		goto err_out_disk;

	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
	set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);

	rbd_dev->rq_wq = alloc_workqueue("%s", 0, 0, rbd_dev->disk->disk_name);
	if (!rbd_dev->rq_wq) {
		ret = -ENOMEM;
		goto err_out_mapping;
	}

	ret = rbd_bus_add_dev(rbd_dev);
	if (ret)
		goto err_out_workqueue;

	/* Everything's ready.  Announce the disk to the world. */

	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	add_disk(rbd_dev->disk);

	pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
		(unsigned long long) rbd_dev->mapping.size);

	return ret;

err_out_workqueue:
	destroy_workqueue(rbd_dev->rq_wq);
	rbd_dev->rq_wq = NULL;
err_out_mapping:
	rbd_dev_mapping_clear(rbd_dev);
err_out_disk:
	rbd_free_disk(rbd_dev);
err_out_blkdev:
	if (!single_major)
		unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_id:
	rbd_dev_id_put(rbd_dev);
	rbd_dev_mapping_clear(rbd_dev);

	return ret;
}

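/*
 * Build the name of the header object for this image.  Format 1
 * images use the image name plus RBD_SUFFIX; format 2 images use
 * RBD_HEADER_PREFIX followed by the image id.
 */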
static int rbd_dev_header_name(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;
	size_t size;

	/* Record the header object name for this rbd image. */

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	if (rbd_dev->image_format == 1)
		size = strlen(spec->image_name) + sizeof (RBD_SUFFIX);
	else
		size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id);

	rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
	if (!rbd_dev->header_name)
		return -ENOMEM;

	if (rbd_dev->image_format == 1)
		sprintf(rbd_dev->header_name, "%s%s",
			spec->image_name, RBD_SUFFIX);
	else
		sprintf(rbd_dev->header_name, "%s%s",
			RBD_HEADER_PREFIX, spec->image_id);
	return 0;
}

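/*
 * Tear down everything rbd_dev_image_probe() set up: header state,
 * the header object name, the image format and id, and finally the
 * rbd_device itself.
 */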
static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
	rbd_dev_unprobe(rbd_dev);
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	rbd_dev_destroy(rbd_dev);
}

/*
 * Probe for the existence of the header object for the given rbd
 * device.  If this image is the one being mapped (i.e., not a
 * parent), initiate a watch on its header object before using that
 * object to get detailed information about the rbd image.
 */
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
{
	int ret;

	/*
	 * Get the id from the image id object.  Unless there's an
	 * error, rbd_dev->spec->image_id will be filled in with
	 * a dynamically-allocated string, and rbd_dev->image_format
	 * will be set to either 1 or 2.
	 */
	ret = rbd_dev_image_id(rbd_dev);
	if (ret)
		return ret;

	ret = rbd_dev_header_name(rbd_dev);
	if (ret)
		goto err_out_format;

	if (mapping) {
		ret = rbd_dev_header_watch_sync(rbd_dev);
		if (ret)
			goto out_header_name;
	}

	ret = rbd_dev_header_info(rbd_dev);
	if (ret)
		goto err_out_watch;

	/*
	 * If this image is the one being mapped, we have pool name and
	 * id, image name and id, and snap name - need to fill snap id.
	 * Otherwise this is a parent image, identified by pool, image
	 * and snap ids - need to fill in names for those ids.
	 */
	if (mapping)
		ret = rbd_spec_fill_snap_id(rbd_dev);
	else
		ret = rbd_spec_fill_names(rbd_dev);
	if (ret)
		goto err_out_probe;

	if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
		ret = rbd_dev_v2_parent_info(rbd_dev);
		if (ret)
			goto err_out_probe;

		/*
		 * Need to warn users if this image is the one being
		 * mapped and has a parent.
		 */
		if (mapping && rbd_dev->parent_spec)
			rbd_warn(rbd_dev,
				 "WARNING: kernel layering is EXPERIMENTAL!");
	}

	ret = rbd_dev_probe_parent(rbd_dev);
	if (ret)
		goto err_out_probe;

	dout("discovered format %u image, header name is %s\n",
		rbd_dev->image_format, rbd_dev->header_name);
	return 0;

err_out_probe:
	rbd_dev_unprobe(rbd_dev);
err_out_watch:
	if (mapping)
		rbd_dev_header_unwatch_sync(rbd_dev);
out_header_name:
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
err_out_format:
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;
	return ret;
}

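/*
 * Handle a write to the sysfs "add" attribute.  The buffer is
 * expected to follow the format described in
 * Documentation/ABI/testing/sysfs-bus-rbd, roughly:
 *
 *   <mon addrs> <options> <pool name> <image name> [<snap name>]
 *
 * e.g. (illustrative values only):
 *
 *   echo "1.2.3.4:6789 name=admin rbd myimage" > /sys/bus/rbd/add
 */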
static ssize_t do_rbd_add(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct ceph_options *ceph_opts = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct rbd_spec *spec = NULL;
	struct rbd_client *rbdc;
	bool read_only;
	int rc = -ENOMEM;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	/* parse add command */
	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
	if (rc < 0)
		goto err_out_module;
	read_only = rbd_opts->read_only;
	kfree(rbd_opts);
	rbd_opts = NULL;	/* done with this */

	rbdc = rbd_get_client(ceph_opts);
	if (IS_ERR(rbdc)) {
		rc = PTR_ERR(rbdc);
		goto err_out_args;
	}

	/* pick the pool */
	rc = rbd_add_get_pool_id(rbdc, spec->pool_name);
	if (rc < 0)
		goto err_out_client;
	spec->pool_id = (u64)rc;

	/* The ceph file layout needs the pool id to fit in 32 bits */

	if (spec->pool_id > (u64)U32_MAX) {
		rbd_warn(NULL, "pool id too large (%llu > %u)",
				(unsigned long long)spec->pool_id, U32_MAX);
		rc = -EIO;
		goto err_out_client;
	}

	rbd_dev = rbd_dev_create(rbdc, spec);
	if (!rbd_dev)
		goto err_out_client;
	rbdc = NULL;		/* rbd_dev now owns this */
	spec = NULL;		/* rbd_dev now owns this */

	rc = rbd_dev_image_probe(rbd_dev, true);
	if (rc < 0)
		goto err_out_rbd_dev;

	/* If we are mapping a snapshot it must be marked read-only */

	if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
		read_only = true;
	rbd_dev->mapping.read_only = read_only;

	rc = rbd_dev_device_setup(rbd_dev);
	if (rc) {
		/*
		 * rbd_dev_header_unwatch_sync() can't be moved into
		 * rbd_dev_image_release() without refactoring, see
		 * commit 1f3ef78861ac.
		 */
		rbd_dev_header_unwatch_sync(rbd_dev);
		rbd_dev_image_release(rbd_dev);
		goto err_out_module;
	}

	return count;

err_out_rbd_dev:
	rbd_dev_destroy(rbd_dev);
err_out_client:
	rbd_put_client(rbdc);
err_out_args:
	rbd_spec_put(spec);
err_out_module:
	module_put(THIS_MODULE);

	dout("Error adding device %s\n", buf);

	return (ssize_t)rc;
}

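/*
 * Bus attribute store handlers for "add".  When the driver was
 * loaded with single_major=Y, additions go through the
 * add_single_major attribute instead, so plain "add" is rejected.
 */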
static ssize_t rbd_add(struct bus_type *bus,
		       const char *buf,
		       size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_add(bus, buf, count);
}

static ssize_t rbd_add_single_major(struct bus_type *bus,
				    const char *buf,
				    size_t count)
{
	return do_rbd_add(bus, buf, count);
}

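/*
 * Release callback for the rbd device: undo what
 * rbd_dev_device_setup() did, in roughly the reverse order.
 */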
static void rbd_dev_device_release(struct device *dev)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	destroy_workqueue(rbd_dev->rq_wq);
	rbd_free_disk(rbd_dev);
	clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	rbd_dev_mapping_clear(rbd_dev);
	if (!single_major)
		unregister_blkdev(rbd_dev->major, rbd_dev->name);
	rbd_dev_id_put(rbd_dev);
	rbd_dev_mapping_clear(rbd_dev);
}

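/*
 * Tear down a chain of parent images.  Each pass walks to the
 * deepest ancestor (the parent with no parent of its own), releases
 * it and detaches it from its child, until no parents remain.
 */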
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
{
	while (rbd_dev->parent) {
		struct rbd_device *first = rbd_dev;
		struct rbd_device *second = first->parent;
		struct rbd_device *third;

		/*
		 * Follow to the parent with no grandparent and
		 * remove it.
		 */
		while (second && (third = second->parent)) {
			first = second;
			second = third;
		}
		rbd_assert(second);
		rbd_dev_image_release(second);
		first->parent = NULL;
		first->parent_overlap = 0;

		rbd_assert(first->parent_spec);
		rbd_spec_put(first->parent_spec);
		first->parent_spec = NULL;
	}
}

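/*
 * Handle a write to the sysfs "remove" attribute.  The buffer holds
 * the id of the device to remove, e.g. (assuming device id 2):
 *
 *   echo 2 > /sys/bus/rbd/remove
 *
 * Removal is refused while the device is still open.
 */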
static ssize_t do_rbd_remove(struct bus_type *bus,
			     const char *buf,
			     size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct list_head *tmp;
	int dev_id;
	unsigned long ul;
	bool already = false;
	int ret;

	ret = kstrtoul(buf, 10, &ul);
	if (ret)
		return ret;

	/* convert to int; abort if we lost anything in the conversion */
	dev_id = (int)ul;
	if (dev_id != ul)
		return -EINVAL;

	ret = -ENOENT;
	spin_lock(&rbd_dev_list_lock);
	list_for_each(tmp, &rbd_dev_list) {
		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id == dev_id) {
			ret = 0;
			break;
		}
	}
	if (!ret) {
		spin_lock_irq(&rbd_dev->lock);
		if (rbd_dev->open_count)
			ret = -EBUSY;
		else
			already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
							&rbd_dev->flags);
		spin_unlock_irq(&rbd_dev->lock);
	}
	spin_unlock(&rbd_dev_list_lock);
	if (ret < 0 || already)
		return ret;

	rbd_dev_header_unwatch_sync(rbd_dev);
	/*
	 * Flush remaining watch callbacks - these must be complete
	 * before the osd_client is shut down.
	 */
	dout("%s: flushing notifies", __func__);
	ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);

	/*
	 * Don't free anything from rbd_dev->disk until after all
	 * notifies are completely processed. Otherwise
	 * rbd_bus_del_dev() will race with rbd_watch_cb(), resulting
	 * in a potential use after free of rbd_dev->disk or rbd_dev.
	 */
	rbd_bus_del_dev(rbd_dev);
	rbd_dev_image_release(rbd_dev);
	module_put(THIS_MODULE);

	return count;
}

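/*
 * Bus attribute store handlers for "remove"; the split mirrors
 * rbd_add()/rbd_add_single_major() above.
 */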
static ssize_t rbd_remove(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_remove(bus, buf, count);
}

static ssize_t rbd_remove_single_major(struct bus_type *bus,
				       const char *buf,
				       size_t count)
{
	return do_rbd_remove(bus, buf, count);
}

/*
 * create control files in sysfs
 * /sys/bus/rbd/...
 */
static int rbd_sysfs_init(void)
{
	int ret;

	ret = device_register(&rbd_root_dev);
	if (ret < 0)
		return ret;

	ret = bus_register(&rbd_bus_type);
	if (ret < 0)
		device_unregister(&rbd_root_dev);

	return ret;
}

static void rbd_sysfs_cleanup(void)
{
	bus_unregister(&rbd_bus_type);
	device_unregister(&rbd_root_dev);
}

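/*
 * Create the slab caches used for image requests, object requests
 * and object (segment) names.  All three must be created for the
 * driver to load.
 */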
static int rbd_slab_init(void)
{
	rbd_assert(!rbd_img_request_cache);
	rbd_img_request_cache = kmem_cache_create("rbd_img_request",
					sizeof (struct rbd_img_request),
					__alignof__(struct rbd_img_request),
					0, NULL);
	if (!rbd_img_request_cache)
		return -ENOMEM;

	rbd_assert(!rbd_obj_request_cache);
	rbd_obj_request_cache = kmem_cache_create("rbd_obj_request",
					sizeof (struct rbd_obj_request),
					__alignof__(struct rbd_obj_request),
					0, NULL);
	if (!rbd_obj_request_cache)
		goto out_err;

	rbd_assert(!rbd_segment_name_cache);
	rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
					CEPH_MAX_OID_NAME_LEN + 1, 1, 0, NULL);
	if (rbd_segment_name_cache)
		return 0;
out_err:
	if (rbd_obj_request_cache) {
		kmem_cache_destroy(rbd_obj_request_cache);
		rbd_obj_request_cache = NULL;
	}

	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;

	return -ENOMEM;
}

static void rbd_slab_exit(void)
{
	rbd_assert(rbd_segment_name_cache);
	kmem_cache_destroy(rbd_segment_name_cache);
	rbd_segment_name_cache = NULL;

	rbd_assert(rbd_obj_request_cache);
	kmem_cache_destroy(rbd_obj_request_cache);
	rbd_obj_request_cache = NULL;

	rbd_assert(rbd_img_request_cache);
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
}

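/*
 * Module init: check libceph compatibility, create the slab caches,
 * optionally register a single block major, then register the rbd
 * bus and root device in sysfs.
 */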
static int __init rbd_init(void)
{
	int rc;

	if (!libceph_compatible(NULL)) {
		rbd_warn(NULL, "libceph incompatibility (quitting)");
		return -EINVAL;
	}

	rc = rbd_slab_init();
	if (rc)
		return rc;

	if (single_major) {
		rbd_major = register_blkdev(0, RBD_DRV_NAME);
		if (rbd_major < 0) {
			rc = rbd_major;
			goto err_out_slab;
		}
	}

	rc = rbd_sysfs_init();
	if (rc)
		goto err_out_blkdev;

	if (single_major)
		pr_info("loaded (major %d)\n", rbd_major);
	else
		pr_info("loaded\n");

	return 0;

err_out_blkdev:
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
err_out_slab:
	rbd_slab_exit();
	return rc;
}

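/*
 * Module exit: undo rbd_init() and release the device id ida.
 */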
static void __exit rbd_exit(void)
{
	ida_destroy(&rbd_dev_id_ida);
	rbd_sysfs_cleanup();
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
	rbd_slab_exit();
}

module_init(rbd_init);
module_exit(rbd_exit);

MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");

MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
MODULE_LICENSE("GPL");