/*
   rbd.c -- Export ceph rados objects as a Linux block device


   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.



   For usage instructions, please refer to:

                 Documentation/ABI/testing/sysfs-bus-rbd

 */

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/workqueue.h>

#include "rbd_types.h"

#define RBD_DEBUG	/* Activate rbd_assert() calls */

/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define	SECTOR_SHIFT	9
#define	SECTOR_SIZE	(1ULL << SECTOR_SHIFT)

/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value, -EINVAL is
 * returned without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
	unsigned int counter;

	counter = (unsigned int)__atomic_add_unless(v, 1, 0);
	if (counter <= (unsigned int)INT_MAX)
		return (int)counter;

	atomic_dec(v);

	return -EINVAL;
}

/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
	int counter;

	counter = atomic_dec_return(v);
	if (counter >= 0)
		return counter;

	atomic_inc(v);

	return -EINVAL;
}

#define RBD_DRV_NAME "rbd"

#define RBD_MINORS_PER_MAJOR		256
#define RBD_SINGLE_MAJOR_PART_SHIFT	4

#define RBD_MAX_PARENT_CHAIN_LEN	16

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN	\
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

#define	BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64

/* Feature bits */

#define RBD_FEATURE_LAYERING	(1<<0)
#define RBD_FEATURE_STRIPINGV2	(1<<1)
#define RBD_FEATURES_ALL \
	    (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)

/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 * MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big
 * enough to hold all possible device names.
 */
#define DEV_NAME_LEN		32
#define MAX_INT_FORMAT_WIDTH	((5 * sizeof (int)) / 2 + 1)

/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These six fields never change for a given rbd image */
	char *object_prefix;
	__u8 obj_order;
	__u8 crypt_type;
	__u8 comp_type;
	u64 stripe_unit;
	u64 stripe_count;
	u64 features;		/* Might be changeable someday? */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
};

/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};

/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};

struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define	BAD_WHICH	U32_MAX		/* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);

enum obj_request_type {
	OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};

enum obj_operation_type {
	OBJ_OP_WRITE,
	OBJ_OP_READ,
	OBJ_OP_DISCARD,
};

enum obj_req_flags {
	OBJ_REQ_DONE,		/* completion flag: not done = 0, done = 1 */
	OBJ_REQ_IMG_DATA,	/* object usage: standalone = 0, image = 1 */
	OBJ_REQ_KNOWN,		/* EXISTS flag valid: no = 0, yes = 1 */
	OBJ_REQ_EXISTS,		/* target exists: no = 0, yes = 1 */
};

struct rbd_obj_request {
	const char		*object_name;
	u64			offset;		/* object start byte */
	u64			length;		/* bytes from offset */
	unsigned long		flags;

	/*
	 * An object request associated with an image will have its
	 * img_data flag set; a standalone object request will not.
	 *
	 * A standalone object request will have which == BAD_WHICH
	 * and a null obj_request pointer.
	 *
	 * An object request initiated in support of a layered image
	 * object (to check for its existence before a write) will
	 * have which == BAD_WHICH and a non-null obj_request pointer.
	 *
	 * Finally, an object request for rbd image data will have
	 * which != BAD_WHICH, and will have a non-null img_request
	 * pointer.  The value of which will be in the range
	 * 0..(img_request->obj_request_count-1).
	 */
	union {
		struct rbd_obj_request	*obj_request;	/* STAT op */
		struct {
			struct rbd_img_request	*img_request;
			u64			img_offset;
			/* links for img_request->obj_requests list */
			struct list_head	links;
		};
	};
	u32			which;		/* posn image request list */

	enum obj_request_type	type;
	union {
		struct bio	*bio_list;
		struct {
			struct page	**pages;
			u32		page_count;
		};
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;

	struct ceph_osd_request	*osd_req;

	u64			xferred;	/* bytes transferred */
	int			result;

	rbd_obj_callback_t	callback;
	struct completion	completion;

	struct kref		kref;
};

enum img_req_flags {
	IMG_REQ_WRITE,		/* I/O direction: read = 0, write = 1 */
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
	IMG_REQ_DISCARD,	/* discard: normal = 0, discard request = 1 */
};

struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	u64			offset;	/* starting image byte offset */
	u64			length;	/* byte count from offset */
	unsigned long		flags;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	union {
		struct request		*rq;		/* block request */
		struct rbd_obj_request	*obj_request;	/* obj req initiator */
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;
	spinlock_t		completion_lock;/* protects next_completion */
	u32			next_completion;
	rbd_img_callback_t	callback;
	u64			xferred;/* aggregate bytes transferred */
	int			result;	/* first nonzero obj_request result */

	u32			obj_request_count;
	struct list_head	obj_requests;	/* rbd_obj_request structs */

	struct kref		kref;
};

#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
	list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)

struct rbd_mapping {
	u64                     size;
	u64                     features;
	bool			read_only;
};

/*
 * a single device
 */
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	int			minor;
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;
	struct rbd_options	*opts;

	char			*header_name;

	struct ceph_file_layout	layout;

	struct ceph_osd_event   *watch_event;
	struct rbd_obj_request	*watch_request;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	atomic_t		parent_ref;
	struct rbd_device	*parent;

	/* Block layer tags. */
	struct blk_mq_tag_set	tag_set;

	/* protects updating the header */
	struct rw_semaphore     header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* sysfs related */
	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};

/*
 * Flag bits for rbd_dev->flags.  If atomicity is required,
 * rbd_dev->lock is used to protect access.
 *
 * Currently, only the "removing" flag (which is coupled with the
 * "open_count" field) requires atomic access.
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
};

static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);		/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;
static struct kmem_cache	*rbd_segment_name_cache;

static int rbd_major;
static DEFINE_IDA(rbd_dev_id_ida);

static struct workqueue_struct *rbd_wq;

/*
 * Default to false for now, as single-major requires >= 0.75 version of
 * userspace rbd utility.
 */
static bool single_major = false;
module_param(single_major, bool, S_IRUGO);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: false)");
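
/*
 * Illustrative mapping example; the authoritative interface description
 * lives in Documentation/ABI/testing/sysfs-bus-rbd.  An image is mapped
 * by writing "<mon_addrs> <options> <pool_name> <image_name> [<snap_name>]"
 * to the bus attribute, e.g.:
 *
 *   echo "1.2.3.4:6789 name=admin,secret=<key> rbd myimage" > /sys/bus/rbd/add
 *
 * With single_major=Y userspace writes to add_single_major instead, so
 * that all images share one major number.
 */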

static int rbd_img_request_submit(struct rbd_img_request *img_request);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);
static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
				    size_t count);
static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
				       size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);
static void rbd_spec_put(struct rbd_spec *spec);

static int rbd_dev_id_to_minor(int dev_id)
{
	return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
}

static int minor_to_rbd_dev_id(int minor)
{
	return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
}
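
/*
 * Illustrative note: with RBD_SINGLE_MAJOR_PART_SHIFT == 4 each mapped
 * device owns 16 consecutive minors, so dev_id 2 starts at minor 32 and
 * minors 32..47 cover the whole device plus its partitions;
 * minor_to_rbd_dev_id() simply inverts that mapping.
 */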

static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major);
static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major);

static struct attribute *rbd_bus_attrs[] = {
	&bus_attr_add.attr,
	&bus_attr_remove.attr,
	&bus_attr_add_single_major.attr,
	&bus_attr_remove_single_major.attr,
	NULL,
};

static umode_t rbd_bus_is_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
	if (!single_major &&
	    (attr == &bus_attr_add_single_major.attr ||
	     attr == &bus_attr_remove_single_major.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group rbd_bus_group = {
	.attrs = rbd_bus_attrs,
	.is_visible = rbd_bus_is_visible,
};
__ATTRIBUTE_GROUPS(rbd_bus);

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_groups	= rbd_bus_groups,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =    "rbd",
	.release =      rbd_root_dev_release,
};

static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}

#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */

static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request);
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_header_info(struct rbd_device *rbd_dev);
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features);

static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	(void) get_device(&rbd_dev->dev);

	return 0;
}

static void rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	put_device(&rbd_dev->dev);
}

static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
{
	int ret = 0;
	int val;
	bool ro;
	bool ro_changed = false;

	/* get_user() may sleep, so call it before taking rbd_dev->lock */
	if (get_user(val, (int __user *)(arg)))
		return -EFAULT;

	ro = val ? true : false;
	/* Snapshots can't be written to */
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	/* prevent others open this device */
	if (rbd_dev->open_count > 1) {
		ret = -EBUSY;
		goto out;
	}

	if (rbd_dev->mapping.read_only != ro) {
		rbd_dev->mapping.read_only = ro;
		ro_changed = true;
	}

out:
	spin_unlock_irq(&rbd_dev->lock);
	/* set_disk_ro() may sleep, so call it after releasing rbd_dev->lock */
	if (ret == 0 && ro_changed)
		set_disk_ro(rbd_dev->disk, ro ? 1 : 0);

	return ret;
}

static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	int ret = 0;

	switch (cmd) {
	case BLKROSET:
		ret = rbd_ioctl_set_ro(rbd_dev, arg);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
				unsigned int cmd, unsigned long arg)
{
	return rbd_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */

static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
	.ioctl			= rbd_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= rbd_compat_ioctl,
#endif
};

/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.  Caller holds client_mutex.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
	if (IS_ERR(rbdc->client))
		goto out_rbdc;
	ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_client;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;
out_client:
	ceph_destroy_client(rbdc->client);
out_rbdc:
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}

static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}

/*
 * (Per device) rbd map options
 */
enum {
	Opt_queue_depth,
	Opt_last_int,
	/* int args above */
	Opt_last_string,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	Opt_err
};

static match_table_t rbd_opts_tokens = {
	{Opt_queue_depth, "queue_depth=%d"},
	/* int args above */
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	{Opt_err, NULL}
};

struct rbd_options {
	int	queue_depth;
	bool	read_only;
};

#define RBD_QUEUE_DEPTH_DEFAULT	BLKDEV_MAX_RQ
#define RBD_READ_ONLY_DEFAULT	false

static int parse_rbd_opts_token(char *c, void *private)
{
	struct rbd_options *rbd_opts = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad mount option arg (not int) at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token, argstr[0].from);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_queue_depth:
		if (intval < 1) {
			pr_err("queue_depth out of range\n");
			return -EINVAL;
		}
		rbd_opts->queue_depth = intval;
		break;
	case Opt_read_only:
		rbd_opts->read_only = true;
		break;
	case Opt_read_write:
		rbd_opts->read_only = false;
		break;
	default:
		/* libceph prints "bad option" msg */
		return -EINVAL;
	}

	return 0;
}
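
/*
 * Illustrative example: the options part of the add string is handed to
 * this callback one comma-separated token at a time, so an options
 * string such as "read_only,queue_depth=128" results in two calls that
 * set rbd_opts->read_only and rbd_opts->queue_depth respectively.
 */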

static char* obj_op_name(enum obj_operation_type op_type)
{
	switch (op_type) {
	case OBJ_OP_READ:
		return "read";
	case OBJ_OP_WRITE:
		return "write";
	case OBJ_OP_DISCARD:
		return "discard";
	default:
		return "???";
	}
}

/*
 * Get a ceph client with specific addr and configuration.  If one does
 * not exist, create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;

	mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
	rbdc = rbd_client_find(ceph_opts);
	if (rbdc)	/* using an existing client */
		ceph_destroy_options(ceph_opts);
	else
		rbdc = rbd_client_create(ceph_opts);
	mutex_unlock(&client_mutex);

	return rbdc;
}

/*
 * Destroy ceph client
 *
 * Caller must hold rbd_client_list_lock.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}

static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}

static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}

/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
				 struct rbd_image_header_ondisk *ondisk)
{
	struct rbd_image_header *header = &rbd_dev->header;
	bool first_time = header->object_prefix == NULL;
	struct ceph_snap_context *snapc;
	char *object_prefix = NULL;
	char *snap_names = NULL;
	u64 *snap_sizes = NULL;
	u32 snap_count;
	size_t size;
	int ret = -ENOMEM;
	u32 i;

	/* Allocate this now to avoid having to handle failure below */

	if (first_time) {
		size_t len;

		len = strnlen(ondisk->object_prefix,
				sizeof (ondisk->object_prefix));
		object_prefix = kmalloc(len + 1, GFP_KERNEL);
		if (!object_prefix)
			return -ENOMEM;
		memcpy(object_prefix, ondisk->object_prefix, len);
		object_prefix[len] = '\0';
	}

	/* Allocate the snapshot context and fill it in */

	snap_count = le32_to_cpu(ondisk->snap_count);
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc)
		goto out_err;
	snapc->seq = le64_to_cpu(ondisk->snap_seq);
	if (snap_count) {
		struct rbd_image_snap_ondisk *snaps;
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* We'll keep a copy of the snapshot names... */

		if (snap_names_len > (u64)SIZE_MAX)
			goto out_2big;
		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!snap_names)
			goto out_err;

		/* ...as well as the array of their sizes. */

		size = snap_count * sizeof (*header->snap_sizes);
		snap_sizes = kmalloc(size, GFP_KERNEL);
		if (!snap_sizes)
			goto out_err;

		/*
		 * Copy the names, and fill in each snapshot's id
		 * and size.
		 *
		 * Note that rbd_dev_v1_header_info() guarantees the
		 * ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, this memcpy() is safe.
		 */
		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
		snaps = ondisk->snaps;
		for (i = 0; i < snap_count; i++) {
			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
		}
	}

	/* We won't fail any more, fill in the header */

	if (first_time) {
		header->object_prefix = object_prefix;
		header->obj_order = ondisk->options.order;
		header->crypt_type = ondisk->options.crypt_type;
		header->comp_type = ondisk->options.comp_type;
		/* The rest aren't used for format 1 images */
		header->stripe_unit = 0;
		header->stripe_count = 0;
		header->features = 0;
	} else {
		ceph_put_snap_context(header->snapc);
		kfree(header->snap_names);
		kfree(header->snap_sizes);
	}

	/* The remaining fields always get updated (when we refresh) */

	header->image_size = le64_to_cpu(ondisk->image_size);
	header->snapc = snapc;
	header->snap_names = snap_names;
	header->snap_sizes = snap_sizes;

	return 0;
out_2big:
	ret = -EIO;
out_err:
	kfree(snap_sizes);
	kfree(snap_names);
	ceph_put_snap_context(snapc);
	kfree(object_prefix);

	return ret;
}

static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}

/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}

/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is in kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 *found;

	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);

	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}
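
/*
 * Worked example: the osd keeps snapc->snaps sorted in descending
 * order, e.g. { 12, 7, 3 }.  Looking up snap_id 7 lands on index 1,
 * while an id that is not present (say 5) yields BAD_SNAP_INDEX.
 */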

static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	u32 which;
	const char *snap_name;

	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
		return ERR_PTR(-ENOENT);

	snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
	return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}

static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;

		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
}

static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
			u64 *snap_features)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_features = rbd_dev->header.features;
	} else if (rbd_dev->image_format == 1) {
		*snap_features = 0;	/* No features for format 1 */
	} else {
		u64 features = 0;
		int ret;

		ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
		if (ret)
			return ret;

		*snap_features = features;
	}
	return 0;
}

static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	u64 snap_id = rbd_dev->spec->snap_id;
	u64 size = 0;
	u64 features = 0;
	int ret;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;
	ret = rbd_snap_features(rbd_dev, snap_id, &features);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	rbd_dev->mapping.features = features;

	return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
}

static void rbd_segment_name_free(const char *name)
{
	/* The explicit cast here is needed to drop the const qualifier */

	kmem_cache_free(rbd_segment_name_cache, (void *)name);
}

static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
{
	char *name;
	u64 segment;
	int ret;
	char *name_format;

	name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
	if (!name)
		return NULL;
	segment = offset >> rbd_dev->header.obj_order;
	name_format = "%s.%012llx";
	if (rbd_dev->image_format == 2)
		name_format = "%s.%016llx";
	ret = snprintf(name, CEPH_MAX_OID_NAME_LEN + 1, name_format,
			rbd_dev->header.object_prefix, segment);
	if (ret < 0 || ret > CEPH_MAX_OID_NAME_LEN) {
		pr_err("error formatting segment name for #%llu (%d)\n",
			segment, ret);
		rbd_segment_name_free(name);
		name = NULL;
	}

	return name;
}

static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	return offset & (segment_size - 1);
}

static u64 rbd_segment_length(struct rbd_device *rbd_dev,
				u64 offset, u64 length)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	offset &= segment_size - 1;

	rbd_assert(length <= U64_MAX - offset);
	if (offset + length > segment_size)
		length = segment_size - offset;

	return length;
}
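
/*
 * Worked example: with obj_order 22 (4 MiB objects), image offset
 * 0xd00000 falls in segment 3, the offset within that segment is
 * 0x100000, and a 5 MiB request starting there is clipped by
 * rbd_segment_length() to the 3 MiB remaining in the segment.  A
 * format 1 image names that object "<object_prefix>.000000000003";
 * format 2 uses 16 hex digits.
 */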

/*
 * returns the size of an object in the image
 */
static u64 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1 << header->obj_order;
}

/*
 * bio helpers
 */

static void bio_chain_put(struct bio *chain)
{
	struct bio *tmp;

	while (chain) {
		tmp = chain;
		chain = chain->bi_next;
		bio_put(tmp);
	}
}

/*
 * zeros a bio chain, starting at specific offset
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned long flags;
	void *buf;
	int pos = 0;

	while (chain) {
		bio_for_each_segment(bv, chain, iter) {
			if (pos + bv.bv_len > start_ofs) {
				int remainder = max(start_ofs - pos, 0);
				buf = bvec_kmap_irq(&bv, &flags);
				memset(buf + remainder, 0,
				       bv.bv_len - remainder);
				flush_dcache_page(bv.bv_page);
				bvec_kunmap_irq(buf, &flags);
			}
			pos += bv.bv_len;
		}

		chain = chain->bi_next;
	}
}

/*
 * similar to zero_bio_chain(), zeros data defined by a page array,
 * starting at the given byte offset from the start of the array and
 * continuing up to the given end offset.  The pages array is
 * assumed to be big enough to hold all bytes up to the end.
 */
static void zero_pages(struct page **pages, u64 offset, u64 end)
{
	struct page **page = &pages[offset >> PAGE_SHIFT];

	rbd_assert(end > offset);
	rbd_assert(end - offset <= (u64)SIZE_MAX);
	while (offset < end) {
		size_t page_offset;
		size_t length;
		unsigned long flags;
		void *kaddr;

		page_offset = offset & ~PAGE_MASK;
		length = min_t(size_t, PAGE_SIZE - page_offset, end - offset);
		local_irq_save(flags);
		kaddr = kmap_atomic(*page);
		memset(kaddr + page_offset, 0, length);
		flush_dcache_page(*page);
		kunmap_atomic(kaddr);
		local_irq_restore(flags);

		offset += length;
		page++;
	}
}

/*
 * Clone a portion of a bio, starting at the given byte offset
 * and continuing for the number of bytes indicated.
 */
static struct bio *bio_clone_range(struct bio *bio_src,
					unsigned int offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bio;

	bio = bio_clone(bio_src, gfpmask);
	if (!bio)
		return NULL;	/* ENOMEM */

	bio_advance(bio, offset);
	bio->bi_iter.bi_size = len;

	return bio;
}

/*
 * Clone a portion of a bio chain, starting at the given byte offset
 * into the first bio in the source chain and continuing for the
 * number of bytes indicated.  The result is another bio chain of
 * exactly the given length, or a null pointer on error.
 *
 * The bio_src and offset parameters are both in-out.  On entry they
 * refer to the first source bio and the offset into that bio where
 * the start of data to be cloned is located.
 *
 * On return, bio_src is updated to refer to the bio in the source
 * chain that contains first un-cloned byte, and *offset will
 * contain the offset of that byte within that bio.
 */
static struct bio *bio_chain_clone_range(struct bio **bio_src,
					unsigned int *offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bi = *bio_src;
	unsigned int off = *offset;
	struct bio *chain = NULL;
	struct bio **end;

	/* Build up a chain of clone bios up to the limit */

	if (!bi || off >= bi->bi_iter.bi_size || !len)
		return NULL;		/* Nothing to clone */

	end = &chain;
	while (len) {
		unsigned int bi_size;
		struct bio *bio;

		if (!bi) {
			rbd_warn(NULL, "bio_chain exhausted with %u left", len);
			goto out_err;	/* EINVAL; ran out of bio's */
		}
		bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len);
		bio = bio_clone_range(bi, off, bi_size, gfpmask);
		if (!bio)
			goto out_err;	/* ENOMEM */

		*end = bio;
		end = &bio->bi_next;

		off += bi_size;
		if (off == bi->bi_iter.bi_size) {
			bi = bi->bi_next;
			off = 0;
		}
		len -= bi_size;
	}
	*bio_src = bi;
	*offset = off;

	return chain;
out_err:
	bio_chain_put(chain);

	return NULL;
}

/*
 * The default/initial value for all object request flags is 0.  For
 * each flag, once its value is set to 1 it is never reset to 0
 * again.
 */
static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
		struct rbd_device *rbd_dev;

		rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked img_data",
			obj_request);
	}
}

static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
}

static void obj_request_done_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
		struct rbd_device *rbd_dev = NULL;

		if (obj_request_img_data_test(obj_request))
			rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked done",
			obj_request);
	}
}

static bool obj_request_done_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
}

/*
 * This sets the KNOWN flag after (possibly) setting the EXISTS
 * flag.  The latter is set based on the "exists" value provided.
 *
 * Note that for our purposes once an object exists it never goes
 * away again.  It's possible that the response from two existence
 * checks are separated by the creation of the target object, and
 * the first ("doesn't exist") response arrives *after* the second
 * ("does exist").  In that case we ignore the second one.
 */
static void obj_request_existence_set(struct rbd_obj_request *obj_request,
				bool exists)
{
	if (exists)
		set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
	set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
	smp_mb();
}

static bool obj_request_known_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
}

static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
}

static bool obj_request_overlaps_parent(struct rbd_obj_request *obj_request)
{
	struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;

	return obj_request->img_offset <
	    round_up(rbd_dev->parent_overlap, rbd_obj_bytes(&rbd_dev->header));
}

static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_get(&obj_request->kref);
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static void rbd_img_request_get(struct rbd_img_request *img_request)
{
	dout("%s: img %p (was %d)\n", __func__, img_request,
	     atomic_read(&img_request->kref.refcount));
	kref_get(&img_request->kref);
}

static bool img_request_child_test(struct rbd_img_request *img_request);
static void rbd_parent_request_destroy(struct kref *kref);
static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
	rbd_assert(img_request != NULL);
	dout("%s: img %p (was %d)\n", __func__, img_request,
		atomic_read(&img_request->kref.refcount));
	if (img_request_child_test(img_request))
		kref_put(&img_request->kref, rbd_parent_request_destroy);
	else
		kref_put(&img_request->kref, rbd_img_request_destroy);
}

static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	/* Image request now owns object's original reference */
	obj_request->img_request = img_request;
	obj_request->which = img_request->obj_request_count;
	rbd_assert(!obj_request_img_data_test(obj_request));
	obj_request_img_data_set(obj_request);
	rbd_assert(obj_request->which != BAD_WHICH);
	img_request->obj_request_count++;
	list_add_tail(&obj_request->links, &img_request->obj_requests);
	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->which != BAD_WHICH);

	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
	list_del(&obj_request->links);
	rbd_assert(img_request->obj_request_count > 0);
	img_request->obj_request_count--;
	rbd_assert(obj_request->which == img_request->obj_request_count);
	obj_request->which = BAD_WHICH;
	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request->img_request == img_request);
	obj_request->img_request = NULL;
	obj_request->callback = NULL;
	rbd_obj_request_put(obj_request);
}

static bool obj_request_type_valid(enum obj_request_type type)
{
	switch (type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_PAGES:
		return true;
	default:
		return false;
	}
}

static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
				struct rbd_obj_request *obj_request)
{
	dout("%s %p\n", __func__, obj_request);
	return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
}

static void rbd_obj_request_end(struct rbd_obj_request *obj_request)
{
	dout("%s %p\n", __func__, obj_request);
	ceph_osdc_cancel_request(obj_request->osd_req);
}

/*
 * Wait for an object request to complete.  If interrupted, cancel the
 * underlying osd request.
 *
 * @timeout: in jiffies, 0 means "wait forever"
 */
static int __rbd_obj_request_wait(struct rbd_obj_request *obj_request,
				  unsigned long timeout)
{
	long ret;

	dout("%s %p\n", __func__, obj_request);
	ret = wait_for_completion_interruptible_timeout(
					&obj_request->completion,
					ceph_timeout_jiffies(timeout));
	if (ret <= 0) {
		if (ret == 0)
			ret = -ETIMEDOUT;
		rbd_obj_request_end(obj_request);
	} else {
		ret = 0;
	}

	dout("%s %p ret %d\n", __func__, obj_request, (int)ret);
	return ret;
}

static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
{
	return __rbd_obj_request_wait(obj_request, 0);
}

static int rbd_obj_request_wait_timeout(struct rbd_obj_request *obj_request,
					unsigned long timeout)
{
	return __rbd_obj_request_wait(obj_request, timeout);
}

static void rbd_img_request_complete(struct rbd_img_request *img_request)
{

	dout("%s: img %p\n", __func__, img_request);

	/*
	 * If no error occurred, compute the aggregate transfer
	 * count for the image request.  We could instead use
	 * atomic64_cmpxchg() to update it as each object request
	 * completes; not clear which way is better off hand.
	 */
	if (!img_request->result) {
		struct rbd_obj_request *obj_request;
		u64 xferred = 0;

		for_each_obj_request(img_request, obj_request)
			xferred += obj_request->xferred;
		img_request->xferred = xferred;
	}

A
		img_request->callback(img_request);
	else
		rbd_img_request_put(img_request);
}

A
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never change thereafter.
 */
static void img_request_write_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_WRITE, &img_request->flags);
	smp_mb();
}

static bool img_request_write_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
}

1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662
/*
 * Set the discard flag when the img_request is an discard request
 */
static void img_request_discard_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_DISCARD, &img_request->flags);
	smp_mb();
}

static bool img_request_discard_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_DISCARD, &img_request->flags) != 0;
}

static void img_request_child_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static void img_request_child_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static bool img_request_child_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
}

static void img_request_layered_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static void img_request_layered_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}

static enum obj_operation_type
rbd_img_request_op_type(struct rbd_img_request *img_request)
{
	if (img_request_write_test(img_request))
		return OBJ_OP_WRITE;
	else if (img_request_discard_test(img_request))
		return OBJ_OP_DISCARD;
	else
		return OBJ_OP_READ;
}

static void
rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
{
	u64 xferred = obj_request->xferred;
	u64 length = obj_request->length;

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, obj_request->img_request, obj_request->result,
		xferred, length);
	/*
	 * ENOENT means a hole in the image.  We zero-fill the entire
	 * length of the request.  A short read also implies zero-fill
	 * to the end of the request.  An error requires the whole
	 * length of the request to be reported finished with an error
	 * to the block layer.  In each case we update the xferred
	 * count to indicate the whole request was satisfied.
	 */
	rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
	if (obj_request->result == -ENOENT) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, 0);
		else
			zero_pages(obj_request->pages, 0, length);
		obj_request->result = 0;
	} else if (xferred < length && !obj_request->result) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, xferred);
		else
			zero_pages(obj_request->pages, xferred, length);
	}
	obj_request->xferred = length;
	obj_request_done_set(obj_request);
}

static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p cb %p\n", __func__, obj_request,
		obj_request->callback);
	if (obj_request->callback)
		obj_request->callback(obj_request);
	else
		complete_all(&obj_request->completion);
}

static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}

static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = NULL;
	struct rbd_device *rbd_dev = NULL;
	bool layered = false;

	if (obj_request_img_data_test(obj_request)) {
		img_request = obj_request->img_request;
		layered = img_request && img_request_layered_test(img_request);
		rbd_dev = img_request->rbd_dev;
	}

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, img_request, obj_request->result,
		obj_request->xferred, obj_request->length);
	if (layered && obj_request->result == -ENOENT &&
			obj_request->img_offset < rbd_dev->parent_overlap)
		rbd_img_parent_read(obj_request);
	else if (img_request)
		rbd_img_obj_request_read_callback(obj_request);
	else
		obj_request_done_set(obj_request);
}

static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short write.  Set
	 * it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
	obj_request_done_set(obj_request);
}

static void rbd_osd_discard_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short discard.  Set
	 * it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
	/* discarding a non-existent object is not a problem */
	if (obj_request->result == -ENOENT)
		obj_request->result = 0;
	obj_request_done_set(obj_request);
}

/*
 * For a simple stat call there's nothing to do.  We'll do more if
 * this is part of a write sequence for a layered image.
 */
static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}

static void rbd_osd_call_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);

	if (obj_request_img_data_test(obj_request))
		rbd_osd_copyup_callback(obj_request);
	else
		obj_request_done_set(obj_request);
}

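/*
 * Completion callback for rbd osd requests.  Record the result and
 * transferred byte count, then dispatch on the (first) op code to
 * the per-op handlers above.
 */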
static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
				struct ceph_msg *msg)
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;
	u16 opcode;

	dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
	rbd_assert(osd_req == obj_request->osd_req);
	if (obj_request_img_data_test(obj_request)) {
		rbd_assert(obj_request->img_request);
		rbd_assert(obj_request->which != BAD_WHICH);
	} else {
		rbd_assert(obj_request->which == BAD_WHICH);
	}

	if (osd_req->r_result < 0)
		obj_request->result = osd_req->r_result;

	/*
	 * We support a 64-bit length, but ultimately it has to be
	 * passed to the block layer, which just supports a 32-bit
	 * length field.
	 */
	obj_request->xferred = osd_req->r_ops[0].outdata_len;
	rbd_assert(obj_request->xferred < (u64)UINT_MAX);

	opcode = osd_req->r_ops[0].op;
	switch (opcode) {
	case CEPH_OSD_OP_READ:
		rbd_osd_read_callback(obj_request);
		break;
	case CEPH_OSD_OP_SETALLOCHINT:
		rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE ||
			   osd_req->r_ops[1].op == CEPH_OSD_OP_WRITEFULL);
		/* fall through */
	case CEPH_OSD_OP_WRITE:
	case CEPH_OSD_OP_WRITEFULL:
		rbd_osd_write_callback(obj_request);
		break;
	case CEPH_OSD_OP_STAT:
		rbd_osd_stat_callback(obj_request);
		break;
	case CEPH_OSD_OP_DELETE:
	case CEPH_OSD_OP_TRUNCATE:
	case CEPH_OSD_OP_ZERO:
		rbd_osd_discard_callback(obj_request);
		break;
	case CEPH_OSD_OP_CALL:
		rbd_osd_call_callback(obj_request);
		break;
	case CEPH_OSD_OP_NOTIFY_ACK:
	case CEPH_OSD_OP_WATCH:
		rbd_osd_trivial_callback(obj_request);
		break;
	default:
		rbd_warn(NULL, "%s: unsupported op %hu",
			obj_request->object_name, (unsigned short) opcode);
		break;
	}

	if (obj_request_done_test(obj_request))
		rbd_obj_request_complete(obj_request);
}

static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;
	u64 snap_id;

	rbd_assert(osd_req != NULL);

	snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
	ceph_osdc_build_request(osd_req, obj_request->offset,
			NULL, snap_id, NULL);
}

static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;
	struct ceph_snap_context *snapc;
	struct timespec mtime = CURRENT_TIME;

	rbd_assert(osd_req != NULL);

	snapc = img_request ? img_request->snapc : NULL;
	ceph_osdc_build_request(osd_req, obj_request->offset,
			snapc, CEPH_NOSNAP, &mtime);
}

/*
 * Create an osd request.  A read request has one osd op (read).
 * A write request has either one (watch) or two (hint+write) osd ops.
 * (All rbd data writes are prefixed with an allocation hint op, but
 * technically osd watch is a write request, hence this distinction.)
 */
static struct ceph_osd_request *rbd_osd_req_create(
					struct rbd_device *rbd_dev,
					enum obj_operation_type op_type,
					unsigned int num_ops,
					struct rbd_obj_request *obj_request)
{
	struct ceph_snap_context *snapc = NULL;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;

	if (obj_request_img_data_test(obj_request) &&
		(op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_WRITE)) {
		struct rbd_img_request *img_request = obj_request->img_request;
		if (op_type == OBJ_OP_WRITE) {
			rbd_assert(img_request_write_test(img_request));
		} else {
			rbd_assert(img_request_discard_test(img_request));
		}
		snapc = img_request->snapc;
	}

	rbd_assert(num_ops == 1 || ((op_type == OBJ_OP_WRITE) && num_ops == 2));

	/* Allocate and initialize the request, for the num_ops ops */

	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
					  GFP_NOIO);
	if (!osd_req)
		goto fail;

	if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
		osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	else
		osd_req->r_flags = CEPH_OSD_FLAG_READ;

	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
	ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);

	if (ceph_osdc_alloc_messages(osd_req, GFP_NOIO))
		goto fail;

	return osd_req;

fail:
	ceph_osdc_put_request(osd_req);
	return NULL;
}

/*
 * Create a copyup osd request based on the information in the object
 * request supplied.  A copyup request has two or three osd ops, a
 * copyup method call, potentially a hint op, and a write or truncate
 * or zero op.
 */
static struct ceph_osd_request *
rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	struct ceph_snap_context *snapc;
	struct rbd_device *rbd_dev;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;
	int num_osd_ops = 3;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;
	rbd_assert(img_request);
	rbd_assert(img_request_write_test(img_request) ||
			img_request_discard_test(img_request));

	if (img_request_discard_test(img_request))
		num_osd_ops = 2;

	/* Allocate and initialize the request, for all the ops */

	snapc = img_request->snapc;
	rbd_dev = img_request->rbd_dev;
	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, num_osd_ops,
						false, GFP_NOIO);
	if (!osd_req)
		goto fail;

	osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
	ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);

	if (ceph_osdc_alloc_messages(osd_req, GFP_NOIO))
		goto fail;

	return osd_req;

fail:
	ceph_osdc_put_request(osd_req);
	return NULL;
}


static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
	ceph_osdc_put_request(osd_req);
}

/* object_name is assumed to be a non-null pointer and NUL-terminated */

static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
						u64 offset, u64 length,
						enum obj_request_type type)
{
	struct rbd_obj_request *obj_request;
	size_t size;
	char *name;

	rbd_assert(obj_request_type_valid(type));

	size = strlen(object_name) + 1;
	name = kmalloc(size, GFP_NOIO);
	if (!name)
		return NULL;

	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
	if (!obj_request) {
		kfree(name);
		return NULL;
	}

	obj_request->object_name = memcpy(name, object_name, size);
	obj_request->offset = offset;
	obj_request->length = length;
	obj_request->flags = 0;
	obj_request->which = BAD_WHICH;
	obj_request->type = type;
	INIT_LIST_HEAD(&obj_request->links);
	init_completion(&obj_request->completion);
	kref_init(&obj_request->kref);

	dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
		offset, length, (int)type, obj_request);

	return obj_request;
}

static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	dout("%s: obj %p\n", __func__, obj_request);

	rbd_assert(obj_request->img_request == NULL);
	rbd_assert(obj_request->which == BAD_WHICH);

	if (obj_request->osd_req)
		rbd_osd_req_destroy(obj_request->osd_req);

	rbd_assert(obj_request_type_valid(obj_request->type));
	switch (obj_request->type) {
	case OBJ_REQUEST_NODATA:
		break;		/* Nothing to do */
	case OBJ_REQUEST_BIO:
		if (obj_request->bio_list)
			bio_chain_put(obj_request->bio_list);
		break;
	case OBJ_REQUEST_PAGES:
		if (obj_request->pages)
			ceph_release_page_vector(obj_request->pages,
						obj_request->page_count);
		break;
	}

	kfree(obj_request->object_name);
	obj_request->object_name = NULL;
	kmem_cache_free(rbd_obj_request_cache, obj_request);
}

/* It's OK to call this for a device with no parent */

static void rbd_spec_put(struct rbd_spec *spec);
static void rbd_dev_unparent(struct rbd_device *rbd_dev)
{
	rbd_dev_remove_parent(rbd_dev);
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;
}

/*
 * Parent image reference counting is used to determine when an
 * image's parent fields can be safely torn down--after there are no
 * more in-flight requests to the parent image.  When the last
 * reference is dropped, cleaning them up is safe.
 */
static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return;

	counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
	if (counter > 0)
		return;

	/* Last reference; clean up parent data structures */

	if (!counter)
		rbd_dev_unparent(rbd_dev);
	else
		rbd_warn(rbd_dev, "parent reference underflow");
}

/*
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or
 * false otherwise.
 */
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
	int counter = 0;

	if (!rbd_dev->parent_spec)
		return false;

	down_read(&rbd_dev->header_rwsem);
	if (rbd_dev->parent_overlap)
		counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
	up_read(&rbd_dev->header_rwsem);

	if (counter < 0)
		rbd_warn(rbd_dev, "parent reference overflow");

	return counter > 0;
}

/*
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 */
static struct rbd_img_request *rbd_img_request_create(
					struct rbd_device *rbd_dev,
					u64 offset, u64 length,
					enum obj_operation_type op_type,
					struct ceph_snap_context *snapc)
{
	struct rbd_img_request *img_request;

	img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_NOIO);
	if (!img_request)
		return NULL;

	img_request->rq = NULL;
	img_request->rbd_dev = rbd_dev;
	img_request->offset = offset;
	img_request->length = length;
	img_request->flags = 0;
	if (op_type == OBJ_OP_DISCARD) {
		img_request_discard_set(img_request);
		img_request->snapc = snapc;
	} else if (op_type == OBJ_OP_WRITE) {
		img_request_write_set(img_request);
		img_request->snapc = snapc;
	} else {
		img_request->snap_id = rbd_dev->spec->snap_id;
	}
	if (rbd_dev_parent_get(rbd_dev))
		img_request_layered_set(img_request);
	spin_lock_init(&img_request->completion_lock);
	img_request->next_completion = 0;
	img_request->callback = NULL;
	img_request->result = 0;
	img_request->obj_request_count = 0;
	INIT_LIST_HEAD(&img_request->obj_requests);
	kref_init(&img_request->kref);

	dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
		obj_op_name(op_type), offset, length, img_request);

	return img_request;
}

static void rbd_img_request_destroy(struct kref *kref)
{
	struct rbd_img_request *img_request;
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	img_request = container_of(kref, struct rbd_img_request, kref);

	dout("%s: img %p\n", __func__, img_request);

	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);
	rbd_assert(img_request->obj_request_count == 0);

	if (img_request_layered_test(img_request)) {
		img_request_layered_clear(img_request);
		rbd_dev_parent_put(img_request->rbd_dev);
	}

	if (img_request_write_test(img_request) ||
		img_request_discard_test(img_request))
		ceph_put_snap_context(img_request->snapc);

	kmem_cache_free(rbd_img_request_cache, img_request);
}

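/*
 * Create an image request directed at the parent image, covering the
 * given range, on behalf of an object request that needs parent data.
 */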
static struct rbd_img_request *rbd_parent_request_create(
					struct rbd_obj_request *obj_request,
					u64 img_offset, u64 length)
{
	struct rbd_img_request *parent_request;
	struct rbd_device *rbd_dev;

	rbd_assert(obj_request->img_request);
	rbd_dev = obj_request->img_request->rbd_dev;

	parent_request = rbd_img_request_create(rbd_dev->parent, img_offset,
						length, OBJ_OP_READ, NULL);
	if (!parent_request)
		return NULL;

	img_request_child_set(parent_request);
	rbd_obj_request_get(obj_request);
	parent_request->obj_request = obj_request;

	return parent_request;
}

static void rbd_parent_request_destroy(struct kref *kref)
{
	struct rbd_img_request *parent_request;
	struct rbd_obj_request *orig_request;

	parent_request = container_of(kref, struct rbd_img_request, kref);
	orig_request = parent_request->obj_request;

	parent_request->obj_request = NULL;
	rbd_obj_request_put(orig_request);
	img_request_child_clear(parent_request);

	rbd_img_request_destroy(kref);
}

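/*
 * Finish one object request belonging to an image request.  For a
 * request that came from the block layer, report the completed byte
 * range (or the error) via blk_update_request().  Returns true while
 * more of the image request remains to be completed.
 */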
static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	unsigned int xferred;
	int result;
	bool more;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;

	rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
	xferred = (unsigned int)obj_request->xferred;
	result = obj_request->result;
	if (result) {
		struct rbd_device *rbd_dev = img_request->rbd_dev;
		enum obj_operation_type op_type;

		if (img_request_discard_test(img_request))
			op_type = OBJ_OP_DISCARD;
		else if (img_request_write_test(img_request))
			op_type = OBJ_OP_WRITE;
		else
			op_type = OBJ_OP_READ;

		rbd_warn(rbd_dev, "%s %llx at %llx (%llx)",
			obj_op_name(op_type), obj_request->length,
			obj_request->img_offset, obj_request->offset);
		rbd_warn(rbd_dev, "  result %d xferred %x",
			result, xferred);
		if (!img_request->result)
			img_request->result = result;
		/*
		 * Need to end I/O on the entire obj_request worth of
		 * bytes in case of error.
		 */
		xferred = obj_request->length;
	}

	/* Image object requests don't own their page array */

	if (obj_request->type == OBJ_REQUEST_PAGES) {
		obj_request->pages = NULL;
		obj_request->page_count = 0;
	}

	if (img_request_child_test(img_request)) {
		rbd_assert(img_request->obj_request != NULL);
		more = obj_request->which < img_request->obj_request_count - 1;
	} else {
		rbd_assert(img_request->rq != NULL);

		more = blk_update_request(img_request->rq, result, xferred);
		if (!more)
			__blk_mq_end_request(img_request->rq, result);
	}

	return more;
}

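/*
 * Per-object completion callback for image requests.  Object requests
 * may complete in any order, but they are accounted for in order,
 * starting from next_completion.
 */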
static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	u32 which = obj_request->which;
	bool more = true;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;

	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
	rbd_assert(img_request != NULL);
	rbd_assert(img_request->obj_request_count > 0);
	rbd_assert(which != BAD_WHICH);
	rbd_assert(which < img_request->obj_request_count);

	spin_lock_irq(&img_request->completion_lock);
	if (which != img_request->next_completion)
		goto out;

	for_each_obj_request_from(img_request, obj_request) {
		rbd_assert(more);
		rbd_assert(which < img_request->obj_request_count);

		if (!obj_request_done_test(obj_request))
			break;
		more = rbd_img_obj_end_request(obj_request);
		which++;
	}

	rbd_assert(more ^ (which == img_request->obj_request_count));
	img_request->next_completion = which;
out:
	spin_unlock_irq(&img_request->completion_lock);
	rbd_img_request_put(img_request);

	if (!more)
		rbd_img_request_complete(img_request);
}

/*
 * Add individual osd ops to the given ceph_osd_request and prepare
 * them for submission. num_ops is the current number of
 * osd operations already added to the object request.
 */
static void rbd_img_obj_request_fill(struct rbd_obj_request *obj_request,
				struct ceph_osd_request *osd_request,
				enum obj_operation_type op_type,
				unsigned int num_ops)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct rbd_device *rbd_dev = img_request->rbd_dev;
	u64 object_size = rbd_obj_bytes(&rbd_dev->header);
	u64 offset = obj_request->offset;
	u64 length = obj_request->length;
	u64 img_end;
	u16 opcode;

	if (op_type == OBJ_OP_DISCARD) {
		if (!offset && length == object_size &&
		    (!img_request_layered_test(img_request) ||
		     !obj_request_overlaps_parent(obj_request))) {
			opcode = CEPH_OSD_OP_DELETE;
		} else if (offset + length == object_size) {
			opcode = CEPH_OSD_OP_TRUNCATE;
		} else {
			down_read(&rbd_dev->header_rwsem);
			img_end = rbd_dev->header.image_size;
			up_read(&rbd_dev->header_rwsem);

			if (obj_request->img_offset + length == img_end)
				opcode = CEPH_OSD_OP_TRUNCATE;
			else
				opcode = CEPH_OSD_OP_ZERO;
		}
	} else if (op_type == OBJ_OP_WRITE) {
		if (!offset && length == object_size)
			opcode = CEPH_OSD_OP_WRITEFULL;
		else
			opcode = CEPH_OSD_OP_WRITE;
		osd_req_op_alloc_hint_init(osd_request, num_ops,
					object_size, object_size);
		num_ops++;
	} else {
		opcode = CEPH_OSD_OP_READ;
	}

	if (opcode == CEPH_OSD_OP_DELETE)
		osd_req_op_init(osd_request, num_ops, opcode, 0);
	else
		osd_req_op_extent_init(osd_request, num_ops, opcode,
				       offset, length, 0, 0);

	if (obj_request->type == OBJ_REQUEST_BIO)
		osd_req_op_extent_osd_data_bio(osd_request, num_ops,
					obj_request->bio_list, length);
	else if (obj_request->type == OBJ_REQUEST_PAGES)
		osd_req_op_extent_osd_data_pages(osd_request, num_ops,
					obj_request->pages, length,
					offset & ~PAGE_MASK, false, false);

	/* Discards are also writes */
	if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
		rbd_osd_req_format_write(obj_request);
	else
		rbd_osd_req_format_read(obj_request);
}

/*
 * Split up an image request into one or more object requests, each
 * to a different object.  The "type" parameter indicates whether
 * "data_desc" is the pointer to the head of a list of bio
 * structures, or the base of a page array.  In either case this
 * function assumes data_desc describes memory sufficient to hold
 * all data described by the image request.
 */
static int rbd_img_request_fill(struct rbd_img_request *img_request,
					enum obj_request_type type,
					void *data_desc)
{
	struct rbd_device *rbd_dev = img_request->rbd_dev;
	struct rbd_obj_request *obj_request = NULL;
	struct rbd_obj_request *next_obj_request;
	struct bio *bio_list = NULL;
	unsigned int bio_offset = 0;
	struct page **pages = NULL;
	enum obj_operation_type op_type;
	u64 img_offset;
	u64 resid;

	dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
		(int)type, data_desc);

	img_offset = img_request->offset;
	resid = img_request->length;
	rbd_assert(resid > 0);
	op_type = rbd_img_request_op_type(img_request);

	if (type == OBJ_REQUEST_BIO) {
		bio_list = data_desc;
		rbd_assert(img_offset ==
			   bio_list->bi_iter.bi_sector << SECTOR_SHIFT);
	} else if (type == OBJ_REQUEST_PAGES) {
		pages = data_desc;
	}

	while (resid) {
		struct ceph_osd_request *osd_req;
		const char *object_name;
		u64 offset;
		u64 length;

		object_name = rbd_segment_name(rbd_dev, img_offset);
		if (!object_name)
			goto out_unwind;
		offset = rbd_segment_offset(rbd_dev, img_offset);
		length = rbd_segment_length(rbd_dev, img_offset, resid);
		obj_request = rbd_obj_request_create(object_name,
						offset, length, type);
		/* object request has its own copy of the object name */
		rbd_segment_name_free(object_name);
		if (!obj_request)
			goto out_unwind;

		/*
		 * set obj_request->img_request before creating the
		 * osd_request so that it gets the right snapc
		 */
		rbd_img_obj_request_add(img_request, obj_request);

		if (type == OBJ_REQUEST_BIO) {
			unsigned int clone_size;

			rbd_assert(length <= (u64)UINT_MAX);
			clone_size = (unsigned int)length;
			obj_request->bio_list =
					bio_chain_clone_range(&bio_list,
								&bio_offset,
								clone_size,
								GFP_NOIO);
			if (!obj_request->bio_list)
				goto out_unwind;
		} else if (type == OBJ_REQUEST_PAGES) {
			unsigned int page_count;

			obj_request->pages = pages;
			page_count = (u32)calc_pages_for(offset, length);
			obj_request->page_count = page_count;
			if ((offset + length) & ~PAGE_MASK)
				page_count--;	/* more on last page */
			pages += page_count;
		}

		osd_req = rbd_osd_req_create(rbd_dev, op_type,
					(op_type == OBJ_OP_WRITE) ? 2 : 1,
					obj_request);
		if (!osd_req)
			goto out_unwind;

		obj_request->osd_req = osd_req;
		obj_request->callback = rbd_img_obj_callback;
		obj_request->img_offset = img_offset;

		rbd_img_obj_request_fill(obj_request, osd_req, op_type, 0);

		rbd_img_request_get(img_request);

		img_offset += length;
		resid -= length;
	}

	return 0;

out_unwind:
	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);

	return -ENOMEM;
}

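/*
 * Completion handler for the copyup method call: release the pages
 * holding the parent data and treat a successful copyup as a full
 * write of the originally requested length.
 */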
static void
rbd_osd_copyup_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	struct rbd_device *rbd_dev;
	struct page **pages;
	u32 page_count;

	dout("%s: obj %p\n", __func__, obj_request);

	rbd_assert(obj_request->type == OBJ_REQUEST_BIO ||
		obj_request->type == OBJ_REQUEST_NODATA);
	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;
	rbd_assert(img_request);

	rbd_dev = img_request->rbd_dev;
	rbd_assert(rbd_dev);

	pages = obj_request->copyup_pages;
	rbd_assert(pages != NULL);
	obj_request->copyup_pages = NULL;
	page_count = obj_request->copyup_page_count;
	rbd_assert(page_count);
	obj_request->copyup_page_count = 0;
	ceph_release_page_vector(pages, page_count);

	/*
	 * We want the transfer count to reflect the size of the
	 * original write request.  There is no such thing as a
	 * successful short write, so if the request was successful
	 * we can just set it to the originally-requested length.
	 */
	if (!obj_request->result)
		obj_request->xferred = obj_request->length;

	obj_request_done_set(obj_request);
}

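/*
 * Called when the read of parent data issued for a copyup completes.
 * Build a copyup osd request (the copyup method call plus the
 * original write or discard ops), attach the parent data pages to it
 * and submit it in place of the original request.
 */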
static void
rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
{
	struct rbd_obj_request *orig_request;
	struct ceph_osd_request *osd_req;
	struct ceph_osd_client *osdc;
	struct rbd_device *rbd_dev;
	struct page **pages;
	enum obj_operation_type op_type;
	u32 page_count;
	int img_result;
	u64 parent_length;

	rbd_assert(img_request_child_test(img_request));

	/* First get what we need from the image request */

	pages = img_request->copyup_pages;
	rbd_assert(pages != NULL);
	img_request->copyup_pages = NULL;
	page_count = img_request->copyup_page_count;
	rbd_assert(page_count);
	img_request->copyup_page_count = 0;

	orig_request = img_request->obj_request;
	rbd_assert(orig_request != NULL);
	rbd_assert(obj_request_type_valid(orig_request->type));
	img_result = img_request->result;
	parent_length = img_request->length;
	rbd_assert(parent_length == img_request->xferred);
	rbd_img_request_put(img_request);

	rbd_assert(orig_request->img_request);
	rbd_dev = orig_request->img_request->rbd_dev;
	rbd_assert(rbd_dev);

	/*
	 * If the overlap has become 0 (most likely because the
	 * image has been flattened) we need to free the pages
	 * and re-submit the original write request.
	 */
	if (!rbd_dev->parent_overlap) {
		struct ceph_osd_client *osdc;

		ceph_release_page_vector(pages, page_count);
		osdc = &rbd_dev->rbd_client->client->osdc;
		img_result = rbd_obj_request_submit(osdc, orig_request);
		if (!img_result)
			return;
	}

	if (img_result)
		goto out_err;

	/*
	 * The original osd request is of no use to us any more.
	 * We need a new one that can hold the three ops in a copyup
	 * request.  Allocate the new copyup osd request for the
	 * original request, and release the old one.
	 */
	img_result = -ENOMEM;
	osd_req = rbd_osd_req_create_copyup(orig_request);
	if (!osd_req)
		goto out_err;
	rbd_osd_req_destroy(orig_request->osd_req);
	orig_request->osd_req = osd_req;
	orig_request->copyup_pages = pages;
	orig_request->copyup_page_count = page_count;

	/* Initialize the copyup op */

	osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
	osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
						false, false);

	/* Add the other op(s) */

	op_type = rbd_img_request_op_type(orig_request->img_request);
	rbd_img_obj_request_fill(orig_request, osd_req, op_type, 1);

	/* All set, send it off. */

	osdc = &rbd_dev->rbd_client->client->osdc;
	img_result = rbd_obj_request_submit(osdc, orig_request);
	if (!img_result)
		return;
out_err:
	/* Record the error code and complete the request */

	orig_request->result = img_result;
	orig_request->xferred = 0;
	obj_request_done_set(orig_request);
	rbd_obj_request_complete(orig_request);
}

/*
 * Read from the parent image the range of data that covers the
 * entire target of the given object request.  This is used for
 * satisfying a layered image write request when the target of an
 * object request from the image request does not exist.
 *
 * A page array big enough to hold the returned data is allocated
 * and supplied to rbd_img_request_fill() as the "data descriptor."
 * When the read completes, this page array will be transferred to
 * the original object request for the copyup operation.
 *
 * If an error occurs, record it as the result of the original
 * object request and mark it done so it gets completed.
 */
static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = NULL;
	struct rbd_img_request *parent_request = NULL;
	struct rbd_device *rbd_dev;
	u64 img_offset;
	u64 length;
	struct page **pages = NULL;
	u32 page_count;
	int result;

	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request_type_valid(obj_request->type));

	img_request = obj_request->img_request;
	rbd_assert(img_request != NULL);
	rbd_dev = img_request->rbd_dev;
	rbd_assert(rbd_dev->parent != NULL);

	/*
	 * Determine the byte range covered by the object in the
	 * child image to which the original request was to be sent.
	 */
	img_offset = obj_request->img_offset - obj_request->offset;
	length = (u64)1 << rbd_dev->header.obj_order;

	/*
	 * There is no defined parent data beyond the parent
	 * overlap, so limit what we read at that boundary if
	 * necessary.
	 */
	if (img_offset + length > rbd_dev->parent_overlap) {
		rbd_assert(img_offset < rbd_dev->parent_overlap);
		length = rbd_dev->parent_overlap - img_offset;
	}

	/*
	 * Allocate a page array big enough to receive the data read
	 * from the parent.
	 */
	page_count = (u32)calc_pages_for(0, length);
	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
	if (IS_ERR(pages)) {
		result = PTR_ERR(pages);
		pages = NULL;
		goto out_err;
	}

	result = -ENOMEM;
	parent_request = rbd_parent_request_create(obj_request,
						img_offset, length);
	if (!parent_request)
		goto out_err;

	result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
	if (result)
		goto out_err;
	parent_request->copyup_pages = pages;
	parent_request->copyup_page_count = page_count;

	parent_request->callback = rbd_img_obj_parent_read_full_callback;
	result = rbd_img_request_submit(parent_request);
	if (!result)
		return 0;

	parent_request->copyup_pages = NULL;
	parent_request->copyup_page_count = 0;
	parent_request->obj_request = NULL;
	rbd_obj_request_put(obj_request);
out_err:
	if (pages)
		ceph_release_page_vector(pages, page_count);
	if (parent_request)
		rbd_img_request_put(parent_request);
	obj_request->result = result;
	obj_request->xferred = 0;
	obj_request_done_set(obj_request);

	return result;
}

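/*
 * Completion callback for the STAT request used to learn whether the
 * target object of a layered write exists.  Record the answer and
 * resubmit the original object request.
 */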
static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_obj_request *orig_request;
	struct rbd_device *rbd_dev;
	int result;

	rbd_assert(!obj_request_img_data_test(obj_request));

	/*
	 * All we need from the object request is the original
	 * request and the result of the STAT op.  Grab those, then
	 * we're done with the request.
	 */
	orig_request = obj_request->obj_request;
	obj_request->obj_request = NULL;
	rbd_obj_request_put(orig_request);
	rbd_assert(orig_request);
	rbd_assert(orig_request->img_request);

	result = obj_request->result;
	obj_request->result = 0;

	dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
		obj_request, orig_request, result,
		obj_request->xferred, obj_request->length);
	rbd_obj_request_put(obj_request);

	/*
	 * If the overlap has become 0 (most likely because the
	 * image has been flattened) we need to free the pages
	 * and re-submit the original write request.
	 */
	rbd_dev = orig_request->img_request->rbd_dev;
	if (!rbd_dev->parent_overlap) {
		struct ceph_osd_client *osdc;

		osdc = &rbd_dev->rbd_client->client->osdc;
		result = rbd_obj_request_submit(osdc, orig_request);
		if (!result)
			return;
	}

	/*
	 * Our only purpose here is to determine whether the object
	 * exists, and we don't want to treat the non-existence as
	 * an error.  If something else comes back, transfer the
	 * error to the original request and complete it now.
	 */
	if (!result) {
		obj_request_existence_set(orig_request, true);
	} else if (result == -ENOENT) {
		obj_request_existence_set(orig_request, false);
	} else if (result) {
		orig_request->result = result;
		goto out;
	}

	/*
	 * Resubmit the original request now that we have recorded
	 * whether the target object exists.
	 */
	orig_request->result = rbd_img_obj_request_submit(orig_request);
out:
	if (orig_request->result)
		rbd_obj_request_complete(orig_request);
}

static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
{
	struct rbd_obj_request *stat_request;
	struct rbd_device *rbd_dev;
	struct ceph_osd_client *osdc;
	struct page **pages = NULL;
	u32 page_count;
	size_t size;
	int ret;

	/*
	 * The response data for a STAT call consists of:
	 *     le64 length;
	 *     struct {
	 *         le32 tv_sec;
	 *         le32 tv_nsec;
	 *     } mtime;
	 */
	size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
	page_count = (u32)calc_pages_for(0, size);
	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = -ENOMEM;
	stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
							OBJ_REQUEST_PAGES);
	if (!stat_request)
		goto out;

	rbd_obj_request_get(obj_request);
	stat_request->obj_request = obj_request;
	stat_request->pages = pages;
	stat_request->page_count = page_count;

	rbd_assert(obj_request->img_request);
	rbd_dev = obj_request->img_request->rbd_dev;
	stat_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
						   stat_request);
	if (!stat_request->osd_req)
		goto out;
	stat_request->callback = rbd_img_obj_exists_callback;

	osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT, 0);
	osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
					false, false);
	rbd_osd_req_format_read(stat_request);

	osdc = &rbd_dev->rbd_client->client->osdc;
	ret = rbd_obj_request_submit(osdc, stat_request);
out:
	if (ret)
		rbd_obj_request_put(obj_request);

	return ret;
}

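/*
 * An object request is "simple" if it can go straight to the osd:
 * any read, a non-layered write, a write beyond the parent overlap,
 * a whole-object write, or a write whose target object is already
 * known to exist.  Anything else takes the layered (copyup) path.
 */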
static bool img_obj_request_simple(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	struct rbd_device *rbd_dev;

	rbd_assert(obj_request_img_data_test(obj_request));

	img_request = obj_request->img_request;
	rbd_assert(img_request);
	rbd_dev = img_request->rbd_dev;

	/* Reads */
	if (!img_request_write_test(img_request) &&
	    !img_request_discard_test(img_request))
		return true;

	/* Non-layered writes */
	if (!img_request_layered_test(img_request))
		return true;

	/*
	 * Layered writes outside of the parent overlap range don't
	 * share any data with the parent.
	 */
	if (!obj_request_overlaps_parent(obj_request))
		return true;

	/*
	 * Entire-object layered writes - we will overwrite whatever
	 * parent data there is anyway.
	 */
	if (!obj_request->offset &&
	    obj_request->length == rbd_obj_bytes(&rbd_dev->header))
		return true;

	/*
	 * If the object is known to already exist, its parent data has
	 * already been copied.
	 */
	if (obj_request_known_test(obj_request) &&
	    obj_request_exists_test(obj_request))
		return true;

	return false;
}

static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
{
	if (img_obj_request_simple(obj_request)) {
		struct rbd_device *rbd_dev;
		struct ceph_osd_client *osdc;

		rbd_dev = obj_request->img_request->rbd_dev;
		osdc = &rbd_dev->rbd_client->client->osdc;

		return rbd_obj_request_submit(osdc, obj_request);
	}

	/*
	 * It's a layered write.  The target object might exist but
	 * we may not know that yet.  If we know it doesn't exist,
	 * start by reading the data for the full target object from
	 * the parent so we can use it for a copyup to the target.
	 */
	if (obj_request_known_test(obj_request))
		return rbd_img_obj_parent_read_full(obj_request);

	/* We don't know whether the target exists.  Go find out. */

	return rbd_img_obj_exists_submit(obj_request);
}

static int rbd_img_request_submit(struct rbd_img_request *img_request)
{
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;
	int ret = 0;

	dout("%s: img %p\n", __func__, img_request);

	rbd_img_request_get(img_request);
	for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
		ret = rbd_img_obj_request_submit(obj_request);
		if (ret)
			goto out_put_ireq;
	}

out_put_ireq:
	rbd_img_request_put(img_request);
	return ret;
}

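/*
 * Completion callback for a read issued to the parent image on behalf
 * of a child object request: transfer the result back, but never
 * report data beyond the parent overlap boundary.
 */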
static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
{
	struct rbd_obj_request *obj_request;
	struct rbd_device *rbd_dev;
	u64 obj_end;
	u64 img_xferred;
	int img_result;

	rbd_assert(img_request_child_test(img_request));

	/* First get what we need from the image request and release it */

	obj_request = img_request->obj_request;
	img_xferred = img_request->xferred;
	img_result = img_request->result;
	rbd_img_request_put(img_request);

	/*
	 * If the overlap has become 0 (most likely because the
	 * image has been flattened) we need to re-submit the
	 * original request.
	 */
	rbd_assert(obj_request);
	rbd_assert(obj_request->img_request);
	rbd_dev = obj_request->img_request->rbd_dev;
	if (!rbd_dev->parent_overlap) {
		struct ceph_osd_client *osdc;

		osdc = &rbd_dev->rbd_client->client->osdc;
		img_result = rbd_obj_request_submit(osdc, obj_request);
		if (!img_result)
			return;
	}

	obj_request->result = img_result;
	if (obj_request->result)
		goto out;

	/*
	 * We need to zero anything beyond the parent overlap
	 * boundary.  Since rbd_img_obj_request_read_callback()
	 * will zero anything beyond the end of a short read, an
	 * easy way to do this is to pretend the data from the
	 * parent came up short--ending at the overlap boundary.
	 */
	rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
	obj_end = obj_request->img_offset + obj_request->length;
	if (obj_end > rbd_dev->parent_overlap) {
		u64 xferred = 0;

		if (obj_request->img_offset < rbd_dev->parent_overlap)
			xferred = rbd_dev->parent_overlap -
					obj_request->img_offset;

		obj_request->xferred = min(img_xferred, xferred);
	} else {
		obj_request->xferred = img_xferred;
	}
out:
	rbd_img_obj_request_read_callback(obj_request);
	rbd_obj_request_complete(obj_request);
}

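/*
 * Satisfy an object read from the parent image.  Called when the
 * object was found not to exist (-ENOENT) in a layered image and the
 * requested range lies within the parent overlap.
 */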
static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	int result;

	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request->img_request != NULL);
	rbd_assert(obj_request->result == (s32) -ENOENT);
	rbd_assert(obj_request_type_valid(obj_request->type));

	/* rbd_read_finish(obj_request, obj_request->length); */
	img_request = rbd_parent_request_create(obj_request,
						obj_request->img_offset,
						obj_request->length);
	result = -ENOMEM;
	if (!img_request)
		goto out_err;

	if (obj_request->type == OBJ_REQUEST_BIO)
		result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
						obj_request->bio_list);
	else
		result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
						obj_request->pages);
	if (result)
		goto out_err;

	img_request->callback = rbd_img_parent_read_callback;
	result = rbd_img_request_submit(img_request);
	if (result)
		goto out_err;

	return;
out_err:
	if (img_request)
		rbd_img_request_put(img_request);
	obj_request->result = result;
	obj_request->xferred = 0;
	obj_request_done_set(obj_request);
}

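/*
 * Synchronously acknowledge a notify received on the header object.
 */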
static int rbd_obj_notify_ack_sync(struct rbd_device *rbd_dev, u64 notify_id)
{
	struct rbd_obj_request *obj_request;
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	int ret;

	obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
							OBJ_REQUEST_NODATA);
	if (!obj_request)
		return -ENOMEM;

	ret = -ENOMEM;
	obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
						  obj_request);
	if (!obj_request->osd_req)
		goto out;

	osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
					notify_id, 0, 0);
	rbd_osd_req_format_read(obj_request);

	ret = rbd_obj_request_submit(osdc, obj_request);
	if (ret)
		goto out;
	ret = rbd_obj_request_wait(obj_request);
out:
	rbd_obj_request_put(obj_request);

	return ret;
}

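/*
 * Called when a notification arrives on the header object watch:
 * refresh the mapping from the current image header and acknowledge
 * the notify.
 */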
static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
{
	struct rbd_device *rbd_dev = (struct rbd_device *)data;
	int ret;

	dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
		rbd_dev->header_name, (unsigned long long)notify_id,
		(unsigned int)opcode);

	/*
	 * Until adequate refresh error handling is in place, there is
	 * not much we can do here, except warn.
	 *
	 * See http://tracker.ceph.com/issues/5040
	 */
	ret = rbd_dev_refresh(rbd_dev);
	if (ret)
		rbd_warn(rbd_dev, "refresh failed: %d", ret);

	ret = rbd_obj_notify_ack_sync(rbd_dev, notify_id);
	if (ret)
		rbd_warn(rbd_dev, "notify_ack ret %d", ret);
}

/*
 * Send a (un)watch request and wait for the ack.  Return a request
 * with a ref held on success or error.
 */
static struct rbd_obj_request *rbd_obj_watch_request_helper(
						struct rbd_device *rbd_dev,
						bool watch)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct ceph_options *opts = osdc->client->options;
	struct rbd_obj_request *obj_request;
	int ret;

	obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
					     OBJ_REQUEST_NODATA);
	if (!obj_request)
		return ERR_PTR(-ENOMEM);

	obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_WRITE, 1,
						  obj_request);
	if (!obj_request->osd_req) {
		ret = -ENOMEM;
		goto out;
	}

	osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
			      rbd_dev->watch_event->cookie, 0, watch);
	rbd_osd_req_format_write(obj_request);

	if (watch)
		ceph_osdc_set_request_linger(osdc, obj_request->osd_req);

	ret = rbd_obj_request_submit(osdc, obj_request);
	if (ret)
		goto out;

	ret = rbd_obj_request_wait_timeout(obj_request, opts->mount_timeout);
	if (ret)
		goto out;

	ret = obj_request->result;
	if (ret) {
		if (watch)
			rbd_obj_request_end(obj_request);
		goto out;
	}

	return obj_request;

out:
	rbd_obj_request_put(obj_request);
	return ERR_PTR(ret);
}

/*
 * Initiate a watch request, synchronously.
 */
static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_obj_request *obj_request;
	int ret;

	rbd_assert(!rbd_dev->watch_event);
	rbd_assert(!rbd_dev->watch_request);

	ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
				     &rbd_dev->watch_event);
	if (ret < 0)
		return ret;

	obj_request = rbd_obj_watch_request_helper(rbd_dev, true);
	if (IS_ERR(obj_request)) {
		ceph_osdc_cancel_event(rbd_dev->watch_event);
		rbd_dev->watch_event = NULL;
		return PTR_ERR(obj_request);
	}

	/*
	 * A watch request is set to linger, so the underlying osd
	 * request won't go away until we unregister it.  We retain
	 * a pointer to the object request during that time (in
	 * rbd_dev->watch_request), so we'll keep a reference to it.
	 * We'll drop that reference after we've unregistered it in
	 * rbd_dev_header_unwatch_sync().
	 */
	rbd_dev->watch_request = obj_request;

	return 0;
}

/*
 * Tear down a watch request, synchronously.
 */
static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
{
	struct rbd_obj_request *obj_request;

	rbd_assert(rbd_dev->watch_event);
	rbd_assert(rbd_dev->watch_request);

	rbd_obj_request_end(rbd_dev->watch_request);
	rbd_obj_request_put(rbd_dev->watch_request);
	rbd_dev->watch_request = NULL;

	obj_request = rbd_obj_watch_request_helper(rbd_dev, false);
	if (!IS_ERR(obj_request))
		rbd_obj_request_put(obj_request);
	else
		rbd_warn(rbd_dev, "unable to tear down watch request (%ld)",
			 PTR_ERR(obj_request));

	ceph_osdc_cancel_event(rbd_dev->watch_event);
	rbd_dev->watch_event = NULL;

	dout("%s flushing notifies\n", __func__);
	ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
}

/*
 * Synchronous osd object method call.  Returns the number of bytes
 * returned in the outbound buffer, or a negative error code.
 */
static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
			     const char *object_name,
			     const char *class_name,
			     const char *method_name,
			     const void *outbound,
			     size_t outbound_size,
			     void *inbound,
			     size_t inbound_size)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_obj_request *obj_request;
	struct page **pages;
	u32 page_count;
	int ret;

	/*
	 * Method calls are ultimately read operations.  The result
	 * should be placed into the inbound buffer provided.  They
	 * also supply outbound data--parameters for the object
	 * method.  Currently if this is present it will be a
	 * snapshot id.
	 */
	page_count = (u32)calc_pages_for(0, inbound_size);
	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = -ENOMEM;
	obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
							OBJ_REQUEST_PAGES);
	if (!obj_request)
		goto out;

	obj_request->pages = pages;
	obj_request->page_count = page_count;

	obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
						  obj_request);
	if (!obj_request->osd_req)
		goto out;

	osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
					class_name, method_name);
	if (outbound_size) {
		struct ceph_pagelist *pagelist;

		pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
		if (!pagelist)
			goto out;

		ceph_pagelist_init(pagelist);
		ceph_pagelist_append(pagelist, outbound, outbound_size);
		osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
						pagelist);
	}
	osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
					obj_request->pages, inbound_size,
					0, false, false);
	rbd_osd_req_format_read(obj_request);

	ret = rbd_obj_request_submit(osdc, obj_request);
	if (ret)
		goto out;
	ret = rbd_obj_request_wait(obj_request);
	if (ret)
		goto out;

	ret = obj_request->result;
	if (ret < 0)
		goto out;

	rbd_assert(obj_request->xferred < (u64)INT_MAX);
	ret = (int)obj_request->xferred;
	ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
out:
	if (obj_request)
		rbd_obj_request_put(obj_request);
	else
		ceph_release_page_vector(pages, page_count);

	return ret;
}

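/*
 * Process a single block layer request in workqueue context: validate
 * it, build an image request (NODATA for discards, BIO otherwise) and
 * submit the resulting object requests.
 */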
static void rbd_queue_workfn(struct work_struct *work)
{
	struct request *rq = blk_mq_rq_from_pdu(work);
	struct rbd_device *rbd_dev = rq->q->queuedata;
	struct rbd_img_request *img_request;
	struct ceph_snap_context *snapc = NULL;
	u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
	u64 length = blk_rq_bytes(rq);
	enum obj_operation_type op_type;
	u64 mapping_size;
	int result;

	if (rq->cmd_type != REQ_TYPE_FS) {
		dout("%s: non-fs request type %d\n", __func__,
			(int) rq->cmd_type);
		result = -EIO;
		goto err;
	}

	if (rq->cmd_flags & REQ_DISCARD)
		op_type = OBJ_OP_DISCARD;
	else if (rq->cmd_flags & REQ_WRITE)
		op_type = OBJ_OP_WRITE;
	else
		op_type = OBJ_OP_READ;

I
Ilya Dryomov 已提交
3397
	/* Ignore/skip any zero-length requests */
A
Alex Elder 已提交
3398

I
Ilya Dryomov 已提交
3399 3400 3401 3402 3403
	if (!length) {
		dout("%s: zero-length request\n", __func__);
		result = 0;
		goto err_rq;
	}
A
Alex Elder 已提交
3404

G
Guangliang Zhao 已提交
3405
	/* Only reads are allowed to a read-only device */
I
Ilya Dryomov 已提交
3406

G
Guangliang Zhao 已提交
3407
	if (op_type != OBJ_OP_READ) {
I
Ilya Dryomov 已提交
3408 3409 3410
		if (rbd_dev->mapping.read_only) {
			result = -EROFS;
			goto err_rq;
A
Alex Elder 已提交
3411
		}
I
Ilya Dryomov 已提交
3412 3413
		rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
	}
A
Alex Elder 已提交
3414

I
Ilya Dryomov 已提交
3415 3416 3417 3418 3419 3420 3421 3422 3423 3424 3425 3426
	/*
	 * Quit early if the mapped snapshot no longer exists.  It's
	 * still possible the snapshot will have disappeared by the
	 * time our request arrives at the osd, but there's no sense in
	 * sending it if we already know.
	 */
	if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
		dout("request for non-existent snapshot");
		rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
		result = -ENXIO;
		goto err_rq;
	}
A
Alex Elder 已提交
3427

I
Ilya Dryomov 已提交
3428 3429 3430 3431 3432 3433
	if (offset && length > U64_MAX - offset + 1) {
		rbd_warn(rbd_dev, "bad request range (%llu~%llu)", offset,
			 length);
		result = -EINVAL;
		goto err_rq;	/* Shouldn't happen */
	}
A
Alex Elder 已提交
3434

C
Christoph Hellwig 已提交
3435 3436
	blk_mq_start_request(rq);

3437 3438
	down_read(&rbd_dev->header_rwsem);
	mapping_size = rbd_dev->mapping.size;
G
Guangliang Zhao 已提交
3439
	if (op_type != OBJ_OP_READ) {
3440 3441 3442 3443 3444 3445
		snapc = rbd_dev->header.snapc;
		ceph_get_snap_context(snapc);
	}
	up_read(&rbd_dev->header_rwsem);

	if (offset + length > mapping_size) {
I
Ilya Dryomov 已提交
3446
		rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
3447
			 length, mapping_size);
I
Ilya Dryomov 已提交
3448 3449 3450
		result = -EIO;
		goto err_rq;
	}
A
Alex Elder 已提交
3451

G
Guangliang Zhao 已提交
3452
	img_request = rbd_img_request_create(rbd_dev, offset, length, op_type,
3453
					     snapc);
I
Ilya Dryomov 已提交
3454 3455 3456 3457 3458
	if (!img_request) {
		result = -ENOMEM;
		goto err_rq;
	}
	img_request->rq = rq;
3459
	snapc = NULL; /* img_request consumes a ref */
A
Alex Elder 已提交
3460

3461 3462 3463 3464 3465 3466
	if (op_type == OBJ_OP_DISCARD)
		result = rbd_img_request_fill(img_request, OBJ_REQUEST_NODATA,
					      NULL);
	else
		result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
					      rq->bio);
I
Ilya Dryomov 已提交
3467 3468
	if (result)
		goto err_img_request;
A
Alex Elder 已提交
3469

I
Ilya Dryomov 已提交
3470 3471 3472
	result = rbd_img_request_submit(img_request);
	if (result)
		goto err_img_request;
A
Alex Elder 已提交
3473

I
Ilya Dryomov 已提交
3474
	return;
A
Alex Elder 已提交
3475

I
Ilya Dryomov 已提交
3476 3477 3478 3479 3480
err_img_request:
	rbd_img_request_put(img_request);
err_rq:
	if (result)
		rbd_warn(rbd_dev, "%s %llx at %llx result %d",
G
Guangliang Zhao 已提交
3481
			 obj_op_name(op_type), length, offset, result);
3482
	ceph_put_snap_context(snapc);
C
Christoph Hellwig 已提交
3483 3484
err:
	blk_mq_end_request(rq, result);
I
Ilya Dryomov 已提交
3485
}
A
Alex Elder 已提交
3486

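/*
 * blk-mq ->queue_rq() callback: hand the request off to the rbd
 * workqueue, where rbd_queue_workfn() does the real work.
 */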
static int rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct work_struct *work = blk_mq_rq_to_pdu(rq);

	queue_work(rbd_wq, work);
	return BLK_MQ_RQ_QUEUE_OK;
}

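/* Tear down the gendisk, its request queue and the blk-mq tag set. */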
static void rbd_free_disk(struct rbd_device *rbd_dev)
{
	struct gendisk *disk = rbd_dev->disk;

	if (!disk)
		return;

	rbd_dev->disk = NULL;
	if (disk->flags & GENHD_FL_UP) {
		del_gendisk(disk);
		if (disk->queue)
			blk_cleanup_queue(disk->queue);
		blk_mq_free_tag_set(&rbd_dev->tag_set);
	}
	put_disk(disk);
}

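/*
 * Synchronously read @length bytes from @object_name starting at
 * @offset into @buf.  Returns the number of bytes read on success.
 */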
static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
				const char *object_name,
				u64 offset, u64 length, void *buf)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_obj_request *obj_request;
	struct page **pages = NULL;
	u32 page_count;
	size_t size;
	int ret;

	page_count = (u32) calc_pages_for(offset, length);
	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = -ENOMEM;
	obj_request = rbd_obj_request_create(object_name, offset, length,
							OBJ_REQUEST_PAGES);
	if (!obj_request)
		goto out;

	obj_request->pages = pages;
	obj_request->page_count = page_count;

	obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
						  obj_request);
	if (!obj_request->osd_req)
		goto out;

	osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
					offset, length, 0, 0);
	osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
					obj_request->pages,
					obj_request->length,
					obj_request->offset & ~PAGE_MASK,
					false, false);
	rbd_osd_req_format_read(obj_request);

	ret = rbd_obj_request_submit(osdc, obj_request);
	if (ret)
		goto out;
	ret = rbd_obj_request_wait(obj_request);
	if (ret)
		goto out;

	ret = obj_request->result;
	if (ret < 0)
		goto out;

	rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
	size = (size_t) obj_request->xferred;
	ceph_copy_from_page_vector(pages, buf, 0, size);
	rbd_assert(size <= (size_t)INT_MAX);
	ret = (int)size;
out:
	if (obj_request)
		rbd_obj_request_put(obj_request);
	else
		ceph_release_page_vector(pages, page_count);

	return ret;
}

/*
 * Read the complete header for the given rbd device.  On successful
 * return, the rbd_dev->header field will contain up-to-date
 * information about the image.
 */
static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
{
	struct rbd_image_header_ondisk *ondisk = NULL;
	u32 snap_count = 0;
	u64 names_size = 0;
	u32 want_count;
	int ret;

	/*
	 * The complete header will include an array of its 64-bit
	 * snapshot ids, followed by the names of those snapshots as
	 * a contiguous block of NUL-terminated strings.  Note that
	 * the number of snapshots could change by the time we read
	 * it in, in which case we re-read it.
	 */
	do {
		size_t size;

		kfree(ondisk);

		size = sizeof (*ondisk);
		size += snap_count * sizeof (struct rbd_image_snap_ondisk);
		size += names_size;
		ondisk = kmalloc(size, GFP_KERNEL);
		if (!ondisk)
			return -ENOMEM;

		ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
				       0, size, ondisk);
		if (ret < 0)
			goto out;
		if ((size_t)ret < size) {
			ret = -ENXIO;
			rbd_warn(rbd_dev, "short header read (want %zu got %d)",
				size, ret);
			goto out;
		}
		if (!rbd_dev_ondisk_valid(ondisk)) {
			ret = -ENXIO;
			rbd_warn(rbd_dev, "invalid header");
			goto out;
		}

		names_size = le64_to_cpu(ondisk->snap_names_len);
		want_count = snap_count;
		snap_count = le32_to_cpu(ondisk->snap_count);
	} while (snap_count != want_count);

	ret = rbd_header_from_disk(rbd_dev, ondisk);
out:
	kfree(ondisk);

	return ret;
}

/*
 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
 * has disappeared from the (just updated) snapshot context.
 */
static void rbd_exists_validate(struct rbd_device *rbd_dev)
{
	u64 snap_id;

	if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
		return;

	snap_id = rbd_dev->spec->snap_id;
	if (snap_id == CEPH_NOSNAP)
		return;

	if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
		clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
}

static void rbd_dev_update_size(struct rbd_device *rbd_dev)
{
	sector_t size;

	/*
	 * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
	 * try to update its size.  If REMOVING is set, updating size
	 * is just useless work since the device can't be opened.
	 */
	if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
	    !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
		size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
		dout("setting size to %llu sectors", (unsigned long long)size);
		set_capacity(rbd_dev->disk, size);
		revalidate_disk(rbd_dev->disk);
	}
}

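/*
 * Re-read the image header under header_rwsem and, if the mapping
 * size changed, propagate the new size to the block device.
 */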
static int rbd_dev_refresh(struct rbd_device *rbd_dev)
{
	u64 mapping_size;
	int ret;

	down_write(&rbd_dev->header_rwsem);
	mapping_size = rbd_dev->mapping.size;

	ret = rbd_dev_header_info(rbd_dev);
	if (ret)
		goto out;

	/*
	 * If there is a parent, see if it has disappeared due to the
	 * mapped image getting flattened.
	 */
	if (rbd_dev->parent) {
		ret = rbd_dev_v2_parent_info(rbd_dev);
		if (ret)
			goto out;
	}

	if (rbd_dev->spec->snap_id == CEPH_NOSNAP) {
		rbd_dev->mapping.size = rbd_dev->header.image_size;
	} else {
		/* validate mapped snapshot's EXISTS flag */
		rbd_exists_validate(rbd_dev);
	}

out:
	up_write(&rbd_dev->header_rwsem);
	if (!ret && mapping_size != rbd_dev->mapping.size)
		rbd_dev_update_size(rbd_dev);

	return ret;
}

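/* Initialize the per-request work item used by the blk-mq path. */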
static int rbd_init_request(void *data, struct request *rq,
		unsigned int hctx_idx, unsigned int request_idx,
		unsigned int numa_node)
{
	struct work_struct *work = blk_mq_rq_to_pdu(rq);

	INIT_WORK(work, rbd_queue_workfn);
	return 0;
}

static struct blk_mq_ops rbd_mq_ops = {
	.queue_rq	= rbd_queue_rq,
	.map_queue	= blk_mq_map_queue,
	.init_request	= rbd_init_request,
};

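/*
 * Allocate the gendisk and blk-mq request queue for an rbd device and
 * derive the queue limits (I/O sizes, discard) from the object size.
 */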
static int rbd_init_disk(struct rbd_device *rbd_dev)
{
	struct gendisk *disk;
	struct request_queue *q;
	u64 segment_size;
	int err;

	/* create gendisk info */
	disk = alloc_disk(single_major ?
			  (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
			  RBD_MINORS_PER_MAJOR);
	if (!disk)
		return -ENOMEM;

	snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
		 rbd_dev->dev_id);
	disk->major = rbd_dev->major;
	disk->first_minor = rbd_dev->minor;
	if (single_major)
		disk->flags |= GENHD_FL_EXT_DEVT;
	disk->fops = &rbd_bd_ops;
	disk->private_data = rbd_dev;

	memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
	rbd_dev->tag_set.ops = &rbd_mq_ops;
	rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
	rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
	rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
	rbd_dev->tag_set.nr_hw_queues = 1;
	rbd_dev->tag_set.cmd_size = sizeof(struct work_struct);

	err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
	if (err)
		goto out_disk;

	q = blk_mq_init_queue(&rbd_dev->tag_set);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto out_tag_set;
	}

	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
	/* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */

	/* set io sizes to object size */
	segment_size = rbd_obj_bytes(&rbd_dev->header);
	blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
	q->limits.max_sectors = queue_max_hw_sectors(q);
	blk_queue_max_segments(q, segment_size / SECTOR_SIZE);
	blk_queue_max_segment_size(q, segment_size);
	blk_queue_io_min(q, segment_size);
	blk_queue_io_opt(q, segment_size);

	/* enable the discard support */
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	q->limits.discard_granularity = segment_size;
	q->limits.discard_alignment = segment_size;
	blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE);
	q->limits.discard_zeroes_data = 1;

	if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
		q->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;

	disk->queue = q;

	q->queuedata = rbd_dev;

	rbd_dev->disk = disk;

	return 0;
out_tag_set:
	blk_mq_free_tag_set(&rbd_dev->tag_set);
out_disk:
	put_disk(disk);
	return err;
}

/*
  sysfs
*/

static struct rbd_device *dev_to_rbd_dev(struct device *dev)
{
	return container_of(dev, struct rbd_device, dev);
}

static ssize_t rbd_size_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%llu\n",
		(unsigned long long)rbd_dev->mapping.size);
}

/*
 * Note this shows the features for whatever's mapped, which is not
 * necessarily the base image.
 */
static ssize_t rbd_features_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "0x%016llx\n",
			(unsigned long long)rbd_dev->mapping.features);
}

static ssize_t rbd_major_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	if (rbd_dev->major)
		return sprintf(buf, "%d\n", rbd_dev->major);

	return sprintf(buf, "(none)\n");
}

static ssize_t rbd_minor_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%d\n", rbd_dev->minor);
}

static ssize_t rbd_client_id_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "client%lld\n",
			ceph_client_id(rbd_dev->rbd_client->client));
}

static ssize_t rbd_pool_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
}

static ssize_t rbd_pool_id_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%llu\n",
			(unsigned long long) rbd_dev->spec->pool_id);
}

static ssize_t rbd_name_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	if (rbd_dev->spec->image_name)
		return sprintf(buf, "%s\n", rbd_dev->spec->image_name);

	return sprintf(buf, "(unknown)\n");
}

static ssize_t rbd_image_id_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
}

/*
 * Shows the name of the currently-mapped snapshot (or
 * RBD_SNAP_HEAD_NAME for the base image).
 */
static ssize_t rbd_snap_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
}

/*
 * For a v2 image, shows the chain of parent images, separated by empty
 * lines.  For v1 images or if there is no parent, shows "(no parent
 * image)".
 */
static ssize_t rbd_parent_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	ssize_t count = 0;

	if (!rbd_dev->parent)
		return sprintf(buf, "(no parent image)\n");

	for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
		struct rbd_spec *spec = rbd_dev->parent_spec;

		count += sprintf(&buf[count], "%s"
			    "pool_id %llu\npool_name %s\n"
			    "image_id %s\nimage_name %s\n"
			    "snap_id %llu\nsnap_name %s\n"
			    "overlap %llu\n",
			    !count ? "" : "\n", /* first? */
			    spec->pool_id, spec->pool_name,
			    spec->image_id, spec->image_name ?: "(unknown)",
			    spec->snap_id, spec->snap_name,
			    rbd_dev->parent_overlap);
	}

	return count;
}

static ssize_t rbd_image_refresh(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t size)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	int ret;

	ret = rbd_dev_refresh(rbd_dev);
	if (ret)
		return ret;

	return size;
}

static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
static DEVICE_ATTR(minor, S_IRUGO, rbd_minor_show, NULL);
static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);

static struct attribute *rbd_attrs[] = {
	&dev_attr_size.attr,
	&dev_attr_features.attr,
	&dev_attr_major.attr,
	&dev_attr_minor.attr,
	&dev_attr_client_id.attr,
	&dev_attr_pool.attr,
	&dev_attr_pool_id.attr,
	&dev_attr_name.attr,
	&dev_attr_image_id.attr,
	&dev_attr_current_snap.attr,
	&dev_attr_parent.attr,
	&dev_attr_refresh.attr,
	NULL
};

static struct attribute_group rbd_attr_group = {
	.attrs = rbd_attrs,
};

static const struct attribute_group *rbd_attr_groups[] = {
	&rbd_attr_group,
	NULL
};

static void rbd_dev_release(struct device *dev);

static struct device_type rbd_device_type = {
	.name		= "rbd",
	.groups		= rbd_attr_groups,
	.release	= rbd_dev_release,
};

static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
{
	kref_get(&spec->kref);

	return spec;
}

static void rbd_spec_free(struct kref *kref);
static void rbd_spec_put(struct rbd_spec *spec)
{
	if (spec)
		kref_put(&spec->kref, rbd_spec_free);
}

static struct rbd_spec *rbd_spec_alloc(void)
{
	struct rbd_spec *spec;

	spec = kzalloc(sizeof (*spec), GFP_KERNEL);
	if (!spec)
		return NULL;

	spec->pool_id = CEPH_NOPOOL;
	spec->snap_id = CEPH_NOSNAP;
	kref_init(&spec->kref);

	return spec;
}

static void rbd_spec_free(struct kref *kref)
{
	struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);

	kfree(spec->pool_name);
	kfree(spec->image_id);
	kfree(spec->image_name);
	kfree(spec->snap_name);
	kfree(spec);
}

static void rbd_dev_release(struct device *dev)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	bool need_put = !!rbd_dev->opts;

	rbd_put_client(rbd_dev->rbd_client);
	rbd_spec_put(rbd_dev->spec);
	kfree(rbd_dev->opts);
	kfree(rbd_dev);

	/*
	 * This is racy, but way better than putting module outside of
	 * the release callback.  The race window is pretty small, so
	 * doing something similar to dm (dm-builtin.c) is overkill.
	 */
	if (need_put)
		module_put(THIS_MODULE);
}

static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
					 struct rbd_spec *spec,
					 struct rbd_options *opts)
{
	struct rbd_device *rbd_dev;

	rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
	if (!rbd_dev)
		return NULL;

	spin_lock_init(&rbd_dev->lock);
	rbd_dev->flags = 0;
	atomic_set(&rbd_dev->parent_ref, 0);
	INIT_LIST_HEAD(&rbd_dev->node);
	init_rwsem(&rbd_dev->header_rwsem);

	rbd_dev->dev.bus = &rbd_bus_type;
	rbd_dev->dev.type = &rbd_device_type;
	rbd_dev->dev.parent = &rbd_root_dev;
	device_initialize(&rbd_dev->dev);

	rbd_dev->rbd_client = rbdc;
	rbd_dev->spec = spec;
	rbd_dev->opts = opts;

	/* Initialize the layout used for all rbd requests */

	rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
	rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
	rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
	rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);

	/*
	 * If this is a mapping rbd_dev (as opposed to a parent one),
	 * pin our module.  We have a ref from do_rbd_add(), so use
	 * __module_get().
	 */
	if (rbd_dev->opts)
		__module_get(THIS_MODULE);

	return rbd_dev;
}

static void rbd_dev_destroy(struct rbd_device *rbd_dev)
{
	if (rbd_dev)
		put_device(&rbd_dev->dev);
}

/*
 * Get the size and object order for an image snapshot, or if
 * snap_id is CEPH_NOSNAP, gets this information for the base
 * image.
 */
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u8 *order, u64 *snap_size)
{
	__le64 snapid = cpu_to_le64(snap_id);
	int ret;
	struct {
		u8 order;
		__le64 size;
	} __attribute__ ((packed)) size_buf = { 0 };

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_size",
				&snapid, sizeof (snapid),
				&size_buf, sizeof (size_buf));
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < sizeof (size_buf))
		return -ERANGE;

	if (order) {
		*order = size_buf.order;
		dout("  order %u", (unsigned int)*order);
	}
	*snap_size = le64_to_cpu(size_buf.size);

	dout("  snap_id 0x%016llx snap_size = %llu\n",
		(unsigned long long)snap_id,
		(unsigned long long)*snap_size);

	return 0;
}

static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
{
	return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
					&rbd_dev->header.obj_order,
					&rbd_dev->header.image_size);
}

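/* Retrieve the object name prefix for a format 2 image. */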
static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
{
	void *reply_buf;
	int ret;
	void *p;

	reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
	if (!reply_buf)
		return -ENOMEM;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_object_prefix", NULL, 0,
				reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out;

	p = reply_buf;
	rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
						p + ret, NULL, GFP_NOIO);
	ret = 0;

	if (IS_ERR(rbd_dev->header.object_prefix)) {
		ret = PTR_ERR(rbd_dev->header.object_prefix);
		rbd_dev->header.object_prefix = NULL;
	} else {
		dout("  object_prefix = %s\n", rbd_dev->header.object_prefix);
	}
out:
	kfree(reply_buf);

	return ret;
}

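/*
 * Get the feature bits for the given snapshot (or for the base image
 * if snap_id is CEPH_NOSNAP).  Fails with -ENXIO if the image uses
 * features this driver does not support.
 */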
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features)
{
	__le64 snapid = cpu_to_le64(snap_id);
	struct {
		__le64 features;
		__le64 incompat;
	} __attribute__ ((packed)) features_buf = { 0 };
	u64 unsup;
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_features",
				&snapid, sizeof (snapid),
				&features_buf, sizeof (features_buf));
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < sizeof (features_buf))
		return -ERANGE;

	unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED;
	if (unsup) {
		rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
			 unsup);
		return -ENXIO;
	}

	*snap_features = le64_to_cpu(features_buf.features);

	dout("  snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
		(unsigned long long)snap_id,
		(unsigned long long)*snap_features,
		(unsigned long long)le64_to_cpu(features_buf.incompat));

	return 0;
}

static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
{
	return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
						&rbd_dev->header.features);
}

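/*
 * Query the parent (clone) information for the mapped snapshot and
 * update the parent spec and overlap recorded in the rbd device.
 */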
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
{
	struct rbd_spec *parent_spec;
	size_t size;
	void *reply_buf = NULL;
	__le64 snapid;
	void *p;
	void *end;
	u64 pool_id;
	char *image_id;
	u64 snap_id;
	u64 overlap;
	int ret;

	parent_spec = rbd_spec_alloc();
	if (!parent_spec)
		return -ENOMEM;

	size = sizeof (__le64) +				/* pool_id */
		sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX +	/* image_id */
		sizeof (__le64) +				/* snap_id */
		sizeof (__le64);				/* overlap */
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf) {
		ret = -ENOMEM;
		goto out_err;
	}

	snapid = cpu_to_le64(rbd_dev->spec->snap_id);
	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_parent",
				&snapid, sizeof (snapid),
				reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out_err;

	p = reply_buf;
	end = reply_buf + ret;
	ret = -ERANGE;
	ceph_decode_64_safe(&p, end, pool_id, out_err);
	if (pool_id == CEPH_NOPOOL) {
		/*
		 * Either the parent never existed, or we have
		 * record of it but the image got flattened so it no
		 * longer has a parent.  When the parent of a
		 * layered image disappears we immediately set the
		 * overlap to 0.  The effect of this is that all new
		 * requests will be treated as if the image had no
		 * parent.
		 */
		if (rbd_dev->parent_overlap) {
			rbd_dev->parent_overlap = 0;
			rbd_dev_parent_put(rbd_dev);
			pr_info("%s: clone image has been flattened\n",
				rbd_dev->disk->disk_name);
		}

		goto out;	/* No parent?  No problem. */
	}

	/* The ceph file layout needs to fit pool id in 32 bits */

	ret = -EIO;
	if (pool_id > (u64)U32_MAX) {
		rbd_warn(NULL, "parent pool id too large (%llu > %u)",
			(unsigned long long)pool_id, U32_MAX);
		goto out_err;
	}

	image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
	if (IS_ERR(image_id)) {
		ret = PTR_ERR(image_id);
		goto out_err;
	}
	ceph_decode_64_safe(&p, end, snap_id, out_err);
	ceph_decode_64_safe(&p, end, overlap, out_err);

	/*
	 * The parent won't change (except when the clone is
	 * flattened, already handled that).  So we only need to
	 * record the parent spec if we have not already done so.
	 */
	if (!rbd_dev->parent_spec) {
		parent_spec->pool_id = pool_id;
		parent_spec->image_id = image_id;
		parent_spec->snap_id = snap_id;
		rbd_dev->parent_spec = parent_spec;
		parent_spec = NULL;	/* rbd_dev now owns this */
	} else {
		kfree(image_id);
	}

	/*
	 * We always update the parent overlap.  If it's zero we issue
	 * a warning, as we will proceed as if there was no parent.
	 */
	if (!overlap) {
		if (parent_spec) {
			/* refresh, careful to warn just once */
			if (rbd_dev->parent_overlap)
				rbd_warn(rbd_dev,
				    "clone now standalone (overlap became 0)");
		} else {
			/* initial probe */
			rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
		}
	}
	rbd_dev->parent_overlap = overlap;

out:
	ret = 0;
out_err:
	kfree(reply_buf);
	rbd_spec_put(parent_spec);

	return ret;
}

static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
{
	struct {
		__le64 stripe_unit;
		__le64 stripe_count;
	} __attribute__ ((packed)) striping_info_buf = { 0 };
	size_t size = sizeof (striping_info_buf);
	void *p;
	u64 obj_size;
	u64 stripe_unit;
	u64 stripe_count;
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_stripe_unit_count", NULL, 0,
				(char *)&striping_info_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < size)
		return -ERANGE;

	/*
	 * We don't actually support the "fancy striping" feature
	 * (STRIPINGV2) yet, but if the striping sizes are the
	 * defaults the behavior is the same as before.  So find
	 * out, and only fail if the image has non-default values.
	 */
	ret = -EINVAL;
	obj_size = (u64)1 << rbd_dev->header.obj_order;
	p = &striping_info_buf;
	stripe_unit = ceph_decode_64(&p);
	if (stripe_unit != obj_size) {
		rbd_warn(rbd_dev, "unsupported stripe unit "
				"(got %llu want %llu)",
				stripe_unit, obj_size);
		return -EINVAL;
	}
	stripe_count = ceph_decode_64(&p);
	if (stripe_count != 1) {
		rbd_warn(rbd_dev, "unsupported stripe count "
				"(got %llu want 1)", stripe_count);
		return -EINVAL;
	}
	rbd_dev->header.stripe_unit = stripe_unit;
	rbd_dev->header.stripe_count = stripe_count;

	return 0;
}

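/*
 * Look up the image name for rbd_dev's image id in the pool's
 * RBD_DIRECTORY object.  Returns a newly-allocated name, or NULL
 * on failure.
 */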
static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
{
	size_t image_id_size;
	char *image_id;
	void *p;
	void *end;
	size_t size;
	void *reply_buf = NULL;
	size_t len = 0;
	char *image_name = NULL;
	int ret;

	rbd_assert(!rbd_dev->spec->image_name);

	len = strlen(rbd_dev->spec->image_id);
	image_id_size = sizeof (__le32) + len;
	image_id = kmalloc(image_id_size, GFP_KERNEL);
	if (!image_id)
		return NULL;

	p = image_id;
	end = image_id + image_id_size;
	ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);

	size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf)
		goto out;

	ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
				"rbd", "dir_get_name",
				image_id, image_id_size,
				reply_buf, size);
	if (ret < 0)
		goto out;
	p = reply_buf;
	end = reply_buf + ret;

	image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
	if (IS_ERR(image_name))
		image_name = NULL;
	else
		dout("%s: name is %s len is %zd\n", __func__, image_name, len);
out:
	kfree(reply_buf);
	kfree(image_id);

	return image_name;
}

static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	const char *snap_name;
	u32 which = 0;

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which < snapc->num_snaps) {
		if (!strcmp(name, snap_name))
			return snapc->snaps[which];
		snap_name += strlen(snap_name) + 1;
		which++;
	}
	return CEPH_NOSNAP;
}

static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u32 which;
	bool found = false;
	u64 snap_id;

	for (which = 0; !found && which < snapc->num_snaps; which++) {
		const char *snap_name;

		snap_id = snapc->snaps[which];
		snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
		if (IS_ERR(snap_name)) {
			/* ignore no-longer existing snapshots */
			if (PTR_ERR(snap_name) == -ENOENT)
				continue;
			else
				break;
		}
		found = !strcmp(name, snap_name);
		kfree(snap_name);
	}
	return found ? snap_id : CEPH_NOSNAP;
}

/*
 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
 * no snapshot by that name is found, or if an error occurs.
 */
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	if (rbd_dev->image_format == 1)
		return rbd_v1_snap_id_by_name(rbd_dev, name);

	return rbd_v2_snap_id_by_name(rbd_dev, name);
}

/*
 * An image being mapped will have everything but the snap id.
 */
static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;

	rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
	rbd_assert(spec->image_id && spec->image_name);
	rbd_assert(spec->snap_name);

	if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
		u64 snap_id;

		snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
		if (snap_id == CEPH_NOSNAP)
			return -ENOENT;

		spec->snap_id = snap_id;
	} else {
		spec->snap_id = CEPH_NOSNAP;
	}

	return 0;
}

/*
 * A parent image will have all ids but none of the names.
 *
 * All names in an rbd spec are dynamically allocated.  It's OK if we
 * can't figure out the name for an image id.
 */
static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_spec *spec = rbd_dev->spec;
	const char *pool_name;
	const char *image_name;
	const char *snap_name;
	int ret;

	rbd_assert(spec->pool_id != CEPH_NOPOOL);
	rbd_assert(spec->image_id);
	rbd_assert(spec->snap_id != CEPH_NOSNAP);

	/* Get the pool name; we have to make our own copy of this */

	pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
	if (!pool_name) {
		rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
		return -EIO;
	}
	pool_name = kstrdup(pool_name, GFP_KERNEL);
	if (!pool_name)
		return -ENOMEM;

	/* Fetch the image name; tolerate failure here */

	image_name = rbd_dev_image_name(rbd_dev);
	if (!image_name)
		rbd_warn(rbd_dev, "unable to get image name");

	/* Fetch the snapshot name */

	snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
	if (IS_ERR(snap_name)) {
		ret = PTR_ERR(snap_name);
		goto out_err;
	}

	spec->pool_name = pool_name;
	spec->image_name = image_name;
	spec->snap_name = snap_name;

	return 0;

out_err:
	kfree(image_name);
	kfree(pool_name);
	return ret;
}

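/*
 * Fetch the image's snapshot context (sequence number and snapshot
 * ids) and install it in place of the one currently held.
 */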
static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
{
	size_t size;
	int ret;
	void *reply_buf;
	void *p;
	void *end;
	u64 seq;
	u32 snap_count;
	struct ceph_snap_context *snapc;
	u32 i;

	/*
	 * We'll need room for the seq value (maximum snapshot id),
	 * snapshot count, and array of that many snapshot ids.
	 * For now we have a fixed upper limit on the number we're
	 * prepared to receive.
	 */
	size = sizeof (__le64) + sizeof (__le32) +
			RBD_MAX_SNAP_COUNT * sizeof (__le64);
	reply_buf = kzalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return -ENOMEM;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_snapcontext", NULL, 0,
				reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out;

	p = reply_buf;
	end = reply_buf + ret;
	ret = -ERANGE;
	ceph_decode_64_safe(&p, end, seq, out);
	ceph_decode_32_safe(&p, end, snap_count, out);

	/*
	 * Make sure the reported number of snapshot ids wouldn't go
	 * beyond the end of our buffer.  But before checking that,
	 * make sure the computed size of the snapshot context we
	 * allocate is representable in a size_t.
	 */
	if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
				 / sizeof (u64)) {
		ret = -EINVAL;
		goto out;
	}
	if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
		goto out;
	ret = 0;

	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc) {
		ret = -ENOMEM;
		goto out;
	}
	snapc->seq = seq;
	for (i = 0; i < snap_count; i++)
		snapc->snaps[i] = ceph_decode_64(&p);

	ceph_put_snap_context(rbd_dev->header.snapc);
	rbd_dev->header.snapc = snapc;

	dout("  snap context seq = %llu, snap_count = %u\n",
		(unsigned long long)seq, (unsigned int)snap_count);
out:
	kfree(reply_buf);

	return ret;
}

static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	size_t size;
	void *reply_buf;
	__le64 snapid;
	int ret;
	void *p;
	void *end;
	char *snap_name;

	size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return ERR_PTR(-ENOMEM);

	snapid = cpu_to_le64(snap_id);
	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_snapshot_name",
				&snapid, sizeof (snapid),
				reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0) {
		snap_name = ERR_PTR(ret);
		goto out;
	}

	p = reply_buf;
	end = reply_buf + ret;
	snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
	if (IS_ERR(snap_name))
		goto out;

	dout("  snap_id 0x%016llx snap_name = %s\n",
		(unsigned long long)snap_id, snap_name);
out:
	kfree(reply_buf);

	return snap_name;
}

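/*
 * Read the format 2 image header: size, one-time fields on first use,
 * and the current snapshot context.
 */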
static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
{
	bool first_time = rbd_dev->header.object_prefix == NULL;
	int ret;

	ret = rbd_dev_v2_image_size(rbd_dev);
	if (ret)
		return ret;

	if (first_time) {
		ret = rbd_dev_v2_header_onetime(rbd_dev);
		if (ret)
			return ret;
	}

	ret = rbd_dev_v2_snap_context(rbd_dev);
	if (ret && first_time) {
		kfree(rbd_dev->header.object_prefix);
		rbd_dev->header.object_prefix = NULL;
	}

	return ret;
}

static int rbd_dev_header_info(struct rbd_device *rbd_dev)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_header_info(rbd_dev);

	return rbd_dev_v2_header_info(rbd_dev);
}

/*
 * Get a unique rbd identifier for the given new rbd_dev, and add
 * the rbd_dev to the global list.
 */
static int rbd_dev_id_get(struct rbd_device *rbd_dev)
{
	int new_dev_id;

	new_dev_id = ida_simple_get(&rbd_dev_id_ida,
				    0, minor_to_rbd_dev_id(1 << MINORBITS),
				    GFP_KERNEL);
	if (new_dev_id < 0)
		return new_dev_id;

	rbd_dev->dev_id = new_dev_id;

	spin_lock(&rbd_dev_list_lock);
	list_add_tail(&rbd_dev->node, &rbd_dev_list);
	spin_unlock(&rbd_dev_list_lock);

	dout("rbd_dev %p given dev id %d\n", rbd_dev, rbd_dev->dev_id);

	return 0;
}

/*
 * Remove an rbd_dev from the global list, and record that its
 * identifier is no longer in use.
 */
static void rbd_dev_id_put(struct rbd_device *rbd_dev)
{
	spin_lock(&rbd_dev_list_lock);
	list_del_init(&rbd_dev->node);
	spin_unlock(&rbd_dev_list_lock);

	ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);

	dout("rbd_dev %p released dev id %d\n", rbd_dev, rbd_dev->dev_id);
}

/*
 * Skips over white space at *buf, and updates *buf to point to the
 * first found non-space character (if any). Returns the length of
 * the token (string of non-white space characters) found.  Note
 * that *buf must be terminated with '\0'.
 */
static inline size_t next_token(const char **buf)
{
	/*
	 * These are the characters that produce nonzero for
	 * isspace() in the "C" and "POSIX" locales.
	 */
	const char *spaces = " \f\n\r\t\v";

	*buf += strspn(*buf, spaces);	/* Find start of token */

	return strcspn(*buf, spaces);   /* Return token length */
}

/*
 * Finds the next token in *buf, dynamically allocates a buffer big
 * enough to hold a copy of it, and copies the token into the new
 * buffer.  The copy is guaranteed to be terminated with '\0'.  Note
 * that a duplicate buffer is created even for a zero-length token.
 *
 * Returns a pointer to the newly-allocated duplicate, or a null
 * pointer if memory for the duplicate was not available.  If
 * the lenp argument is a non-null pointer, the length of the token
 * (not including the '\0') is returned in *lenp.
 *
 * If successful, the *buf pointer will be updated to point beyond
 * the end of the found token.
 *
 * Note: uses GFP_KERNEL for allocation.
 */
static inline char *dup_token(const char **buf, size_t *lenp)
{
	char *dup;
	size_t len;

	len = next_token(buf);
	dup = kmemdup(*buf, len + 1, GFP_KERNEL);
	if (!dup)
		return NULL;
	*(dup + len) = '\0';
	*buf += len;

	if (lenp)
		*lenp = len;

	return dup;
}

/*
 * Parse the options provided for an "rbd add" (i.e., rbd image
 * mapping) request.  These arrive via a write to /sys/bus/rbd/add,
 * and the data written is passed here via a NUL-terminated buffer.
 * Returns 0 if successful or an error code otherwise.
 *
 * The information extracted from these options is recorded in
 * the other parameters which return dynamically-allocated
 * structures:
 *  ceph_opts
 *      The address of a pointer that will refer to a ceph options
 *      structure.  Caller must release the returned pointer using
 *      ceph_destroy_options() when it is no longer needed.
 *  rbd_opts
 *	Address of an rbd options pointer.  Fully initialized by
 *	this function; caller must release with kfree().
 *  spec
 *	Address of an rbd image specification pointer.  Fully
 *	initialized by this function based on parsed options.
 *	Caller must release with rbd_spec_put().
 *
 * The options passed take this form:
 *  <mon_addrs> <options> <pool_name> <image_name> [<snap_id>]
 * where:
 *  <mon_addrs>
 *      A comma-separated list of one or more monitor addresses.
 *      A monitor address is an ip address, optionally followed
 *      by a port number (separated by a colon).
 *        I.e.:  ip1[:port1][,ip2[:port2]...]
 *  <options>
 *      A comma-separated list of ceph and/or rbd options.
 *  <pool_name>
 *      The name of the rados pool containing the rbd image.
 *  <image_name>
 *      The name of the image in that pool to map.
 *  <snap_id>
 *      An optional snapshot id.  If provided, the mapping will
 *      present data from the image at the time that snapshot was
 *      created.  The image head is used if no snapshot id is
 *      provided.  Snapshot mappings are always read-only.
 */
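/*
 * For example (illustrative values only -- the exact options depend on
 * the cluster), mapping image "foo" from pool "rbd" at its head could
 * look like:
 *
 *   # echo "1.2.3.4:6789 name=admin,secret=<key> rbd foo -" \
 *         > /sys/bus/rbd/add
 */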
static int rbd_add_parse_args(const char *buf,
				struct ceph_options **ceph_opts,
				struct rbd_options **opts,
				struct rbd_spec **rbd_spec)
{
	size_t len;
	char *options;
	const char *mon_addrs;
	char *snap_name;
	size_t mon_addrs_size;
	struct rbd_spec *spec = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct ceph_options *copts;
	int ret;

	/* The first four tokens are required */

	len = next_token(&buf);
	if (!len) {
		rbd_warn(NULL, "no monitor address(es) provided");
		return -EINVAL;
	}
	mon_addrs = buf;
	mon_addrs_size = len + 1;
	buf += len;

	ret = -EINVAL;
	options = dup_token(&buf, NULL);
	if (!options)
		return -ENOMEM;
	if (!*options) {
		rbd_warn(NULL, "no options provided");
		goto out_err;
	}

	spec = rbd_spec_alloc();
	if (!spec)
		goto out_mem;

	spec->pool_name = dup_token(&buf, NULL);
	if (!spec->pool_name)
		goto out_mem;
	if (!*spec->pool_name) {
		rbd_warn(NULL, "no pool name provided");
		goto out_err;
	}

	spec->image_name = dup_token(&buf, NULL);
	if (!spec->image_name)
		goto out_mem;
	if (!*spec->image_name) {
		rbd_warn(NULL, "no image name provided");
		goto out_err;
	}

	/*
	 * Snapshot name is optional; default is to use "-"
	 * (indicating the head/no snapshot).
	 */
	len = next_token(&buf);
	if (!len) {
		buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
		len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
	} else if (len > RBD_MAX_SNAP_NAME_LEN) {
		ret = -ENAMETOOLONG;
		goto out_err;
	}
	snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
	if (!snap_name)
		goto out_mem;
	*(snap_name + len) = '\0';
	spec->snap_name = snap_name;

	/* Initialize all rbd options to the defaults */

	rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
	if (!rbd_opts)
		goto out_mem;

	rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
	rbd_opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;

	copts = ceph_parse_options(options, mon_addrs,
					mon_addrs + mon_addrs_size - 1,
					parse_rbd_opts_token, rbd_opts);
	if (IS_ERR(copts)) {
		ret = PTR_ERR(copts);
		goto out_err;
	}
	kfree(options);

	*ceph_opts = copts;
	*opts = rbd_opts;
	*rbd_spec = spec;

	return 0;
out_mem:
	ret = -ENOMEM;
out_err:
	kfree(rbd_opts);
	rbd_spec_put(spec);
	kfree(options);

	return ret;
}

/*
 * Return pool id (>= 0) or a negative error code.
 */
static int rbd_add_get_pool_id(struct rbd_client *rbdc, const char *pool_name)
{
	struct ceph_options *opts = rbdc->client->options;
	u64 newest_epoch;
	int tries = 0;
	int ret;

again:
	ret = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, pool_name);
	if (ret == -ENOENT && tries++ < 1) {
		ret = ceph_monc_do_get_version(&rbdc->client->monc, "osdmap",
					       &newest_epoch);
		if (ret < 0)
			return ret;

		if (rbdc->client->osdc.osdmap->epoch < newest_epoch) {
			ceph_monc_request_next_osdmap(&rbdc->client->monc);
			(void) ceph_monc_wait_osdmap(&rbdc->client->monc,
						     newest_epoch,
						     opts->mount_timeout);
			goto again;
		} else {
			/* the osdmap we have is new enough */
			return -ENOENT;
		}
	}

	return ret;
}

/*
 * An rbd format 2 image has a unique identifier, distinct from the
 * name given to it by the user.  Internally, that identifier is
 * what's used to specify the names of objects related to the image.
 *
 * A special "rbd id" object is used to map an rbd image name to its
 * id.  If that object doesn't exist, then there is no v2 rbd image
 * with the supplied name.
 *
 * This function will record the given rbd_dev's image_id field if
 * it can be determined, and in that case will return 0.  If any
 * errors occur a negative errno will be returned and the rbd_dev's
 * image_id field will be unchanged (and should be NULL).
 */
static int rbd_dev_image_id(struct rbd_device *rbd_dev)
{
	int ret;
	size_t size;
	char *object_name;
	void *response;
	char *image_id;
5036

A
Alex Elder 已提交
5037 5038 5039
	/*
	 * When probing a parent image, the image id is already
	 * known (and the image name likely is not).  There's no
5040 5041
	 * need to fetch the image id again in this case.  We
	 * do still need to set the image format though.
A
Alex Elder 已提交
5042
	 */
5043 5044 5045
	if (rbd_dev->spec->image_id) {
		rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;

A
Alex Elder 已提交
5046
		return 0;
5047
	}
A
Alex Elder 已提交
5048

A
Alex Elder 已提交
5049 5050 5051 5052
	/*
	 * First, see if the format 2 image id file exists, and if
	 * so, get the image's persistent id from it.
	 */
A
Alex Elder 已提交
5053
	size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
A
Alex Elder 已提交
5054 5055 5056
	object_name = kmalloc(size, GFP_NOIO);
	if (!object_name)
		return -ENOMEM;
5057
	sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
A
Alex Elder 已提交
5058 5059 5060 5061 5062 5063 5064 5065 5066 5067 5068
	dout("rbd id object name is %s\n", object_name);

	/* Response will be an encoded string, which includes a length */

	size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
	response = kzalloc(size, GFP_NOIO);
	if (!response) {
		ret = -ENOMEM;
		goto out;
	}

5069 5070
	/* If it doesn't exist we'll assume it's a format 1 image */

5071
	ret = rbd_obj_method_sync(rbd_dev, object_name,
5072
				"rbd", "get_id", NULL, 0,
5073
				response, RBD_IMAGE_ID_LEN_MAX);
5074
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5075 5076 5077 5078 5079
	if (ret == -ENOENT) {
		image_id = kstrdup("", GFP_KERNEL);
		ret = image_id ? 0 : -ENOMEM;
		if (!ret)
			rbd_dev->image_format = 1;
5080
	} else if (ret >= 0) {
5081 5082 5083
		void *p = response;

		image_id = ceph_extract_encoded_string(&p, p + ret,
A
Alex Elder 已提交
5084
						NULL, GFP_NOIO);
5085
		ret = PTR_ERR_OR_ZERO(image_id);
5086 5087 5088 5089 5090 5091 5092
		if (!ret)
			rbd_dev->image_format = 2;
	}

	if (!ret) {
		rbd_dev->spec->image_id = image_id;
		dout("image_id is %s\n", image_id);
A
5093 5094 5095 5096 5097 5098 5099 5100
	}
out:
	kfree(response);
	kfree(object_name);

	return ret;
}

/*
 * Undo whatever state changes are made by v1 or v2 header info
 * call.
 */
static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
	struct rbd_image_header	*header;

	rbd_dev_parent_put(rbd_dev);

	/* Free dynamic fields from the header, then zero it out */

	header = &rbd_dev->header;
	ceph_put_snap_context(header->snapc);
	kfree(header->snap_sizes);
	kfree(header->snap_names);
	kfree(header->object_prefix);
	memset(header, 0, sizeof (*header));
}

static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
{
	int ret;

	ret = rbd_dev_v2_object_prefix(rbd_dev);
	if (ret)
		goto out_err;

	/*
	 * Get and check the features for the image.  Currently the
	 * features are assumed to never change.
	 */
	ret = rbd_dev_v2_features(rbd_dev);
	if (ret)
		goto out_err;

	/* If the image supports fancy striping, get its parameters */

	if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
		ret = rbd_dev_v2_striping_info(rbd_dev);
		if (ret < 0)
			goto out_err;
	}
	/* No support for crypto and compression type format 2 images */

	return 0;
out_err:
	rbd_dev->header.features = 0;
	kfree(rbd_dev->header.object_prefix);
	rbd_dev->header.object_prefix = NULL;

	return ret;
}

/*
 * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
 * rbd_dev_image_probe() recursion depth, which means it's also the
 * length of the already discovered part of the parent chain.
 */
static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
{
	struct rbd_device *parent = NULL;
	int ret;

	if (!rbd_dev->parent_spec)
		return 0;

	if (++depth > RBD_MAX_PARENT_CHAIN_LEN) {
		pr_info("parent chain is too long (%d)\n", depth);
		ret = -EINVAL;
		goto out_err;
	}

	parent = rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec,
				NULL);
	if (!parent) {
		ret = -ENOMEM;
		goto out_err;
	}

	/*
	 * Images related by parent/child relationships always share
	 * rbd_client and spec/parent_spec, so bump their refcounts.
	 */
	__rbd_get_client(rbd_dev->rbd_client);
	rbd_spec_get(rbd_dev->parent_spec);

	ret = rbd_dev_image_probe(parent, depth);
	if (ret < 0)
		goto out_err;

	rbd_dev->parent = parent;
	atomic_set(&rbd_dev->parent_ref, 1);
	return 0;

out_err:
	rbd_dev_unparent(rbd_dev);
	rbd_dev_destroy(parent);
5199 5200 5201
	return ret;
}

/*
 * rbd_dev->header_rwsem must be locked for write and will be unlocked
 * upon return.
 */
static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
	int ret;

	/* Get an id and fill in device name. */

	ret = rbd_dev_id_get(rbd_dev);
	if (ret)
		goto err_out_unlock;

	BUILD_BUG_ON(DEV_NAME_LEN
			< sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
	sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);

	/* Record our major and minor device numbers. */

	if (!single_major) {
		ret = register_blkdev(0, rbd_dev->name);
		if (ret < 0)
			goto err_out_id;

		rbd_dev->major = ret;
		rbd_dev->minor = 0;
	} else {
		rbd_dev->major = rbd_major;
		rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
	}

	/* Set up the blkdev mapping. */

	ret = rbd_init_disk(rbd_dev);
	if (ret)
		goto err_out_blkdev;

	ret = rbd_dev_mapping_set(rbd_dev);
	if (ret)
		goto err_out_disk;

	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
	set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);

	dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
	ret = device_add(&rbd_dev->dev);
	if (ret)
		goto err_out_mapping;

	/* Everything's ready.  Announce the disk to the world. */

	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	up_write(&rbd_dev->header_rwsem);

	add_disk(rbd_dev->disk);
	pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
		(unsigned long long) rbd_dev->mapping.size);

	return ret;

err_out_mapping:
	rbd_dev_mapping_clear(rbd_dev);
err_out_disk:
	rbd_free_disk(rbd_dev);
err_out_blkdev:
	if (!single_major)
		unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_id:
	rbd_dev_id_put(rbd_dev);
err_out_unlock:
	up_write(&rbd_dev->header_rwsem);
	return ret;
}

static int rbd_dev_header_name(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;
	size_t size;

	/* Record the header object name for this rbd image. */

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	if (rbd_dev->image_format == 1)
		size = strlen(spec->image_name) + sizeof (RBD_SUFFIX);
	else
		size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id);

	rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
	if (!rbd_dev->header_name)
		return -ENOMEM;

	if (rbd_dev->image_format == 1)
		sprintf(rbd_dev->header_name, "%s%s",
			spec->image_name, RBD_SUFFIX);
	else
		sprintf(rbd_dev->header_name, "%s%s",
			RBD_HEADER_PREFIX, spec->image_id);
	return 0;
}

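/*
 * Undo the image state set up by a successful rbd_dev_image_probe()
 * and destroy the rbd_device itself.
 */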
static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
	rbd_dev_unprobe(rbd_dev);
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	rbd_dev_destroy(rbd_dev);
}

/*
 * Probe for the existence of the header object for the given rbd
 * device.  If this image is the one being mapped (i.e., not a
 * parent), initiate a watch on its header object before using that
 * object to get detailed information about the rbd image.
 */
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
{
	int ret;

	/*
	 * Get the id from the image id object.  Unless there's an
	 * error, rbd_dev->spec->image_id will be filled in with
	 * a dynamically-allocated string, and rbd_dev->image_format
	 * will be set to either 1 or 2.
	 */
	ret = rbd_dev_image_id(rbd_dev);
	if (ret)
		return ret;

	ret = rbd_dev_header_name(rbd_dev);
	if (ret)
		goto err_out_format;

	if (!depth) {
		ret = rbd_dev_header_watch_sync(rbd_dev);
		if (ret) {
			if (ret == -ENOENT)
				pr_info("image %s/%s does not exist\n",
					rbd_dev->spec->pool_name,
					rbd_dev->spec->image_name);
			goto out_header_name;
		}
	}

	ret = rbd_dev_header_info(rbd_dev);
	if (ret)
		goto err_out_watch;

	/*
	 * If this image is the one being mapped, we have pool name and
	 * id, image name and id, and snap name - need to fill snap id.
	 * Otherwise this is a parent image, identified by pool, image
	 * and snap ids - need to fill in names for those ids.
	 */
	if (!depth)
		ret = rbd_spec_fill_snap_id(rbd_dev);
	else
		ret = rbd_spec_fill_names(rbd_dev);
	if (ret) {
		if (ret == -ENOENT)
			pr_info("snap %s/%s@%s does not exist\n",
				rbd_dev->spec->pool_name,
				rbd_dev->spec->image_name,
				rbd_dev->spec->snap_name);
		goto err_out_probe;
	}

	if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
		ret = rbd_dev_v2_parent_info(rbd_dev);
		if (ret)
			goto err_out_probe;

		/*
		 * Need to warn users if this image is the one being
		 * mapped and has a parent.
		 */
		if (!depth && rbd_dev->parent_spec)
			rbd_warn(rbd_dev,
				 "WARNING: kernel layering is EXPERIMENTAL!");
	}

	ret = rbd_dev_probe_parent(rbd_dev, depth);
	if (ret)
		goto err_out_probe;

	dout("discovered format %u image, header name is %s\n",
		rbd_dev->image_format, rbd_dev->header_name);
	return 0;

err_out_probe:
	rbd_dev_unprobe(rbd_dev);
err_out_watch:
	if (!depth)
		rbd_dev_header_unwatch_sync(rbd_dev);
out_header_name:
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
err_out_format:
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;
	return ret;
}

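/*
 * Common handler for the sysfs "add" and "add_single_major" attributes:
 * parse the user-supplied mapping description, probe the image and set
 * up the block device for it.
 */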
static ssize_t do_rbd_add(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct ceph_options *ceph_opts = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct rbd_spec *spec = NULL;
	struct rbd_client *rbdc;
	bool read_only;
	int rc;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	/* parse add command */
	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
	if (rc < 0)
		goto out;

	rbdc = rbd_get_client(ceph_opts);
	if (IS_ERR(rbdc)) {
		rc = PTR_ERR(rbdc);
		goto err_out_args;
	}

	/* pick the pool */
	rc = rbd_add_get_pool_id(rbdc, spec->pool_name);
	if (rc < 0) {
		if (rc == -ENOENT)
			pr_info("pool %s does not exist\n", spec->pool_name);
		goto err_out_client;
	}
	spec->pool_id = (u64)rc;

	/* The ceph file layout needs to fit pool id in 32 bits */

	if (spec->pool_id > (u64)U32_MAX) {
		rbd_warn(NULL, "pool id too large (%llu > %u)",
				(unsigned long long)spec->pool_id, U32_MAX);
		rc = -EIO;
		goto err_out_client;
	}

	rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
	if (!rbd_dev) {
		rc = -ENOMEM;
		goto err_out_client;
	}
	rbdc = NULL;		/* rbd_dev now owns this */
	spec = NULL;		/* rbd_dev now owns this */
	rbd_opts = NULL;	/* rbd_dev now owns this */

	down_write(&rbd_dev->header_rwsem);
	rc = rbd_dev_image_probe(rbd_dev, 0);
	if (rc < 0)
		goto err_out_rbd_dev;

	/* If we are mapping a snapshot it must be marked read-only */

	read_only = rbd_dev->opts->read_only;
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
		read_only = true;
	rbd_dev->mapping.read_only = read_only;

	rc = rbd_dev_device_setup(rbd_dev);
	if (rc) {
		/*
		 * rbd_dev_header_unwatch_sync() can't be moved into
		 * rbd_dev_image_release() without refactoring, see
		 * commit 1f3ef78861ac.
		 */
		rbd_dev_header_unwatch_sync(rbd_dev);
		rbd_dev_image_release(rbd_dev);
		goto out;
	}

	rc = count;
out:
	module_put(THIS_MODULE);
	return rc;

err_out_rbd_dev:
	up_write(&rbd_dev->header_rwsem);
	rbd_dev_destroy(rbd_dev);
err_out_client:
	rbd_put_client(rbdc);
err_out_args:
	rbd_spec_put(spec);
	kfree(rbd_opts);
	goto out;
}

static ssize_t rbd_add(struct bus_type *bus,
		       const char *buf,
		       size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_add(bus, buf, count);
}

static ssize_t rbd_add_single_major(struct bus_type *bus,
				    const char *buf,
				    size_t count)
{
	return do_rbd_add(bus, buf, count);
}

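/* Undo everything set up by rbd_dev_device_setup() */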
static void rbd_dev_device_release(struct rbd_device *rbd_dev)
{
	rbd_free_disk(rbd_dev);
	clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	device_del(&rbd_dev->dev);
	rbd_dev_mapping_clear(rbd_dev);
	if (!single_major)
		unregister_blkdev(rbd_dev->major, rbd_dev->name);
	rbd_dev_id_put(rbd_dev);
}

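/*
 * Tear down an image's parent chain, releasing the deepest ancestor
 * first and working back toward the image itself.
 */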
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
{
	while (rbd_dev->parent) {
		struct rbd_device *first = rbd_dev;
		struct rbd_device *second = first->parent;
		struct rbd_device *third;

		/*
		 * Follow to the parent with no grandparent and
		 * remove it.
		 */
		while (second && (third = second->parent)) {
			first = second;
			second = third;
		}
		rbd_assert(second);
		rbd_dev_image_release(second);
		first->parent = NULL;
		first->parent_overlap = 0;

		rbd_assert(first->parent_spec);
		rbd_spec_put(first->parent_spec);
		first->parent_spec = NULL;
	}
}

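/*
 * Common handler for the sysfs "remove" and "remove_single_major"
 * attributes: look up the device by id, mark it as being removed and
 * tear down its block device and image state.
 */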
static ssize_t do_rbd_remove(struct bus_type *bus,
			     const char *buf,
			     size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct list_head *tmp;
	int dev_id;
	unsigned long ul;
	bool already = false;
	int ret;

	ret = kstrtoul(buf, 10, &ul);
	if (ret)
		return ret;

	/* convert to int; abort if we lost anything in the conversion */
	dev_id = (int)ul;
	if (dev_id != ul)
		return -EINVAL;

	ret = -ENOENT;
	spin_lock(&rbd_dev_list_lock);
	list_for_each(tmp, &rbd_dev_list) {
		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id == dev_id) {
			ret = 0;
			break;
		}
	}
	if (!ret) {
		spin_lock_irq(&rbd_dev->lock);
		if (rbd_dev->open_count)
			ret = -EBUSY;
		else
			already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
							&rbd_dev->flags);
		spin_unlock_irq(&rbd_dev->lock);
	}
	spin_unlock(&rbd_dev_list_lock);
	if (ret < 0 || already)
		return ret;

	rbd_dev_header_unwatch_sync(rbd_dev);

	/*
	 * Don't free anything from rbd_dev->disk until after all
	 * notifies are completely processed. Otherwise
	 * rbd_bus_del_dev() will race with rbd_watch_cb(), resulting
	 * in a potential use after free of rbd_dev->disk or rbd_dev.
	 */
	rbd_dev_device_release(rbd_dev);
	rbd_dev_image_release(rbd_dev);

	return count;
}

static ssize_t rbd_remove(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_remove(bus, buf, count);
}

static ssize_t rbd_remove_single_major(struct bus_type *bus,
				       const char *buf,
				       size_t count)
{
	return do_rbd_remove(bus, buf, count);
}

/*
 * create control files in sysfs
 * /sys/bus/rbd/...
 */
static int rbd_sysfs_init(void)
{
	int ret;

	ret = device_register(&rbd_root_dev);
	if (ret < 0)
		return ret;

	ret = bus_register(&rbd_bus_type);
	if (ret < 0)
		device_unregister(&rbd_root_dev);

	return ret;
}

static void rbd_sysfs_cleanup(void)
{
	bus_unregister(&rbd_bus_type);
	device_unregister(&rbd_root_dev);
}

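/*
 * Create the slab caches used for image requests, object requests and
 * segment (object) names.
 */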
static int rbd_slab_init(void)
{
	rbd_assert(!rbd_img_request_cache);
	rbd_img_request_cache = KMEM_CACHE(rbd_img_request, 0);
	if (!rbd_img_request_cache)
		return -ENOMEM;

	rbd_assert(!rbd_obj_request_cache);
	rbd_obj_request_cache = KMEM_CACHE(rbd_obj_request, 0);
	if (!rbd_obj_request_cache)
		goto out_err;

	rbd_assert(!rbd_segment_name_cache);
	rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
					CEPH_MAX_OID_NAME_LEN + 1, 1, 0, NULL);
	if (rbd_segment_name_cache)
		return 0;
out_err:
	kmem_cache_destroy(rbd_obj_request_cache);
	rbd_obj_request_cache = NULL;

	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;

	return -ENOMEM;
}

static void rbd_slab_exit(void)
{
	rbd_assert(rbd_segment_name_cache);
	kmem_cache_destroy(rbd_segment_name_cache);
	rbd_segment_name_cache = NULL;

	rbd_assert(rbd_obj_request_cache);
	kmem_cache_destroy(rbd_obj_request_cache);
	rbd_obj_request_cache = NULL;

	rbd_assert(rbd_img_request_cache);
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
}

static int __init rbd_init(void)
{
	int rc;

	if (!libceph_compatible(NULL)) {
		rbd_warn(NULL, "libceph incompatibility (quitting)");
		return -EINVAL;
	}

	rc = rbd_slab_init();
	if (rc)
		return rc;

	/*
	 * The number of active work items is limited by the number of
	 * rbd devices * queue depth, so leave @max_active at default.
	 */
	rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
	if (!rbd_wq) {
		rc = -ENOMEM;
		goto err_out_slab;
	}

	if (single_major) {
		rbd_major = register_blkdev(0, RBD_DRV_NAME);
		if (rbd_major < 0) {
			rc = rbd_major;
			goto err_out_wq;
		}
	}

	rc = rbd_sysfs_init();
	if (rc)
		goto err_out_blkdev;

	if (single_major)
		pr_info("loaded (major %d)\n", rbd_major);
	else
		pr_info("loaded\n");

	return 0;

err_out_blkdev:
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
err_out_wq:
	destroy_workqueue(rbd_wq);
err_out_slab:
	rbd_slab_exit();
	return rc;
}

static void __exit rbd_exit(void)
{
	ida_destroy(&rbd_dev_id_ida);
	rbd_sysfs_cleanup();
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
	destroy_workqueue(rbd_wq);
	rbd_slab_exit();
}

module_init(rbd_init);
module_exit(rbd_exit);

MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");

MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
MODULE_LICENSE("GPL");