#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/vmalloc.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/random.h>
#include <linux/sort.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include <linux/ceph/decode.h>

/*
 * Ceph inode operations
 *
 * Implement basic inode helpers (get, alloc) and inode ops (getattr,
 * setattr, etc.), xattr helpers, and helpers for assimilating
 * metadata returned by the MDS into our cache.
 *
 * Also define helpers for doing asynchronous writeback, invalidation,
 * and truncation for the benefit of those who can't afford to block
 * (typically because they are in the message handler path).
 */

static const struct inode_operations ceph_symlink_iops;

static void ceph_invalidate_work(struct work_struct *work);
static void ceph_writeback_work(struct work_struct *work);
static void ceph_vmtruncate_work(struct work_struct *work);

/*
 * find or create an inode, given the ceph ino number
 */
static int ceph_set_ino_cb(struct inode *inode, void *data)
{
	ceph_inode(inode)->i_vino = *(struct ceph_vino *)data;
	inode->i_ino = ceph_vino_to_ino(*(struct ceph_vino *)data);
	return 0;
}

struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
{
	struct inode *inode;
	ino_t t = ceph_vino_to_ino(vino);

	inode = iget5_locked(sb, t, ceph_ino_compare, ceph_set_ino_cb, &vino);
	if (inode == NULL)
		return ERR_PTR(-ENOMEM);
	if (inode->i_state & I_NEW) {
		dout("get_inode created new inode %p %llx.%llx ino %llx\n",
		     inode, ceph_vinop(inode), (u64)inode->i_ino);
		unlock_new_inode(inode);
	}

	dout("get_inode on %lu=%llx.%llx got %p\n", inode->i_ino, vino.ino,
	     vino.snap, inode);
	return inode;
}

/*
 * get/construct snapdir inode for a given directory
 */
struct inode *ceph_get_snapdir(struct inode *parent)
{
	struct ceph_vino vino = {
		.ino = ceph_ino(parent),
		.snap = CEPH_SNAPDIR,
	};
	struct inode *inode = ceph_get_inode(parent->i_sb, vino);
	struct ceph_inode_info *ci = ceph_inode(inode);

	BUG_ON(!S_ISDIR(parent->i_mode));
	if (IS_ERR(inode))
		return inode;
	inode->i_mode = parent->i_mode;
	inode->i_uid = parent->i_uid;
	inode->i_gid = parent->i_gid;
	inode->i_op = &ceph_snapdir_iops;
	inode->i_fop = &ceph_snapdir_fops;
	ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
	ci->i_rbytes = 0;
	return inode;
}

const struct inode_operations ceph_file_iops = {
	.permission = ceph_permission,
	.setattr = ceph_setattr,
	.getattr = ceph_getattr,
	.setxattr = generic_setxattr,
	.getxattr = generic_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = generic_removexattr,
	.get_acl = ceph_get_acl,
	.set_acl = ceph_set_acl,
};


/*
 * We use a 'frag tree' to keep track of the MDS's directory fragments
 * for a given inode (usually there is just a single fragment).  We
 * need to know when a child frag is delegated to a new MDS, or when
 * it is flagged as replicated, so we can direct our requests
 * accordingly.
 */
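
/*
 * Added illustration (not part of the original file): a frag is built with
 * ceph_frag_make(bits, value); ceph_frag_make(0, 0) covers the entire
 * dentry-name hash space.  A frag whose split_by is 2 has 1 << 2 == 4
 * children, each generated by ceph_frag_make_child() and covering a
 * quarter of its parent's hash range.  __ceph_choose_frag() below walks
 * the tree this way to locate the leaf frag containing a given hash.
 */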

/*
 * find/create a frag in the tree
 */
static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
						    u32 f)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ceph_inode_frag *frag;
	int c;

	p = &ci->i_fragtree.rb_node;
	while (*p) {
		parent = *p;
		frag = rb_entry(parent, struct ceph_inode_frag, node);
		c = ceph_frag_compare(f, frag->frag);
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else
			return frag;
	}

	frag = kmalloc(sizeof(*frag), GFP_NOFS);
	if (!frag) {
		pr_err("__get_or_create_frag ENOMEM on %p %llx.%llx "
		       "frag %x\n", &ci->vfs_inode,
		       ceph_vinop(&ci->vfs_inode), f);
		return ERR_PTR(-ENOMEM);
	}
	frag->frag = f;
	frag->split_by = 0;
	frag->mds = -1;
	frag->ndist = 0;

	rb_link_node(&frag->node, parent, p);
	rb_insert_color(&frag->node, &ci->i_fragtree);

	dout("get_or_create_frag added %llx.%llx frag %x\n",
	     ceph_vinop(&ci->vfs_inode), f);
	return frag;
}

/*
 * find a specific frag @f
 */
struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
{
	struct rb_node *n = ci->i_fragtree.rb_node;

	while (n) {
		struct ceph_inode_frag *frag =
			rb_entry(n, struct ceph_inode_frag, node);
		int c = ceph_frag_compare(f, frag->frag);
		if (c < 0)
			n = n->rb_left;
		else if (c > 0)
			n = n->rb_right;
		else
			return frag;
	}
	return NULL;
}

/*
 * Choose frag containing the given value @v.  If @pfrag is
 * specified, copy the frag delegation info to the caller if
 * it is present.
 */
static u32 __ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
			      struct ceph_inode_frag *pfrag, int *found)
{
	u32 t = ceph_frag_make(0, 0);
	struct ceph_inode_frag *frag;
	unsigned nway, i;
	u32 n;

	if (found)
		*found = 0;

	while (1) {
		WARN_ON(!ceph_frag_contains_value(t, v));
		frag = __ceph_find_frag(ci, t);
		if (!frag)
			break; /* t is a leaf */
		if (frag->split_by == 0) {
			if (pfrag)
				memcpy(pfrag, frag, sizeof(*pfrag));
			if (found)
				*found = 1;
			break;
		}

		/* choose child */
		nway = 1 << frag->split_by;
		dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
		     frag->split_by, nway);
		for (i = 0; i < nway; i++) {
			n = ceph_frag_make_child(t, frag->split_by, i);
			if (ceph_frag_contains_value(n, v)) {
				t = n;
				break;
			}
		}
		BUG_ON(i == nway);
	}
	dout("choose_frag(%x) = %x\n", v, t);

	return t;
}

u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
		     struct ceph_inode_frag *pfrag, int *found)
{
	u32 ret;
	mutex_lock(&ci->i_fragtree_mutex);
	ret = __ceph_choose_frag(ci, v, pfrag, found);
	mutex_unlock(&ci->i_fragtree_mutex);
	return ret;
}

/*
 * Process dirfrag (delegation) info from the mds.  Include leaf
 * fragment in tree ONLY if ndist > 0.  Otherwise, only
 * branches/splits are included in i_fragtree.
 */
static int ceph_fill_dirfrag(struct inode *inode,
			     struct ceph_mds_reply_dirfrag *dirinfo)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	u32 id = le32_to_cpu(dirinfo->frag);
	int mds = le32_to_cpu(dirinfo->auth);
	int ndist = le32_to_cpu(dirinfo->ndist);
	int diri_auth = -1;
	int i;
	int err = 0;

	spin_lock(&ci->i_ceph_lock);
	if (ci->i_auth_cap)
		diri_auth = ci->i_auth_cap->mds;
	spin_unlock(&ci->i_ceph_lock);

	if (mds == -1) /* CDIR_AUTH_PARENT */
		mds = diri_auth;

	mutex_lock(&ci->i_fragtree_mutex);
	if (ndist == 0 && mds == diri_auth) {
		/* no delegation info needed. */
		frag = __ceph_find_frag(ci, id);
		if (!frag)
			goto out;
		if (frag->split_by == 0) {
			/* tree leaf, remove */
			dout("fill_dirfrag removed %llx.%llx frag %x"
			     " (no ref)\n", ceph_vinop(inode), id);
			rb_erase(&frag->node, &ci->i_fragtree);
			kfree(frag);
		} else {
			/* tree branch, keep and clear */
			dout("fill_dirfrag cleared %llx.%llx frag %x"
			     " referral\n", ceph_vinop(inode), id);
			frag->mds = -1;
			frag->ndist = 0;
		}
		goto out;
	}


	/* find/add this frag to store mds delegation info */
	frag = __get_or_create_frag(ci, id);
	if (IS_ERR(frag)) {
		/* this is not the end of the world; we can continue
		   with bad/inaccurate delegation info */
		pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n",
		       ceph_vinop(inode), le32_to_cpu(dirinfo->frag));
		err = -ENOMEM;
		goto out;
	}

	frag->mds = mds;
	frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
	for (i = 0; i < frag->ndist; i++)
		frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
	dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n",
	     ceph_vinop(inode), frag->frag, frag->ndist);

out:
	mutex_unlock(&ci->i_fragtree_mutex);
	return err;
}

static int frag_tree_split_cmp(const void *l, const void *r)
{
	struct ceph_frag_tree_split *ls = (struct ceph_frag_tree_split*)l;
	struct ceph_frag_tree_split *rs = (struct ceph_frag_tree_split*)r;
	return ceph_frag_compare(ls->frag, rs->frag);
}

static bool is_frag_child(u32 f, struct ceph_inode_frag *frag)
{
	if (!frag)
		return f == ceph_frag_make(0, 0);
	if (ceph_frag_bits(f) != ceph_frag_bits(frag->frag) + frag->split_by)
		return false;
	return ceph_frag_contains_value(frag->frag, ceph_frag_value(f));
}

static int ceph_fill_fragtree(struct inode *inode,
			      struct ceph_frag_tree_head *fragtree,
			      struct ceph_mds_reply_dirfrag *dirinfo)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag, *prev_frag = NULL;
	struct rb_node *rb_node;
	unsigned i, split_by, nsplits;
	u32 id;
	bool update = false;

	mutex_lock(&ci->i_fragtree_mutex);
	nsplits = le32_to_cpu(fragtree->nsplits);
	if (nsplits != ci->i_fragtree_nsplits) {
		update = true;
	} else if (nsplits) {
		i = prandom_u32() % nsplits;
		id = le32_to_cpu(fragtree->splits[i].frag);
		if (!__ceph_find_frag(ci, id))
			update = true;
	} else if (!RB_EMPTY_ROOT(&ci->i_fragtree)) {
		rb_node = rb_first(&ci->i_fragtree);
		frag = rb_entry(rb_node, struct ceph_inode_frag, node);
		if (frag->frag != ceph_frag_make(0, 0) || rb_next(rb_node))
			update = true;
	}
	if (!update && dirinfo) {
		id = le32_to_cpu(dirinfo->frag);
		if (id != __ceph_choose_frag(ci, id, NULL, NULL))
			update = true;
	}
	if (!update)
		goto out_unlock;

	if (nsplits > 1) {
		sort(fragtree->splits, nsplits, sizeof(fragtree->splits[0]),
		     frag_tree_split_cmp, NULL);
	}

	dout("fill_fragtree %llx.%llx\n", ceph_vinop(inode));
	rb_node = rb_first(&ci->i_fragtree);
	for (i = 0; i < nsplits; i++) {
		id = le32_to_cpu(fragtree->splits[i].frag);
		split_by = le32_to_cpu(fragtree->splits[i].by);
		if (split_by == 0 || ceph_frag_bits(id) + split_by > 24) {
			pr_err("fill_fragtree %llx.%llx invalid split %d/%u, "
			       "frag %x split by %d\n", ceph_vinop(inode),
			       i, nsplits, id, split_by);
			continue;
		}
		frag = NULL;
		while (rb_node) {
			frag = rb_entry(rb_node, struct ceph_inode_frag, node);
			if (ceph_frag_compare(frag->frag, id) >= 0) {
				if (frag->frag != id)
					frag = NULL;
				else
					rb_node = rb_next(rb_node);
				break;
			}
			rb_node = rb_next(rb_node);
			/* delete stale split/leaf node */
			if (frag->split_by > 0 ||
			    !is_frag_child(frag->frag, prev_frag)) {
				rb_erase(&frag->node, &ci->i_fragtree);
				if (frag->split_by > 0)
					ci->i_fragtree_nsplits--;
				kfree(frag);
			}
			frag = NULL;
		}
		if (!frag) {
			frag = __get_or_create_frag(ci, id);
			if (IS_ERR(frag))
				continue;
		}
		if (frag->split_by == 0)
			ci->i_fragtree_nsplits++;
		frag->split_by = split_by;
		dout(" frag %x split by %d\n", frag->frag, frag->split_by);
		prev_frag = frag;
	}
	while (rb_node) {
		frag = rb_entry(rb_node, struct ceph_inode_frag, node);
		rb_node = rb_next(rb_node);
		/* delete stale split/leaf node */
		if (frag->split_by > 0 ||
		    !is_frag_child(frag->frag, prev_frag)) {
			rb_erase(&frag->node, &ci->i_fragtree);
			if (frag->split_by > 0)
				ci->i_fragtree_nsplits--;
			kfree(frag);
		}
	}
out_unlock:
	mutex_unlock(&ci->i_fragtree_mutex);
	return 0;
}

/*
 * initialize a newly allocated inode.
 */
struct inode *ceph_alloc_inode(struct super_block *sb)
{
	struct ceph_inode_info *ci;
	int i;

	ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS);
	if (!ci)
		return NULL;

	dout("alloc_inode %p\n", &ci->vfs_inode);

	spin_lock_init(&ci->i_ceph_lock);

	ci->i_version = 0;
	ci->i_inline_version = 0;
	ci->i_time_warp_seq = 0;
	ci->i_ceph_flags = 0;
	atomic64_set(&ci->i_ordered_count, 1);
	atomic64_set(&ci->i_release_count, 1);
	atomic64_set(&ci->i_complete_seq[0], 0);
	atomic64_set(&ci->i_complete_seq[1], 0);
	ci->i_symlink = NULL;

	memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));
	RCU_INIT_POINTER(ci->i_layout.pool_ns, NULL);

	ci->i_fragtree = RB_ROOT;
	mutex_init(&ci->i_fragtree_mutex);

	ci->i_xattrs.blob = NULL;
	ci->i_xattrs.prealloc_blob = NULL;
	ci->i_xattrs.dirty = false;
	ci->i_xattrs.index = RB_ROOT;
	ci->i_xattrs.count = 0;
	ci->i_xattrs.names_size = 0;
	ci->i_xattrs.vals_size = 0;
	ci->i_xattrs.version = 0;
	ci->i_xattrs.index_version = 0;

	ci->i_caps = RB_ROOT;
	ci->i_auth_cap = NULL;
	ci->i_dirty_caps = 0;
	ci->i_flushing_caps = 0;
	INIT_LIST_HEAD(&ci->i_dirty_item);
	INIT_LIST_HEAD(&ci->i_flushing_item);
	ci->i_prealloc_cap_flush = NULL;
	ci->i_cap_flush_tree = RB_ROOT;
	init_waitqueue_head(&ci->i_cap_wq);
	ci->i_hold_caps_min = 0;
	ci->i_hold_caps_max = 0;
	INIT_LIST_HEAD(&ci->i_cap_delay_list);
	INIT_LIST_HEAD(&ci->i_cap_snaps);
	ci->i_head_snapc = NULL;
	ci->i_snap_caps = 0;

	for (i = 0; i < CEPH_FILE_MODE_BITS; i++)
		ci->i_nr_by_mode[i] = 0;

	mutex_init(&ci->i_truncate_mutex);
	ci->i_truncate_seq = 0;
	ci->i_truncate_size = 0;
	ci->i_truncate_pending = 0;

	ci->i_max_size = 0;
	ci->i_reported_size = 0;
	ci->i_wanted_max_size = 0;
	ci->i_requested_max_size = 0;

	ci->i_pin_ref = 0;
	ci->i_rd_ref = 0;
	ci->i_rdcache_ref = 0;
	ci->i_wr_ref = 0;
	ci->i_wb_ref = 0;
	ci->i_wrbuffer_ref = 0;
	ci->i_wrbuffer_ref_head = 0;
	ci->i_shared_gen = 0;
	ci->i_rdcache_gen = 0;
	ci->i_rdcache_revoking = 0;

	INIT_LIST_HEAD(&ci->i_unsafe_writes);
	INIT_LIST_HEAD(&ci->i_unsafe_dirops);
	INIT_LIST_HEAD(&ci->i_unsafe_iops);
	spin_lock_init(&ci->i_unsafe_lock);

	ci->i_snap_realm = NULL;
	INIT_LIST_HEAD(&ci->i_snap_realm_item);
	INIT_LIST_HEAD(&ci->i_snap_flush_item);

	INIT_WORK(&ci->i_wb_work, ceph_writeback_work);
	INIT_WORK(&ci->i_pg_inv_work, ceph_invalidate_work);

	INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work);

	ceph_fscache_inode_init(ci);

	return &ci->vfs_inode;
}

static void ceph_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	struct ceph_inode_info *ci = ceph_inode(inode);

	kmem_cache_free(ceph_inode_cachep, ci);
}

void ceph_destroy_inode(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	struct rb_node *n;

	dout("destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));

	ceph_fscache_unregister_inode_cookie(ci);

	ceph_queue_caps_release(inode);

	/*
	 * we may still have a snap_realm reference if there are stray
	 * caps in i_snap_caps.
	 */
	if (ci->i_snap_realm) {
		struct ceph_mds_client *mdsc =
			ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
		struct ceph_snap_realm *realm = ci->i_snap_realm;

		dout(" dropping residual ref to snap realm %p\n", realm);
		spin_lock(&realm->inodes_with_caps_lock);
		list_del_init(&ci->i_snap_realm_item);
		spin_unlock(&realm->inodes_with_caps_lock);
		ceph_put_snap_realm(mdsc, realm);
	}

	kfree(ci->i_symlink);
	while ((n = rb_first(&ci->i_fragtree)) != NULL) {
		frag = rb_entry(n, struct ceph_inode_frag, node);
		rb_erase(n, &ci->i_fragtree);
		kfree(frag);
	}
	ci->i_fragtree_nsplits = 0;

	__ceph_destroy_xattrs(ci);
	if (ci->i_xattrs.blob)
		ceph_buffer_put(ci->i_xattrs.blob);
	if (ci->i_xattrs.prealloc_blob)
		ceph_buffer_put(ci->i_xattrs.prealloc_blob);

	ceph_put_string(rcu_dereference_raw(ci->i_layout.pool_ns));

	call_rcu(&inode->i_rcu, ceph_i_callback);
}

int ceph_drop_inode(struct inode *inode)
{
	/*
	 * Positive dentry and corresponding inode are always accompanied
	 * in MDS reply. So no need to keep inode in the cache after
	 * dropping all its aliases.
	 */
	return 1;
}

static inline blkcnt_t calc_inode_blocks(u64 size)
{
	return (size + (1<<9) - 1) >> 9;
}
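
/*
 * Added example (not in the original source): calc_inode_blocks() rounds
 * up to 512-byte units, so a 1-byte file reports 1 block and a 513-byte
 * file reports 2 blocks.
 */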

/*
 * Helpers to fill in size, ctime, mtime, and atime.  We have to be
 * careful because either the client or MDS may have more up to date
 * info, depending on which capabilities are held, and whether
 * time_warp_seq or truncate_seq have increased.  (Ordinarily, mtime
 * and size are monotonically increasing, except when utimes() or
 * truncate() increments the corresponding _seq values.)
 */
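/*
 * Added illustration (not from the original file): with an unchanged
 * truncate_seq, a smaller size reported by the MDS is ignored, since the
 * larger local size may reflect writes the MDS has not yet seen; only a
 * newer truncate_seq allows the reported size to shrink ours.
 */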
int ceph_fill_file_size(struct inode *inode, int issued,
			u32 truncate_seq, u64 truncate_size, u64 size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int queue_trunc = 0;

	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
	    (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) {
		dout("size %lld -> %llu\n", inode->i_size, size);
		if (size > 0 && S_ISDIR(inode->i_mode)) {
			pr_err("fill_file_size non-zero size for directory\n");
			size = 0;
		}
		i_size_write(inode, size);
		inode->i_blocks = calc_inode_blocks(size);
		ci->i_reported_size = size;
		if (truncate_seq != ci->i_truncate_seq) {
			dout("truncate_seq %u -> %u\n",
			     ci->i_truncate_seq, truncate_seq);
			ci->i_truncate_seq = truncate_seq;

			/* the MDS should have revoked these caps */
			WARN_ON_ONCE(issued & (CEPH_CAP_FILE_EXCL |
					       CEPH_CAP_FILE_RD |
					       CEPH_CAP_FILE_WR |
					       CEPH_CAP_FILE_LAZYIO));
			/*
			 * If we hold relevant caps, or in the case where we're
			 * not the only client referencing this file and we
			 * don't hold those caps, then we need to check whether
			 * the file is either opened or mmaped
			 */
			if ((issued & (CEPH_CAP_FILE_CACHE|
				       CEPH_CAP_FILE_BUFFER)) ||
			    mapping_mapped(inode->i_mapping) ||
			    __ceph_caps_file_wanted(ci)) {
				ci->i_truncate_pending++;
				queue_trunc = 1;
			}
		}
	}
	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 &&
	    ci->i_truncate_size != truncate_size) {
		dout("truncate_size %lld -> %llu\n", ci->i_truncate_size,
		     truncate_size);
		ci->i_truncate_size = truncate_size;
	}

	if (queue_trunc)
		ceph_fscache_invalidate(inode);

	return queue_trunc;
}

void ceph_fill_file_time(struct inode *inode, int issued,
			 u64 time_warp_seq, struct timespec *ctime,
			 struct timespec *mtime, struct timespec *atime)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int warn = 0;

	if (issued & (CEPH_CAP_FILE_EXCL|
		      CEPH_CAP_FILE_WR|
		      CEPH_CAP_FILE_BUFFER|
		      CEPH_CAP_AUTH_EXCL|
		      CEPH_CAP_XATTR_EXCL)) {
		if (timespec_compare(ctime, &inode->i_ctime) > 0) {
			dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n",
			     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
			     ctime->tv_sec, ctime->tv_nsec);
			inode->i_ctime = *ctime;
		}
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
			/* the MDS did a utimes() */
			dout("mtime %ld.%09ld -> %ld.%09ld "
			     "tw %d -> %d\n",
			     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
			     mtime->tv_sec, mtime->tv_nsec,
			     ci->i_time_warp_seq, (int)time_warp_seq);

			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else if (time_warp_seq == ci->i_time_warp_seq) {
			/* nobody did utimes(); take the max */
			if (timespec_compare(mtime, &inode->i_mtime) > 0) {
				dout("mtime %ld.%09ld -> %ld.%09ld inc\n",
				     inode->i_mtime.tv_sec,
				     inode->i_mtime.tv_nsec,
				     mtime->tv_sec, mtime->tv_nsec);
				inode->i_mtime = *mtime;
			}
			if (timespec_compare(atime, &inode->i_atime) > 0) {
				dout("atime %ld.%09ld -> %ld.%09ld inc\n",
				     inode->i_atime.tv_sec,
				     inode->i_atime.tv_nsec,
				     atime->tv_sec, atime->tv_nsec);
				inode->i_atime = *atime;
			}
		} else if (issued & CEPH_CAP_FILE_EXCL) {
			/* we did a utimes(); ignore mds values */
		} else {
			warn = 1;
		}
	} else {
		/* we have no write|excl caps; whatever the MDS says is true */
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
			inode->i_ctime = *ctime;
			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else {
			warn = 1;
		}
	}
	if (warn) /* time_warp_seq shouldn't go backwards */
		dout("%p mds time_warp_seq %llu < %u\n",
		     inode, time_warp_seq, ci->i_time_warp_seq);
}

/*
 * Populate an inode based on info from mds.  May be called on new or
 * existing inodes.
 */
static int fill_inode(struct inode *inode, struct page *locked_page,
		      struct ceph_mds_reply_info_in *iinfo,
		      struct ceph_mds_reply_dirfrag *dirinfo,
		      struct ceph_mds_session *session,
		      unsigned long ttl_from, int cap_fmode,
		      struct ceph_cap_reservation *caps_reservation)
{
	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
	struct ceph_mds_reply_inode *info = iinfo->in;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int issued = 0, implemented, new_issued;
	struct timespec mtime, atime, ctime;
	struct ceph_buffer *xattr_blob = NULL;
	struct ceph_string *pool_ns = NULL;
	struct ceph_cap *new_cap = NULL;
	int err = 0;
	bool wake = false;
	bool queue_trunc = false;
	bool new_version = false;
	bool fill_inline = false;

	dout("fill_inode %p ino %llx.%llx v %llu had %llu\n",
	     inode, ceph_vinop(inode), le64_to_cpu(info->version),
	     ci->i_version);

	/* prealloc new cap struct */
	if (info->cap.caps && ceph_snap(inode) == CEPH_NOSNAP)
		new_cap = ceph_get_cap(mdsc, caps_reservation);

	/*
	 * prealloc xattr data, if it looks like we'll need it.  only
	 * if len > 4 (meaning there are actually xattrs; the first 4
	 * bytes are the xattr count).
	 */
	if (iinfo->xattr_len > 4) {
		xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
		if (!xattr_blob)
			pr_err("fill_inode ENOMEM xattr blob %d bytes\n",
			       iinfo->xattr_len);
	}

	if (iinfo->pool_ns_len > 0)
		pool_ns = ceph_find_or_create_string(iinfo->pool_ns_data,
						     iinfo->pool_ns_len);

	spin_lock(&ci->i_ceph_lock);

	/*
	 * provided version will be odd if inode value is projected,
	 * even if stable.  skip the update if we have newer stable
	 * info (ours>=theirs, e.g. due to racing mds replies), unless
	 * we are getting projected (unstable) info (in which case the
	 * version is odd, and we want ours>theirs).
	 *   us   them
	 *   2    2     skip
	 *   3    2     skip
	 *   3    3     update
	 */
	if (ci->i_version == 0 ||
	    ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
	     le64_to_cpu(info->version) > (ci->i_version & ~1)))
		new_version = true;

	issued = __ceph_caps_issued(ci, &implemented);
	issued |= implemented | __ceph_caps_dirty(ci);
	new_issued = ~issued & le32_to_cpu(info->cap.caps);

	/* update inode */
	ci->i_version = le64_to_cpu(info->version);
	inode->i_version++;
	inode->i_rdev = le32_to_cpu(info->rdev);
	inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;

	if ((new_version || (new_issued & CEPH_CAP_AUTH_SHARED)) &&
	    (issued & CEPH_CAP_AUTH_EXCL) == 0) {
		inode->i_mode = le32_to_cpu(info->mode);
		inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(info->uid));
		inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(info->gid));
		dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
		     from_kuid(&init_user_ns, inode->i_uid),
		     from_kgid(&init_user_ns, inode->i_gid));
	}

	if ((new_version || (new_issued & CEPH_CAP_LINK_SHARED)) &&
	    (issued & CEPH_CAP_LINK_EXCL) == 0)
		set_nlink(inode, le32_to_cpu(info->nlink));

	if (new_version || (new_issued & CEPH_CAP_ANY_RD)) {
		/* be careful with mtime, atime, size */
		ceph_decode_timespec(&atime, &info->atime);
		ceph_decode_timespec(&mtime, &info->mtime);
		ceph_decode_timespec(&ctime, &info->ctime);
		ceph_fill_file_time(inode, issued,
				le32_to_cpu(info->time_warp_seq),
				&ctime, &mtime, &atime);
	}

	if (new_version ||
	    (new_issued & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR))) {
		s64 old_pool = ci->i_layout.pool_id;
		struct ceph_string *old_ns;

		ceph_file_layout_from_legacy(&ci->i_layout, &info->layout);
		old_ns = rcu_dereference_protected(ci->i_layout.pool_ns,
					lockdep_is_held(&ci->i_ceph_lock));
		rcu_assign_pointer(ci->i_layout.pool_ns, pool_ns);

		if (ci->i_layout.pool_id != old_pool || pool_ns != old_ns)
			ci->i_ceph_flags &= ~CEPH_I_POOL_PERM;

		pool_ns = old_ns;

		queue_trunc = ceph_fill_file_size(inode, issued,
					le32_to_cpu(info->truncate_seq),
					le64_to_cpu(info->truncate_size),
					le64_to_cpu(info->size));
		/* only update max_size on auth cap */
		if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
		    ci->i_max_size != le64_to_cpu(info->max_size)) {
			dout("max_size %lld -> %llu\n", ci->i_max_size,
					le64_to_cpu(info->max_size));
			ci->i_max_size = le64_to_cpu(info->max_size);
		}
	}

	/* xattrs */
	/* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
	if ((ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL))  &&
	    le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
		if (ci->i_xattrs.blob)
			ceph_buffer_put(ci->i_xattrs.blob);
		ci->i_xattrs.blob = xattr_blob;
		if (xattr_blob)
			memcpy(ci->i_xattrs.blob->vec.iov_base,
			       iinfo->xattr_data, iinfo->xattr_len);
		ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
		ceph_forget_all_cached_acls(inode);
		xattr_blob = NULL;
	}

	inode->i_mapping->a_ops = &ceph_aops;

	switch (inode->i_mode & S_IFMT) {
	case S_IFIFO:
	case S_IFBLK:
	case S_IFCHR:
	case S_IFSOCK:
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		inode->i_op = &ceph_file_iops;
		break;
	case S_IFREG:
		inode->i_op = &ceph_file_iops;
		inode->i_fop = &ceph_file_fops;
		break;
	case S_IFLNK:
		inode->i_op = &ceph_symlink_iops;
		if (!ci->i_symlink) {
			u32 symlen = iinfo->symlink_len;
			char *sym;

			spin_unlock(&ci->i_ceph_lock);

			if (symlen != i_size_read(inode)) {
				pr_err("fill_inode %llx.%llx BAD symlink "
					"size %lld\n", ceph_vinop(inode),
					i_size_read(inode));
				i_size_write(inode, symlen);
				inode->i_blocks = calc_inode_blocks(symlen);
			}

			err = -ENOMEM;
			sym = kstrndup(iinfo->symlink, symlen, GFP_NOFS);
			if (!sym)
				goto out;

			spin_lock(&ci->i_ceph_lock);
			if (!ci->i_symlink)
				ci->i_symlink = sym;
			else
				kfree(sym); /* lost a race */
		}
		inode->i_link = ci->i_symlink;
		break;
	case S_IFDIR:
		inode->i_op = &ceph_dir_iops;
		inode->i_fop = &ceph_dir_fops;

		ci->i_dir_layout = iinfo->dir_layout;

		ci->i_files = le64_to_cpu(info->files);
		ci->i_subdirs = le64_to_cpu(info->subdirs);
		ci->i_rbytes = le64_to_cpu(info->rbytes);
		ci->i_rfiles = le64_to_cpu(info->rfiles);
		ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
		ceph_decode_timespec(&ci->i_rctime, &info->rctime);
		break;
	default:
		pr_err("fill_inode %llx.%llx BAD mode 0%o\n",
		       ceph_vinop(inode), inode->i_mode);
	}

	/* were we issued a capability? */
	if (info->cap.caps) {
		if (ceph_snap(inode) == CEPH_NOSNAP) {
			unsigned caps = le32_to_cpu(info->cap.caps);
			ceph_add_cap(inode, session,
				     le64_to_cpu(info->cap.cap_id),
				     cap_fmode, caps,
				     le32_to_cpu(info->cap.wanted),
				     le32_to_cpu(info->cap.seq),
				     le32_to_cpu(info->cap.mseq),
				     le64_to_cpu(info->cap.realm),
				     info->cap.flags, &new_cap);

			/* set dir completion flag? */
			if (S_ISDIR(inode->i_mode) &&
			    ci->i_files == 0 && ci->i_subdirs == 0 &&
			    (caps & CEPH_CAP_FILE_SHARED) &&
			    (issued & CEPH_CAP_FILE_EXCL) == 0 &&
			    !__ceph_dir_is_complete(ci)) {
				dout(" marking %p complete (empty)\n", inode);
				i_size_write(inode, 0);
				__ceph_dir_set_complete(ci,
					atomic64_read(&ci->i_release_count),
					atomic64_read(&ci->i_ordered_count));
			}

			wake = true;
		} else {
			dout(" %p got snap_caps %s\n", inode,
			     ceph_cap_string(le32_to_cpu(info->cap.caps)));
			ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
			if (cap_fmode >= 0)
				__ceph_get_fmode(ci, cap_fmode);
		}
	} else if (cap_fmode >= 0) {
		pr_warn("mds issued no caps on %llx.%llx\n",
			   ceph_vinop(inode));
		__ceph_get_fmode(ci, cap_fmode);
	}

	if (iinfo->inline_version > 0 &&
	    iinfo->inline_version >= ci->i_inline_version) {
		int cache_caps = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
		ci->i_inline_version = iinfo->inline_version;
		if (ci->i_inline_version != CEPH_INLINE_NONE &&
		    (locked_page ||
		     (le32_to_cpu(info->cap.caps) & cache_caps)))
			fill_inline = true;
	}

	spin_unlock(&ci->i_ceph_lock);

	if (fill_inline)
		ceph_fill_inline_data(inode, locked_page,
				      iinfo->inline_data, iinfo->inline_len);

	if (wake)
		wake_up_all(&ci->i_cap_wq);

	/* queue truncate if we saw i_size decrease */
	if (queue_trunc)
		ceph_queue_vmtruncate(inode);

	/* populate frag tree */
	if (S_ISDIR(inode->i_mode))
		ceph_fill_fragtree(inode, &info->fragtree, dirinfo);

	/* update delegation info? */
	if (dirinfo)
		ceph_fill_dirfrag(inode, dirinfo);

	err = 0;
out:
	if (new_cap)
		ceph_put_cap(mdsc, new_cap);
	if (xattr_blob)
		ceph_buffer_put(xattr_blob);
	ceph_put_string(pool_ns);
	return err;
}

/*
 * caller should hold session s_mutex.
 */
static void update_dentry_lease(struct dentry *dentry,
				struct ceph_mds_reply_lease *lease,
				struct ceph_mds_session *session,
				unsigned long from_time)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	long unsigned duration = le32_to_cpu(lease->duration_ms);
	long unsigned ttl = from_time + (duration * HZ) / 1000;
	long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000;
	struct inode *dir;

	/* only track leases on regular dentries */
	if (dentry->d_op != &ceph_dentry_ops)
		return;

	spin_lock(&dentry->d_lock);
	dout("update_dentry_lease %p duration %lu ms ttl %lu\n",
	     dentry, duration, ttl);

	/* make lease_rdcache_gen match directory */
	dir = d_inode(dentry->d_parent);
	di->lease_shared_gen = ceph_inode(dir)->i_shared_gen;

	if (duration == 0)
		goto out_unlock;

	if (di->lease_gen == session->s_cap_gen &&
	    time_before(ttl, dentry->d_time))
		goto out_unlock;  /* we already have a newer lease. */

	if (di->lease_session && di->lease_session != session)
		goto out_unlock;

	ceph_dentry_lru_touch(dentry);

	if (!di->lease_session)
		di->lease_session = ceph_get_mds_session(session);
	di->lease_gen = session->s_cap_gen;
	di->lease_seq = le32_to_cpu(lease->seq);
	di->lease_renew_after = half_ttl;
	di->lease_renew_from = 0;
	dentry->d_time = ttl;
out_unlock:
	spin_unlock(&dentry->d_lock);
	return;
}

/*
 * splice a dentry to an inode.
 * caller must hold directory i_mutex for this to be safe.
 */
static struct dentry *splice_dentry(struct dentry *dn, struct inode *in)
{
	struct dentry *realdn;

	BUG_ON(d_inode(dn));

	/* dn must be unhashed */
	if (!d_unhashed(dn))
		d_drop(dn);
	realdn = d_splice_alias(in, dn);
	if (IS_ERR(realdn)) {
		pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
		       PTR_ERR(realdn), dn, in, ceph_vinop(in));
		dn = realdn; /* note realdn contains the error */
		goto out;
	} else if (realdn) {
		dout("dn %p (%d) spliced with %p (%d) "
		     "inode %p ino %llx.%llx\n",
A
		     realdn, d_count(realdn),
1081
		     d_inode(realdn), ceph_vinop(d_inode(realdn)));
S
		dn = realdn;
	} else {
		BUG_ON(!ceph_dentry(dn));
		dout("dn %p attached to %p ino %llx.%llx\n",
1087
		     dn, d_inode(dn), ceph_vinop(d_inode(dn)));
S
out:
	return dn;
}

/*
 * Incorporate results into the local cache.  This is either just
 * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
 * after a lookup).
 *
 * A reply may contain
 *         a directory inode along with a dentry.
 *  and/or a target inode
 *
 * Called with snap_rwsem (read).
 */
int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
		    struct ceph_mds_session *session)
{
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct inode *in = NULL;
	struct ceph_vino vino;
1110
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
S

	dout("fill_trace %p is_dentry %d is_target %d\n", req,
	     rinfo->head->is_dentry, rinfo->head->is_target);

#if 0
	/*
	 * Debugging hook:
	 *
	 * If we resend completed ops to a recovering mds, we get no
	 * trace.  Since that is very rare, pretend this is the case
	 * to ensure the 'no trace' handlers in the callers behave.
	 *
	 * Fill in inodes unconditionally to avoid breaking cap
	 * invariants.
	 */
	if (rinfo->head->op & CEPH_MDS_OP_WRITE) {
		pr_info("fill_trace faking empty trace on %lld %s\n",
			req->r_tid, ceph_mds_op_name(rinfo->head->op));
		if (rinfo->head->is_dentry) {
			rinfo->head->is_dentry = 0;
			err = fill_inode(req->r_locked_dir,
					 &rinfo->diri, rinfo->dirfrag,
					 session, req->r_request_started, -1);
		}
		if (rinfo->head->is_target) {
			rinfo->head->is_target = 0;
			ininfo = rinfo->targeti.in;
			vino.ino = le64_to_cpu(ininfo->ino);
			vino.snap = le64_to_cpu(ininfo->snapid);
			in = ceph_get_inode(sb, vino);
			err = fill_inode(in, &rinfo->targeti, NULL,
					 session, req->r_request_started,
					 req->r_fmode);
			iput(in);
		}
	}
#endif

	if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
		dout("fill_trace reply is empty!\n");
		if (rinfo->head->result == 0 && req->r_locked_dir)
			ceph_invalidate_dir_request(req);
		return 0;
	}

	if (rinfo->head->is_dentry) {
		struct inode *dir = req->r_locked_dir;

		if (dir) {
			err = fill_inode(dir, NULL,
					 &rinfo->diri, rinfo->dirfrag,
					 session, req->r_request_started, -1,
					 &req->r_caps_reservation);
			if (err < 0)
				goto done;
		} else {
			WARN_ON_ONCE(1);
		}

		if (dir && req->r_op == CEPH_MDS_OP_LOOKUPNAME) {
			struct qstr dname;
			struct dentry *dn, *parent;

			BUG_ON(!rinfo->head->is_target);
			BUG_ON(req->r_dentry);

			parent = d_find_any_alias(dir);
			BUG_ON(!parent);

			dname.name = rinfo->dname;
			dname.len = rinfo->dname_len;
			dname.hash = full_name_hash(dname.name, dname.len);
			vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
			vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
retry_lookup:
			dn = d_lookup(parent, &dname);
			dout("d_lookup on parent=%p name=%.*s got %p\n",
			     parent, dname.len, dname.name, dn);

			if (!dn) {
				dn = d_alloc(parent, &dname);
				dout("d_alloc %p '%.*s' = %p\n", parent,
				     dname.len, dname.name, dn);
				if (dn == NULL) {
					dput(parent);
					err = -ENOMEM;
					goto done;
				}
				err = ceph_init_dentry(dn);
				if (err < 0) {
					dput(dn);
					dput(parent);
					goto done;
				}
			} else if (d_really_is_positive(dn) &&
				   (ceph_ino(d_inode(dn)) != vino.ino ||
				    ceph_snap(d_inode(dn)) != vino.snap)) {
				dout(" dn %p points to wrong inode %p\n",
				     dn, d_inode(dn));
				d_delete(dn);
				dput(dn);
				goto retry_lookup;
			}

			req->r_dentry = dn;
			dput(parent);
		}
	}

	if (rinfo->head->is_target) {
		vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
		vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);

		in = ceph_get_inode(sb, vino);
		if (IS_ERR(in)) {
			err = PTR_ERR(in);
			goto done;
		}
		req->r_target_inode = in;

		err = fill_inode(in, req->r_locked_page, &rinfo->targeti, NULL,
				session, req->r_request_started,
				(!req->r_aborted && rinfo->head->result == 0) ?
				req->r_fmode : -1,
				&req->r_caps_reservation);
		if (err < 0) {
			pr_err("fill_inode badness %p %llx.%llx\n",
				in, ceph_vinop(in));
			goto done;
		}
	}

	/*
	 * ignore null lease/binding on snapdir ENOENT, or else we
	 * will have trouble splicing in the virtual snapdir later
	 */
	if (rinfo->head->is_dentry && !req->r_aborted &&
	    req->r_locked_dir &&
	    (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
					       fsc->mount_options->snapdir_name,
					       req->r_dentry->d_name.len))) {
		/*
		 * lookup link rename   : null -> possibly existing inode
		 * mknod symlink mkdir  : null -> new inode
		 * unlink               : linked -> null
		 */
		struct inode *dir = req->r_locked_dir;
		struct dentry *dn = req->r_dentry;
		bool have_dir_cap, have_lease;

		BUG_ON(!dn);
		BUG_ON(!dir);
		BUG_ON(d_inode(dn->d_parent) != dir);
		BUG_ON(ceph_ino(dir) !=
		       le64_to_cpu(rinfo->diri.in->ino));
		BUG_ON(ceph_snap(dir) !=
		       le64_to_cpu(rinfo->diri.in->snapid));

		/* do we have a lease on the whole dir? */
		have_dir_cap =
			(le32_to_cpu(rinfo->diri.in->cap.caps) &
			 CEPH_CAP_FILE_SHARED);

		/* do we have a dn lease? */
		have_lease = have_dir_cap ||
			le32_to_cpu(rinfo->dlease->duration_ms);
		if (!have_lease)
			dout("fill_trace  no dentry lease or dir cap\n");

		/* rename? */
		if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
			struct inode *olddir = req->r_old_dentry_dir;
			BUG_ON(!olddir);

			dout(" src %p '%pd' dst %p '%pd'\n",
			     req->r_old_dentry,
			     req->r_old_dentry,
			     dn, dn);
			dout("fill_trace doing d_move %p -> %p\n",
			     req->r_old_dentry, dn);

			/* d_move screws up sibling dentries' offsets */
			ceph_dir_clear_ordered(dir);
			ceph_dir_clear_ordered(olddir);

			d_move(req->r_old_dentry, dn);
			dout(" src %p '%pd' dst %p '%pd'\n",
			     req->r_old_dentry,
			     req->r_old_dentry,
			     dn, dn);

			/* ensure target dentry is invalidated, despite
			   rehashing bug in vfs_rename_dir */
			ceph_invalidate_dentry_lease(dn);

			dout("dn %p gets new offset %lld\n", req->r_old_dentry,
			     ceph_dentry(req->r_old_dentry)->offset);

			dn = req->r_old_dentry;  /* use old_dentry */
		}

		/* null dentry? */
		if (!rinfo->head->is_target) {
			dout("fill_trace null dentry\n");
			if (d_really_is_positive(dn)) {
				ceph_dir_clear_ordered(dir);
				dout("d_delete %p\n", dn);
				d_delete(dn);
			} else {
				if (have_lease && d_unhashed(dn))
					d_add(dn, NULL);
				update_dentry_lease(dn, rinfo->dlease,
						    session,
						    req->r_request_started);
			}
			goto done;
		}

		/* attach proper inode */
		if (d_really_is_negative(dn)) {
			ceph_dir_clear_ordered(dir);
			ihold(in);
			dn = splice_dentry(dn, in);
			if (IS_ERR(dn)) {
				err = PTR_ERR(dn);
				goto done;
			}
			req->r_dentry = dn;  /* may have spliced */
		} else if (d_really_is_positive(dn) && d_inode(dn) != in) {
			dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
			     dn, d_inode(dn), ceph_vinop(d_inode(dn)),
			     ceph_vinop(in));
			d_invalidate(dn);
			have_lease = false;
		}

		if (have_lease)
			update_dentry_lease(dn, rinfo->dlease, session,
					    req->r_request_started);
		dout(" final dn %p\n", dn);
	} else if (!req->r_aborted &&
		   (req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
		    req->r_op == CEPH_MDS_OP_MKSNAP)) {
		struct dentry *dn = req->r_dentry;
		struct inode *dir = req->r_locked_dir;

		/* fill out a snapdir LOOKUPSNAP dentry */
		BUG_ON(!dn);
		BUG_ON(!dir);
		BUG_ON(ceph_snap(dir) != CEPH_SNAPDIR);
		dout(" linking snapped dir %p to dn %p\n", in, dn);
		ceph_dir_clear_ordered(dir);
		ihold(in);
		dn = splice_dentry(dn, in);
		if (IS_ERR(dn)) {
			err = PTR_ERR(dn);
			goto done;
		}
		req->r_dentry = dn;  /* may have spliced */
	}
done:
	dout("fill_trace done err=%d\n", err);
	return err;
}

/*
 * Prepopulate our cache with readdir results, leases, etc.
 */
static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
					   struct ceph_mds_session *session)
{
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	int i, err = 0;

	for (i = 0; i < rinfo->dir_nr; i++) {
		struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
		struct ceph_vino vino;
		struct inode *in;
		int rc;

		vino.ino = le64_to_cpu(rde->inode.in->ino);
		vino.snap = le64_to_cpu(rde->inode.in->snapid);

		in = ceph_get_inode(req->r_dentry->d_sb, vino);
		if (IS_ERR(in)) {
			err = PTR_ERR(in);
			dout("new_inode badness got %d\n", err);
			continue;
		}
		rc = fill_inode(in, NULL, &rde->inode, NULL, session,
				req->r_request_started, -1,
				&req->r_caps_reservation);
		if (rc < 0) {
			pr_err("fill_inode badness on %p got %d\n", in, rc);
			err = rc;
		}
		iput(in);
	}

	return err;
}

void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl)
{
	if (ctl->page) {
		kunmap(ctl->page);
		put_page(ctl->page);
		ctl->page = NULL;
	}
}

static int fill_readdir_cache(struct inode *dir, struct dentry *dn,
			      struct ceph_readdir_cache_control *ctl,
			      struct ceph_mds_request *req)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	unsigned nsize = PAGE_SIZE / sizeof(struct dentry*);
	unsigned idx = ctl->index % nsize;
	pgoff_t pgoff = ctl->index / nsize;

	if (!ctl->page || pgoff != page_index(ctl->page)) {
		ceph_readdir_cache_release(ctl);
		if (idx == 0)
			ctl->page = grab_cache_page(&dir->i_data, pgoff);
		else
			ctl->page = find_lock_page(&dir->i_data, pgoff);
		if (!ctl->page) {
			ctl->index = -1;
			return idx == 0 ? -ENOMEM : 0;
		}
		/* reading/filling the cache are serialized by
		 * i_mutex, no need to use page lock */
		unlock_page(ctl->page);
		ctl->dentries = kmap(ctl->page);
		if (idx == 0)
			memset(ctl->dentries, 0, PAGE_SIZE);
	}

	if (req->r_dir_release_cnt == atomic64_read(&ci->i_release_count) &&
	    req->r_dir_ordered_cnt == atomic64_read(&ci->i_ordered_count)) {
		dout("readdir cache dn %p idx %d\n", dn, ctl->index);
		ctl->dentries[idx] = dn;
		ctl->index++;
	} else {
		dout("disable readdir cache\n");
		ctl->index = -1;
	}
	return 0;
}

int ceph_readdir_prepopulate(struct ceph_mds_request *req,
			     struct ceph_mds_session *session)
{
	struct dentry *parent = req->r_dentry;
	struct ceph_inode_info *ci = ceph_inode(d_inode(parent));
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct qstr dname;
	struct dentry *dn;
	struct inode *in;
	int err = 0, skipped = 0, ret, i;
	struct inode *snapdir = NULL;
	struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
	u32 frag = le32_to_cpu(rhead->args.readdir.frag);
	u32 last_hash = 0;
	u32 fpos_offset;
	struct ceph_readdir_cache_control cache_ctl = {};

	if (req->r_aborted)
		return readdir_prepopulate_inodes_only(req, session);

	if (rinfo->hash_order && req->r_path2) {
		last_hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
					  req->r_path2, strlen(req->r_path2));
		last_hash = ceph_frag_value(last_hash);
	}

	if (rinfo->dir_dir &&
	    le32_to_cpu(rinfo->dir_dir->frag) != frag) {
		dout("readdir_prepopulate got new frag %x -> %x\n",
		     frag, le32_to_cpu(rinfo->dir_dir->frag));
		frag = le32_to_cpu(rinfo->dir_dir->frag);
		if (!rinfo->hash_order)
			req->r_readdir_offset = 2;
	}

	if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
		snapdir = ceph_get_snapdir(d_inode(parent));
		parent = d_find_alias(snapdir);
		dout("readdir_prepopulate %d items under SNAPDIR dn %p\n",
		     rinfo->dir_nr, parent);
	} else {
		dout("readdir_prepopulate %d items under dn %p\n",
		     rinfo->dir_nr, parent);
		if (rinfo->dir_dir)
			ceph_fill_dirfrag(d_inode(parent), rinfo->dir_dir);
	}

	if (ceph_frag_is_leftmost(frag) && req->r_readdir_offset == 2) {
		/* note dir version at start of readdir so we can tell
		 * if any dentries get dropped */
		req->r_dir_release_cnt = atomic64_read(&ci->i_release_count);
		req->r_dir_ordered_cnt = atomic64_read(&ci->i_ordered_count);
		req->r_readdir_cache_idx = 0;
	}

	cache_ctl.index = req->r_readdir_cache_idx;
	fpos_offset = req->r_readdir_offset;

	/* FIXME: release caps/leases if error occurs */
	for (i = 0; i < rinfo->dir_nr; i++) {
		struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
		struct ceph_vino vino;

		dname.name = rde->name;
		dname.len = rde->name_len;
		dname.hash = full_name_hash(dname.name, dname.len);

		vino.ino = le64_to_cpu(rde->inode.in->ino);
		vino.snap = le64_to_cpu(rde->inode.in->snapid);

		if (rinfo->hash_order) {
			u32 hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
						 rde->name, rde->name_len);
			hash = ceph_frag_value(hash);
			if (hash != last_hash)
				fpos_offset = 2;
			last_hash = hash;
			rde->offset = ceph_make_fpos(hash, fpos_offset++, true);
		} else {
			rde->offset = ceph_make_fpos(frag, fpos_offset++, false);
		}

retry_lookup:
		dn = d_lookup(parent, &dname);
		dout("d_lookup on parent=%p name=%.*s got %p\n",
		     parent, dname.len, dname.name, dn);

		if (!dn) {
			dn = d_alloc(parent, &dname);
			dout("d_alloc %p '%.*s' = %p\n", parent,
			     dname.len, dname.name, dn);
			if (dn == NULL) {
				dout("d_alloc badness\n");
				err = -ENOMEM;
				goto out;
			}
			ret = ceph_init_dentry(dn);
			if (ret < 0) {
				dput(dn);
				err = ret;
				goto out;
			}
		} else if (d_really_is_positive(dn) &&
			   (ceph_ino(d_inode(dn)) != vino.ino ||
			    ceph_snap(d_inode(dn)) != vino.snap)) {
			dout(" dn %p points to wrong inode %p\n",
			     dn, d_inode(dn));
			d_delete(dn);
			dput(dn);
			goto retry_lookup;
		}

		/* inode */
		if (d_really_is_positive(dn)) {
			in = d_inode(dn);
		} else {
			in = ceph_get_inode(parent->d_sb, vino);
			if (IS_ERR(in)) {
				dout("new_inode badness\n");
				d_drop(dn);
				dput(dn);
				err = PTR_ERR(in);
				goto out;
			}
		}

		ret = fill_inode(in, NULL, &rde->inode, NULL, session,
				 req->r_request_started, -1,
				 &req->r_caps_reservation);
		if (ret < 0) {
			pr_err("fill_inode badness on %p\n", in);
			if (d_really_is_negative(dn))
				iput(in);
			d_drop(dn);
			err = ret;
			goto next_item;
		}

		if (d_really_is_negative(dn)) {
			struct dentry *realdn;

			if (ceph_security_xattr_deadlock(in)) {
				dout(" skip splicing dn %p to inode %p"
				     " (security xattr deadlock)\n", dn, in);
				iput(in);
				skipped++;
				goto next_item;
			}

			realdn = splice_dentry(dn, in);
			if (IS_ERR(realdn)) {
				err = PTR_ERR(realdn);
				d_drop(dn);
				dn = NULL;
				goto next_item;
			}
			dn = realdn;
		}

		ceph_dentry(dn)->offset = rde->offset;

		update_dentry_lease(dn, rde->lease, req->r_session,
				    req->r_request_started);

		if (err == 0 && skipped == 0 && cache_ctl.index >= 0) {
			ret = fill_readdir_cache(d_inode(parent), dn,
						 &cache_ctl, req);
			if (ret < 0)
				err = ret;
		}
next_item:
		if (dn)
			dput(dn);
	}
out:
	if (err == 0 && skipped == 0) {
		req->r_did_prepopulate = true;
		req->r_readdir_cache_idx = cache_ctl.index;
	}
	ceph_readdir_cache_release(&cache_ctl);
	if (snapdir) {
		iput(snapdir);
		dput(parent);
	}
	dout("readdir_prepopulate done\n");
	return err;
}

int ceph_inode_set_size(struct inode *inode, loff_t size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret = 0;

	spin_lock(&ci->i_ceph_lock);
	dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
	i_size_write(inode, size);
	inode->i_blocks = calc_inode_blocks(size);

	/* tell the MDS if we are approaching max_size */
	if ((size << 1) >= ci->i_max_size &&
	    (ci->i_reported_size << 1) < ci->i_max_size)
		ret = 1;

	spin_unlock(&ci->i_ceph_lock);
	return ret;
}

/*
 * Write back inode data in a worker thread.  (This can't be done
 * in the message handler context.)
 */
void ceph_queue_writeback(struct inode *inode)
{
	ihold(inode);
	if (queue_work(ceph_inode_to_client(inode)->wb_wq,
		       &ceph_inode(inode)->i_wb_work)) {
		dout("ceph_queue_writeback %p\n", inode);
	} else {
		dout("ceph_queue_writeback %p failed\n", inode);
		iput(inode);
	}
}
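
/*
 * Note on the reference counting above (and in the other ceph_queue_*
 * helpers below): ihold() pins the inode before queue_work(); if the
 * work item was already queued, queue_work() returns false and the
 * extra reference is dropped right away, otherwise the worker drops it
 * when it is done with the inode.
 */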

static void ceph_writeback_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_wb_work);
	struct inode *inode = &ci->vfs_inode;

	dout("writeback %p\n", inode);
	filemap_fdatawrite(&inode->i_data);
	iput(inode);
}

/*
 * queue an async invalidation
 */
void ceph_queue_invalidate(struct inode *inode)
{
	ihold(inode);
	if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq,
		       &ceph_inode(inode)->i_pg_inv_work)) {
		dout("ceph_queue_invalidate %p\n", inode);
	} else {
		dout("ceph_queue_invalidate %p failed\n", inode);
		iput(inode);
	}
}

/*
 * Invalidate inode pages in a worker thread.  (This can't be done
 * in the message handler context.)
 */
static void ceph_invalidate_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_pg_inv_work);
	struct inode *inode = &ci->vfs_inode;
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	u32 orig_gen;
	int check = 0;

	mutex_lock(&ci->i_truncate_mutex);

	if (ACCESS_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
		pr_warn_ratelimited("invalidate_pages %p %lld forced umount\n",
				    inode, ceph_ino(inode));
		mapping_set_error(inode->i_mapping, -EIO);
		truncate_pagecache(inode, 0);
		mutex_unlock(&ci->i_truncate_mutex);
		goto out;
	}

	spin_lock(&ci->i_ceph_lock);
	dout("invalidate_pages %p gen %d revoking %d\n", inode,
	     ci->i_rdcache_gen, ci->i_rdcache_revoking);
	if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
		if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
			check = 1;
		spin_unlock(&ci->i_ceph_lock);
		mutex_unlock(&ci->i_truncate_mutex);
		goto out;
	}
	orig_gen = ci->i_rdcache_gen;
	spin_unlock(&ci->i_ceph_lock);

	if (invalidate_inode_pages2(inode->i_mapping) < 0) {
		pr_err("invalidate_pages %p fails\n", inode);
	}

	spin_lock(&ci->i_ceph_lock);
	if (orig_gen == ci->i_rdcache_gen &&
	    orig_gen == ci->i_rdcache_revoking) {
		dout("invalidate_pages %p gen %d successful\n", inode,
		     ci->i_rdcache_gen);
		ci->i_rdcache_revoking--;
		check = 1;
	} else {
		dout("invalidate_pages %p gen %d raced, now %d revoking %d\n",
		     inode, orig_gen, ci->i_rdcache_gen,
		     ci->i_rdcache_revoking);
		if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
			check = 1;
	}
	spin_unlock(&ci->i_ceph_lock);
	mutex_unlock(&ci->i_truncate_mutex);
out:
	if (check)
		ceph_check_caps(ci, 0, NULL);
	iput(inode);
}
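
/*
 * The i_rdcache_gen/i_rdcache_revoking comparison above detects whether
 * another grant/revoke cycle started while invalidate_inode_pages2()
 * was running; only an unraced pass decrements i_rdcache_revoking to
 * mark this revocation complete, otherwise the caps code is left to
 * queue another invalidation later.
 */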


/*
 * called by trunc_wq;
 *
 * We also truncate in a separate thread.
 */
static void ceph_vmtruncate_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_vmtruncate_work);
	struct inode *inode = &ci->vfs_inode;

	dout("vmtruncate_work %p\n", inode);
	__ceph_do_pending_vmtruncate(inode);
	iput(inode);
}

/*
 * Queue an async vmtruncate.  If we fail to queue work, we will handle
 * the truncation the next time we call __ceph_do_pending_vmtruncate.
 */
void ceph_queue_vmtruncate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	ihold(inode);

	if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq,
		       &ci->i_vmtruncate_work)) {
		dout("ceph_queue_vmtruncate %p\n", inode);
	} else {
		dout("ceph_queue_vmtruncate %p failed, pending=%d\n",
		     inode, ci->i_truncate_pending);
		iput(inode);
	}
}

/*
 * Make sure any pending truncation is applied before doing anything
 * that may depend on it.
 */
void __ceph_do_pending_vmtruncate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 to;
	int wrbuffer_refs, finish = 0;

	mutex_lock(&ci->i_truncate_mutex);
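	/*
	 * We may loop: flushing snap data or racing with a new truncation
	 * can change i_truncate_size/i_truncate_pending under us, so keep
	 * retrying until the size we truncated to is still the current one.
	 */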
retry:
	spin_lock(&ci->i_ceph_lock);
	if (ci->i_truncate_pending == 0) {
		dout("__do_pending_vmtruncate %p none pending\n", inode);
		spin_unlock(&ci->i_ceph_lock);
		mutex_unlock(&ci->i_truncate_mutex);
		return;
	}

	/*
	 * make sure any dirty snapped pages are flushed before we
	 * possibly truncate them.. so write AND block!
	 */
	if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
		dout("__do_pending_vmtruncate %p flushing snaps first\n",
		     inode);
		spin_unlock(&ci->i_ceph_lock);
		filemap_write_and_wait_range(&inode->i_data, 0,
					     inode->i_sb->s_maxbytes);
		goto retry;
	}

	/* there should be no reader or writer */
	WARN_ON_ONCE(ci->i_rd_ref || ci->i_wr_ref);

	to = ci->i_truncate_size;
	wrbuffer_refs = ci->i_wrbuffer_ref;
	dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
	     ci->i_truncate_pending, to);
	spin_unlock(&ci->i_ceph_lock);

	truncate_pagecache(inode, to);

	spin_lock(&ci->i_ceph_lock);
	if (to == ci->i_truncate_size) {
		ci->i_truncate_pending = 0;
		finish = 1;
	}
	spin_unlock(&ci->i_ceph_lock);
	if (!finish)
		goto retry;

	mutex_unlock(&ci->i_truncate_mutex);

	if (wrbuffer_refs == 0)
		ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);

	wake_up_all(&ci->i_cap_wq);
}

/*
 * symlinks
 */
static const struct inode_operations ceph_symlink_iops = {
	.readlink = generic_readlink,
	.get_link = simple_get_link,
	.setattr = ceph_setattr,
	.getattr = ceph_getattr,
	.setxattr = generic_setxattr,
	.getxattr = generic_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = generic_removexattr,
};

int __ceph_setattr(struct inode *inode, struct iattr *attr)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	const unsigned int ia_valid = attr->ia_valid;
	struct ceph_mds_request *req;
	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
	struct ceph_cap_flush *prealloc_cf;
	int issued;
	int release = 0, dirtied = 0;
	int mask = 0;
	int err = 0;
	int inode_dirty_flags = 0;
	bool lock_snap_rwsem = false;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	err = inode_change_ok(inode, attr);
	if (err != 0)
		return err;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
				       USE_AUTH_MDS);
	if (IS_ERR(req)) {
		ceph_free_cap_flush(prealloc_cf);
		return PTR_ERR(req);
	}

	spin_lock(&ci->i_ceph_lock);
	issued = __ceph_caps_issued(ci, NULL);

	if (!ci->i_head_snapc &&
	    (issued & (CEPH_CAP_ANY_EXCL | CEPH_CAP_FILE_WR))) {
		lock_snap_rwsem = true;
		if (!down_read_trylock(&mdsc->snap_rwsem)) {
			spin_unlock(&ci->i_ceph_lock);
			down_read(&mdsc->snap_rwsem);
			spin_lock(&ci->i_ceph_lock);
			issued = __ceph_caps_issued(ci, NULL);
		}
	}

	dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));

	if (ia_valid & ATTR_UID) {
		dout("setattr %p uid %d -> %d\n", inode,
		     from_kuid(&init_user_ns, inode->i_uid),
		     from_kuid(&init_user_ns, attr->ia_uid));
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_uid = attr->ia_uid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   !uid_eq(attr->ia_uid, inode->i_uid)) {
			req->r_args.setattr.uid = cpu_to_le32(
				from_kuid(&init_user_ns, attr->ia_uid));
			mask |= CEPH_SETATTR_UID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_GID) {
		dout("setattr %p gid %d -> %d\n", inode,
		     from_kgid(&init_user_ns, inode->i_gid),
		     from_kgid(&init_user_ns, attr->ia_gid));
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_gid = attr->ia_gid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   !gid_eq(attr->ia_gid, inode->i_gid)) {
			req->r_args.setattr.gid = cpu_to_le32(
				from_kgid(&init_user_ns, attr->ia_gid));
			mask |= CEPH_SETATTR_GID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_MODE) {
		dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
		     attr->ia_mode);
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_mode = attr->ia_mode;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   attr->ia_mode != inode->i_mode) {
			inode->i_mode = attr->ia_mode;
			req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
			mask |= CEPH_SETATTR_MODE;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}

	if (ia_valid & ATTR_ATIME) {
		dout("setattr %p atime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
		     attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_atime,
					    &attr->ia_atime) < 0) {
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_atime, &attr->ia_atime)) {
			ceph_encode_timespec(&req->r_args.setattr.atime,
					     &attr->ia_atime);
			mask |= CEPH_SETATTR_ATIME;
			release |= CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_MTIME) {
		dout("setattr %p mtime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
		     attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_mtime,
					    &attr->ia_mtime) < 0) {
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_mtime, &attr->ia_mtime)) {
			ceph_encode_timespec(&req->r_args.setattr.mtime,
					     &attr->ia_mtime);
			mask |= CEPH_SETATTR_MTIME;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_SIZE) {
		dout("setattr %p size %lld -> %lld\n", inode,
		     inode->i_size, attr->ia_size);
		if ((issued & CEPH_CAP_FILE_EXCL) &&
		    attr->ia_size > inode->i_size) {
			i_size_write(inode, attr->ia_size);
			inode->i_blocks = calc_inode_blocks(attr->ia_size);
			inode->i_ctime = attr->ia_ctime;
			ci->i_reported_size = attr->ia_size;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   attr->ia_size != inode->i_size) {
			req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
			req->r_args.setattr.old_size =
				cpu_to_le64(inode->i_size);
			mask |= CEPH_SETATTR_SIZE;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}

	/* these do nothing */
	if (ia_valid & ATTR_CTIME) {
		bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
					 ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
		dout("setattr %p ctime %ld.%ld -> %ld.%ld (%s)\n", inode,
		     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
		     attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
		     only ? "ctime only" : "ignored");
		inode->i_ctime = attr->ia_ctime;
		if (only) {
			/*
			 * if kernel wants to dirty ctime but nothing else,
			 * we need to choose a cap to dirty under, or do
			 * an almost-no-op setattr
			 */
			if (issued & CEPH_CAP_AUTH_EXCL)
				dirtied |= CEPH_CAP_AUTH_EXCL;
			else if (issued & CEPH_CAP_FILE_EXCL)
				dirtied |= CEPH_CAP_FILE_EXCL;
			else if (issued & CEPH_CAP_XATTR_EXCL)
				dirtied |= CEPH_CAP_XATTR_EXCL;
			else
				mask |= CEPH_SETATTR_CTIME;
		}
	}
	if (ia_valid & ATTR_FILE)
		dout("setattr %p ATTR_FILE ... hrm!\n", inode);

	if (dirtied) {
		inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied,
							   &prealloc_cf);
		inode->i_ctime = current_fs_time(inode->i_sb);
	}

	release &= issued;
	spin_unlock(&ci->i_ceph_lock);
	if (lock_snap_rwsem)
		up_read(&mdsc->snap_rwsem);

	if (inode_dirty_flags)
		__mark_inode_dirty(inode, inode_dirty_flags);

	if (ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(inode, attr->ia_mode);
		if (err)
			goto out_put;
	}

	if (mask) {
		req->r_inode = inode;
		ihold(inode);
		req->r_inode_drop = release;
		req->r_args.setattr.mask = cpu_to_le32(mask);
		req->r_num_caps = 1;
		err = ceph_mdsc_do_request(mdsc, NULL, req);
	}
	dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
	     ceph_cap_string(dirtied), mask);

	ceph_mdsc_put_request(req);
	if (mask & CEPH_SETATTR_SIZE)
		__ceph_do_pending_vmtruncate(inode);
	ceph_free_cap_flush(prealloc_cf);
	return err;
out_put:
	ceph_mdsc_put_request(req);
	ceph_free_cap_flush(prealloc_cf);
	return err;
}

/*
 * setattr
 */
int ceph_setattr(struct dentry *dentry, struct iattr *attr)
{
	return __ceph_setattr(d_inode(dentry), attr);
}
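
/*
 * ceph_setattr() is the thin dentry-based entry point; __ceph_setattr()
 * is kept separate so that paths which only have the inode in hand
 * (presumably the ACL code updating the mode, among others) can reuse
 * the same attribute-update logic.
 */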

/*
 * Verify that we have a lease on the given mask.  If not,
 * do a getattr against an mds.
 */
int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
		      int mask, bool force)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		dout("do_getattr inode %p SNAPDIR\n", inode);
		return 0;
	}

	dout("do_getattr inode %p mask %s mode 0%o\n",
	     inode, ceph_cap_string(mask), inode->i_mode);
	if (!force && ceph_caps_issued_mask(ceph_inode(inode), mask, 1))
		return 0;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_inode = inode;
	ihold(inode);
	req->r_num_caps = 1;
	req->r_args.getattr.mask = cpu_to_le32(mask);
	req->r_locked_page = locked_page;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	if (locked_page && err == 0) {
		u64 inline_version = req->r_reply_info.targeti.inline_version;
		if (inline_version == 0) {
			/* the reply is supposed to contain inline data */
			err = -EINVAL;
		} else if (inline_version == CEPH_INLINE_NONE) {
			err = -ENODATA;
		} else {
			err = req->r_reply_info.targeti.inline_len;
		}
	}
	ceph_mdsc_put_request(req);
	dout("do_getattr result=%d\n", err);
	return err;
}
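
/*
 * The plain ceph_do_getattr() used elsewhere in this file is assumed to
 * be a thin wrapper (defined in super.h) that passes a NULL locked_page,
 * roughly:
 *
 *	static inline int ceph_do_getattr(struct inode *inode,
 *					  int mask, bool force)
 *	{
 *		return __ceph_do_getattr(inode, NULL, mask, force);
 *	}
 */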


/*
 * Check inode permissions.  We verify we have a valid value for
 * the AUTH cap, then call the generic handler.
 */
int ceph_permission(struct inode *inode, int mask)
{
	int err;
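	/*
	 * In RCU-walk (MAY_NOT_BLOCK) mode we might have to issue a
	 * getattr to the MDS, which can sleep, so return -ECHILD and let
	 * the VFS retry the permission check in ref-walk mode.
	 */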

	if (mask & MAY_NOT_BLOCK)
		return -ECHILD;

	err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED, false);

	if (!err)
		err = generic_permission(inode, mask);
	return err;
}

/*
 * Get all attributes.  Hopefully someday we'll have a statlite()
 * and can limit the fields we require to be accurate.
 */
int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry,
		 struct kstat *stat)
{
	struct inode *inode = d_inode(dentry);
	struct ceph_inode_info *ci = ceph_inode(inode);
	int err;

	err = ceph_do_getattr(inode, CEPH_STAT_CAP_INODE_ALL, false);
	if (!err) {
		generic_fillattr(inode, stat);
		stat->ino = ceph_translate_ino(inode->i_sb, inode->i_ino);
		if (ceph_snap(inode) != CEPH_NOSNAP)
			stat->dev = ceph_snap(inode);
		else
			stat->dev = 0;
		if (S_ISDIR(inode->i_mode)) {
			if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb),
						RBYTES))
				stat->size = ci->i_rbytes;
			else
				stat->size = ci->i_files + ci->i_subdirs;
			stat->blocks = 0;
			stat->blksize = 65536;
		}
	}
	return err;
}