// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/falloc.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"

static __le32 ceph_flags_sys2wire(u32 flags)
{
	u32 wire_flags = 0;

	switch (flags & O_ACCMODE) {
	case O_RDONLY:
		wire_flags |= CEPH_O_RDONLY;
		break;
	case O_WRONLY:
		wire_flags |= CEPH_O_WRONLY;
		break;
	case O_RDWR:
		wire_flags |= CEPH_O_RDWR;
		break;
	}

	flags &= ~O_ACCMODE;

#define ceph_sys2wire(a) if (flags & a) { wire_flags |= CEPH_##a; flags &= ~a; }
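/*
 * For example, ceph_sys2wire(O_CREAT) expands to
 *
 *	if (flags & O_CREAT) { wire_flags |= CEPH_O_CREAT; flags &= ~O_CREAT; }
 *
 * so each handled flag is translated to its wire counterpart and then
 * cleared from @flags.
 */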

	ceph_sys2wire(O_CREAT);
	ceph_sys2wire(O_EXCL);
	ceph_sys2wire(O_TRUNC);
	ceph_sys2wire(O_DIRECTORY);
	ceph_sys2wire(O_NOFOLLOW);

#undef ceph_sys2wire

	if (flags)
		dout("unused open flags: %x\n", flags);

	return cpu_to_le32(wire_flags);
}

/*
 * Ceph file operations
 *
 * Implement basic open/close functionality, and implement
 * read/write.
 *
 * We implement three modes of file I/O:
 *  - buffered uses the generic_file_read_iter/generic_perform_write helpers
 *
 *  - synchronous is used when there is multi-client read/write
 *    sharing, avoids the page cache, and synchronously waits for an
 *    ack from the OSD.
 *
 *  - direct io takes the variant of the sync path that references
 *    user pages directly.
 *
 * fsync() flushes and waits on dirty pages, but just queues metadata
 * for writeback: since the MDS can recover size and mtime there is no
 * need to wait for MDS acknowledgement.
 */
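
/*
 * In this file the synchronous path is implemented by ceph_sync_read() and
 * ceph_sync_write(), and the direct I/O path by ceph_direct_read_write().
 */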

/*
 * How many pages to get in one call to iov_iter_get_pages().  This
 * determines the size of the on-stack array used as a buffer.
 */
#define ITER_GET_BVECS_PAGES	64
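/*
 * 64 entries keep the on-stack array in __iter_get_bvecs() small:
 * 64 * sizeof(struct page *) is 512 bytes on a 64-bit build.
 */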

static ssize_t __iter_get_bvecs(struct iov_iter *iter, size_t maxsize,
				struct bio_vec *bvecs)
{
	size_t size = 0;
	int bvec_idx = 0;

	if (maxsize > iov_iter_count(iter))
		maxsize = iov_iter_count(iter);

	while (size < maxsize) {
		struct page *pages[ITER_GET_BVECS_PAGES];
		ssize_t bytes;
		size_t start;
		int idx = 0;

		bytes = iov_iter_get_pages(iter, pages, maxsize - size,
					   ITER_GET_BVECS_PAGES, &start);
		if (bytes < 0)
			return size ?: bytes;

		iov_iter_advance(iter, bytes);
		size += bytes;

		for ( ; bytes; idx++, bvec_idx++) {
			struct bio_vec bv = {
				.bv_page = pages[idx],
				.bv_len = min_t(int, bytes, PAGE_SIZE - start),
				.bv_offset = start,
			};

			bvecs[bvec_idx] = bv;
			bytes -= bv.bv_len;
			start = 0;
		}
	}

	return size;
}

/*
 * iov_iter_get_pages() only considers one iov_iter segment, no matter
 * what maxsize or maxpages are given.  For ITER_BVEC that is a single
 * page.
 *
 * Attempt to get up to @maxsize bytes worth of pages from @iter.
 * Return the number of bytes in the created bio_vec array, or an error.
 */
static ssize_t iter_get_bvecs_alloc(struct iov_iter *iter, size_t maxsize,
				    struct bio_vec **bvecs, int *num_bvecs)
{
	struct bio_vec *bv;
	size_t orig_count = iov_iter_count(iter);
	ssize_t bytes;
	int npages;

	iov_iter_truncate(iter, maxsize);
	npages = iov_iter_npages(iter, INT_MAX);
	iov_iter_reexpand(iter, orig_count);

	/*
	 * __iter_get_bvecs() may populate only part of the array -- zero it
	 * out.
	 */
	bv = kvmalloc_array(npages, sizeof(*bv), GFP_KERNEL | __GFP_ZERO);
	if (!bv)
		return -ENOMEM;

	bytes = __iter_get_bvecs(iter, maxsize, bv);
	if (bytes < 0) {
		/*
		 * No pages were pinned -- just free the array.
		 */
		kvfree(bv);
		return bytes;
	}

	*bvecs = bv;
	*num_bvecs = npages;
	return bytes;
}

static void put_bvecs(struct bio_vec *bvecs, int num_bvecs, bool should_dirty)
{
	int i;

	for (i = 0; i < num_bvecs; i++) {
		if (bvecs[i].bv_page) {
			if (should_dirty)
				set_page_dirty_lock(bvecs[i].bv_page);
			put_page(bvecs[i].bv_page);
		}
	}
	kvfree(bvecs);
}

/*
 * Prepare an open request.  Preallocate ceph_cap to avoid an
 * inopportune ENOMEM later.
 */
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int want_auth = USE_ANY_MDS;
	int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;

	if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
		want_auth = USE_AUTH_MDS;

	req = ceph_mdsc_create_request(mdsc, op, want_auth);
	if (IS_ERR(req))
		goto out;
	req->r_fmode = ceph_flags_to_mode(flags);
	req->r_args.open.flags = ceph_flags_sys2wire(flags);
	req->r_args.open.mode = cpu_to_le32(create_mode);
out:
	return req;
}

static int ceph_init_file_info(struct inode *inode, struct file *file,
					int fmode, bool isdir)
{
	struct ceph_file_info *fi;

	dout("%s %p %p 0%o (%s)\n", __func__, inode, file,
			inode->i_mode, isdir ? "dir" : "regular");
	BUG_ON(inode->i_fop->release != ceph_release);

	if (isdir) {
		struct ceph_dir_file_info *dfi =
			kmem_cache_zalloc(ceph_dir_file_cachep, GFP_KERNEL);
		if (!dfi) {
			ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
			return -ENOMEM;
		}

		file->private_data = dfi;
		fi = &dfi->file_info;
		dfi->next_offset = 2;
		dfi->readdir_cache_idx = -1;
	} else {
		fi = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL);
		if (!fi) {
			ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
			return -ENOMEM;
		}

		file->private_data = fi;
	}

	fi->fmode = fmode;
	spin_lock_init(&fi->rw_contexts_lock);
	INIT_LIST_HEAD(&fi->rw_contexts);

	return 0;
}

/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
	int ret = 0;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		ceph_fscache_register_inode_cookie(inode);
		ceph_fscache_file_set_cookie(inode, file);
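		/* fall through */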
	case S_IFDIR:
		ret = ceph_init_file_info(inode, file, fmode,
						S_ISDIR(inode->i_mode));
		if (ret)
			return ret;
		break;

	case S_IFLNK:
		dout("init_file %p %p 0%o (symlink)\n", inode, file,
		     inode->i_mode);
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		break;

	default:
		dout("init_file %p %p 0%o (special)\n", inode, file,
		     inode->i_mode);
		/*
		 * we need to drop the open ref now, since we don't
		 * have .release set to ceph_release.
		 */
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		BUG_ON(inode->i_fop->release == ceph_release);

		/* call the proper open fop */
		ret = inode->i_fop->open(inode, file);
	}
	return ret;
}

/*
 * try to renew caps after the session gets killed.
 */
int ceph_renew_caps(struct inode *inode)
{
	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_request *req;
	int err, flags, wanted;

	spin_lock(&ci->i_ceph_lock);
	wanted = __ceph_caps_file_wanted(ci);
	if (__ceph_is_any_real_caps(ci) &&
	    (!(wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) {
		int issued = __ceph_caps_issued(ci, NULL);
		spin_unlock(&ci->i_ceph_lock);
		dout("renew caps %p want %s issued %s updating mds_wanted\n",
		     inode, ceph_cap_string(wanted), ceph_cap_string(issued));
		ceph_check_caps(ci, 0, NULL);
		return 0;
	}
	spin_unlock(&ci->i_ceph_lock);

	flags = 0;
	if ((wanted & CEPH_CAP_FILE_RD) && (wanted & CEPH_CAP_FILE_WR))
		flags = O_RDWR;
	else if (wanted & CEPH_CAP_FILE_RD)
		flags = O_RDONLY;
	else if (wanted & CEPH_CAP_FILE_WR)
		flags = O_WRONLY;
#ifdef O_LAZY
	if (wanted & CEPH_CAP_FILE_LAZYIO)
		flags |= O_LAZY;
#endif

	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_inode = inode;
	ihold(inode);
	req->r_num_caps = 1;
	req->r_fmode = -1;

	err = ceph_mdsc_do_request(mdsc, NULL, req);
	ceph_mdsc_put_request(req);
out:
	dout("renew caps %p open result=%d\n", inode, err);
	return err < 0 ? err : 0;
}

/*
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS).  We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_file_info *fi = file->private_data;
	int err;
	int flags, fmode, wanted;

	if (fi) {
		dout("open file %p is already opened\n", file);
		return 0;
	}

	/* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
	flags = file->f_flags & ~(O_CREAT|O_EXCL);
	if (S_ISDIR(inode->i_mode))
		flags = O_DIRECTORY;  /* mds likes to know */

	dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
	     ceph_vinop(inode), file, flags, file->f_flags);
	fmode = ceph_flags_to_mode(flags);
	wanted = ceph_caps_for_mode(fmode);

	/* snapped files are read-only */
	if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
		return -EROFS;

	/* trivially open snapdir */
	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		spin_lock(&ci->i_ceph_lock);
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}

	/*
	 * No need to block if we have caps on the auth MDS (for
	 * write) or any MDS (for read).  Update wanted set
	 * asynchronously.
	 */
	spin_lock(&ci->i_ceph_lock);
	if (__ceph_is_any_real_caps(ci) &&
	    (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
		int mds_wanted = __ceph_caps_mds_wanted(ci, true);
		int issued = __ceph_caps_issued(ci, NULL);

		dout("open %p fmode %d want %s issued %s using existing\n",
		     inode, fmode, ceph_cap_string(wanted),
		     ceph_cap_string(issued));
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);

		/* adjust wanted? */
		if ((issued & wanted) != wanted &&
		    (mds_wanted & wanted) != wanted &&
		    ceph_snap(inode) != CEPH_SNAPDIR)
			ceph_check_caps(ci, 0, NULL);

		return ceph_init_file(inode, file, fmode);
	} else if (ceph_snap(inode) != CEPH_NOSNAP &&
		   (ci->i_snap_caps & wanted) == wanted) {
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}

	spin_unlock(&ci->i_ceph_lock);

	dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_inode = inode;
	ihold(inode);

	req->r_num_caps = 1;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	if (!err)
		err = ceph_init_file(inode, file, req->r_fmode);
	ceph_mdsc_put_request(req);
	dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
	return err;
}


/*
 * Do a lookup + open with a single request.  If we get a non-existent
 * file or symlink, return 1 so the VFS can retry.
 */
int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
		     struct file *file, unsigned flags, umode_t mode,
		     int *opened)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct dentry *dn;
	struct ceph_acls_info acls = {};
	int mask;
	int err;

	dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
	     dir, dentry, dentry,
	     d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);

	if (dentry->d_name.len > NAME_MAX)
		return -ENAMETOOLONG;

	if (flags & O_CREAT) {
		if (ceph_quota_is_max_files_exceeded(dir))
			return -EDQUOT;
		err = ceph_pre_init_acls(dir, &mode, &acls);
		if (err < 0)
			return err;
	}

	/* do the open */
	req = prepare_open_request(dir->i_sb, flags, mode);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out_acl;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	if (flags & O_CREAT) {
		req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
		req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
		if (acls.pagelist) {
			req->r_pagelist = acls.pagelist;
			acls.pagelist = NULL;
		}
	}

	mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
	if (ceph_security_xattr_wanted(dir))
		mask |= CEPH_CAP_XATTR_SHARED;
	req->r_args.open.mask = cpu_to_le32(mask);

	req->r_parent = dir;
	set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
	err = ceph_mdsc_do_request(mdsc,
				   (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
				   req);
	err = ceph_handle_snapdir(req, dentry, err);
	if (err)
		goto out_req;

	if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);

	if (d_in_lookup(dentry)) {
		dn = ceph_finish_lookup(req, dentry, err);
		if (IS_ERR(dn))
			err = PTR_ERR(dn);
	} else {
		/* we were given a hashed negative dentry */
		dn = NULL;
	}
	if (err)
		goto out_req;
	if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
		/* make vfs retry on splice, ENOENT, or symlink */
		dout("atomic_open finish_no_open on dn %p\n", dn);
		err = finish_no_open(file, dn);
	} else {
		dout("atomic_open finish_open on dn %p\n", dn);
		if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
			ceph_init_inode_acls(d_inode(dentry), &acls);
			*opened |= FILE_CREATED;
		}
		err = finish_open(file, dentry, ceph_open, opened);
	}
out_req:
	if (!req->r_err && req->r_target_inode)
		ceph_put_fmode(ceph_inode(req->r_target_inode), req->r_fmode);
	ceph_mdsc_put_request(req);
out_acl:
	ceph_release_acls_info(&acls);
	dout("atomic_open result=%d\n", err);
	return err;
}

int ceph_release(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (S_ISDIR(inode->i_mode)) {
		struct ceph_dir_file_info *dfi = file->private_data;
		dout("release inode %p dir file %p\n", inode, file);
		WARN_ON(!list_empty(&dfi->file_info.rw_contexts));

		ceph_put_fmode(ci, dfi->file_info.fmode);

		if (dfi->last_readdir)
			ceph_mdsc_put_request(dfi->last_readdir);
		kfree(dfi->last_name);
		kfree(dfi->dir_info);
		kmem_cache_free(ceph_dir_file_cachep, dfi);
	} else {
		struct ceph_file_info *fi = file->private_data;
		dout("release inode %p regular file %p\n", inode, file);
		WARN_ON(!list_empty(&fi->rw_contexts));

		ceph_put_fmode(ci, fi->fmode);
		kmem_cache_free(ceph_file_cachep, fi);
	}

	/* wake up anyone waiting for caps on this inode */
	wake_up_all(&ci->i_cap_wq);
	return 0;
}

enum {
	HAVE_RETRIED = 1,
	CHECK_EOF =    2,
	READ_INLINE =  3,
};
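/*
 * retry_op states for ceph_read_iter(): CHECK_EOF means the read may have
 * hit EOF or a hole and i_size should be re-checked, READ_INLINE means the
 * data must be re-read as inline data via getattr, and HAVE_RETRIED marks
 * that the short read has already been retried once.
 */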

/*
 * Read a range of bytes striped over one or more objects.  Iterate over
 * objects we stripe over.  (That's not atomic, but good enough for now.)
 *
 * If we get a short result from the OSD, check against i_size; we need to
 * only return a short read to the caller if we hit EOF.
 */
static int striped_read(struct inode *inode,
			u64 pos, u64 len,
			struct page **pages, int num_pages,
			int page_align, int *checkeof)
{
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 this_len;
	loff_t i_size;
	int page_idx;
	int ret, read = 0;
	bool hit_stripe, was_short;

	/*
	 * we may need to do multiple reads.  not atomic, unfortunately.
	 */
more:
	this_len = len;
	page_idx = (page_align + read) >> PAGE_SHIFT;
	ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode),
				  &ci->i_layout, pos, &this_len,
				  ci->i_truncate_seq, ci->i_truncate_size,
				  pages + page_idx, num_pages - page_idx,
				  ((page_align + read) & ~PAGE_MASK));
	if (ret == -ENOENT)
		ret = 0;
	hit_stripe = this_len < len;
	was_short = ret >= 0 && ret < this_len;
	dout("striped_read %llu~%llu (read %u) got %d%s%s\n", pos, len, read,
	     ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");

	i_size = i_size_read(inode);
	if (ret >= 0) {
		if (was_short && (pos + ret < i_size)) {
			int zlen = min(this_len - ret, i_size - pos - ret);
			int zoff = page_align + read + ret;
			dout(" zero gap %llu to %llu\n",
			     pos + ret, pos + ret + zlen);
			ceph_zero_page_vector_range(zoff, zlen, pages);
			ret += zlen;
		}

		read += ret;
		pos += ret;
		len -= ret;

		/* hit a stripe boundary and need to continue */
		if (len && hit_stripe && pos < i_size)
			goto more;
	}

	if (read > 0) {
		ret = read;
		/* did we bounce off eof? */
		if (pos + len > i_size)
			*checkeof = CHECK_EOF;
	}

	dout("striped_read returns %d\n", ret);
	return ret;
}

/*
 * Completely synchronous read and write methods.  Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans object boundary, just do multiple reads.
 */
static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
			      int *checkeof)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct page **pages;
	u64 off = iocb->ki_pos;
	int num_pages;
	ssize_t ret;
	size_t len = iov_iter_count(to);

	dout("sync_read on file %p %llu~%u %s\n", file, off, (unsigned)len,
	     (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

	if (!len)
		return 0;
	/*
	 * flush any page cache pages in this range.  this
	 * will make concurrent normal and sync io slow,
	 * but it will at least behave sensibly when they are
	 * in sequence.
	 */
	ret = filemap_write_and_wait_range(inode->i_mapping, off,
						off + len);
	if (ret < 0)
		return ret;

	if (unlikely(to->type & ITER_PIPE)) {
		size_t page_off;
		ret = iov_iter_get_pages_alloc(to, &pages, len,
					       &page_off);
		if (ret <= 0)
			return -ENOMEM;
		num_pages = DIV_ROUND_UP(ret + page_off, PAGE_SIZE);

		ret = striped_read(inode, off, ret, pages, num_pages,
				   page_off, checkeof);
		if (ret > 0) {
			iov_iter_advance(to, ret);
			off += ret;
		} else {
			iov_iter_advance(to, 0);
		}
		ceph_put_page_vector(pages, num_pages, false);
	} else {
		num_pages = calc_pages_for(off, len);
		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
		if (IS_ERR(pages))
			return PTR_ERR(pages);

		ret = striped_read(inode, off, len, pages, num_pages,
				   (off & ~PAGE_MASK), checkeof);
		if (ret > 0) {
			int l, k = 0;
			size_t left = ret;

			while (left) {
				size_t page_off = off & ~PAGE_MASK;
				size_t copy = min_t(size_t, left,
						    PAGE_SIZE - page_off);
				l = copy_page_to_iter(pages[k++], page_off,
						      copy, to);
				off += l;
				left -= l;
				if (l < copy)
					break;
			}
		}
		ceph_release_page_vector(pages, num_pages);
	}

	if (off > iocb->ki_pos) {
		ret = off - iocb->ki_pos;
		iocb->ki_pos = off;
	}

	dout("sync_read result %zd\n", ret);
	return ret;
}

struct ceph_aio_request {
	struct kiocb *iocb;
	size_t total_len;
	bool write;
	bool should_dirty;
	int error;
	struct list_head osd_reqs;
	unsigned num_reqs;
	atomic_t pending_reqs;
	struct timespec64 mtime;
	struct ceph_cap_flush *prealloc_cf;
};

struct ceph_aio_work {
	struct work_struct work;
	struct ceph_osd_request *req;
};

static void ceph_aio_retry_work(struct work_struct *work);

static void ceph_aio_complete(struct inode *inode,
			      struct ceph_aio_request *aio_req)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret;

	if (!atomic_dec_and_test(&aio_req->pending_reqs))
		return;

	ret = aio_req->error;
	if (!ret)
		ret = aio_req->total_len;

	dout("ceph_aio_complete %p rc %d\n", inode, ret);

	if (ret >= 0 && aio_req->write) {
		int dirty;

		loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len;
		if (endoff > i_size_read(inode)) {
			if (ceph_inode_set_size(inode, endoff))
				ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
		}

		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &aio_req->prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);

	}

	ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR :
						CEPH_CAP_FILE_RD));

	aio_req->iocb->ki_complete(aio_req->iocb, ret, 0);

	ceph_free_cap_flush(aio_req->prealloc_cf);
	kfree(aio_req);
}

static void ceph_aio_complete_req(struct ceph_osd_request *req)
{
	int rc = req->r_result;
	struct inode *inode = req->r_inode;
	struct ceph_aio_request *aio_req = req->r_priv;
	struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);

	BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_BVECS);
	BUG_ON(!osd_data->num_bvecs);

	dout("ceph_aio_complete_req %p rc %d bytes %u\n",
	     inode, rc, osd_data->bvec_pos.iter.bi_size);

	if (rc == -EOLDSNAPC) {
		struct ceph_aio_work *aio_work;
		BUG_ON(!aio_req->write);

		aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS);
		if (aio_work) {
			INIT_WORK(&aio_work->work, ceph_aio_retry_work);
			aio_work->req = req;
			queue_work(ceph_inode_to_client(inode)->wb_wq,
				   &aio_work->work);
			return;
		}
		rc = -ENOMEM;
	} else if (!aio_req->write) {
		if (rc == -ENOENT)
			rc = 0;
		if (rc >= 0 && osd_data->bvec_pos.iter.bi_size > rc) {
			struct iov_iter i;
			int zlen = osd_data->bvec_pos.iter.bi_size - rc;

			/*
			 * If the read is satisfied by a single OSD request,
			 * it can extend past EOF.  Otherwise the read is
			 * within i_size.
			 */
			if (aio_req->num_reqs == 1) {
				loff_t i_size = i_size_read(inode);
				loff_t endoff = aio_req->iocb->ki_pos + rc;
				if (endoff < i_size)
					zlen = min_t(size_t, zlen,
						     i_size - endoff);
				aio_req->total_len = rc + zlen;
			}

			iov_iter_bvec(&i, ITER_BVEC, osd_data->bvec_pos.bvecs,
				      osd_data->num_bvecs,
				      osd_data->bvec_pos.iter.bi_size);
			iov_iter_advance(&i, rc);
			iov_iter_zero(zlen, &i);
		}
	}

	put_bvecs(osd_data->bvec_pos.bvecs, osd_data->num_bvecs,
		  aio_req->should_dirty);
	ceph_osdc_put_request(req);

	if (rc < 0)
		cmpxchg(&aio_req->error, 0, rc);

	ceph_aio_complete(inode, aio_req);
	return;
}

static void ceph_aio_retry_work(struct work_struct *work)
{
	struct ceph_aio_work *aio_work =
		container_of(work, struct ceph_aio_work, work);
	struct ceph_osd_request *orig_req = aio_work->req;
	struct ceph_aio_request *aio_req = orig_req->r_priv;
	struct inode *inode = orig_req->r_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc;
	struct ceph_osd_request *req;
	int ret;

	spin_lock(&ci->i_ceph_lock);
	if (__ceph_have_pending_cap_snap(ci)) {
		struct ceph_cap_snap *capsnap =
			list_last_entry(&ci->i_cap_snaps,
					struct ceph_cap_snap,
					ci_item);
		snapc = ceph_get_snap_context(capsnap->context);
	} else {
		BUG_ON(!ci->i_head_snapc);
		snapc = ceph_get_snap_context(ci->i_head_snapc);
	}
	spin_unlock(&ci->i_ceph_lock);

	req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 2,
			false, GFP_NOFS);
	if (!req) {
		ret = -ENOMEM;
		req = orig_req;
		goto out;
	}

	req->r_flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
	ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc);
	ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid);

	ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
	if (ret) {
		ceph_osdc_put_request(req);
		req = orig_req;
		goto out;
	}

	req->r_ops[0] = orig_req->r_ops[0];

	req->r_mtime = aio_req->mtime;
	req->r_data_offset = req->r_ops[0].extent.offset;

	ceph_osdc_put_request(orig_req);

	req->r_callback = ceph_aio_complete_req;
	req->r_inode = inode;
	req->r_priv = aio_req;

	ret = ceph_osdc_start_request(req->r_osdc, req, false);
out:
	if (ret < 0) {
		req->r_result = ret;
		ceph_aio_complete_req(req);
	}

	ceph_put_snap_context(snapc);
	kfree(aio_work);
}

static ssize_t
ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
		       struct ceph_snap_context *snapc,
		       struct ceph_cap_flush **pcf)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct bio_vec *bvecs;
	struct ceph_aio_request *aio_req = NULL;
	int num_pages = 0;
	int flags;
	int ret;
	struct timespec64 mtime = current_time(inode);
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos;
	bool write = iov_iter_rw(iter) == WRITE;
	bool should_dirty = !write && iter_is_iovec(iter);

	if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_direct_%s on file %p %lld~%u snapc %p seq %lld\n",
	     (write ? "write" : "read"), file, pos, (unsigned)count,
	     snapc, snapc->seq);

	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
	if (ret < 0)
		return ret;

	if (write) {
		int ret2 = invalidate_inode_pages2_range(inode->i_mapping,
					pos >> PAGE_SHIFT,
					(pos + count) >> PAGE_SHIFT);
		if (ret2 < 0)
			dout("invalidate_inode_pages2_range returned %d\n", ret2);

		flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
	} else {
		flags = CEPH_OSD_FLAG_READ;
	}

	while (iov_iter_count(iter) > 0) {
		u64 size = iov_iter_count(iter);
		ssize_t len;

		if (write)
			size = min_t(u64, size, fsc->mount_options->wsize);
		else
			size = min_t(u64, size, fsc->mount_options->rsize);

		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &size, 0,
					    1,
					    write ? CEPH_OSD_OP_WRITE :
						    CEPH_OSD_OP_READ,
					    flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages);
		if (len < 0) {
			ceph_osdc_put_request(req);
			ret = len;
			break;
		}
		if (len != size)
			osd_req_op_extent_update(req, 0, len);

		/*
		 * To simplify error handling, allow AIO only when the IO is
		 * within i_size or can be satisfied by a single OSD request.
		 */
		if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) &&
		    (len == count || pos + count <= i_size_read(inode))) {
			aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL);
			if (aio_req) {
				aio_req->iocb = iocb;
				aio_req->write = write;
				aio_req->should_dirty = should_dirty;
				INIT_LIST_HEAD(&aio_req->osd_reqs);
				if (write) {
					aio_req->mtime = mtime;
					swap(aio_req->prealloc_cf, *pcf);
				}
			}
			/* ignore error */
		}

		if (write) {
			/*
			 * throw out any page cache pages in this range. this
			 * may block.
			 */
			truncate_inode_pages_range(inode->i_mapping, pos,
					(pos+len) | (PAGE_SIZE - 1));

			req->r_mtime = mtime;
		}

		osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);

		if (aio_req) {
			aio_req->total_len += len;
			aio_req->num_reqs++;
			atomic_inc(&aio_req->pending_reqs);

			req->r_callback = ceph_aio_complete_req;
			req->r_inode = inode;
			req->r_priv = aio_req;
			list_add_tail(&req->r_unsafe_item, &aio_req->osd_reqs);

			pos += len;
			continue;
		}

		ret = ceph_osdc_start_request(req->r_osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

		size = i_size_read(inode);
		if (!write) {
			if (ret == -ENOENT)
				ret = 0;
			if (ret >= 0 && ret < len && pos + ret < size) {
				struct iov_iter i;
				int zlen = min_t(size_t, len - ret,
						 size - pos - ret);

				iov_iter_bvec(&i, ITER_BVEC, bvecs, num_pages,
					      len);
				iov_iter_advance(&i, ret);
				iov_iter_zero(zlen, &i);
				ret += zlen;
			}
			if (ret >= 0)
				len = ret;
		}

		put_bvecs(bvecs, num_pages, should_dirty);
		ceph_osdc_put_request(req);
		if (ret < 0)
			break;

		pos += len;
		if (!write && pos >= size)
			break;

		if (write && pos > size) {
			if (ceph_inode_set_size(inode, pos))
				ceph_check_caps(ceph_inode(inode),
						CHECK_CAPS_AUTHONLY,
						NULL);
		}
	}

	if (aio_req) {
		LIST_HEAD(osd_reqs);

		if (aio_req->num_reqs == 0) {
			kfree(aio_req);
			return ret;
		}

		ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR :
					      CEPH_CAP_FILE_RD);

		list_splice(&aio_req->osd_reqs, &osd_reqs);
		while (!list_empty(&osd_reqs)) {
			req = list_first_entry(&osd_reqs,
					       struct ceph_osd_request,
					       r_unsafe_item);
			list_del_init(&req->r_unsafe_item);
			if (ret >= 0)
				ret = ceph_osdc_start_request(req->r_osdc,
							      req, false);
			if (ret < 0) {
				req->r_result = ret;
				ceph_aio_complete_req(req);
			}
		}
		return -EIOCBQUEUED;
	}

	if (ret != -EOLDSNAPC && pos > iocb->ki_pos) {
		ret = pos - iocb->ki_pos;
		iocb->ki_pos = pos;
	}
	return ret;
}

/*
 * Synchronous write, straight from __user pointer or user pages.
 *
 * If write spans object boundary, just do multiple writes.  (For a
 * correct atomic write, we should e.g. take write locks on all
 * objects, rollback on failure, etc.)
 */
static ssize_t
ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
		struct ceph_snap_context *snapc)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct page **pages;
	u64 len;
	int num_pages;
	int written = 0;
	int flags;
	int ret;
	bool check_caps = false;
	struct timespec64 mtime = current_time(inode);
	size_t count = iov_iter_count(from);

	if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_write on file %p %lld~%u snapc %p seq %lld\n",
	     file, pos, (unsigned)count, snapc, snapc->seq);

	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
	if (ret < 0)
		return ret;

	ret = invalidate_inode_pages2_range(inode->i_mapping,
					    pos >> PAGE_SHIFT,
					    (pos + count) >> PAGE_SHIFT);
	if (ret < 0)
		dout("invalidate_inode_pages2_range returned %d\n", ret);

	flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;

	while ((len = iov_iter_count(from)) > 0) {
		size_t left;
		int n;

		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &len, 0, 1,
					    CEPH_OSD_OP_WRITE, flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		/*
		 * write from beginning of first page,
		 * regardless of io alignment
		 */
		num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;

		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}

		left = len;
		for (n = 0; n < num_pages; n++) {
			size_t plen = min_t(size_t, left, PAGE_SIZE);
			ret = copy_page_from_iter(pages[n], 0, plen, from);
			if (ret != plen) {
				ret = -EFAULT;
				break;
			}
			left -= ret;
		}

		if (ret < 0) {
			ceph_release_page_vector(pages, num_pages);
			goto out;
		}

		req->r_inode = inode;

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
						false, true);

		req->r_mtime = mtime;
		ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

out:
		ceph_osdc_put_request(req);
		if (ret != 0) {
			ceph_set_error_write(ci);
			break;
		}

		ceph_clear_error_write(ci);
		pos += len;
		written += len;
		if (pos > i_size_read(inode)) {
			check_caps = ceph_inode_set_size(inode, pos);
			if (check_caps)
				ceph_check_caps(ceph_inode(inode),
						CHECK_CAPS_AUTHONLY,
						NULL);
		}

	}

	if (ret != -EOLDSNAPC && written > 0) {
		ret = written;
		iocb->ki_pos = pos;
	}
	return ret;
}

/*
 * Wrap generic_file_read_iter with checks for cap bits on the inode.
 * Atomically grab references, so that those bits are not released
 * back to the MDS mid-read.
 *
 * Hmm, the sync read case isn't actually async... should it be?
 */
static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *filp = iocb->ki_filp;
	struct ceph_file_info *fi = filp->private_data;
	size_t len = iov_iter_count(to);
	struct inode *inode = file_inode(filp);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct page *pinned_page = NULL;
	ssize_t ret;
	int want, got = 0;
	int retry_op = 0, read = 0;

again:
	dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
	     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);

	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_CACHE;
	ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, -1, &got, &pinned_page);
	if (ret < 0)
		return ret;

	if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_flags & IOCB_DIRECT) ||
	    (fi->flags & CEPH_F_SYNC)) {

		dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));

		if (ci->i_inline_version == CEPH_INLINE_NONE) {
			if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
				ret = ceph_direct_read_write(iocb, to,
							     NULL, NULL);
				if (ret >= 0 && ret < len)
					retry_op = CHECK_EOF;
			} else {
				ret = ceph_sync_read(iocb, to, &retry_op);
			}
		} else {
			retry_op = READ_INLINE;
		}
	} else {
		CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
		dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));
		ceph_add_rw_context(fi, &rw_ctx);
		ret = generic_file_read_iter(iocb, to);
		ceph_del_rw_context(fi, &rw_ctx);
	}
	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
	if (pinned_page) {
		put_page(pinned_page);
		pinned_page = NULL;
	}
	ceph_put_cap_refs(ci, got);
	if (retry_op > HAVE_RETRIED && ret >= 0) {
		int statret;
		struct page *page = NULL;
		loff_t i_size;
		if (retry_op == READ_INLINE) {
			page = __page_cache_alloc(GFP_KERNEL);
			if (!page)
				return -ENOMEM;
		}

		statret = __ceph_do_getattr(inode, page,
					    CEPH_STAT_CAP_INLINE_DATA, !!page);
		if (statret < 0) {
			if (page)
				__free_page(page);
			if (statret == -ENODATA) {
				BUG_ON(retry_op != READ_INLINE);
				goto again;
			}
			return statret;
		}

		i_size = i_size_read(inode);
		if (retry_op == READ_INLINE) {
			BUG_ON(ret > 0 || read > 0);
			if (iocb->ki_pos < i_size &&
			    iocb->ki_pos < PAGE_SIZE) {
				loff_t end = min_t(loff_t, i_size,
						   iocb->ki_pos + len);
				end = min_t(loff_t, end, PAGE_SIZE);
				if (statret < end)
					zero_user_segment(page, statret, end);
				ret = copy_page_to_iter(page,
						iocb->ki_pos & ~PAGE_MASK,
						end - iocb->ki_pos, to);
				iocb->ki_pos += ret;
				read += ret;
			}
			if (iocb->ki_pos < i_size && read < len) {
				size_t zlen = min_t(size_t, len - read,
						    i_size - iocb->ki_pos);
				ret = iov_iter_zero(zlen, to);
				iocb->ki_pos += ret;
				read += ret;
			}
			__free_pages(page, 0);
			return read;
		}

		/* hit EOF or hole? */
		if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
		    ret < len) {
			dout("sync_read hit hole, ppos %lld < size %lld"
			     ", reading more\n", iocb->ki_pos, i_size);

			read += ret;
			len -= ret;
			retry_op = HAVE_RETRIED;
			goto again;
		}
	}

	if (ret >= 0)
		ret += read;

	return ret;
}

/*
 * Take cap references to avoid releasing caps to MDS mid-write.
 *
 * If we are synchronous, and write with an old snap context, the OSD
 * may return EOLDSNAPC.  In that case, retry the write _after_
 * dropping our cap refs and allowing the pending snap to logically
 * complete _before_ this write occurs.
 *
 * If we are near ENOSPC, write synchronously.
 */
static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc =
		&ceph_sb_to_client(inode->i_sb)->client->osdc;
	struct ceph_cap_flush *prealloc_cf;
	ssize_t count, written = 0;
	int err, want, got;
	loff_t pos;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

retry_snap:
	inode_lock(inode);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	if (iocb->ki_flags & IOCB_APPEND) {
		err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
		if (err < 0)
			goto out;
	}

	err = generic_write_checks(iocb, from);
	if (err <= 0)
		goto out;

	pos = iocb->ki_pos;
	count = iov_iter_count(from);
	if (ceph_quota_is_max_bytes_exceeded(inode, pos + count)) {
		err = -EDQUOT;
		goto out;
	}

	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		err = ceph_uninline_data(file, NULL);
		if (err < 0)
			goto out;
	}

	/* FIXME: not complete since it doesn't account for being at quota */
	if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL)) {
		err = -ENOSPC;
		goto out;
	}

	dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
	     inode, ceph_vinop(inode), pos, count, i_size_read(inode));
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;
	got = 0;
	err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, pos + count,
			    &got, NULL);
	if (err < 0)
		goto out;

	dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC) ||
	    (ci->i_ceph_flags & CEPH_I_ERROR_WRITE)) {
		struct ceph_snap_context *snapc;
		struct iov_iter data;
		inode_unlock(inode);

		spin_lock(&ci->i_ceph_lock);
		if (__ceph_have_pending_cap_snap(ci)) {
			struct ceph_cap_snap *capsnap =
					list_last_entry(&ci->i_cap_snaps,
							struct ceph_cap_snap,
							ci_item);
			snapc = ceph_get_snap_context(capsnap->context);
		} else {
			BUG_ON(!ci->i_head_snapc);
			snapc = ceph_get_snap_context(ci->i_head_snapc);
		}
		spin_unlock(&ci->i_ceph_lock);

		/* we might need to revert back to that point */
		data = *from;
		if (iocb->ki_flags & IOCB_DIRECT)
			written = ceph_direct_read_write(iocb, &data, snapc,
							 &prealloc_cf);
		else
			written = ceph_sync_write(iocb, &data, pos, snapc);
		if (written > 0)
			iov_iter_advance(from, written);
		ceph_put_snap_context(snapc);
	} else {
		/*
		 * No need to acquire the i_truncate_mutex, because the
		 * MDS revokes Fwb caps before sending a truncate message
		 * to us.  We can't hold the Fwb cap while there is a
		 * pending vmtruncate, so a write and a vmtruncate cannot
		 * run at the same time.
		 */
		written = generic_perform_write(file, from, pos);
		if (likely(written >= 0))
			iocb->ki_pos = pos + written;
		inode_unlock(inode);
	}

	if (written >= 0) {
		int dirty;

		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
		if (ceph_quota_is_max_bytes_approaching(inode, iocb->ki_pos))
			ceph_check_caps(ci, CHECK_CAPS_NODELAY, NULL);
	}

	dout("aio_write %p %llx.%llx %llu~%u  dropping cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)count,
	     ceph_cap_string(got));
	ceph_put_cap_refs(ci, got);

	if (written == -EOLDSNAPC) {
		dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n",
		     inode, ceph_vinop(inode), pos, (unsigned)count);
		goto retry_snap;
	}

	if (written >= 0) {
		if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_NEARFULL))
			iocb->ki_flags |= IOCB_DSYNC;
		written = generic_write_sync(iocb, written);
	}

	goto out_unlocked;

out:
	inode_unlock(inode);
out_unlocked:
	ceph_free_cap_flush(prealloc_cf);
	current->backing_dev_info = NULL;
	return written ? written : err;
}

/*
 * llseek.  be sure to verify file size on SEEK_END.
 */
static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t i_size;
	loff_t ret;

	inode_lock(inode);

	if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
		ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
		if (ret < 0)
			goto out;
	}

	i_size = i_size_read(inode);
	switch (whence) {
	case SEEK_END:
		offset += i_size;
		break;
	case SEEK_CUR:
		/*
		 * Here we special-case the lseek(fd, 0, SEEK_CUR)
		 * position-querying operation.  Avoid rewriting the "same"
		 * f_pos value back to the file because a concurrent read(),
		 * write() or lseek() might have altered it
		 */
		if (offset == 0) {
			ret = file->f_pos;
			goto out;
		}
		offset += file->f_pos;
		break;
	case SEEK_DATA:
		if (offset < 0 || offset >= i_size) {
			ret = -ENXIO;
			goto out;
		}
		break;
	case SEEK_HOLE:
		if (offset < 0 || offset >= i_size) {
			ret = -ENXIO;
			goto out;
		}
		offset = i_size;
		break;
	}

	ret = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);

out:
	inode_unlock(inode);
	return ret;
}

static inline void ceph_zero_partial_page(
	struct inode *inode, loff_t offset, unsigned size)
{
	struct page *page;
	pgoff_t index = offset >> PAGE_SHIFT;

	page = find_lock_page(inode->i_mapping, index);
	if (page) {
		wait_on_page_writeback(page);
		zero_user(page, offset & (PAGE_SIZE - 1), size);
		unlock_page(page);
		put_page(page);
	}
}

static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
				      loff_t length)
{
	loff_t nearly = round_up(offset, PAGE_SIZE);
	if (offset < nearly) {
		loff_t size = nearly - offset;
		if (length < size)
			size = length;
		ceph_zero_partial_page(inode, offset, size);
		offset += size;
		length -= size;
	}
	if (length >= PAGE_SIZE) {
		loff_t size = round_down(length, PAGE_SIZE);
		truncate_pagecache_range(inode, offset, offset + size - 1);
		offset += size;
		length -= size;
	}
	if (length)
		ceph_zero_partial_page(inode, offset, length);
}

static int ceph_zero_partial_object(struct inode *inode,
				    loff_t offset, loff_t *length)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_request *req;
	int ret = 0;
	loff_t zero = 0;
	int op;

	if (!length) {
		op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
		length = &zero;
	} else {
		op = CEPH_OSD_OP_ZERO;
	}

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					ceph_vino(inode),
					offset, length,
					0, 1, op,
					CEPH_OSD_FLAG_WRITE,
					NULL, 0, 0, false);
	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
		goto out;
	}

	req->r_mtime = inode->i_mtime;
	ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
	if (!ret) {
		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
		if (ret == -ENOENT)
			ret = 0;
	}
	ceph_osdc_put_request(req);

out:
	return ret;
}

static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
{
	int ret = 0;
	struct ceph_inode_info *ci = ceph_inode(inode);
	s32 stripe_unit = ci->i_layout.stripe_unit;
	s32 stripe_count = ci->i_layout.stripe_count;
	s32 object_size = ci->i_layout.object_size;
	u64 object_set_size = object_size * stripe_count;
	u64 nearly, t;

	/* round offset up to next period boundary */
	nearly = offset + object_set_size - 1;
	t = nearly;
	nearly -= do_div(t, object_set_size);
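	/*
	 * For example, with a hypothetical layout of 4 MB objects and a
	 * stripe_count of 4, object_set_size is 16 MB and an offset of
	 * 5 MB rounds up to nearly = 16 MB.
	 */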

	while (length && offset < nearly) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
		if (ret < 0)
			return ret;
		offset += size;
		length -= size;
	}
	while (length >= object_set_size) {
		int i;
		loff_t pos = offset;
		for (i = 0; i < stripe_count; ++i) {
			ret = ceph_zero_partial_object(inode, pos, NULL);
			if (ret < 0)
				return ret;
			pos += stripe_unit;
		}
		offset += object_set_size;
		length -= object_set_size;
	}
	while (length) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
		if (ret < 0)
			return ret;
		offset += size;
		length -= size;
	}
	return ret;
}

static long ceph_fallocate(struct file *file, int mode,
				loff_t offset, loff_t length)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc =
		&ceph_inode_to_client(inode)->client->osdc;
	struct ceph_cap_flush *prealloc_cf;
	int want, got = 0;
	int dirty;
	int ret = 0;
	loff_t endoff = 0;
	loff_t size;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	inode_lock(inode);

	if (ceph_snap(inode) != CEPH_NOSNAP) {
		ret = -EROFS;
		goto unlock;
	}

	if (!(mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE)) &&
	    ceph_quota_is_max_bytes_exceeded(inode, offset + length)) {
		ret = -EDQUOT;
		goto unlock;
	}

	if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) &&
	    !(mode & FALLOC_FL_PUNCH_HOLE)) {
		ret = -ENOSPC;
		goto unlock;
	}

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		ret = ceph_uninline_data(file, NULL);
		if (ret < 0)
			goto unlock;
	}

	size = i_size_read(inode);
	if (!(mode & FALLOC_FL_KEEP_SIZE)) {
		endoff = offset + length;
		ret = inode_newsize_ok(inode, endoff);
		if (ret)
			goto unlock;
	}

	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;

	ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, endoff, &got, NULL);
	if (ret < 0)
		goto unlock;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset < size)
			ceph_zero_pagecache_range(inode, offset, length);
		ret = ceph_zero_objects(inode, offset, length);
	} else if (endoff > size) {
		truncate_pagecache_range(inode, size, -1);
		if (ceph_inode_set_size(inode, endoff))
			ceph_check_caps(ceph_inode(inode),
				CHECK_CAPS_AUTHONLY, NULL);
	}

	if (!ret) {
		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
		if ((endoff > size) &&
		    ceph_quota_is_max_bytes_approaching(inode, endoff))
			ceph_check_caps(ci, CHECK_CAPS_NODELAY, NULL);
	}

	ceph_put_cap_refs(ci, got);
unlock:
	inode_unlock(inode);
	ceph_free_cap_flush(prealloc_cf);
	return ret;
}

const struct file_operations ceph_file_fops = {
	.open = ceph_open,
	.release = ceph_release,
	.llseek = ceph_llseek,
	.read_iter = ceph_read_iter,
	.write_iter = ceph_write_iter,
	.mmap = ceph_mmap,
	.fsync = ceph_fsync,
	.lock = ceph_lock,
	.flock = ceph_flock,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl = ceph_ioctl,
	.compat_ioctl	= ceph_ioctl,
	.fallocate	= ceph_fallocate,
};