/*
 *   fs/cifs/file.c
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"


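/*
 * Map the VFS open mode (O_RDONLY/O_WRONLY/O_RDWR) to the SMB desired
 * access bits requested on open.
 */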
static inline int cifs_convert_flags(unsigned int flags)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request
		   can cause unnecessary access denied on create */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}

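/*
 * Translate VFS open flags into the SMB_O_* flags used by the POSIX
 * open/create call of the CIFS Unix extensions.
 */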
static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT) {
		posix_flags |= SMB_O_CREAT;
		if (flags & O_EXCL)
			posix_flags |= SMB_O_EXCL;
	} else if (flags & O_EXCL)
		cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
			 current->comm, current->tgid);
	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}

static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}

int cifs_posix_open(char *full_path, struct inode **pinode,
			struct super_block *sb, int mode, unsigned int f_flags,
			__u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;
	cifs_dbg(FYI, "posix open %s\n", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_remap(cifs_sb));
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}

static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
	     struct cifs_fid *fid, unsigned int xid)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifs_open_parms oparms;

	if (!server->ops->open)
		return -ENOSYS;

	desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match between disposition
 *	FILE_SUPERSEDE (ie create whether or not file exists although
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on open call)
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = create_options;
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = fid;
	oparms.reconnect = false;

	rc = server->ops->open(xid, &oparms, oplock, buf);

	if (rc)
		goto out;

	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, fid);

out:
	kfree(buf);
	return rc;
}

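/*
 * Return true if any open instance of this inode currently holds cached
 * byte-range (mandatory) locks.
 */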
static bool
cifs_has_mand_locks(struct cifsInodeInfo *cinode)
{
	struct cifs_fid_locks *cur;
	bool has_locks = false;

	down_read(&cinode->lock_sem);
	list_for_each_entry(cur, &cinode->llist, llist) {
		if (!list_empty(&cur->locks)) {
			has_locks = true;
			break;
		}
	}
	up_read(&cinode->lock_sem);
	return has_locks;
}

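/*
 * Allocate and initialize the per-open-file data (cifsFileInfo), link it
 * into the per-inode and per-tcon open file lists, and record the granted
 * oplock level.
 */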
struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = d_inode(dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;
	down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	cfile->count = 1;
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	mutex_init(&cfile->fh_mutex);
	spin_lock_init(&cfile->file_info_lock);

	cifs_sb_active(inode->i_sb);

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	spin_lock(&tcon->open_file_lock);
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	fid->purge_cache = false;
	server->ops->set_fid(cfile, fid, oplock);

	list_add(&cfile->tlist, &tcon->openFileList);
	/* if readable file instance put first in list*/
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&tcon->open_file_lock);

	if (fid->purge_cache)
		cifs_zap_mapping(inode);

	file->private_data = cfile;
	return cfile;
}

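/*
 * Take an extra reference on an open file instance; released by
 * cifsFileInfo_put().
 */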
struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file->file_info_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file->file_info_lock);
	return cifs_file;
}

/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * tcon->open_file_lock and cifs_file->file_info_lock.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsLockInfo *li, *tmp;
	struct cifs_fid fid;
	struct cifs_pending_open open;
	bool oplock_break_cancelled;

	spin_lock(&tcon->open_file_lock);

	spin_lock(&cifs_file->file_info_lock);
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file->file_info_lock);
		spin_unlock(&tcon->open_file_lock);
		return;
	}
	spin_unlock(&cifs_file->file_info_lock);

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* store open in pending opens to make sure we don't miss lease break */
	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cifs_dbg(FYI, "closing last open instance for inode %p\n",
			 d_inode(cifs_file->dentry));
		/*
		 * In strict cache mode we need invalidate mapping on the last
		 * close because it may cause an error when we open this file
		 * again and get at least level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
		cifs_set_oplock_level(cifsi, 0);
	}

	spin_unlock(&tcon->open_file_lock);

	oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break);

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		struct TCP_Server_Info *server = tcon->ses->server;
		unsigned int xid;

		xid = get_xid();
		if (server->ops->close)
			server->ops->close(xid, tcon, &cifs_file->fid);
		_free_xid(xid);
	}

	if (oplock_break_cancelled)
		cifs_done_oplock_break(cifsi);

	cifs_del_pending_open(&open);

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	cifs_sb_deactive(sb);
	kfree(cifs_file);
}

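/*
 * VFS ->open() entry point: use a POSIX open when the server supports the
 * Unix extensions, otherwise fall back to a regular NT-style open.
 */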
int cifs_open(struct inode *inode, struct file *file)
452

L
Linus Torvalds 已提交
453 454
{
	int rc = -EACCES;
455
	unsigned int xid;
456
	__u32 oplock;
L
Linus Torvalds 已提交
457
	struct cifs_sb_info *cifs_sb;
P
Pavel Shilovsky 已提交
458
	struct TCP_Server_Info *server;
459
	struct cifs_tcon *tcon;
460
	struct tcon_link *tlink;
461
	struct cifsFileInfo *cfile = NULL;
L
Linus Torvalds 已提交
462
	char *full_path = NULL;
P
Pavel Shilovsky 已提交
463
	bool posix_open_ok = false;
464
	struct cifs_fid fid;
465
	struct cifs_pending_open open;
L
Linus Torvalds 已提交
466

467
	xid = get_xid();
L
Linus Torvalds 已提交
468 469

	cifs_sb = CIFS_SB(inode->i_sb);
470 471
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
472
		free_xid(xid);
473 474 475
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
P
Pavel Shilovsky 已提交
476
	server = tcon->ses->server;
L
Linus Torvalds 已提交
477

G
Goldwyn Rodrigues 已提交
478
	full_path = build_path_from_dentry(file_dentry(file));
L
Linus Torvalds 已提交
479
	if (full_path == NULL) {
480
		rc = -ENOMEM;
481
		goto out;
L
Linus Torvalds 已提交
482 483
	}

484
	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
485
		 inode, file->f_flags, full_path);
486

487 488 489 490 491 492 493 494
	if (file->f_flags & O_DIRECT &&
	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
			file->f_op = &cifs_file_direct_nobrl_ops;
		else
			file->f_op = &cifs_file_direct_ops;
	}

495
	if (server->oplocks)
496 497 498 499
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

500
	if (!tcon->broken_posix_open && tcon->unix_ext &&
501 502
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
503
		/* can not refresh inode info since size could be stale */
504
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
505
				cifs_sb->mnt_file_mode /* ignored */,
506
				file->f_flags, &oplock, &fid.netfid, xid);
507
		if (rc == 0) {
508
			cifs_dbg(FYI, "posix open succeeded\n");
P
Pavel Shilovsky 已提交
509
			posix_open_ok = true;
510 511
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
512 513 514
				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
					 tcon->ses->serverName,
					 tcon->ses->serverNOS);
515
			tcon->broken_posix_open = true;
516 517 518
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
519 520 521 522
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
523 524
	}

525 526 527 528 529
	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	cifs_add_pending_open(&fid, tlink, &open);

P
Pavel Shilovsky 已提交
530
	if (!posix_open_ok) {
P
Pavel Shilovsky 已提交
531 532 533
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

P
Pavel Shilovsky 已提交
534
		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
535
				  file->f_flags, &oplock, &fid, xid);
536 537
		if (rc) {
			cifs_del_pending_open(&open);
P
Pavel Shilovsky 已提交
538
			goto out;
539
		}
P
Pavel Shilovsky 已提交
540
	}
541

542 543
	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
	if (cfile == NULL) {
P
Pavel Shilovsky 已提交
544 545
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
546
		cifs_del_pending_open(&open);
L
Linus Torvalds 已提交
547 548 549 550
		rc = -ENOMEM;
		goto out;
	}

551 552
	cifs_fscache_set_inode_cookie(inode, file);

P
Pavel Shilovsky 已提交
553
	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
554 555 556 557
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
P
Pavel Shilovsky 已提交
558 559
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
560 561
			.uid	= INVALID_UID, /* no change */
			.gid	= INVALID_GID, /* no change */
P
Pavel Shilovsky 已提交
562 563 564 565 566
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
567 568
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
L
Linus Torvalds 已提交
569 570 571 572
	}

out:
	kfree(full_path);
573
	free_xid(xid);
574
	cifs_put_tlink(tlink);
L
Linus Torvalds 已提交
575 576 577
	return rc;
}

P
Pavel Shilovsky 已提交
578 579
static int cifs_push_posix_locks(struct cifsFileInfo *cfile);

580 581
/*
 * Try to reacquire byte range locks that were released when session
P
Pavel Shilovsky 已提交
582
 * to server was lost.
583
 */
P
Pavel Shilovsky 已提交
584 585
static int
cifs_relock_file(struct cifsFileInfo *cfile)
L
Linus Torvalds 已提交
586
{
P
Pavel Shilovsky 已提交
587
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
588
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
P
Pavel Shilovsky 已提交
589
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
L
Linus Torvalds 已提交
590 591
	int rc = 0;

592
	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
P
Pavel Shilovsky 已提交
593
	if (cinode->can_cache_brlcks) {
594 595
		/* can cache locks - no need to relock */
		up_read(&cinode->lock_sem);
P
Pavel Shilovsky 已提交
596 597 598 599 600 601 602 603 604
		return rc;
	}

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
		rc = tcon->ses->server->ops->push_mand_locks(cfile);
L
Linus Torvalds 已提交
605

606
	up_read(&cinode->lock_sem);
L
Linus Torvalds 已提交
607 608 609
	return rc;
}

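/*
 * Reopen a file whose handle has been invalidated, e.g. after a reconnect
 * to the server. If can_flush is set, dirty pages are written back and the
 * inode information is refreshed.
 */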
static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
L
Linus Torvalds 已提交
612 613
{
	int rc = -EACCES;
614
	unsigned int xid;
615
	__u32 oplock;
L
Linus Torvalds 已提交
616
	struct cifs_sb_info *cifs_sb;
617
	struct cifs_tcon *tcon;
618 619
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
S
Steve French 已提交
620
	struct inode *inode;
L
Linus Torvalds 已提交
621
	char *full_path = NULL;
622
	int desired_access;
L
Linus Torvalds 已提交
623
	int disposition = FILE_OPEN;
624
	int create_options = CREATE_NOT_DIR;
625
	struct cifs_open_parms oparms;
L
Linus Torvalds 已提交
626

627
	xid = get_xid();
628 629 630
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		mutex_unlock(&cfile->fh_mutex);
631
		rc = 0;
632
		free_xid(xid);
633
		return rc;
L
Linus Torvalds 已提交
634 635
	}

636
	inode = d_inode(cfile->dentry);
L
Linus Torvalds 已提交
637
	cifs_sb = CIFS_SB(inode->i_sb);
638 639 640 641 642 643 644 645 646 647
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab rename sem here because various ops, including those
	 * that already have the rename sem can end up causing writepage to get
	 * called and if the server was down that means we end up here, and we
	 * can never tell if the caller already has the rename_sem.
	 */
	full_path = build_path_from_dentry(cfile->dentry);
L
Linus Torvalds 已提交
648
	if (full_path == NULL) {
649
		rc = -ENOMEM;
650
		mutex_unlock(&cfile->fh_mutex);
651
		free_xid(xid);
652
		return rc;
L
Linus Torvalds 已提交
653 654
	}

655 656
	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
		 inode, cfile->f_flags, full_path);
L
Linus Torvalds 已提交
657

658
	if (tcon->ses->server->oplocks)
L
Linus Torvalds 已提交
659 660
		oplock = REQ_OPLOCK;
	else
661
		oplock = 0;
L
Linus Torvalds 已提交
662

663
	if (tcon->unix_ext && cap_unix(tcon->ses) &&
664
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
665
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
666 667 668 669
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
670
		unsigned int oflags = cfile->f_flags &
J
Jeff Layton 已提交
671
						~(O_CREAT | O_EXCL | O_TRUNC);
672

673
		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
674
				     cifs_sb->mnt_file_mode /* ignored */,
675
				     oflags, &oplock, &cfile->fid.netfid, xid);
676
		if (rc == 0) {
677
			cifs_dbg(FYI, "posix reopen succeeded\n");
678
			oparms.reconnect = true;
679 680
			goto reopen_success;
		}
681 682 683 684
		/*
		 * fallthrough to retry open the old way on errors, especially
		 * in the reconnect path it is important to retry hard
		 */
685 686
	}

687
	desired_access = cifs_convert_flags(cfile->f_flags);
688

689 690 691
	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

P
Pavel Shilovsky 已提交
692
	if (server->ops->get_lease_key)
693
		server->ops->get_lease_key(inode, &cfile->fid);
P
Pavel Shilovsky 已提交
694

695 696 697 698 699 700
	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = create_options;
	oparms.disposition = disposition;
	oparms.path = full_path;
701 702
	oparms.fid = &cfile->fid;
	oparms.reconnect = true;
703

704 705
	/*
	 * Can not refresh inode by passing in file_info buf to be returned by
706
	 * ops->open and then calling get_inode_info with returned buf since
707 708 709 710
	 * file might have write behind data that needs to be flushed and server
	 * version of file size can be stale. If we knew for sure that inode was
	 * not dirty locally we could do this.
	 */
711
	rc = server->ops->open(xid, &oparms, &oplock, NULL);
712 713 714 715 716 717 718
	if (rc == -ENOENT && oparms.reconnect == false) {
		/* durable handle timeout is expired - open the file again */
		rc = server->ops->open(xid, &oparms, &oplock, NULL);
		/* indicate that we need to relock the file */
		oparms.reconnect = true;
	}

L
Linus Torvalds 已提交
719
	if (rc) {
720
		mutex_unlock(&cfile->fh_mutex);
721 722
		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
		cifs_dbg(FYI, "oplock: %d\n", oplock);
J
Jeff Layton 已提交
723 724 725
		goto reopen_error_exit;
	}

726
reopen_success:
727 728 729
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);
J
Jeff Layton 已提交
730 731 732

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
733
		mapping_set_error(inode->i_mapping, rc);
J
Jeff Layton 已提交
734 735

		if (tcon->unix_ext)
736 737
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
J
Jeff Layton 已提交
738
		else
739 740 741 742 743 744 745 746 747 748
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
	}
	/*
	 * Else we are writing out data to server already and could deadlock if
	 * we tried to flush data, and since we do not know if we have data that
	 * would invalidate the current end of file on the server we can not go
	 * to the server to get the new inode info.
	 */

749 750 751 752 753 754 755 756 757
	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

758 759 760
	server->ops->set_fid(cfile, &cfile->fid, oplock);
	if (oparms.reconnect)
		cifs_relock_file(cfile);
J
Jeff Layton 已提交
761 762

reopen_error_exit:
L
Linus Torvalds 已提交
763
	kfree(full_path);
764
	free_xid(xid);
L
Linus Torvalds 已提交
765 766 767 768 769
	return rc;
}

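/* VFS ->release() entry point: drop this file's reference on the open handle. */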
int cifs_close(struct inode *inode, struct file *file)
{
770 771 772 773
	if (file->private_data != NULL) {
		cifsFileInfo_put(file->private_data);
		file->private_data = NULL;
	}
J
[CIFS]  
Jeremy Allison 已提交
774

775 776
	/* return code from the ->release op is always ignored */
	return 0;
L
Linus Torvalds 已提交
777 778
}

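/*
 * Walk the tree connection's open file list and reopen any handles that
 * were invalidated, so that persistent handles survive a reconnect.
 */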
void
cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
{
782
	struct cifsFileInfo *open_file;
783 784
	struct list_head *tmp;
	struct list_head *tmp1;
785 786
	struct list_head tmp_list;

787 788 789 790 791
	if (!tcon->use_persistent || !tcon->need_reopen_files)
		return;

	tcon->need_reopen_files = false;

792 793
	cifs_dbg(FYI, "Reopen persistent handles");
	INIT_LIST_HEAD(&tmp_list);
794 795 796

	/* list all files open on tree connection, reopen resilient handles  */
	spin_lock(&tcon->open_file_lock);
797
	list_for_each(tmp, &tcon->openFileList) {
798
		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
799 800 801 802
		if (!open_file->invalidHandle)
			continue;
		cifsFileInfo_get(open_file);
		list_add_tail(&open_file->rlist, &tmp_list);
803 804
	}
	spin_unlock(&tcon->open_file_lock);
805 806 807

	list_for_each_safe(tmp, tmp1, &tmp_list) {
		open_file = list_entry(tmp, struct cifsFileInfo, rlist);
808 809
		if (cifs_reopen_file(open_file, false /* do not flush */))
			tcon->need_reopen_files = true;
810 811 812
		list_del_init(&open_file->rlist);
		cifsFileInfo_put(open_file);
	}
813 814
}

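/*
 * Directory ->release() entry point: close the search handle on the server
 * and free any cached readdir response buffer.
 */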
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
818
	unsigned int xid;
819
	struct cifsFileInfo *cfile = file->private_data;
820 821 822
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;
L
Linus Torvalds 已提交
823

824
	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);
L
Linus Torvalds 已提交
825

826 827 828
	if (cfile == NULL)
		return rc;

829
	xid = get_xid();
830 831
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;
L
Linus Torvalds 已提交
832

833
	cifs_dbg(FYI, "Freeing private data in close dir\n");
834
	spin_lock(&cfile->file_info_lock);
835
	if (server->ops->dir_needs_close(cfile)) {
836
		cfile->invalidHandle = true;
837
		spin_unlock(&cfile->file_info_lock);
838 839 840 841
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
842
		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
843 844 845
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
846
		spin_unlock(&cfile->file_info_lock);
847 848 849

	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
850
		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
851 852 853 854 855
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
L
Linus Torvalds 已提交
856
	}
857 858 859 860

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
L
Linus Torvalds 已提交
861
	/* BB can we lock the filestruct while this is going on? */
862
	free_xid(xid);
L
Linus Torvalds 已提交
863 864 865
	return rc;
}

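/* Allocate and initialize a byte-range lock record owned by the current task. */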
static struct cifsLockInfo *
867
cifs_lock_init(__u64 offset, __u64 length, __u8 type)
J
[CIFS]  
Jeremy Allison 已提交
868
{
869
	struct cifsLockInfo *lock =
S
Steve French 已提交
870
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
871 872 873 874 875 876 877 878 879
	if (!lock)
		return lock;
	lock->offset = offset;
	lock->length = length;
	lock->type = type;
	lock->pid = current->tgid;
	INIT_LIST_HEAD(&lock->blist);
	init_waitqueue_head(&lock->block_q);
	return lock;
880 881
}

882
void
883 884 885 886 887 888 889 890 891
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
		list_del_init(&li->blist);
		wake_up(&li->block_q);
	}
}

892 893 894 895 896
#define CIFS_LOCK_OP	0
#define CIFS_READ_OP	1
#define CIFS_WRITE_OP	2

/* @rw_check : 0 - no op, 1 - read, 2 - write */
897
static bool
898 899
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, struct cifsFileInfo *cfile,
900
			    struct cifsLockInfo **conf_lock, int rw_check)
901
{
902
	struct cifsLockInfo *li;
903
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
904
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
905

906
	list_for_each_entry(li, &fdlocks->locks, llist) {
907 908 909
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
910 911 912 913 914 915 916
		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
		    server->ops->compare_fids(cfile, cur_cfile)) {
			/* shared lock prevents write op through the same fid */
			if (!(li->type & server->vals->shared_lock_type) ||
			    rw_check != CIFS_WRITE_OP)
				continue;
		}
917 918 919
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
920
			continue;
921 922
		if (conf_lock)
			*conf_lock = li;
923
		return true;
924 925 926 927
	}
	return false;
}

928
bool
929
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
930
			__u8 type, struct cifsLockInfo **conf_lock,
931
			int rw_check)
932
{
933
	bool rc = false;
934
	struct cifs_fid_locks *cur;
935
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
936

937 938
	list_for_each_entry(cur, &cinode->llist, llist) {
		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
939
						 cfile, conf_lock, rw_check);
940 941 942 943 944
		if (rc)
			break;
	}

	return rc;
945 946
}

947 948 949 950 951 952 953
/*
 * Check if there is another lock that prevents us to set the lock (mandatory
 * style). If such a lock exists, update the flock structure with its
 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
 * or leave it the same if we can't. Returns 0 if we don't need to request to
 * the server or 1 otherwise.
 */
954
static int
955 956
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
957 958 959
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
960
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
961
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
962 963
	bool exist;

964
	down_read(&cinode->lock_sem);
965

966
	exist = cifs_find_lock_conflict(cfile, offset, length, type,
967
					&conf_lock, CIFS_LOCK_OP);
968 969 970 971
	if (exist) {
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->fl_pid = conf_lock->pid;
972
		if (conf_lock->type & server->vals->shared_lock_type)
973 974 975 976 977 978 979 980
			flock->fl_type = F_RDLCK;
		else
			flock->fl_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;
	else
		flock->fl_type = F_UNLCK;

981
	up_read(&cinode->lock_sem);
982 983 984
	return rc;
}

985
static void
986
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
987
{
988
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
989
	down_write(&cinode->lock_sem);
990
	list_add_tail(&lock->llist, &cfile->llist->locks);
991
	up_write(&cinode->lock_sem);
J
[CIFS]  
Jeremy Allison 已提交
992 993
}

/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
1001
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
1002
		 bool wait)
1003
{
1004
	struct cifsLockInfo *conf_lock;
1005
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1006 1007 1008 1009 1010
	bool exist;
	int rc = 0;

try_again:
	exist = false;
1011
	down_write(&cinode->lock_sem);
1012

1013
	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
1014
					lock->type, &conf_lock, CIFS_LOCK_OP);
1015
	if (!exist && cinode->can_cache_brlcks) {
1016
		list_add_tail(&lock->llist, &cfile->llist->locks);
1017
		up_write(&cinode->lock_sem);
1018 1019 1020 1021 1022 1023 1024 1025 1026
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		list_add_tail(&lock->blist, &conf_lock->blist);
1027
		up_write(&cinode->lock_sem);
1028 1029 1030 1031 1032
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
1033
		down_write(&cinode->lock_sem);
1034
		list_del_init(&lock->blist);
1035 1036
	}

1037
	up_write(&cinode->lock_sem);
1038 1039 1040
	return rc;
}

1041 1042 1043 1044 1045 1046 1047
/*
 * Check if there is another lock that prevents us to set the lock (posix
 * style). If such a lock exists, update the flock structure with its
 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
 * or leave it the same if we can't. Returns 0 if we don't need to request to
 * the server or 1 otherwise.
 */
1048
static int
1049 1050 1051
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
	int rc = 0;
A
Al Viro 已提交
1052
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
1053 1054
	unsigned char saved_type = flock->fl_type;

1055 1056 1057
	if ((flock->fl_flags & FL_POSIX) == 0)
		return 1;

1058
	down_read(&cinode->lock_sem);
1059 1060 1061 1062 1063 1064 1065
	posix_test_lock(file, flock);

	if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
		flock->fl_type = saved_type;
		rc = 1;
	}

1066
	up_read(&cinode->lock_sem);
1067 1068 1069
	return rc;
}

1070 1071 1072 1073 1074 1075
/*
 * Set the byte-range lock (posix style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if we need to request to the server;
 * 3) <0, if the error occurs while setting the lock.
 */
1076 1077 1078
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
A
Al Viro 已提交
1079
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
1080 1081 1082 1083
	int rc = 1;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return rc;
1084

1085
try_again:
1086
	down_write(&cinode->lock_sem);
1087
	if (!cinode->can_cache_brlcks) {
1088
		up_write(&cinode->lock_sem);
1089
		return rc;
1090
	}
1091 1092

	rc = posix_lock_file(file, flock, NULL);
1093
	up_write(&cinode->lock_sem);
1094 1095 1096 1097
	if (rc == FILE_LOCK_DEFERRED) {
		rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
		if (!rc)
			goto try_again;
1098
		posix_unblock_lock(flock);
1099
	}
1100
	return rc;
1101 1102
}

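/*
 * Send all cached mandatory byte-range locks for this open file to the
 * server, batched into LOCKING_ANDX requests.
 */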
int
1104
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
1105
{
1106 1107
	unsigned int xid;
	int rc = 0, stored_rc;
1108 1109
	struct cifsLockInfo *li, *tmp;
	struct cifs_tcon *tcon;
1110
	unsigned int num, max_num, max_buf;
1111
	LOCKING_ANDX_RANGE *buf, *cur;
1112 1113 1114 1115
	static const int types[] = {
		LOCKING_ANDX_LARGE_FILES,
		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
	};
1116
	int i;
1117

1118
	xid = get_xid();
1119 1120
	tcon = tlink_tcon(cfile->tlink);

1121 1122 1123 1124 1125 1126
	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it for zero before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (!max_buf) {
1127
		free_xid(xid);
1128 1129 1130 1131 1132
		return -EINVAL;
	}

	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
1133
	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1134
	if (!buf) {
1135
		free_xid(xid);
1136
		return -ENOMEM;
1137 1138 1139 1140 1141
	}

	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
1142
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
1143 1144 1145 1146 1147 1148 1149 1150
			if (li->type != types[i])
				continue;
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			if (++num == max_num) {
1151 1152
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
1153 1154
						       (__u8)li->type, 0, num,
						       buf);
1155 1156 1157 1158 1159 1160 1161 1162 1163
				if (stored_rc)
					rc = stored_rc;
				cur = buf;
				num = 0;
			} else
				cur++;
		}

		if (num) {
1164
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
1165
					       (__u8)types[i], 0, num, buf);
1166 1167 1168
			if (stored_rc)
				rc = stored_rc;
		}
1169 1170
	}

1171
	kfree(buf);
1172
	free_xid(xid);
1173 1174 1175
	return rc;
}

1176 1177 1178 1179 1180 1181
static __u32
hash_lockowner(fl_owner_t owner)
{
	return cifs_lock_secret ^ hash32_ptr((const void *)owner);
}

1182 1183 1184 1185 1186 1187 1188 1189 1190
struct lock_to_push {
	struct list_head llist;
	__u64 offset;
	__u64 length;
	__u32 pid;
	__u16 netfid;
	__u8 type;
};

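/*
 * Push the inode's cached POSIX (advisory) locks to the server via the
 * Unix extensions' POSIX lock call.
 */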
static int
1192
cifs_push_posix_locks(struct cifsFileInfo *cfile)
1193
{
1194
	struct inode *inode = d_inode(cfile->dentry);
1195
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1196 1197
	struct file_lock *flock;
	struct file_lock_context *flctx = inode->i_flctx;
1198
	unsigned int count = 0, i;
1199
	int rc = 0, xid, type;
1200 1201
	struct list_head locks_to_send, *el;
	struct lock_to_push *lck, *tmp;
1202 1203
	__u64 length;

1204
	xid = get_xid();
1205

1206 1207
	if (!flctx)
		goto out;
1208

1209 1210 1211 1212 1213 1214
	spin_lock(&flctx->flc_lock);
	list_for_each(el, &flctx->flc_posix) {
		count++;
	}
	spin_unlock(&flctx->flc_lock);

1215 1216
	INIT_LIST_HEAD(&locks_to_send);

1217
	/*
1218 1219
	 * Allocating count locks is enough because no FL_POSIX locks can be
	 * added to the list while we are holding cinode->lock_sem that
1220
	 * protects locking operations of this inode.
1221
	 */
1222
	for (i = 0; i < count; i++) {
1223 1224 1225 1226 1227 1228 1229 1230 1231
		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
		if (!lck) {
			rc = -ENOMEM;
			goto err_out;
		}
		list_add_tail(&lck->llist, &locks_to_send);
	}

	el = locks_to_send.next;
1232
	spin_lock(&flctx->flc_lock);
1233
	list_for_each_entry(flock, &flctx->flc_posix, fl_list) {
1234
		if (el == &locks_to_send) {
1235 1236 1237 1238
			/*
			 * The list ended. We don't have enough allocated
			 * structures - something is really wrong.
			 */
1239
			cifs_dbg(VFS, "Can't push all brlocks!\n");
1240 1241
			break;
		}
1242 1243 1244 1245 1246
		length = 1 + flock->fl_end - flock->fl_start;
		if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
			type = CIFS_RDLCK;
		else
			type = CIFS_WRLCK;
1247
		lck = list_entry(el, struct lock_to_push, llist);
1248
		lck->pid = hash_lockowner(flock->fl_owner);
1249
		lck->netfid = cfile->fid.netfid;
1250 1251 1252
		lck->length = length;
		lck->type = type;
		lck->offset = flock->fl_start;
1253
	}
1254
	spin_unlock(&flctx->flc_lock);
1255 1256 1257 1258 1259

	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		int stored_rc;

		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
1260
					     lck->offset, lck->length, NULL,
1261 1262 1263 1264 1265 1266 1267
					     lck->type, 0);
		if (stored_rc)
			rc = stored_rc;
		list_del(&lck->llist);
		kfree(lck);
	}

1268
out:
1269
	free_xid(xid);
1270
	return rc;
1271 1272 1273 1274 1275 1276
err_out:
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		list_del(&lck->llist);
		kfree(lck);
	}
	goto out;
1277 1278
}

1279
static int
1280
cifs_push_locks(struct cifsFileInfo *cfile)
1281
{
1282
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1283
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1284
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1285 1286 1287 1288 1289 1290 1291 1292
	int rc = 0;

	/* we are going to update can_cache_brlcks here - need a write access */
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		return rc;
	}
1293

1294
	if (cap_unix(tcon->ses) &&
1295 1296
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1297 1298 1299
		rc = cifs_push_posix_locks(cfile);
	else
		rc = tcon->ses->server->ops->push_mand_locks(cfile);
1300

1301 1302 1303
	cinode->can_cache_brlcks = false;
	up_write(&cinode->lock_sem);
	return rc;
1304 1305
}

1306
static void
1307
cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
1308
		bool *wait_flag, struct TCP_Server_Info *server)
L
Linus Torvalds 已提交
1309
{
1310
	if (flock->fl_flags & FL_POSIX)
1311
		cifs_dbg(FYI, "Posix\n");
1312
	if (flock->fl_flags & FL_FLOCK)
1313
		cifs_dbg(FYI, "Flock\n");
1314
	if (flock->fl_flags & FL_SLEEP) {
1315
		cifs_dbg(FYI, "Blocking lock\n");
1316
		*wait_flag = true;
L
Linus Torvalds 已提交
1317
	}
1318
	if (flock->fl_flags & FL_ACCESS)
1319
		cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
1320
	if (flock->fl_flags & FL_LEASE)
1321
		cifs_dbg(FYI, "Lease on file - not implemented yet\n");
1322
	if (flock->fl_flags &
1323 1324
	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
	       FL_ACCESS | FL_LEASE | FL_CLOSE)))
1325
		cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags);
L
Linus Torvalds 已提交
1326

1327
	*type = server->vals->large_lock_type;
1328
	if (flock->fl_type == F_WRLCK) {
1329
		cifs_dbg(FYI, "F_WRLCK\n");
1330
		*type |= server->vals->exclusive_lock_type;
1331 1332
		*lock = 1;
	} else if (flock->fl_type == F_UNLCK) {
1333
		cifs_dbg(FYI, "F_UNLCK\n");
1334
		*type |= server->vals->unlock_lock_type;
1335 1336 1337
		*unlock = 1;
		/* Check if unlock includes more than one lock range */
	} else if (flock->fl_type == F_RDLCK) {
1338
		cifs_dbg(FYI, "F_RDLCK\n");
1339
		*type |= server->vals->shared_lock_type;
1340 1341
		*lock = 1;
	} else if (flock->fl_type == F_EXLCK) {
1342
		cifs_dbg(FYI, "F_EXLCK\n");
1343
		*type |= server->vals->exclusive_lock_type;
1344 1345
		*lock = 1;
	} else if (flock->fl_type == F_SHLCK) {
1346
		cifs_dbg(FYI, "F_SHLCK\n");
1347
		*type |= server->vals->shared_lock_type;
1348
		*lock = 1;
L
Linus Torvalds 已提交
1349
	} else
1350
		cifs_dbg(FYI, "Unknown type of lock\n");
1351
}
L
Linus Torvalds 已提交
1352

1353
static int
1354
cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
1355
	   bool wait_flag, bool posix_lck, unsigned int xid)
1356 1357 1358
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
1359 1360
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1361
	struct TCP_Server_Info *server = tcon->ses->server;
1362
	__u16 netfid = cfile->fid.netfid;
1363

1364 1365
	if (posix_lck) {
		int posix_lock_type;
1366 1367 1368 1369 1370

		rc = cifs_posix_lock_test(file, flock);
		if (!rc)
			return rc;

1371
		if (type & server->vals->shared_lock_type)
1372 1373 1374
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
1375 1376
		rc = CIFSSMBPosixLock(xid, tcon, netfid,
				      hash_lockowner(flock->fl_owner),
1377
				      flock->fl_start, length, flock,
1378
				      posix_lock_type, wait_flag);
1379 1380
		return rc;
	}
L
Linus Torvalds 已提交
1381

1382
	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
1383 1384 1385
	if (!rc)
		return rc;

1386
	/* BB we could chain these into one lock request BB */
1387 1388
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
				    1, 0, false);
1389
	if (rc == 0) {
1390 1391
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 0, 1, false);
1392 1393
		flock->fl_type = F_UNLCK;
		if (rc != 0)
1394 1395
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
1396
		return 0;
L
Linus Torvalds 已提交
1397
	}
J
[CIFS]  
Jeremy Allison 已提交
1398

1399
	if (type & server->vals->shared_lock_type) {
1400
		flock->fl_type = F_WRLCK;
1401
		return 0;
J
[CIFS]  
Jeremy Allison 已提交
1402 1403
	}

1404 1405 1406 1407 1408
	type &= ~server->vals->exclusive_lock_type;

	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
				    type | server->vals->shared_lock_type,
				    1, 0, false);
1409
	if (rc == 0) {
1410 1411
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
			type | server->vals->shared_lock_type, 0, 1, false);
1412 1413
		flock->fl_type = F_RDLCK;
		if (rc != 0)
1414 1415
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
1416 1417 1418
	} else
		flock->fl_type = F_WRLCK;

1419
	return 0;
1420 1421
}

1422
void
1423 1424 1425 1426 1427 1428 1429
cifs_move_llist(struct list_head *source, struct list_head *dest)
{
	struct list_head *li, *tmp;
	list_for_each_safe(li, tmp, source)
		list_move(li, dest);
}

1430
void
1431 1432 1433 1434 1435 1436 1437 1438 1439 1440
cifs_free_llist(struct list_head *llist)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, llist, llist) {
		cifs_del_lock_waiters(li);
		list_del(&li->llist);
		kfree(li);
	}
}

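/*
 * Unlock a byte range: drop matching cached locks and send batched unlock
 * requests to the server, restoring the lock list if a request fails.
 */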
int
1442 1443
cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
		  unsigned int xid)
1444 1445
{
	int rc = 0, stored_rc;
1446 1447 1448 1449
	static const int types[] = {
		LOCKING_ANDX_LARGE_FILES,
		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
	};
1450
	unsigned int i;
1451
	unsigned int max_num, num, max_buf;
1452 1453
	LOCKING_ANDX_RANGE *buf, *cur;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1454
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1455 1456 1457 1458 1459 1460
	struct cifsLockInfo *li, *tmp;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct list_head tmp_llist;

	INIT_LIST_HEAD(&tmp_llist);

1461 1462 1463 1464 1465 1466 1467 1468 1469 1470
	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it for zero before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (!max_buf)
		return -EINVAL;

	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
1471
	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1472 1473 1474
	if (!buf)
		return -ENOMEM;

1475
	down_write(&cinode->lock_sem);
1476 1477 1478
	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
1479
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
1480 1481 1482 1483 1484 1485 1486 1487
			if (flock->fl_start > li->offset ||
			    (flock->fl_start + length) <
			    (li->offset + li->length))
				continue;
			if (current->tgid != li->pid)
				continue;
			if (types[i] != li->type)
				continue;
1488
			if (cinode->can_cache_brlcks) {
1489 1490
				/*
				 * We can cache brlock requests - simply remove
1491
				 * a lock from the file's list.
1492 1493 1494 1495
				 */
				list_del(&li->llist);
				cifs_del_lock_waiters(li);
				kfree(li);
1496
				continue;
1497
			}
1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			/*
			 * We need to save a lock here to let us add it again to
			 * the file's list if the unlock range request fails on
			 * the server.
			 */
			list_move(&li->llist, &tmp_llist);
			if (++num == max_num) {
1510 1511
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
1512 1513 1514 1515 1516 1517 1518 1519
						       li->type, num, 0, buf);
				if (stored_rc) {
					/*
					 * We failed on the unlock range
					 * request - add all locks from the tmp
					 * list to the head of the file's list.
					 */
					cifs_move_llist(&tmp_llist,
1520
							&cfile->llist->locks);
1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531
					rc = stored_rc;
				} else
					/*
					 * The unlock range request succeed -
					 * free the tmp list.
					 */
					cifs_free_llist(&tmp_llist);
				cur = buf;
				num = 0;
			} else
				cur++;
1532 1533
		}
		if (num) {
1534
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
1535 1536
					       types[i], num, 0, buf);
			if (stored_rc) {
1537 1538
				cifs_move_llist(&tmp_llist,
						&cfile->llist->locks);
1539 1540 1541 1542 1543 1544
				rc = stored_rc;
			} else
				cifs_free_llist(&tmp_llist);
		}
	}

1545
	up_write(&cinode->lock_sem);
1546 1547 1548 1549
	kfree(buf);
	return rc;
}

1550
static int
1551
cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
1552 1553
	   bool wait_flag, bool posix_lck, int lock, int unlock,
	   unsigned int xid)
1554 1555 1556 1557 1558
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1559
	struct TCP_Server_Info *server = tcon->ses->server;
1560
	struct inode *inode = d_inode(cfile->dentry);
1561 1562

	if (posix_lck) {
1563
		int posix_lock_type;
1564 1565 1566 1567 1568

		rc = cifs_posix_lock_set(file, flock);
		if (!rc || rc < 0)
			return rc;

1569
		if (type & server->vals->shared_lock_type)
1570 1571 1572
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
1573

1574
		if (unlock == 1)
1575
			posix_lock_type = CIFS_UNLCK;
J
[CIFS]  
Jeremy Allison 已提交
1576

1577
		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
1578 1579
				      hash_lockowner(flock->fl_owner),
				      flock->fl_start, length,
1580
				      NULL, posix_lock_type, wait_flag);
1581 1582
		goto out;
	}
J
[CIFS]  
Jeremy Allison 已提交
1583

1584
	if (lock) {
1585 1586
		struct cifsLockInfo *lock;

1587
		lock = cifs_lock_init(flock->fl_start, length, type);
1588 1589 1590
		if (!lock)
			return -ENOMEM;

1591
		rc = cifs_lock_add_if(cfile, lock, wait_flag);
1592
		if (rc < 0) {
1593
			kfree(lock);
1594 1595 1596
			return rc;
		}
		if (!rc)
1597 1598
			goto out;

1599 1600 1601 1602 1603 1604 1605
		/*
		 * Windows 7 server can delay breaking lease from read to None
		 * if we set a byte-range lock on a file - break it explicitly
		 * before sending the lock to the server to be sure the next
		 * read won't conflict with non-overlapping locks due to
		 * pagereading.
		 */
1606 1607
		if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
					CIFS_CACHE_READ(CIFS_I(inode))) {
1608
			cifs_zap_mapping(inode);
1609 1610
			cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
				 inode);
1611
			CIFS_I(inode)->oplock = 0;
1612 1613
		}

1614 1615
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 1, 0, wait_flag);
1616 1617
		if (rc) {
			kfree(lock);
1618
			return rc;
1619
		}
1620

1621
		cifs_lock_add(cfile, lock);
1622
	} else if (unlock)
1623
		rc = server->ops->mand_unlock_range(cfile, flock, xid);
1624 1625

out:
1626
	if (flock->fl_flags & FL_POSIX && !rc)
1627
		rc = locks_lock_file_wait(file, flock);
1628 1629 1630 1631 1632 1633 1634 1635 1636 1637 1638 1639 1640 1641
	return rc;
}

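/* VFS ->lock() entry point for fcntl byte-range locks. */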
int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
{
	int rc, xid;
	int lock = 0, unlock = 0;
	bool wait_flag = false;
	bool posix_lck = false;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *cinode;
	struct cifsFileInfo *cfile;
	__u16 netfid;
1642
	__u32 type;
1643 1644

	rc = -EACCES;
1645
	xid = get_xid();
1646

1647 1648 1649
	cifs_dbg(FYI, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld end: %lld\n",
		 cmd, flock->fl_flags, flock->fl_type,
		 flock->fl_start, flock->fl_end);
1650 1651 1652

	cfile = (struct cifsFileInfo *)file->private_data;
	tcon = tlink_tcon(cfile->tlink);
1653 1654 1655 1656

	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
			tcon->ses->server);

1657
	cifs_sb = CIFS_FILE_SB(file);
1658
	netfid = cfile->fid.netfid;
A
Al Viro 已提交
1659
	cinode = CIFS_I(file_inode(file));
1660

1661
	if (cap_unix(tcon->ses) &&
1662 1663 1664 1665 1666 1667 1668 1669
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		posix_lck = true;
	/*
	 * BB add code here to normalize offset and length to account for
	 * negative length which we can not accept over the wire.
	 */
	if (IS_GETLK(cmd)) {
1670
		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
1671
		free_xid(xid);
1672 1673 1674 1675 1676 1677 1678 1679
		return rc;
	}

	if (!lock && !unlock) {
		/*
		 * if no lock or unlock then nothing to do since we do not
		 * know what it is
		 */
1680
		free_xid(xid);
1681
		return -EOPNOTSUPP;
J
[CIFS]  
Jeremy Allison 已提交
1682 1683
	}

1684 1685
	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
			xid);
1686
	free_xid(xid);
L
Linus Torvalds 已提交
1687 1688 1689
	return rc;
}

1690 1691 1692 1693
/*
 * update the file size (if needed) after a write. Should be called with
 * the inode->i_lock held
 */
1694
void
1695 1696 1697 1698 1699 1700 1701 1702 1703
cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
		      unsigned int bytes_written)
{
	loff_t end_of_write = offset + bytes_written;

	if (end_of_write > cifsi->server_eof)
		cifsi->server_eof = end_of_write;
}

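/*
 * Write data to the server at the given offset, reopening the handle and
 * retrying on reconnect; updates the cached end-of-file and file position.
 */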
static ssize_t
cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
	   size_t write_size, loff_t *offset)
L
Linus Torvalds 已提交
1707 1708 1709 1710 1711
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
1712 1713
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
1714
	unsigned int xid;
1715
	struct dentry *dentry = open_file->dentry;
1716
	struct cifsInodeInfo *cifsi = CIFS_I(d_inode(dentry));
1717
	struct cifs_io_parms io_parms;
L
Linus Torvalds 已提交
1718

1719
	cifs_sb = CIFS_SB(dentry->d_sb);
L
Linus Torvalds 已提交
1720

A
Al Viro 已提交
1721 1722
	cifs_dbg(FYI, "write %zd bytes to offset %lld of %pd\n",
		 write_size, *offset, dentry);
L
Linus Torvalds 已提交
1723

1724 1725 1726 1727 1728
	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_write)
		return -ENOSYS;
1729

1730
	xid = get_xid();
L
Linus Torvalds 已提交
1731 1732 1733 1734 1735

	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
1736 1737 1738
			struct kvec iov[2];
			unsigned int len;

L
Linus Torvalds 已提交
1739 1740 1741
			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
S
Steve French 已提交
1742
				   reopen_file not to flush data to
				   server now */
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}

			len = min(server->ops->wp_retry_size(d_inode(dentry)),
				  (unsigned int)write_size - total_written);
			/* iov[0] is reserved for smb header */
			iov[1].iov_base = (char *)write_data + total_written;
			iov[1].iov_len = len;
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = len;
			rc = server->ops->sync_write(xid, &open_file->fid,
					&io_parms, &bytes_written, iov, 1);
		}
		if (rc || (bytes_written == 0)) {
			if (total_written)
				break;
			else {
				free_xid(xid);
				return rc;
			}
		} else {
			spin_lock(&d_inode(dentry)->i_lock);
			cifs_update_eof(cifsi, *offset, bytes_written);
			spin_unlock(&d_inode(dentry)->i_lock);
			*offset += bytes_written;
		}
	}

	cifs_stats_bytes_written(tcon, total_written);

	if (total_written > 0) {
		spin_lock(&d_inode(dentry)->i_lock);
		if (*offset > d_inode(dentry)->i_size)
			i_size_write(d_inode(dentry), *offset);
		spin_unlock(&d_inode(dentry)->i_lock);
	}
	mark_inode_dirty_sync(d_inode(dentry));
	free_xid(xid);
	return total_written;
}

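/*
 * Find an open handle on this inode that can be used for reading.  On
 * multiuser mounts the search may be restricted to handles owned by the
 * current fsuid.  The returned cifsFileInfo carries an extra reference
 * that the caller must drop with cifsFileInfo_put().
 */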
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file = NULL;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&tcon->open_file_lock);
	/* we could simply get the first_list_entry since write-only entries
	   are always at the end of the list but since the first entry might
	   have a close pending, we go through the whole list */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get(open_file);
				spin_unlock(&tcon->open_file_lock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	spin_unlock(&tcon->open_file_lock);
	return NULL;
}

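/*
 * Find (or reopen) an open handle on this inode that is usable for
 * writing.  Handles belonging to the current task are preferred; an
 * invalidated handle is reopened as a last resort, with up to
 * MAX_REOPEN_ATT attempts.  The caller owns the reference on the
 * returned handle.
 */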
struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file, *inv_file = NULL;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	bool any_available = false;
	int rc;
	unsigned int refind = 0;

	/* Having a null inode here (because mapping->host was set to zero by
	the VFS or MM) should not happen, but we have had reports of an oops
	(due to it being zero) during stress test cases, so check for it */

	if (cifs_inode == NULL) {
		cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
		dump_stack();
		return NULL;
	}

	cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
	tcon = cifs_sb_master_tcon(cifs_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&tcon->open_file_lock);
refind_writable:
	if (refind > MAX_REOPEN_ATT) {
		spin_unlock(&tcon->open_file_lock);
		return NULL;
	}
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (!any_available && open_file->pid != current->tgid)
			continue;
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			if (!open_file->invalidHandle) {
				/* found a good writable file */
				cifsFileInfo_get(open_file);
				spin_unlock(&tcon->open_file_lock);
				return open_file;
			} else {
				if (!inv_file)
					inv_file = open_file;
			}
		}
	}
	/* couldn't find a usable FH with the same pid, try any available */
	if (!any_available) {
		any_available = true;
		goto refind_writable;
	}

	if (inv_file) {
		any_available = false;
		cifsFileInfo_get(inv_file);
	}

	spin_unlock(&tcon->open_file_lock);

	if (inv_file) {
		rc = cifs_reopen_file(inv_file, false);
		if (!rc)
			return inv_file;
		else {
			spin_lock(&tcon->open_file_lock);
			list_move_tail(&inv_file->flist,
					&cifs_inode->openFileList);
			spin_unlock(&tcon->open_file_lock);
			cifsFileInfo_put(inv_file);
			++refind;
			inv_file = NULL;
			spin_lock(&tcon->open_file_lock);
			goto refind_writable;
		}
	}

	return NULL;
}

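/*
 * Write the byte range [from, to) of @page back to the server using any
 * available writable handle.  This is the single-page fallback used by
 * cifs_writepage_locked().
 */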
static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t)page->index << PAGE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;

	offset += (loff_t)from;
	write_data = kmap(page);
	write_data += from;

	if ((to > PAGE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	open_file = find_writable_file(CIFS_I(mapping->host), false);
	if (open_file) {
		bytes_written = cifs_write(open_file, open_file->pid,
					   write_data, to - from, &offset);
		cifsFileInfo_put(open_file);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_time(inode);
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)
			rc = bytes_written;
	} else {
		cifs_dbg(FYI, "No writeable filehandles for inode\n");
		rc = -EIO;
	}

	kunmap(page);
	return rc;
}

static struct cifs_writedata *
wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping,
			  pgoff_t end, pgoff_t *index,
			  unsigned int *found_pages)
{
	unsigned int nr_pages;
	struct page **pages;
	struct cifs_writedata *wdata;

	wdata = cifs_writedata_alloc((unsigned int)tofind,
				     cifs_writev_complete);
	if (!wdata)
		return NULL;

	/*
	 * find_get_pages_tag seems to return a max of 256 on each
	 * iteration, so we must call it several times in order to
	 * fill the array or the wsize is effectively limited to
	 * 256 * PAGE_SIZE.
	 */
	*found_pages = 0;
	pages = wdata->pages;
	do {
		nr_pages = find_get_pages_tag(mapping, index,
					      PAGECACHE_TAG_DIRTY, tofind,
					      pages);
		*found_pages += nr_pages;
		tofind -= nr_pages;
		pages += nr_pages;
	} while (nr_pages && tofind && *index <= end);

	return wdata;
}

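/*
 * Lock and validate the pages gathered for a writepages batch: stop at
 * the first page that is out of range, not contiguous with the previous
 * one, no longer dirty, or already under writeback.  Pages that are kept
 * are marked for writeback; the rest are released.  Returns the number
 * of pages in @wdata that are ready to be sent.
 */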
static unsigned int
wdata_prepare_pages(struct cifs_writedata *wdata, unsigned int found_pages,
		    struct address_space *mapping,
		    struct writeback_control *wbc,
		    pgoff_t end, pgoff_t *index, pgoff_t *next, bool *done)
{
	unsigned int nr_pages = 0, i;
	struct page *page;

	for (i = 0; i < found_pages; i++) {
		page = wdata->pages[i];
		/*
		 * At this point we hold neither mapping->tree_lock nor
		 * lock on the page itself: the page may be truncated or
		 * invalidated (changing page->mapping to NULL), or even
		 * swizzled back from swapper_space to tmpfs file
		 * mapping
		 */

		if (nr_pages == 0)
			lock_page(page);
		else if (!trylock_page(page))
			break;

		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			break;
		}

		if (!wbc->range_cyclic && page->index > end) {
			*done = true;
			unlock_page(page);
			break;
		}

		if (*next && (page->index != *next)) {
			/* Not next consecutive page */
			unlock_page(page);
			break;
		}

		if (wbc->sync_mode != WB_SYNC_NONE)
			wait_on_page_writeback(page);

		if (PageWriteback(page) ||
				!clear_page_dirty_for_io(page)) {
			unlock_page(page);
			break;
		}

		/*
		 * This actually clears the dirty bit in the radix tree.
		 * See cifs_writepage() for more commentary.
		 */
		set_page_writeback(page);
		if (page_offset(page) >= i_size_read(mapping->host)) {
			*done = true;
			unlock_page(page);
			end_page_writeback(page);
			break;
		}

		wdata->pages[i] = page;
		*next = page->index + 1;
		++nr_pages;
	}

	/* reset index to refind any pages skipped */
	if (nr_pages == 0)
		*index = wdata->pages[0]->index + 1;

	/* put any pages we aren't going to use */
	for (i = nr_pages; i < found_pages; i++) {
		put_page(wdata->pages[i]);
		wdata->pages[i] = NULL;
	}

	return nr_pages;
}

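/*
 * Fill in the remaining fields of a writepages request (offset, sizes,
 * target handle) and pass it to the transport's async_writev method.
 * The pages are unlocked here whether or not the send succeeded.
 */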
static int
wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
		 struct address_space *mapping, struct writeback_control *wbc)
{
	int rc = 0;
	struct TCP_Server_Info *server;
	unsigned int i;

	wdata->sync_mode = wbc->sync_mode;
	wdata->nr_pages = nr_pages;
	wdata->offset = page_offset(wdata->pages[0]);
	wdata->pagesz = PAGE_SIZE;
	wdata->tailsz = min(i_size_read(mapping->host) -
			page_offset(wdata->pages[nr_pages - 1]),
			(loff_t)PAGE_SIZE);
	wdata->bytes = ((nr_pages - 1) * PAGE_SIZE) + wdata->tailsz;

	if (wdata->cfile != NULL)
		cifsFileInfo_put(wdata->cfile);
	wdata->cfile = find_writable_file(CIFS_I(mapping->host), false);
	if (!wdata->cfile) {
		cifs_dbg(VFS, "No writable handles for inode\n");
		rc = -EBADF;
	} else {
		wdata->pid = wdata->cfile->pid;
		server = tlink_tcon(wdata->cfile->tlink)->ses->server;
		rc = server->ops->async_writev(wdata, cifs_writedata_release);
	}

	for (i = 0; i < nr_pages; ++i)
		unlock_page(wdata->pages[i]);

	return rc;
}

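/*
 * ->writepages for cifs: batch dirty pages into wsize-sized async write
 * requests, taking send credits from the server for each batch.  Falls
 * back to generic_writepages() (one page at a time) when wsize is
 * smaller than the page cache page size.
 */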
static int cifs_writepages(struct address_space *mapping,
2111
			   struct writeback_control *wbc)
L
Linus Torvalds 已提交
2112
{
2113
	struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
2114
	struct TCP_Server_Info *server;
2115 2116 2117
	bool done = false, scanned = false, range_whole = false;
	pgoff_t end, index;
	struct cifs_writedata *wdata;
2118
	int rc = 0;
2119

2120
	/*
2121
	 * If wsize is smaller than the page cache size, default to writing
2122 2123
	 * one page at a time via cifs_writepage
	 */
2124
	if (cifs_sb->wsize < PAGE_SIZE)
2125 2126
		return generic_writepages(mapping, wbc);

2127
	if (wbc->range_cyclic) {
2128
		index = mapping->writeback_index; /* Start from prev offset */
2129 2130
		end = -1;
	} else {
2131 2132
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
2133
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2134 2135
			range_whole = true;
		scanned = true;
2136
	}
2137
	server = cifs_sb_master_tcon(cifs_sb)->ses->server;
2138
retry:
2139
	while (!done && index <= end) {
2140
		unsigned int i, nr_pages, found_pages, wsize, credits;
2141
		pgoff_t next = 0, tofind, saved_index = index;
2142

2143 2144 2145 2146
		rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
						   &wsize, &credits);
		if (rc)
			break;
2147

2148
		tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1;
2149

2150 2151
		wdata = wdata_alloc_and_fillpages(tofind, mapping, end, &index,
						  &found_pages);
2152 2153
		if (!wdata) {
			rc = -ENOMEM;
2154
			add_credits_and_wake_if(server, credits, 0);
2155 2156 2157 2158 2159
			break;
		}

		if (found_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
2160
			add_credits_and_wake_if(server, credits, 0);
2161 2162 2163
			break;
		}

2164 2165
		nr_pages = wdata_prepare_pages(wdata, found_pages, mapping, wbc,
					       end, &index, &next, &done);
2166

2167 2168 2169
		/* nothing to write? */
		if (nr_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
2170
			add_credits_and_wake_if(server, credits, 0);
2171
			continue;
2172
		}
2173

2174
		wdata->credits = credits;
2175

2176
		rc = wdata_send_pages(wdata, nr_pages, mapping, wbc);
2177

2178 2179
		/* send failure -- clean up the mess */
		if (rc != 0) {
2180
			add_credits_and_wake_if(server, wdata->credits, 0);
2181
			for (i = 0; i < nr_pages; ++i) {
2182
				if (rc == -EAGAIN)
2183 2184 2185 2186 2187
					redirty_page_for_writepage(wbc,
							   wdata->pages[i]);
				else
					SetPageError(wdata->pages[i]);
				end_page_writeback(wdata->pages[i]);
2188
				put_page(wdata->pages[i]);
2189
			}
2190 2191
			if (rc != -EAGAIN)
				mapping_set_error(mapping, rc);
2192 2193
		}
		kref_put(&wdata->refcount, cifs_writedata_release);
2194

2195 2196 2197 2198 2199
		if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN) {
			index = saved_index;
			continue;
		}

2200 2201 2202
		wbc->nr_to_write -= nr_pages;
		if (wbc->nr_to_write <= 0)
			done = true;
2203

2204
		index = next;
2205
	}
2206

2207 2208 2209 2210 2211
	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
2212
		scanned = true;
2213 2214 2215
		index = 0;
		goto retry;
	}
2216

2217
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2218 2219
		mapping->writeback_index = index;

L
Linus Torvalds 已提交
2220 2221 2222
	return rc;
}

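/*
 * Write a single locked page back to the server via
 * cifs_partialpagewrite(), honouring the writeback mode: retry on
 * -EAGAIN for WB_SYNC_ALL, otherwise redirty the page and let a later
 * pass pick it up.
 */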
static int
cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
2226
	int rc;
2227
	unsigned int xid;
L
Linus Torvalds 已提交
2228

2229
	xid = get_xid();
L
Linus Torvalds 已提交
2230
/* BB add check for wbc flags */
2231
	get_page(page);
S
Steve French 已提交
2232
	if (!PageUptodate(page))
2233
		cifs_dbg(FYI, "ppw - page not up to date\n");
2234 2235 2236 2237 2238 2239 2240 2241 2242 2243 2244

	/*
	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
	 *
	 * A writepage() implementation always needs to do either this,
	 * or re-dirty the page with "redirty_page_for_writepage()" in
	 * the case of a failure.
	 *
	 * Just unlocking the page will cause the radix tree tag-bits
	 * to fail to update with the state of the page correctly.
	 */
S
Steve French 已提交
2245
	set_page_writeback(page);
2246
retry_write:
2247
	rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
2248 2249 2250
	if (rc == -EAGAIN) {
		if (wbc->sync_mode == WB_SYNC_ALL)
			goto retry_write;
2251
		redirty_page_for_writepage(wbc, page);
2252
	} else if (rc != 0) {
2253
		SetPageError(page);
2254 2255
		mapping_set_error(page->mapping, rc);
	} else {
2256
		SetPageUptodate(page);
2257
	}
2258
	end_page_writeback(page);
2259
	put_page(page);
2260
	free_xid(xid);
L
Linus Torvalds 已提交
2261 2262 2263
	return rc;
}

2264 2265 2266 2267 2268 2269 2270
static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc = cifs_writepage_locked(page, wbc);
	unlock_page(page);
	return rc;
}

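/*
 * ->write_end: if the page is not up to date, push only the copied bytes
 * to the server synchronously; otherwise just mark the page dirty and
 * let writeback handle it.  The inode size is extended when the write
 * went past EOF.
 */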
static int cifs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
N
Nick Piggin 已提交
2275 2276
	int rc;
	struct inode *inode = mapping->host;
2277 2278 2279 2280 2281 2282 2283 2284
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	__u32 pid;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = cfile->pid;
	else
		pid = current->tgid;
L
Linus Torvalds 已提交
2285

2286
	cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n",
2287
		 page, pos, copied);
N
Nick Piggin 已提交
2288

2289 2290 2291 2292
	if (PageChecked(page)) {
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
2293
	} else if (!PageUptodate(page) && copied == PAGE_SIZE)
N
Nick Piggin 已提交
2294
		SetPageUptodate(page);
S
Steve French 已提交
2295

L
Linus Torvalds 已提交
2296
	if (!PageUptodate(page)) {
N
Nick Piggin 已提交
2297
		char *page_data;
2298
		unsigned offset = pos & (PAGE_SIZE - 1);
2299
		unsigned int xid;
N
Nick Piggin 已提交
2300

2301
		xid = get_xid();
L
Linus Torvalds 已提交
2302 2303 2304 2305 2306 2307
		/* this is probably better than directly calling
		   partialpage_write since in this function the file handle is
		   known which we might as well	leverage */
		/* BB check if anything else missing out of ppw
		   such as updating last write time */
		page_data = kmap(page);
2308
		rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
N
Nick Piggin 已提交
2309
		/* if (rc < 0) should we set writebehind rc? */
L
Linus Torvalds 已提交
2310
		kunmap(page);
N
Nick Piggin 已提交
2311

2312
		free_xid(xid);
S
Steve French 已提交
2313
	} else {
N
Nick Piggin 已提交
2314 2315
		rc = copied;
		pos += copied;
2316
		set_page_dirty(page);
L
Linus Torvalds 已提交
2317 2318
	}

N
Nick Piggin 已提交
2319 2320 2321 2322 2323 2324 2325 2326
	if (rc > 0) {
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
2327
	put_page(page);
N
Nick Piggin 已提交
2328

L
Linus Torvalds 已提交
2329 2330 2331
	return rc;
}

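/*
 * fsync for strict cache mode: flush dirty pages, invalidate the local
 * mapping if we no longer hold a read oplock/lease, then ask the server
 * to flush the file unless the mount disabled server-side syncs.
 */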
int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
2335
	unsigned int xid;
L
Linus Torvalds 已提交
2336
	int rc = 0;
2337
	struct cifs_tcon *tcon;
2338
	struct TCP_Server_Info *server;
2339
	struct cifsFileInfo *smbfile = file->private_data;
A
Al Viro 已提交
2340
	struct inode *inode = file_inode(file);
2341
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
L
Linus Torvalds 已提交
2342

2343
	rc = file_write_and_wait_range(file, start, end);
2344 2345
	if (rc)
		return rc;
A
Al Viro 已提交
2346
	inode_lock(inode);
2347

2348
	xid = get_xid();
L
Linus Torvalds 已提交
2349

A
Al Viro 已提交
2350 2351
	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
		 file, datasync);
2352

2353
	if (!CIFS_CACHE_READ(CIFS_I(inode))) {
2354
		rc = cifs_zap_mapping(inode);
2355
		if (rc) {
2356
			cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
2357 2358 2359
			rc = 0; /* don't care about it in fsync */
		}
	}
2360

2361
	tcon = tlink_tcon(smbfile->tlink);
2362 2363 2364 2365 2366 2367 2368
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}
2369

2370
	free_xid(xid);
A
Al Viro 已提交
2371
	inode_unlock(inode);
2372 2373 2374
	return rc;
}

2375
int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2376
{
2377
	unsigned int xid;
2378
	int rc = 0;
2379
	struct cifs_tcon *tcon;
2380
	struct TCP_Server_Info *server;
2381
	struct cifsFileInfo *smbfile = file->private_data;
2382
	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
2383 2384
	struct inode *inode = file->f_mapping->host;

2385
	rc = file_write_and_wait_range(file, start, end);
2386 2387
	if (rc)
		return rc;
A
Al Viro 已提交
2388
	inode_lock(inode);
2389

2390
	xid = get_xid();
2391

A
Al Viro 已提交
2392 2393
	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
		 file, datasync);
2394 2395

	tcon = tlink_tcon(smbfile->tlink);
2396 2397 2398 2399 2400 2401 2402
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}
2403

2404
	free_xid(xid);
A
Al Viro 已提交
2405
	inode_unlock(inode);
L
Linus Torvalds 已提交
2406 2407 2408 2409 2410 2411 2412
	return rc;
}

/*
 * As file closes, flush all cached write data for this inode checking
 * for write behind errors.
 */
2413
int cifs_flush(struct file *file, fl_owner_t id)
L
Linus Torvalds 已提交
2414
{
A
Al Viro 已提交
2415
	struct inode *inode = file_inode(file);
L
Linus Torvalds 已提交
2416 2417
	int rc = 0;

2418
	if (file->f_mode & FMODE_WRITE)
2419
		rc = filemap_write_and_wait(inode->i_mapping);
2420

2421
	cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
L
Linus Torvalds 已提交
2422 2423 2424 2425

	return rc;
}

2426 2427 2428 2429 2430 2431 2432
static int
cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
{
	int rc = 0;
	unsigned long i;

	for (i = 0; i < num_pages; i++) {
2433
		pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2434 2435 2436 2437 2438 2439 2440
		if (!pages[i]) {
			/*
			 * save number of pages we have already allocated and
			 * return with ENOMEM error
			 */
			num_pages = i;
			rc = -ENOMEM;
2441
			break;
2442 2443 2444
		}
	}

2445 2446 2447 2448
	if (rc) {
		for (i = 0; i < num_pages; i++)
			put_page(pages[i]);
	}
2449 2450 2451 2452 2453 2454 2455 2456 2457 2458
	return rc;
}

static inline
size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
{
	size_t num_pages;
	size_t clen;

	clen = min_t(const size_t, len, wsize);
J
Jeff Layton 已提交
2459
	num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
2460 2461 2462 2463 2464 2465 2466

	if (cur_len)
		*cur_len = clen;

	return num_pages;
}

2467
static void
2468
cifs_uncached_writedata_release(struct kref *refcount)
2469 2470
{
	int i;
2471 2472 2473
	struct cifs_writedata *wdata = container_of(refcount,
					struct cifs_writedata, refcount);

2474
	kref_put(&wdata->ctx->refcount, cifs_aio_ctx_release);
2475 2476 2477 2478 2479
	for (i = 0; i < wdata->nr_pages; i++)
		put_page(wdata->pages[i]);
	cifs_writedata_release(refcount);
}

2480 2481
static void collect_uncached_write_data(struct cifs_aio_ctx *ctx);

2482 2483 2484
static void
cifs_uncached_writev_complete(struct work_struct *work)
{
2485 2486
	struct cifs_writedata *wdata = container_of(work,
					struct cifs_writedata, work);
2487
	struct inode *inode = d_inode(wdata->cfile->dentry);
2488 2489 2490 2491 2492 2493 2494 2495 2496
	struct cifsInodeInfo *cifsi = CIFS_I(inode);

	spin_lock(&inode->i_lock);
	cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
	if (cifsi->server_eof > inode->i_size)
		i_size_write(inode, cifsi->server_eof);
	spin_unlock(&inode->i_lock);

	complete(&wdata->done);
2497 2498
	collect_uncached_write_data(wdata->ctx);
	/* the below call can possibly free the last ref to aio ctx */
2499
	kref_put(&wdata->refcount, cifs_uncached_writedata_release);
2500 2501 2502
}

static int
2503 2504
wdata_fill_from_iovec(struct cifs_writedata *wdata, struct iov_iter *from,
		      size_t *len, unsigned long *num_pages)
2505
{
2506 2507
	size_t save_len, copied, bytes, cur_len = *len;
	unsigned long i, nr_pages = *num_pages;
2508

	save_len = cur_len;
	for (i = 0; i < nr_pages; i++) {
		bytes = min_t(const size_t, cur_len, PAGE_SIZE);
		copied = copy_page_from_iter(wdata->pages[i], 0, bytes, from);
		cur_len -= copied;
		/*
		 * If we didn't copy as much as we expected, then that
		 * may mean we trod into an unmapped area. Stop copying
		 * at that point. On the next pass through the big
		 * loop, we'll likely end up getting a zero-length
		 * write and bailing out of it.
		 */
		if (copied < bytes)
			break;
	}
	cur_len = save_len - cur_len;
	*len = cur_len;
2526

2527 2528 2529 2530 2531 2532 2533 2534
	/*
	 * If we have no data to send, then that probably means that
	 * the copy above failed altogether. That's most likely because
	 * the address in the iovec was bogus. Return -EFAULT and let
	 * the caller free anything we allocated and bail out.
	 */
	if (!cur_len)
		return -EFAULT;
2535

2536 2537 2538 2539 2540 2541
	/*
	 * i + 1 now represents the number of pages we actually used in
	 * the copy phase above.
	 */
	*num_pages = i + 1;
	return 0;
2542 2543
}

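/*
 * Core of the uncached write path: carve the source iterator into
 * wsize-sized chunks, copy each chunk into freshly allocated pages and
 * queue it as an async write.  Every successfully queued cifs_writedata
 * is added to @wdata_list so its result can be collected later.
 */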
static int
cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
		     struct cifsFileInfo *open_file,
		     struct cifs_sb_info *cifs_sb, struct list_head *wdata_list,
		     struct cifs_aio_ctx *ctx)
{
2550 2551
	int rc = 0;
	size_t cur_len;
2552
	unsigned long nr_pages, num_pages, i;
2553
	struct cifs_writedata *wdata;
2554
	struct iov_iter saved_from = *from;
2555
	loff_t saved_offset = offset;
2556
	pid_t pid;
2557
	struct TCP_Server_Info *server;
2558 2559 2560 2561 2562 2563

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

2564 2565
	server = tlink_tcon(open_file->tlink)->ses->server;

2566
	do {
2567 2568 2569 2570 2571 2572
		unsigned int wsize, credits;

		rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
						   &wsize, &credits);
		if (rc)
			break;
2573

2574
		nr_pages = get_numpages(wsize, len, &cur_len);
2575 2576 2577 2578
		wdata = cifs_writedata_alloc(nr_pages,
					     cifs_uncached_writev_complete);
		if (!wdata) {
			rc = -ENOMEM;
2579
			add_credits_and_wake_if(server, credits, 0);
2580 2581 2582 2583 2584 2585
			break;
		}

		rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
		if (rc) {
			kfree(wdata);
2586
			add_credits_and_wake_if(server, credits, 0);
2587 2588 2589
			break;
		}

2590 2591 2592
		num_pages = nr_pages;
		rc = wdata_fill_from_iovec(wdata, from, &cur_len, &num_pages);
		if (rc) {
2593 2594 2595
			for (i = 0; i < nr_pages; i++)
				put_page(wdata->pages[i]);
			kfree(wdata);
2596
			add_credits_and_wake_if(server, credits, 0);
2597 2598 2599 2600
			break;
		}

		/*
2601 2602
		 * Bring nr_pages down to the number of pages we actually used,
		 * and free any pages that we didn't use.
2603
		 */
2604
		for ( ; nr_pages > num_pages; nr_pages--)
2605 2606
			put_page(wdata->pages[nr_pages - 1]);

2607 2608 2609 2610 2611 2612
		wdata->sync_mode = WB_SYNC_ALL;
		wdata->nr_pages = nr_pages;
		wdata->offset = (__u64)offset;
		wdata->cfile = cifsFileInfo_get(open_file);
		wdata->pid = pid;
		wdata->bytes = cur_len;
2613 2614
		wdata->pagesz = PAGE_SIZE;
		wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
2615
		wdata->credits = credits;
2616 2617
		wdata->ctx = ctx;
		kref_get(&ctx->refcount);
2618 2619

		if (!wdata->cfile->invalidHandle ||
2620
		    !(rc = cifs_reopen_file(wdata->cfile, false)))
2621 2622
			rc = server->ops->async_writev(wdata,
					cifs_uncached_writedata_release);
2623
		if (rc) {
2624
			add_credits_and_wake_if(server, wdata->credits, 0);
2625 2626
			kref_put(&wdata->refcount,
				 cifs_uncached_writedata_release);
2627
			if (rc == -EAGAIN) {
2628
				*from = saved_from;
2629 2630 2631
				iov_iter_advance(from, offset - saved_offset);
				continue;
			}
2632 2633 2634
			break;
		}

2635
		list_add_tail(&wdata->list, wdata_list);
2636 2637
		offset += cur_len;
		len -= cur_len;
2638 2639
	} while (len > 0);

2640 2641 2642
	return rc;
}

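/*
 * Collect the results of the async uncached writes.  Called from the
 * completion work of each chunk: replies are consumed in order of
 * increasing offset, retryable (-EAGAIN) chunks are resent, and once
 * every chunk has completed the aio context is finalized and the waiter
 * (or the iocb) is signalled.
 */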
static void collect_uncached_write_data(struct cifs_aio_ctx *ctx)
{
2645
	struct cifs_writedata *wdata, *tmp;
2646 2647
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb;
2648 2649
	struct dentry *dentry = ctx->cfile->dentry;
	unsigned int i;
2650 2651
	int rc;

2652 2653
	tcon = tlink_tcon(ctx->cfile->tlink);
	cifs_sb = CIFS_SB(dentry->d_sb);
2654

2655
	mutex_lock(&ctx->aio_mutex);
2656

2657 2658 2659 2660
	if (list_empty(&ctx->list)) {
		mutex_unlock(&ctx->aio_mutex);
		return;
	}
2661

2662
	rc = ctx->rc;
2663 2664
	/*
	 * Wait for and collect replies for any successful sends in order of
2665 2666
	 * increasing offset. Once an error is hit, then return without waiting
	 * for any more replies.
2667 2668
	 */
restart_loop:
2669
	list_for_each_entry_safe(wdata, tmp, &ctx->list, list) {
2670
		if (!rc) {
2671 2672 2673 2674 2675 2676
			if (!try_wait_for_completion(&wdata->done)) {
				mutex_unlock(&ctx->aio_mutex);
				return;
			}

			if (wdata->result)
2677 2678
				rc = wdata->result;
			else
2679
				ctx->total_len += wdata->bytes;
2680 2681 2682

			/* resend call if it's a retryable error */
			if (rc == -EAGAIN) {
2683
				struct list_head tmp_list;
2684
				struct iov_iter tmp_from = ctx->iter;
2685 2686 2687 2688 2689

				INIT_LIST_HEAD(&tmp_list);
				list_del_init(&wdata->list);

				iov_iter_advance(&tmp_from,
2690
						 wdata->offset - ctx->pos);
2691 2692 2693

				rc = cifs_write_from_iter(wdata->offset,
						wdata->bytes, &tmp_from,
2694 2695
						ctx->cfile, cifs_sb, &tmp_list,
						ctx);
2696

2697
				list_splice(&tmp_list, &ctx->list);
2698 2699 2700

				kref_put(&wdata->refcount,
					 cifs_uncached_writedata_release);
2701 2702 2703 2704
				goto restart_loop;
			}
		}
		list_del_init(&wdata->list);
2705
		kref_put(&wdata->refcount, cifs_uncached_writedata_release);
2706 2707
	}

	for (i = 0; i < ctx->npages; i++)
		put_page(ctx->bv[i].bv_page);

	cifs_stats_bytes_written(tcon, ctx->total_len);
	set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(dentry->d_inode)->flags);

	ctx->rc = (rc == 0) ? ctx->total_len : rc;

	mutex_unlock(&ctx->aio_mutex);

	if (ctx->iocb && ctx->iocb->ki_complete)
		ctx->iocb->ki_complete(ctx->iocb, ctx->rc, 0);
	else
		complete(&ctx->done);
}

ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	ssize_t total_written = 0;
	struct cifsFileInfo *cfile;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb;
	struct cifs_aio_ctx *ctx;
	struct iov_iter saved_from = *from;
	int rc;

	/*
	 * BB - optimize the way when signing is disabled. We can drop this
	 * extra memory-to-memory copying and use iovec buffers for constructing
	 * write request.
	 */

	rc = generic_write_checks(iocb, from);
	if (rc <= 0)
		return rc;

	cifs_sb = CIFS_FILE_SB(file);
	cfile = file->private_data;
	tcon = tlink_tcon(cfile->tlink);

	if (!tcon->ses->server->ops->async_writev)
		return -ENOSYS;

	ctx = cifs_aio_ctx_alloc();
	if (!ctx)
		return -ENOMEM;

	ctx->cfile = cifsFileInfo_get(cfile);

	if (!is_sync_kiocb(iocb))
		ctx->iocb = iocb;

	ctx->pos = iocb->ki_pos;

	rc = setup_aio_ctx_iter(ctx, from, WRITE);
	if (rc) {
		kref_put(&ctx->refcount, cifs_aio_ctx_release);
		return rc;
	}

	/* grab a lock here due to read response handlers can access ctx */
	mutex_lock(&ctx->aio_mutex);

	rc = cifs_write_from_iter(iocb->ki_pos, ctx->len, &saved_from,
				  cfile, cifs_sb, &ctx->list, ctx);

	/*
	 * If at least one write was successfully sent, then discard any rc
	 * value from the later writes. If the other write succeeds, then
	 * we'll end up returning whatever was written. If it fails, then
	 * we'll get a new rc value from that.
	 */
	if (!list_empty(&ctx->list))
		rc = 0;

	mutex_unlock(&ctx->aio_mutex);

	if (rc) {
		kref_put(&ctx->refcount, cifs_aio_ctx_release);
		return rc;
	}

	if (!is_sync_kiocb(iocb)) {
		kref_put(&ctx->refcount, cifs_aio_ctx_release);
		return -EIOCBQUEUED;
	}

	rc = wait_for_completion_killable(&ctx->done);
	if (rc) {
		mutex_lock(&ctx->aio_mutex);
		ctx->rc = rc = -EINTR;
		total_written = ctx->total_len;
		mutex_unlock(&ctx->aio_mutex);
	} else {
		rc = ctx->rc;
		total_written = ctx->total_len;
	}

	kref_put(&ctx->refcount, cifs_aio_ctx_release);

2809 2810
	if (unlikely(!total_written))
		return rc;
2811

2812 2813
	iocb->ki_pos += total_written;
	return total_written;
2814 2815
}

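/*
 * Cached write path, used when the client is allowed to cache writes:
 * take the inode lock, check for conflicting mandatory brlocks and then
 * write through the page cache with __generic_file_write_iter(),
 * syncing afterwards when the file requires it.
 */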
static ssize_t
cifs_writev(struct kiocb *iocb, struct iov_iter *from)
{
2819 2820 2821 2822 2823
	struct file *file = iocb->ki_filp;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct inode *inode = file->f_mapping->host;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
2824
	ssize_t rc;
2825

2826
	inode_lock(inode);
2827 2828 2829 2830 2831
	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents writing.
	 */
	down_read(&cinode->lock_sem);
2832

2833 2834
	rc = generic_write_checks(iocb, from);
	if (rc <= 0)
2835 2836 2837
		goto out;

	if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
2838
				     server->vals->exclusive_lock_type, NULL,
2839
				     CIFS_WRITE_OP))
A
Al Viro 已提交
2840
		rc = __generic_file_write_iter(iocb, from);
2841 2842 2843
	else
		rc = -EACCES;
out:
2844
	up_read(&cinode->lock_sem);
A
Al Viro 已提交
2845
	inode_unlock(inode);
A
Al Viro 已提交
2846

2847 2848
	if (rc > 0)
		rc = generic_write_sync(iocb, rc);
2849 2850 2851 2852
	return rc;
}

ssize_t
A
Al Viro 已提交
2853
cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
2854
{
A
Al Viro 已提交
2855
	struct inode *inode = file_inode(iocb->ki_filp);
2856 2857 2858 2859 2860
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
						iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2861
	ssize_t written;
2862

2863 2864 2865 2866
	written = cifs_get_writer(cinode);
	if (written)
		return written;

2867
	if (CIFS_CACHE_WRITE(cinode)) {
2868 2869
		if (cap_unix(tcon->ses) &&
		(CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
2870
		  && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
A
Al Viro 已提交
2871
			written = generic_file_write_iter(iocb, from);
2872 2873
			goto out;
		}
A
Al Viro 已提交
2874
		written = cifs_writev(iocb, from);
2875
		goto out;
2876 2877
	}
	/*
	 * For non-oplocked files in strict cache mode we need to write the data
	 * to the server exactly from pos to pos+len-1 rather than flush all
	 * affected pages, because doing so may cause an error with mandatory
	 * locks on those pages but not on the region from pos to pos+len-1.
	 */
A
Al Viro 已提交
2883
	written = cifs_user_writev(iocb, from);
2884
	if (written > 0 && CIFS_CACHE_READ(cinode)) {
2885 2886 2887 2888 2889
		/*
		 * Windows 7 server can delay breaking level2 oplock if a write
		 * request comes - break it on the client to prevent reading
		 * an old data.
		 */
2890
		cifs_zap_mapping(inode);
2891 2892
		cifs_dbg(FYI, "Set no oplock for inode=%p after a write operation\n",
			 inode);
2893
		cinode->oplock = 0;
2894
	}
2895 2896
out:
	cifs_put_writer(cinode);
2897
	return written;
2898 2899
}

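/*
 * Allocate a cifs_readdata with room for @nr_pages page pointers and
 * initialize its refcount, completion and work structures.
 */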
static struct cifs_readdata *
cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
{
	struct cifs_readdata *rdata;
2904

2905 2906
	rdata = kzalloc(sizeof(*rdata) + (sizeof(struct page *) * nr_pages),
			GFP_KERNEL);
2907
	if (rdata != NULL) {
2908
		kref_init(&rdata->refcount);
2909 2910
		INIT_LIST_HEAD(&rdata->list);
		init_completion(&rdata->done);
2911 2912
		INIT_WORK(&rdata->work, complete);
	}
2913

2914 2915 2916
	return rdata;
}

2917 2918
void
cifs_readdata_release(struct kref *refcount)
2919
{
2920 2921 2922 2923 2924 2925
	struct cifs_readdata *rdata = container_of(refcount,
					struct cifs_readdata, refcount);

	if (rdata->cfile)
		cifsFileInfo_put(rdata->cfile);

2926 2927 2928
	kfree(rdata);
}

2929
static int
2930
cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
2931 2932
{
	int rc = 0;
2933
	struct page *page;
2934 2935
	unsigned int i;

2936
	for (i = 0; i < nr_pages; i++) {
2937 2938 2939 2940 2941
		page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
		if (!page) {
			rc = -ENOMEM;
			break;
		}
2942
		rdata->pages[i] = page;
2943 2944 2945
	}

	if (rc) {
2946 2947 2948
		for (i = 0; i < nr_pages; i++) {
			put_page(rdata->pages[i]);
			rdata->pages[i] = NULL;
2949 2950 2951 2952 2953 2954 2955 2956 2957 2958
		}
	}
	return rc;
}

static void
cifs_uncached_readdata_release(struct kref *refcount)
{
	struct cifs_readdata *rdata = container_of(refcount,
					struct cifs_readdata, refcount);
2959
	unsigned int i;
2960

2961
	kref_put(&rdata->ctx->refcount, cifs_aio_ctx_release);
2962 2963 2964
	for (i = 0; i < rdata->nr_pages; i++) {
		put_page(rdata->pages[i]);
		rdata->pages[i] = NULL;
2965 2966 2967 2968 2969 2970 2971
	}
	cifs_readdata_release(refcount);
}

/**
 * cifs_readdata_to_iov - copy data from pages in response to an iovec
 * @rdata:	the readdata response with list of pages holding data
2972
 * @iter:	destination for our data
2973 2974 2975 2976 2977
 *
 * This function copies data from a list of pages in a readdata response into
 * an array of iovecs. It will first calculate where the data should go
 * based on the info in the readdata and then copy the data into that spot.
 */
2978 2979
static int
cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter)
2980
{
2981
	size_t remaining = rdata->got_bytes;
2982
	unsigned int i;
2983

2984 2985
	for (i = 0; i < rdata->nr_pages; i++) {
		struct page *page = rdata->pages[i];
2986
		size_t copy = min_t(size_t, remaining, PAGE_SIZE);
2987 2988 2989 2990 2991 2992 2993 2994 2995
		size_t written;

		if (unlikely(iter->type & ITER_PIPE)) {
			void *addr = kmap_atomic(page);

			written = copy_to_iter(addr, copy, iter);
			kunmap_atomic(addr);
		} else
			written = copy_page_to_iter(page, 0, copy, iter);
2996 2997 2998
		remaining -= written;
		if (written < copy && iov_iter_count(iter) > 0)
			break;
2999
	}
3000
	return remaining ? -EFAULT : 0;
3001 3002
}

3003 3004
static void collect_uncached_read_data(struct cifs_aio_ctx *ctx);

3005 3006 3007 3008 3009 3010 3011
static void
cifs_uncached_readv_complete(struct work_struct *work)
{
	struct cifs_readdata *rdata = container_of(work,
						struct cifs_readdata, work);

	complete(&rdata->done);
3012 3013
	collect_uncached_read_data(rdata->ctx);
	/* the below call can possibly free the last ref to aio ctx */
3014 3015 3016 3017
	kref_put(&rdata->refcount, cifs_uncached_readdata_release);
}

static int
3018 3019 3020
uncached_fill_pages(struct TCP_Server_Info *server,
		    struct cifs_readdata *rdata, struct iov_iter *iter,
		    unsigned int len)
3021
{
3022
	int result = 0;
3023 3024
	unsigned int i;
	unsigned int nr_pages = rdata->nr_pages;
3025

3026
	rdata->got_bytes = 0;
3027
	rdata->tailsz = PAGE_SIZE;
3028 3029
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];
3030
		size_t n;
3031

3032
		if (len <= 0) {
3033
			/* no need to hold page hostage */
3034 3035
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
3036
			put_page(page);
3037
			continue;
3038
		}
3039 3040 3041 3042 3043 3044 3045 3046 3047 3048
		n = len;
		if (len >= PAGE_SIZE) {
			/* enough data to fill the page */
			n = PAGE_SIZE;
			len -= n;
		} else {
			zero_user(page, len, PAGE_SIZE - len);
			rdata->tailsz = len;
			len = 0;
		}
3049 3050 3051 3052
		if (iter)
			result = copy_page_from_iter(page, 0, n, iter);
		else
			result = cifs_read_page_from_socket(server, page, n);
3053 3054 3055
		if (result < 0)
			break;

3056
		rdata->got_bytes += result;
3057 3058
	}

3059 3060
	return rdata->got_bytes > 0 && result != -ECONNABORTED ?
						rdata->got_bytes : result;
3061 3062
}

static int
cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
			      struct cifs_readdata *rdata, unsigned int len)
{
	return uncached_fill_pages(server, rdata, NULL, len);
}

static int
cifs_uncached_copy_into_pages(struct TCP_Server_Info *server,
			      struct cifs_readdata *rdata,
			      struct iov_iter *iter)
{
	return uncached_fill_pages(server, rdata, iter, iter->count);
}

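/*
 * Issue the async reads for an uncached read: split the request into
 * rsize-sized chunks, allocate pages for each chunk and queue it,
 * adding every cifs_readdata to @rdata_list for later collection.
 */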
static int
cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
		     struct cifs_sb_info *cifs_sb, struct list_head *rdata_list,
		     struct cifs_aio_ctx *ctx)
{
3083
	struct cifs_readdata *rdata;
3084
	unsigned int npages, rsize, credits;
3085 3086
	size_t cur_len;
	int rc;
3087
	pid_t pid;
3088
	struct TCP_Server_Info *server;
3089

3090
	server = tlink_tcon(open_file->tlink)->ses->server;
3091

3092 3093 3094 3095 3096
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

3097
	do {
3098 3099 3100 3101 3102 3103
		rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
						   &rsize, &credits);
		if (rc)
			break;

		cur_len = min_t(const size_t, len, rsize);
3104
		npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
3105

3106 3107 3108 3109
		/* allocate a readdata struct */
		rdata = cifs_readdata_alloc(npages,
					    cifs_uncached_readv_complete);
		if (!rdata) {
3110
			add_credits_and_wake_if(server, credits, 0);
3111
			rc = -ENOMEM;
3112
			break;
L
Linus Torvalds 已提交
3113
		}
3114

3115
		rc = cifs_read_allocate_pages(rdata, npages);
3116 3117 3118 3119
		if (rc)
			goto error;

		rdata->cfile = cifsFileInfo_get(open_file);
3120
		rdata->nr_pages = npages;
3121 3122 3123
		rdata->offset = offset;
		rdata->bytes = cur_len;
		rdata->pid = pid;
3124 3125
		rdata->pagesz = PAGE_SIZE;
		rdata->read_into_pages = cifs_uncached_read_into_pages;
3126
		rdata->copy_into_pages = cifs_uncached_copy_into_pages;
3127
		rdata->credits = credits;
3128 3129
		rdata->ctx = ctx;
		kref_get(&ctx->refcount);
3130

3131
		if (!rdata->cfile->invalidHandle ||
3132
		    !(rc = cifs_reopen_file(rdata->cfile, true)))
3133
			rc = server->ops->async_readv(rdata);
3134 3135
error:
		if (rc) {
3136
			add_credits_and_wake_if(server, rdata->credits, 0);
3137 3138
			kref_put(&rdata->refcount,
				 cifs_uncached_readdata_release);
3139 3140
			if (rc == -EAGAIN)
				continue;
3141 3142 3143
			break;
		}

3144
		list_add_tail(&rdata->list, rdata_list);
3145 3146 3147 3148
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

3149 3150 3151
	return rc;
}

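/*
 * Counterpart of collect_uncached_write_data() for reads: consume the
 * completed cifs_readdata structures in offset order, copy the returned
 * data into the destination iterator, resend short or -EAGAIN chunks,
 * and finally complete the aio context.
 */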
static void
collect_uncached_read_data(struct cifs_aio_ctx *ctx)
{
3155 3156
	struct cifs_readdata *rdata, *tmp;
	struct iov_iter *to = &ctx->iter;
3157 3158
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
3159 3160
	unsigned int i;
	int rc;
3161

3162 3163
	tcon = tlink_tcon(ctx->cfile->tlink);
	cifs_sb = CIFS_SB(ctx->cfile->dentry->d_sb);
3164

3165
	mutex_lock(&ctx->aio_mutex);
3166

3167 3168 3169 3170
	if (list_empty(&ctx->list)) {
		mutex_unlock(&ctx->aio_mutex);
		return;
	}
3171

3172
	rc = ctx->rc;
3173
	/* the loop below should proceed in the order of increasing offsets */
3174
again:
3175
	list_for_each_entry_safe(rdata, tmp, &ctx->list, list) {
3176
		if (!rc) {
3177 3178 3179 3180 3181 3182
			if (!try_wait_for_completion(&rdata->done)) {
				mutex_unlock(&ctx->aio_mutex);
				return;
			}

			if (rdata->result == -EAGAIN) {
3183
				/* resend call if it's a retryable error */
3184
				struct list_head tmp_list;
3185
				unsigned int got_bytes = rdata->got_bytes;
3186

3187 3188
				list_del_init(&rdata->list);
				INIT_LIST_HEAD(&tmp_list);
3189

3190 3191 3192 3193 3194 3195 3196 3197 3198 3199 3200 3201
				/*
				 * Got a part of data and then reconnect has
				 * happened -- fill the buffer and continue
				 * reading.
				 */
				if (got_bytes && got_bytes < rdata->bytes) {
					rc = cifs_readdata_to_iov(rdata, to);
					if (rc) {
						kref_put(&rdata->refcount,
						cifs_uncached_readdata_release);
						continue;
					}
3202
				}
3203 3204 3205 3206 3207

				rc = cifs_send_async_read(
						rdata->offset + got_bytes,
						rdata->bytes - got_bytes,
						rdata->cfile, cifs_sb,
3208
						&tmp_list, ctx);
3209

3210
				list_splice(&tmp_list, &ctx->list);
3211

3212 3213 3214 3215 3216 3217
				kref_put(&rdata->refcount,
					 cifs_uncached_readdata_release);
				goto again;
			} else if (rdata->result)
				rc = rdata->result;
			else
A
Al Viro 已提交
3218
				rc = cifs_readdata_to_iov(rdata, to);
3219

3220 3221 3222
			/* if there was a short read -- discard anything left */
			if (rdata->got_bytes && rdata->got_bytes < rdata->bytes)
				rc = -ENODATA;
L
Linus Torvalds 已提交
3223
		}
3224 3225
		list_del_init(&rdata->list);
		kref_put(&rdata->refcount, cifs_uncached_readdata_release);
L
Linus Torvalds 已提交
3226
	}
3227

3228 3229 3230 3231 3232
	for (i = 0; i < ctx->npages; i++) {
		if (ctx->should_dirty)
			set_page_dirty(ctx->bv[i].bv_page);
		put_page(ctx->bv[i].bv_page);
	}
3233

3234 3235 3236
	ctx->total_len = ctx->len - iov_iter_count(to);

	cifs_stats_bytes_read(tcon, ctx->total_len);
3237

3238 3239 3240 3241
	/* mask nodata case */
	if (rc == -ENODATA)
		rc = 0;

	ctx->rc = (rc == 0) ? ctx->total_len : rc;

	mutex_unlock(&ctx->aio_mutex);

	if (ctx->iocb && ctx->iocb->ki_complete)
		ctx->iocb->ki_complete(ctx->iocb, ctx->rc, 0);
	else
		complete(&ctx->done);
}

ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	ssize_t rc;
	size_t len;
	ssize_t total_read = 0;
	loff_t offset = iocb->ki_pos;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsFileInfo *cfile;
	struct cifs_aio_ctx *ctx;

	len = iov_iter_count(to);
	if (!len)
		return 0;

	cifs_sb = CIFS_FILE_SB(file);
	cfile = file->private_data;
	tcon = tlink_tcon(cfile->tlink);

	if (!tcon->ses->server->ops->async_readv)
		return -ENOSYS;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cifs_dbg(FYI, "attempting read on write only file instance\n");

	ctx = cifs_aio_ctx_alloc();
	if (!ctx)
		return -ENOMEM;

	ctx->cfile = cifsFileInfo_get(cfile);

	if (!is_sync_kiocb(iocb))
		ctx->iocb = iocb;

3287
	if (to->type == ITER_IOVEC)
		ctx->should_dirty = true;

	rc = setup_aio_ctx_iter(ctx, to, READ);
	if (rc) {
		kref_put(&ctx->refcount, cifs_aio_ctx_release);
		return rc;
	}

	len = ctx->len;

	/* grab a lock here due to read response handlers can access ctx */
	mutex_lock(&ctx->aio_mutex);

	rc = cifs_send_async_read(offset, len, cfile, cifs_sb, &ctx->list, ctx);

	/* if at least one read request send succeeded, then reset rc */
	if (!list_empty(&ctx->list))
		rc = 0;

	mutex_unlock(&ctx->aio_mutex);

	if (rc) {
		kref_put(&ctx->refcount, cifs_aio_ctx_release);
		return rc;
	}

	if (!is_sync_kiocb(iocb)) {
		kref_put(&ctx->refcount, cifs_aio_ctx_release);
		return -EIOCBQUEUED;
	}

	rc = wait_for_completion_killable(&ctx->done);
	if (rc) {
		mutex_lock(&ctx->aio_mutex);
		ctx->rc = rc = -EINTR;
		total_read = ctx->total_len;
		mutex_unlock(&ctx->aio_mutex);
	} else {
		rc = ctx->rc;
		total_read = ctx->total_len;
	}

	kref_put(&ctx->refcount, cifs_aio_ctx_release);

3332
	if (total_read) {
A
Al Viro 已提交
3333
		iocb->ki_pos += total_read;
3334 3335 3336
		return total_read;
	}
	return rc;
3337 3338
}

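/*
 * Read path for strict cache mode: read from the server unless we hold
 * a read oplock/lease, and even then only go through the page cache if
 * POSIX locking is in use or no conflicting mandatory brlock covers the
 * range.
 */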
ssize_t
cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
{
A
Al Viro 已提交
3342
	struct inode *inode = file_inode(iocb->ki_filp);
3343 3344 3345 3346 3347 3348
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
						iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = -EACCES;
3349 3350 3351 3352 3353 3354 3355 3356 3357

	/*
	 * In strict cache mode we need to read from the server all the time
	 * if we don't have level II oplock because the server can delay mtime
	 * change - so we can't make a decision about inode invalidating.
	 * And we can also fail with pagereading if there are mandatory locks
	 * on pages affected by this read but not on the region from pos to
	 * pos+len-1.
	 */
3358
	if (!CIFS_CACHE_READ(cinode))
A
Al Viro 已提交
3359
		return cifs_user_readv(iocb, to);
3360

3361 3362 3363
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
A
Al Viro 已提交
3364
		return generic_file_read_iter(iocb, to);
3365 3366 3367 3368 3369 3370

	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents reading.
	 */
	down_read(&cinode->lock_sem);
A
Al Viro 已提交
3371
	if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(to),
3372
				     tcon->ses->server->vals->shared_lock_type,
3373
				     NULL, CIFS_READ_OP))
A
Al Viro 已提交
3374
		rc = generic_file_read_iter(iocb, to);
3375 3376
	up_read(&cinode->lock_sem);
	return rc;
3377
}
L
Linus Torvalds 已提交
3378

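/*
 * Legacy synchronous read helper: issues reads of at most rsize bytes at
 * a time through the ->sync_read operation, retrying on -EAGAIN and
 * reopening an invalidated handle as needed.
 */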
static ssize_t
cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
{
	int rc = -EACCES;
	unsigned int bytes_read = 0;
	unsigned int total_read;
	unsigned int current_read_size;
3386
	unsigned int rsize;
L
Linus Torvalds 已提交
3387
	struct cifs_sb_info *cifs_sb;
3388
	struct cifs_tcon *tcon;
3389
	struct TCP_Server_Info *server;
3390
	unsigned int xid;
3391
	char *cur_offset;
L
Linus Torvalds 已提交
3392
	struct cifsFileInfo *open_file;
3393
	struct cifs_io_parms io_parms;
3394
	int buf_type = CIFS_NO_BUFFER;
3395
	__u32 pid;
L
Linus Torvalds 已提交
3396

3397
	xid = get_xid();
3398
	cifs_sb = CIFS_FILE_SB(file);
L
Linus Torvalds 已提交
3399

3400 3401 3402
	/* FIXME: set up handlers for larger reads and/or convert to async */
	rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);

L
Linus Torvalds 已提交
3403
	if (file->private_data == NULL) {
3404
		rc = -EBADF;
3405
		free_xid(xid);
3406
		return rc;
L
Linus Torvalds 已提交
3407
	}
3408
	open_file = file->private_data;
3409
	tcon = tlink_tcon(open_file->tlink);
3410 3411 3412 3413 3414 3415
	server = tcon->ses->server;

	if (!server->ops->sync_read) {
		free_xid(xid);
		return -ENOSYS;
	}
L
Linus Torvalds 已提交
3416

3417 3418 3419 3420 3421
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

L
Linus Torvalds 已提交
3422
	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
3423
		cifs_dbg(FYI, "attempting read on write only file instance\n");
L
Linus Torvalds 已提交
3424

3425 3426
	for (total_read = 0, cur_offset = read_data; read_size > total_read;
	     total_read += bytes_read, cur_offset += bytes_read) {
3427 3428 3429 3430 3431 3432 3433 3434 3435
		do {
			current_read_size = min_t(uint, read_size - total_read,
						  rsize);
			/*
			 * For windows me and 9x we do not want to request more
			 * than it negotiated since it will refuse the read
			 * then.
			 */
			if ((tcon->ses) && !(tcon->ses->capabilities &
3436
				tcon->ses->server->vals->cap_large_files)) {
3437 3438 3439
				current_read_size = min_t(uint,
					current_read_size, CIFSMaxBufSize);
			}
3440
			if (open_file->invalidHandle) {
J
Jeff Layton 已提交
3441
				rc = cifs_reopen_file(open_file, true);
L
Linus Torvalds 已提交
3442 3443 3444
				if (rc != 0)
					break;
			}
3445
			io_parms.pid = pid;
3446
			io_parms.tcon = tcon;
3447
			io_parms.offset = *offset;
3448
			io_parms.length = current_read_size;
3449
			rc = server->ops->sync_read(xid, &open_file->fid, &io_parms,
3450 3451
						    &bytes_read, &cur_offset,
						    &buf_type);
3452 3453
		} while (rc == -EAGAIN);

L
Linus Torvalds 已提交
3454 3455 3456 3457
		if (rc || (bytes_read == 0)) {
			if (total_read) {
				break;
			} else {
3458
				free_xid(xid);
L
Linus Torvalds 已提交
3459 3460 3461
				return rc;
			}
		} else {
3462
			cifs_stats_bytes_read(tcon, total_read);
3463
			*offset += bytes_read;
L
Linus Torvalds 已提交
3464 3465
		}
	}
3466
	free_xid(xid);
L
Linus Torvalds 已提交
3467 3468 3469
	return total_read;
}

3470 3471 3472 3473 3474
/*
 * If the page is mmap'ed into a process' page tables, then we need to make
 * sure that it doesn't change while being written back.
 */
static int
3475
cifs_page_mkwrite(struct vm_fault *vmf)
3476 3477 3478 3479 3480 3481 3482
{
	struct page *page = vmf->page;

	lock_page(page);
	return VM_FAULT_LOCKED;
}

3483
static const struct vm_operations_struct cifs_file_vm_ops = {
3484
	.fault = filemap_fault,
3485
	.map_pages = filemap_map_pages,
3486 3487 3488
	.page_mkwrite = cifs_page_mkwrite,
};

3489 3490 3491
int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
{
	int rc, xid;
A
Al Viro 已提交
3492
	struct inode *inode = file_inode(file);
3493

3494
	xid = get_xid();
3495

3496
	if (!CIFS_CACHE_READ(CIFS_I(inode))) {
3497
		rc = cifs_zap_mapping(inode);
3498 3499 3500
		if (rc)
			return rc;
	}
3501 3502

	rc = generic_file_mmap(file, vma);
3503 3504
	if (rc == 0)
		vma->vm_ops = &cifs_file_vm_ops;
3505
	free_xid(xid);
3506 3507 3508
	return rc;
}

L
Linus Torvalds 已提交
3509 3510 3511 3512
int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	int rc, xid;

3513
	xid = get_xid();
J
Jeff Layton 已提交
3514
	rc = cifs_revalidate_file(file);
L
Linus Torvalds 已提交
3515
	if (rc) {
3516 3517
		cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
			 rc);
3518
		free_xid(xid);
L
Linus Torvalds 已提交
3519 3520 3521
		return rc;
	}
	rc = generic_file_mmap(file, vma);
3522 3523
	if (rc == 0)
		vma->vm_ops = &cifs_file_vm_ops;
3524
	free_xid(xid);
L
Linus Torvalds 已提交
3525 3526 3527
	return rc;
}

3528 3529 3530
static void
cifs_readv_complete(struct work_struct *work)
{
3531
	unsigned int i, got_bytes;
3532 3533 3534
	struct cifs_readdata *rdata = container_of(work,
						struct cifs_readdata, work);

3535
	got_bytes = rdata->got_bytes;
3536 3537 3538
	for (i = 0; i < rdata->nr_pages; i++) {
		struct page *page = rdata->pages[i];

3539 3540
		lru_cache_add_file(page);

3541 3542
		if (rdata->result == 0 ||
		    (rdata->result == -EAGAIN && got_bytes)) {
3543 3544 3545 3546 3547 3548
			flush_dcache_page(page);
			SetPageUptodate(page);
		}

		unlock_page(page);

3549 3550
		if (rdata->result == 0 ||
		    (rdata->result == -EAGAIN && got_bytes))
3551 3552
			cifs_readpage_to_fscache(rdata->mapping->host, page);

3553
		got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes);
3554

3555
		put_page(page);
3556
		rdata->pages[i] = NULL;
3557
	}
3558
	kref_put(&rdata->refcount, cifs_readdata_release);
3559 3560
}

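/*
 * Fill the readpages target pages either from the socket or from an
 * already-received iterator: zero the tail of a partial page, zero-fill
 * and complete pages that lie beyond the server's EOF, and release
 * pages for which no data arrived.
 */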
static int
readpages_fill_pages(struct TCP_Server_Info *server,
		     struct cifs_readdata *rdata, struct iov_iter *iter,
		     unsigned int len)
3565
{
3566
	int result = 0;
3567
	unsigned int i;
3568 3569
	u64 eof;
	pgoff_t eof_index;
3570
	unsigned int nr_pages = rdata->nr_pages;
3571 3572 3573

	/* determine the eof that the server (probably) has */
	eof = CIFS_I(rdata->mapping->host)->server_eof;
3574
	eof_index = eof ? (eof - 1) >> PAGE_SHIFT : 0;
3575
	cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);
3576

3577
	rdata->got_bytes = 0;
3578
	rdata->tailsz = PAGE_SIZE;
3579 3580
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];
3581
		size_t n = PAGE_SIZE;
3582

3583 3584
		if (len >= PAGE_SIZE) {
			len -= PAGE_SIZE;
3585
		} else if (len > 0) {
3586
			/* enough for partial page, fill and zero the rest */
3587
			zero_user(page, len, PAGE_SIZE - len);
3588
			n = rdata->tailsz = len;
3589
			len = 0;
3590 3591 3592 3593 3594 3595 3596 3597 3598
		} else if (page->index > eof_index) {
			/*
			 * The VFS will not try to do readahead past the
			 * i_size, but it's possible that we have outstanding
			 * writes with gaps in the middle and the i_size hasn't
			 * caught up yet. Populate those with zeroed out pages
			 * to prevent the VFS from repeatedly attempting to
			 * fill them until the writes are flushed.
			 */
3599
			zero_user(page, 0, PAGE_SIZE);
3600 3601 3602 3603
			lru_cache_add_file(page);
			flush_dcache_page(page);
			SetPageUptodate(page);
			unlock_page(page);
3604
			put_page(page);
3605 3606
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
3607
			continue;
3608 3609 3610 3611
		} else {
			/* no need to hold page hostage */
			lru_cache_add_file(page);
			unlock_page(page);
3612
			put_page(page);
3613 3614
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
3615
			continue;
3616
		}
3617

3618 3619 3620 3621
		if (iter)
			result = copy_page_from_iter(page, 0, n, iter);
		else
			result = cifs_read_page_from_socket(server, page, n);
3622 3623 3624
		if (result < 0)
			break;

3625
		rdata->got_bytes += result;
3626 3627
	}

3628 3629
	return rdata->got_bytes > 0 && result != -ECONNABORTED ?
						rdata->got_bytes : result;
3630 3631
}

static int
cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
			       struct cifs_readdata *rdata, unsigned int len)
{
	return readpages_fill_pages(server, rdata, NULL, len);
}

static int
cifs_readpages_copy_into_pages(struct TCP_Server_Info *server,
			       struct cifs_readdata *rdata,
			       struct iov_iter *iter)
{
	return readpages_fill_pages(server, rdata, iter, iter->count);
}

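/*
 * Peel a run of contiguous pages off the tail of the readahead list and add
 * them to the page cache, stopping at an index discontinuity or once the
 * request would exceed rsize.
 */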
static int
readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
		    unsigned int rsize, struct list_head *tmplist,
		    unsigned int *nr_pages, loff_t *offset, unsigned int *bytes)
{
	struct page *page, *tpage;
	unsigned int expected_index;
	int rc;
	gfp_t gfp = readahead_gfp_mask(mapping);

	INIT_LIST_HEAD(tmplist);

	page = list_entry(page_list->prev, struct page, lru);

	/*
	 * Lock the page and put it in the cache. Since no one else
	 * should have access to this page, we're safe to simply set
	 * PG_locked without checking it first.
	 */
	__SetPageLocked(page);
	rc = add_to_page_cache_locked(page, mapping,
				      page->index, gfp);

	/* give up if we can't stick it in the cache */
	if (rc) {
		__ClearPageLocked(page);
		return rc;
	}

	/* move first page to the tmplist */
	*offset = (loff_t)page->index << PAGE_SHIFT;
	*bytes = PAGE_SIZE;
	*nr_pages = 1;
	list_move_tail(&page->lru, tmplist);

	/* now try and add more pages onto the request */
	expected_index = page->index + 1;
	list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
		/* discontinuity ? */
		if (page->index != expected_index)
			break;

		/* would this page push the read over the rsize? */
		if (*bytes + PAGE_SIZE > rsize)
			break;

		__SetPageLocked(page);
		if (add_to_page_cache_locked(page, mapping, page->index, gfp)) {
			__ClearPageLocked(page);
			break;
		}
		list_move_tail(&page->lru, tmplist);
		(*bytes) += PAGE_SIZE;
		expected_index++;
		(*nr_pages)++;
	}
	return rc;
}

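/*
 * ->readpages() for cifs: try fscache first, then carve the readahead list
 * into rsize-sized chunks and issue an async read for each chunk; the VFS
 * falls back to ->readpage() for anything left unread.
 */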
static int cifs_readpages(struct file *file, struct address_space *mapping,
	struct list_head *page_list, unsigned num_pages)
{
	int rc;
	struct list_head tmplist;
	struct cifsFileInfo *open_file = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
	struct TCP_Server_Info *server;
	pid_t pid;

	/*
	 * Reads as many pages as possible from fscache. Returns -ENOBUFS
	 * immediately if the cookie is negative
	 *
	 * After this point, every page in the list might have PG_fscache set,
	 * so we will need to clean that up off of every page we don't use.
	 */
	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
					 &num_pages);
	if (rc == 0)
		return rc;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	rc = 0;
	server = tlink_tcon(open_file->tlink)->ses->server;

	cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
		 __func__, file, mapping, num_pages);

	/*
	 * Start with the page at end of list and move it to private
	 * list. Do the same with any following pages until we hit
	 * the rsize limit, hit an index discontinuity, or run out of
	 * pages. Issue the async read and then start the loop again
	 * until the list is empty.
	 *
	 * Note that list order is important. The page_list is in
	 * the order of declining indexes. When we put the pages in
	 * the rdata->pages, then we want them in increasing order.
	 */
	while (!list_empty(page_list)) {
		unsigned int i, nr_pages, bytes, rsize;
		loff_t offset;
		struct page *page, *tpage;
		struct cifs_readdata *rdata;
		unsigned credits;

		rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
						   &rsize, &credits);
		if (rc)
			break;

		/*
		 * Give up immediately if rsize is too small to read an entire
		 * page. The VFS will fall back to readpage. We should never
		 * reach this point however since we set ra_pages to 0 when the
		 * rsize is smaller than a cache page.
		 */
		if (unlikely(rsize < PAGE_SIZE)) {
			add_credits_and_wake_if(server, credits, 0);
			return 0;
		}

		rc = readpages_get_pages(mapping, page_list, rsize, &tmplist,
					 &nr_pages, &offset, &bytes);
		if (rc) {
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
		if (!rdata) {
			/* best to give up if we're out of mem */
			list_for_each_entry_safe(page, tpage, &tmplist, lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				put_page(page);
			}
			rc = -ENOMEM;
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->mapping = mapping;
		rdata->offset = offset;
		rdata->bytes = bytes;
		rdata->pid = pid;
		rdata->pagesz = PAGE_SIZE;
		rdata->read_into_pages = cifs_readpages_read_into_pages;
		rdata->copy_into_pages = cifs_readpages_copy_into_pages;
		rdata->credits = credits;

		list_for_each_entry_safe(page, tpage, &tmplist, lru) {
			list_del(&page->lru);
			rdata->pages[rdata->nr_pages++] = page;
		}

		if (!rdata->cfile->invalidHandle ||
		    !(rc = cifs_reopen_file(rdata->cfile, true)))
			rc = server->ops->async_readv(rdata);
		if (rc) {
			add_credits_and_wake_if(server, rdata->credits, 0);
			for (i = 0; i < rdata->nr_pages; i++) {
				page = rdata->pages[i];
				lru_cache_add_file(page);
				unlock_page(page);
				put_page(page);
			}
			/* Fallback to the readpage in error/reconnect cases */
			kref_put(&rdata->refcount, cifs_readdata_release);
			break;
		}

		kref_put(&rdata->refcount, cifs_readdata_release);
	}

	/* Any pages that have been shown to fscache but didn't get added to
	 * the pagecache must be uncached before they get returned to the
	 * allocator.
	 */
	cifs_fscache_readpages_cancel(mapping->host, page_list);
	return rc;
}

/*
 * cifs_readpage_worker must be called with the page pinned
 */
static int cifs_readpage_worker(struct file *file, struct page *page,
	loff_t *poffset)
{
	char *read_data;
	int rc;

	/* Is the page cached? */
	rc = cifs_readpage_from_fscache(file_inode(file), page);
	if (rc == 0)
		goto read_complete;

	read_data = kmap(page);
	/* for reads over a certain size could initiate async read ahead */

	rc = cifs_read(file, read_data, PAGE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cifs_dbg(FYI, "Bytes read %d\n", rc);

	file_inode(file)->i_atime =
		current_time(file_inode(file));

	if (PAGE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);

	/* send this page to the cache */
	cifs_readpage_to_fscache(file_inode(file), page);

	rc = 0;

io_error:
	kunmap(page);
	unlock_page(page);

read_complete:
	return rc;
}

static int cifs_readpage(struct file *file, struct page *page)
{
	loff_t offset = (loff_t)page->index << PAGE_SHIFT;
	int rc = -EACCES;
	unsigned int xid;

	xid = get_xid();

	if (file->private_data == NULL) {
		rc = -EBADF;
		free_xid(xid);
		return rc;
	}

	cifs_dbg(FYI, "readpage %p at offset %d 0x%x\n",
		 page, (int)offset, (int)offset);

	rc = cifs_readpage_worker(file, page, &offset);

	free_xid(xid);
	return rc;
}

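/* Return 1 if any open handle on this inode has write access */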
static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
{
	struct cifsFileInfo *open_file;
	struct cifs_tcon *tcon =
		cifs_sb_master_tcon(CIFS_SB(cifs_inode->vfs_inode.i_sb));

	spin_lock(&tcon->open_file_lock);
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			spin_unlock(&tcon->open_file_lock);
			return 1;
		}
	}
	spin_unlock(&tcon->open_file_lock);
	return 0;
}

/* We do not want to update the file size from server for inodes
   open for write - to avoid races with writepage extending
   the file - in the future we could consider allowing
   refreshing the inode only on increases in the file size
   but this is tricky to do without racing with writebehind
   page caching in the current Linux kernel design */
bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
{
	if (!cifsInode)
		return true;

	if (is_inode_writable(cifsInode)) {
		/* This inode is open for write at least once */
		struct cifs_sb_info *cifs_sb;

		cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
			/* since no page cache to corrupt on directio
			we can change size safely */
			return true;
		}

		if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
			return true;

		return false;
	} else
		return true;
}

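/*
 * ->write_begin() for buffered writes: grab and lock the page cache page,
 * reading in its current contents only when the write is partial and the
 * existing data may actually be needed.
 */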
static int cifs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	int oncethru = 0;
	pgoff_t index = pos >> PAGE_SHIFT;
	loff_t offset = pos & (PAGE_SIZE - 1);
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);

start:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_CACHE_READ(CIFS_I(mapping->host))) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY && !oncethru) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
		put_page(page);
		oncethru = 1;
		goto start;
	} else {
		/* we could try using another file handle if there is one -
		   but how would we lock it to prevent close of that handle
		   racing with this read? In any case
		   this will be written out by write_end so is fine */
	}
out:
	*pagep = page;
	return rc;
}

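/*
 * Refuse to release a page that still has private data attached;
 * otherwise let fscache decide whether it can be dropped.
 */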
static int cifs_release_page(struct page *page, gfp_t gfp)
{
	if (PagePrivate(page))
		return 0;

	return cifs_fscache_release_page(page, gfp);
}

static void cifs_invalidate_page(struct page *page, unsigned int offset,
				 unsigned int length)
{
	struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);

	if (offset == 0 && length == PAGE_SIZE)
		cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
}

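/*
 * ->launder_page(): write back a dirty page synchronously before it is
 * invalidated, then drop it from fscache.
 */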
static int cifs_launder_page(struct page *page)
{
	int rc = 0;
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0,
		.range_start = range_start,
		.range_end = range_end,
	};

	cifs_dbg(FYI, "Launder page: %p\n", page);

	if (clear_page_dirty_for_io(page))
		rc = cifs_writepage_locked(page, &wbc);

	cifs_fscache_invalidate_page(page, page->mapping->host);
	return rc;
}

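/*
 * Work handler run when the server breaks our oplock/lease: downgrade the
 * cached oplock state, flush (and if necessary invalidate) the page cache,
 * push cached byte-range locks back to the server, then acknowledge the
 * break unless it was cancelled.
 */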
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = d_inode(cfile->dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	int rc = 0;

	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
			TASK_UNINTERRUPTIBLE);

	server->ops->downgrade_oplock(server, cinode,
		test_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &cinode->flags));

	if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
						cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
			 inode);
		cinode->oplock = 0;
	}

	if (inode && S_ISREG(inode->i_mode)) {
		if (CIFS_CACHE_READ(cinode))
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (!CIFS_CACHE_READ(cinode)) {
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			cifs_zap_mapping(inode);
		}
		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
	}

	rc = cifs_push_locks(cfile);
	if (rc)
		cifs_dbg(VFS, "Push locks rc = %d\n", rc);

	/*
	 * releasing stale oplock after recent reconnect of smb session using
	 * a now incorrect file handle is not a data integrity issue but do
	 * not bother sending an oplock release if session to server still is
	 * disconnected since oplock already released by the server
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
							     cinode);
		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
	}
	cifs_done_oplock_break(cinode);
}

/*
 * The presence of cifs_direct_io() in the address space ops vector
 * allows open() O_DIRECT flags which would have failed otherwise.
 *
 * In the non-cached mode (mount with cache=none), we shunt off direct
 * read and write requests so this method should never be called.
 *
 * Direct IO is not yet supported in the cached mode.
 */
static ssize_t
cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter)
{
	/*
	 * FIXME
	 * Eventually need to support direct IO for non forcedirectio mounts
	 */
	return -EINVAL;
}


const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.direct_IO = cifs_direct_io,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};

/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data.  Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};