misc.c 34.5 KB
Newer Older
S
Steve French 已提交
1
// SPDX-License-Identifier: LGPL-2.1
L
Linus Torvalds 已提交
2 3
/*
 *
S
Steve French 已提交
4
 *   Copyright (C) International Business Machines  Corp., 2002,2008
L
Linus Torvalds 已提交
5 6 7 8 9 10 11
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *
 */

#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/mempool.h>
12
#include <linux/vmalloc.h>
L
Linus Torvalds 已提交
13 14 15 16 17 18
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smberr.h"
#include "nterr.h"
19
#include "cifs_unicode.h"
20
#include "smb2pdu.h"
21
#include "cifsfs.h"
22 23
#ifdef CONFIG_CIFS_DFS_UPCALL
#include "dns_resolve.h"
24
#include "dfs_cache.h"
25
#include "dfs.h"
26
#endif
27
#include "fs_context.h"
28
#include "cached_dir.h"
L
Linus Torvalds 已提交
29 30 31 32

extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;

S
Steve French 已提交
33 34 35 36
/* The xid serves as a useful identifier for each incoming vfs request,
   in a similar way to the mid which is useful to track each sent smb,
   and CurrentXid can also provide a running counter (although it
   will eventually wrap past zero) of the total vfs operations handled
L
Linus Torvalds 已提交
37 38 39
   since the cifs fs was mounted */

/*
 * Allocate the next xid and bump the count of in-flight vfs operations.
 * Pair with _free_xid(); both counters are protected by GlobalMid_Lock.
 */
unsigned int
_get_xid(void)
{
	unsigned int xid;

	spin_lock(&GlobalMid_Lock);
	GlobalTotalActiveXid++;

	/* keep high water mark for number of simultaneous ops in filesystem */
	if (GlobalTotalActiveXid > GlobalMaxActiveXid)
		GlobalMaxActiveXid = GlobalTotalActiveXid;
	if (GlobalTotalActiveXid > 65000)
		cifs_dbg(FYI, "warning: more than 65000 requests active\n");
	/* xid is a simple wrapping counter; uniqueness only matters short-term */
	xid = GlobalCurrentXid++;
	spin_unlock(&GlobalMid_Lock);
	return xid;
}

/*
 * Release an xid previously obtained from _get_xid().
 * Note: @xid itself is not tracked; only the active-operation count is
 * decremented, so the argument is intentionally unused here.
 */
void
_free_xid(unsigned int xid)
{
	spin_lock(&GlobalMid_Lock);
	/* if (GlobalTotalActiveXid == 0)
		BUG(); */
	GlobalTotalActiveXid--;
	spin_unlock(&GlobalMid_Lock);
}

67
/*
 * Allocate and initialize a zeroed SMB session structure.
 *
 * Returns the new session with ses_count already set to 1 (caller owns the
 * initial reference), or NULL on allocation failure. Freed via sesInfoFree().
 */
struct cifs_ses *
sesInfoAlloc(void)
{
	struct cifs_ses *ret_buf;

	ret_buf = kzalloc(sizeof(struct cifs_ses), GFP_KERNEL);
	if (ret_buf) {
		atomic_inc(&sesInfoAllocCount);
		spin_lock_init(&ret_buf->ses_lock);
		ret_buf->ses_status = SES_NEW;
		/* caller holds the initial reference */
		++ret_buf->ses_count;
		INIT_LIST_HEAD(&ret_buf->smb_ses_list);
		INIT_LIST_HEAD(&ret_buf->tcon_list);
		mutex_init(&ret_buf->session_mutex);
		spin_lock_init(&ret_buf->iface_lock);
		INIT_LIST_HEAD(&ret_buf->iface_list);
		spin_lock_init(&ret_buf->chan_lock);
	}
	return ret_buf;
}

/*
 * Free a session allocated by sesInfoAlloc(), including all owned strings
 * and the server interface list. Credentials (password, auth_key) are
 * freed with kfree_sensitive() so their memory is zeroed first.
 * NULL is tolerated (logged and ignored).
 */
void
sesInfoFree(struct cifs_ses *buf_to_free)
{
	struct cifs_server_iface *iface = NULL, *niface = NULL;

	if (buf_to_free == NULL) {
		cifs_dbg(FYI, "Null buffer passed to sesInfoFree\n");
		return;
	}

	atomic_dec(&sesInfoAllocCount);
	kfree(buf_to_free->serverOS);
	kfree(buf_to_free->serverDomain);
	kfree(buf_to_free->serverNOS);
	kfree_sensitive(buf_to_free->password);
	kfree(buf_to_free->user_name);
	kfree(buf_to_free->domainName);
	kfree_sensitive(buf_to_free->auth_key.response);
	/* drop our reference on every interface still on the list */
	spin_lock(&buf_to_free->iface_lock);
	list_for_each_entry_safe(iface, niface, &buf_to_free->iface_list,
				 iface_head)
		kref_put(&iface->refcount, release_iface);
	spin_unlock(&buf_to_free->iface_lock);
	/* the session itself may embed key material, so zero it on free */
	kfree_sensitive(buf_to_free);
}

114
/*
 * Allocate and initialize a zeroed tree-connection structure.
 *
 * Returns the new tcon with tc_count already set to 1 (caller owns the
 * initial reference), or NULL if either the tcon or its cached-dirs state
 * cannot be allocated. Freed via tconInfoFree().
 */
struct cifs_tcon *
tconInfoAlloc(void)
{
	struct cifs_tcon *ret_buf;

	ret_buf = kzalloc(sizeof(*ret_buf), GFP_KERNEL);
	if (!ret_buf)
		return NULL;
	/* cached-dir bookkeeping is a separate allocation; unwind on failure */
	ret_buf->cfids = init_cached_dirs();
	if (!ret_buf->cfids) {
		kfree(ret_buf);
		return NULL;
	}

	atomic_inc(&tconInfoAllocCount);
	ret_buf->status = TID_NEW;
	/* caller holds the initial reference */
	++ret_buf->tc_count;
	spin_lock_init(&ret_buf->tc_lock);
	INIT_LIST_HEAD(&ret_buf->openFileList);
	INIT_LIST_HEAD(&ret_buf->tcon_list);
	spin_lock_init(&ret_buf->open_file_lock);
	spin_lock_init(&ret_buf->stat_lock);
	atomic_set(&ret_buf->num_local_opens, 0);
	atomic_set(&ret_buf->num_remote_opens, 0);
#ifdef CONFIG_CIFS_DFS_UPCALL
	INIT_LIST_HEAD(&ret_buf->dfs_ses_list);
#endif

	return ret_buf;
}

/*
 * Free a tree connection allocated by tconInfoAlloc(), releasing its
 * cached-dir state, owned strings, and (when DFS is enabled) any root
 * SMB sessions pinned for DFS referrals. NULL is tolerated.
 */
void
tconInfoFree(struct cifs_tcon *tcon)
{
	if (tcon == NULL) {
		cifs_dbg(FYI, "Null buffer passed to tconInfoFree\n");
		return;
	}
	free_cached_dirs(tcon->cfids);
	atomic_dec(&tconInfoAllocCount);
	kfree(tcon->nativeFileSystem);
	/* password may hold credentials - zero before freeing */
	kfree_sensitive(tcon->password);
#ifdef CONFIG_CIFS_DFS_UPCALL
	dfs_put_root_smb_sessions(&tcon->dfs_ses_list);
#endif
	kfree(tcon);
}

/*
 * Get a large request buffer from the mempool.
 *
 * The mempool allocation with GFP_NOFS will sleep rather than fail, so the
 * returned pointer is never NULL. Only the header region is cleared here;
 * header_assemble() clears more for most request paths.
 */
struct smb_hdr *
cifs_buf_get(void)
{
	struct smb_hdr *ret_buf = NULL;
	/*
	 * SMB2 header is bigger than CIFS one - no problems to clean some
	 * more bytes for CIFS.
	 */
	size_t buf_size = sizeof(struct smb2_hdr);

	/*
	 * We could use negotiated size instead of max_msgsize -
	 * but it may be more efficient to always alloc same size
	 * albeit slightly larger than necessary and maxbuffersize
	 * defaults to this and can not be bigger.
	 */
	ret_buf = mempool_alloc(cifs_req_poolp, GFP_NOFS);

	/* clear the first few header bytes */
	/* for most paths, more is cleared in header_assemble */
	memset(ret_buf, 0, buf_size + 3);
	atomic_inc(&buf_alloc_count);
#ifdef CONFIG_CIFS_STATS2
	atomic_inc(&total_buf_alloc_count);
#endif /* CONFIG_CIFS_STATS2 */

	return ret_buf;
}

void
cifs_buf_release(void *buf_to_free)
{
	if (buf_to_free == NULL) {
195
		/* cifs_dbg(FYI, "Null buffer passed to cifs_buf_release\n");*/
L
Linus Torvalds 已提交
196 197
		return;
	}
S
Steve French 已提交
198
	mempool_free(buf_to_free, cifs_req_poolp);
L
Linus Torvalds 已提交
199

200
	atomic_dec(&buf_alloc_count);
L
Linus Torvalds 已提交
201 202 203 204 205 206 207 208
	return;
}

/*
 * Get a small request buffer from the small-request mempool.
 *
 * GFP_NOFS mempool allocation sleeps rather than fails, so the result is
 * never NULL. The buffer is intentionally not cleared here; callers clear
 * it in header assembly.
 */
struct smb_hdr *
cifs_small_buf_get(void)
{
	struct smb_hdr *ret_buf = NULL;

/* We could use negotiated size instead of max_msgsize -
   but it may be more efficient to always alloc same size
   albeit slightly larger than necessary and maxbuffersize
   defaults to this and can not be bigger */
	ret_buf = mempool_alloc(cifs_sm_req_poolp, GFP_NOFS);
	/* No need to clear memory here, cleared in header assemble */
	/*	memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
	atomic_inc(&small_buf_alloc_count);
#ifdef CONFIG_CIFS_STATS2
	atomic_inc(&total_small_buf_alloc_count);
#endif /* CONFIG_CIFS_STATS2 */

	return ret_buf;
}

void
cifs_small_buf_release(void *buf_to_free)
{

	if (buf_to_free == NULL) {
229
		cifs_dbg(FYI, "Null buffer passed to cifs_small_buf_release\n");
L
Linus Torvalds 已提交
230 231
		return;
	}
S
Steve French 已提交
232
	mempool_free(buf_to_free, cifs_sm_req_poolp);
L
Linus Torvalds 已提交
233

234
	atomic_dec(&small_buf_alloc_count);
L
Linus Torvalds 已提交
235 236 237
	return;
}

238 239 240 241 242 243 244 245 246
/*
 * Release a response buffer according to its type. Buffers of type
 * CIFS_NO_BUFFER (or any other unrecognized type) are left untouched.
 */
void
free_rsp_buf(int resp_buftype, void *rsp)
{
	switch (resp_buftype) {
	case CIFS_SMALL_BUFFER:
		cifs_small_buf_release(rsp);
		break;
	case CIFS_LARGE_BUFFER:
		cifs_buf_release(rsp);
		break;
	}
}

247 248
/* NB: MID can not be set if treeCon not passed in, in that
   case it is responsbility of caller to set the mid */
L
Linus Torvalds 已提交
249 250
/*
 * Fill in an SMB1 request header in @buffer for @smb_command.
 *
 * @buffer:     header to populate (first 256 bytes are zeroed first)
 * @smb_command: SMB1 command code
 * @treeCon:    tree connection, or NULL; when NULL the caller is
 *              responsible for setting the mid (see note above)
 * @word_count: length of the fixed section in two-byte units
 *
 * Flags2 endian conversion is deferred until just before sending.
 */
void
header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
		const struct cifs_tcon *treeCon, int word_count
		/* length of fixed section (word count) in two byte units  */)
{
	char *temp = (char *) buffer;

	memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */

	buffer->smb_buf_length = cpu_to_be32(
	    (2 * word_count) + sizeof(struct smb_hdr) -
	    4 /*  RFC 1001 length field does not count */  +
	    2 /* for bcc field itself */) ;

	/* 0xFF 'S' 'M' 'B' - the SMB1 protocol signature */
	buffer->Protocol[0] = 0xFF;
	buffer->Protocol[1] = 'S';
	buffer->Protocol[2] = 'M';
	buffer->Protocol[3] = 'B';
	buffer->Command = smb_command;
	buffer->Flags = 0x00;	/* case sensitive */
	buffer->Flags2 = SMBFLG2_KNOWS_LONG_NAMES;
	/* 32-bit tgid is split across the 16-bit Pid/PidHigh wire fields */
	buffer->Pid = cpu_to_le16((__u16)current->tgid);
	buffer->PidHigh = cpu_to_le16((__u16)(current->tgid >> 16));
	if (treeCon) {
		buffer->Tid = treeCon->tid;
		if (treeCon->ses) {
			if (treeCon->ses->capabilities & CAP_UNICODE)
				buffer->Flags2 |= SMBFLG2_UNICODE;
			if (treeCon->ses->capabilities & CAP_STATUS32)
				buffer->Flags2 |= SMBFLG2_ERR_STATUS;

			/* Uid is not converted */
			buffer->Uid = treeCon->ses->Suid;
			if (treeCon->ses->server)
				buffer->Mid = get_next_mid(treeCon->ses->server);
		}
		if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
			buffer->Flags2 |= SMBFLG2_DFS;
		if (treeCon->nocase)
			buffer->Flags  |= SMBFLG_CASELESS;
		if ((treeCon->ses) && (treeCon->ses->server))
			if (treeCon->ses->server->sign)
				buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
	}

/*  endian conversion of flags is now done just before sending */
	buffer->WordCount = (char) word_count;
	return;
}

299
/*
 * Sanity-check an incoming SMB1 header.
 *
 * Returns 0 when the frame is acceptable (a response, or a server-initiated
 * LOCKING_ANDX - the only request a server may legitimately send us),
 * 1 when it is malformed or unexpected.
 */
static int
check_smb_hdr(struct smb_hdr *smb)
{
	/* does it have the right SMB "signature" ? */
	if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff)) {
		cifs_dbg(VFS, "Bad protocol string signature header 0x%x\n",
			 *(unsigned int *)smb->Protocol);
		return 1;
	}

	/* if it's a response then accept */
	if (smb->Flags & SMBFLG_RESPONSE)
		return 0;

	/* only one valid case where server sends us request */
	if (smb->Command == SMB_COM_LOCKING_ANDX)
		return 0;

	cifs_dbg(VFS, "Server sent request, not response. mid=%u\n",
		 get_mid(smb));
	return 1;
}

/*
 * Validate a received SMB1 frame: header signature, RFC1001 length vs
 * bytes actually read, and RFC1001 length vs the length calculated from
 * the wct/bcc fields.
 *
 * Returns 0 if the frame is usable, -EIO otherwise. @server is unused
 * here (the signature matches the protocol-ops callback it serves).
 */
int
checkSMB(char *buf, unsigned int total_read, struct TCP_Server_Info *server)
{
	struct smb_hdr *smb = (struct smb_hdr *)buf;
	__u32 rfclen = be32_to_cpu(smb->smb_buf_length);
	__u32 clc_len;  /* calculated length */
	cifs_dbg(FYI, "checkSMB Length: 0x%x, smb_buf_length: 0x%x\n",
		 total_read, rfclen);

	/* is this frame too small to even get to a BCC? */
	if (total_read < 2 + sizeof(struct smb_hdr)) {
		if ((total_read >= sizeof(struct smb_hdr) - 1)
			    && (smb->Status.CifsError != 0)) {
			/* it's an error return */
			smb->WordCount = 0;
			/* some error cases do not return wct and bcc */
			return 0;
		} else if ((total_read == sizeof(struct smb_hdr) + 1) &&
				(smb->WordCount == 0)) {
			char *tmp = (char *)smb;
			/* Need to work around a bug in two servers here */
			/* First, check if the part of bcc they sent was zero */
			if (tmp[sizeof(struct smb_hdr)] == 0) {
				/* some servers return only half of bcc
				 * on simple responses (wct, bcc both zero)
				 * in particular have seen this on
				 * ulogoffX and FindClose. This leaves
				 * one byte of bcc potentially unitialized
				 */
				/* zero rest of bcc */
				tmp[sizeof(struct smb_hdr)+1] = 0;
				return 0;
			}
			cifs_dbg(VFS, "rcvd invalid byte count (bcc)\n");
		} else {
			cifs_dbg(VFS, "Length less than smb header size\n");
		}
		return -EIO;
	}

	/* otherwise, there is enough to get to the BCC */
	if (check_smb_hdr(smb))
		return -EIO;
	clc_len = smbCalcSize(smb);

	/* the 4-byte RFC1001 length prefix is not counted in rfclen */
	if (4 + rfclen != total_read) {
		cifs_dbg(VFS, "Length read does not match RFC1001 length %d\n",
			 rfclen);
		return -EIO;
	}

	if (4 + rfclen != clc_len) {
		__u16 mid = get_mid(smb);
		/* check if bcc wrapped around for large read responses */
		if ((rfclen > 64 * 1024) && (rfclen > clc_len)) {
			/* check if lengths match mod 64K */
			if (((4 + rfclen) & 0xFFFF) == (clc_len & 0xFFFF))
				return 0; /* bcc wrapped */
		}
		cifs_dbg(FYI, "Calculated size %u vs length %u mismatch for mid=%u\n",
			 clc_len, 4 + rfclen, mid);

		if (4 + rfclen < clc_len) {
			cifs_dbg(VFS, "RFC1001 size %u smaller than SMB for mid=%u\n",
				 rfclen, mid);
			return -EIO;
		} else if (rfclen > clc_len + 512) {
			/*
			 * Some servers (Windows XP in particular) send more
			 * data than the lengths in the SMB packet would
			 * indicate on certain calls (byte range locks and
			 * trans2 find first calls in particular). While the
			 * client can handle such a frame by ignoring the
			 * trailing data, we choose limit the amount of extra
			 * data to 512 bytes.
			 */
			cifs_dbg(VFS, "RFC1001 size %u more than 512 bytes larger than SMB for mid=%u\n",
				 rfclen, mid);
			return -EIO;
		}
	}
	return 0;
}
405 406

/*
 * Inspect an unsolicited frame from the server and, when it is an oplock
 * break, queue the break handler for the matching open file.
 *
 * Returns true when the frame has been fully consumed here (dnotify
 * responses, oplock break requests - matched or not), false when the
 * caller should continue normal processing.
 */
bool
is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
{
	struct smb_hdr *buf = (struct smb_hdr *)buffer;
	struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf;
	struct TCP_Server_Info *pserver;
	struct cifs_ses *ses;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *pCifsInode;
	struct cifsFileInfo *netfile;

	cifs_dbg(FYI, "Checking for oplock break or dnotify response\n");
	if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) &&
	   (pSMB->hdr.Flags & SMBFLG_RESPONSE)) {
		struct smb_com_transaction_change_notify_rsp *pSMBr =
			(struct smb_com_transaction_change_notify_rsp *)buf;
		struct file_notify_information *pnotify;
		__u32 data_offset = 0;
		/* bytes of payload actually received after the length prefix */
		size_t len = srv->total_read - sizeof(pSMBr->hdr.smb_buf_length);

		if (get_bcc(buf) > sizeof(struct file_notify_information)) {
			data_offset = le32_to_cpu(pSMBr->DataOffset);

			/* reject offsets that would point past the frame */
			if (data_offset >
			    len - sizeof(struct file_notify_information)) {
				cifs_dbg(FYI, "Invalid data_offset %u\n",
					 data_offset);
				return true;
			}
			pnotify = (struct file_notify_information *)
				((char *)&pSMBr->hdr.Protocol + data_offset);
			cifs_dbg(FYI, "dnotify on %s Action: 0x%x\n",
				 pnotify->FileName, pnotify->Action);
			/*   cifs_dump_mem("Rcvd notify Data: ",buf,
				sizeof(struct smb_hdr)+60); */
			return true;
		}
		if (pSMBr->hdr.Status.CifsError) {
			cifs_dbg(FYI, "notify err 0x%x\n",
				 pSMBr->hdr.Status.CifsError);
			return true;
		}
		return false;
	}
	if (pSMB->hdr.Command != SMB_COM_LOCKING_ANDX)
		return false;
	if (pSMB->hdr.Flags & SMBFLG_RESPONSE) {
		/* no sense logging error on invalid handle on oplock
		   break - harmless race between close request and oplock
		   break response is expected from time to time writing out
		   large dirty files cached on the client */
		if ((NT_STATUS_INVALID_HANDLE) ==
		   le32_to_cpu(pSMB->hdr.Status.CifsError)) {
			cifs_dbg(FYI, "Invalid handle on oplock break\n");
			return true;
		} else if (ERRbadfid ==
		   le16_to_cpu(pSMB->hdr.Status.DosError.Error)) {
			return true;
		} else {
			return false; /* on valid oplock brk we get "request" */
		}
	}
	/* an oplock break request carries exactly 8 parameter words */
	if (pSMB->hdr.WordCount != 8)
		return false;

	cifs_dbg(FYI, "oplock type 0x%x level 0x%x\n",
		 pSMB->LockType, pSMB->OplockLevel);
	if (!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE))
		return false;

	/* If server is a channel, select the primary channel */
	pserver = CIFS_SERVER_IS_CHAN(srv) ? srv->primary_server : srv;

	/* look up tcon based on tid & uid */
	spin_lock(&cifs_tcp_ses_lock);
	list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
			if (tcon->tid != buf->Tid)
				continue;

			cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
			spin_lock(&tcon->open_file_lock);
			list_for_each_entry(netfile, &tcon->openFileList, tlist) {
				if (pSMB->Fid != netfile->fid.netfid)
					continue;

				cifs_dbg(FYI, "file id match, oplock break\n");
				pCifsInode = CIFS_I(d_inode(netfile->dentry));

				set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK,
					&pCifsInode->flags);

				netfile->oplock_epoch = 0;
				netfile->oplock_level = pSMB->OplockLevel;
				netfile->oplock_break_cancelled = false;
				/* takes its own reference on netfile */
				cifs_queue_oplock_break(netfile);

				spin_unlock(&tcon->open_file_lock);
				spin_unlock(&cifs_tcp_ses_lock);
				return true;
			}
			spin_unlock(&tcon->open_file_lock);
			spin_unlock(&cifs_tcp_ses_lock);
			cifs_dbg(FYI, "No matching file for oplock break\n");
			return true;
		}
	}
	spin_unlock(&cifs_tcp_ses_lock);
	cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n");
	return true;
}

/*
 * Hex-dump an SMB buffer to the kernel log when the traceSMB module
 * parameter is enabled; no-op otherwise.
 */
void
dump_smb(void *buf, int smb_buf_length)
{
	if (traceSMB == 0)
		return;

	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, 8, 2, buf,
		       smb_buf_length, true);
}
527

528 529 530 531
/*
 * Turn off use of server-provided inode numbers for this superblock and
 * warn loudly; called when the server's inode numbers prove unreliable.
 * No-op if server inode numbers are already disabled.
 */
void
cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
{
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
		struct cifs_tcon *tcon = NULL;

		/* master_tlink may not be set up yet early in mount */
		if (cifs_sb->master_tlink)
			tcon = cifs_sb_master_tcon(cifs_sb);

		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
		/* remember this was automatic, not a mount option */
		cifs_sb->mnt_cifs_serverino_autodisabled = true;
		cifs_dbg(VFS, "Autodisabling the use of server inode numbers on %s\n",
			 tcon ? tcon->tree_name : "new server");
		cifs_dbg(VFS, "The server doesn't seem to support them properly or the files might be on different servers (DFS)\n");
		cifs_dbg(VFS, "Hardlinks will not be recognized on this mount. Consider mounting with the \"noserverino\" option to silence this message.\n");
	}
}
P
Pavel Shilovsky 已提交
546

547
/*
 * Translate an SMB1 oplock level into the inode's cache flags:
 * exclusive -> read+write caching, level II -> read caching,
 * anything else -> no caching. Only the low nibble of @oplock is used.
 */
void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
{
	oplock &= 0xF;

	if (oplock == OPLOCK_EXCLUSIVE) {
		cinode->oplock = CIFS_CACHE_WRITE_FLG | CIFS_CACHE_READ_FLG;
		cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
			 &cinode->netfs.inode);
	} else if (oplock == OPLOCK_READ) {
		cinode->oplock = CIFS_CACHE_READ_FLG;
		cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
			 &cinode->netfs.inode);
	} else
		cinode->oplock = 0;
}
562

563 564 565 566 567 568 569 570 571 572
/*
 * We wait for oplock breaks to be processed before we attempt to perform
 * writes.
 *
 * Registers the caller as a writer on @cinode, retrying from the top if an
 * oplock break starts after the wait but before registration completes.
 * Returns 0 on success or a -ERESTARTSYS-style error if the killable wait
 * is interrupted. Pair with cifs_put_writer().
 */
int cifs_get_writer(struct cifsInodeInfo *cinode)
{
	int rc;

start:
	rc = wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK,
			 TASK_KILLABLE);
	if (rc)
		return rc;

	spin_lock(&cinode->writers_lock);
	if (!cinode->writers)
		set_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
	cinode->writers++;
	/* Check to see if we have started servicing an oplock break */
	if (test_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags)) {
		/* lost the race with a break - undo registration and rewait */
		cinode->writers--;
		if (cinode->writers == 0) {
			clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
			wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
		}
		spin_unlock(&cinode->writers_lock);
		goto start;
	}
	spin_unlock(&cinode->writers_lock);
	return 0;
}

/*
 * Drop a writer registration taken with cifs_get_writer(); the last
 * writer clears the pending-writers bit and wakes any waiters.
 */
void cifs_put_writer(struct cifsInodeInfo *cinode)
{
	spin_lock(&cinode->writers_lock);
	if (--cinode->writers == 0) {
		clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
		wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
	}
	spin_unlock(&cinode->writers_lock);
}

606 607
/**
 * cifs_queue_oplock_break - queue the oplock break handler for cfile
 * @cfile: The file to break the oplock on
 *
 * This function is called from the demultiplex thread when it
 * receives an oplock break for @cfile.
 *
 * Assumes the tcon->open_file_lock is held.
 * Assumes cfile->file_info_lock is NOT held.
 */
void cifs_queue_oplock_break(struct cifsFileInfo *cfile)
{
	/*
	 * Bump the handle refcount now while we hold the
	 * open_file_lock to enforce the validity of it for the oplock
	 * break handler. The matching put is done at the end of the
	 * handler.
	 */
	cifsFileInfo_get(cfile);

	/* the handler runs asynchronously on the dedicated oplock workqueue */
	queue_work(cifsoplockd_wq, &cfile->oplock_break);
}

629 630 631 632 633 634
/* Mark the oplock break on @cinode finished and wake anyone waiting on it. */
void cifs_done_oplock_break(struct cifsInodeInfo *cinode)
{
	clear_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
	wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK);
}

635 636 637 638
bool
backup_cred(struct cifs_sb_info *cifs_sb)
{
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID) {
639
		if (uid_eq(cifs_sb->ctx->backupuid, current_fsuid()))
640 641 642
			return true;
	}
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID) {
643
		if (in_group_p(cifs_sb->ctx->backupgid))
644 645 646 647 648
			return true;
	}

	return false;
}
649 650 651 652

void
cifs_del_pending_open(struct cifs_pending_open *open)
{
653
	spin_lock(&tlink_tcon(open->tlink)->open_file_lock);
654
	list_del(&open->olist);
655
	spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672
}

/*
 * Initialize @open from @fid/@tlink and append it to the tcon's
 * pending-opens list. Caller must hold the tcon's open_file_lock
 * (see cifs_add_pending_open() for the locking wrapper).
 */
void
cifs_add_pending_open_locked(struct cifs_fid *fid, struct tcon_link *tlink,
			     struct cifs_pending_open *open)
{
	memcpy(open->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE);
	open->oplock = CIFS_OPLOCK_NO_CHANGE;
	open->tlink = tlink;
	fid->pending_open = open;
	list_add_tail(&open->olist, &tlink_tcon(tlink)->pending_opens);
}

void
cifs_add_pending_open(struct cifs_fid *fid, struct tcon_link *tlink,
		      struct cifs_pending_open *open)
{
673
	spin_lock(&tlink_tcon(tlink)->open_file_lock);
674
	cifs_add_pending_open_locked(fid, tlink, open);
675
	spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
676
}
677

678 679
/*
 * Critical section which runs after acquiring deferred_lock.
 * As there is no reference count on cifs_deferred_close, pdclose
 * should not be used outside deferred_lock.
 *
 * Scans the inode's deferred-close list for an entry matching all three
 * of @cfile's file ids; on a hit stores it through @pdclose and returns
 * true, otherwise returns false (*pdclose untouched).
 */
bool
cifs_is_deferred_close(struct cifsFileInfo *cfile, struct cifs_deferred_close **pdclose)
{
	struct cifs_deferred_close *dclose;

	list_for_each_entry(dclose, &CIFS_I(d_inode(cfile->dentry))->deferred_closes, dlist) {
		if ((dclose->netfid == cfile->fid.netfid) &&
			(dclose->persistent_fid == cfile->fid.persistent_fid) &&
			(dclose->volatile_fid == cfile->fid.volatile_fid)) {
			*pdclose = dclose;
			return true;
		}
	}
	return false;
}

699 700 701
/*
 * Critical section which runs after acquiring deferred_lock.
 *
 * Record a deferred close for @cfile on its inode, taking ownership of
 * @dclose. If an entry for the same file ids already exists, @dclose is
 * freed and the existing entry is kept.
 */
void
cifs_add_deferred_close(struct cifsFileInfo *cfile, struct cifs_deferred_close *dclose)
{
	bool is_deferred = false;
	struct cifs_deferred_close *pdclose;

	is_deferred = cifs_is_deferred_close(cfile, &pdclose);
	if (is_deferred) {
		/* duplicate - the caller's allocation is no longer needed */
		kfree(dclose);
		return;
	}

	dclose->tlink = cfile->tlink;
	dclose->netfid = cfile->fid.netfid;
	dclose->persistent_fid = cfile->fid.persistent_fid;
	dclose->volatile_fid = cfile->fid.volatile_fid;
	list_add_tail(&dclose->dlist, &CIFS_I(d_inode(cfile->dentry))->deferred_closes);
}

721 722 723
/*
 * Critical section which runs after acquiring deferred_lock.
 *
 * Remove and free the deferred-close entry for @cfile, if one exists.
 */
void
cifs_del_deferred_close(struct cifsFileInfo *cfile)
{
	bool is_deferred = false;
	struct cifs_deferred_close *dclose;

	is_deferred = cifs_is_deferred_close(cfile, &dclose);
	if (!is_deferred)
		return;
	list_del(&dclose->dlist);
	kfree(dclose);
}

/*
 * Cancel every pending deferred close on @cifs_inode and close those
 * files now.
 *
 * Candidates are collected under open_file_lock (GFP_ATOMIC, since we
 * hold a spinlock) and the actual puts are done after dropping it,
 * because _cifsFileInfo_put() may sleep. A failed kmalloc simply stops
 * collecting; remaining deferred closes fire later from their work items.
 */
void
cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode)
{
	struct cifsFileInfo *cfile = NULL;
	struct file_list *tmp_list, *tmp_next_list;
	struct list_head file_head;

	if (cifs_inode == NULL)
		return;

	INIT_LIST_HEAD(&file_head);
	spin_lock(&cifs_inode->open_file_lock);
	list_for_each_entry(cfile, &cifs_inode->openFileList, flist) {
		if (delayed_work_pending(&cfile->deferred)) {
			/* only act if we beat the scheduled close to the punch */
			if (cancel_delayed_work(&cfile->deferred)) {
				cifs_del_deferred_close(cfile);

				tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
				if (tmp_list == NULL)
					break;
				tmp_list->cfile = cfile;
				list_add_tail(&tmp_list->list, &file_head);
			}
		}
	}
	spin_unlock(&cifs_inode->open_file_lock);

	list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
		_cifsFileInfo_put(tmp_list->cfile, true, false);
		list_del(&tmp_list->list);
		kfree(tmp_list);
	}
}

771 772 773 774
/*
 * Cancel and complete every pending deferred close on @tcon.
 *
 * Same two-phase pattern as cifs_close_deferred_file(): gather candidates
 * under the tcon's open_file_lock (GFP_ATOMIC), then put the file handles
 * after dropping the lock since _cifsFileInfo_put() may sleep.
 */
void
cifs_close_all_deferred_files(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *cfile;
	struct file_list *tmp_list, *tmp_next_list;
	struct list_head file_head;

	INIT_LIST_HEAD(&file_head);
	spin_lock(&tcon->open_file_lock);
	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
		if (delayed_work_pending(&cfile->deferred)) {
			if (cancel_delayed_work(&cfile->deferred)) {
				cifs_del_deferred_close(cfile);

				tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
				if (tmp_list == NULL)
					break;
				tmp_list->cfile = cfile;
				list_add_tail(&tmp_list->list, &file_head);
			}
		}
	}
	spin_unlock(&tcon->open_file_lock);

	list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
		_cifsFileInfo_put(tmp_list->cfile, true, false);
		list_del(&tmp_list->list);
		kfree(tmp_list);
	}
}
801 802 803 804 805 806 807 808 809 810 811 812
/*
 * Cancel and complete pending deferred closes on @tcon for files whose
 * full path contains @path (substring match via strstr, so this also
 * catches files under a directory being acted on).
 *
 * Uses the same collect-under-lock / put-after-unlock pattern as
 * cifs_close_all_deferred_files().
 */
void
cifs_close_deferred_file_under_dentry(struct cifs_tcon *tcon, const char *path)
{
	struct cifsFileInfo *cfile;
	struct file_list *tmp_list, *tmp_next_list;
	struct list_head file_head;
	void *page;
	const char *full_path;

	INIT_LIST_HEAD(&file_head);
	page = alloc_dentry_path();
	spin_lock(&tcon->open_file_lock);
	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
		full_path = build_path_from_dentry(cfile->dentry, page);
		if (strstr(full_path, path)) {
			if (delayed_work_pending(&cfile->deferred)) {
				if (cancel_delayed_work(&cfile->deferred)) {
					cifs_del_deferred_close(cfile);

					tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
					if (tmp_list == NULL)
						break;
					tmp_list->cfile = cfile;
					list_add_tail(&tmp_list->list, &file_head);
				}
			}
		}
	}
	spin_unlock(&tcon->open_file_lock);

	list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
		_cifsFileInfo_put(tmp_list->cfile, true, false);
		list_del(&tmp_list->list);
		kfree(tmp_list);
	}
	free_dentry_path(page);
}
838

839
/* parses DFS referral V3 structure
 * caller is responsible for freeing target_nodes
 * returns:
 * - on success - 0
 * - on failure - errno
 *
 * On any failure after allocation, the partially-built node array is
 * freed and *target_nodes / *num_of_nodes are reset, so the caller never
 * sees a half-initialized result.
 */
int
parse_dfs_referrals(struct get_dfs_referral_rsp *rsp, u32 rsp_size,
		    unsigned int *num_of_nodes,
		    struct dfs_info3_param **target_nodes,
		    const struct nls_table *nls_codepage, int remap,
		    const char *searchName, bool is_unicode)
{
	int i, rc = 0;
	char *data_end;
	struct dfs_referral_level_3 *ref;

	*num_of_nodes = le16_to_cpu(rsp->NumberOfReferrals);

	if (*num_of_nodes < 1) {
		cifs_dbg(VFS, "num_referrals: must be at least > 0, but we get num_referrals = %d\n",
			 *num_of_nodes);
		rc = -EINVAL;
		goto parse_DFS_referrals_exit;
	}

	ref = (struct dfs_referral_level_3 *) &(rsp->referrals);
	if (ref->VersionNumber != cpu_to_le16(3)) {
		cifs_dbg(VFS, "Referrals of V%d version are not supported, should be V3\n",
			 le16_to_cpu(ref->VersionNumber));
		rc = -EINVAL;
		goto parse_DFS_referrals_exit;
	}

	/* get the upper boundary of the resp buffer */
	data_end = (char *)rsp + rsp_size;

	cifs_dbg(FYI, "num_referrals: %d dfs flags: 0x%x ...\n",
		 *num_of_nodes, le32_to_cpu(rsp->DFSFlags));

	*target_nodes = kcalloc(*num_of_nodes, sizeof(struct dfs_info3_param),
				GFP_KERNEL);
	if (*target_nodes == NULL) {
		rc = -ENOMEM;
		goto parse_DFS_referrals_exit;
	}

	/* collect necessary data from referrals */
	for (i = 0; i < *num_of_nodes; i++) {
		char *temp;
		int max_len;
		struct dfs_info3_param *node = (*target_nodes)+i;

		node->flags = le32_to_cpu(rsp->DFSFlags);
		if (is_unicode) {
			/* PathConsumed is in UTF-16 bytes; convert the search
			   name so we can translate it to a local byte count */
			__le16 *tmp = kmalloc(strlen(searchName)*2 + 2,
						GFP_KERNEL);
			if (tmp == NULL) {
				rc = -ENOMEM;
				goto parse_DFS_referrals_exit;
			}
			cifsConvertToUTF16((__le16 *) tmp, searchName,
					   PATH_MAX, nls_codepage, remap);
			node->path_consumed = cifs_utf16_bytes(tmp,
					le16_to_cpu(rsp->PathConsumed),
					nls_codepage);
			kfree(tmp);
		} else
			node->path_consumed = le16_to_cpu(rsp->PathConsumed);

		node->server_type = le16_to_cpu(ref->ServerType);
		node->ref_flag = le16_to_cpu(ref->ReferralEntryFlags);

		/* copy DfsPath */
		temp = (char *)ref + le16_to_cpu(ref->DfsPathOffset);
		max_len = data_end - temp;
		node->path_name = cifs_strndup_from_utf16(temp, max_len,
						is_unicode, nls_codepage);
		if (!node->path_name) {
			rc = -ENOMEM;
			goto parse_DFS_referrals_exit;
		}

		/* copy link target UNC */
		temp = (char *)ref + le16_to_cpu(ref->NetworkAddressOffset);
		max_len = data_end - temp;
		node->node_name = cifs_strndup_from_utf16(temp, max_len,
						is_unicode, nls_codepage);
		if (!node->node_name) {
			rc = -ENOMEM;
			goto parse_DFS_referrals_exit;
		}

		node->ttl = le32_to_cpu(ref->TimeToLive);

		ref++;
	}

parse_DFS_referrals_exit:
	if (rc) {
		free_dfs_info_array(*target_nodes, *num_of_nodes);
		*target_nodes = NULL;
		*num_of_nodes = 0;
	}
	return rc;
}
945 946 947 948 949 950

struct cifs_aio_ctx *
cifs_aio_ctx_alloc(void)
{
	struct cifs_aio_ctx *ctx;

951 952 953 954 955
	/*
	 * Must use kzalloc to initialize ctx->bv to NULL and ctx->direct_io
	 * to false so that we know when we have to unreference pages within
	 * cifs_aio_ctx_release()
	 */
956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973
	ctx = kzalloc(sizeof(struct cifs_aio_ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	INIT_LIST_HEAD(&ctx->list);
	mutex_init(&ctx->aio_mutex);
	init_completion(&ctx->done);
	kref_init(&ctx->refcount);
	return ctx;
}

void
cifs_aio_ctx_release(struct kref *refcount)
{
	struct cifs_aio_ctx *ctx = container_of(refcount,
					struct cifs_aio_ctx, refcount);

	cifsFileInfo_put(ctx->cfile);
974 975 976

	/*
	 * ctx->bv is only set if setup_aio_ctx_iter() was call successfuly
977 978 979
	 * which means that iov_iter_extract_pages() was a success and thus
	 * that we may have references or pins on pages that we need to
	 * release.
980 981
	 */
	if (ctx->bv) {
982 983
		if (ctx->should_dirty || ctx->bv_need_unpin) {
			unsigned int i;
984

985 986 987 988 989 990 991 992
			for (i = 0; i < ctx->nr_pinned_pages; i++) {
				struct page *page = ctx->bv[i].bv_page;

				if (ctx->should_dirty)
					set_page_dirty(page);
				if (ctx->bv_need_unpin)
					unpin_user_page(page);
			}
993 994 995 996
		}
		kvfree(ctx->bv);
	}

997 998 999
	kfree(ctx);
}

1000 1001
/**
 * cifs_alloc_hash - allocate hash and hash context together
1002
 * @name: The name of the crypto hash algo
1003
 * @sdesc: SHASH descriptor where to put the pointer to the hash TFM
1004 1005
 *
 * The caller has to make sure @sdesc is initialized to either NULL or
1006
 * a valid context. It can be freed via cifs_free_hash().
1007 1008
 */
int
1009
cifs_alloc_hash(const char *name, struct shash_desc **sdesc)
1010 1011
{
	int rc = 0;
1012
	struct crypto_shash *alg = NULL;
1013

1014
	if (*sdesc)
1015 1016
		return 0;

1017 1018 1019 1020
	alg = crypto_alloc_shash(name, 0, 0);
	if (IS_ERR(alg)) {
		cifs_dbg(VFS, "Could not allocate shash TFM '%s'\n", name);
		rc = PTR_ERR(alg);
1021 1022 1023 1024
		*sdesc = NULL;
		return rc;
	}

1025
	*sdesc = kmalloc(sizeof(struct shash_desc) + crypto_shash_descsize(alg), GFP_KERNEL);
1026
	if (*sdesc == NULL) {
1027 1028
		cifs_dbg(VFS, "no memory left to allocate shash TFM '%s'\n", name);
		crypto_free_shash(alg);
1029 1030 1031
		return -ENOMEM;
	}

1032
	(*sdesc)->tfm = alg;
1033 1034 1035 1036 1037
	return 0;
}

/**
 * cifs_free_hash - free hash and hash context together
1038
 * @sdesc: Where to find the pointer to the hash TFM
1039
 *
1040
 * Freeing a NULL descriptor is safe.
1041 1042
 */
void
1043
cifs_free_hash(struct shash_desc **sdesc)
1044
{
1045 1046 1047 1048 1049 1050 1051 1052
	if (unlikely(!sdesc) || !*sdesc)
		return;

	if ((*sdesc)->tfm) {
		crypto_free_shash((*sdesc)->tfm);
		(*sdesc)->tfm = NULL;
	}

1053
	kfree_sensitive(*sdesc);
1054 1055
	*sdesc = NULL;
}
1056

/*
 * Locate the hostname component of a UNC path such as "\\server\share".
 * On return, *h points at the first character of the hostname inside
 * @unc (no copy is made) and *len is its length in bytes.
 */
void extract_unc_hostname(const char *unc, const char **h, size_t *len)
{
	const char *start = unc;
	const char *p;

	/* step over any leading path separators */
	while (*start == '\\' || *start == '/')
		start++;

	/* the hostname runs until the next separator or end of string */
	for (p = start; *p && *p != '\\' && *p != '/'; p++)
		;

	*h = start;
	*len = p - start;
}
1073 1074 1075

/**
 * copy_path_name - copy src path to dst, possibly truncating
1076 1077
 * @dst: The destination buffer
 * @src: The source name
1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096
 *
 * returns number of bytes written (including trailing nul)
 */
int copy_path_name(char *dst, const char *src)
{
	int name_len;

	/*
	 * PATH_MAX includes nul, so if strlen(src) >= PATH_MAX it
	 * will truncate and strlen(dst) will be PATH_MAX-1
	 */
	name_len = strscpy(dst, src, PATH_MAX);
	if (WARN_ON_ONCE(name_len < 0))
		name_len = PATH_MAX-1;

	/* we count the trailing nul */
	name_len++;
	return name_len;
}
/* Context handed to the iterate_supers_type() callbacks below. */
struct super_cb_data {
	void *data;		/* value to match against (set by caller) */
	struct super_block *sb;	/* first matching superblock, or NULL */
};

1103
static void tcp_super_cb(struct super_block *sb, void *arg)
1104
{
1105 1106
	struct super_cb_data *sd = arg;
	struct TCP_Server_Info *server = sd->data;
1107 1108 1109
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;

1110
	if (sd->sb)
1111 1112 1113 1114
		return;

	cifs_sb = CIFS_SB(sb);
	tcon = cifs_sb_master_tcon(cifs_sb);
1115 1116
	if (tcon->ses->server == server)
		sd->sb = sb;
1117 1118
}

1119 1120
static struct super_block *__cifs_get_super(void (*f)(struct super_block *, void *),
					    void *data)
1121
{
1122 1123
	struct super_cb_data sd = {
		.data = data,
1124 1125
		.sb = NULL,
	};
1126 1127 1128
	struct file_system_type **fs_type = (struct file_system_type *[]) {
		&cifs_fs_type, &smb3_fs_type, NULL,
	};
1129

1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142
	for (; *fs_type; fs_type++) {
		iterate_supers_type(*fs_type, f, &sd);
		if (sd.sb) {
			/*
			 * Grab an active reference in order to prevent automounts (DFS links)
			 * of expiring and then freeing up our cifs superblock pointer while
			 * we're doing failover.
			 */
			cifs_sb_active(sd.sb);
			return sd.sb;
		}
	}
	return ERR_PTR(-EINVAL);
1143 1144
}

/* Drop the active reference taken by __cifs_get_super(). */
static void __cifs_put_super(struct super_block *sb)
{
	if (!IS_ERR_OR_NULL(sb))
		cifs_sb_deactive(sb);
}

1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161
struct super_block *cifs_get_tcp_super(struct TCP_Server_Info *server)
{
	return __cifs_get_super(tcp_super_cb, server);
}

/* Release the superblock reference obtained via cifs_get_tcp_super(). */
void cifs_put_tcp_super(struct super_block *sb)
{
	__cifs_put_super(sb);
}

#ifdef CONFIG_CIFS_DFS_UPCALL
1162 1163 1164 1165 1166
int match_target_ip(struct TCP_Server_Info *server,
		    const char *share, size_t share_len,
		    bool *result)
{
	int rc;
1167 1168
	char *target;
	struct sockaddr_storage ss;
1169 1170 1171 1172

	*result = false;

	target = kzalloc(share_len + 3, GFP_KERNEL);
1173 1174
	if (!target)
		return -ENOMEM;
1175 1176 1177 1178 1179

	scnprintf(target, share_len + 3, "\\\\%.*s", (int)share_len, share);

	cifs_dbg(FYI, "%s: target name: %s\n", __func__, target + 2);

1180 1181
	rc = dns_resolve_server_name_to_ip(target, (struct sockaddr *)&ss, NULL);
	kfree(target);
1182

1183 1184
	if (rc < 0)
		return rc;
1185

1186
	spin_lock(&server->srv_lock);
1187
	*result = cifs_match_ipaddr((struct sockaddr *)&server->dstaddr, (struct sockaddr *)&ss);
1188
	spin_unlock(&server->srv_lock);
1189
	cifs_dbg(FYI, "%s: ip addresses match: %u\n", __func__, *result);
1190
	return 0;
1191 1192
}

1193
int cifs_update_super_prepath(struct cifs_sb_info *cifs_sb, char *prefix)
1194
{
1195 1196
	kfree(cifs_sb->prepath);

1197
	if (prefix && *prefix) {
A
Al Viro 已提交
1198
		cifs_sb->prepath = kstrdup(prefix, GFP_ATOMIC);
1199 1200
		if (!cifs_sb->prepath)
			return -ENOMEM;
1201 1202 1203 1204 1205 1206

		convert_delimiter(cifs_sb->prepath, CIFS_DIR_SEP(cifs_sb));
	} else
		cifs_sb->prepath = NULL;

	cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
1207
	return 0;
1208
}
1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274

/*
 * Handle weird Windows SMB server behaviour. It responds with
 * STATUS_OBJECT_NAME_INVALID code to SMB2 QUERY_INFO request for
 * "\<server>\<dfsname>\<linkpath>" DFS reference, where <dfsname> contains
 * non-ASCII unicode symbols.
 */
int cifs_inval_name_dfs_link_error(const unsigned int xid,
				   struct cifs_tcon *tcon,
				   struct cifs_sb_info *cifs_sb,
				   const char *full_path,
				   bool *islink)
{
	struct cifs_ses *ses = tcon->ses;
	size_t len;
	char *path;
	char *ref_path;

	*islink = false;

	/*
	 * Fast path - skip check when @full_path doesn't have a prefix path to
	 * look up or tcon is not DFS.
	 */
	if (strlen(full_path) < 2 || !cifs_sb ||
	    (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS) ||
	    !is_tcon_dfs(tcon) || !ses->server->origin_fullpath)
		return 0;

	/*
	 * Slow path - tcon is DFS and @full_path has prefix path, so attempt
	 * to get a referral to figure out whether it is an DFS link.
	 */
	len = strnlen(tcon->tree_name, MAX_TREE_SIZE + 1) + strlen(full_path) + 1;
	path = kmalloc(len, GFP_KERNEL);
	if (!path)
		return -ENOMEM;

	scnprintf(path, len, "%s%s", tcon->tree_name, full_path);
	ref_path = dfs_cache_canonical_path(path + 1, cifs_sb->local_nls,
					    cifs_remap(cifs_sb));
	kfree(path);

	if (IS_ERR(ref_path)) {
		if (PTR_ERR(ref_path) != -EINVAL)
			return PTR_ERR(ref_path);
	} else {
		struct dfs_info3_param *refs = NULL;
		int num_refs = 0;

		/*
		 * XXX: we are not using dfs_cache_find() here because we might
		 * end filling all the DFS cache and thus potentially
		 * removing cached DFS targets that the client would eventually
		 * need during failover.
		 */
		if (ses->server->ops->get_dfs_refer &&
		    !ses->server->ops->get_dfs_refer(xid, ses, ref_path, &refs,
						     &num_refs, cifs_sb->local_nls,
						     cifs_remap(cifs_sb)))
			*islink = refs[0].server_type == DFS_TYPE_LINK;
		free_dfs_info_array(refs, num_refs);
		kfree(ref_path);
	}
	return 0;
}
#endif

/*
 * Wait for the demultiplex thread to move @server out of the
 * CifsNeedReconnect state.
 *
 * Returns 0 once the server has reconnected, -ERESTARTSYS if the wait
 * was interrupted by a signal, or -EHOSTDOWN when @retry is false and
 * the reconnect did not complete within the timeout.
 */
int cifs_wait_for_server_reconnect(struct TCP_Server_Info *server, bool retry)
{
	int timeout = 10;
	int rc;

	/* fast path: nothing to wait for if the server is already connected */
	spin_lock(&server->srv_lock);
	if (server->tcpStatus != CifsNeedReconnect) {
		spin_unlock(&server->srv_lock);
		return 0;
	}
	/* allow the full per-target timeout for every available DFS target */
	timeout *= server->nr_targets;
	spin_unlock(&server->srv_lock);

	/*
	 * Give demultiplex thread up to 10 seconds to each target available for
	 * reconnect -- should be greater than cifs socket timeout which is 7
	 * seconds.
	 *
	 * On "soft" mounts we wait once. Hard mounts keep retrying until
	 * process is killed or server comes back on-line.
	 */
	do {
		rc = wait_event_interruptible_timeout(server->response_q,
						      (server->tcpStatus != CifsNeedReconnect),
						      timeout * HZ);
		if (rc < 0) {
			cifs_dbg(FYI, "%s: aborting reconnect due to received signal\n",
				 __func__);
			return -ERESTARTSYS;
		}

		/* are we still trying to reconnect? */
		spin_lock(&server->srv_lock);
		if (server->tcpStatus != CifsNeedReconnect) {
			spin_unlock(&server->srv_lock);
			return 0;
		}
		spin_unlock(&server->srv_lock);
	} while (retry);

	cifs_dbg(FYI, "%s: gave up waiting on reconnect\n", __func__);
	return -EHOSTDOWN;
}