Commit 1c3ddfe5 authored by Linus Torvalds

Merge git://git.samba.org/sfrench/cifs-2.6

Pull CIFS fixes from Steve French

* git://git.samba.org/sfrench/cifs-2.6:
  cifs: clean up ordering in exit_cifs
  cifs: clean up call to cifs_dfs_release_automount_timer()
  CIFS: Delete echo_retries module parm
  CIFS: Prepare credits code for a slot reservation
  CIFS: Make wait_for_free_request killable
  CIFS: Introduce credit-based flow control
  CIFS: Simplify inFlight logic
  cifs: fix issue mounting of DFS ROOT when redirecting from one domain controller to the next
  CIFS: Respect negotiated MaxMpxCount
  CIFS: Fix a spurious error in cifs_push_posix_locks
@@ -753,10 +753,6 @@ module loading or during the runtime by using the interface
i.e. echo "value" > /sys/module/cifs/parameters/<param>
1. echo_retries - The number of echo attempts before giving up and
reconnecting to the server. The default is 5. The value 0
means never reconnect.
2. enable_oplocks - Enable or disable oplocks. Oplocks are enabled by default.
1. enable_oplocks - Enable or disable oplocks. Oplocks are enabled by default.
[Y/y/1]. To disable use any of [N/n/0].
@@ -171,8 +171,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
seq_printf(m, "TCP status: %d\n\tLocal Users To "
"Server: %d SecMode: 0x%x Req On Wire: %d",
server->tcpStatus, server->srv_count,
server->sec_mode,
atomic_read(&server->inFlight));
server->sec_mode, in_flight(server));
#ifdef CONFIG_CIFS_STATS2
seq_printf(m, " In Send: %d In MaxReq Wait: %d",
......
@@ -76,12 +76,7 @@ MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
unsigned int cifs_max_pending = CIFS_MAX_REQ;
module_param(cifs_max_pending, int, 0444);
MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. "
"Default: 50 Range: 2 to 256");
unsigned short echo_retries = 5;
module_param(echo_retries, ushort, 0644);
MODULE_PARM_DESC(echo_retries, "Number of echo attempts before giving up and "
"reconnecting server. Default: 5. 0 means "
"never reconnect.");
"Default: 32767 Range: 2 to 32767.");
module_param(enable_oplocks, bool, 0644);
MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks (bool). Default:"
"y/Y/1");
@@ -1111,9 +1106,9 @@ init_cifs(void)
if (cifs_max_pending < 2) {
cifs_max_pending = 2;
cFYI(1, "cifs_max_pending set to min of 2");
} else if (cifs_max_pending > 256) {
cifs_max_pending = 256;
cFYI(1, "cifs_max_pending set to max of 256");
} else if (cifs_max_pending > CIFS_MAX_REQ) {
cifs_max_pending = CIFS_MAX_REQ;
cFYI(1, "cifs_max_pending set to max of %u", CIFS_MAX_REQ);
}
rc = cifs_fscache_register();
@@ -1175,11 +1170,8 @@ static void __exit
exit_cifs(void)
{
cFYI(DBG2, "exit_cifs");
cifs_proc_clean();
cifs_fscache_unregister();
#ifdef CONFIG_CIFS_DFS_UPCALL
unregister_filesystem(&cifs_fs_type);
cifs_dfs_release_automount_timer();
#endif
#ifdef CONFIG_CIFS_ACL
cifs_destroy_idmaptrees();
exit_cifs_idmap();
@@ -1187,10 +1179,11 @@ exit_cifs(void)
#ifdef CONFIG_CIFS_UPCALL
unregister_key_type(&cifs_spnego_key_type);
#endif
unregister_filesystem(&cifs_fs_type);
cifs_destroy_inodecache();
cifs_destroy_mids();
cifs_destroy_request_bufs();
cifs_destroy_mids();
cifs_destroy_inodecache();
cifs_fscache_unregister();
cifs_proc_clean();
}
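The reordering above makes module teardown roughly mirror init_cifs() in reverse: the filesystem is unregistered first so no new mounts can start while the caches are being destroyed, then the request buffers, mids and inode cache are torn down, and the fscache and procfs cleanup runs last. The #ifdef around cifs_dfs_release_automount_timer() can go away because of the cifsproto.h change further below.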
MODULE_AUTHOR("Steve French <sfrench@us.ibm.com>");
......
@@ -55,14 +55,9 @@
/*
* MAX_REQ is the maximum number of requests that WE will send
* on one socket concurrently. It also matches the most common
* value of max multiplex returned by servers. We may
* eventually want to use the negotiated value (in case
* future servers can handle more) when we are more confident that
we will not have problems overloading the socket with pending
* write data.
* on one socket concurrently.
*/
#define CIFS_MAX_REQ 50
#define CIFS_MAX_REQ 32767
#define RFC1001_NAME_LEN 15
#define RFC1001_NAME_LEN_WITH_NULL (RFC1001_NAME_LEN + 1)
@@ -255,7 +250,9 @@ struct TCP_Server_Info {
bool noblocksnd; /* use blocking sendmsg */
bool noautotune; /* do not autotune send buf sizes */
bool tcp_nodelay;
atomic_t inFlight; /* number of requests on the wire to server */
int credits; /* send no more requests at once */
unsigned int in_flight; /* number of requests on the wire to server */
spinlock_t req_lock; /* protect the two values above */
struct mutex srv_mutex;
struct task_struct *tsk;
char server_GUID[16];
@@ -263,6 +260,7 @@ struct TCP_Server_Info {
bool session_estab; /* mark when very first sess is established */
u16 dialect; /* dialect index that server chose */
enum securityEnum secType;
bool oplocks:1; /* enable oplocks */
unsigned int maxReq; /* Clients should submit no more */
/* than maxReq distinct unanswered SMBs to the server when using */
/* multiplexed reads or writes */
@@ -307,6 +305,36 @@ struct TCP_Server_Info {
#endif
};
static inline unsigned int
in_flight(struct TCP_Server_Info *server)
{
unsigned int num;
spin_lock(&server->req_lock);
num = server->in_flight;
spin_unlock(&server->req_lock);
return num;
}
static inline int*
get_credits_field(struct TCP_Server_Info *server)
{
/*
* This will change to switch statement when we reserve slots for echos
* and oplock breaks.
*/
return &server->credits;
}
static inline bool
has_credits(struct TCP_Server_Info *server, int *credits)
{
int num;
spin_lock(&server->req_lock);
num = *credits;
spin_unlock(&server->req_lock);
return num > 0;
}
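The comment in get_credits_field() says the helper is expected to grow into a switch statement once separate slots are reserved for echoes and oplock breaks. That change is not part of this series; a purely hypothetical sketch of the shape it might take (the optype parameter, the op-type constants and the extra credit fields are assumptions, not existing code):

	/* Hypothetical sketch only - not part of this patch. */
	static inline int *
	get_credits_field(struct TCP_Server_Info *server, const int optype)
	{
		switch (optype) {
		case CIFS_ECHO_OP:			/* hypothetical op type */
			return &server->echo_credits;	/* hypothetical field */
		case CIFS_OBREAK_OP:			/* hypothetical op type */
			return &server->oplock_credits;	/* hypothetical field */
		default:
			return &server->credits;
		}
	}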
/*
* Macros to allow the TCP_Server_Info->net field and related code to drop out
* when CONFIG_NET_NS isn't set.
@@ -1010,9 +1038,6 @@ GLOBAL_EXTERN unsigned int cifs_min_rcv; /* min size of big ntwrk buf pool */
GLOBAL_EXTERN unsigned int cifs_min_small; /* min size of small buf pool */
GLOBAL_EXTERN unsigned int cifs_max_pending; /* MAX requests at once to server*/
/* reconnect after this many failed echo attempts */
GLOBAL_EXTERN unsigned short echo_retries;
#ifdef CONFIG_CIFS_ACL
GLOBAL_EXTERN struct rb_root uidtree;
GLOBAL_EXTERN struct rb_root gidtree;
......
@@ -88,6 +88,9 @@ extern int SendReceiveBlockingLock(const unsigned int xid,
struct smb_hdr *in_buf ,
struct smb_hdr *out_buf,
int *bytes_returned);
extern void cifs_add_credits(struct TCP_Server_Info *server,
const unsigned int add);
extern void cifs_set_credits(struct TCP_Server_Info *server, const int val);
extern int checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length);
extern bool is_valid_oplock_break(struct smb_hdr *smb,
struct TCP_Server_Info *);
@@ -168,7 +171,13 @@ extern struct smb_vol *cifs_get_volume_info(char *mount_data,
const char *devname);
extern int cifs_mount(struct cifs_sb_info *, struct smb_vol *);
extern void cifs_umount(struct cifs_sb_info *);
#if IS_ENABLED(CONFIG_CIFS_DFS_UPCALL)
extern void cifs_dfs_release_automount_timer(void);
#else /* ! IS_ENABLED(CONFIG_CIFS_DFS_UPCALL) */
#define cifs_dfs_release_automount_timer() do { } while (0)
#endif /* ! IS_ENABLED(CONFIG_CIFS_DFS_UPCALL) */
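With this IS_ENABLED() guard, callers can invoke cifs_dfs_release_automount_timer() unconditionally: when CONFIG_CIFS_DFS_UPCALL is disabled the call compiles away to an empty statement. That is what lets the #ifdef around the call in exit_cifs() (see the cifsfs.c hunk above) be removed.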
void cifs_proc_init(void);
void cifs_proc_clean(void);
......
@@ -458,7 +458,10 @@ CIFSSMBNegotiate(unsigned int xid, struct cifs_ses *ses)
goto neg_err_exit;
}
server->sec_mode = (__u8)le16_to_cpu(rsp->SecurityMode);
server->maxReq = le16_to_cpu(rsp->MaxMpxCount);
server->maxReq = min_t(unsigned int,
le16_to_cpu(rsp->MaxMpxCount),
cifs_max_pending);
cifs_set_credits(server, server->maxReq);
server->maxBuf = le16_to_cpu(rsp->MaxBufSize);
server->max_vcs = le16_to_cpu(rsp->MaxNumberVcs);
/* even though we do not use raw we might as well set this
@@ -564,7 +567,9 @@ CIFSSMBNegotiate(unsigned int xid, struct cifs_ses *ses)
/* one byte, so no need to convert this or EncryptionKeyLen from
little endian */
server->maxReq = le16_to_cpu(pSMBr->MaxMpxCount);
server->maxReq = min_t(unsigned int, le16_to_cpu(pSMBr->MaxMpxCount),
cifs_max_pending);
cifs_set_credits(server, server->maxReq);
/* probably no need to store and check maxvcs */
server->maxBuf = le32_to_cpu(pSMBr->MaxBufferSize);
server->max_rw = le32_to_cpu(pSMBr->MaxRawSize);
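A quick worked example of the clamp introduced above: if the server negotiates MaxMpxCount = 10 while cifs_max_pending is left at its new default of 32767, then maxReq = min(10, 32767) = 10 and cifs_set_credits() seeds the connection with 10 credits, so the client can no longer keep up to 50 requests on the wire against a server that only agreed to 10; conversely, a very large MaxMpxCount is still capped by whatever cifs_max_pending the administrator chose.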
@@ -716,8 +721,7 @@ cifs_echo_callback(struct mid_q_entry *mid)
struct TCP_Server_Info *server = mid->callback_data;
DeleteMidQEntry(mid);
atomic_dec(&server->inFlight);
wake_up(&server->request_q);
cifs_add_credits(server, 1);
}
int
@@ -1669,8 +1673,7 @@ cifs_readv_callback(struct mid_q_entry *mid)
queue_work(system_nrt_wq, &rdata->work);
DeleteMidQEntry(mid);
atomic_dec(&server->inFlight);
wake_up(&server->request_q);
cifs_add_credits(server, 1);
}
/* cifs_async_readv - send an async read, and set up mid to handle result */
@@ -2110,8 +2113,7 @@ cifs_writev_callback(struct mid_q_entry *mid)
queue_work(system_nrt_wq, &wdata->work);
DeleteMidQEntry(mid);
atomic_dec(&tcon->ses->server->inFlight);
wake_up(&tcon->ses->server->request_q);
cifs_add_credits(tcon->ses->server, 1);
}
/* cifs_async_writev - send an async write, and set up mid to handle result */
......
@@ -373,12 +373,22 @@ allocate_buffers(struct TCP_Server_Info *server)
static bool
server_unresponsive(struct TCP_Server_Info *server)
{
if (echo_retries > 0 && server->tcpStatus == CifsGood &&
time_after(jiffies, server->lstrp +
(echo_retries * SMB_ECHO_INTERVAL))) {
/*
* We need to wait 2 echo intervals to make sure we handle such
* situations right:
* 1s client sends a normal SMB request
* 2s client gets a response
* 30s echo workqueue job pops, and decides we got a response recently
* and don't need to send another
* ...
* 65s kernel_recvmsg times out, and we see that we haven't gotten
* a response in >60s.
*/
if (server->tcpStatus == CifsGood &&
time_after(jiffies, server->lstrp + 2 * SMB_ECHO_INTERVAL)) {
cERROR(1, "Server %s has not responded in %d seconds. "
"Reconnecting...", server->hostname,
(echo_retries * SMB_ECHO_INTERVAL / HZ));
(2 * SMB_ECHO_INTERVAL) / HZ);
cifs_reconnect(server);
wake_up(&server->response_q);
return true;
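Assuming SMB_ECHO_INTERVAL keeps its existing definition of 60 * HZ (it is not touched by this series), the new threshold works out to 2 * 60 = 120 seconds of silence before reconnecting, which is the value the cERROR message above prints.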
@@ -642,19 +652,11 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
spin_unlock(&GlobalMid_Lock);
wake_up_all(&server->response_q);
/*
* Check if we have blocked requests that need to free. Note that
* cifs_max_pending is normally 50, but can be set at module install
* time to as little as two.
*/
spin_lock(&GlobalMid_Lock);
if (atomic_read(&server->inFlight) >= cifs_max_pending)
atomic_set(&server->inFlight, cifs_max_pending - 1);
/*
* We do not want to set the max_pending too low or we could end up
* with the counter going negative.
*/
spin_unlock(&GlobalMid_Lock);
/* check if we have blocked requests that need to free */
spin_lock(&server->req_lock);
if (server->credits <= 0)
server->credits = 1;
spin_unlock(&server->req_lock);
/*
* Although there should not be any requests blocked on this queue it
* can not hurt to be paranoid and try to wake up requests that may
@@ -1909,7 +1911,8 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
tcp_ses->noblocksnd = volume_info->noblocksnd;
tcp_ses->noautotune = volume_info->noautotune;
tcp_ses->tcp_nodelay = volume_info->sockopt_tcp_nodelay;
atomic_set(&tcp_ses->inFlight, 0);
tcp_ses->in_flight = 0;
tcp_ses->credits = 1;
init_waitqueue_head(&tcp_ses->response_q);
init_waitqueue_head(&tcp_ses->request_q);
INIT_LIST_HEAD(&tcp_ses->pending_mid_q);
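A freshly created TCP_Server_Info therefore starts with nothing in flight and exactly one credit: just enough to send the NEGOTIATE request. cifs_negotiate_protocol() (see the hunk below) resets the count to 1 before each negotiate attempt, and CIFSSMBNegotiate() then raises it to the server-granted maxReq via cifs_set_credits().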
@@ -3371,7 +3374,7 @@ cifs_ra_pages(struct cifs_sb_info *cifs_sb)
int
cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info)
{
int rc = 0;
int rc;
int xid;
struct cifs_ses *pSesInfo;
struct cifs_tcon *tcon;
@@ -3398,6 +3401,7 @@ cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info)
FreeXid(xid);
}
#endif
rc = 0;
tcon = NULL;
pSesInfo = NULL;
srvTcp = NULL;
@@ -3759,9 +3763,11 @@ int cifs_negotiate_protocol(unsigned int xid, struct cifs_ses *ses)
if (server->maxBuf != 0)
return 0;
cifs_set_credits(server, 1);
rc = CIFSSMBNegotiate(xid, ses);
if (rc == -EAGAIN) {
/* retry only once on 1st time connection */
cifs_set_credits(server, 1);
rc = CIFSSMBNegotiate(xid, ses);
if (rc == -EAGAIN)
rc = -EHOSTDOWN;
......
@@ -171,7 +171,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, umode_t mode,
}
tcon = tlink_tcon(tlink);
if (enable_oplocks)
if (tcon->ses->server->oplocks)
oplock = REQ_OPLOCK;
if (nd)
@@ -492,7 +492,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
{
int xid;
int rc = 0; /* to get around spurious gcc warning, set to zero here */
__u32 oplock = enable_oplocks ? REQ_OPLOCK : 0;
__u32 oplock;
__u16 fileHandle = 0;
bool posix_open = false;
struct cifs_sb_info *cifs_sb;
@@ -518,6 +518,8 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
}
pTcon = tlink_tcon(tlink);
oplock = pTcon->ses->server->oplocks ? REQ_OPLOCK : 0;
/*
* Don't allow the separator character in a path component.
* The VFS will not allow "/", but "\" is allowed by posix.
......
@@ -380,7 +380,7 @@ int cifs_open(struct inode *inode, struct file *file)
cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
inode, file->f_flags, full_path);
if (enable_oplocks)
if (tcon->ses->server->oplocks)
oplock = REQ_OPLOCK;
else
oplock = 0;
@@ -505,7 +505,7 @@ static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush)
cFYI(1, "inode = 0x%p file flags 0x%x for %s",
inode, pCifsFile->f_flags, full_path);
if (enable_oplocks)
if (tcon->ses->server->oplocks)
oplock = REQ_OPLOCK;
else
oplock = 0;
@@ -960,9 +960,9 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
INIT_LIST_HEAD(&locks_to_send);
/*
* Allocating count locks is enough because no locks can be added to
* the list while we are holding cinode->lock_mutex that protects
* locking operations of this inode.
* Allocating count locks is enough because no FL_POSIX locks can be
* added to the list while we are holding cinode->lock_mutex that
* protects locking operations of this inode.
*/
for (; i < count; i++) {
lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
@@ -973,18 +973,20 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
list_add_tail(&lck->llist, &locks_to_send);
}
i = 0;
el = locks_to_send.next;
lock_flocks();
cifs_for_each_lock(cfile->dentry->d_inode, before) {
flock = *before;
if ((flock->fl_flags & FL_POSIX) == 0)
continue;
if (el == &locks_to_send) {
/* something is really wrong */
/*
* The list ended. We don't have enough allocated
* structures - something is really wrong.
*/
cERROR(1, "Can't push all brlocks!");
break;
}
flock = *before;
if ((flock->fl_flags & FL_POSIX) == 0)
continue;
length = 1 + flock->fl_end - flock->fl_start;
if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
type = CIFS_RDLCK;
@@ -996,7 +998,6 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
lck->length = length;
lck->type = type;
lck->offset = flock->fl_start;
i++;
el = el->next;
}
unlock_flocks();
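The reordering above is the substance of the fix: entries that are not FL_POSIX are now skipped before the exhaustion check on the preallocated list, so an flock-style lock encountered after all of the counted POSIX locks no longer trips the spurious "Can't push all brlocks!" error. The i++ is dropped as well, since el alone walks the preallocated entries.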
......
@@ -690,3 +690,22 @@ backup_cred(struct cifs_sb_info *cifs_sb)
return false;
}
void
cifs_add_credits(struct TCP_Server_Info *server, const unsigned int add)
{
spin_lock(&server->req_lock);
server->credits += add;
server->in_flight--;
spin_unlock(&server->req_lock);
wake_up(&server->request_q);
}
void
cifs_set_credits(struct TCP_Server_Info *server, const int val)
{
spin_lock(&server->req_lock);
server->credits = val;
server->oplocks = val > 1 ? enable_oplocks : false;
spin_unlock(&server->req_lock);
}
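Note the side effect in cifs_set_credits(): when the server grants only a single slot (val <= 1), server->oplocks is forced off for that connection regardless of the enable_oplocks module parameter, presumably because an oplock break could not be serviced while the lone slot is busy. That per-server flag is what the dir.c and file.c hunks above now test instead of the global enable_oplocks.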
@@ -254,44 +254,60 @@ smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
return smb_sendv(server, &iov, 1);
}
static int wait_for_free_request(struct TCP_Server_Info *server,
const int long_op)
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int optype,
int *credits)
{
if (long_op == CIFS_ASYNC_OP) {
int rc;
spin_lock(&server->req_lock);
if (optype == CIFS_ASYNC_OP) {
/* oplock breaks must not be held up */
atomic_inc(&server->inFlight);
server->in_flight++;
*credits -= 1;
spin_unlock(&server->req_lock);
return 0;
}
spin_lock(&GlobalMid_Lock);
while (1) {
if (atomic_read(&server->inFlight) >= cifs_max_pending) {
spin_unlock(&GlobalMid_Lock);
if (*credits <= 0) {
spin_unlock(&server->req_lock);
cifs_num_waiters_inc(server);
wait_event(server->request_q,
atomic_read(&server->inFlight)
< cifs_max_pending);
rc = wait_event_killable(server->request_q,
has_credits(server, credits));
cifs_num_waiters_dec(server);
spin_lock(&GlobalMid_Lock);
if (rc)
return rc;
spin_lock(&server->req_lock);
} else {
if (server->tcpStatus == CifsExiting) {
spin_unlock(&GlobalMid_Lock);
spin_unlock(&server->req_lock);
return -ENOENT;
}
/* can not count locking commands against total
as they are allowed to block on server */
/*
* Can not count locking commands against total
* as they are allowed to block on server.
*/
/* update # of requests on the wire to server */
if (long_op != CIFS_BLOCKING_OP)
atomic_inc(&server->inFlight);
spin_unlock(&GlobalMid_Lock);
if (optype != CIFS_BLOCKING_OP) {
*credits -= 1;
server->in_flight++;
}
spin_unlock(&server->req_lock);
break;
}
}
return 0;
}
static int
wait_for_free_request(struct TCP_Server_Info *server, const int optype)
{
return wait_for_free_credits(server, optype, get_credits_field(server));
}
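Putting the pieces together, the contract is: a sender obtains a credit (and bumps in_flight) through wait_for_free_request(), and every completion or error path hands the credit back with cifs_add_credits(server, 1), which also wakes any waiter. A minimal illustration of that pairing, under stated assumptions (cifs_send_request is a placeholder, not a function from this patch; 0 here simply stands for a regular, non-async, non-blocking operation):

	/* Illustrative sketch only - not part of the patch. */
	static int send_one_request(struct TCP_Server_Info *server, void *req)
	{
		int rc;

		/* take one credit; may sleep, returns -ERESTARTSYS if killed */
		rc = wait_for_free_request(server, 0);
		if (rc)
			return rc;	/* no credit was consumed on failure */

		rc = cifs_send_request(server, req);	/* placeholder send path */
		if (rc) {
			/* the response will never arrive, so return the credit now */
			cifs_add_credits(server, 1);
			return rc;
		}

		/* on success the mid callback (e.g. cifs_echo_callback) returns it */
		return 0;
	}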
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
struct mid_q_entry **ppmidQ)
{
@@ -359,7 +375,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
mid = AllocMidQEntry(hdr, server);
if (mid == NULL) {
mutex_unlock(&server->srv_mutex);
atomic_dec(&server->inFlight);
cifs_add_credits(server, 1);
wake_up(&server->request_q);
return -ENOMEM;
}
@@ -392,7 +408,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
return rc;
out_err:
delete_mid(mid);
atomic_dec(&server->inFlight);
cifs_add_credits(server, 1);
wake_up(&server->request_q);
return rc;
}
@@ -564,8 +580,7 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
mutex_unlock(&ses->server->srv_mutex);
cifs_small_buf_release(in_buf);
/* Update # of requests on wire to server */
atomic_dec(&ses->server->inFlight);
wake_up(&ses->server->request_q);
cifs_add_credits(ses->server, 1);
return rc;
}
rc = cifs_sign_smb2(iov, n_vec, ses->server, &midQ->sequence_number);
@@ -601,8 +616,7 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
midQ->callback = DeleteMidQEntry;
spin_unlock(&GlobalMid_Lock);
cifs_small_buf_release(in_buf);
atomic_dec(&ses->server->inFlight);
wake_up(&ses->server->request_q);
cifs_add_credits(ses->server, 1);
return rc;
}
spin_unlock(&GlobalMid_Lock);
@@ -612,8 +626,7 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
rc = cifs_sync_mid_result(midQ, ses->server);
if (rc != 0) {
atomic_dec(&ses->server->inFlight);
wake_up(&ses->server->request_q);
cifs_add_credits(ses->server, 1);
return rc;
}
@@ -637,8 +650,7 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
midQ->resp_buf = NULL;
out:
delete_mid(midQ);
atomic_dec(&ses->server->inFlight);
wake_up(&ses->server->request_q);
cifs_add_credits(ses->server, 1);
return rc;
}
@@ -688,8 +700,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
if (rc) {
mutex_unlock(&ses->server->srv_mutex);
/* Update # of requests on wire to server */
atomic_dec(&ses->server->inFlight);
wake_up(&ses->server->request_q);
cifs_add_credits(ses->server, 1);
return rc;
}
@@ -721,8 +732,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
/* no longer considered to be "in-flight" */
midQ->callback = DeleteMidQEntry;
spin_unlock(&GlobalMid_Lock);
atomic_dec(&ses->server->inFlight);
wake_up(&ses->server->request_q);
cifs_add_credits(ses->server, 1);
return rc;
}
spin_unlock(&GlobalMid_Lock);
@@ -730,8 +740,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
rc = cifs_sync_mid_result(midQ, ses->server);
if (rc != 0) {
atomic_dec(&ses->server->inFlight);
wake_up(&ses->server->request_q);
cifs_add_credits(ses->server, 1);
return rc;
}
@@ -747,8 +756,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
rc = cifs_check_receive(midQ, ses->server, 0);
out:
delete_mid(midQ);
atomic_dec(&ses->server->inFlight);
wake_up(&ses->server->request_q);
cifs_add_credits(ses->server, 1);
return rc;
}
......