Multichannel patch series reconnect bug
@ 2022-01-14 21:19 Steve French
From: Steve French @ 2022-01-14 21:19 UTC (permalink / raw)
To: Shyam Prasad N; +Cc: Paulo Alcantara, Enzo Matsumiya, CIFS
I have narrowed down the multichannel patch series bug that causes a
few of the DFS regression tests to fail. It looks like it is in this
part of the patch:
cifs-check-reconnects-for-channels-of-active-tcons-t.patch
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index f88d2b10045a..4c2048a8e464 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -119,6 +119,7 @@ enum statusEnum {
CifsInSessSetup,
CifsNeedTcon,
CifsInTcon,
+ CifsNeedFilesInvalidate,
CifsInFilesInvalidate
};
@@ -925,6 +926,7 @@ struct cifs_chan {
*/
struct cifs_ses {
struct list_head smb_ses_list;
+ struct list_head rlist; /* reconnect list */
struct list_head tcon_list;
struct cifs_tcon *tcon_ipc;
struct mutex session_mutex;
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 1dafaf7c4e5e..128c71b48002 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -335,6 +335,7 @@ static int __cifs_reconnect(struct TCP_Server_Info *server,
spin_unlock(&cifs_tcp_ses_lock);
cifs_swn_reset_server_dstaddr(server);
mutex_unlock(&server->srv_mutex);
+ mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
}
} while (server->tcpStatus == CifsNeedReconnect);
@@ -4399,9 +4400,22 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru
char *tree;
struct dfs_info3_param ref = {0};
+ /* only send once per connect */
+ spin_lock(&cifs_tcp_ses_lock);
+ if (tcon->ses->status != CifsGood ||
+ (tcon->tidStatus != CifsNew &&
+ tcon->tidStatus != CifsNeedTcon)) {
+ spin_unlock(&cifs_tcp_ses_lock);
+ return 0;
+ }
+ tcon->tidStatus = CifsInTcon;
+ spin_unlock(&cifs_tcp_ses_lock);
+
tree = kzalloc(MAX_TREE_SIZE, GFP_KERNEL);
- if (!tree)
- return -ENOMEM;
+ if (!tree) {
+ rc = -ENOMEM;
+ goto out;
+ }
if (tcon->ipc) {
scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$", server->hostname);
@@ -4433,11 +4447,18 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru
kfree(tree);
cifs_put_tcp_super(sb);
+ if (rc) {
+ spin_lock(&cifs_tcp_ses_lock);
+ tcon->tidStatus = CifsNeedTcon;
+ spin_unlock(&cifs_tcp_ses_lock);
+ }
+
return rc;
}
#else
int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon,
const struct nls_table *nlsc)
{
+ int rc;
const struct smb_version_operations *ops = tcon->ses->server->ops;
/* only send once per connect */
@@ -4451,6 +4472,13 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru
tcon->tidStatus = CifsInTcon;
spin_unlock(&cifs_tcp_ses_lock);
- return ops->tree_connect(xid, tcon->ses, tcon->treeName, tcon, nlsc);
+ rc = ops->tree_connect(xid, tcon->ses, tcon->treeName, tcon, nlsc);
+ if (rc) {
+ spin_lock(&cifs_tcp_ses_lock);
+ tcon->tidStatus = CifsNeedTcon;
+ spin_unlock(&cifs_tcp_ses_lock);
+ }
+
+ return rc;
}
#endif
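
For anyone skimming the hunks: the core of the change is a check-and-set
gate on tcon->tidStatus, so that only one caller per reconnect actually
sends the tree connect, with the status rolled back to CifsNeedTcon on
failure so a later reconnect pass retries it; the mod_delayed_work() call
in __cifs_reconnect() just kicks the server's delayed reconnect worker to
run immediately. Below is a minimal user-space sketch of that gating
pattern, not the kernel code: the enum, struct tcon, and
send_tree_connect() are simplified stand-ins, and setting the status to
"good" on success is a simplification (the kernel leaves that to later
code paths).

#include <pthread.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's statusEnum and cifs_tcon. */
enum tid_status { TID_NEW, TID_GOOD, TID_NEED_TCON, TID_IN_TCON };

struct tcon {
	enum tid_status tid_status;
	pthread_mutex_t lock;	/* stands in for cifs_tcp_ses_lock */
};

/* Hypothetical transport call: 0 on success, negative on error. */
static int send_tree_connect(struct tcon *tcon)
{
	(void)tcon;
	return 0;
}

/*
 * Mirrors the flow the patch adds: bail out unless a tree connect is
 * actually needed, mark it in progress under the lock (so only one
 * caller sends the request per reconnect), and on failure reset the
 * status so a later reconnect pass retries it.
 */
static int tree_connect_once(struct tcon *tcon)
{
	int rc;

	pthread_mutex_lock(&tcon->lock);
	if (tcon->tid_status != TID_NEW &&
	    tcon->tid_status != TID_NEED_TCON) {
		pthread_mutex_unlock(&tcon->lock);
		return 0;	/* already connected or in progress */
	}
	tcon->tid_status = TID_IN_TCON;
	pthread_mutex_unlock(&tcon->lock);

	rc = send_tree_connect(tcon);

	pthread_mutex_lock(&tcon->lock);
	/* On error, roll back to "needs tcon", as the patch does. */
	tcon->tid_status = rc ? TID_NEED_TCON : TID_GOOD;
	pthread_mutex_unlock(&tcon->lock);
	return rc;
}

int main(void)
{
	struct tcon t = {
		.tid_status = TID_NEED_TCON,
		.lock = PTHREAD_MUTEX_INITIALIZER,
	};

	printf("tree_connect_once: rc=%d\n", tree_connect_once(&t));
	return 0;
}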
--
Thanks,
Steve