Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/sfrench/cifs-2.6

* 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/sfrench/cifs-2.6:
  cifs: mangle existing header for SMB_COM_NT_CANCEL
  cifs: remove code for setting timeouts on requests
  [CIFS] cifs: reconnect unresponsive servers
  cifs: set up recurring workqueue job to do SMB echo requests
  cifs: add ability to send an echo request
  cifs: add cifs_call_async
  cifs: allow for different handling of received response
  cifs: clean up sync_mid_result
  cifs: don't reconnect server when we don't get a response
  cifs: wait indefinitely for responses
  cifs: Use mask of ACEs for SID Everyone to calculate all three permissions user, group, and other
  cifs: Fix regression during share-level security mounts (Repost)
  [CIFS] Update cifs version number
  cifs: move mid result processing into common function
  cifs: move locked sections out of DeleteMidQEntry and AllocMidQEntry
  cifs: clean up accesses to midCount
  cifs: make wait_for_free_request take a TCP_Server_Info pointer
  cifs: no need to mark smb_ses_list as cifs_demultiplex_thread is exiting
  cifs: don't fail writepages on -EAGAIN errors
  CIFS: Fix oplock break handling (try #2)

commit 5cdec1fca2
@@ -79,11 +79,11 @@ void cifs_dump_mids(struct TCP_Server_Info *server)
 spin_lock(&GlobalMid_Lock);
 list_for_each(tmp, &server->pending_mid_q) {
 mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
-cERROR(1, "State: %d Cmd: %d Pid: %d Tsk: %p Mid %d",
+cERROR(1, "State: %d Cmd: %d Pid: %d Cbdata: %p Mid %d",
 mid_entry->midState,
 (int)mid_entry->command,
 mid_entry->pid,
-mid_entry->tsk,
+mid_entry->callback_data,
 mid_entry->mid);
 #ifdef CONFIG_CIFS_STATS2
 cERROR(1, "IsLarge: %d buf: %p time rcv: %ld now: %ld",
@@ -218,11 +218,11 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
 mid_entry = list_entry(tmp3, struct mid_q_entry,
 qhead);
 seq_printf(m, "\tState: %d com: %d pid:"
-" %d tsk: %p mid %d\n",
+" %d cbdata: %p mid %d\n",
 mid_entry->midState,
 (int)mid_entry->command,
 mid_entry->pid,
-mid_entry->tsk,
+mid_entry->callback_data,
 mid_entry->mid);
 }
 spin_unlock(&GlobalMid_Lock);
@@ -331,7 +331,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
 atomic_read(&totSmBufAllocCount));
 #endif /* CONFIG_CIFS_STATS2 */

-seq_printf(m, "Operations (MIDs): %d\n", midCount.counter);
+seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
 seq_printf(m,
 "\n%d session %d share reconnects\n",
 tcpSesReconnectCount.counter, tconInfoReconnectCount.counter);
@@ -41,9 +41,12 @@ static struct cifs_wksid wksidarr[NUM_WK_SIDS] = {
 ;


-/* security id for everyone */
+/* security id for everyone/world system group */
 static const struct cifs_sid sid_everyone = {
 1, 1, {0, 0, 0, 0, 0, 1}, {0} };
+/* security id for Authenticated Users system group */
+static const struct cifs_sid sid_authusers = {
+1, 1, {0, 0, 0, 0, 0, 5}, {11} };
 /* group users */
 static const struct cifs_sid sid_user = {1, 2 , {0, 0, 0, 0, 0, 5}, {} };

@@ -365,7 +368,7 @@ static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
 if (num_aces > 0) {
 umode_t user_mask = S_IRWXU;
 umode_t group_mask = S_IRWXG;
-umode_t other_mask = S_IRWXO;
+umode_t other_mask = S_IRWXU | S_IRWXG | S_IRWXO;

 ppace = kmalloc(num_aces * sizeof(struct cifs_ace *),
 GFP_KERNEL);
@@ -390,6 +393,12 @@ static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
 ppace[i]->type,
 &fattr->cf_mode,
 &other_mask);
+if (compare_sids(&(ppace[i]->sid), &sid_authusers))
+access_flags_to_mode(ppace[i]->access_req,
+ppace[i]->type,
+&fattr->cf_mode,
+&other_mask);
+

 /* memcpy((void *)(&(cifscred->aces[i])),
 (void *)ppace[i],
@@ -77,7 +77,11 @@ unsigned int cifs_max_pending = CIFS_MAX_REQ;
 module_param(cifs_max_pending, int, 0);
 MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. "
 "Default: 50 Range: 2 to 256");
-
+unsigned short echo_retries = 5;
+module_param(echo_retries, ushort, 0644);
+MODULE_PARM_DESC(echo_retries, "Number of echo attempts before giving up and "
+"reconnecting server. Default: 5. 0 means "
+"never reconnect.");
 extern mempool_t *cifs_sm_req_poolp;
 extern mempool_t *cifs_req_poolp;
 extern mempool_t *cifs_mid_poolp;
@@ -118,5 +118,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
 extern const struct export_operations cifs_export_ops;
 #endif /* EXPERIMENTAL */

-#define CIFS_VERSION "1.68"
+#define CIFS_VERSION "1.69"
 #endif /* _CIFSFS_H */
@@ -218,6 +218,7 @@ struct TCP_Server_Info {
 bool sec_kerberosu2u; /* supports U2U Kerberos */
 bool sec_ntlmssp; /* supports NTLMSSP */
 bool session_estab; /* mark when very first sess is established */
+struct delayed_work echo; /* echo ping workqueue job */
 #ifdef CONFIG_CIFS_FSCACHE
 struct fscache_cookie *fscache; /* client index cache cookie */
 #endif
@@ -508,6 +509,18 @@ static inline void cifs_stats_bytes_read(struct cifsTconInfo *tcon,

 #endif

+struct mid_q_entry;
+
+/*
+ * This is the prototype for the mid callback function. When creating one,
+ * take special care to avoid deadlocks. Things to bear in mind:
+ *
+ * - it will be called by cifsd
+ * - the GlobalMid_Lock will be held
+ * - the mid will be removed from the pending_mid_q list
+ */
+typedef void (mid_callback_t)(struct mid_q_entry *mid);
+
 /* one of these for every pending CIFS request to the server */
 struct mid_q_entry {
 struct list_head qhead; /* mids waiting on reply from this server */
@@ -519,7 +532,8 @@ struct mid_q_entry {
 unsigned long when_sent; /* time when smb send finished */
 unsigned long when_received; /* when demux complete (taken off wire) */
 #endif
-struct task_struct *tsk; /* task waiting for response */
+mid_callback_t *callback; /* call completion callback */
+void *callback_data; /* general purpose pointer for callback */
 struct smb_hdr *resp_buf; /* response buffer */
 int midState; /* wish this were enum but can not pass to wait_event */
 __u8 command; /* smb command code */
@@ -622,12 +636,9 @@ static inline void free_dfs_info_array(struct dfs_info3_param *param,
 #define CIFS_IOVEC 4 /* array of response buffers */

 /* Type of Request to SendReceive2 */
-#define CIFS_STD_OP 0 /* normal request timeout */
-#define CIFS_LONG_OP 1 /* long op (up to 45 sec, oplock time) */
-#define CIFS_VLONG_OP 2 /* sloow op - can take up to 180 seconds */
-#define CIFS_BLOCKING_OP 4 /* operation can block */
-#define CIFS_ASYNC_OP 8 /* do not wait for response */
-#define CIFS_TIMEOUT_MASK 0x00F /* only one of 5 above set in req */
+#define CIFS_BLOCKING_OP 1 /* operation can block */
+#define CIFS_ASYNC_OP 2 /* do not wait for response */
+#define CIFS_TIMEOUT_MASK 0x003 /* only one of above set in req */
 #define CIFS_LOG_ERROR 0x010 /* log NT STATUS if non-zero */
 #define CIFS_LARGE_BUF_OP 0x020 /* large request buffer */
 #define CIFS_NO_RESP 0x040 /* no response buffer required */
@@ -790,6 +801,9 @@ GLOBAL_EXTERN unsigned int cifs_min_rcv; /* min size of big ntwrk buf pool */
 GLOBAL_EXTERN unsigned int cifs_min_small; /* min size of small buf pool */
 GLOBAL_EXTERN unsigned int cifs_max_pending; /* MAX requests at once to server*/

+/* reconnect after this many failed echo attempts */
+GLOBAL_EXTERN unsigned short echo_retries;
+
 void cifs_oplock_break(struct work_struct *work);
 void cifs_oplock_break_get(struct cifsFileInfo *cfile);
 void cifs_oplock_break_put(struct cifsFileInfo *cfile);
@@ -50,6 +50,7 @@
 #define SMB_COM_SETATTR 0x09 /* trivial response */
 #define SMB_COM_LOCKING_ANDX 0x24 /* trivial response */
 #define SMB_COM_COPY 0x29 /* trivial rsp, fail filename ignrd*/
+#define SMB_COM_ECHO 0x2B /* echo request */
 #define SMB_COM_OPEN_ANDX 0x2D /* Legacy open for old servers */
 #define SMB_COM_READ_ANDX 0x2E
 #define SMB_COM_WRITE_ANDX 0x2F
@@ -760,6 +761,20 @@ typedef struct smb_com_tconx_rsp_ext {
 *
 */

+typedef struct smb_com_echo_req {
+struct smb_hdr hdr;
+__le16 EchoCount;
+__le16 ByteCount;
+char Data[1];
+} __attribute__((packed)) ECHO_REQ;
+
+typedef struct smb_com_echo_rsp {
+struct smb_hdr hdr;
+__le16 SequenceNumber;
+__le16 ByteCount;
+char Data[1];
+} __attribute__((packed)) ECHO_RSP;
+
 typedef struct smb_com_logoff_andx_req {
 struct smb_hdr hdr; /* wct = 2 */
 __u8 AndXCommand;
@@ -61,6 +61,12 @@ extern char *cifs_compose_mount_options(const char *sb_mountdata,
 const char *fullpath, const struct dfs_info3_param *ref,
 char **devname);
 /* extern void renew_parental_timestamps(struct dentry *direntry);*/
+extern struct mid_q_entry *AllocMidQEntry(const struct smb_hdr *smb_buffer,
+struct TCP_Server_Info *server);
+extern void DeleteMidQEntry(struct mid_q_entry *midEntry);
+extern int cifs_call_async(struct TCP_Server_Info *server,
+struct smb_hdr *in_buf, mid_callback_t *callback,
+void *cbdata);
 extern int SendReceive(const unsigned int /* xid */ , struct cifsSesInfo *,
 struct smb_hdr * /* input */ ,
 struct smb_hdr * /* out */ ,
@@ -347,12 +353,13 @@ extern int CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
 const __u16 netfid, const __u64 len,
 const __u64 offset, const __u32 numUnlock,
 const __u32 numLock, const __u8 lockType,
-const bool waitFlag);
+const bool waitFlag, const __u8 oplock_level);
 extern int CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
 const __u16 smb_file_id, const int get_flag,
 const __u64 len, struct file_lock *,
 const __u16 lock_type, const bool waitFlag);
 extern int CIFSSMBTDis(const int xid, struct cifsTconInfo *tcon);
+extern int CIFSSMBEcho(struct TCP_Server_Info *server);
 extern int CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses);

 extern struct cifsSesInfo *sesInfoAlloc(void);
@@ -706,6 +706,53 @@ CIFSSMBTDis(const int xid, struct cifsTconInfo *tcon)
 return rc;
 }

+/*
+ * This is a no-op for now. We're not really interested in the reply, but
+ * rather in the fact that the server sent one and that server->lstrp
+ * gets updated.
+ *
+ * FIXME: maybe we should consider checking that the reply matches request?
+ */
+static void
+cifs_echo_callback(struct mid_q_entry *mid)
+{
+struct TCP_Server_Info *server = mid->callback_data;
+
+DeleteMidQEntry(mid);
+atomic_dec(&server->inFlight);
+wake_up(&server->request_q);
+}
+
+int
+CIFSSMBEcho(struct TCP_Server_Info *server)
+{
+ECHO_REQ *smb;
+int rc = 0;
+
+cFYI(1, "In echo request");
+
+rc = small_smb_init(SMB_COM_ECHO, 0, NULL, (void **)&smb);
+if (rc)
+return rc;
+
+/* set up echo request */
+smb->hdr.Tid = cpu_to_le16(0xffff);
+smb->hdr.WordCount = cpu_to_le16(1);
+smb->EchoCount = cpu_to_le16(1);
+smb->ByteCount = cpu_to_le16(1);
+smb->Data[0] = 'a';
+smb->hdr.smb_buf_length += 3;
+
+rc = cifs_call_async(server, (struct smb_hdr *)smb,
+cifs_echo_callback, server);
+if (rc)
+cFYI(1, "Echo request failed: %d", rc);
+
+cifs_small_buf_release(smb);
+
+return rc;
+}
+
 int
 CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses)
 {
@@ -1193,7 +1240,7 @@ OldOpenRetry:
 pSMB->ByteCount = cpu_to_le16(count);
 /* long_op set to 1 to allow for oplock break timeouts */
 rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
-(struct smb_hdr *)pSMBr, &bytes_returned, CIFS_LONG_OP);
+(struct smb_hdr *)pSMBr, &bytes_returned, 0);
 cifs_stats_inc(&tcon->num_opens);
 if (rc) {
 cFYI(1, "Error in Open = %d", rc);
@@ -1306,7 +1353,7 @@ openRetry:
 pSMB->ByteCount = cpu_to_le16(count);
 /* long_op set to 1 to allow for oplock break timeouts */
 rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
-(struct smb_hdr *)pSMBr, &bytes_returned, CIFS_LONG_OP);
+(struct smb_hdr *)pSMBr, &bytes_returned, 0);
 cifs_stats_inc(&tcon->num_opens);
 if (rc) {
 cFYI(1, "Error in Open = %d", rc);
@@ -1388,7 +1435,7 @@ CIFSSMBRead(const int xid, struct cifsTconInfo *tcon, const int netfid,
 iov[0].iov_base = (char *)pSMB;
 iov[0].iov_len = pSMB->hdr.smb_buf_length + 4;
 rc = SendReceive2(xid, tcon->ses, iov, 1 /* num iovecs */,
-&resp_buf_type, CIFS_STD_OP | CIFS_LOG_ERROR);
+&resp_buf_type, CIFS_LOG_ERROR);
 cifs_stats_inc(&tcon->num_reads);
 pSMBr = (READ_RSP *)iov[0].iov_base;
 if (rc) {
@@ -1663,7 +1710,8 @@ int
 CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
 const __u16 smb_file_id, const __u64 len,
 const __u64 offset, const __u32 numUnlock,
-const __u32 numLock, const __u8 lockType, const bool waitFlag)
+const __u32 numLock, const __u8 lockType,
+const bool waitFlag, const __u8 oplock_level)
 {
 int rc = 0;
 LOCK_REQ *pSMB = NULL;
@@ -1691,6 +1739,7 @@ CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
 pSMB->NumberOfLocks = cpu_to_le16(numLock);
 pSMB->NumberOfUnlocks = cpu_to_le16(numUnlock);
 pSMB->LockType = lockType;
+pSMB->OplockLevel = oplock_level;
 pSMB->AndXCommand = 0xFF; /* none */
 pSMB->Fid = smb_file_id; /* netfid stays le */

@@ -3087,7 +3136,7 @@ CIFSSMBGetCIFSACL(const int xid, struct cifsTconInfo *tcon, __u16 fid,
 iov[0].iov_len = pSMB->hdr.smb_buf_length + 4;

 rc = SendReceive2(xid, tcon->ses, iov, 1 /* num iovec */, &buf_type,
-CIFS_STD_OP);
+0);
 cifs_stats_inc(&tcon->num_acl_get);
 if (rc) {
 cFYI(1, "Send error in QuerySecDesc = %d", rc);
@@ -52,6 +52,9 @@
 #define CIFS_PORT 445
 #define RFC1001_PORT 139

+/* SMB echo "timeout" -- FIXME: tunable? */
+#define SMB_ECHO_INTERVAL (60 * HZ)
+
 extern void SMBNTencrypt(unsigned char *passwd, unsigned char *c8,
 unsigned char *p24);

@@ -152,6 +155,7 @@ cifs_reconnect(struct TCP_Server_Info *server)

 /* before reconnecting the tcp session, mark the smb session (uid)
 and the tid bad so they are not used until reconnected */
+cFYI(1, "%s: marking sessions and tcons for reconnect", __func__);
 spin_lock(&cifs_tcp_ses_lock);
 list_for_each(tmp, &server->smb_ses_list) {
 ses = list_entry(tmp, struct cifsSesInfo, smb_ses_list);
@@ -163,7 +167,9 @@ cifs_reconnect(struct TCP_Server_Info *server)
 }
 }
 spin_unlock(&cifs_tcp_ses_lock);
+
 /* do not want to be sending data on a socket we are freeing */
+cFYI(1, "%s: tearing down socket", __func__);
 mutex_lock(&server->srv_mutex);
 if (server->ssocket) {
 cFYI(1, "State: 0x%x Flags: 0x%lx", server->ssocket->state,
@@ -180,22 +186,20 @@ cifs_reconnect(struct TCP_Server_Info *server)
 kfree(server->session_key.response);
 server->session_key.response = NULL;
 server->session_key.len = 0;
+server->lstrp = jiffies;
+mutex_unlock(&server->srv_mutex);

+/* mark submitted MIDs for retry and issue callback */
+cFYI(1, "%s: issuing mid callbacks", __func__);
 spin_lock(&GlobalMid_Lock);
-list_for_each(tmp, &server->pending_mid_q) {
-mid_entry = list_entry(tmp, struct
-mid_q_entry,
-qhead);
-if (mid_entry->midState == MID_REQUEST_SUBMITTED) {
-/* Mark other intransit requests as needing
-retry so we do not immediately mark the
-session bad again (ie after we reconnect
-below) as they timeout too */
+list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
+mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
+if (mid_entry->midState == MID_REQUEST_SUBMITTED)
 mid_entry->midState = MID_RETRY_NEEDED;
-}
+list_del_init(&mid_entry->qhead);
+mid_entry->callback(mid_entry);
 }
 spin_unlock(&GlobalMid_Lock);
-mutex_unlock(&server->srv_mutex);

 while ((server->tcpStatus != CifsExiting) &&
 (server->tcpStatus != CifsGood)) {
@@ -212,10 +216,9 @@ cifs_reconnect(struct TCP_Server_Info *server)
 if (server->tcpStatus != CifsExiting)
 server->tcpStatus = CifsGood;
 spin_unlock(&GlobalMid_Lock);
-/* atomic_set(&server->inFlight,0);*/
 wake_up(&server->response_q);
 }
 }

 return rc;
 }

@@ -334,6 +337,26 @@ static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB)

 }

+static void
+cifs_echo_request(struct work_struct *work)
+{
+int rc;
+struct TCP_Server_Info *server = container_of(work,
+struct TCP_Server_Info, echo.work);
+
+/* no need to ping if we got a response recently */
+if (time_before(jiffies, server->lstrp + SMB_ECHO_INTERVAL - HZ))
+goto requeue_echo;
+
+rc = CIFSSMBEcho(server);
+if (rc)
+cFYI(1, "Unable to send echo request to server: %s",
+server->hostname);
+
+requeue_echo:
+queue_delayed_work(system_nrt_wq, &server->echo, SMB_ECHO_INTERVAL);
+}
+
 static int
 cifs_demultiplex_thread(struct TCP_Server_Info *server)
 {
@@ -345,8 +368,7 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
 struct msghdr smb_msg;
 struct kvec iov;
 struct socket *csocket = server->ssocket;
-struct list_head *tmp;
-struct cifsSesInfo *ses;
+struct list_head *tmp, *tmp2;
 struct task_struct *task_to_wake = NULL;
 struct mid_q_entry *mid_entry;
 char temp;
@@ -399,7 +421,20 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
 smb_msg.msg_control = NULL;
 smb_msg.msg_controllen = 0;
 pdu_length = 4; /* enough to get RFC1001 header */

 incomplete_rcv:
+if (echo_retries > 0 &&
+time_after(jiffies, server->lstrp +
+(echo_retries * SMB_ECHO_INTERVAL))) {
+cERROR(1, "Server %s has not responded in %d seconds. "
+"Reconnecting...", server->hostname,
+(echo_retries * SMB_ECHO_INTERVAL / HZ));
+cifs_reconnect(server);
+csocket = server->ssocket;
+wake_up(&server->response_q);
+continue;
+}
+
 length =
 kernel_recvmsg(csocket, &smb_msg,
 &iov, 1, pdu_length, 0 /* BB other flags? */);
@@ -559,10 +594,11 @@ incomplete_rcv:
 continue;
 }

-mid_entry = NULL;
+server->lstrp = jiffies;

 task_to_wake = NULL;
 spin_lock(&GlobalMid_Lock);
-list_for_each(tmp, &server->pending_mid_q) {
+list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
 mid_entry = list_entry(tmp, struct mid_q_entry, qhead);

 if ((mid_entry->mid == smb_buffer->Mid) &&
@@ -603,20 +639,19 @@ incomplete_rcv:
 mid_entry->resp_buf = smb_buffer;
 mid_entry->largeBuf = isLargeBuf;
 multi_t2_fnd:
-task_to_wake = mid_entry->tsk;
 mid_entry->midState = MID_RESPONSE_RECEIVED;
+list_del_init(&mid_entry->qhead);
+mid_entry->callback(mid_entry);
 #ifdef CONFIG_CIFS_STATS2
 mid_entry->when_received = jiffies;
 #endif
-/* so we do not time out requests to server
-which is still responding (since server could
-be busy but not dead) */
-server->lstrp = jiffies;
 break;
 }
 mid_entry = NULL;
 }
 spin_unlock(&GlobalMid_Lock);
-if (task_to_wake) {
+
+if (mid_entry != NULL) {
 /* Was previous buf put in mpx struct for multi-rsp? */
 if (!isMultiRsp) {
 /* smb buffer will be freed by user thread */
@@ -625,11 +660,10 @@ multi_t2_fnd:
 else
 smallbuf = NULL;
 }
-wake_up_process(task_to_wake);
 } else if (!is_valid_oplock_break(smb_buffer, server) &&
 !isMultiRsp) {
 cERROR(1, "No task to wake, unknown frame received! "
-"NumMids %d", midCount.counter);
+"NumMids %d", atomic_read(&midCount));
 cifs_dump_mem("Received Data is: ", (char *)smb_buffer,
 sizeof(struct smb_hdr));
 #ifdef CONFIG_CIFS_DEBUG2
@@ -677,44 +711,16 @@ multi_t2_fnd:
 if (smallbuf) /* no sense logging a debug message if NULL */
 cifs_small_buf_release(smallbuf);

-/*
- * BB: we shouldn't have to do any of this. It shouldn't be
- * possible to exit from the thread with active SMB sessions
- */
-spin_lock(&cifs_tcp_ses_lock);
-if (list_empty(&server->pending_mid_q)) {
-/* loop through server session structures attached to this and
-mark them dead */
-list_for_each(tmp, &server->smb_ses_list) {
-ses = list_entry(tmp, struct cifsSesInfo,
-smb_ses_list);
-ses->status = CifsExiting;
-ses->server = NULL;
-}
-spin_unlock(&cifs_tcp_ses_lock);
-} else {
-/* although we can not zero the server struct pointer yet,
-since there are active requests which may depnd on them,
-mark the corresponding SMB sessions as exiting too */
-list_for_each(tmp, &server->smb_ses_list) {
-ses = list_entry(tmp, struct cifsSesInfo,
-smb_ses_list);
-ses->status = CifsExiting;
-}
-
+if (!list_empty(&server->pending_mid_q)) {
 spin_lock(&GlobalMid_Lock);
-list_for_each(tmp, &server->pending_mid_q) {
-mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
-if (mid_entry->midState == MID_REQUEST_SUBMITTED) {
-cFYI(1, "Clearing Mid 0x%x - waking up ",
+list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
+mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
+cFYI(1, "Clearing Mid 0x%x - issuing callback",
 mid_entry->mid);
-task_to_wake = mid_entry->tsk;
-if (task_to_wake)
-wake_up_process(task_to_wake);
-}
+list_del_init(&mid_entry->qhead);
+mid_entry->callback(mid_entry);
 }
 spin_unlock(&GlobalMid_Lock);
-spin_unlock(&cifs_tcp_ses_lock);
 /* 1/8th of sec is more than enough time for them to exit */
 msleep(125);
 }
@@ -732,18 +738,6 @@ multi_t2_fnd:
 coming home not much else we can do but free the memory */
 }

-/* last chance to mark ses pointers invalid
-if there are any pointing to this (e.g
-if a crazy root user tried to kill cifsd
-kernel thread explicitly this might happen) */
-/* BB: This shouldn't be necessary, see above */
-spin_lock(&cifs_tcp_ses_lock);
-list_for_each(tmp, &server->smb_ses_list) {
-ses = list_entry(tmp, struct cifsSesInfo, smb_ses_list);
-ses->server = NULL;
-}
-spin_unlock(&cifs_tcp_ses_lock);
-
 kfree(server->hostname);
 task_to_wake = xchg(&server->tsk, NULL);
 kfree(server);
@@ -1612,6 +1606,8 @@ cifs_put_tcp_session(struct TCP_Server_Info *server)
 list_del_init(&server->tcp_ses_list);
 spin_unlock(&cifs_tcp_ses_lock);

+cancel_delayed_work_sync(&server->echo);
+
 spin_lock(&GlobalMid_Lock);
 server->tcpStatus = CifsExiting;
 spin_unlock(&GlobalMid_Lock);
@@ -1701,8 +1697,10 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
 volume_info->target_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL);
 tcp_ses->session_estab = false;
 tcp_ses->sequence_number = 0;
+tcp_ses->lstrp = jiffies;
 INIT_LIST_HEAD(&tcp_ses->tcp_ses_list);
 INIT_LIST_HEAD(&tcp_ses->smb_ses_list);
+INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request);

 /*
 * at this point we are the only ones with the pointer
@@ -1751,6 +1749,9 @@ cifs_get_tcp_session(struct smb_vol *volume_info)

 cifs_fscache_get_client_cookie(tcp_ses);

+/* queue echo request delayed work */
+queue_delayed_work(system_nrt_wq, &tcp_ses->echo, SMB_ECHO_INTERVAL);
+
 return tcp_ses;

 out_err_crypto_release:
@@ -2965,7 +2966,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
 bcc_ptr++; /* skip password */
 /* already aligned so no need to do it below */
 } else {
-pSMB->PasswordLength = cpu_to_le16(CIFS_SESS_KEY_SIZE);
+pSMB->PasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE);
 /* BB FIXME add code to fail this if NTLMv2 or Kerberos
 specified as required (when that support is added to
 the vfs in the future) as only NTLM or the much
@@ -2983,7 +2984,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
 #endif /* CIFS_WEAK_PW_HASH */
 SMBNTencrypt(tcon->password, ses->server->cryptkey, bcc_ptr);

-bcc_ptr += CIFS_SESS_KEY_SIZE;
+bcc_ptr += CIFS_AUTH_RESP_SIZE;
 if (ses->capabilities & CAP_UNICODE) {
 /* must align unicode strings */
 *bcc_ptr = 0; /* null byte password */
@@ -3021,7 +3022,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
 pSMB->ByteCount = cpu_to_le16(count);

 rc = SendReceive(xid, ses, smb_buffer, smb_buffer_response, &length,
-CIFS_STD_OP);
+0);

 /* above now done in SendReceive */
 if ((rc == 0) && (tcon != NULL)) {
fs/cifs/file.c (114 lines changed)
@@ -726,12 +726,12 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)

 /* BB we could chain these into one lock request BB */
 rc = CIFSSMBLock(xid, tcon, netfid, length, pfLock->fl_start,
-0, 1, lockType, 0 /* wait flag */ );
+0, 1, lockType, 0 /* wait flag */, 0);
 if (rc == 0) {
 rc = CIFSSMBLock(xid, tcon, netfid, length,
 pfLock->fl_start, 1 /* numUnlock */ ,
 0 /* numLock */ , lockType,
-0 /* wait flag */ );
+0 /* wait flag */, 0);
 pfLock->fl_type = F_UNLCK;
 if (rc != 0)
 cERROR(1, "Error unlocking previously locked "
@@ -748,13 +748,13 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
 rc = CIFSSMBLock(xid, tcon, netfid, length,
 pfLock->fl_start, 0, 1,
 lockType | LOCKING_ANDX_SHARED_LOCK,
-0 /* wait flag */);
+0 /* wait flag */, 0);
 if (rc == 0) {
 rc = CIFSSMBLock(xid, tcon, netfid,
 length, pfLock->fl_start, 1, 0,
 lockType |
 LOCKING_ANDX_SHARED_LOCK,
-0 /* wait flag */);
+0 /* wait flag */, 0);
 pfLock->fl_type = F_RDLCK;
 if (rc != 0)
 cERROR(1, "Error unlocking "
@@ -797,8 +797,8 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)

 if (numLock) {
 rc = CIFSSMBLock(xid, tcon, netfid, length,
-pfLock->fl_start,
-0, numLock, lockType, wait_flag);
+pfLock->fl_start, 0, numLock, lockType,
+wait_flag, 0);

 if (rc == 0) {
 /* For Windows locks we must store them. */
@@ -818,9 +818,9 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
 (pfLock->fl_start + length) >=
 (li->offset + li->length)) {
 stored_rc = CIFSSMBLock(xid, tcon,
-netfid,
-li->length, li->offset,
-1, 0, li->type, false);
+netfid, li->length,
+li->offset, 1, 0,
+li->type, false, 0);
 if (stored_rc)
 rc = stored_rc;
 else {
@@ -839,29 +839,6 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
 return rc;
 }

-/*
- * Set the timeout on write requests past EOF. For some servers (Windows)
- * these calls can be very long.
- *
- * If we're writing >10M past the EOF we give a 180s timeout. Anything less
- * than that gets a 45s timeout. Writes not past EOF get 15s timeouts.
- * The 10M cutoff is totally arbitrary. A better scheme for this would be
- * welcome if someone wants to suggest one.
- *
- * We may be able to do a better job with this if there were some way to
- * declare that a file should be sparse.
- */
-static int
-cifs_write_timeout(struct cifsInodeInfo *cifsi, loff_t offset)
-{
-if (offset <= cifsi->server_eof)
-return CIFS_STD_OP;
-else if (offset > (cifsi->server_eof + (10 * 1024 * 1024)))
-return CIFS_VLONG_OP;
-else
-return CIFS_LONG_OP;
-}
-
 /* update the file size (if needed) after a write */
 static void
 cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
@@ -882,7 +859,7 @@ ssize_t cifs_user_write(struct file *file, const char __user *write_data,
 unsigned int total_written;
 struct cifs_sb_info *cifs_sb;
 struct cifsTconInfo *pTcon;
-int xid, long_op;
+int xid;
 struct cifsFileInfo *open_file;
 struct cifsInodeInfo *cifsi = CIFS_I(inode);

@@ -903,7 +880,6 @@ ssize_t cifs_user_write(struct file *file, const char __user *write_data,

 xid = GetXid();

-long_op = cifs_write_timeout(cifsi, *poffset);
 for (total_written = 0; write_size > total_written;
 total_written += bytes_written) {
 rc = -EAGAIN;
@@ -931,7 +907,7 @@ ssize_t cifs_user_write(struct file *file, const char __user *write_data,
 min_t(const int, cifs_sb->wsize,
 write_size - total_written),
 *poffset, &bytes_written,
-NULL, write_data + total_written, long_op);
+NULL, write_data + total_written, 0);
 }
 if (rc || (bytes_written == 0)) {
 if (total_written)
@@ -944,8 +920,6 @@ ssize_t cifs_user_write(struct file *file, const char __user *write_data,
 cifs_update_eof(cifsi, *poffset, bytes_written);
 *poffset += bytes_written;
 }
-long_op = CIFS_STD_OP; /* subsequent writes fast -
-15 seconds is plenty */
 }

 cifs_stats_bytes_written(pTcon, total_written);
@@ -974,7 +948,7 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file,
 unsigned int total_written;
 struct cifs_sb_info *cifs_sb;
 struct cifsTconInfo *pTcon;
-int xid, long_op;
+int xid;
 struct dentry *dentry = open_file->dentry;
 struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);

@@ -987,7 +961,6 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file,

 xid = GetXid();

-long_op = cifs_write_timeout(cifsi, *poffset);
 for (total_written = 0; write_size > total_written;
 total_written += bytes_written) {
 rc = -EAGAIN;
@@ -1017,7 +990,7 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file,
 rc = CIFSSMBWrite2(xid, pTcon,
 open_file->netfid, len,
 *poffset, &bytes_written,
-iov, 1, long_op);
+iov, 1, 0);
 } else
 rc = CIFSSMBWrite(xid, pTcon,
 open_file->netfid,
@@ -1025,7 +998,7 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file,
 write_size - total_written),
 *poffset, &bytes_written,
 write_data + total_written,
-NULL, long_op);
+NULL, 0);
 }
 if (rc || (bytes_written == 0)) {
 if (total_written)
@@ -1038,8 +1011,6 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file,
 cifs_update_eof(cifsi, *poffset, bytes_written);
 *poffset += bytes_written;
 }
-long_op = CIFS_STD_OP; /* subsequent writes fast -
-15 seconds is plenty */
 }

 cifs_stats_bytes_written(pTcon, total_written);
@@ -1239,7 +1210,7 @@ static int cifs_writepages(struct address_space *mapping,
 struct pagevec pvec;
 int rc = 0;
 int scanned = 0;
-int xid, long_op;
+int xid;

 cifs_sb = CIFS_SB(mapping->host->i_sb);

@@ -1377,43 +1348,67 @@ retry:
 break;
 }
 if (n_iov) {
+retry_write:
 open_file = find_writable_file(CIFS_I(mapping->host),
 false);
 if (!open_file) {
 cERROR(1, "No writable handles for inode");
 rc = -EBADF;
 } else {
-long_op = cifs_write_timeout(cifsi, offset);
 rc = CIFSSMBWrite2(xid, tcon, open_file->netfid,
 bytes_to_write, offset,
 &bytes_written, iov, n_iov,
-long_op);
+0);
 cifsFileInfo_put(open_file);
-cifs_update_eof(cifsi, offset, bytes_written);
 }

-if (rc || bytes_written < bytes_to_write) {
-cERROR(1, "Write2 ret %d, wrote %d",
-rc, bytes_written);
-mapping_set_error(mapping, rc);
-} else {
+cFYI(1, "Write2 rc=%d, wrote=%u", rc, bytes_written);
+
+/*
+ * For now, treat a short write as if nothing got
+ * written. A zero length write however indicates
+ * ENOSPC or EFBIG. We have no way to know which
+ * though, so call it ENOSPC for now. EFBIG would
+ * get translated to AS_EIO anyway.
+ *
+ * FIXME: make it take into account the data that did
+ * get written
+ */
+if (rc == 0) {
+if (bytes_written == 0)
+rc = -ENOSPC;
+else if (bytes_written < bytes_to_write)
+rc = -EAGAIN;
+}
+
+/* retry on data-integrity flush */
+if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN)
+goto retry_write;
+
+/* fix the stats and EOF */
+if (bytes_written > 0) {
+cifs_stats_bytes_written(tcon, bytes_written);
+cifs_update_eof(cifsi, offset, bytes_written);
+}

 for (i = 0; i < n_iov; i++) {
 page = pvec.pages[first + i];
-/* Should we also set page error on
-success rc but too little data written? */
-/* BB investigate retry logic on temporary
-server crash cases and how recovery works
-when page marked as error */
-if (rc)
+/* on retryable write error, redirty page */
+if (rc == -EAGAIN)
+redirty_page_for_writepage(wbc, page);
+else if (rc != 0)
 SetPageError(page);
 kunmap(page);
 unlock_page(page);
 end_page_writeback(page);
 page_cache_release(page);
 }
+
+if (rc != -EAGAIN)
+mapping_set_error(mapping, rc);
+else
+rc = 0;
+
 if ((wbc->nr_to_write -= n_iov) <= 0)
 done = 1;
 index = next;
@@ -2192,7 +2187,8 @@ void cifs_oplock_break(struct work_struct *work)
 */
 if (!cfile->oplock_break_cancelled) {
 rc = CIFSSMBLock(0, tlink_tcon(cfile->tlink), cfile->netfid, 0,
-0, 0, 0, LOCKING_ANDX_OPLOCK_RELEASE, false);
+0, 0, 0, LOCKING_ANDX_OPLOCK_RELEASE, false,
+cinode->clientCanCacheRead ? 1 : 0);
 cFYI(1, "Oplock release rc = %d", rc);
 }

@@ -571,7 +571,7 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
 pCifsInode = CIFS_I(netfile->dentry->d_inode);

 cifs_set_oplock_level(pCifsInode,
-pSMB->OplockLevel);
+pSMB->OplockLevel ? OPLOCK_READ : 0);
 /*
 * cifs_oplock_break_put() can't be called
 * from here. Get reference after queueing
@@ -879,7 +879,7 @@ ssetup_ntlmssp_authenticate:
 BCC_LE(smb_buf) = cpu_to_le16(count);

 rc = SendReceive2(xid, ses, iov, 3 /* num_iovecs */, &resp_buf_type,
-CIFS_STD_OP /* not long */ | CIFS_LOG_ERROR);
+CIFS_LOG_ERROR);
 /* SMB request buf freed in SendReceive2 */

 pSMB = (SESSION_SETUP_ANDX *)iov[0].iov_base;
@@ -36,7 +36,13 @@

 extern mempool_t *cifs_mid_poolp;

-static struct mid_q_entry *
+static void
+wake_up_task(struct mid_q_entry *mid)
+{
+wake_up_process(mid->callback_data);
+}
+
+struct mid_q_entry *
 AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
 {
 struct mid_q_entry *temp;
@@ -58,28 +64,28 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
 /* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
 /* when mid allocated can be before when sent */
 temp->when_alloc = jiffies;
-temp->tsk = current;
+
+/*
+ * The default is for the mid to be synchronous, so the
+ * default callback just wakes up the current task.
+ */
+temp->callback = wake_up_task;
+temp->callback_data = current;
 }

-spin_lock(&GlobalMid_Lock);
-list_add_tail(&temp->qhead, &server->pending_mid_q);
 atomic_inc(&midCount);
 temp->midState = MID_REQUEST_ALLOCATED;
-spin_unlock(&GlobalMid_Lock);
 return temp;
 }

-static void
+void
 DeleteMidQEntry(struct mid_q_entry *midEntry)
 {
 #ifdef CONFIG_CIFS_STATS2
 unsigned long now;
 #endif
-spin_lock(&GlobalMid_Lock);
 midEntry->midState = MID_FREE;
-list_del(&midEntry->qhead);
 atomic_dec(&midCount);
-spin_unlock(&GlobalMid_Lock);
 if (midEntry->largeBuf)
 cifs_buf_release(midEntry->resp_buf);
 else
@@ -103,6 +109,16 @@ DeleteMidQEntry(struct mid_q_entry *midEntry)
 mempool_free(midEntry, cifs_mid_poolp);
 }

+static void
+delete_mid(struct mid_q_entry *mid)
+{
+spin_lock(&GlobalMid_Lock);
+list_del(&mid->qhead);
+spin_unlock(&GlobalMid_Lock);
+
+DeleteMidQEntry(mid);
+}
+
 static int
 smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
 {
@@ -244,31 +260,31 @@ smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
 return smb_sendv(server, &iov, 1);
 }

-static int wait_for_free_request(struct cifsSesInfo *ses, const int long_op)
+static int wait_for_free_request(struct TCP_Server_Info *server,
+const int long_op)
 {
 if (long_op == CIFS_ASYNC_OP) {
 /* oplock breaks must not be held up */
-atomic_inc(&ses->server->inFlight);
+atomic_inc(&server->inFlight);
 return 0;
 }

 spin_lock(&GlobalMid_Lock);
 while (1) {
-if (atomic_read(&ses->server->inFlight) >=
-cifs_max_pending){
+if (atomic_read(&server->inFlight) >= cifs_max_pending) {
 spin_unlock(&GlobalMid_Lock);
 #ifdef CONFIG_CIFS_STATS2
-atomic_inc(&ses->server->num_waiters);
+atomic_inc(&server->num_waiters);
 #endif
-wait_event(ses->server->request_q,
-atomic_read(&ses->server->inFlight)
+wait_event(server->request_q,
+atomic_read(&server->inFlight)
 < cifs_max_pending);
 #ifdef CONFIG_CIFS_STATS2
-atomic_dec(&ses->server->num_waiters);
+atomic_dec(&server->num_waiters);
 #endif
 spin_lock(&GlobalMid_Lock);
 } else {
-if (ses->server->tcpStatus == CifsExiting) {
+if (server->tcpStatus == CifsExiting) {
 spin_unlock(&GlobalMid_Lock);
 return -ENOENT;
 }
@@ -278,7 +294,7 @@ static int wait_for_free_request(struct TCP_Server_Info *server,

 /* update # of requests on the wire to server */
 if (long_op != CIFS_BLOCKING_OP)
-atomic_inc(&ses->server->inFlight);
+atomic_inc(&server->inFlight);
 spin_unlock(&GlobalMid_Lock);
 break;
 }
@@ -308,54 +324,82 @@ static int allocate_mid(struct cifsSesInfo *ses, struct smb_hdr *in_buf,
 *ppmidQ = AllocMidQEntry(in_buf, ses->server);
 if (*ppmidQ == NULL)
 return -ENOMEM;
+spin_lock(&GlobalMid_Lock);
+list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
+spin_unlock(&GlobalMid_Lock);
 return 0;
 }

-static int wait_for_response(struct cifsSesInfo *ses,
-struct mid_q_entry *midQ,
-unsigned long timeout,
-unsigned long time_to_wait)
+static int
+wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
 {
-unsigned long curr_timeout;
+int error;

-for (;;) {
-curr_timeout = timeout + jiffies;
-wait_event_timeout(ses->server->response_q,
-midQ->midState != MID_REQUEST_SUBMITTED, timeout);
+error = wait_event_killable(server->response_q,
+midQ->midState != MID_REQUEST_SUBMITTED);
+if (error < 0)
+return -ERESTARTSYS;

-if (time_after(jiffies, curr_timeout) &&
-(midQ->midState == MID_REQUEST_SUBMITTED) &&
-((ses->server->tcpStatus == CifsGood) ||
-(ses->server->tcpStatus == CifsNew))) {
-
-unsigned long lrt;
-
-/* We timed out. Is the server still
-sending replies ? */
-spin_lock(&GlobalMid_Lock);
-lrt = ses->server->lstrp;
-spin_unlock(&GlobalMid_Lock);
-
-/* Calculate time_to_wait past last receive time.
-Although we prefer not to time out if the
-server is still responding - we will time
-out if the server takes more than 15 (or 45
-or 180) seconds to respond to this request
-and has not responded to any request from
-other threads on the client within 10 seconds */
-lrt += time_to_wait;
-if (time_after(jiffies, lrt)) {
-/* No replies for time_to_wait. */
-cERROR(1, "server not responding");
-return -1;
-}
-} else {
-return 0;
-}
-}
+return 0;
 }

+
+/*
+ * Send a SMB request and set the callback function in the mid to handle
+ * the result. Caller is responsible for dealing with timeouts.
+ */
+int
+cifs_call_async(struct TCP_Server_Info *server, struct smb_hdr *in_buf,
+mid_callback_t *callback, void *cbdata)
+{
+int rc;
+struct mid_q_entry *mid;
+
+rc = wait_for_free_request(server, CIFS_ASYNC_OP);
+if (rc)
+return rc;
+
+mutex_lock(&server->srv_mutex);
+mid = AllocMidQEntry(in_buf, server);
+if (mid == NULL) {
+mutex_unlock(&server->srv_mutex);
+return -ENOMEM;
+}
+
+/* put it on the pending_mid_q */
+spin_lock(&GlobalMid_Lock);
+list_add_tail(&mid->qhead, &server->pending_mid_q);
+spin_unlock(&GlobalMid_Lock);
+
+rc = cifs_sign_smb(in_buf, server, &mid->sequence_number);
+if (rc) {
+mutex_unlock(&server->srv_mutex);
+goto out_err;
+}
+
+mid->callback = callback;
+mid->callback_data = cbdata;
+mid->midState = MID_REQUEST_SUBMITTED;
+#ifdef CONFIG_CIFS_STATS2
+atomic_inc(&server->inSend);
+#endif
+rc = smb_send(server, in_buf, in_buf->smb_buf_length);
+#ifdef CONFIG_CIFS_STATS2
+atomic_dec(&server->inSend);
+mid->when_sent = jiffies;
+#endif
+mutex_unlock(&server->srv_mutex);
+if (rc)
+goto out_err;
+
+return rc;
+out_err:
+delete_mid(mid);
+atomic_dec(&server->inFlight);
+wake_up(&server->request_q);
+return rc;
+}
+
 /*
 *
 * Send an SMB Request. No response info (other than return code)
@@ -382,6 +426,81 @@ SendReceiveNoRsp(const unsigned int xid, struct cifsSesInfo *ses,
 return rc;
 }

+static int
+sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
+{
+int rc = 0;
+
+cFYI(1, "%s: cmd=%d mid=%d state=%d", __func__, mid->command,
+mid->mid, mid->midState);
+
+spin_lock(&GlobalMid_Lock);
+/* ensure that it's no longer on the pending_mid_q */
+list_del_init(&mid->qhead);
+
+switch (mid->midState) {
+case MID_RESPONSE_RECEIVED:
+spin_unlock(&GlobalMid_Lock);
+return rc;
+case MID_REQUEST_SUBMITTED:
+/* socket is going down, reject all calls */
+if (server->tcpStatus == CifsExiting) {
+cERROR(1, "%s: canceling mid=%d cmd=0x%x state=%d",
+__func__, mid->mid, mid->command, mid->midState);
+rc = -EHOSTDOWN;
+break;
+}
+case MID_RETRY_NEEDED:
+rc = -EAGAIN;
+break;
+default:
+cERROR(1, "%s: invalid mid state mid=%d state=%d", __func__,
+mid->mid, mid->midState);
+rc = -EIO;
+}
+spin_unlock(&GlobalMid_Lock);
+
+DeleteMidQEntry(mid);
+return rc;
+}
+
+/*
+ * An NT cancel request header looks just like the original request except:
+ *
+ * The Command is SMB_COM_NT_CANCEL
+ * The WordCount is zeroed out
+ * The ByteCount is zeroed out
+ *
+ * This function mangles an existing request buffer into a
+ * SMB_COM_NT_CANCEL request and then sends it.
+ */
+static int
+send_nt_cancel(struct TCP_Server_Info *server, struct smb_hdr *in_buf,
+struct mid_q_entry *mid)
+{
+int rc = 0;
+
+/* -4 for RFC1001 length and +2 for BCC field */
+in_buf->smb_buf_length = sizeof(struct smb_hdr) - 4 + 2;
+in_buf->Command = SMB_COM_NT_CANCEL;
+in_buf->WordCount = 0;
+BCC_LE(in_buf) = 0;
+
+mutex_lock(&server->srv_mutex);
+rc = cifs_sign_smb(in_buf, server, &mid->sequence_number);
+if (rc) {
+mutex_unlock(&server->srv_mutex);
+return rc;
+}
+rc = smb_send(server, in_buf, in_buf->smb_buf_length);
+mutex_unlock(&server->srv_mutex);
+
+cFYI(1, "issued NT_CANCEL for mid %u, rc = %d",
+in_buf->Mid, rc);
+
+return rc;
+}
+
 int
 SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
 struct kvec *iov, int n_vec, int *pRespBufType /* ret */,
@@ -390,7 +509,6 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
 int rc = 0;
 int long_op;
 unsigned int receive_len;
-unsigned long timeout;
 struct mid_q_entry *midQ;
 struct smb_hdr *in_buf = iov[0].iov_base;

@@ -413,7 +531,7 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
 to the same server. We may make this configurable later or
 use ses->maxReq */

-rc = wait_for_free_request(ses, long_op);
+rc = wait_for_free_request(ses->server, long_op);
 if (rc) {
 cifs_small_buf_release(in_buf);
 return rc;
@@ -457,65 +575,20 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
 if (rc < 0)
 goto out;

-if (long_op == CIFS_STD_OP)
-timeout = 15 * HZ;
-else if (long_op == CIFS_VLONG_OP) /* e.g. slow writes past EOF */
-timeout = 180 * HZ;
-else if (long_op == CIFS_LONG_OP)
-timeout = 45 * HZ; /* should be greater than
-servers oplock break timeout (about 43 seconds) */
-else if (long_op == CIFS_ASYNC_OP)
+if (long_op == CIFS_ASYNC_OP)
 goto out;
-else if (long_op == CIFS_BLOCKING_OP)
-timeout = 0x7FFFFFFF; /* large, but not so large as to wrap */
-else {
-cERROR(1, "unknown timeout flag %d", long_op);
-rc = -EIO;
+
+rc = wait_for_response(ses->server, midQ);
+if (rc != 0)
 goto out;
-}

-/* wait for 15 seconds or until woken up due to response arriving or
-due to last connection to this server being unmounted */
-if (signal_pending(current)) {
-/* if signal pending do not hold up user for full smb timeout
-but we still give response a chance to complete */
-timeout = 2 * HZ;
-}
-
-/* No user interrupts in wait - wreaks havoc with performance */
-wait_for_response(ses, midQ, timeout, 10 * HZ);
-
-spin_lock(&GlobalMid_Lock);
-
-if (midQ->resp_buf == NULL) {
-cERROR(1, "No response to cmd %d mid %d",
-midQ->command, midQ->mid);
-if (midQ->midState == MID_REQUEST_SUBMITTED) {
-if (ses->server->tcpStatus == CifsExiting)
-rc = -EHOSTDOWN;
-else {
-ses->server->tcpStatus = CifsNeedReconnect;
-midQ->midState = MID_RETRY_NEEDED;
-}
-}
-
-if (rc != -EHOSTDOWN) {
-if (midQ->midState == MID_RETRY_NEEDED) {
-rc = -EAGAIN;
-cFYI(1, "marking request for retry");
-} else {
-rc = -EIO;
-}
-}
-spin_unlock(&GlobalMid_Lock);
-DeleteMidQEntry(midQ);
-/* Update # of requests on wire to server */
+rc = sync_mid_result(midQ, ses->server);
+if (rc != 0) {
 atomic_dec(&ses->server->inFlight);
 wake_up(&ses->server->request_q);
 return rc;
 }

-spin_unlock(&GlobalMid_Lock);
 receive_len = midQ->resp_buf->smb_buf_length;

 if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) {
@@ -564,14 +637,14 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
 if ((flags & CIFS_NO_RESP) == 0)
 midQ->resp_buf = NULL; /* mark it so buf will
 not be freed by
-DeleteMidQEntry */
+delete_mid */
 } else {
 rc = -EIO;
 cFYI(1, "Bad MID state?");
 }

 out:
-DeleteMidQEntry(midQ);
+delete_mid(midQ);
 atomic_dec(&ses->server->inFlight);
 wake_up(&ses->server->request_q);

@@ -585,7 +658,6 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
 {
 int rc = 0;
 unsigned int receive_len;
-unsigned long timeout;
 struct mid_q_entry *midQ;

 if (ses == NULL) {
@@ -610,7 +682,7 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
 return -EIO;
 }

-rc = wait_for_free_request(ses, long_op);
+rc = wait_for_free_request(ses->server, long_op);
 if (rc)
 return rc;

@@ -649,64 +721,20 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
 if (rc < 0)
 goto out;

-if (long_op == CIFS_STD_OP)
-timeout = 15 * HZ;
-/* wait for 15 seconds or until woken up due to response arriving or
-due to last connection to this server being unmounted */
-else if (long_op == CIFS_ASYNC_OP)
+if (long_op == CIFS_ASYNC_OP)
 goto out;
-else if (long_op == CIFS_VLONG_OP) /* writes past EOF can be slow */
-timeout = 180 * HZ;
-else if (long_op == CIFS_LONG_OP)
-timeout = 45 * HZ; /* should be greater than
-servers oplock break timeout (about 43 seconds) */
-else if (long_op == CIFS_BLOCKING_OP)
-timeout = 0x7FFFFFFF; /* large but no so large as to wrap */
-else {
-cERROR(1, "unknown timeout flag %d", long_op);
-rc = -EIO;
+
+rc = wait_for_response(ses->server, midQ);
+if (rc != 0)
 goto out;
-}

-if (signal_pending(current)) {
-/* if signal pending do not hold up user for full smb timeout
-but we still give response a chance to complete */
-timeout = 2 * HZ;
-}
-
-/* No user interrupts in wait - wreaks havoc with performance */
-wait_for_response(ses, midQ, timeout, 10 * HZ);
-
-spin_lock(&GlobalMid_Lock);
-if (midQ->resp_buf == NULL) {
-cERROR(1, "No response for cmd %d mid %d",
-midQ->command, midQ->mid);
-if (midQ->midState == MID_REQUEST_SUBMITTED) {
-if (ses->server->tcpStatus == CifsExiting)
-rc = -EHOSTDOWN;
-else {
-ses->server->tcpStatus = CifsNeedReconnect;
-midQ->midState = MID_RETRY_NEEDED;
-}
-}
-
-if (rc != -EHOSTDOWN) {
-if (midQ->midState == MID_RETRY_NEEDED) {
-rc = -EAGAIN;
-cFYI(1, "marking request for retry");
-} else {
-rc = -EIO;
-}
-}
-spin_unlock(&GlobalMid_Lock);
-DeleteMidQEntry(midQ);
-/* Update # of requests on wire to server */
+rc = sync_mid_result(midQ, ses->server);
+if (rc != 0) {
 atomic_dec(&ses->server->inFlight);
 wake_up(&ses->server->request_q);
 return rc;
 }

-spin_unlock(&GlobalMid_Lock);
 receive_len = midQ->resp_buf->smb_buf_length;

 if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) {
@@ -755,36 +783,13 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
 }

 out:
-DeleteMidQEntry(midQ);
+delete_mid(midQ);
 atomic_dec(&ses->server->inFlight);
 wake_up(&ses->server->request_q);

 return rc;
 }

-/* Send an NT_CANCEL SMB to cause the POSIX blocking lock to return. */
-
-static int
-send_nt_cancel(struct cifsTconInfo *tcon, struct smb_hdr *in_buf,
-struct mid_q_entry *midQ)
-{
-int rc = 0;
-struct cifsSesInfo *ses = tcon->ses;
-__u16 mid = in_buf->Mid;
-
-header_assemble(in_buf, SMB_COM_NT_CANCEL, tcon, 0);
-in_buf->Mid = mid;
-mutex_lock(&ses->server->srv_mutex);
-rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
-if (rc) {
-mutex_unlock(&ses->server->srv_mutex);
-return rc;
-}
-rc = smb_send(ses->server, in_buf, in_buf->smb_buf_length);
-mutex_unlock(&ses->server->srv_mutex);
-return rc;
-}
-
 /* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
 blocking lock to return. */

@@ -807,7 +812,7 @@ send_lock_cancel(const unsigned int xid, struct cifsTconInfo *tcon,
 pSMB->hdr.Mid = GetNextMid(ses->server);

 return SendReceive(xid, ses, in_buf, out_buf,
-&bytes_returned, CIFS_STD_OP);
+&bytes_returned, 0);
 }

 int
@@ -845,7 +850,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
 return -EIO;
 }

-rc = wait_for_free_request(ses, CIFS_BLOCKING_OP);
+rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP);
 if (rc)
 return rc;

@@ -863,7 +868,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,

 rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
 if (rc) {
-DeleteMidQEntry(midQ);
+delete_mid(midQ);
 mutex_unlock(&ses->server->srv_mutex);
 return rc;
 }
@@ -880,7 +885,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
 mutex_unlock(&ses->server->srv_mutex);

 if (rc < 0) {
-DeleteMidQEntry(midQ);
+delete_mid(midQ);
 return rc;
 }

@@ -899,10 +904,9 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
 if (in_buf->Command == SMB_COM_TRANSACTION2) {
 /* POSIX lock. We send a NT_CANCEL SMB to cause the
 blocking lock to return. */
-
-rc = send_nt_cancel(tcon, in_buf, midQ);
+rc = send_nt_cancel(ses->server, in_buf, midQ);
 if (rc) {
-DeleteMidQEntry(midQ);
+delete_mid(midQ);
 return rc;
 }
 } else {
@@ -914,47 +918,22 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
 /* If we get -ENOLCK back the lock may have
 already been removed. Don't exit in this case. */
 if (rc && rc != -ENOLCK) {
-DeleteMidQEntry(midQ);
+delete_mid(midQ);
 return rc;
 }
 }

-/* Wait 5 seconds for the response. */
-if (wait_for_response(ses, midQ, 5 * HZ, 5 * HZ) == 0) {
+if (wait_for_response(ses->server, midQ) == 0) {
 /* We got the response - restart system call. */
 rstart = 1;
 }
 }

-spin_lock(&GlobalMid_Lock);
-if (midQ->resp_buf) {
-spin_unlock(&GlobalMid_Lock);
-receive_len = midQ->resp_buf->smb_buf_length;
-} else {
-cERROR(1, "No response for cmd %d mid %d",
-midQ->command, midQ->mid);
-if (midQ->midState == MID_REQUEST_SUBMITTED) {
-if (ses->server->tcpStatus == CifsExiting)
-rc = -EHOSTDOWN;
-else {
-ses->server->tcpStatus = CifsNeedReconnect;
-midQ->midState = MID_RETRY_NEEDED;
-}
-}
-
-if (rc != -EHOSTDOWN) {
-if (midQ->midState == MID_RETRY_NEEDED) {
-rc = -EAGAIN;
-cFYI(1, "marking request for retry");
-} else {
-rc = -EIO;
-}
-}
-spin_unlock(&GlobalMid_Lock);
-DeleteMidQEntry(midQ);
+rc = sync_mid_result(midQ, ses->server);
+if (rc != 0)
 return rc;
-}

+receive_len = midQ->resp_buf->smb_buf_length;
 if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) {
 cERROR(1, "Frame too large received. Length: %d Xid: %d",
 receive_len, xid);
@@ -1001,7 +980,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
 BCC(out_buf) = le16_to_cpu(BCC_LE(out_buf));

 out:
-DeleteMidQEntry(midQ);
+delete_mid(midQ);
 if (rstart && rc == -EACCES)
 return -ERESTARTSYS;
 return rc;