smb3: Fix regression in time handling
Fixes: cb7a69e605 ("cifs: Initialize filesystem timestamp ranges")
Only very old servers (e.g. OS/2 and DOS) did not support
DCE TIME (100 nanosecond granularity). Fix the checks used
to set minimum and maximum times.
Fixes xfstest generic/258 (on 5.4-rc1 and later)
CC: Deepa Dinamani <deepa.kernel@gmail.com>
Acked-by: Jeff Layton <jlayton@kernel.org>
Signed-off-by: Steve French <stfrench@microsoft.com>
Reviewed-by: Pavel Shilovsky <pshilov@microsoft.com>
commit d4cfbf04b2
parent d0959b080b
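For context on the DCE TIME units mentioned above: NT/DCE timestamps count 100-nanosecond ticks from 1601-01-01, so turning a tick count into a Unix time_t is a divide by 10^7 plus a fixed epoch offset of 11644473600 seconds. The sketch below is a standalone userspace illustration of that arithmetic (the constant and helper names are mine, not the kernel's); it shows why cifs_NTtimeToUnix(0) in the hunk below yields an s_time_min in 1601 and cifs_NTtimeToUnix(S64_MAX) an s_time_max around the year 30828.

#include <stdio.h>
#include <stdint.h>

#define TICKS_PER_SEC   10000000LL      /* 100 ns ticks per second */
#define NT_UNIX_OFFSET  11644473600LL   /* seconds from 1601-01-01 to 1970-01-01 */

/* illustration only: the seconds part of an NT/DCE tick count as a Unix time */
static int64_t nt_to_unix_sec(int64_t nt_ticks)
{
        return nt_ticks / TICKS_PER_SEC - NT_UNIX_OFFSET;
}

int main(void)
{
        /* smallest and largest representable NT times bound the usable range */
        printf("s_time_min ~ %lld\n", (long long)nt_to_unix_sec(0));         /* -11644473600, i.e. 1601 */
        printf("s_time_max ~ %lld\n", (long long)nt_to_unix_sec(INT64_MAX)); /* ~910692730085, i.e. ~30828 */
        return 0;
}

The in-kernel helper also keeps the sub-second remainder as nanoseconds, but the seconds math is the same.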
@@ -169,19 +169,27 @@ cifs_read_super(struct super_block *sb)
        else
                sb->s_maxbytes = MAX_NON_LFS;

        /* BB FIXME fix time_gran to be larger for LANMAN sessions */
        /* Some very old servers like DOS and OS/2 used 2 second granularity */
        if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
            ((tcon->ses->capabilities &
              tcon->ses->server->vals->cap_nt_find) == 0) &&
            !tcon->unix_ext) {
                sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
                ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
                sb->s_time_min = ts.tv_sec;
                ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
                                    cpu_to_le16(SMB_TIME_MAX), 0);
                sb->s_time_max = ts.tv_sec;
        } else {
                /*
                 * Almost every server, including all SMB2+, uses DCE TIME
                 * ie 100 nanosecond units, since 1601. See MS-DTYP and MS-FSCC
                 */
                sb->s_time_gran = 100;

                if (tcon->unix_ext) {
                        ts = cifs_NTtimeToUnix(0);
                        sb->s_time_min = ts.tv_sec;
                        ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
                        sb->s_time_max = ts.tv_sec;
                } else {
                        ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
                        sb->s_time_min = ts.tv_sec;
                        ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
                                            cpu_to_le16(SMB_TIME_MAX), 0);
                        sb->s_time_max = ts.tv_sec;
                }
        }

        sb->s_magic = CIFS_MAGIC_NUMBER;
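The legacy branch in the hunk above bottoms out in the classic DOS/FAT packed date and time fields, which is where both the 1980 lower bound and the 2-second resolution come from (the seconds field only stores 2-second units, so the VFS granularity is capped at 1 second for those servers). Below is a rough userspace sketch of that decoding; the helper name and the example packed values are hypothetical and are not the kernel's cnvrtDosUnixTm() or the SMB_DATE_MIN/SMB_DATE_MAX/SMB_TIME_MAX definitions.

#define _DEFAULT_SOURCE         /* for timegm() on glibc */
#include <stdio.h>
#include <stdint.h>
#include <time.h>

/*
 * Classic DOS/FAT packing:
 *   date: bits 15-9 years since 1980, bits 8-5 month, bits 4-0 day
 *   time: bits 15-11 hours, bits 10-5 minutes, bits 4-0 seconds / 2
 */
static time_t dos_to_unix(uint16_t date, uint16_t dostime)
{
        struct tm tm = {
                .tm_year = ((date >> 9) & 0x7f) + 80,   /* struct tm counts from 1900 */
                .tm_mon  = ((date >> 5) & 0x0f) - 1,
                .tm_mday = date & 0x1f,
                .tm_hour = (dostime >> 11) & 0x1f,
                .tm_min  = (dostime >> 5) & 0x3f,
                .tm_sec  = (dostime & 0x1f) * 2,        /* 2-second units */
        };
        return timegm(&tm);     /* interpret the fields as UTC */
}

int main(void)
{
        /* hypothetical packed values: 1980-01-01 00:00:00 and 2107-12-31 23:59:58 */
        uint16_t dmin = (0 << 9) | (1 << 5) | 1;
        uint16_t dmax = (127 << 9) | (12 << 5) | 31;
        uint16_t tmax = (23 << 11) | (59 << 5) | 29;

        printf("min ~ %lld\n", (long long)dos_to_unix(dmin, 0));
        printf("max ~ %lld\n", (long long)dos_to_unix(dmax, tmax));
        return 0;
}

The kernel conversion works on the little-endian on-the-wire representation, hence the cpu_to_le16() wrappers around the constants in the hunk above.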