forked from OSchip/llvm-project
[sanitizer] Port sanitizer_common to LoongArch
Initial libsanitizer support for LoongArch. It survived all GCC UBSan tests.

Major changes:

1. The LoongArch port of the Linux kernel only supports `statx` for `stat` and
   its family of calls, so we add `statx_to_stat` and use it for `stat`-like
   libcalls. The logic is "borrowed" from glibc.
2. `sanitizer_syscall_linux_loongarch64.inc` is mostly duplicated from the
   RISC-V port, as the syscall interface is almost the same.

Reviewed By: SixWeining, MaskRay, XiaodongLoong, vitalybuka

Differential Revision: https://reviews.llvm.org/D129371
This commit is contained in:
parent f72e0a8786
commit dbec35ccf8
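For readers unfamiliar with the statx-only stat interface mentioned in change 1, here is a standalone sketch, not part of the patch: it assumes glibc >= 2.28 for the statx() wrapper, and the helper name fill_stat_from_statx is made up for illustration. It mirrors the field-by-field conversion the new statx_to_stat() in the diff below performs.

// Illustrative only: emulate stat(2) on top of statx(2), roughly the way
// the patch does inside the sanitizer runtime.
#ifndef _GNU_SOURCE
#define _GNU_SOURCE 1  // statx() and AT_NO_AUTOMOUNT need _GNU_SOURCE
#endif
#include <fcntl.h>          // AT_FDCWD, AT_NO_AUTOMOUNT
#include <sys/stat.h>       // struct stat, struct statx, statx(), STATX_BASIC_STATS
#include <sys/sysmacros.h>  // makedev()
#include <cstdio>
#include <cstring>

// Hypothetical helper mirroring the patch's statx_to_stat().
static void fill_stat_from_statx(const struct statx &x, struct stat *out) {
  std::memset(out, 0, sizeof(*out));
  out->st_dev = makedev(x.stx_dev_major, x.stx_dev_minor);
  out->st_ino = x.stx_ino;
  out->st_mode = x.stx_mode;
  out->st_nlink = x.stx_nlink;
  out->st_uid = x.stx_uid;
  out->st_gid = x.stx_gid;
  out->st_rdev = makedev(x.stx_rdev_major, x.stx_rdev_minor);
  out->st_size = x.stx_size;
  out->st_blksize = x.stx_blksize;
  out->st_blocks = x.stx_blocks;
  out->st_atim.tv_sec = x.stx_atime.tv_sec;  // nanosecond timestamps carry over
  out->st_atim.tv_nsec = x.stx_atime.tv_nsec;
  out->st_mtim.tv_sec = x.stx_mtime.tv_sec;
  out->st_mtim.tv_nsec = x.stx_mtime.tv_nsec;
  out->st_ctim.tv_sec = x.stx_ctime.tv_sec;
  out->st_ctim.tv_nsec = x.stx_ctime.tv_nsec;
}

int main(int argc, char **argv) {
  const char *path = argc > 1 ? argv[1] : "/";
  struct statx x;
  // statx(2) is the only stat-family syscall the LoongArch kernel port provides.
  if (statx(AT_FDCWD, path, AT_NO_AUTOMOUNT, STATX_BASIC_STATS, &x) != 0) {
    std::perror("statx");
    return 1;
  }
  struct stat st;
  fill_stat_from_statx(x, &st);
  std::printf("%s: size=%lld mode=%o\n", path, (long long)st.st_size,
              (unsigned)st.st_mode & 07777);
  return 0;
}

Compiled and run on any recent Linux machine, this should print the same size and mode that a plain stat() call would report.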
@@ -3,6 +3,7 @@ set(ARM32 arm armhf)
 set(HEXAGON hexagon)
 set(X86 i386)
 set(X86_64 x86_64)
+set(LOONGARCH64 loongarch64)
 set(MIPS32 mips mipsel)
 set(MIPS64 mips64 mips64el)
 set(PPC32 powerpc powerpcspe)
@@ -24,7 +25,7 @@ endif()

 set(ALL_SANITIZER_COMMON_SUPPORTED_ARCH ${X86} ${X86_64} ${PPC64} ${RISCV64}
     ${ARM32} ${ARM64} ${MIPS32} ${MIPS64} ${S390X} ${SPARC} ${SPARCV9}
-    ${HEXAGON})
+    ${HEXAGON} ${LOONGARCH64})
 set(ALL_ASAN_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64} ${RISCV64}
     ${MIPS32} ${MIPS64} ${PPC64} ${S390X} ${SPARC} ${SPARCV9} ${HEXAGON})
 set(ALL_DFSAN_SUPPORTED_ARCH ${X86_64} ${MIPS64} ${ARM64})
@@ -190,6 +190,7 @@ set(SANITIZER_IMPL_HEADERS
   sanitizer_syscall_linux_arm.inc
   sanitizer_syscall_linux_x86_64.inc
   sanitizer_syscall_linux_riscv64.inc
+  sanitizer_syscall_linux_loongarch64.inc
   sanitizer_syscalls_netbsd.inc
   sanitizer_thread_registry.h
   sanitizer_thread_safety.h
@@ -78,6 +78,10 @@
 #include <sys/personality.h>
 #endif

+#if SANITIZER_LINUX && defined(__loongarch__)
+#  include <sys/sysmacros.h>
+#endif
+
 #if SANITIZER_FREEBSD
 #include <sys/exec.h>
 #include <sys/procctl.h>
@@ -188,6 +192,8 @@ ScopedBlockSignals::~ScopedBlockSignals() { SetSigProcMask(&saved_, nullptr); }
 #  include "sanitizer_syscall_linux_arm.inc"
 # elif SANITIZER_LINUX && defined(__hexagon__)
 #  include "sanitizer_syscall_linux_hexagon.inc"
+# elif SANITIZER_LINUX && SANITIZER_LOONGARCH64
+#  include "sanitizer_syscall_linux_loongarch64.inc"
 # else
 #  include "sanitizer_syscall_generic.inc"
 # endif
@@ -290,6 +296,28 @@ static void stat64_to_stat(struct stat64 *in, struct stat *out) {
 }
 #endif

+#if SANITIZER_LINUX && defined(__loongarch__)
+static void statx_to_stat(struct statx *in, struct stat *out) {
+  internal_memset(out, 0, sizeof(*out));
+  out->st_dev = makedev(in->stx_dev_major, in->stx_dev_minor);
+  out->st_ino = in->stx_ino;
+  out->st_mode = in->stx_mode;
+  out->st_nlink = in->stx_nlink;
+  out->st_uid = in->stx_uid;
+  out->st_gid = in->stx_gid;
+  out->st_rdev = makedev(in->stx_rdev_major, in->stx_rdev_minor);
+  out->st_size = in->stx_size;
+  out->st_blksize = in->stx_blksize;
+  out->st_blocks = in->stx_blocks;
+  out->st_atime = in->stx_atime.tv_sec;
+  out->st_atim.tv_nsec = in->stx_atime.tv_nsec;
+  out->st_mtime = in->stx_mtime.tv_sec;
+  out->st_mtim.tv_nsec = in->stx_mtime.tv_nsec;
+  out->st_ctime = in->stx_ctime.tv_sec;
+  out->st_ctim.tv_nsec = in->stx_ctime.tv_nsec;
+}
+#endif
+
 #if SANITIZER_MIPS64
 // Undefine compatibility macros from <sys/stat.h>
 // so that they would not clash with the kernel_stat
@@ -341,52 +369,65 @@ static void kernel_stat_to_stat(struct kernel_stat *in, struct stat *out) {
 #endif

 uptr internal_stat(const char *path, void *buf) {
-#if SANITIZER_FREEBSD
+#  if SANITIZER_FREEBSD
   return internal_syscall(SYSCALL(fstatat), AT_FDCWD, (uptr)path, (uptr)buf, 0);
-#elif SANITIZER_LINUX
-#  if (SANITIZER_WORDSIZE == 64 || SANITIZER_X32 || \
+#  elif SANITIZER_LINUX
+#    if defined(__loongarch__)
+  struct statx bufx;
+  int res = internal_syscall(SYSCALL(statx), AT_FDCWD, (uptr)path,
+                             AT_NO_AUTOMOUNT, STATX_BASIC_STATS, (uptr)&bufx);
+  statx_to_stat(&bufx, (struct stat *)buf);
+  return res;
+#    elif (SANITIZER_WORDSIZE == 64 || SANITIZER_X32 || \
        (defined(__mips__) && _MIPS_SIM == _ABIN32)) && \
-      !SANITIZER_SPARC
+        !SANITIZER_SPARC
   return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path, (uptr)buf,
                           0);
-#  else
+#    else
   struct stat64 buf64;
   int res = internal_syscall(SYSCALL(fstatat64), AT_FDCWD, (uptr)path,
                              (uptr)&buf64, 0);
   stat64_to_stat(&buf64, (struct stat *)buf);
   return res;
-#  endif
-#else
+#    endif
+#  else
   struct stat64 buf64;
   int res = internal_syscall(SYSCALL(stat64), path, &buf64);
   stat64_to_stat(&buf64, (struct stat *)buf);
   return res;
-#endif
+#  endif
 }

 uptr internal_lstat(const char *path, void *buf) {
-#if SANITIZER_FREEBSD
+#  if SANITIZER_FREEBSD
   return internal_syscall(SYSCALL(fstatat), AT_FDCWD, (uptr)path, (uptr)buf,
                           AT_SYMLINK_NOFOLLOW);
-#elif SANITIZER_LINUX
-#  if (defined(_LP64) || SANITIZER_X32 || \
+#  elif SANITIZER_LINUX
+#    if defined(__loongarch__)
+  struct statx bufx;
+  int res = internal_syscall(SYSCALL(statx), AT_FDCWD, (uptr)path,
+                             AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT,
+                             STATX_BASIC_STATS, (uptr)&bufx);
+  statx_to_stat(&bufx, (struct stat *)buf);
+  return res;
+#    elif (defined(_LP64) || SANITIZER_X32 || \
        (defined(__mips__) && _MIPS_SIM == _ABIN32)) && \
-      !SANITIZER_SPARC
+        !SANITIZER_SPARC
   return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path, (uptr)buf,
                           AT_SYMLINK_NOFOLLOW);
-#  else
+#    else
   struct stat64 buf64;
   int res = internal_syscall(SYSCALL(fstatat64), AT_FDCWD, (uptr)path,
                              (uptr)&buf64, AT_SYMLINK_NOFOLLOW);
   stat64_to_stat(&buf64, (struct stat *)buf);
   return res;
-#  endif
-#else
+#    endif
+#  else
   struct stat64 buf64;
   int res = internal_syscall(SYSCALL(lstat64), path, &buf64);
   stat64_to_stat(&buf64, (struct stat *)buf);
   return res;
-#endif
+#  endif
 }

 uptr internal_fstat(fd_t fd, void *buf) {
@@ -397,9 +438,15 @@ uptr internal_fstat(fd_t fd, void *buf) {
   int res = internal_syscall(SYSCALL(fstat), fd, &kbuf);
   kernel_stat_to_stat(&kbuf, (struct stat *)buf);
   return res;
-# else
+#  elif SANITIZER_LINUX && defined(__loongarch__)
+  struct statx bufx;
+  int res = internal_syscall(SYSCALL(statx), fd, 0, AT_EMPTY_PATH,
+                             STATX_BASIC_STATS, (uptr)&bufx);
+  statx_to_stat(&bufx, (struct stat *)buf);
+  return res;
+#  else
   return internal_syscall(SYSCALL(fstat), fd, (uptr)buf);
-# endif
+#  endif
 #else
   struct stat64 buf64;
   int res = internal_syscall(SYSCALL(fstat64), fd, &buf64);
@@ -445,15 +492,15 @@ uptr internal_unlink(const char *path) {
 }

 uptr internal_rename(const char *oldpath, const char *newpath) {
-#if defined(__riscv) && defined(__linux__)
+#  if (defined(__riscv) || defined(__loongarch__)) && defined(__linux__)
   return internal_syscall(SYSCALL(renameat2), AT_FDCWD, (uptr)oldpath, AT_FDCWD,
                           (uptr)newpath, 0);
-# elif SANITIZER_LINUX
+#  elif SANITIZER_LINUX
   return internal_syscall(SYSCALL(renameat), AT_FDCWD, (uptr)oldpath, AT_FDCWD,
                           (uptr)newpath);
-# else
+#  else
   return internal_syscall(SYSCALL(rename), (uptr)oldpath, (uptr)newpath);
-# endif
+#  endif
 }

 uptr internal_sched_yield() {
@@ -2176,6 +2223,11 @@ static void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
   *pc = ucontext->uc_mcontext.pc;
   *bp = ucontext->uc_mcontext.r30;
   *sp = ucontext->uc_mcontext.r29;
+# elif defined(__loongarch__)
+  ucontext_t *ucontext = (ucontext_t *)context;
+  *pc = ucontext->uc_mcontext.__pc;
+  *bp = ucontext->uc_mcontext.__gregs[22];
+  *sp = ucontext->uc_mcontext.__gregs[3];
 # else
 #  error "Unsupported arch"
 # endif
@@ -272,6 +272,12 @@
 # define SANITIZER_RISCV64 0
 #endif

+#if defined(__loongarch_lp64)
+# define SANITIZER_LOONGARCH64 1
+#else
+# define SANITIZER_LOONGARCH64 0
+#endif
+
 // By default we allow to use SizeClassAllocator64 on 64-bit platform.
 // But in some cases (e.g. AArch64's 39-bit address space) SizeClassAllocator64
 // does not work well and we need to fallback to SizeClassAllocator32.
@@ -59,7 +59,8 @@ using namespace __sanitizer;

 # if !defined(__powerpc64__) && !defined(__x86_64__) && \
     !defined(__aarch64__) && !defined(__mips__) && !defined(__s390__) && \
-    !defined(__sparc__) && !defined(__riscv) && !defined(__hexagon__)
+    !defined(__sparc__) && !defined(__riscv) && !defined(__hexagon__) && \
+    !defined(__loongarch__)
 COMPILER_CHECK(struct___old_kernel_stat_sz == sizeof(struct __old_kernel_stat));
 #endif

@@ -271,6 +271,10 @@ namespace __sanitizer {
     defined(__powerpc__) || defined(__s390__) || defined(__sparc__) || \
     defined(__hexagon__)
 #  define SIZEOF_STRUCT_USTAT 20
+# elif defined(__loongarch__)
+// Not used. The minimum Glibc version available for LoongArch is 2.36
+// so ustat() wrapper is already gone.
+#  define SIZEOF_STRUCT_USTAT 0
 # else
 #  error Unknown size of struct ustat
 # endif
@@ -122,6 +122,9 @@ const unsigned struct_kernel_stat64_sz = 0;  // RISCV64 does not use stat64
 # elif defined(__hexagon__)
 const unsigned struct_kernel_stat_sz = 128;
 const unsigned struct_kernel_stat64_sz = 0;
+# elif defined(__loongarch__)
+const unsigned struct_kernel_stat_sz = 128;
+const unsigned struct_kernel_stat64_sz = 0;
 # endif
 struct __sanitizer_perf_event_attr {
   unsigned type;
@@ -142,7 +145,7 @@ const unsigned struct_kexec_segment_sz = 4 * sizeof(unsigned long);

 #if SANITIZER_LINUX

-#if defined(__powerpc64__) || defined(__s390__)
+#if defined(__powerpc64__) || defined(__s390__) || defined(__loongarch__)
 const unsigned struct___old_kernel_stat_sz = 0;
 #elif !defined(__sparc__)
 const unsigned struct___old_kernel_stat_sz = 32;
@@ -0,0 +1,167 @@
+//===-- sanitizer_syscall_linux_loongarch64.inc -----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementations of internal_syscall and internal_iserror for
+// Linux/loongarch64.
+//
+//===----------------------------------------------------------------------===//
+
+// About local register variables:
+// https://gcc.gnu.org/onlinedocs/gcc/Local-Register-Variables.html#Local-Register-Variables
+//
+// Kernel ABI...
+// syscall number is passed in a7
+// (http://man7.org/linux/man-pages/man2/syscall.2.html) results are return in
+// a0 and a1 (http://man7.org/linux/man-pages/man2/syscall.2.html) arguments
+// are passed in: a0-a7 (confirmed by inspecting glibc sources).
+#define SYSCALL(name) __NR_##name
+
+#define INTERNAL_SYSCALL_CLOBBERS "memory"
+
+static uptr __internal_syscall(u64 nr) {
+  register u64 a7 asm("a7") = nr;
+  register u64 a0 asm("a0");
+  __asm__ volatile("syscall 0\n\t"
+                   : "=r"(a0)
+                   : "r"(a7)
+                   : INTERNAL_SYSCALL_CLOBBERS);
+  return a0;
+}
+#define __internal_syscall0(n) (__internal_syscall)(n)
+
+static uptr __internal_syscall(u64 nr, u64 arg1) {
+  register u64 a7 asm("a7") = nr;
+  register u64 a0 asm("a0") = arg1;
+  __asm__ volatile("syscall 0\n\t"
+                   : "+r"(a0)
+                   : "r"(a7)
+                   : INTERNAL_SYSCALL_CLOBBERS);
+  return a0;
+}
+#define __internal_syscall1(n, a1) (__internal_syscall)(n, (u64)(a1))
+
+static uptr __internal_syscall(u64 nr, u64 arg1, long arg2) {
+  register u64 a7 asm("a7") = nr;
+  register u64 a0 asm("a0") = arg1;
+  register u64 a1 asm("a1") = arg2;
+  __asm__ volatile("syscall 0\n\t"
+                   : "+r"(a0)
+                   : "r"(a7), "r"(a1)
+                   : INTERNAL_SYSCALL_CLOBBERS);
+  return a0;
+}
+#define __internal_syscall2(n, a1, a2) \
+  (__internal_syscall)(n, (u64)(a1), (long)(a2))
+
+static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3) {
+  register u64 a7 asm("a7") = nr;
+  register u64 a0 asm("a0") = arg1;
+  register u64 a1 asm("a1") = arg2;
+  register u64 a2 asm("a2") = arg3;
+  __asm__ volatile("syscall 0\n\t"
+                   : "+r"(a0)
+                   : "r"(a7), "r"(a1), "r"(a2)
+                   : INTERNAL_SYSCALL_CLOBBERS);
+  return a0;
+}
+#define __internal_syscall3(n, a1, a2, a3) \
+  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3))
+
+static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3,
+                               u64 arg4) {
+  register u64 a7 asm("a7") = nr;
+  register u64 a0 asm("a0") = arg1;
+  register u64 a1 asm("a1") = arg2;
+  register u64 a2 asm("a2") = arg3;
+  register u64 a3 asm("a3") = arg4;
+  __asm__ volatile("syscall 0\n\t"
+                   : "+r"(a0)
+                   : "r"(a7), "r"(a1), "r"(a2), "r"(a3)
+                   : INTERNAL_SYSCALL_CLOBBERS);
+  return a0;
+}
+#define __internal_syscall4(n, a1, a2, a3, a4) \
+  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4))
+
+static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4,
+                               long arg5) {
+  register u64 a7 asm("a7") = nr;
+  register u64 a0 asm("a0") = arg1;
+  register u64 a1 asm("a1") = arg2;
+  register u64 a2 asm("a2") = arg3;
+  register u64 a3 asm("a3") = arg4;
+  register u64 a4 asm("a4") = arg5;
+  __asm__ volatile("syscall 0\n\t"
+                   : "+r"(a0)
+                   : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4)
+                   : INTERNAL_SYSCALL_CLOBBERS);
+  return a0;
+}
+#define __internal_syscall5(n, a1, a2, a3, a4, a5) \
+  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \
+                       (u64)(a5))
+
+static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4,
+                               long arg5, long arg6) {
+  register u64 a7 asm("a7") = nr;
+  register u64 a0 asm("a0") = arg1;
+  register u64 a1 asm("a1") = arg2;
+  register u64 a2 asm("a2") = arg3;
+  register u64 a3 asm("a3") = arg4;
+  register u64 a4 asm("a4") = arg5;
+  register u64 a5 asm("a5") = arg6;
+  __asm__ volatile("syscall 0\n\t"
+                   : "+r"(a0)
+                   : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4), "r"(a5)
+                   : INTERNAL_SYSCALL_CLOBBERS);
+  return a0;
+}
+#define __internal_syscall6(n, a1, a2, a3, a4, a5, a6) \
+  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \
+                       (u64)(a5), (long)(a6))
+
+static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4,
+                               long arg5, long arg6, long arg7) {
+  register u64 a7 asm("a7") = nr;
+  register u64 a0 asm("a0") = arg1;
+  register u64 a1 asm("a1") = arg2;
+  register u64 a2 asm("a2") = arg3;
+  register u64 a3 asm("a3") = arg4;
+  register u64 a4 asm("a4") = arg5;
+  register u64 a5 asm("a5") = arg6;
+  register u64 a6 asm("a6") = arg7;
+  __asm__ volatile("syscall 0\n\t"
+                   : "+r"(a0)
+                   : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4), "r"(a5),
+                     "r"(a6)
+                   : INTERNAL_SYSCALL_CLOBBERS);
+  return a0;
+}
+#define __internal_syscall7(n, a1, a2, a3, a4, a5, a6, a7) \
+  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \
+                       (u64)(a5), (long)(a6), (long)(a7))
+
+#define __SYSCALL_NARGS_X(a1, a2, a3, a4, a5, a6, a7, a8, n, ...) n
+#define __SYSCALL_NARGS(...) \
+  __SYSCALL_NARGS_X(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0, )
+#define __SYSCALL_CONCAT_X(a, b) a##b
+#define __SYSCALL_CONCAT(a, b) __SYSCALL_CONCAT_X(a, b)
+#define __SYSCALL_DISP(b, ...) \
+  __SYSCALL_CONCAT(b, __SYSCALL_NARGS(__VA_ARGS__))(__VA_ARGS__)
+
+#define internal_syscall(...) __SYSCALL_DISP(__internal_syscall, __VA_ARGS__)
+
+// Helper function used to avoid clobbering of errno.
+bool internal_iserror(uptr retval, int *internal_errno) {
+  if (retval >= (uptr)-4095) {
+    if (internal_errno)
+      *internal_errno = -retval;
+    return true;
+  }
+  return false;
+}
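Two pieces of the new file are easy to miss: the `__SYSCALL_NARGS` machinery selects one of the `__internal_syscallN` overloads purely by counting macro arguments, and `internal_iserror` relies on the Linux convention that raw syscall returns in [-4095, -1] encode -errno. The host-only sketch below is hypothetical, with stand-in typedefs and print statements instead of real syscalls; the helper name is_error is made up. It demonstrates both mechanisms.

// Illustrative only: the argument-counting dispatch and errno decoding used
// by the .inc above, with printf stand-ins instead of the LoongArch asm.
#include <cstdio>

typedef unsigned long uptr;  // stand-ins for the sanitizer typedefs
typedef unsigned long long u64;

// Per-arity overloads; the real ones issue "syscall 0" with a0-a7 loaded.
static uptr __internal_syscall(u64 nr) {
  std::printf("syscall %llu with 0 args\n", nr);
  return 0;
}
static uptr __internal_syscall(u64 nr, u64) {
  std::printf("syscall %llu with 1 arg\n", nr);
  return 0;
}
static uptr __internal_syscall(u64 nr, u64, long) {
  std::printf("syscall %llu with 2 args\n", nr);
  return 0;
}
#define __internal_syscall0(n) (__internal_syscall)(n)
#define __internal_syscall1(n, a1) (__internal_syscall)(n, (u64)(a1))
#define __internal_syscall2(n, a1, a2) \
  (__internal_syscall)(n, (u64)(a1), (long)(a2))

// With N arguments after the syscall number, the literal N lands in the
// ninth slot of __SYSCALL_NARGS_X, so the dispatcher pastes it onto the name.
#define __SYSCALL_NARGS_X(a1, a2, a3, a4, a5, a6, a7, a8, n, ...) n
#define __SYSCALL_NARGS(...) \
  __SYSCALL_NARGS_X(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0, )
#define __SYSCALL_CONCAT_X(a, b) a##b
#define __SYSCALL_CONCAT(a, b) __SYSCALL_CONCAT_X(a, b)
#define __SYSCALL_DISP(b, ...) \
  __SYSCALL_CONCAT(b, __SYSCALL_NARGS(__VA_ARGS__))(__VA_ARGS__)
#define internal_syscall(...) __SYSCALL_DISP(__internal_syscall, __VA_ARGS__)

// Same convention as internal_iserror: values in [-4095, -1] are -errno.
static bool is_error(uptr retval, int *err) {
  if (retval >= (uptr)-4095) {
    if (err)
      *err = -retval;
    return true;
  }
  return false;
}

int main() {
  internal_syscall(93);        // expands to __internal_syscall0(93)
  internal_syscall(57, 3);     // expands to __internal_syscall1(57, 3)
  internal_syscall(63, 3, 0);  // expands to __internal_syscall2(63, 3, 0)
  int err = 0;
  if (is_error((uptr)-9, &err))  // a raw kernel-style return value of -9
    std::printf("failed, errno=%d\n", err);
  return 0;
}

Dropped into a .cpp file, this compiles with any C++11 compiler and prints the arity selected for each call; on real LoongArch hardware the same dispatch resolves to the inline-asm overloads shown in the diff above.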