mm: remove kern_addr_valid() completely
Most architectures (except arm64/x86/sparc) simply return 1 for kern_addr_valid(), which is only used in read_kcore(), and read_kcore() already calls copy_from_kernel_nofault(), which itself checks whether the address is a valid kernel address. There is therefore no need for kern_addr_valid(), so remove it.

Link: https://lkml.kernel.org/r/20221018074014.185687-1-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Acked-by: Geert Uytterhoeven <geert@linux-m68k.org> [m68k]
Acked-by: Heiko Carstens <hca@linux.ibm.com> [s390]
Acked-by: Christoph Hellwig <hch@lst.de>
Acked-by: Helge Deller <deller@gmx.de> [parisc]
Acked-by: Michael Ellerman <mpe@ellerman.id.au> [powerpc]
Acked-by: Guo Ren <guoren@kernel.org> [csky]
Acked-by: Catalin Marinas <catalin.marinas@arm.com> [arm64]
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Anton Ivanov <anton.ivanov@cambridgegreys.com>
Cc: <aou@eecs.berkeley.edu>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Chris Zankel <chris@zankel.net>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Dinh Nguyen <dinguyen@kernel.org>
Cc: Greg Ungerer <gerg@linux-m68k.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Cc: Johannes Berg <johannes@sipsolutions.net>
Cc: Jonas Bonn <jonas@southpole.se>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Palmer Dabbelt <palmer@rivosinc.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Richard Henderson <richard.henderson@linaro.org>
Cc: Richard Weinberger <richard@nod.at>
Cc: Rich Felker <dalias@libc.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Vineet Gupta <vgupta@kernel.org>
Cc: Will Deacon <will@kernel.org>
Cc: Xuerui Wang <kernel@xen0n.name>
Cc: Yoshinori Sato <ysato@users.osdn.me>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit e025ab842e (parent 65f199b2b4)
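For context, the redundancy the patch relies on can be sketched as follows. This is an illustrative example, not code from the patch: the helper name probe_kernel_addr() is made up, and the only kernel API it assumes is copy_from_kernel_nofault() from <linux/uaccess.h>, which returns a negative error code when the source is not a readable kernel address.

#include <linux/types.h>
#include <linux/uaccess.h>

/*
 * Hypothetical helper: report whether a kernel address can be read.
 * copy_from_kernel_nofault() already fails (-EFAULT/-ERANGE) on an
 * unmapped or otherwise invalid address, so no separate
 * kern_addr_valid()-style page-table walk is needed before the copy.
 */
static bool probe_kernel_addr(const void *addr)
{
	char byte;

	return copy_from_kernel_nofault(&byte, addr, sizeof(byte)) == 0;
}

Callers such as read_kcore() follow the same pattern: attempt the copy and fall back (there, to clearing the user buffer) when it fails, as in the fs/proc/kcore.c hunk at the end of this diff.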
@@ -313,8 +313,6 @@ extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
 #define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x)	((pte_t) { (x).val })
 
-#define kern_addr_valid(addr)	(1)
-
 #define pte_ERROR(e) \
 	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
 #define pmd_ERROR(e) \
@@ -120,8 +120,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 #define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x)	((pte_t) { (x).val })
 
-#define kern_addr_valid(addr)	(1)
-
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 #include <asm/hugepage.h>
 #endif
@@ -21,8 +21,6 @@
 #define pgd_none(pgd)		(0)
 #define pgd_bad(pgd)		(0)
 #define pgd_clear(pgdp)
-#define kern_addr_valid(addr)	(1)
-/* FIXME */
 /*
  * PMD_SHIFT determines the size of the area a second-level page table can map
  * PGDIR_SHIFT determines what a third-level page table entry can map
@@ -298,10 +298,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 */
 #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)
 
-/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
-/* FIXME: this is not correct */
-#define kern_addr_valid(addr)	(1)
-
 /*
  * We provide our own arch_get_unmapped_area to cope with VIPT caches.
  */
@@ -1021,8 +1021,6 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
 */
 #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)
 
-extern int kern_addr_valid(unsigned long addr);
-
 #ifdef CONFIG_ARM64_MTE
 
 #define __HAVE_ARCH_PREPARE_TO_SWAP
@@ -814,53 +814,6 @@ void __init paging_init(void)
 	create_idmap();
 }
 
-/*
- * Check whether a kernel address is valid (derived from arch/x86/).
- */
-int kern_addr_valid(unsigned long addr)
-{
-	pgd_t *pgdp;
-	p4d_t *p4dp;
-	pud_t *pudp, pud;
-	pmd_t *pmdp, pmd;
-	pte_t *ptep, pte;
-
-	addr = arch_kasan_reset_tag(addr);
-	if ((((long)addr) >> VA_BITS) != -1UL)
-		return 0;
-
-	pgdp = pgd_offset_k(addr);
-	if (pgd_none(READ_ONCE(*pgdp)))
-		return 0;
-
-	p4dp = p4d_offset(pgdp, addr);
-	if (p4d_none(READ_ONCE(*p4dp)))
-		return 0;
-
-	pudp = pud_offset(p4dp, addr);
-	pud = READ_ONCE(*pudp);
-	if (pud_none(pud))
-		return 0;
-
-	if (pud_sect(pud))
-		return pfn_valid(pud_pfn(pud));
-
-	pmdp = pmd_offset(pudp, addr);
-	pmd = READ_ONCE(*pmdp);
-	if (pmd_none(pmd))
-		return 0;
-
-	if (pmd_sect(pmd))
-		return pfn_valid(pmd_pfn(pmd));
-
-	ptep = pte_offset_kernel(pmdp, addr);
-	pte = READ_ONCE(*ptep);
-	if (pte_none(pte))
-		return 0;
-
-	return pfn_valid(pte_pfn(pte));
-}
-
 #ifdef CONFIG_MEMORY_HOTPLUG
 static void free_hotplug_page_range(struct page *page, size_t size,
 				    struct vmem_altmap *altmap)
@@ -201,8 +201,7 @@ void __kernel_map_pages(struct page *page, int numpages, int enable)
 
 /*
  * This function is used to determine if a linear map page has been marked as
- * not-valid. Walk the page table and check the PTE_VALID bit. This is based
- * on kern_addr_valid(), which almost does what we need.
+ * not-valid. Walk the page table and check the PTE_VALID bit.
  *
  * Because this is only called on the kernel linear map, p?d_sect() implies
  * p?d_present(). When debug_pagealloc is enabled, sections mappings are
@@ -249,9 +249,6 @@ extern void paging_init(void);
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 		      pte_t *pte);
 
-/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
-#define kern_addr_valid(addr)	(1)
-
 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
 	remap_pfn_range(vma, vaddr, pfn, size, prot)
 
@@ -131,13 +131,6 @@ static inline void clear_page(void *page)
 
 #define page_to_virt(page)	__va(page_to_phys(page))
 
-/*
- * For port to Hexagon Virtual Machine, MAYBE we check for attempts
- * to reference reserved HVM space, but in any case, the VM will be
- * protected.
- */
-#define kern_addr_valid(addr)	(1)
-
 #include <asm/mem-layout.h>
 #include <asm-generic/memory_model.h>
 /* XXX Todo: implement assembly-optimized version of getorder. */
@@ -181,22 +181,6 @@ ia64_phys_addr_valid (unsigned long addr)
 	return (addr & (local_cpu_data->unimpl_pa_mask)) == 0;
 }
 
-/*
- * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
- * memory. For the return value to be meaningful, ADDR must be >=
- * PAGE_OFFSET. This operation can be relatively expensive (e.g.,
- * require a hash-, or multi-level tree-lookup or something of that
- * sort) but it guarantees to return TRUE only if accessing the page
- * at that address does not cause an error. Note that there may be
- * addresses for which kern_addr_valid() returns FALSE even though an
- * access would not cause an error (e.g., this is typically true for
- * memory mapped I/O regions.
- *
- * XXX Need to implement this for IA-64.
- */
-#define kern_addr_valid(addr)	(1)
-
-
 /*
  * Now come the defines and routines to manage and access the three-level
  * page table.
@@ -421,8 +421,6 @@ static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
 	__update_tlb(vma, address, (pte_t *)pmdp);
 }
 
-#define kern_addr_valid(addr)	(1)
-
 static inline unsigned long pmd_pfn(pmd_t pmd)
 {
 	return (pmd_val(pmd) & _PFN_MASK) >> _PFN_SHIFT;
@@ -145,8 +145,6 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
 
 #endif	/* !__ASSEMBLY__ */
 
-#define kern_addr_valid(addr)	(1)
-
 /* MMU-specific headers */
 
 #ifdef CONFIG_SUN3
@@ -20,7 +20,6 @@
 #define pgd_none(pgd)		(0)
 #define pgd_bad(pgd)		(0)
 #define pgd_clear(pgdp)
-#define kern_addr_valid(addr)	(1)
 #define pmd_offset(a, b)	((void *)0)
 
 #define PAGE_NONE	__pgprot(0)
@@ -416,9 +416,6 @@ extern unsigned long iopa(unsigned long addr);
 #define IOMAP_NOCACHE_NONSER	2
 #define IOMAP_NO_COPYBACK	3
 
-/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
-#define kern_addr_valid(addr)	(1)
-
 void do_page_fault(struct pt_regs *regs, unsigned long address,
 		   unsigned long error_code);
 
@@ -550,8 +550,6 @@ static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
 	__update_tlb(vma, address, pte);
 }
 
-#define kern_addr_valid(addr)	(1)
-
 /*
  * Allow physical addresses to be fixed up to help 36-bit peripherals.
  */
@@ -249,8 +249,6 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
 #define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
 #define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
 
-#define kern_addr_valid(addr)	(1)
-
 extern void __init paging_init(void);
 extern void __init mmu_init(void);
 
@@ -395,8 +395,6 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
 #define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x)	((pte_t) { (x).val })
 
-#define kern_addr_valid(addr)	(1)
-
 typedef pte_t *pte_addr_t;
 
 #endif /* __ASSEMBLY__ */
@@ -23,21 +23,6 @@
 #include <asm/processor.h>
 #include <asm/cache.h>
 
-/*
- * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
- * memory. For the return value to be meaningful, ADDR must be >=
- * PAGE_OFFSET. This operation can be relatively expensive (e.g.,
- * require a hash-, or multi-level tree-lookup or something of that
- * sort) but it guarantees to return TRUE only if accessing the page
- * at that address does not cause an error. Note that there may be
- * addresses for which kern_addr_valid() returns FALSE even though an
- * access would not cause an error (e.g., this is typically true for
- * memory mapped I/O regions.
- *
- * XXX Need to implement this for parisc.
- */
-#define kern_addr_valid(addr)	(1)
-
 /* This is for the serialization of PxTLB broadcasts. At least on the N class
  * systems, only one PxTLB inter processor broadcast can be active at any one
  * time on the Merced bus. */
@@ -81,13 +81,6 @@ void poking_init(void);
 extern unsigned long ioremap_bot;
 extern const pgprot_t protection_map[16];
 
-/*
- * kern_addr_valid is intended to indicate whether an address is a valid
- * kernel address. Most 32-bit archs define it as always true (like this)
- * but most 64-bit archs actually perform a test. What should we do here?
- */
-#define kern_addr_valid(addr)	(1)
-
 #ifndef CONFIG_TRANSPARENT_HUGEPAGE
 #define pmd_large(pmd)		0
 #endif
@@ -801,8 +801,6 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
 
 #endif /* !CONFIG_MMU */
 
-#define kern_addr_valid(addr)	(1) /* FIXME */
-
 extern char _start[];
 extern void *_dtb_early_va;
 extern uintptr_t _dtb_early_pa;
@@ -1773,8 +1773,6 @@ static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
 #define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x)	((pte_t) { (x).val })
 
-#define kern_addr_valid(addr)	(1)
-
 extern int vmem_add_mapping(unsigned long start, unsigned long size);
 extern void vmem_remove_mapping(unsigned long start, unsigned long size);
 extern int __vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot, bool alloc);
@@ -92,8 +92,6 @@ static inline unsigned long phys_addr_mask(void)
 
 typedef pte_t *pte_addr_t;
 
-#define kern_addr_valid(addr)	(1)
-
 #define pte_pfn(x)		((unsigned long)(((x).pte_low >> PAGE_SHIFT)))
 
 struct vm_area_struct;
@@ -368,12 +368,6 @@ __get_iospace (unsigned long addr)
 	}
 }
 
-extern unsigned long *sparc_valid_addr_bitmap;
-
-/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
-#define kern_addr_valid(addr) \
-	(test_bit(__pa((unsigned long)(addr))>>20, sparc_valid_addr_bitmap))
-
 /*
  * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
  * its high 4 bits. These macros/functions put it there or get it from there.
@@ -37,8 +37,7 @@
 
 #include "mm_32.h"
 
-unsigned long *sparc_valid_addr_bitmap;
-EXPORT_SYMBOL(sparc_valid_addr_bitmap);
+static unsigned long *sparc_valid_addr_bitmap;
 
 unsigned long phys_base;
 EXPORT_SYMBOL(phys_base);
@@ -1667,7 +1667,6 @@ bool kern_addr_valid(unsigned long addr)
 
 	return pfn_valid(pte_pfn(*pte));
 }
-EXPORT_SYMBOL(kern_addr_valid);
 
 static unsigned long __ref kernel_map_hugepud(unsigned long vstart,
 					      unsigned long vend,
@@ -298,8 +298,6 @@ extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
 	((swp_entry_t) { pte_val(pte_mkuptodate(pte)) })
 #define __swp_entry_to_pte(x)	((pte_t) { (x).val })
 
-#define kern_addr_valid(addr)	(1)
-
 /* Clear a kernel PTE and flush it from the TLB */
 #define kpte_clear_flush(ptep, vaddr)		\
 do {						\
@@ -47,15 +47,6 @@ do { \
 
 #endif /* !__ASSEMBLY__ */
 
-/*
- * kern_addr_valid() is (1) for FLATMEM and (0) for SPARSEMEM
- */
-#ifdef CONFIG_FLATMEM
-#define kern_addr_valid(addr)	(1)
-#else
-#define kern_addr_valid(kaddr)	(0)
-#endif
-
 /*
  * This is used to calculate the .brk reservation for initial pagetables.
  * Enough space is reserved to allocate pagetables sufficient to cover all
@@ -240,7 +240,6 @@ static inline void native_pgd_clear(pgd_t *pgd)
 #define __swp_entry_to_pte(x)	((pte_t) { .pte = (x).val })
 #define __swp_entry_to_pmd(x)	((pmd_t) { .pmd = (x).val })
 
-extern int kern_addr_valid(unsigned long addr);
 extern void cleanup_highmap(void);
 
 #define HAVE_ARCH_UNMAPPED_AREA
@@ -1416,47 +1416,6 @@ void mark_rodata_ro(void)
 	debug_checkwx();
 }
 
-int kern_addr_valid(unsigned long addr)
-{
-	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
-	pgd_t *pgd;
-	p4d_t *p4d;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *pte;
-
-	if (above != 0 && above != -1UL)
-		return 0;
-
-	pgd = pgd_offset_k(addr);
-	if (pgd_none(*pgd))
-		return 0;
-
-	p4d = p4d_offset(pgd, addr);
-	if (!p4d_present(*p4d))
-		return 0;
-
-	pud = pud_offset(p4d, addr);
-	if (!pud_present(*pud))
-		return 0;
-
-	if (pud_large(*pud))
-		return pfn_valid(pud_pfn(*pud));
-
-	pmd = pmd_offset(pud, addr);
-	if (!pmd_present(*pmd))
-		return 0;
-
-	if (pmd_large(*pmd))
-		return pfn_valid(pmd_pfn(*pmd));
-
-	pte = pte_offset_kernel(pmd, addr);
-	if (pte_none(*pte))
-		return 0;
-
-	return pfn_valid(pte_pfn(*pte));
-}
-
 /*
  * Block size is the minimum amount of memory which can be hotplugged or
  * hotremoved. It must be power of two and must be equal or larger than
@@ -386,8 +386,6 @@ ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 
 #else
 
-#define kern_addr_valid(addr)	(1)
-
 extern void update_mmu_cache(struct vm_area_struct * vma,
 			     unsigned long address, pte_t *ptep);
 
@@ -540,25 +540,17 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
 			fallthrough;
 		case KCORE_VMEMMAP:
 		case KCORE_TEXT:
-			if (kern_addr_valid(start)) {
-				/*
-				 * Using bounce buffer to bypass the
-				 * hardened user copy kernel text checks.
-				 */
-				if (copy_from_kernel_nofault(buf, (void *)start,
-						tsz)) {
-					if (clear_user(buffer, tsz)) {
-						ret = -EFAULT;
-						goto out;
-					}
-				} else {
-					if (copy_to_user(buffer, buf, tsz)) {
-						ret = -EFAULT;
-						goto out;
-					}
-				}
-			} else {
-				if (clear_user(buffer, tsz)) {
+			/*
+			 * Using bounce buffer to bypass the
+			 * hardened user copy kernel text checks.
+			 */
+			if (copy_from_kernel_nofault(buf, (void *)start, tsz)) {
+				if (clear_user(buffer, tsz)) {
+					ret = -EFAULT;
+					goto out;
+				}
+			} else {
+				if (copy_to_user(buffer, buf, tsz)) {
 					ret = -EFAULT;
 					goto out;
 				}