// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include <linux/backing-dev.h>
#include <linux/nmi.h>
#include "xfs_message.h"
#include "xfs_trace.h"
#include "xfs_linux.h"

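/*
 * Allocate memory for internal XFS use. Unless KM_MAYFAIL is set, this
 * retries forever, complaining every 100 attempts about a possible
 * allocation deadlock. Requests larger than xfs_kmem_alloc_by_vmalloc
 * pages are served by vmalloc rather than kmalloc; that threshold and
 * the *_dump_stack knobs appear to be tunables specific to this tree.
 */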
void *
kmem_alloc(size_t size, xfs_km_flags_t flags)
{
	int		retries = 0;
	gfp_t		lflags = kmem_flags_convert(flags);
	void		*ptr;

	trace_kmem_alloc(size, flags, _RET_IP_);

	if (xfs_kmem_alloc_by_vmalloc &&
	    size > (PAGE_SIZE * xfs_kmem_alloc_by_vmalloc) &&
	    xfs_kmem_alloc_large_dump_stack) {
		xfs_warn(NULL, "%s size: %zd larger than %ld",
			 __func__, size, PAGE_SIZE * xfs_kmem_alloc_by_vmalloc);
		dump_stack();
	}

	do {
		if (xfs_kmem_alloc_by_vmalloc &&
		    (size > PAGE_SIZE * xfs_kmem_alloc_by_vmalloc))
			ptr = __vmalloc(size, lflags, PAGE_KERNEL);
		else
			ptr = kmalloc(size, lflags);
		if (ptr || (flags & KM_MAYFAIL))
			return ptr;
		if (!(++retries % 100)) {
			xfs_err(NULL,
	"%s(%u) possible memory allocation deadlock size %u in %s (mode:0x%x), flags: 0x%x",
				current->comm, current->pid,
				(unsigned int)size, __func__, lflags, flags);
			if (xfs_kmem_fail_dump_stack == 1)
				dump_stack();
			else if (xfs_kmem_fail_dump_stack == 2)
				trigger_all_cpu_backtrace();
			else if (xfs_kmem_fail_dump_stack == 3)
				show_mem(0, NULL);
		}
		congestion_wait(BLK_RW_ASYNC, HZ/50);
	} while (1);
}
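
/*
 * Illustrative use only (not a caller from this file): code that must
 * not block indefinitely passes KM_MAYFAIL and handles the failure
 * itself, e.g.:
 *
 *	ptr = kmem_alloc(len, KM_NOFS | KM_MAYFAIL);
 *	if (!ptr)
 *		return -ENOMEM;
 */
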
/*
 * __vmalloc() will allocate data pages and auxiliary structures (e.g.
 * pagetables) with GFP_KERNEL, yet we may be under GFP_NOFS context here. Hence
 * we need to tell memory reclaim that we are in such a context via
 * PF_MEMALLOC_NOFS to prevent memory reclaim re-entering the filesystem here
 * and potentially deadlocking.
 */
static void *
__kmem_vmalloc(size_t size, xfs_km_flags_t flags)
{
	unsigned	nofs_flag = 0;
	void		*ptr;
	gfp_t		lflags = kmem_flags_convert(flags);

	if (flags & KM_NOFS)
		nofs_flag = memalloc_nofs_save();

	ptr = __vmalloc(size, lflags, PAGE_KERNEL);

	if (flags & KM_NOFS)
		memalloc_nofs_restore(nofs_flag);

	return ptr;
}

/*
 * Same as kmem_alloc_large, except we guarantee the buffer returned is aligned
 * to the @align_mask. We only guarantee alignment up to page size; alignment
 * is clamped at page size if the mask is larger. vmalloc always returns a
 * PAGE_SIZE aligned region.
 */
void *
kmem_alloc_io(size_t size, int align_mask, xfs_km_flags_t flags)
{
	void	*ptr;

	trace_kmem_alloc_io(size, flags, _RET_IP_);

	if (WARN_ON_ONCE(align_mask >= PAGE_SIZE))
		align_mask = PAGE_SIZE - 1;

	ptr = kmem_alloc(size, flags | KM_MAYFAIL);
	if (ptr) {
		if (!((uintptr_t)ptr & align_mask))
			return ptr;
		kfree(ptr);
	}
	return __kmem_vmalloc(size, flags);
}
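
/*
 * Illustrative use only, modelled on XFS log buffer allocation (the
 * caller and its sizes are assumptions, not code from this file): the
 * alignment mask is the device sector size minus one, e.g. 511 for a
 * 512 byte sector device:
 *
 *	bp = kmem_alloc_io(BBTOB(nbblks), align_mask, KM_MAYFAIL | KM_ZERO);
 */

/*
 * Allocate a buffer that may be too large for the slab allocator: try
 * kmem_alloc() first and fall back to __kmem_vmalloc() on failure.
 */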
void *
kmem_alloc_large(size_t size, xfs_km_flags_t flags)
{
	void	*ptr;

	trace_kmem_alloc_large(size, flags, _RET_IP_);

	ptr = kmem_alloc(size, flags | KM_MAYFAIL);
	if (ptr)
		return ptr;
	return __kmem_vmalloc(size, flags);
}
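
/*
 * Grow or shrink an existing kmem_alloc() buffer, with the same
 * retry-until-success semantics as kmem_alloc() unless KM_MAYFAIL is set.
 */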
void *
kmem_realloc(const void *old, size_t newsize, xfs_km_flags_t flags)
{
	int	retries = 0;
	gfp_t	lflags = kmem_flags_convert(flags);
	void	*ptr;

	trace_kmem_realloc(newsize, flags, _RET_IP_);

	do {
		ptr = krealloc(old, newsize, lflags);
		if (ptr || (flags & KM_MAYFAIL))
			return ptr;
		if (!(++retries % 100))
			xfs_err(NULL,
	"%s(%u) possible memory allocation deadlock size %zu in %s (mode:0x%x)",
				current->comm, current->pid,
				newsize, __func__, lflags);
		congestion_wait(BLK_RW_ASYNC, HZ/50);
	} while (1);
}
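
/*
 * Allocate an object from a slab cache (zone), again retrying forever
 * and warning periodically unless KM_MAYFAIL is set.
 */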
void *
kmem_zone_alloc(kmem_zone_t *zone, xfs_km_flags_t flags)
{
	int	retries = 0;
	gfp_t	lflags = kmem_flags_convert(flags);
	void	*ptr;

	trace_kmem_zone_alloc(kmem_cache_size(zone), flags, _RET_IP_);
	do {
		ptr = kmem_cache_alloc(zone, lflags);
		if (ptr || (flags & KM_MAYFAIL))
			return ptr;
		if (!(++retries % 100)) {
			xfs_err(NULL,
	"%s(%u) possible memory allocation deadlock in %s (mode:0x%x), size: 0x%x, flags: 0x%x",
				current->comm, current->pid,
				__func__, lflags, kmem_cache_size(zone), flags);
			if (xfs_kmem_fail_dump_stack == 1)
				dump_stack();
			else if (xfs_kmem_fail_dump_stack == 2)
				trigger_all_cpu_backtrace();
			else if (xfs_kmem_fail_dump_stack == 3)
				show_mem(0, NULL);
		}
		congestion_wait(BLK_RW_ASYNC, HZ/50);
	} while (1);
}