Merge tag 'mm-hotfixes-stable-2022-12-22-14-34' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull hotfixes from Andrew Morton:
 "Eight fixes, all cc:stable. One is for gcov and the remainder are MM"

* tag 'mm-hotfixes-stable-2022-12-22-14-34' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  gcov: add support for checksum field
  test_maple_tree: add test for mas_spanning_rebalance() on insufficient data
  maple_tree: fix mas_spanning_rebalance() on insufficient data
  hugetlb: really allocate vma lock for all sharable vmas
  kmsan: export kmsan_handle_urb
  kmsan: include linux/vmalloc.h
  mm/mempolicy: fix memory leak in set_mempolicy_home_node system call
  mm, mremap: fix mremap() expanding vma with addr inside vma
commit 699aee7b47
@@ -82,6 +82,7 @@ struct gcov_fn_info {
  * @version: gcov version magic indicating the gcc version used for compilation
  * @next: list head for a singly-linked list
  * @stamp: uniquifying time stamp
+ * @checksum: unique object checksum
  * @filename: name of the associated gcov data file
  * @merge: merge functions (null for unused counter type)
  * @n_functions: number of instrumented functions
@@ -94,6 +95,10 @@ struct gcov_info {
 	unsigned int version;
 	struct gcov_info *next;
 	unsigned int stamp;
+	/* Since GCC 12.1 a checksum field is added. */
+#if (__GNUC__ >= 12)
+	unsigned int checksum;
+#endif
 	const char *filename;
 	void (*merge[GCOV_COUNTERS])(gcov_type *, unsigned int);
 	unsigned int n_functions;
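The checksum member mirrors a field that GCC 12.1 added to its gcov metadata, so the kernel's view of the structure has to match the layout the compiler emits; otherwise every member after the new field is read at the wrong offset. A minimal user-space sketch of that offset problem, with illustrative names only (not the kernel's types):

#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-in for the gcov metadata layout. */
struct gcov_info_demo {
	unsigned int version;
	void *next;
	unsigned int stamp;
#if (__GNUC__ >= 12)
	unsigned int checksum;	/* only present when built with GCC 12+ */
#endif
	const char *filename;
};

int main(void)
{
	/*
	 * If this mirror lacked the conditional member while the data was
	 * produced by GCC 12+, 'filename' (and everything after it) would
	 * be read from the wrong offset.
	 */
	printf("filename offset: %zu\n", offsetof(struct gcov_info_demo, filename));
	return 0;
}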
@@ -2994,7 +2994,9 @@ static int mas_spanning_rebalance(struct ma_state *mas,
 	mast->free = &free;
 	mast->destroy = &destroy;
 	l_mas.node = r_mas.node = m_mas.node = MAS_NONE;
-	if (!(mast->orig_l->min && mast->orig_r->max == ULONG_MAX) &&
+
+	/* Check if this is not root and has sufficient data. */
+	if (((mast->orig_l->min != 0) || (mast->orig_r->max != ULONG_MAX)) &&
 	    unlikely(mast->bn->b_end <= mt_min_slots[mast->bn->type]))
 		mast_spanning_rebalance(mast);
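Per the added comment, the condition is meant to read "this is not the root span (min == 0 and max == ULONG_MAX) and the big node is short on data". The old predicate !(min && max == ULONG_MAX) tests min for truthiness rather than comparing both bounds, so it disagrees with that intent for several inputs. A standalone check of the two predicates (plain C, not kernel code):

#include <limits.h>
#include <stdio.h>

/* Old and new forms of the "this is not the root span" test. */
static int old_test(unsigned long min, unsigned long max)
{
	return !(min && max == ULONG_MAX);
}

static int new_test(unsigned long min, unsigned long max)
{
	return (min != 0) || (max != ULONG_MAX);
}

int main(void)
{
	unsigned long cases[][2] = {
		{ 0, ULONG_MAX },	/* root span: should be 0, old says 1 */
		{ 0, 100 },
		{ 5, ULONG_MAX },	/* not root: should be 1, old says 0 */
		{ 5, 100 },
	};

	for (unsigned int i = 0; i < sizeof(cases) / sizeof(cases[0]); i++)
		printf("min=%lu max=%lu old=%d new=%d\n",
		       cases[i][0], cases[i][1],
		       old_test(cases[i][0], cases[i][1]),
		       new_test(cases[i][0], cases[i][1]));
	return 0;
}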
@@ -2498,6 +2498,25 @@ static noinline void check_dup(struct maple_tree *mt)
 	}
 }
 
+static noinline void check_bnode_min_spanning(struct maple_tree *mt)
+{
+	int i = 50;
+	MA_STATE(mas, mt, 0, 0);
+
+	mt_set_non_kernel(9999);
+	mas_lock(&mas);
+	do {
+		mas_set_range(&mas, i*10, i*10+9);
+		mas_store(&mas, check_bnode_min_spanning);
+	} while (i--);
+
+	mas_set_range(&mas, 240, 509);
+	mas_store(&mas, NULL);
+	mas_unlock(&mas);
+	mas_destroy(&mas);
+	mt_set_non_kernel(0);
+}
+
 static DEFINE_MTREE(tree);
 static int maple_tree_seed(void)
 {
@@ -2742,6 +2761,10 @@ static int maple_tree_seed(void)
 	check_dup(&tree);
 	mtree_destroy(&tree);
 
+	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+	check_bnode_min_spanning(&tree);
+	mtree_destroy(&tree);
+
 #if defined(BENCH)
 skip:
 #endif
mm/hugetlb.c | 333
@@ -255,6 +255,152 @@ static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
 	return subpool_inode(file_inode(vma->vm_file));
 }
 
+/*
+ * hugetlb vma_lock helper routines
+ */
+static bool __vma_shareable_lock(struct vm_area_struct *vma)
+{
+	return vma->vm_flags & (VM_MAYSHARE | VM_SHARED) &&
+		vma->vm_private_data;
+}
+
+void hugetlb_vma_lock_read(struct vm_area_struct *vma)
+{
+	if (__vma_shareable_lock(vma)) {
+		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+		down_read(&vma_lock->rw_sema);
+	}
+}
+
+void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
+{
+	if (__vma_shareable_lock(vma)) {
+		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+		up_read(&vma_lock->rw_sema);
+	}
+}
+
+void hugetlb_vma_lock_write(struct vm_area_struct *vma)
+{
+	if (__vma_shareable_lock(vma)) {
+		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+		down_write(&vma_lock->rw_sema);
+	}
+}
+
+void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
+{
+	if (__vma_shareable_lock(vma)) {
+		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+		up_write(&vma_lock->rw_sema);
+	}
+}
+
+int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
+{
+	struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+	if (!__vma_shareable_lock(vma))
+		return 1;
+
+	return down_write_trylock(&vma_lock->rw_sema);
+}
+
+void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
+{
+	if (__vma_shareable_lock(vma)) {
+		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+		lockdep_assert_held(&vma_lock->rw_sema);
+	}
+}
+
+void hugetlb_vma_lock_release(struct kref *kref)
+{
+	struct hugetlb_vma_lock *vma_lock = container_of(kref,
+			struct hugetlb_vma_lock, refs);
+
+	kfree(vma_lock);
+}
+
+static void __hugetlb_vma_unlock_write_put(struct hugetlb_vma_lock *vma_lock)
+{
+	struct vm_area_struct *vma = vma_lock->vma;
+
+	/*
+	 * vma_lock structure may or not be released as a result of put,
+	 * it certainly will no longer be attached to vma so clear pointer.
+	 * Semaphore synchronizes access to vma_lock->vma field.
+	 */
+	vma_lock->vma = NULL;
+	vma->vm_private_data = NULL;
+	up_write(&vma_lock->rw_sema);
+	kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
+}
+
+static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
+{
+	if (__vma_shareable_lock(vma)) {
+		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+		__hugetlb_vma_unlock_write_put(vma_lock);
+	}
+}
+
+static void hugetlb_vma_lock_free(struct vm_area_struct *vma)
+{
+	/*
+	 * Only present in sharable vmas.
+	 */
+	if (!vma || !__vma_shareable_lock(vma))
+		return;
+
+	if (vma->vm_private_data) {
+		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+		down_write(&vma_lock->rw_sema);
+		__hugetlb_vma_unlock_write_put(vma_lock);
+	}
+}
+
+static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
+{
+	struct hugetlb_vma_lock *vma_lock;
+
+	/* Only establish in (flags) sharable vmas */
+	if (!vma || !(vma->vm_flags & VM_MAYSHARE))
+		return;
+
+	/* Should never get here with non-NULL vm_private_data */
+	if (vma->vm_private_data)
+		return;
+
+	vma_lock = kmalloc(sizeof(*vma_lock), GFP_KERNEL);
+	if (!vma_lock) {
+		/*
+		 * If we can not allocate structure, then vma can not
+		 * participate in pmd sharing. This is only a possible
+		 * performance enhancement and memory saving issue.
+		 * However, the lock is also used to synchronize page
+		 * faults with truncation. If the lock is not present,
+		 * unlikely races could leave pages in a file past i_size
+		 * until the file is removed. Warn in the unlikely case of
+		 * allocation failure.
+		 */
+		pr_warn_once("HugeTLB: unable to allocate vma specific lock\n");
+		return;
+	}
+
+	kref_init(&vma_lock->refs);
+	init_rwsem(&vma_lock->rw_sema);
+	vma_lock->vma = vma;
+	vma->vm_private_data = vma_lock;
+}
+
 /* Helper that removes a struct file_region from the resv_map cache and returns
  * it for use.
  */
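The helpers above only touch three members of the per-VMA lock object (refs, rw_sema and the vma back-pointer), so the object they manage is essentially the following; this is a sketch inferred from the code in this hunk, not a copy of the kernel header:

/* Shape of the per-VMA lock implied by the helpers above (kernel context). */
struct hugetlb_vma_lock_sketch {
	struct kref refs;		/* lifetime: kref_init()/kref_put() */
	struct rw_semaphore rw_sema;	/* readers: page faults; writer: truncation, unsharing */
	struct vm_area_struct *vma;	/* back-pointer, cleared in __hugetlb_vma_unlock_write_put() */
};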
@@ -6613,7 +6759,8 @@ bool hugetlb_reserve_pages(struct inode *inode,
 	}
 
 	/*
-	 * vma specific semaphore used for pmd sharing synchronization
+	 * vma specific semaphore used for pmd sharing and fault/truncation
+	 * synchronization
 	 */
 	hugetlb_vma_lock_alloc(vma);
 
@@ -6869,149 +7016,6 @@ void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
 		*end = ALIGN(*end, PUD_SIZE);
 }
 
-static bool __vma_shareable_flags_pmd(struct vm_area_struct *vma)
-{
-	return vma->vm_flags & (VM_MAYSHARE | VM_SHARED) &&
-		vma->vm_private_data;
-}
-
-void hugetlb_vma_lock_read(struct vm_area_struct *vma)
-{
-	if (__vma_shareable_flags_pmd(vma)) {
-		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
-
-		down_read(&vma_lock->rw_sema);
-	}
-}
-
-void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
-{
-	if (__vma_shareable_flags_pmd(vma)) {
-		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
-
-		up_read(&vma_lock->rw_sema);
-	}
-}
-
-void hugetlb_vma_lock_write(struct vm_area_struct *vma)
-{
-	if (__vma_shareable_flags_pmd(vma)) {
-		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
-
-		down_write(&vma_lock->rw_sema);
-	}
-}
-
-void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
-{
-	if (__vma_shareable_flags_pmd(vma)) {
-		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
-
-		up_write(&vma_lock->rw_sema);
-	}
-}
-
-int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
-{
-	struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
-
-	if (!__vma_shareable_flags_pmd(vma))
-		return 1;
-
-	return down_write_trylock(&vma_lock->rw_sema);
-}
-
-void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
-{
-	if (__vma_shareable_flags_pmd(vma)) {
-		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
-
-		lockdep_assert_held(&vma_lock->rw_sema);
-	}
-}
-
-void hugetlb_vma_lock_release(struct kref *kref)
-{
-	struct hugetlb_vma_lock *vma_lock = container_of(kref,
-			struct hugetlb_vma_lock, refs);
-
-	kfree(vma_lock);
-}
-
-static void __hugetlb_vma_unlock_write_put(struct hugetlb_vma_lock *vma_lock)
-{
-	struct vm_area_struct *vma = vma_lock->vma;
-
-	/*
-	 * vma_lock structure may or not be released as a result of put,
-	 * it certainly will no longer be attached to vma so clear pointer.
-	 * Semaphore synchronizes access to vma_lock->vma field.
-	 */
-	vma_lock->vma = NULL;
-	vma->vm_private_data = NULL;
-	up_write(&vma_lock->rw_sema);
-	kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
-}
-
-static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
-{
-	if (__vma_shareable_flags_pmd(vma)) {
-		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
-
-		__hugetlb_vma_unlock_write_put(vma_lock);
-	}
-}
-
-static void hugetlb_vma_lock_free(struct vm_area_struct *vma)
-{
-	/*
-	 * Only present in sharable vmas.
-	 */
-	if (!vma || !__vma_shareable_flags_pmd(vma))
-		return;
-
-	if (vma->vm_private_data) {
-		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
-
-		down_write(&vma_lock->rw_sema);
-		__hugetlb_vma_unlock_write_put(vma_lock);
-	}
-}
-
-static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
-{
-	struct hugetlb_vma_lock *vma_lock;
-
-	/* Only establish in (flags) sharable vmas */
-	if (!vma || !(vma->vm_flags & VM_MAYSHARE))
-		return;
-
-	/* Should never get here with non-NULL vm_private_data */
-	if (vma->vm_private_data)
-		return;
-
-	vma_lock = kmalloc(sizeof(*vma_lock), GFP_KERNEL);
-	if (!vma_lock) {
-		/*
-		 * If we can not allocate structure, then vma can not
-		 * participate in pmd sharing. This is only a possible
-		 * performance enhancement and memory saving issue.
-		 * However, the lock is also used to synchronize page
-		 * faults with truncation. If the lock is not present,
-		 * unlikely races could leave pages in a file past i_size
-		 * until the file is removed. Warn in the unlikely case of
-		 * allocation failure.
-		 */
-		pr_warn_once("HugeTLB: unable to allocate vma specific lock\n");
-		return;
-	}
-
-	kref_init(&vma_lock->refs);
-	init_rwsem(&vma_lock->rw_sema);
-	vma_lock->vma = vma;
-	vma->vm_private_data = vma_lock;
-}
-
 /*
  * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
  * and returns the corresponding pte. While this is not necessary for the
@@ -7100,47 +7104,6 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
 
 #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
 
-void hugetlb_vma_lock_read(struct vm_area_struct *vma)
-{
-}
-
-void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
-{
-}
-
-void hugetlb_vma_lock_write(struct vm_area_struct *vma)
-{
-}
-
-void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
-{
-}
-
-int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
-{
-	return 1;
-}
-
-void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
-{
-}
-
-void hugetlb_vma_lock_release(struct kref *kref)
-{
-}
-
-static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
-{
-}
-
-static void hugetlb_vma_lock_free(struct vm_area_struct *vma)
-{
-}
-
-static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
-{
-}
-
 pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
 		unsigned long addr, pud_t *pud)
 {
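Taken together, the read/write helpers give fault paths and truncation a single per-VMA synchronization point, now present for every sharable vma rather than only when pmd sharing is possible. A hypothetical caller (illustrative only, not from this commit) would bracket its work like this:

/* Hypothetical fault-side caller of the helpers defined above. */
static void example_hugetlb_fault_path(struct vm_area_struct *vma)
{
	hugetlb_vma_lock_read(vma);	/* no-op when the VMA has no shared lock */
	/* ... look up / instantiate hugetlb page table entries ... */
	hugetlb_vma_unlock_read(vma);
}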
@@ -260,6 +260,7 @@ void kmsan_handle_urb(const struct urb *urb, bool is_out)
 			      urb->transfer_buffer_length,
 			      /*checked*/ false);
 }
+EXPORT_SYMBOL_GPL(kmsan_handle_urb);
 
 static void kmsan_handle_dma_page(const void *addr, size_t size,
 				  enum dma_data_direction dir)
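The export matters because callers of kmsan_handle_urb() can live in modules (the USB core, for instance, may be built as one); without EXPORT_SYMBOL_GPL, modpost reports an undefined symbol and loading fails with "Unknown symbol". A hypothetical minimal module showing the call the export makes resolvable; the module itself is illustrative, not part of the commit:

#include <linux/kmsan.h>
#include <linux/module.h>
#include <linux/usb.h>

static int __init kmsan_urb_demo_init(void)
{
	struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);

	if (!urb)
		return -ENOMEM;
	/* Inform KMSAN about the URB buffer; callable from a module only
	 * because of the EXPORT_SYMBOL_GPL() added above. */
	kmsan_handle_urb(urb, true);
	usb_free_urb(urb);
	return 0;
}

static void __exit kmsan_urb_demo_exit(void)
{
}

module_init(kmsan_urb_demo_init);
module_exit(kmsan_urb_demo_exit);
MODULE_LICENSE("GPL");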
@@ -22,6 +22,7 @@
 #include <linux/spinlock.h>
 #include <linux/string.h>
 #include <linux/tracepoint.h>
+#include <linux/vmalloc.h>
 #include <trace/events/printk.h>
 
 static DEFINE_PER_CPU(int, per_cpu_var);
@@ -1540,6 +1540,7 @@ SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, le
 		 * the home node for vmas we already updated before.
 		 */
 		if (new->mode != MPOL_BIND && new->mode != MPOL_PREFERRED_MANY) {
+			mpol_put(new);
 			err = -EOPNOTSUPP;
 			break;
 		}
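The added mpol_put() drops the reference held on the freshly duplicated policy before the error exit; previously the unsupported-mode branch bailed out with the policy still referenced, leaking it on every such call. A stripped-down user-space model of the pattern (identifiers simplified; not the kernel code):

#include <errno.h>
#include <stdlib.h>

struct policy { int mode; int refcnt; };

static void policy_put(struct policy *p)
{
	if (--p->refcnt == 0)
		free(p);
}

/* Caller passes in a policy holding one reference. */
static int set_home_node(struct policy *p)
{
	if (p->mode != 1 /* "BIND" */ && p->mode != 2 /* "PREFERRED_MANY" */) {
		policy_put(p);		/* the added line: drop the reference */
		return -EOPNOTSUPP;	/* returning without the put leaked it */
	}
	/* ... apply the home node, then release the reference as usual ... */
	policy_put(p);
	return 0;
}

int main(void)
{
	struct policy *p = calloc(1, sizeof(*p));

	if (!p)
		return 1;
	p->refcnt = 1;
	p->mode = 3;	/* unsupported mode takes the error path */
	return set_home_node(p) == -EOPNOTSUPP ? 0 : 1;
}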
@@ -1016,7 +1016,8 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
 			long pages = (new_len - old_len) >> PAGE_SHIFT;
 			unsigned long extension_start = addr + old_len;
 			unsigned long extension_end = addr + new_len;
-			pgoff_t extension_pgoff = vma->vm_pgoff + (old_len >> PAGE_SHIFT);
+			pgoff_t extension_pgoff = vma->vm_pgoff +
+				((extension_start - vma->vm_start) >> PAGE_SHIFT);
 
 			if (vma->vm_flags & VM_ACCOUNT) {
 				if (security_vm_enough_memory_mm(mm, pages)) {
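The old formula assumed the vma being extended starts exactly at addr, so it derived the extension's file offset from old_len alone; when mremap() is called with addr somewhere inside a larger vma, that understates the offset and the expansion is attempted with a pgoff that does not correspond to extension_start. A standalone arithmetic check with illustrative constants (not kernel code):

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	/* A vma starting below the address passed to mremap(). */
	unsigned long vm_start = 0x100000, vm_pgoff = 0;
	unsigned long addr = 0x103000;		/* addr inside the vma */
	unsigned long old_len = 0x2000;
	unsigned long extension_start = addr + old_len;

	unsigned long old_formula = vm_pgoff + (old_len >> PAGE_SHIFT);
	unsigned long new_formula = vm_pgoff +
		((extension_start - vm_start) >> PAGE_SHIFT);

	/* old = 2 (ignores addr's offset inside the vma), new = 5
	 * (the page offset that actually backs extension_start). */
	printf("old=%lu new=%lu\n", old_formula, new_formula);
	return 0;
}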