hotfixes for 5.18-rc7
-----BEGIN PGP SIGNATURE-----

iHUEABYKAB0WIQTTMBEPP41GrTpTJgfdBJ7gKXxAjgUCYnvwxgAKCRDdBJ7gKXxA
jhymAQDvHnFT3F5ydvBqApbzrQRUk/+fkkQSrF/xYawknZNgkAEA6Tnh9XqYplJN
bbmml6HTVvDjprEOCGakY/Kyz7qmdQ0=
=SMJQ
-----END PGP SIGNATURE-----

Merge tag 'mm-hotfixes-stable-2022-05-11' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "Seven MM fixes, three of which address issues added in the most
  recent merge window, four of which are cc:stable.

  Three non-MM fixes, none very serious"

[ And yes, that's a real pull request from Andrew, not me creating a
  branch from emailed patches. Woo-hoo! ]

* tag 'mm-hotfixes-stable-2022-05-11' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  MAINTAINERS: add a mailing list for DAMON development
  selftests: vm: Makefile: rename TARGETS to VMTARGETS
  mm/kfence: reset PG_slab and memcg_data before freeing __kfence_pool
  mailmap: add entry for martyna.szapar-mudlaw@intel.com
  arm[64]/memremap: don't abuse pfn_valid() to ensure presence of linear map
  procfs: prevent unprivileged processes accessing fdinfo dir
  mm: mremap: fix sign for EFAULT error return value
  mm/hwpoison: use pr_err() instead of dump_page() in get_any_page()
  mm/huge_memory: do not overkill when splitting huge_zero_page
  Revert "mm/memory-failure.c: skip huge_zero_page in memory_failure()"
commit 364a453ab9

.mailmap | 1

@@ -251,6 +251,7 @@ Mark Yao <markyao0591@gmail.com> <mark.yao@rock-chips.com>
 Martin Kepplinger <martink@posteo.de> <martin.kepplinger@ginzinger.com>
 Martin Kepplinger <martink@posteo.de> <martin.kepplinger@puri.sm>
 Martin Kepplinger <martink@posteo.de> <martin.kepplinger@theobroma-systems.com>
+Martyna Szapar-Mudlaw <martyna.szapar-mudlaw@linux.intel.com> <martyna.szapar-mudlaw@intel.com>
 Mathieu Othacehe <m.othacehe@gmail.com>
 Matthew Wilcox <willy@infradead.org> <matthew.r.wilcox@intel.com>
 Matthew Wilcox <willy@infradead.org> <matthew@wil.cx>

MAINTAINERS

@@ -5440,6 +5440,7 @@ F: net/ax25/sysctl_net_ax25.c
 
 DATA ACCESS MONITOR
 M: SeongJae Park <sj@kernel.org>
+L: damon@lists.linux.dev
 L: linux-mm@kvack.org
 S: Maintained
 F: Documentation/ABI/testing/sysfs-kernel-mm-damon

arch/arm/include/asm/io.h

@@ -440,6 +440,9 @@ extern void pci_iounmap(struct pci_dev *dev, void __iomem *addr);
 #define ARCH_HAS_VALID_PHYS_ADDR_RANGE
 extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
 extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
+extern bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
+                                        unsigned long flags);
+#define arch_memremap_can_ram_remap arch_memremap_can_ram_remap
 #endif
 
 /*

arch/arm/mm/ioremap.c

@@ -493,3 +493,11 @@ void __init early_ioremap_init(void)
 {
         early_ioremap_setup();
 }
+
+bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
+                                 unsigned long flags)
+{
+        unsigned long pfn = PHYS_PFN(offset);
+
+        return memblock_is_map_memory(pfn);
+}

arch/arm64/include/asm/io.h

@@ -192,4 +192,8 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
 extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
 extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
 
+extern bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
+                                        unsigned long flags);
+#define arch_memremap_can_ram_remap arch_memremap_can_ram_remap
+
 #endif /* __ASM_IO_H */

arch/arm64/mm/ioremap.c

@@ -99,3 +99,11 @@ void __init early_ioremap_init(void)
 {
         early_ioremap_setup();
 }
+
+bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
+                                 unsigned long flags)
+{
+        unsigned long pfn = PHYS_PFN(offset);
+
+        return pfn_is_map_memory(pfn);
+}

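Editor's note (not part of the commit): the four arm[64] hunks above implement one idea. pfn_valid() only says a pfn has a struct page; memremap() needs to know whether the pfn is actually covered by the kernel linear map, and NOMAP regions have the former but not the latter. The standalone C sketch below is a toy model of that distinction only; the fake_* names and pfn ranges are invented for illustration and are not kernel APIs.

/*
 * Toy model (editor's illustration, not kernel code): one predicate for
 * "has a struct page" (pfn_valid-like) and one for "covered by the
 * linear map" (memblock_is_map_memory / pfn_is_map_memory-like).
 * A NOMAP region is valid but unmapped, so remapping it as RAM must be
 * refused.
 */
#include <stdbool.h>
#include <stdio.h>

#define NOMAP_START 0x200UL
#define NOMAP_END   0x240UL

static bool fake_pfn_valid(unsigned long pfn)
{
        return pfn < 0x400UL;           /* the memory map covers everything */
}

static bool fake_pfn_is_map_memory(unsigned long pfn)
{
        if (pfn >= NOMAP_START && pfn < NOMAP_END)
                return false;           /* reserved/NOMAP: no linear mapping */
        return fake_pfn_valid(pfn);
}

static const char *remap_decision(unsigned long pfn)
{
        bool old_ok = fake_pfn_valid(pfn);          /* old check: struct page exists */
        bool new_ok = fake_pfn_is_map_memory(pfn);  /* new check: really mapped */

        return old_ok == new_ok ? "same answer"
                                : "old check would have remapped unmapped RAM";
}

int main(void)
{
        printf("pfn 0x100: %s\n", remap_decision(0x100UL));
        printf("pfn 0x210: %s\n", remap_decision(0x210UL));
        return 0;
}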
fs/proc/fd.c | 23

@@ -72,7 +72,7 @@ out:
         return 0;
 }
 
-static int seq_fdinfo_open(struct inode *inode, struct file *file)
+static int proc_fdinfo_access_allowed(struct inode *inode)
 {
         bool allowed = false;
         struct task_struct *task = get_proc_task(inode);
@@ -86,6 +86,16 @@ static int seq_fdinfo_open(struct inode *inode, struct file *file)
         if (!allowed)
                 return -EACCES;
 
+        return 0;
+}
+
+static int seq_fdinfo_open(struct inode *inode, struct file *file)
+{
+        int ret = proc_fdinfo_access_allowed(inode);
+
+        if (ret)
+                return ret;
+
         return single_open(file, seq_show, inode);
 }
 
@@ -348,12 +358,23 @@ static int proc_readfdinfo(struct file *file, struct dir_context *ctx)
                                   proc_fdinfo_instantiate);
 }
 
+static int proc_open_fdinfo(struct inode *inode, struct file *file)
+{
+        int ret = proc_fdinfo_access_allowed(inode);
+
+        if (ret)
+                return ret;
+
+        return 0;
+}
+
 const struct inode_operations proc_fdinfo_inode_operations = {
         .lookup = proc_lookupfdinfo,
         .setattr = proc_setattr,
 };
 
 const struct file_operations proc_fdinfo_operations = {
+        .open = proc_open_fdinfo,
         .read = generic_read_dir,
         .iterate_shared = proc_readfdinfo,
         .llseek = generic_file_llseek,

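Editor's note (not part of the commit): with the new .open handler above, the ptrace-style permission check now also runs when the fdinfo directory itself is opened, not only when an individual fdinfo file is read. A minimal userspace demo of the expected behaviour, assuming the caller is unprivileged and that pid 1 is a process it cannot trace:

/*
 * Editor's illustration only: reading our own fdinfo directory should
 * still work, while opening another task's fdinfo directory is expected
 * to fail with EACCES once the open() hook enforces the access check.
 */
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void try_open(const char *path)
{
        int fd = open(path, O_RDONLY | O_DIRECTORY);

        if (fd < 0) {
                printf("open(%s) failed: %s\n", path, strerror(errno));
                return;
        }
        printf("open(%s) succeeded\n", path);
        close(fd);
}

int main(void)
{
        try_open("/proc/self/fdinfo");  /* our own task: allowed */
        try_open("/proc/1/fdinfo");     /* init: EACCES for unprivileged callers */
        return 0;
}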
mm/huge_memory.c

@@ -2495,11 +2495,16 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
         struct address_space *mapping = NULL;
         int extra_pins, ret;
         pgoff_t end;
+        bool is_hzp;
 
-        VM_BUG_ON_PAGE(is_huge_zero_page(head), head);
         VM_BUG_ON_PAGE(!PageLocked(head), head);
         VM_BUG_ON_PAGE(!PageCompound(head), head);
 
+        is_hzp = is_huge_zero_page(head);
+        VM_WARN_ON_ONCE_PAGE(is_hzp, head);
+        if (is_hzp)
+                return -EBUSY;
+
         if (PageWriteback(head))
                 return -EBUSY;
 

mm/kfence/core.c

@@ -621,6 +621,16 @@ static bool __init kfence_init_pool_early(void)
          * fails for the first page, and therefore expect addr==__kfence_pool in
          * most failure cases.
          */
+        for (char *p = (char *)addr; p < __kfence_pool + KFENCE_POOL_SIZE; p += PAGE_SIZE) {
+                struct slab *slab = virt_to_slab(p);
+
+                if (!slab)
+                        continue;
+#ifdef CONFIG_MEMCG
+                slab->memcg_data = 0;
+#endif
+                __folio_clear_slab(slab_folio(slab));
+        }
         memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool));
         __kfence_pool = NULL;
         return false;

mm/memory-failure.c

@@ -1274,7 +1274,7 @@ try_again:
         }
 out:
         if (ret == -EIO)
-                dump_page(p, "hwpoison: unhandlable page");
+                pr_err("Memory failure: %#lx: unhandlable page.\n", page_to_pfn(p));
 
         return ret;
 }
@@ -1860,19 +1860,6 @@ try_again:
         }
 
         if (PageTransHuge(hpage)) {
-                /*
-                 * Bail out before SetPageHasHWPoisoned() if hpage is
-                 * huge_zero_page, although PG_has_hwpoisoned is not
-                 * checked in set_huge_zero_page().
-                 *
-                 * TODO: Handle memory failure of huge_zero_page thoroughly.
-                 */
-                if (is_huge_zero_page(hpage)) {
-                        action_result(pfn, MF_MSG_UNSPLIT_THP, MF_IGNORED);
-                        res = -EBUSY;
-                        goto unlock_mutex;
-                }
-
                 /*
                  * The flag must be set after the refcount is bumped
                  * otherwise it may race with THP split.

mm/mremap.c

@@ -947,7 +947,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
                 return -EINTR;
         vma = vma_lookup(mm, addr);
         if (!vma) {
-                ret = EFAULT;
+                ret = -EFAULT;
                 goto out;
         }
 

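Editor's note (not part of the commit): the one-character mm/mremap.c fix matters because the syscall ABI signals errors with negative errno values; a positive EFAULT (14) would have reached userspace looking like a successful remap to a bogus address. A small demo of the corrected behaviour, assuming the page-aligned address below is unmapped in the test process:

/*
 * Editor's illustration only: mremap() on an unmapped, page-aligned
 * address should fail, with glibc translating the kernel's -EFAULT
 * into MAP_FAILED plus errno == EFAULT.
 */
#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
        /* A page-aligned address that is almost certainly not mapped here. */
        void *bogus = (void *)0x100000000000UL;

        void *ret = mremap(bogus, 4096, 8192, MREMAP_MAYMOVE);
        if (ret == MAP_FAILED)
                printf("mremap failed as expected: %s\n", strerror(errno));
        else
                printf("unexpectedly succeeded at %p\n", ret);
        return 0;
}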
tools/testing/selftests/vm/Makefile

@@ -57,9 +57,9 @@ CAN_BUILD_I386 := $(shell ./../x86/check_cc.sh "$(CC)" ../x86/trivial_32bit_prog
 CAN_BUILD_X86_64 := $(shell ./../x86/check_cc.sh "$(CC)" ../x86/trivial_64bit_program.c)
 CAN_BUILD_WITH_NOPIE := $(shell ./../x86/check_cc.sh "$(CC)" ../x86/trivial_program.c -no-pie)
 
-TARGETS := protection_keys
-BINARIES_32 := $(TARGETS:%=%_32)
-BINARIES_64 := $(TARGETS:%=%_64)
+VMTARGETS := protection_keys
+BINARIES_32 := $(VMTARGETS:%=%_32)
+BINARIES_64 := $(VMTARGETS:%=%_64)
 
 ifeq ($(CAN_BUILD_WITH_NOPIE),1)
 CFLAGS += -no-pie
@@ -112,7 +112,7 @@ $(BINARIES_32): CFLAGS += -m32 -mxsave
 $(BINARIES_32): LDLIBS += -lrt -ldl -lm
 $(BINARIES_32): $(OUTPUT)/%_32: %.c
         $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $(notdir $^) $(LDLIBS) -o $@
-$(foreach t,$(TARGETS),$(eval $(call gen-target-rule-32,$(t))))
+$(foreach t,$(VMTARGETS),$(eval $(call gen-target-rule-32,$(t))))
 endif
 
 ifeq ($(CAN_BUILD_X86_64),1)
@@ -120,7 +120,7 @@ $(BINARIES_64): CFLAGS += -m64 -mxsave
 $(BINARIES_64): LDLIBS += -lrt -ldl
 $(BINARIES_64): $(OUTPUT)/%_64: %.c
         $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $(notdir $^) $(LDLIBS) -o $@
-$(foreach t,$(TARGETS),$(eval $(call gen-target-rule-64,$(t))))
+$(foreach t,$(VMTARGETS),$(eval $(call gen-target-rule-64,$(t))))
 endif
 
 # x86_64 users should be encouraged to install 32-bit libraries