Merge branch 'akpm' (patches from Andrew)
Merge fixes from Andrew Morton:
 "10 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  drivers/pinctrl/intel/pinctrl-baytrail.c: fix build with gcc-4.4
  update "mm/zsmalloc: don't fail if can't create debugfs info"
  dma-debug: avoid spinlock recursion when disabling dma-debug
  mm: oom_reaper: remove some bloat
  memcg: fix mem_cgroup_out_of_memory() return value.
  ocfs2: fix improper handling of return errno
  mm: slub: remove unused virt_to_obj()
  mm: kasan: remove unused 'reserved' field from struct kasan_alloc_meta
  mm: make CONFIG_DEFERRED_STRUCT_PAGE_INIT depends on !FLATMEM explicitly
  seqlock: fix raw_read_seqcount_latch()
commit e12fab28df
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -153,8 +153,10 @@ struct byt_community {
 		.name = (n),		\
 		.pins = (p),		\
 		.npins = ARRAY_SIZE((p)),	\
 		.has_simple_funcs = 1,	\
-		.simple_funcs = (f),	\
+		{			\
+			.simple_funcs = (f),	\
+		},			\
 		.nfuncs = ARRAY_SIZE((f)),	\
 	}
 #define PIN_GROUP_MIXED(n, p, f)	\
@@ -163,7 +165,9 @@ struct byt_community {
 		.pins = (p),		\
 		.npins = ARRAY_SIZE((p)),	\
 		.has_simple_funcs = 0,	\
-		.mixed_funcs = (f),	\
+		{			\
+			.mixed_funcs = (f),	\
+		},			\
 		.nfuncs = ARRAY_SIZE((f)),	\
 	}
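Why this fixes the gcc-4.4 build: older gcc cannot name a member of an anonymous union in a designated initializer, so the patch wraps the union's initializer in its own brace level and lets it be matched positionally. A minimal sketch of the two forms, using an illustrative struct rather than the driver's real byt_pingroup:

/* 'struct group_like' is a stand-in, not the driver's layout. */
struct group_like {
	const char *name;
	int has_simple_funcs;
	union {
		const int *simple_funcs;
		const char *const *mixed_funcs;
	};
	int nfuncs;
};

static const int funcs[] = { 0, 1 };

/* gcc-4.4 rejects designating through the anonymous union directly: */
static const struct group_like g_old = {
	.name = "grp",
	.has_simple_funcs = 1,
	.simple_funcs = funcs,	/* error on gcc-4.4 */
	.nfuncs = 2,
};

/* ...but accepts the braced form the patch switches to: the extra
 * braces initialize the anonymous union as the next member in order. */
static const struct group_like g_new = {
	.name = "grp",
	.has_simple_funcs = 1,
	{
		.simple_funcs = funcs,
	},
	.nfuncs = 2,
};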
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
@@ -176,12 +176,7 @@ struct inode *ocfs2_iget(struct ocfs2_super *osb, u64 blkno, unsigned flags,
 	}
 	if (is_bad_inode(inode)) {
 		iput(inode);
-		if ((flags & OCFS2_FI_FLAG_FILECHECK_CHK) ||
-		    (flags & OCFS2_FI_FLAG_FILECHECK_FIX))
-			/* Return OCFS2_FILECHECK_ERR_XXX related errno */
-			inode = ERR_PTR(rc);
-		else
-			inode = ERR_PTR(-ESTALE);
+		inode = ERR_PTR(rc);
 		goto bail;
 	}
 
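The fix trusts the errno that the earlier checks already put in rc and encodes it straight into the returned pointer, instead of second-guessing the filecheck flags. A quick reminder of the ERR_PTR idiom at work here, with an illustrative lookup helper rather than real ocfs2 code:

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/fs.h>

/* lookup_demo() is illustrative: an errno travels through the pointer
 * return and is unpacked with IS_ERR()/PTR_ERR() on the other side. */
static struct inode *lookup_demo(int rc)
{
	if (rc < 0)
		return ERR_PTR(rc);	/* encode the errno in the pointer */
	return NULL;			/* ...or a real inode in ocfs2 */
}

static int caller_demo(void)
{
	struct inode *inode = lookup_demo(-ESTALE);

	if (IS_ERR(inode))
		return PTR_ERR(inode);	/* recovers -ESTALE */
	return 0;
}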
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
@@ -514,7 +514,9 @@ struct mm_struct {
 #ifdef CONFIG_HUGETLB_PAGE
 	atomic_long_t hugetlb_usage;
 #endif
+#ifdef CONFIG_MMU
 	struct work_struct async_put_work;
+#endif
 };
 
 static inline void mm_init_cpumask(struct mm_struct *mm)
diff --git a/include/linux/sched.h b/include/linux/sched.h
@@ -2745,10 +2745,12 @@ static inline bool mmget_not_zero(struct mm_struct *mm)
 
 /* mmput gets rid of the mappings and all user-space */
 extern void mmput(struct mm_struct *);
-/* same as above but performs the slow path from the async kontext. Can
+#ifdef CONFIG_MMU
+/* same as above but performs the slow path from the async context. Can
  * be called from the atomic context as well
  */
 extern void mmput_async(struct mm_struct *);
+#endif
 
 /* Grab a reference to a task's mm, if it is not already going away */
 extern struct mm_struct *get_task_mm(struct task_struct *task);
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
@@ -277,7 +277,7 @@ static inline void raw_write_seqcount_barrier(seqcount_t *s)
 
 static inline int raw_read_seqcount_latch(seqcount_t *s)
 {
-	return lockless_dereference(s->sequence);
+	return lockless_dereference(s)->sequence;
 }
 
 /**
@@ -331,7 +331,7 @@ static inline int raw_read_seqcount_latch(seqcount_t *s)
 *	unsigned seq, idx;
 *
 *	do {
- *		seq = lockless_dereference(latch->seq);
+ *		seq = lockless_dereference(latch)->seq;
 *
 *		idx = seq & 0x01;
 *		entry = data_query(latch->data[idx], ...);
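lockless_dereference() READ_ONCE()s a *pointer* and orders later dependent loads after it, so it must be handed the seqcount pointer itself, not the plain integer s->sequence; the fix loads the pointer with the barrier and reads ->sequence through the result. For context, a compilable variant of the documentation's reader loop, where latch_struct, data_struct, entry and data_query() are illustrative stand-ins, not kernel API:

#include <linux/seqlock.h>

struct entry { int key, val; };
struct data_struct { struct entry entries[16]; };

struct latch_struct {
	seqcount_t seq;
	struct data_struct data[2];	/* writer alternates the copies */
};

static struct entry *data_query(struct data_struct *d, int key)
{
	return &d->entries[key & 15];	/* stand-in lookup */
}

static struct entry *latch_query(struct latch_struct *latch, int key)
{
	struct entry *entry;
	unsigned int seq, idx;

	do {
		seq = raw_read_seqcount_latch(&latch->seq);
		idx = seq & 0x01;	/* even sequence: copy 0, odd: copy 1 */
		entry = data_query(&latch->data[idx], key);
	} while (read_seqcount_retry(&latch->seq, seq));

	return entry;
}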
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
@@ -111,22 +111,6 @@ static inline void sysfs_slab_remove(struct kmem_cache *s)
 }
 #endif
 
-/**
- * virt_to_obj - returns address of the beginning of object.
- * @s: object's kmem_cache
- * @slab_page: address of slab page
- * @x: address within object memory range
- *
- * Returns address of the beginning of object
- */
-static inline void *virt_to_obj(struct kmem_cache *s,
-				const void *slab_page,
-				const void *x)
-{
-	return (void *)x - ((x - slab_page) % s->size);
-}
-
 void object_err(struct kmem_cache *s, struct page *page,
 			u8 *object, char *reason);
 
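The removed helper rounded an interior pointer down to the start of its fixed-size object; nothing in-tree called it anymore. For the record, the arithmetic worked through in plain userspace C (my example, kernel types stripped):

#include <stdio.h>

/* Round an interior address down to the start of its size-byte slot,
 * exactly as the removed virt_to_obj() did. */
static void *obj_start(const void *slab_page, size_t size, const void *x)
{
	return (void *)((const char *)x -
			(((const char *)x - (const char *)slab_page) % size));
}

int main(void)
{
	char page[4096];
	size_t size = 64;		/* object size */
	void *x = page + 167;		/* interior pointer */

	/* 167 % 64 = 39, so the object starts 39 bytes earlier, at
	 * offset 128 (the third 64-byte slot). */
	printf("offset %td\n", (char *)obj_start(page, size, x) - page);
	return 0;
}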
diff --git a/kernel/fork.c b/kernel/fork.c
@@ -736,6 +736,7 @@ void mmput(struct mm_struct *mm)
 }
 EXPORT_SYMBOL_GPL(mmput);
 
+#ifdef CONFIG_MMU
 static void mmput_async_fn(struct work_struct *work)
 {
 	struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work);
@@ -749,6 +750,7 @@ void mmput_async(struct mm_struct *mm)
 		schedule_work(&mm->async_put_work);
 	}
 }
+#endif
 
 /**
  * set_mm_exe_file - change a reference to the mm's executable file
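Both hunks simply gate the async-put machinery behind CONFIG_MMU to match the header changes above; its only user, the oom reaper, is MMU-only. For reference, how the pair reads, lightly simplified from kernel/fork.c of this era: the last reference dropped from atomic context punts the sleeping teardown to a workqueue.

#include <linux/workqueue.h>
#include <linux/mm_types.h>

static void mmput_async_fn(struct work_struct *work)
{
	struct mm_struct *mm = container_of(work, struct mm_struct,
					    async_put_work);
	__mmput(mm);		/* runs in process context, may sleep */
}

void mmput_async(struct mm_struct *mm)
{
	if (atomic_dec_and_test(&mm->mm_users)) {
		/* last reference: defer the sleeping part to a worker */
		INIT_WORK(&mm->async_put_work, mmput_async_fn);
		schedule_work(&mm->async_put_work);
	}
}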
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
@@ -657,9 +657,9 @@ static struct dma_debug_entry *dma_entry_alloc(void)
 	spin_lock_irqsave(&free_entries_lock, flags);
 
 	if (list_empty(&free_entries)) {
-		pr_err("DMA-API: debugging out of memory - disabling\n");
 		global_disable = true;
 		spin_unlock_irqrestore(&free_entries_lock, flags);
+		pr_err("DMA-API: debugging out of memory - disabling\n");
 		return NULL;
 	}
 
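The recursion being avoided: pr_err() can reach a console driver that itself maps DMA, which re-enters dma-debug and tries to take free_entries_lock a second time. Moving the message after the unlock breaks the cycle. The general shape of the fix, with illustrative names (pool_lock, free_list) rather than the real dma-debug globals:

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/printk.h>

struct pool_entry { struct list_head list; };

static LIST_HEAD(free_list);
static DEFINE_SPINLOCK(pool_lock);
static bool pool_disabled;

static struct pool_entry *pool_alloc(void)
{
	struct pool_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(&pool_lock, flags);
	if (list_empty(&free_list)) {
		pool_disabled = true;
		spin_unlock_irqrestore(&pool_lock, flags);
		/* safe now: any re-entry finds the lock free */
		pr_err("pool out of memory - disabling\n");
		return NULL;
	}
	entry = list_first_entry(&free_list, struct pool_entry, list);
	list_del(&entry->list);
	spin_unlock_irqrestore(&pool_lock, flags);
	return entry;
}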
diff --git a/mm/Kconfig b/mm/Kconfig
@@ -649,6 +649,7 @@ config DEFERRED_STRUCT_PAGE_INIT
 	default n
 	depends on ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
 	depends on MEMORY_HOTPLUG
+	depends on !FLATMEM
 	help
 	  Ordinarily all struct pages are initialised during early boot in a
 	  single thread. On very large machines this can take a considerable
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
@@ -77,7 +77,6 @@ struct kasan_alloc_meta {
 	struct kasan_track track;
 	u32 state : 2;	/* enum kasan_state */
 	u32 alloc_size : 30;
-	u32 reserved;
 };
 
 struct qlist_node {
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
@@ -1302,6 +1302,8 @@ static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
 			mem_cgroup_iter_break(memcg, iter);
 			if (chosen)
 				put_task_struct(chosen);
+			/* Set a dummy value to return "true". */
+			chosen = (void *) 1;
 			goto unlock;
 		case OOM_SCAN_OK:
 			break;
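mem_cgroup_out_of_memory() returns chosen converted to bool, so when the scan aborts because a victim is already on its way out there is nothing to kill, but the function must still report success; the dummy (void *)1 does exactly that, and the goto skips the kill path. A sketch of the control flow, illustrative rather than the real memcg scan:

#include <stdbool.h>
#include <stddef.h>

struct task { int pid; };

static bool scan_demo(struct task *candidate, bool abort_scan)
{
	struct task *chosen = NULL;

	if (abort_scan) {
		/* a victim already exists: report success, skip the kill */
		chosen = (void *)1;
		goto unlock;
	}
	chosen = candidate;

	if (chosen) {
		/* kill 'chosen' here -- only a real pointer ever
		 * reaches this block, never the dummy */
	}
unlock:
	return chosen != NULL;	/* the dummy still converts to true */
}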
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
@@ -45,6 +45,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
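pr_fmt() must be defined before printk.h is pulled in (here via module.h), which is why the define lands ahead of the includes; every pr_warn()/pr_err() added below then gets a "zsmalloc: " prefix for free. A minimal module-style sketch of the mechanism (illustrative, not from the patch):

/* Must precede any header that drags in linux/printk.h. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/printk.h>

static int __init demo_init(void)
{
	/* with this file built as demo.o, prints "demo: hello",
	 * because pr_info(fmt, ...) expands to
	 * printk(KERN_INFO pr_fmt(fmt), ...) */
	pr_info("hello\n");
	return 0;
}
module_init(demo_init);
MODULE_LICENSE("GPL");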
@@ -483,16 +485,16 @@ static inline unsigned long zs_stat_get(struct size_class *class,
 
 #ifdef CONFIG_ZSMALLOC_STAT
 
-static int __init zs_stat_init(void)
+static void __init zs_stat_init(void)
 {
-	if (!debugfs_initialized())
-		return -ENODEV;
+	if (!debugfs_initialized()) {
+		pr_warn("debugfs not available, stat dir not created\n");
+		return;
+	}
 
 	zs_stat_root = debugfs_create_dir("zsmalloc", NULL);
 	if (!zs_stat_root)
-		return -ENOMEM;
-
-	return 0;
+		pr_warn("debugfs 'zsmalloc' stat dir creation failed\n");
 }
 
 static void __exit zs_stat_exit(void)
@@ -577,8 +579,10 @@ static void zs_pool_stat_create(struct zs_pool *pool, const char *name)
 {
 	struct dentry *entry;
 
-	if (!zs_stat_root)
+	if (!zs_stat_root) {
+		pr_warn("no root stat dir, not creating <%s> stat dir\n", name);
 		return;
+	}
 
 	entry = debugfs_create_dir(name, zs_stat_root);
 	if (!entry) {
@@ -592,7 +596,8 @@ static void zs_pool_stat_create(struct zs_pool *pool, const char *name)
 	if (!entry) {
 		pr_warn("%s: debugfs file entry <%s> creation failed\n",
 			name, "classes");
-		return;
+		debugfs_remove_recursive(pool->stat_dentry);
+		pool->stat_dentry = NULL;
 	}
 }
 
@@ -602,9 +607,8 @@ static void zs_pool_stat_destroy(struct zs_pool *pool)
 }
 
 #else /* CONFIG_ZSMALLOC_STAT */
-static int __init zs_stat_init(void)
+static void __init zs_stat_init(void)
 {
-	return 0;
 }
 
 static void __exit zs_stat_exit(void)
@@ -2011,17 +2015,10 @@ static int __init zs_init(void)
 	zpool_register_driver(&zs_zpool_driver);
 #endif
 
-	ret = zs_stat_init();
-	if (ret) {
-		pr_err("zs stat initialization failed\n");
-		goto stat_fail;
-	}
+	zs_stat_init();
+
 	return 0;
 
-stat_fail:
-#ifdef CONFIG_ZPOOL
-	zpool_unregister_driver(&zs_zpool_driver);
-#endif
 notifier_fail:
 	zs_unregister_cpu_notifier();
 