Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "17 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  arch: define weak abort()
  mm, oom_reaper: fix memory corruption
  kernel: make groups_sort calling a responsibility group_info allocators
  mm/frame_vector.c: release a semaphore in 'get_vaddr_frames()'
  tools/slabinfo-gnuplot: force to use bash shell
  kcov: fix comparison callback signature
  mm/slab.c: do not hash pointers when debugging slab
  mm/page_alloc.c: avoid excessive IRQ disabled times in free_unref_page_list()
  mm/memory.c: mark wp_huge_pmd() inline to prevent build failure
  scripts/faddr2line: fix CROSS_COMPILE unset error
  Documentation/vm/zswap.txt: update with same-value filled page feature
  exec: avoid gcc-8 warning for get_task_comm
  autofs: fix careless error in recent commit
  string.h: workaround for increased stack usage
  mm/kmemleak.c: make cond_resched() rate-limiting more efficient
  lib/rbtree,drm/mm: add rbtree_replace_node_cached()
  include/linux/idr.h: add #include <linux/bug.h>
commit 18d40eae7f
@@ -98,5 +98,25 @@ request is made for a page in an old zpool, it is uncompressed using its
 original compressor. Once all pages are removed from an old zpool, the zpool
 and its compressor are freed.
 
+Some of the pages in zswap are same-value filled pages (i.e. contents of the
+page have same value or repetitive pattern). These pages include zero-filled
+pages and they are handled differently. During store operation, a page is
+checked if it is a same-value filled page before compressing it. If true, the
+compressed length of the page is set to zero and the pattern or same-filled
+value is stored.
+
+Same-value filled pages identification feature is enabled by default and can be
+disabled at boot time by setting the "same_filled_pages_enabled" attribute to 0,
+e.g. zswap.same_filled_pages_enabled=0. It can also be enabled and disabled at
+runtime using the sysfs "same_filled_pages_enabled" attribute, e.g.
+
+echo 1 > /sys/module/zswap/parameters/same_filled_pages_enabled
+
+When zswap same-filled page identification is disabled at runtime, it will stop
+checking for the same-value filled pages during store operation. However, the
+existing pages which are marked as same-value filled pages remain stored
+unchanged in zswap until they are either loaded or invalidated.
+
 A debugfs interface is provided for various statistic about pool size, number
-of pages stored, and various counters for the reasons pages are rejected.
+of pages stored, same-value filled pages and various counters for the reasons
+pages are rejected.
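The check described above amounts to treating the page as an array of machine words and comparing neighbours; if every word matches, only the single repeating word needs to be kept. Below is a minimal userspace sketch of that idea, not the kernel's actual helper: the function name and the fixed 4 KiB page size are assumptions.

	#include <stdbool.h>
	#include <stddef.h>

	#define PAGE_SIZE 4096	/* assumed page size for the sketch */

	/* Return true (and the repeating word) if the page is same-value filled. */
	static bool page_same_filled(const void *page, unsigned long *value)
	{
		const unsigned long *word = page;
		size_t i, last = PAGE_SIZE / sizeof(*word) - 1;

		for (i = 0; i < last; i++) {
			if (word[i] != word[i + 1])
				return false;
		}
		*value = word[0];	/* store this word instead of compressed data */
		return true;
	}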
@@ -263,6 +263,7 @@ COMPAT_SYSCALL_DEFINE2(s390_setgroups16, int, gidsetsize, u16 __user *, grouplis
 		return retval;
 	}
 
+	groups_sort(group_info);
 	retval = set_current_groups(group_info);
 	put_group_info(group_info);
 
@@ -575,21 +575,23 @@ EXPORT_SYMBOL(drm_mm_remove_node);
  */
 void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
 {
+	struct drm_mm *mm = old->mm;
+
 	DRM_MM_BUG_ON(!old->allocated);
 
 	*new = *old;
 
 	list_replace(&old->node_list, &new->node_list);
-	rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree.rb_root);
+	rb_replace_node_cached(&old->rb, &new->rb, &mm->interval_tree);
 
 	if (drm_mm_hole_follows(old)) {
 		list_replace(&old->hole_stack, &new->hole_stack);
 		rb_replace_node(&old->rb_hole_size,
 				&new->rb_hole_size,
-				&old->mm->holes_size);
+				&mm->holes_size);
 		rb_replace_node(&old->rb_hole_addr,
 				&new->rb_hole_addr,
-				&old->mm->holes_addr);
+				&mm->holes_addr);
 	}
 
 	old->allocated = false;
@@ -170,7 +170,6 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
 
 	mutex_unlock(&sbi->wq_mutex);
 
-	if (autofs4_write(sbi, pipe, &pkt, pktsz))
 	switch (ret = autofs4_write(sbi, pipe, &pkt, pktsz)) {
 	case 0:
 		break;
@@ -1216,15 +1216,14 @@ killed:
 	return -EAGAIN;
 }
 
-char *get_task_comm(char *buf, struct task_struct *tsk)
+char *__get_task_comm(char *buf, size_t buf_size, struct task_struct *tsk)
 {
-	/* buf must be at least sizeof(tsk->comm) in size */
 	task_lock(tsk);
-	strncpy(buf, tsk->comm, sizeof(tsk->comm));
+	strncpy(buf, tsk->comm, buf_size);
 	task_unlock(tsk);
 	return buf;
 }
-EXPORT_SYMBOL_GPL(get_task_comm);
+EXPORT_SYMBOL_GPL(__get_task_comm);
 
 /*
  * These functions flushes out all traces of the currently running executable
@@ -60,6 +60,9 @@ int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp)
 				gi->gid[i] = exp->ex_anon_gid;
 			else
 				gi->gid[i] = rqgi->gid[i];
+
+			/* Each thread allocates its own gi, no race */
+			groups_sort(gi);
 		}
 	} else {
 		gi = get_group_info(rqgi);
@@ -83,6 +83,7 @@ extern int set_current_groups(struct group_info *);
 extern void set_groups(struct cred *, struct group_info *);
 extern int groups_search(const struct group_info *, kgid_t);
 extern bool may_setgroups(void);
+extern void groups_sort(struct group_info *);
 
 /*
  * The security context of a task
@@ -15,6 +15,7 @@
 #include <linux/radix-tree.h>
 #include <linux/gfp.h>
 #include <linux/percpu.h>
+#include <linux/bug.h>
 
 struct idr {
 	struct radix_tree_root	idr_rt;
@@ -66,6 +66,15 @@ static inline bool tsk_is_oom_victim(struct task_struct * tsk)
 	return tsk->signal->oom_mm;
 }
 
+/*
+ * Use this helper if tsk->mm != mm and the victim mm needs a special
+ * handling. This is guaranteed to stay true after once set.
+ */
+static inline bool mm_is_oom_victim(struct mm_struct *mm)
+{
+	return test_bit(MMF_OOM_VICTIM, &mm->flags);
+}
+
 /*
  * Checks whether a page fault on the given mm is still reliable.
  * This is no longer true if the oom reaper started to reap the
@@ -99,6 +99,8 @@ extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
 			    struct rb_root *root);
 extern void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new,
 				struct rb_root *root);
+extern void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new,
+				   struct rb_root_cached *root);
 
 static inline void rb_link_node(struct rb_node *node, struct rb_node *parent,
 				struct rb_node **rb_link)
@@ -1503,7 +1503,11 @@ static inline void set_task_comm(struct task_struct *tsk, const char *from)
 	__set_task_comm(tsk, from, false);
 }
 
-extern char *get_task_comm(char *to, struct task_struct *tsk);
+extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk);
+#define get_task_comm(buf, tsk) ({			\
+	BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN);	\
+	__get_task_comm(buf, sizeof(buf), tsk);		\
+})
 
 #ifdef CONFIG_SMP
 void scheduler_ipi(void);
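The macro works because sizeof applied to a true array yields the array size, while applied to a pointer it yields the pointer size, so the BUILD_BUG_ON() rejects at compile time any caller that passes a plain char pointer rather than a char buf[TASK_COMM_LEN]. A usage sketch:

	char comm[TASK_COMM_LEN];

	get_task_comm(comm, current);	/* ok: sizeof(comm) == TASK_COMM_LEN */

	/*
	 * char *p = kmalloc(16, GFP_KERNEL);
	 * get_task_comm(p, current);
	 * no longer compiles: sizeof(p) is the pointer size, so the
	 * BUILD_BUG_ON() fires instead of silently copying
	 * sizeof(tsk->comm) bytes through an unchecked pointer.
	 */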
@@ -70,6 +70,7 @@ static inline int get_dumpable(struct mm_struct *mm)
 #define MMF_UNSTABLE		22	/* mm is unstable for copy_from_user */
 #define MMF_HUGE_ZERO_PAGE	23	/* mm has ever used the global huge zero page */
 #define MMF_DISABLE_THP		24	/* disable THP for all VMAs */
+#define MMF_OOM_VICTIM		25	/* mm is the oom victim */
 #define MMF_DISABLE_THP_MASK	(1 << MMF_DISABLE_THP)
 
 #define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
@@ -259,7 +259,10 @@ __FORTIFY_INLINE __kernel_size_t strlen(const char *p)
 {
 	__kernel_size_t ret;
 	size_t p_size = __builtin_object_size(p, 0);
-	if (p_size == (size_t)-1)
+
+	/* Work around gcc excess stack consumption issue */
+	if (p_size == (size_t)-1 ||
+	    (__builtin_constant_p(p[p_size - 1]) && p[p_size - 1] == '\0'))
 		return __builtin_strlen(p);
 	ret = strnlen(p, p_size);
 	if (p_size <= ret)
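The added disjunct lets known-terminated constants take the cheap path: __builtin_object_size(p, 0) yields the object size when the compiler can prove it, and __builtin_constant_p() tests whether the final byte folds to a compile-time NUL. A sketch of the two cases, assuming the fortified strlen() above is in effect; the variable names are illustrative:

	const char hello[] = "hello";	/* p_size == 6, hello[5] is a constant '\0' */
	char runtime[32] = "";		/* contents unknown at compile time */

	size_t a = strlen(hello);	/* takes __builtin_strlen(): foldable to 5,
					 * no fortified call, no extra stack use */
	size_t b = strlen(runtime);	/* goes through strnlen(p, p_size), so an
					 * unterminated buffer cannot overrun */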
@@ -1755,3 +1755,11 @@ Efault:
 	return -EFAULT;
 }
 #endif
+
+__weak void abort(void)
+{
+	BUG();
+
+	/* if that doesn't kill us, halt */
+	panic("Oops failed to kill thread");
+}
@@ -86,11 +86,12 @@ static int gid_cmp(const void *_a, const void *_b)
 	return gid_gt(a, b) - gid_lt(a, b);
 }
 
-static void groups_sort(struct group_info *group_info)
+void groups_sort(struct group_info *group_info)
 {
 	sort(group_info->gid, group_info->ngroups, sizeof(*group_info->gid),
 	     gid_cmp, NULL);
 }
+EXPORT_SYMBOL(groups_sort);
 
 /* a simple bsearch */
 int groups_search(const struct group_info *group_info, kgid_t grp)
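With sorting removed from set_groups() (see the following hunk), it becomes the allocator's responsibility: whoever fills a group_info must call groups_sort() once before publishing it, which is what every call site in this series now does. A sketch of the expected pattern, with hypothetical gids/ngroups inputs and error paths mostly elided:

	struct group_info *gi;
	int i, ret;

	gi = groups_alloc(ngroups);		/* ngroups: hypothetical count */
	if (!gi)
		return -ENOMEM;
	for (i = 0; i < ngroups; i++)
		gi->gid[i] = gids[i];		/* gids: hypothetical source array */

	groups_sort(gi);	/* sort once, so groups_search() can bsearch */
	ret = set_current_groups(gi);
	put_group_info(gi);
	return ret;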
@@ -122,7 +123,6 @@ int groups_search(const struct group_info *group_info, kgid_t grp)
 void set_groups(struct cred *new, struct group_info *group_info)
 {
 	put_group_info(new->group_info);
-	groups_sort(group_info);
 	get_group_info(group_info);
 	new->group_info = group_info;
 }
@@ -206,6 +206,7 @@ SYSCALL_DEFINE2(setgroups, int, gidsetsize, gid_t __user *, grouplist)
 		return retval;
 	}
 
+	groups_sort(group_info);
 	retval = set_current_groups(group_info);
 	put_group_info(group_info);
 
@@ -157,7 +157,7 @@ void notrace __sanitizer_cov_trace_cmp2(u16 arg1, u16 arg2)
 }
 EXPORT_SYMBOL(__sanitizer_cov_trace_cmp2);
 
-void notrace __sanitizer_cov_trace_cmp4(u16 arg1, u16 arg2)
+void notrace __sanitizer_cov_trace_cmp4(u32 arg1, u32 arg2)
 {
 	write_comp_data(KCOV_CMP_SIZE(2), arg1, arg2, _RET_IP_);
 }
@@ -183,7 +183,7 @@ void notrace __sanitizer_cov_trace_const_cmp2(u16 arg1, u16 arg2)
 }
 EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp2);
 
-void notrace __sanitizer_cov_trace_const_cmp4(u16 arg1, u16 arg2)
+void notrace __sanitizer_cov_trace_const_cmp4(u32 arg1, u32 arg2)
 {
 	write_comp_data(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2,
 			_RET_IP_);
@@ -192,6 +192,7 @@ SYSCALL_DEFINE2(setgroups16, int, gidsetsize, old_gid_t __user *, grouplist)
 		return retval;
 	}
 
+	groups_sort(group_info);
 	retval = set_current_groups(group_info);
 	put_group_info(group_info);
 
lib/rbtree.c
@@ -603,6 +603,16 @@ void rb_replace_node(struct rb_node *victim, struct rb_node *new,
 }
 EXPORT_SYMBOL(rb_replace_node);
 
+void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new,
+			    struct rb_root_cached *root)
+{
+	rb_replace_node(victim, new, &root->rb_root);
+
+	if (root->rb_leftmost == victim)
+		root->rb_leftmost = new;
+}
+EXPORT_SYMBOL(rb_replace_node_cached);
+
 void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new,
 			 struct rb_root *root)
 {
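A cached rbtree (struct rb_root_cached) keeps an rb_leftmost pointer so that rb_first_cached() is O(1); replacing the leftmost node via plain rb_replace_node() would leave that cache pointing at the stale node. The new helper fixes the cache as part of the replacement. A minimal usage sketch, with node setup elided:

	struct rb_root_cached tree = RB_ROOT_CACHED;
	struct rb_node *victim, *new;

	/* ... victim was inserted earlier with rb_insert_color_cached(),
	 * new is an uninserted node taking over victim's position ... */

	rb_replace_node_cached(victim, new, &tree);
	/* if victim was tree.rb_leftmost, the cache now points at new */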
@@ -62,8 +62,10 @@ int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
 	 * get_user_pages_longterm() and disallow it for filesystem-dax
 	 * mappings.
 	 */
-	if (vma_is_fsdax(vma))
-		return -EOPNOTSUPP;
+	if (vma_is_fsdax(vma)) {
+		ret = -EOPNOTSUPP;
+		goto out;
+	}
 
 	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) {
 		vec->got_ref = true;
@@ -1523,7 +1523,7 @@ static void kmemleak_scan(void)
 			if (page_count(page) == 0)
 				continue;
 			scan_block(page, page + 1, NULL);
-			if (!(pfn % (MAX_SCAN_SIZE / sizeof(*page))))
+			if (!(pfn & 63))
 				cond_resched();
 		}
 	}
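Assuming pfn advances by one per loop iteration, both forms below yield a resched check every 64th page; the mask form avoids the modulo against a computed divisor and does not depend on sizeof(*page) dividing evenly. Shown for comparison only:

	/* modulo form: the divisor is an expression the compiler must reduce */
	if (!(pfn % 64))
		cond_resched();

	/* mask form: x % 64 == x & 63 for unsigned x, always a single AND */
	if (!(pfn & 63))
		cond_resched();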
@@ -3831,7 +3831,8 @@ static inline int create_huge_pmd(struct vm_fault *vmf)
 	return VM_FAULT_FALLBACK;
 }
 
-static int wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd)
+/* `inline' is required to avoid gcc 4.1.2 build error */
+static inline int wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd)
 {
 	if (vma_is_anonymous(vmf->vma))
 		return do_huge_pmd_wp_page(vmf, orig_pmd);
mm/mmap.c
@@ -3019,20 +3019,20 @@ void exit_mmap(struct mm_struct *mm)
 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
 	unmap_vmas(&tlb, vma, 0, -1);
 
-	set_bit(MMF_OOM_SKIP, &mm->flags);
-	if (unlikely(tsk_is_oom_victim(current))) {
+	if (unlikely(mm_is_oom_victim(mm))) {
 		/*
 		 * Wait for oom_reap_task() to stop working on this
 		 * mm. Because MMF_OOM_SKIP is already set before
 		 * calling down_read(), oom_reap_task() will not run
 		 * on this "mm" post up_write().
 		 *
-		 * tsk_is_oom_victim() cannot be set from under us
-		 * either because current->mm is already set to NULL
+		 * mm_is_oom_victim() cannot be set from under us
+		 * either because victim->mm is already set to NULL
 		 * under task_lock before calling mmput and oom_mm is
-		 * set not NULL by the OOM killer only if current->mm
+		 * set not NULL by the OOM killer only if victim->mm
 		 * is found not NULL while holding the task_lock.
 		 */
+		set_bit(MMF_OOM_SKIP, &mm->flags);
 		down_write(&mm->mmap_sem);
 		up_write(&mm->mmap_sem);
 	}
@@ -683,8 +683,10 @@ static void mark_oom_victim(struct task_struct *tsk)
 		return;
 
 	/* oom_mm is bound to the signal struct life time. */
-	if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm))
+	if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) {
 		mmgrab(tsk->signal->oom_mm);
+		set_bit(MMF_OOM_VICTIM, &mm->flags);
+	}
 
 	/*
 	 * Make sure that the task is woken up from uninterruptible sleep
@@ -2684,6 +2684,7 @@ void free_unref_page_list(struct list_head *list)
 {
 	struct page *page, *next;
 	unsigned long flags, pfn;
+	int batch_count = 0;
 
 	/* Prepare pages for freeing */
 	list_for_each_entry_safe(page, next, list, lru) {
@@ -2700,6 +2701,16 @@ void free_unref_page_list(struct list_head *list)
 		set_page_private(page, 0);
 		trace_mm_page_free_batched(page);
 		free_unref_page_commit(page, pfn);
+
+		/*
+		 * Guard against excessive IRQ disabled times when we get
+		 * a large list of pages to free.
+		 */
+		if (++batch_count == SWAP_CLUSTER_MAX) {
+			local_irq_restore(flags);
+			batch_count = 0;
+			local_irq_save(flags);
+		}
 	}
 	local_irq_restore(flags);
 }
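The guard generalizes to any loop that processes a long list with interrupts disabled: briefly re-enabling IRQs every SWAP_CLUSTER_MAX (32) items bounds the IRQ-off window by a constant rather than by the list length. A sketch of the pattern; the item type, list head, process_item() and ITEM_BATCH are hypothetical:

	unsigned long flags;
	int batch_count = 0;
	struct item *obj, *next;		/* hypothetical item type */

	local_irq_save(flags);
	list_for_each_entry_safe(obj, next, &items, list) {
		process_item(obj);		/* work that needs IRQs off */

		/* Open a brief IRQ window every ITEM_BATCH iterations so a
		 * long list cannot keep interrupts disabled indefinitely. */
		if (++batch_count == ITEM_BATCH) {
			local_irq_restore(flags);
			batch_count = 0;
			local_irq_save(flags);
		}
	}
	local_irq_restore(flags);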
mm/slab.c
@@ -1584,11 +1584,8 @@ static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
 		       *dbg_redzone2(cachep, objp));
 	}
 
-	if (cachep->flags & SLAB_STORE_USER) {
-		pr_err("Last user: [<%p>](%pSR)\n",
-		       *dbg_userword(cachep, objp),
-		       *dbg_userword(cachep, objp));
-	}
+	if (cachep->flags & SLAB_STORE_USER)
+		pr_err("Last user: (%pSR)\n", *dbg_userword(cachep, objp));
 	realobj = (char *)objp + obj_offset(cachep);
 	size = cachep->object_size;
 	for (i = 0; i < size && lines; i += 16, lines--) {
@@ -1621,7 +1618,7 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp)
 			/* Mismatch ! */
 			/* Print header */
 			if (lines == 0) {
-				pr_err("Slab corruption (%s): %s start=%p, len=%d\n",
+				pr_err("Slab corruption (%s): %s start=%px, len=%d\n",
 				       print_tainted(), cachep->name,
 				       realobj, size);
 				print_objinfo(cachep, objp, 0);
@@ -1650,13 +1647,13 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp)
 		if (objnr) {
 			objp = index_to_obj(cachep, page, objnr - 1);
 			realobj = (char *)objp + obj_offset(cachep);
-			pr_err("Prev obj: start=%p, len=%d\n", realobj, size);
+			pr_err("Prev obj: start=%px, len=%d\n", realobj, size);
 			print_objinfo(cachep, objp, 2);
 		}
 		if (objnr + 1 < cachep->num) {
 			objp = index_to_obj(cachep, page, objnr + 1);
 			realobj = (char *)objp + obj_offset(cachep);
-			pr_err("Next obj: start=%p, len=%d\n", realobj, size);
+			pr_err("Next obj: start=%px, len=%d\n", realobj, size);
 			print_objinfo(cachep, objp, 2);
 		}
 	}
@@ -2608,7 +2605,7 @@ static void slab_put_obj(struct kmem_cache *cachep,
 	/* Verify double free bug */
 	for (i = page->active; i < cachep->num; i++) {
 		if (get_free_obj(page, i) == objnr) {
-			pr_err("slab: double free detected in cache '%s', objp %p\n",
+			pr_err("slab: double free detected in cache '%s', objp %px\n",
 			       cachep->name, objp);
 			BUG();
 		}
@@ -2772,7 +2769,7 @@ static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
 	else
 		slab_error(cache, "memory outside object was overwritten");
 
-	pr_err("%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
+	pr_err("%px: redzone 1:0x%llx, redzone 2:0x%llx\n",
 	       obj, redzone1, redzone2);
 }
 
@@ -3078,7 +3075,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 		if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
 				*dbg_redzone2(cachep, objp) != RED_INACTIVE) {
 			slab_error(cachep, "double free, or memory outside object was overwritten");
-			pr_err("%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
+			pr_err("%px: redzone 1:0x%llx, redzone 2:0x%llx\n",
 			       objp, *dbg_redzone1(cachep, objp),
 			       *dbg_redzone2(cachep, objp));
 		}
@@ -3091,7 +3088,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 		cachep->ctor(objp);
 	if (ARCH_SLAB_MINALIGN &&
 			((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) {
-		pr_err("0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
+		pr_err("0x%px: not aligned to ARCH_SLAB_MINALIGN=%d\n",
 		       objp, (int)ARCH_SLAB_MINALIGN);
 	}
 	return objp;
@@ -4283,7 +4280,7 @@ static void show_symbol(struct seq_file *m, unsigned long address)
 		return;
 	}
 #endif
-	seq_printf(m, "%p", (void *)address);
+	seq_printf(m, "%px", (void *)address);
 }
 
 static int leaks_show(struct seq_file *m, void *p)
@@ -231,6 +231,7 @@ static int gssx_dec_linux_creds(struct xdr_stream *xdr,
 			goto out_free_groups;
 		creds->cr_group_info->gid[i] = kgid;
 	}
+	groups_sort(creds->cr_group_info);
 
 	return 0;
 out_free_groups:
@@ -481,6 +481,7 @@ static int rsc_parse(struct cache_detail *cd,
 			goto out;
 		rsci.cred.cr_group_info->gid[i] = kgid;
 	}
+	groups_sort(rsci.cred.cr_group_info);
 
 	/* mech name */
 	len = qword_get(&mesg, buf, mlen);
@@ -520,6 +520,7 @@ static int unix_gid_parse(struct cache_detail *cd,
 		ug.gi->gid[i] = kgid;
 	}
 
+	groups_sort(ug.gi);
 	ugp = unix_gid_lookup(cd, uid);
 	if (ugp) {
 		struct cache_head *ch;
@@ -819,6 +820,7 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
 		kgid_t kgid = make_kgid(&init_user_ns, svc_getnl(argv));
 		cred->cr_group_info->gid[i] = kgid;
 	}
+	groups_sort(cred->cr_group_info);
 	if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
 		*authp = rpc_autherr_badverf;
 		return SVC_DENIED;
@@ -44,10 +44,10 @@
 set -o errexit
 set -o nounset
 
-READELF="${CROSS_COMPILE}readelf"
-ADDR2LINE="${CROSS_COMPILE}addr2line"
-SIZE="${CROSS_COMPILE}size"
-NM="${CROSS_COMPILE}nm"
+READELF="${CROSS_COMPILE:-}readelf"
+ADDR2LINE="${CROSS_COMPILE:-}addr2line"
+SIZE="${CROSS_COMPILE:-}size"
+NM="${CROSS_COMPILE:-}nm"
 
 command -v awk >/dev/null 2>&1 || die "awk isn't installed"
 command -v ${READELF} >/dev/null 2>&1 || die "readelf isn't installed"
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash

 # Sergey Senozhatsky, 2015
 # sergey.senozhatsky.work@gmail.com