Merge branch 'honglin/release' into 'release' (merge request !220)

rue: fix sparse warnings in RUE net/mm code reported by the kernel test robot
The upstream kernel test robot reported a few issues caused by the RUE functions.
Fix the related issues in the net/mm code.

Note:
This MR does not change the actual RUE functionality, so there is no need to re-trigger the RUE regression test already done for [1].

[1] https://git.woa.com/tlinux/tkernel5/-/merge_requests/197
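
The warnings fall into a few recurring patterns, all visible in the hunks below: sysctl handlers whose buffer argument was still annotated __user (on this kernel the ->proc_handler callbacks and helpers such as proc_dointvec_minmax() take a plain kernel pointer, so sparse flags an address-space mismatch), file-local functions and wait queues that should be static, memcg-specific code that needs CONFIG_MEMCG guards or an RCU read-side lock, and strncpy() calls replaced with strscpy(). Below is a minimal sketch of the handler-signature pattern; the handler name is hypothetical, not the exact tkernel code.

#include <linux/sysctl.h>

/*
 * Sketch only: what a fixed custom sysctl handler looks like. The buffer
 * is a kernel pointer, matching proc_dointvec_minmax(), and the handler
 * is static because it is referenced only via the ctl_table in this file.
 */
static int example_qos_sysctl_handler(struct ctl_table *table, int write,
				      void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	/* propagate the new value here (e.g. memory_qos_update()) */
	return 0;
}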
commit 24dfafc175
Author: frankjpliu
Date: 2024-10-28 06:46:29 +00:00

6 changed files with 38 additions and 34 deletions

View File

@@ -37,6 +37,8 @@ extern unsigned int sysctl_clean_dying_memcg_threshold;
extern void kclean_dying_memcg_stop(void);
extern void wakeup_kclean_dying_memcg(void);
extern atomic_long_t dying_memcgs_count;
+extern void memory_qos_update(void);
+extern int memory_qos_prio_reclaim_ratio_update(void);
/* Cgroup-specific page state, on top of universal node page state */
enum memcg_stat_item {

View File

@@ -108,8 +108,6 @@ extern int sysctl_vm_use_priority_oom;
extern int sysctl_vm_qos_highest_reclaim_prio;
extern unsigned int sysctl_vm_qos_prio_reclaim_ratio;
extern unsigned int sysctl_clean_dying_memcg_async;
-extern void memory_qos_update(void);
-extern int memory_qos_prio_reclaim_ratio_update(void);
static int vm_lowest_prio = CGROUP_PRIORITY_MAX;
static int twenty = 20;
#endif
@@ -624,7 +622,7 @@ static int setup_pagecache_limit(void)
}
static int pc_limit_proc_dointvec(struct ctl_table *table, int write,
-void __user *buffer, size_t *lenp, loff_t *ppos)
+void *buffer, size_t *lenp, loff_t *ppos)
{
int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
@@ -635,7 +633,7 @@ static int pc_limit_proc_dointvec(struct ctl_table *table, int write,
}
static int pc_reclaim_limit_proc_dointvec(struct ctl_table *table, int write,
-void __user *buffer, size_t *lenp, loff_t *ppos)
+void *buffer, size_t *lenp, loff_t *ppos)
{
int pre_reclaim_ratio = vm_pagecache_limit_reclaim_ratio;
int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
@@ -654,7 +652,7 @@ static int pc_reclaim_limit_proc_dointvec(struct ctl_table *table, int write,
}
static int pc_limit_async_handler(struct ctl_table *table, int write,
-void __user *buffer, size_t *lenp, loff_t *ppos)
+void *buffer, size_t *lenp, loff_t *ppos)
{
int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
@@ -1973,8 +1971,8 @@ int proc_do_static_key(struct ctl_table *table, int write,
}
#ifdef CONFIG_MEMCG
-int memory_qos_sysctl_handler(struct ctl_table *table, int write,
-void __user *buffer, size_t *lenp, loff_t *ppos)
+static int memory_qos_sysctl_handler(struct ctl_table *table, int write,
+void *buffer, size_t *lenp, loff_t *ppos)
{
int error;
@@ -2000,8 +1998,8 @@ out:
return error;
}
-int memory_qos_sysctl_highest_reclaim_prio_handler(struct ctl_table *table,
-int write, void __user *buffer,
+static int memory_qos_sysctl_highest_reclaim_prio_handler(struct ctl_table *table,
+int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
int error;
@@ -2016,8 +2014,8 @@ int memory_qos_sysctl_highest_reclaim_prio_handler(struct ctl_table *table,
return 0;
}
-int memory_qos_sysctl_prio_reclaim_ratio_handler(struct ctl_table *table,
-int write, void __user *buffer,
+static int memory_qos_sysctl_prio_reclaim_ratio_handler(struct ctl_table *table,
+int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
int error;
@@ -2043,7 +2041,7 @@ int memory_qos_sysctl_prio_reclaim_ratio_handler(struct ctl_table *table,
}
static int clean_dying_memcg_async_handler(struct ctl_table *table, int write,
-void __user *buffer, size_t *lenp, loff_t *ppos)
+void *buffer, size_t *lenp, loff_t *ppos)
{
int ret = proc_douintvec_minmax(table, write, buffer, lenp, ppos);
@@ -2065,7 +2063,7 @@ static int clean_dying_memcg_async_handler(struct ctl_table *table, int write,
}
static int clean_dying_memcg_threshold_handler(struct ctl_table *table,
-int write, void __user *buffer, size_t *lenp, loff_t *ppos)
+int write, void *buffer, size_t *lenp, loff_t *ppos)
{
unsigned int old_val = sysctl_clean_dying_memcg_threshold;
int ret = proc_douintvec_minmax(table, write, buffer, lenp, ppos);

View File

@@ -1794,7 +1794,9 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
struct page *page;
struct page *tmp;
struct folio *folio;
+#ifdef CONFIG_MEMCG
struct mem_cgroup *memcg;
+#endif
pgoff_t index = 0, end = start + HPAGE_PMD_NR;
LIST_HEAD(pagelist);
XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
@@ -2125,11 +2127,12 @@ immap_locked:
if (is_shmem)
__mod_lruvec_page_state(hpage, NR_SHMEM_THPS, nr);
else {
+#ifdef CONFIG_MEMCG
memcg = page_memcg(hpage);
if (!mem_cgroup_disabled() && memcg)
page_counter_charge(&memcg->pagecache, nr);
+#endif
__mod_lruvec_page_state(hpage, NR_FILE_THPS, nr);
}

View File

@@ -1374,7 +1374,9 @@ static void do_mem_cgroup_account_oom_skip(struct mem_cgroup *memcg,
void mem_cgroup_account_oom_skip(struct task_struct *task,
struct oom_control *oc)
{
+rcu_read_lock();
do_mem_cgroup_account_oom_skip(mem_cgroup_from_task(task), oc);
+rcu_read_unlock();
}
/*
@@ -3129,7 +3131,7 @@ static int memcg_notify_prio_change(struct mem_cgroup *memcg,
return 0;
}
-int mem_cgroup_notify_prio_change(struct cgroup_subsys_state *css,
+static int mem_cgroup_notify_prio_change(struct cgroup_subsys_state *css,
u16 old_prio, u16 new_prio)
{
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
@@ -4222,7 +4224,7 @@ vm_pagecache_limit_retry_times __read_mostly = MEMCG_PAGECACHE_RETRIES;
void mem_cgroup_shrink_pagecache(struct mem_cgroup *memcg, gfp_t gfp_mask)
{
long pages_reclaimed;
-unsigned long pages_used, pages_max, goal_pages_used, pre_used;
+unsigned long pages_used, pages_max, goal_pages_used;
unsigned int retry_times = 0;
unsigned int limit_retry_times;
u32 max_ratio;
@@ -4267,7 +4269,6 @@ void mem_cgroup_shrink_pagecache(struct mem_cgroup *memcg, gfp_t gfp_mask)
if (fatal_signal_pending(current))
break;
-pre_used = pages_used;
pages_reclaimed = shrink_page_cache_memcg(gfp_mask, memcg,
pages_used - goal_pages_used);
@@ -4379,7 +4380,7 @@ static u64 memory_pagecache_max_read(struct cgroup_subsys_state *css,
return memcg->pagecache_max_ratio;
}
-unsigned long mem_cgroup_pagecache_get_reclaim_pages(struct mem_cgroup *memcg)
+static unsigned long mem_cgroup_pagecache_get_reclaim_pages(struct mem_cgroup *memcg)
{
unsigned long goal_pages_used, pages_used, pages_max;
@@ -7241,7 +7242,7 @@ void wakeup_kclean_dying_memcg(void)
wake_up_interruptible(&kclean_dying_memcg_wq);
}
-void charge_dying_memcgs(struct mem_cgroup *memcg)
+static void charge_dying_memcgs(struct mem_cgroup *memcg)
{
if (sysctl_vm_memory_qos == 0)
return;
@@ -10271,7 +10272,7 @@ extern unsigned long shrink_slab(gfp_t gfp_mask, int nid,
struct mem_cgroup *memcg,
int priority);
-void reap_slab(struct mem_cgroup *memcg)
+static void reap_slab(struct mem_cgroup *memcg)
{
struct mem_cgroup *parent;

View File

@@ -7456,7 +7456,7 @@ unsigned long __pagecache_over_limit(void)
}
int proc_pagecache_system_usage(struct ctl_table *table, int write,
-void __user *buffer, size_t *lenp, loff_t *ppos)
+void *buffer, size_t *lenp, loff_t *ppos)
{
vm_pagecache_system_usage = __pagecache_over_limit();
@@ -7512,8 +7512,8 @@ static void pagecache_reclaim_unlock_zone(struct zone *zone)
* reclaim lock on any zone are sleeping on this waitqueue.
* So this is basically a congestion wait queue for them.
*/
-DECLARE_WAIT_QUEUE_HEAD(pagecache_reclaim_wq);
-DECLARE_WAIT_QUEUE_HEAD(kpagecache_limitd_wq);
+static DECLARE_WAIT_QUEUE_HEAD(pagecache_reclaim_wq);
+static DECLARE_WAIT_QUEUE_HEAD(kpagecache_limitd_wq);
/*
* Similar to shrink_zone but it has a different consumer - pagecache limit

View File

@@ -44,7 +44,7 @@ struct cgroup_cls_state *task_cls_state(struct task_struct *p)
}
EXPORT_SYMBOL_GPL(task_cls_state);
-int cls_cgroup_stats_init(struct cls_cgroup_stats *stats)
+static int cls_cgroup_stats_init(struct cls_cgroup_stats *stats)
{
struct {
struct nlattr nla;
@@ -73,7 +73,7 @@ int cls_cgroup_stats_init(struct cls_cgroup_stats *stats)
return err;
}
-void cls_cgroup_stats_destroy(struct cls_cgroup_stats *stats)
+static void cls_cgroup_stats_destroy(struct cls_cgroup_stats *stats)
{
rtnl_lock();
gen_kill_estimator(&stats->est);
@@ -373,7 +373,7 @@ static ssize_t write_bps_dev_limit(struct kernfs_open_file *of,
kfree(limit_bw_config[ifindex].name);
limit_bw_config[ifindex].name = name;
-strncpy(limit_bw_config[ifindex].name, dev_name, strlen(dev_name));
+strscpy(limit_bw_config[ifindex].name, dev_name, strlen(dev_name));
if (!rx_rate)
cs->rx_dev_scale[ifindex] = WND_DIVISOR;
@@ -521,7 +521,7 @@ out_free_lports:
return ret;
}
-int net_cgroup_notify_prio_change(struct cgroup_subsys_state *css,
+static int net_cgroup_notify_prio_change(struct cgroup_subsys_state *css,
u16 old_prio, u16 new_prio)
{
if (css)
@@ -615,7 +615,7 @@ static ssize_t write_dev_online_bps_max(struct kernfs_open_file *of,
kfree(online_max_config[ifindex].name);
online_max_config[ifindex].name = name;
-strncpy(online_max_config[ifindex].name, dev_name, strlen(dev_name));
+strscpy(online_max_config[ifindex].name, dev_name, strlen(dev_name));
if (rx_rate > -1) {
online_max_config[ifindex].rx_bps_max = rx_rate;
@@ -726,7 +726,7 @@ static ssize_t write_dev_online_bps_min(struct kernfs_open_file *of,
kfree(online_min_config[ifindex].name);
online_min_config[ifindex].name = name;
-strncpy(online_min_config[ifindex].name, dev_name, strlen(dev_name));
+strscpy(online_min_config[ifindex].name, dev_name, strlen(dev_name));
if (rx_rate > -1)
RUE_CALL_INT(NET, write_rx_online_bps_min, cs, ifindex, rx_rate);
@@ -835,7 +835,7 @@ static ssize_t write_dev_bps_config(struct kernfs_open_file *of,
kfree(bw_config[ifindex].name);
bw_config[ifindex].name = name;
-strncpy(bw_config[ifindex].name, dev_name, strlen(dev_name));
+strscpy(bw_config[ifindex].name, dev_name, strlen(dev_name));
if (v[0] > -1 && v[1] > -1) {
bw_config[ifindex].rx_bps_min = v[0];
@@ -932,7 +932,7 @@ static u64 read_rx_min_rwnd_segs(struct cgroup_subsys_state *css,
return RUE_CALL_TYPE(NET, read_rx_min_rwnd_segs, u64, css, cft);
}
-int read_class_stat(struct seq_file *sf, void *v)
+static int read_class_stat(struct seq_file *sf, void *v)
{
struct cgroup_subsys_state *css = seq_css(sf);
@@ -941,19 +941,19 @@ int read_class_stat(struct seq_file *sf, void *v)
return 0;
}
-int rx_dump(struct seq_file *sf, void *v)
+static int rx_dump(struct seq_file *sf, void *v)
{
RUE_CALL_VOID(NET, dump_rx_tb, sf);
return 0;
}
-int tx_dump(struct seq_file *sf, void *v)
+static int tx_dump(struct seq_file *sf, void *v)
{
RUE_CALL_VOID(NET, dump_tx_tb, sf);
return 0;
}
-int bps_limit_dump(struct seq_file *sf, void *v)
+static int bps_limit_dump(struct seq_file *sf, void *v)
{
struct cgroup_subsys_state *css = seq_css(sf);