/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (c) 2010 Linaro
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */
|
|
|
|
|
2010-09-10 04:40:39 +08:00
|
|
|
#include <linux/kthread.h>
|
2016-01-08 08:05:19 +08:00
|
|
|
#include <linux/init.h>
|
2010-10-01 12:26:52 +08:00
|
|
|
#include <linux/debugfs.h>
|
|
|
|
#include <linux/seq_file.h>
|
|
|
|
|
2010-09-28 08:25:23 +08:00
|
|
|
/* Global control variables for rcupdate callback mechanism. */
struct rcu_ctrlblk {
	struct rcu_head *rcucblist;	/* List of pending callbacks (CBs). */
	struct rcu_head **donetail;	/* ->next pointer of last "done" CB. */
	struct rcu_head **curtail;	/* ->next pointer of last CB. */
	/* NOTE(review): the fields below are wrapped in RCU_TRACE(), so they
	 * presumably exist only when RCU tracing is configured -- confirm
	 * against the RCU_TRACE() macro definition. */
	RCU_TRACE(long qlen);		/* Number of pending CBs. */
	RCU_TRACE(unsigned long gp_start);	/* Start time for stalls. */
	RCU_TRACE(unsigned long ticks_this_gp);	/* Statistic for stalls. */
	RCU_TRACE(unsigned long jiffies_stall);	/* Jiffies at next stall. */
	RCU_TRACE(const char *name);	/* Name of RCU type. */
};
|
|
|
|
|
|
|
|
/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_sched_ctrlblk = {
	/* Both tail pointers start at the head of the (empty) callback list. */
	.donetail	= &rcu_sched_ctrlblk.rcucblist,
	.curtail	= &rcu_sched_ctrlblk.rcucblist,
	RCU_TRACE(.name = "rcu_sched")
};
|
|
|
|
|
|
|
|
/* Control block for the rcu_bh flavor, mirroring rcu_sched_ctrlblk. */
static struct rcu_ctrlblk rcu_bh_ctrlblk = {
	/* Both tail pointers start at the head of the (empty) callback list. */
	.donetail	= &rcu_bh_ctrlblk.rcucblist,
	.curtail	= &rcu_bh_ctrlblk.rcucblist,
	RCU_TRACE(.name = "rcu_bh")
};
|
|
|
|
|
|
|
|
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#include <linux/kernel_stat.h>

/*
 * Tracks how far boot has progressed for RCU lockdep's benefit; set to
 * RCU_SCHEDULER_RUNNING by rcu_scheduler_starting() below.
 */
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);
|
2010-04-03 07:17:17 +08:00
|
|
|
|
|
|
|
/*
 * During boot, we forgive RCU lockdep issues.  After this function is
 * invoked, we start taking RCU lockdep issues seriously.  Note that unlike
 * Tree RCU, Tiny RCU transitions directly from RCU_SCHEDULER_INACTIVE
 * to RCU_SCHEDULER_RUNNING, skipping the RCU_SCHEDULER_INIT stage.
 * The reason for this is that Tiny RCU does not need kthreads, so does
 * not have to care about the fact that the scheduler is half-initialized
 * at a certain phase of the boot process.
 */
void __init rcu_scheduler_starting(void)
{
	/* Sanity check: no context switches should have happened yet. */
	WARN_ON(nr_context_switches() > 0);
	rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
}
|
|
|
|
|
|
|
|
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
|
2010-09-28 08:25:23 +08:00
|
|
|
|
2010-10-01 12:26:52 +08:00
|
|
|
#ifdef CONFIG_RCU_TRACE
|
|
|
|
|
|
|
|
static void rcu_trace_sub_qlen(struct rcu_ctrlblk *rcp, int n)
|
|
|
|
{
|
|
|
|
unsigned long flags;
|
|
|
|
|
2012-08-22 03:14:19 +08:00
|
|
|
local_irq_save(flags);
|
2010-10-01 12:26:52 +08:00
|
|
|
rcp->qlen -= n;
|
2012-08-22 03:14:19 +08:00
|
|
|
local_irq_restore(flags);
|
2010-10-01 12:26:52 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Dump statistics for TINY_RCU, such as they are.
|
|
|
|
*/
|
|
|
|
static int show_tiny_stats(struct seq_file *m, void *unused)
|
|
|
|
{
|
|
|
|
seq_printf(m, "rcu_sched: qlen: %ld\n", rcu_sched_ctrlblk.qlen);
|
|
|
|
seq_printf(m, "rcu_bh: qlen: %ld\n", rcu_bh_ctrlblk.qlen);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Open callback for the "rcudata" debugfs file; wires up show_tiny_stats(). */
static int show_tiny_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, show_tiny_stats, NULL);
}
|
|
|
|
|
|
|
|
/* File operations for the "rcudata" debugfs file (single-shot seq_file). */
static const struct file_operations show_tiny_stats_fops = {
	.owner = THIS_MODULE,
	.open = show_tiny_stats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
|
|
|
|
|
|
|
|
static struct dentry *rcudir;
|
|
|
|
|
|
|
|
static int __init rcutiny_trace_init(void)
|
|
|
|
{
|
|
|
|
struct dentry *retval;
|
|
|
|
|
|
|
|
rcudir = debugfs_create_dir("rcu", NULL);
|
|
|
|
if (!rcudir)
|
|
|
|
goto free_out;
|
|
|
|
retval = debugfs_create_file("rcudata", 0444, rcudir,
|
|
|
|
NULL, &show_tiny_stats_fops);
|
|
|
|
if (!retval)
|
|
|
|
goto free_out;
|
|
|
|
return 0;
|
|
|
|
free_out:
|
|
|
|
debugfs_remove_recursive(rcudir);
|
|
|
|
return 1;
|
|
|
|
}
|
2016-01-08 08:05:19 +08:00
|
|
|
device_initcall(rcutiny_trace_init);
|
2010-10-01 12:26:52 +08:00
|
|
|
|
2013-03-28 01:43:02 +08:00
|
|
|
/*
 * Check for a grace-period stall on rcp, complaining and rearming the
 * stall timeout if one is detected.  ULONG_CMP_GE() makes the jiffies
 * comparisons safe against counter wrap.
 */
static void check_cpu_stall(struct rcu_ctrlblk *rcp)
{
	unsigned long j;
	unsigned long js;

	if (rcu_cpu_stall_suppress)
		return;
	rcp->ticks_this_gp++;
	j = jiffies;
	/* READ_ONCE/WRITE_ONCE pair: ->jiffies_stall may be updated elsewhere. */
	js = READ_ONCE(rcp->jiffies_stall);
	if (rcp->rcucblist && ULONG_CMP_GE(j, js)) {
		/* Callbacks pending past the deadline: report the stall... */
		pr_err("INFO: %s stall on CPU (%lu ticks this GP) idle=%llx (t=%lu jiffies q=%ld)\n",
		       rcp->name, rcp->ticks_this_gp, DYNTICK_TASK_EXIT_IDLE,
		       jiffies - rcp->gp_start, rcp->qlen);
		dump_stack();
		/* ...and back off: next complaint no sooner than 3x the check interval. */
		WRITE_ONCE(rcp->jiffies_stall,
			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
	} else if (ULONG_CMP_GE(j, js)) {
		/* Deadline passed but no callbacks: just rearm the timeout. */
		WRITE_ONCE(rcp->jiffies_stall,
			   jiffies + rcu_jiffies_till_stall_check());
	}
}
|
|
|
|
|
|
|
|
static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp)
|
|
|
|
{
|
|
|
|
rcp->ticks_this_gp = 0;
|
|
|
|
rcp->gp_start = jiffies;
|
2015-03-04 06:57:58 +08:00
|
|
|
WRITE_ONCE(rcp->jiffies_stall,
|
|
|
|
jiffies + rcu_jiffies_till_stall_check());
|
2013-03-28 01:43:02 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Run the stall check on both flavors; compiles away without RCU_TRACE. */
static void check_cpu_stalls(void)
{
	RCU_TRACE(check_cpu_stall(&rcu_bh_ctrlblk));
	RCU_TRACE(check_cpu_stall(&rcu_sched_ctrlblk));
}
|
2013-04-16 22:49:22 +08:00
|
|
|
|
|
|
|
#endif /* #ifdef CONFIG_RCU_TRACE */
|