Merge branch 'akpm' (fixes from Andrew)
Merge misc fixes from Andrew Morton.

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mqueue: sys_mq_open: do not call mnt_drop_write() if read-only
  mm/hotplug: only free wait_table if it's allocated by vmalloc
  dma-debug: update DMA debug API to better handle multiple mappings of a buffer
  dma-debug: fix locking bug in check_unmap()
  drivers/rtc/rtc-at91rm9200.c: use a variable for storing IMR
  drivers/video/ep93xx-fb.c: include <linux/io.h> for devm_ioremap()
  drivers/rtc/rtc-da9052.c: fix for rtc device registration
  mm: zone_end_pfn is too small
  poweroff: change orderly_poweroff() to use schedule_work()
  mm/hugetlb: fix total hugetlbfs pages count when using memory overcommit accounting
  printk: Provide a wake_up_klogd() off-case
  irq_work.h: fix warning when CONFIG_IRQ_WORK=n
commit 14629ed314

drivers/rtc/rtc-at91rm9200.c
@@ -44,6 +44,7 @@ static DECLARE_COMPLETION(at91_rtc_updated);
 static unsigned int at91_alarm_year = AT91_RTC_EPOCH;
 static void __iomem *at91_rtc_regs;
 static int irq;
+static u32 at91_rtc_imr;
 
 /*
  * Decode time/date into rtc_time structure
@@ -108,9 +109,11 @@ static int at91_rtc_settime(struct device *dev, struct rtc_time *tm)
 	cr = at91_rtc_read(AT91_RTC_CR);
 	at91_rtc_write(AT91_RTC_CR, cr | AT91_RTC_UPDCAL | AT91_RTC_UPDTIM);
 
+	at91_rtc_imr |= AT91_RTC_ACKUPD;
 	at91_rtc_write(AT91_RTC_IER, AT91_RTC_ACKUPD);
 	wait_for_completion(&at91_rtc_updated);	/* wait for ACKUPD interrupt */
 	at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD);
+	at91_rtc_imr &= ~AT91_RTC_ACKUPD;
 
 	at91_rtc_write(AT91_RTC_TIMR,
 		  bin2bcd(tm->tm_sec) << 0
@@ -142,7 +145,7 @@ static int at91_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
 	tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year);
 	tm->tm_year = at91_alarm_year - 1900;
 
-	alrm->enabled = (at91_rtc_read(AT91_RTC_IMR) & AT91_RTC_ALARM)
+	alrm->enabled = (at91_rtc_imr & AT91_RTC_ALARM)
 			? 1 : 0;
 
 	dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
@@ -168,6 +171,7 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
 	tm.tm_sec = alrm->time.tm_sec;
 
 	at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM);
+	at91_rtc_imr &= ~AT91_RTC_ALARM;
 	at91_rtc_write(AT91_RTC_TIMALR,
 		  bin2bcd(tm.tm_sec) << 0
 		| bin2bcd(tm.tm_min) << 8
@@ -180,6 +184,7 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
 
 	if (alrm->enabled) {
 		at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
+		at91_rtc_imr |= AT91_RTC_ALARM;
 		at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM);
 	}
 
@@ -196,9 +201,12 @@ static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
 
 	if (enabled) {
 		at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
+		at91_rtc_imr |= AT91_RTC_ALARM;
 		at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM);
-	} else
+	} else {
 		at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM);
+		at91_rtc_imr &= ~AT91_RTC_ALARM;
+	}
 
 	return 0;
 }
@@ -207,12 +215,10 @@ static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
  */
 static int at91_rtc_proc(struct device *dev, struct seq_file *seq)
 {
-	unsigned long imr = at91_rtc_read(AT91_RTC_IMR);
-
 	seq_printf(seq, "update_IRQ\t: %s\n",
-			(imr & AT91_RTC_ACKUPD) ? "yes" : "no");
+			(at91_rtc_imr & AT91_RTC_ACKUPD) ? "yes" : "no");
 	seq_printf(seq, "periodic_IRQ\t: %s\n",
-			(imr & AT91_RTC_SECEV) ? "yes" : "no");
+			(at91_rtc_imr & AT91_RTC_SECEV) ? "yes" : "no");
 
 	return 0;
 }
@@ -227,7 +233,7 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id)
 	unsigned int rtsr;
 	unsigned long events = 0;
 
-	rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_read(AT91_RTC_IMR);
+	rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_imr;
 	if (rtsr) {		/* this interrupt is shared!  Is it ours? */
 		if (rtsr & AT91_RTC_ALARM)
 			events |= (RTC_AF | RTC_IRQF);
@@ -291,6 +297,7 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
 	at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM |
 					AT91_RTC_SECEV | AT91_RTC_TIMEV |
 					AT91_RTC_CALEV);
+	at91_rtc_imr = 0;
 
 	ret = request_irq(irq, at91_rtc_interrupt,
 			   IRQF_SHARED,
@@ -329,6 +336,7 @@ static int __exit at91_rtc_remove(struct platform_device *pdev)
 	at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM |
 					AT91_RTC_SECEV | AT91_RTC_TIMEV |
 					AT91_RTC_CALEV);
+	at91_rtc_imr = 0;
 	free_irq(irq, pdev);
 
 	rtc_device_unregister(rtc);
@@ -341,31 +349,35 @@ static int __exit at91_rtc_remove(struct platform_device *pdev)
 
 /* AT91RM9200 RTC Power management control */
 
-static u32 at91_rtc_imr;
+static u32 at91_rtc_bkpimr;
+
 
 static int at91_rtc_suspend(struct device *dev)
 {
 	/* this IRQ is shared with DBGU and other hardware which isn't
 	 * necessarily doing PM like we are...
 	 */
-	at91_rtc_imr = at91_rtc_read(AT91_RTC_IMR)
-			& (AT91_RTC_ALARM|AT91_RTC_SECEV);
-	if (at91_rtc_imr) {
-		if (device_may_wakeup(dev))
+	at91_rtc_bkpimr = at91_rtc_imr & (AT91_RTC_ALARM|AT91_RTC_SECEV);
+	if (at91_rtc_bkpimr) {
+		if (device_may_wakeup(dev)) {
 			enable_irq_wake(irq);
-		else
-			at91_rtc_write(AT91_RTC_IDR, at91_rtc_imr);
-	}
+		} else {
+			at91_rtc_write(AT91_RTC_IDR, at91_rtc_bkpimr);
+			at91_rtc_imr &= ~at91_rtc_bkpimr;
+		}
+	}
 	return 0;
 }
 
 static int at91_rtc_resume(struct device *dev)
 {
-	if (at91_rtc_imr) {
-		if (device_may_wakeup(dev))
+	if (at91_rtc_bkpimr) {
+		if (device_may_wakeup(dev)) {
 			disable_irq_wake(irq);
-		else
-			at91_rtc_write(AT91_RTC_IER, at91_rtc_imr);
+		} else {
+			at91_rtc_imr |= at91_rtc_bkpimr;
+			at91_rtc_write(AT91_RTC_IER, at91_rtc_bkpimr);
+		}
 	}
 	return 0;
 }

drivers/rtc/rtc-at91rm9200.h
@@ -64,7 +64,6 @@
 #define	AT91_RTC_SCCR		0x1c			/* Status Clear Command Register */
 #define	AT91_RTC_IER		0x20			/* Interrupt Enable Register */
 #define	AT91_RTC_IDR		0x24			/* Interrupt Disable Register */
-#define	AT91_RTC_IMR		0x28			/* Interrupt Mask Register */
 
 #define	AT91_RTC_VER		0x2c			/* Valid Entry Register */
 #define	AT91_RTC_NVTIM		(1 <<  0)		/* Non valid Time */

drivers/rtc/rtc-da9052.c
@@ -239,11 +239,9 @@ static int da9052_rtc_probe(struct platform_device *pdev)
 
 	rtc->da9052 = dev_get_drvdata(pdev->dev.parent);
 	platform_set_drvdata(pdev, rtc);
-	rtc->irq = platform_get_irq_byname(pdev, "ALM");
-	ret = devm_request_threaded_irq(&pdev->dev, rtc->irq, NULL,
-				da9052_rtc_irq,
-				IRQF_TRIGGER_LOW | IRQF_ONESHOT,
-				"ALM", rtc);
+	rtc->irq = DA9052_IRQ_ALARM;
+	ret = da9052_request_irq(rtc->da9052, rtc->irq, "ALM",
+				da9052_rtc_irq, rtc);
 	if (ret != 0) {
 		rtc_err(rtc->da9052, "irq registration failed: %d\n", ret);
 		return ret;

drivers/video/ep93xx-fb.c
@@ -23,6 +23,7 @@
 #include <linux/slab.h>
 #include <linux/clk.h>
 #include <linux/fb.h>
+#include <linux/io.h>
 
 #include <linux/platform_data/video-ep93xx.h>
 

include/linux/irq_work.h
@@ -37,7 +37,7 @@ void irq_work_sync(struct irq_work *work);
 #ifdef CONFIG_IRQ_WORK
 bool irq_work_needs_cpu(void);
 #else
-static bool irq_work_needs_cpu(void) { return false; }
+static inline bool irq_work_needs_cpu(void) { return false; }
 #endif
 
 #endif /* _LINUX_IRQ_WORK_H */
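
Illustrative sketch (not part of this commit) of the warning the irq_work.h change silences: with CONFIG_IRQ_WORK=n the header-provided stub is compiled into every file that includes it, and a plain static function that is never called trips -Wunused-function, while a static inline one does not. File and function names below are made up for the demo.

/* unused_demo.c: compile with `gcc -Wall -c unused_demo.c` (names are
 * made up for the demo).  The plain static stub triggers
 * -Wunused-function; the static inline one compiles silently, which is
 * why the CONFIG_IRQ_WORK=n stub above gains the `inline` keyword.
 */
#include <stdbool.h>

static bool plain_stub(void) { return false; }		/* warns: defined but not used */
static inline bool inline_stub(void) { return false; }	/* no warning */

int main(void)
{
	return 0;
}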

include/linux/kernel.h
@@ -390,7 +390,6 @@ extern struct pid *session_of_pgrp(struct pid *pgrp);
 unsigned long int_sqrt(unsigned long);
 
 extern void bust_spinlocks(int yes);
-extern void wake_up_klogd(void);
 extern int oops_in_progress;		/* If set, an oops, panic(), BUG() or die() is in progress */
 extern int panic_timeout;
 extern int panic_on_oops;

include/linux/mmzone.h
@@ -527,7 +527,7 @@ static inline int zone_is_oom_locked(const struct zone *zone)
 	return test_bit(ZONE_OOM_LOCKED, &zone->flags);
 }
 
-static inline unsigned zone_end_pfn(const struct zone *zone)
+static inline unsigned long zone_end_pfn(const struct zone *zone)
 {
 	return zone->zone_start_pfn + zone->spanned_pages;
 }
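
Illustrative sketch (not part of this commit) of why the wider return type matters: zone_start_pfn and spanned_pages are unsigned long, so on a 64-bit machine with memory above roughly 16 TiB (with 4 KiB pages) the end PFN no longer fits in a 32-bit unsigned and the old signature silently truncated it. The values below are hypothetical.

/* pfn_truncation_demo.c: compile with `gcc -Wall pfn_truncation_demo.c`
 * on a 64-bit host (values are hypothetical).
 */
#include <stdio.h>

int main(void)
{
	unsigned long zone_start_pfn = 0x100000000UL;	/* PFN at the 16 TiB mark */
	unsigned long spanned_pages = 0x1000;

	unsigned old_ret = zone_start_pfn + spanned_pages;	/* old: unsigned      */
	unsigned long new_ret = zone_start_pfn + spanned_pages;	/* new: unsigned long */

	printf("truncated: 0x%x\nfull:      0x%lx\n", old_ret, new_ret);
	return 0;
}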

include/linux/printk.h
@@ -134,6 +134,8 @@ extern int printk_delay_msec;
 extern int dmesg_restrict;
 extern int kptr_restrict;
 
+extern void wake_up_klogd(void);
+
 void log_buf_kexec_setup(void);
 void __init setup_log_buf(int early);
 #else
@@ -162,6 +164,10 @@ static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies,
 	return false;
 }
 
+static inline void wake_up_klogd(void)
+{
+}
+
 static inline void log_buf_kexec_setup(void)
 {
 }

ipc/mqueue.c
@@ -840,7 +840,8 @@ out_putfd:
 		fd = error;
 	}
 	mutex_unlock(&root->d_inode->i_mutex);
-	mnt_drop_write(mnt);
+	if (!ro)
+		mnt_drop_write(mnt);
 out_putname:
 	putname(name);
 	return fd;
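
Illustrative sketch (not part of this commit) of the situation the mqueue fix balances: when the mqueue filesystem is mounted read-only, mnt_want_write() fails, so the paths that never took the write reference must not call mnt_drop_write(). A hypothetical userspace trigger, assuming a queue named /demo already exists and /dev/mqueue has been remounted read-only:

/* mq_open_ro_demo.c: gcc -Wall mq_open_ro_demo.c -lrt
 * Setup (hypothetical): create /demo first, then
 *   mount -o remount,ro /dev/mqueue
 * Opening the existing queue with O_CREAT still succeeds; before the fix
 * the kernel dropped a write reference it never took on this path.
 */
#include <fcntl.h>
#include <mqueue.h>
#include <stdio.h>

int main(void)
{
	mqd_t q = mq_open("/demo", O_CREAT | O_RDONLY, 0600, NULL);

	if (q == (mqd_t)-1) {
		perror("mq_open");
		return 1;
	}
	mq_close(q);
	return 0;
}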

kernel/printk.c
@@ -63,8 +63,6 @@ void asmlinkage __attribute__((weak)) early_printk(const char *fmt, ...)
 #define MINIMUM_CONSOLE_LOGLEVEL 1 /* Minimum loglevel we let people use */
 #define DEFAULT_CONSOLE_LOGLEVEL 7 /* anything MORE serious than KERN_DEBUG */
 
-DECLARE_WAIT_QUEUE_HEAD(log_wait);
-
 int console_printk[4] = {
 	DEFAULT_CONSOLE_LOGLEVEL,	/* console_loglevel */
 	DEFAULT_MESSAGE_LOGLEVEL,	/* default_message_loglevel */
@@ -224,6 +222,7 @@ struct log {
 static DEFINE_RAW_SPINLOCK(logbuf_lock);
 
 #ifdef CONFIG_PRINTK
+DECLARE_WAIT_QUEUE_HEAD(log_wait);
 /* the next printk record to read by syslog(READ) or /proc/kmsg */
 static u64 syslog_seq;
 static u32 syslog_idx;
@@ -1957,45 +1956,6 @@ int is_console_locked(void)
 	return console_locked;
 }
 
-/*
- * Delayed printk version, for scheduler-internal messages:
- */
-#define PRINTK_BUF_SIZE		512
-
-#define PRINTK_PENDING_WAKEUP	0x01
-#define PRINTK_PENDING_SCHED	0x02
-
-static DEFINE_PER_CPU(int, printk_pending);
-static DEFINE_PER_CPU(char [PRINTK_BUF_SIZE], printk_sched_buf);
-
-static void wake_up_klogd_work_func(struct irq_work *irq_work)
-{
-	int pending = __this_cpu_xchg(printk_pending, 0);
-
-	if (pending & PRINTK_PENDING_SCHED) {
-		char *buf = __get_cpu_var(printk_sched_buf);
-		printk(KERN_WARNING "[sched_delayed] %s", buf);
-	}
-
-	if (pending & PRINTK_PENDING_WAKEUP)
-		wake_up_interruptible(&log_wait);
-}
-
-static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = {
-	.func = wake_up_klogd_work_func,
-	.flags = IRQ_WORK_LAZY,
-};
-
-void wake_up_klogd(void)
-{
-	preempt_disable();
-	if (waitqueue_active(&log_wait)) {
-		this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP);
-		irq_work_queue(&__get_cpu_var(wake_up_klogd_work));
-	}
-	preempt_enable();
-}
-
 static void console_cont_flush(char *text, size_t size)
 {
 	unsigned long flags;
@@ -2458,6 +2418,44 @@ static int __init printk_late_init(void)
 late_initcall(printk_late_init);
 
 #if defined CONFIG_PRINTK
+/*
+ * Delayed printk version, for scheduler-internal messages:
+ */
+#define PRINTK_BUF_SIZE		512
+
+#define PRINTK_PENDING_WAKEUP	0x01
+#define PRINTK_PENDING_SCHED	0x02
+
+static DEFINE_PER_CPU(int, printk_pending);
+static DEFINE_PER_CPU(char [PRINTK_BUF_SIZE], printk_sched_buf);
+
+static void wake_up_klogd_work_func(struct irq_work *irq_work)
+{
+	int pending = __this_cpu_xchg(printk_pending, 0);
+
+	if (pending & PRINTK_PENDING_SCHED) {
+		char *buf = __get_cpu_var(printk_sched_buf);
+		printk(KERN_WARNING "[sched_delayed] %s", buf);
+	}
+
+	if (pending & PRINTK_PENDING_WAKEUP)
+		wake_up_interruptible(&log_wait);
+}
+
+static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = {
+	.func = wake_up_klogd_work_func,
+	.flags = IRQ_WORK_LAZY,
+};
+
+void wake_up_klogd(void)
+{
+	preempt_disable();
+	if (waitqueue_active(&log_wait)) {
+		this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP);
+		irq_work_queue(&__get_cpu_var(wake_up_klogd_work));
+	}
+	preempt_enable();
+}
 
 int printk_sched(const char *fmt, ...)
 {

kernel/sys.c
@@ -2185,9 +2185,8 @@ SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
 
 char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff";
 
-static int __orderly_poweroff(void)
+static int __orderly_poweroff(bool force)
 {
-	int argc;
 	char **argv;
 	static char *envp[] = {
 		"HOME=/",
@@ -2196,35 +2195,19 @@ static int __orderly_poweroff(void)
 	};
 	int ret;
 
-	argv = argv_split(GFP_ATOMIC, poweroff_cmd, &argc);
-	if (argv == NULL) {
+	argv = argv_split(GFP_KERNEL, poweroff_cmd, NULL);
+	if (argv) {
+		ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
+		argv_free(argv);
+	} else {
 		printk(KERN_WARNING "%s failed to allocate memory for \"%s\"\n",
 		       __func__, poweroff_cmd);
-		return -ENOMEM;
+		ret = -ENOMEM;
 	}
 
-	ret = call_usermodehelper_fns(argv[0], argv, envp, UMH_WAIT_EXEC,
-				      NULL, NULL, NULL);
-	argv_free(argv);
-
-	return ret;
-}
-
-/**
- * orderly_poweroff - Trigger an orderly system poweroff
- * @force: force poweroff if command execution fails
- *
- * This may be called from any context to trigger a system shutdown.
- * If the orderly shutdown fails, it will force an immediate shutdown.
- */
-int orderly_poweroff(bool force)
-{
-	int ret = __orderly_poweroff();
-
 	if (ret && force) {
 		printk(KERN_WARNING "Failed to start orderly shutdown: "
 		       "forcing the issue\n");
-
 		/*
 		 * I guess this should try to kick off some daemon to sync and
 		 * poweroff asap.  Or not even bother syncing if we're doing an
@@ -2236,4 +2219,28 @@ int orderly_poweroff(bool force)
 	return ret;
 }
 
+static bool poweroff_force;
+
+static void poweroff_work_func(struct work_struct *work)
+{
+	__orderly_poweroff(poweroff_force);
+}
+
+static DECLARE_WORK(poweroff_work, poweroff_work_func);
+
+/**
+ * orderly_poweroff - Trigger an orderly system poweroff
+ * @force: force poweroff if command execution fails
+ *
+ * This may be called from any context to trigger a system shutdown.
+ * If the orderly shutdown fails, it will force an immediate shutdown.
+ */
+int orderly_poweroff(bool force)
+{
+	if (force) /* do not override the pending "true" */
+		poweroff_force = true;
+
+	schedule_work(&poweroff_work);
+	return 0;
+}
 EXPORT_SYMBOL_GPL(orderly_poweroff);
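
Illustrative sketch (not part of this commit) of the idiom the poweroff change adopts: orderly_poweroff() can be reached from atomic context, while spawning the usermode helper may sleep, so the blocking part is pushed to a workqueue and runs later in process context. A generic, self-contained module sketch of that pattern, with made-up names:

/* defer_demo.c: a minimal (hypothetical) module showing the
 * "queue from anywhere, sleep in the worker" pattern used above.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/delay.h>

static void heavy_work_func(struct work_struct *work)
{
	/* Runs in process context: sleeping is allowed here. */
	msleep(100);
	pr_info("defer_demo: blocking work done\n");
}

static DECLARE_WORK(heavy_work, heavy_work_func);

static int __init defer_demo_init(void)
{
	/* Safe even from atomic context: only queues the work item. */
	schedule_work(&heavy_work);
	return 0;
}

static void __exit defer_demo_exit(void)
{
	flush_work(&heavy_work);	/* wait for the worker before unloading */
}

module_init(defer_demo_init);
module_exit(defer_demo_exit);
MODULE_LICENSE("GPL");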

lib/bust_spinlocks.c
@@ -8,6 +8,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/printk.h>
 #include <linux/spinlock.h>
 #include <linux/tty.h>
 #include <linux/wait.h>
@@ -28,5 +29,3 @@ void __attribute__((weak)) bust_spinlocks(int yes)
 			wake_up_klogd();
 	}
 }
-
-

lib/dma-debug.c
@@ -862,17 +862,21 @@ static void check_unmap(struct dma_debug_entry *ref)
 	entry = bucket_find_exact(bucket, ref);
 
 	if (!entry) {
+		/* must drop lock before calling dma_mapping_error */
+		put_hash_bucket(bucket, &flags);
+
 		if (dma_mapping_error(ref->dev, ref->dev_addr)) {
 			err_printk(ref->dev, NULL,
-				   "DMA-API: device driver tries "
-				   "to free an invalid DMA memory address\n");
-			return;
+				   "DMA-API: device driver tries to free an "
+				   "invalid DMA memory address\n");
+		} else {
+			err_printk(ref->dev, NULL,
+				   "DMA-API: device driver tries to free DMA "
+				   "memory it has not allocated [device "
+				   "address=0x%016llx] [size=%llu bytes]\n",
+				   ref->dev_addr, ref->size);
 		}
-		err_printk(ref->dev, NULL, "DMA-API: device driver tries "
-			   "to free DMA memory it has not allocated "
-			   "[device address=0x%016llx] [size=%llu bytes]\n",
-			   ref->dev_addr, ref->size);
-		goto out;
+		return;
 	}
 
 	if (ref->size != entry->size) {
@@ -936,7 +940,6 @@ static void check_unmap(struct dma_debug_entry *ref)
 	hash_bucket_del(entry);
 	dma_entry_free(entry);
 
-out:
 	put_hash_bucket(bucket, &flags);
 }
 
@@ -1082,13 +1085,27 @@ void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 	ref.dev = dev;
 	ref.dev_addr = dma_addr;
 	bucket = get_hash_bucket(&ref, &flags);
-	entry = bucket_find_exact(bucket, &ref);
 
-	if (!entry)
-		goto out;
+	list_for_each_entry(entry, &bucket->list, list) {
+		if (!exact_match(&ref, entry))
+			continue;
+
+		/*
+		 * The same physical address can be mapped multiple
+		 * times. Without a hardware IOMMU this results in the
+		 * same device addresses being put into the dma-debug
+		 * hash multiple times too. This can result in false
+		 * positives being reported. Therefore we implement a
+		 * best-fit algorithm here which updates the first entry
+		 * from the hash which fits the reference value and is
+		 * not currently listed as being checked.
+		 */
+		if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
+			entry->map_err_type = MAP_ERR_CHECKED;
+			break;
+		}
+	}
 
-	entry->map_err_type = MAP_ERR_CHECKED;
-out:
 	put_hash_bucket(bucket, &flags);
 }
 EXPORT_SYMBOL(debug_dma_mapping_error);

mm/hugetlb.c
@@ -2124,8 +2124,12 @@ int hugetlb_report_node_meminfo(int nid, char *buf)
 /* Return the number pages of memory we physically have, in PAGE_SIZE units. */
 unsigned long hugetlb_total_pages(void)
 {
-	struct hstate *h = &default_hstate;
-	return h->nr_huge_pages * pages_per_huge_page(h);
+	struct hstate *h;
+	unsigned long nr_total_pages = 0;
+
+	for_each_hstate(h)
+		nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
+	return nr_total_pages;
 }
 
 static int hugetlb_acct_memory(struct hstate *h, long delta)

mm/memory_hotplug.c
@@ -1779,7 +1779,11 @@ void try_offline_node(int nid)
 	for (i = 0; i < MAX_NR_ZONES; i++) {
 		struct zone *zone = pgdat->node_zones + i;
 
-		if (zone->wait_table)
+		/*
+		 * wait_table may be allocated from boot memory,
+		 * here only free if it's allocated by vmalloc.
+		 */
+		if (is_vmalloc_addr(zone->wait_table))
 			vfree(zone->wait_table);
 	}
 