Blackfin/ipipe: upgrade to I-pipe mainline
This patch introduces Blackfin-specific bits to support the current
tip of the interrupt pipeline development, mainly:

- 2/3-level interrupt maps (sparse IRQs)
- generic virq handling
- sysinfo v2 format for ipipe_get_sysinfo()

Signed-off-by: Philippe Gerum <rpm@xenomai.org>
Signed-off-by: Mike Frysinger <vapier@gentoo.org>
parent 8944b5a258
commit 5b5da4c4b8
arch/blackfin/include/asm/ipipe.h
@@ -34,11 +34,12 @@
 #include <asm/bitops.h>
 #include <asm/atomic.h>
 #include <asm/traps.h>
+#include <asm/bitsperlong.h>
 
-#define IPIPE_ARCH_STRING     "1.12-00"
+#define IPIPE_ARCH_STRING     "1.16-01"
 #define IPIPE_MAJOR_NUMBER    1
-#define IPIPE_MINOR_NUMBER    12
-#define IPIPE_PATCH_NUMBER    0
+#define IPIPE_MINOR_NUMBER    16
+#define IPIPE_PATCH_NUMBER    1
 
 #ifdef CONFIG_SMP
 #error "I-pipe/blackfin: SMP not implemented"
@@ -55,25 +56,19 @@ do { \
 #define task_hijacked(p) \
 	({ \
 		int __x__ = __ipipe_root_domain_p; \
-		__clear_bit(IPIPE_SYNC_FLAG, &ipipe_root_cpudom_var(status)); \
-		if (__x__) \
-			hard_local_irq_enable(); \
+		hard_local_irq_enable(); \
 		!__x__; \
 	})
 
 struct ipipe_domain;
 
 struct ipipe_sysinfo {
 
-	int ncpus;		/* Number of CPUs on board */
-	u64 cpufreq;		/* CPU frequency (in Hz) */
-
-	/* Arch-dependent block */
-
-	struct {
-		unsigned tmirq;	/* Timer tick IRQ */
-		u64 tmfreq;	/* Timer frequency */
-	} archdep;
+	int sys_nr_cpus;	/* Number of CPUs on board */
+	int sys_hrtimer_irq;	/* hrtimer device IRQ */
+	u64 sys_hrtimer_freq;	/* hrtimer device frequency */
+	u64 sys_hrclock_freq;	/* hrclock device frequency */
+	u64 sys_cpu_freq;	/* CPU frequency (Hz) */
 };
 
 #define ipipe_read_tsc(t) \
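The sysinfo v2 layout above flattens the old arch-dependent block into
self-describing sys_* fields. A minimal sketch of how a pipeline client
might read the new format; the probe function is hypothetical, while the
field names, ipipe_get_sysinfo() and the __IPIPE_FEATURE_SYSINFO_V2 flag
all come from this patch:

	#include <linux/ipipe.h>

	/* Hypothetical client-side probe illustrating the v1 -> v2 switch. */
	static void probe_hrtimer_sketch(void)
	{
		struct ipipe_sysinfo info;

		ipipe_get_sysinfo(&info);
	#ifdef __IPIPE_FEATURE_SYSINFO_V2
		/* v2: flat namespace; hrtimer and hrclock described separately */
		pr_info("hrtimer irq %d, hrtimer freq %llu, cpu freq %llu\n",
			info.sys_hrtimer_irq,
			(unsigned long long)info.sys_hrtimer_freq,
			(unsigned long long)info.sys_cpu_freq);
	#else
		/* v1: timer data lived in the archdep sub-struct */
		pr_info("timer irq %u, timer freq %llu, cpu freq %llu\n",
			info.archdep.tmirq,
			(unsigned long long)info.archdep.tmfreq,
			(unsigned long long)info.cpufreq);
	#endif
	}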
@@ -115,9 +110,19 @@ void __ipipe_enable_irqdesc(struct ipipe_domain *ipd,
 void __ipipe_disable_irqdesc(struct ipipe_domain *ipd,
 			     unsigned irq);
 
-#define __ipipe_enable_irq(irq)	(irq_desc[irq].chip->unmask(irq))
+#define __ipipe_enable_irq(irq) \
+	do { \
+		struct irq_desc *desc = irq_to_desc(irq); \
+		struct irq_chip *chip = get_irq_desc_chip(desc); \
+		chip->irq_unmask(&desc->irq_data); \
+	} while (0)
 
-#define __ipipe_disable_irq(irq)	(irq_desc[irq].chip->mask(irq))
+#define __ipipe_disable_irq(irq) \
+	do { \
+		struct irq_desc *desc = irq_to_desc(irq); \
+		struct irq_chip *chip = get_irq_desc_chip(desc); \
+		chip->irq_mask(&desc->irq_data); \
+	} while (0)
 
 static inline int __ipipe_check_tickdev(const char *devname)
 {
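The old one-line macros indexed the flat irq_desc[] array directly,
which no longer exists once sparse IRQs are enabled. A sketch of the
access pattern the new macros expand to; the wrapper function and the
NULL check are illustrative additions, the genirq helpers are the ones
used in the hunk above:

	#include <linux/irq.h>

	static void unmask_irq_sketch(unsigned int irq)
	{
		struct irq_desc *desc = irq_to_desc(irq);
		struct irq_chip *chip;

		if (!desc)	/* with sparse IRQs the descriptor may not exist */
			return;
		chip = get_irq_desc_chip(desc);
		if (chip->irq_unmask)
			chip->irq_unmask(&desc->irq_data); /* chip methods take irq_data now */
	}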
@@ -128,12 +133,11 @@ void __ipipe_enable_pipeline(void);
 
 #define __ipipe_hook_critical_ipi(ipd) do { } while (0)
 
-#define __ipipe_sync_pipeline ___ipipe_sync_pipeline
-void ___ipipe_sync_pipeline(unsigned long syncmask);
+void ___ipipe_sync_pipeline(void);
 
 void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs);
 
-int __ipipe_get_irq_priority(unsigned irq);
+int __ipipe_get_irq_priority(unsigned int irq);
 
 void __ipipe_serial_debug(const char *fmt, ...);
@@ -152,7 +156,10 @@ static inline unsigned long __ipipe_ffnz(unsigned long ul)
 	return ffs(ul) - 1;
 }
 
-#define __ipipe_run_irqtail()  /* Must be a macro */ \
+#define __ipipe_do_root_xirq(ipd, irq) \
+	((ipd)->irqs[irq].handler(irq, &__raw_get_cpu_var(__ipipe_tick_regs)))
+
+#define __ipipe_run_irqtail(irq)  /* Must be a macro */ \
 do { \
 	unsigned long __pending; \
 	CSYNC(); \
@@ -164,42 +171,8 @@ static inline unsigned long __ipipe_ffnz(unsigned long ul)
 	} \
 } while (0)
 
-#define __ipipe_run_isr(ipd, irq) \
-	do { \
-		if (!__ipipe_pipeline_head_p(ipd)) \
-			hard_local_irq_enable(); \
-		if (ipd == ipipe_root_domain) { \
-			if (unlikely(ipipe_virtual_irq_p(irq))) { \
-				irq_enter(); \
-				ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie); \
-				irq_exit(); \
-			} else \
-				ipd->irqs[irq].handler(irq, &__raw_get_cpu_var(__ipipe_tick_regs)); \
-		} else { \
-			__clear_bit(IPIPE_SYNC_FLAG, &ipipe_cpudom_var(ipd, status)); \
-			ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie); \
-			/* Attempt to exit the outer interrupt level before \
-			 * starting the deferred IRQ processing. */ \
-			__ipipe_run_irqtail(); \
-			__set_bit(IPIPE_SYNC_FLAG, &ipipe_cpudom_var(ipd, status)); \
-		} \
-		hard_local_irq_disable(); \
-	} while (0)
-
 #define __ipipe_syscall_watched_p(p, sc) \
-	(((p)->flags & PF_EVNOTIFY) || (unsigned long)sc >= NR_syscalls)
-
-void ipipe_init_irq_threads(void);
-
-int ipipe_start_irq_thread(unsigned irq, struct irq_desc *desc);
-
-#ifdef CONFIG_TICKSOURCE_CORETMR
-#define IRQ_SYSTMR		IRQ_CORETMR
-#define IRQ_PRIOTMR		IRQ_CORETMR
-#else
-#define IRQ_SYSTMR		IRQ_TIMER0
-#define IRQ_PRIOTMR		CONFIG_IRQ_TIMER0
-#endif
+	(ipipe_notifier_enabled_p(p) || (unsigned long)sc >= NR_syscalls)
 
 #ifdef CONFIG_BF561
 #define bfin_write_TIMER_DISABLE(val)	bfin_write_TMRS8_DISABLE(val)
@@ -219,11 +192,11 @@ int ipipe_start_irq_thread(unsigned irq, struct irq_desc *desc);
 
 #define task_hijacked(p)		0
 #define ipipe_trap_notify(t, r)	0
+#define __ipipe_root_tick_p(regs)	1
 
-#define ipipe_init_irq_threads()		do { } while (0)
-#define ipipe_start_irq_thread(irq, desc)	0
+#endif /* !CONFIG_IPIPE */
 
-#ifndef CONFIG_TICKSOURCE_GPTMR0
+#ifdef CONFIG_TICKSOURCE_CORETMR
 #define IRQ_SYSTMR		IRQ_CORETMR
 #define IRQ_PRIOTMR		IRQ_CORETMR
 #else
@@ -231,10 +204,6 @@ int ipipe_start_irq_thread(unsigned irq, struct irq_desc *desc);
 #define IRQ_PRIOTMR		CONFIG_IRQ_TIMER0
 #endif
 
-#define __ipipe_root_tick_p(regs)	1
-
-#endif /* !CONFIG_IPIPE */
-
 #define ipipe_update_tick_evtdev(evtdev)	do { } while (0)
 
 #endif /* !__ASM_BLACKFIN_IPIPE_H */
arch/blackfin/include/asm/ipipe_base.h
@@ -24,8 +24,10 @@
 
 #ifdef CONFIG_IPIPE
 
+#include <asm/bitsperlong.h>
+#include <mach/irq.h>
+
 #define IPIPE_NR_XIRQS		NR_IRQS
-#define IPIPE_IRQ_ISHIFT	5	/* 2^5 for 32bits arch. */
 
 /* Blackfin-specific, per-cpu pipeline status */
 #define IPIPE_SYNCDEFER_FLAG	15
@@ -42,11 +44,14 @@
 #define IPIPE_EVENT_INIT	(IPIPE_FIRST_EVENT + 4)
 #define IPIPE_EVENT_EXIT	(IPIPE_FIRST_EVENT + 5)
 #define IPIPE_EVENT_CLEANUP	(IPIPE_FIRST_EVENT + 6)
-#define IPIPE_LAST_EVENT	IPIPE_EVENT_CLEANUP
+#define IPIPE_EVENT_RETURN	(IPIPE_FIRST_EVENT + 7)
+#define IPIPE_LAST_EVENT	IPIPE_EVENT_RETURN
 #define IPIPE_NR_EVENTS		(IPIPE_LAST_EVENT + 1)
 
 #define IPIPE_TIMER_IRQ		IRQ_CORETMR
 
+#define __IPIPE_FEATURE_SYSINFO_V2	1
+
 #ifndef __ASSEMBLY__
 
 extern unsigned long __ipipe_root_status; /* Alias to ipipe_root_cpudom_var(status) */
@@ -63,6 +68,8 @@ void __ipipe_unlock_root(void);
 
 #endif /* !__ASSEMBLY__ */
 
+#define __IPIPE_FEATURE_SYSINFO_V2	1
+
 #endif /* CONFIG_IPIPE */
 
 #endif /* !__ASM_BLACKFIN_IPIPE_BASE_H */
arch/blackfin/kernel/ipipe.c
@@ -154,7 +154,7 @@ void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
 	 * pending for it.
 	 */
 	if (test_bit(IPIPE_AHEAD_FLAG, &this_domain->flags) &&
-	    ipipe_head_cpudom_var(irqpend_himask) == 0)
+	    !__ipipe_ipending_p(ipipe_head_cpudom_ptr()))
 		goto out;
 
 	__ipipe_walk_pipeline(head);
@@ -185,25 +185,21 @@ void __ipipe_disable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
 }
 EXPORT_SYMBOL(__ipipe_disable_irqdesc);
 
-int __ipipe_syscall_root(struct pt_regs *regs)
+asmlinkage int __ipipe_syscall_root(struct pt_regs *regs)
 {
 	struct ipipe_percpu_domain_data *p;
-	unsigned long flags;
+	void (*hook)(void);
 	int ret;
 
-	/*
-	 * We need to run the IRQ tail hook whenever we don't
-	 * propagate a syscall to higher domains, because we know that
-	 * important operations might be pending there (e.g. Xenomai
-	 * deferred rescheduling).
-	 */
-
-	if (regs->orig_p0 < NR_syscalls) {
-		void (*hook)(void) = (void (*)(void))__ipipe_irq_tail_hook;
-		hook();
-		if ((current->flags & PF_EVNOTIFY) == 0)
-			return 0;
-	}
+	WARN_ON_ONCE(irqs_disabled_hw());
+
+	/*
+	 * We need to run the IRQ tail hook each time we intercept a
+	 * syscall, because we know that important operations might be
+	 * pending there (e.g. Xenomai deferred rescheduling).
+	 */
+	hook = (__typeof__(hook))__ipipe_irq_tail_hook;
+	hook();
 
 	/*
 	 * This routine either returns:
@@ -214,51 +210,47 @@ int __ipipe_syscall_root(struct pt_regs *regs)
 	 * tail work has to be performed (for handling signals etc).
 	 */
 
-	if (!__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL))
+	if (!__ipipe_syscall_watched_p(current, regs->orig_p0) ||
+	    !__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL))
 		return 0;
 
 	ret = __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, regs);
 
-	flags = hard_local_irq_save();
+	hard_local_irq_disable();
 
-	if (!__ipipe_root_domain_p) {
-		hard_local_irq_restore(flags);
-		return 1;
+	/*
+	 * This is the end of the syscall path, so we may
+	 * safely assume a valid Linux task stack here.
+	 */
+	if (current->ipipe_flags & PF_EVTRET) {
+		current->ipipe_flags &= ~PF_EVTRET;
+		__ipipe_dispatch_event(IPIPE_EVENT_RETURN, regs);
 	}
 
-	p = ipipe_root_cpudom_ptr();
-	if ((p->irqpend_himask & IPIPE_IRQMASK_VIRT) != 0)
-		__ipipe_sync_pipeline(IPIPE_IRQMASK_VIRT);
+	if (!__ipipe_root_domain_p)
+		ret = -1;
+	else {
+		p = ipipe_root_cpudom_ptr();
+		if (__ipipe_ipending_p(p))
+			__ipipe_sync_pipeline();
+	}
 
-	hard_local_irq_restore(flags);
+	hard_local_irq_enable();
 
 	return -ret;
 }
 
 unsigned long ipipe_critical_enter(void (*syncfn) (void))
 {
 	unsigned long flags;
 
 	flags = hard_local_irq_save();
 
 	return flags;
 }
 
 void ipipe_critical_exit(unsigned long flags)
 {
 	hard_local_irq_restore(flags);
 }
 
 static void __ipipe_no_irqtail(void)
 {
 }
 
 int ipipe_get_sysinfo(struct ipipe_sysinfo *info)
 {
-	info->ncpus = num_online_cpus();
-	info->cpufreq = ipipe_cpu_freq();
-	info->archdep.tmirq = IPIPE_TIMER_IRQ;
-	info->archdep.tmfreq = info->cpufreq;
+	info->sys_nr_cpus = num_online_cpus();
+	info->sys_cpu_freq = ipipe_cpu_freq();
+	info->sys_hrtimer_irq = IPIPE_TIMER_IRQ;
+	info->sys_hrtimer_freq = __ipipe_core_clock;
+	info->sys_hrclock_freq = __ipipe_core_clock;
 
 	return 0;
 }
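The return convention documented inside __ipipe_syscall_root() is acted
upon by the Blackfin syscall entry code, which this diff does not show;
the dispatcher below is only a hypothetical sketch of that contract:

	static void syscall_entry_sketch(struct pt_regs *regs)
	{
		int r = __ipipe_syscall_root(regs);

		if (r == 0) {
			/* pass the syscall on to the regular Linux path */
		} else if (r > 0) {
			/* fully handled by a higher domain: skip Linux and tail work */
		} else {
			/* skip Linux, but run the signal/reschedule tail work */
		}
	}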
@@ -289,6 +281,7 @@ int ipipe_trigger_irq(unsigned irq)
 asmlinkage void __ipipe_sync_root(void)
 {
 	void (*irq_tail_hook)(void) = (void (*)(void))__ipipe_irq_tail_hook;
+	struct ipipe_percpu_domain_data *p;
 	unsigned long flags;
 
 	BUG_ON(irqs_disabled());
@@ -300,19 +293,20 @@ asmlinkage void __ipipe_sync_root(void)
 
 	clear_thread_flag(TIF_IRQ_SYNC);
 
-	if (ipipe_root_cpudom_var(irqpend_himask) != 0)
-		__ipipe_sync_pipeline(IPIPE_IRQMASK_ANY);
+	p = ipipe_root_cpudom_ptr();
+	if (__ipipe_ipending_p(p))
+		__ipipe_sync_pipeline();
 
 	hard_local_irq_restore(flags);
 }
 
-void ___ipipe_sync_pipeline(unsigned long syncmask)
+void ___ipipe_sync_pipeline(void)
 {
 	if (__ipipe_root_domain_p &&
 	    test_bit(IPIPE_SYNCDEFER_FLAG, &ipipe_root_cpudom_var(status)))
 		return;
 
-	__ipipe_sync_stage(syncmask);
+	__ipipe_sync_stage();
 }
 
 void __ipipe_disable_root_irqs_hw(void)
arch/blackfin/mach-common/ints-priority.c
@@ -15,6 +15,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/seq_file.h>
 #include <linux/irq.h>
+#include <linux/sched.h>
 #ifdef CONFIG_IPIPE
 #include <linux/ipipe.h>
 #endif
@@ -556,10 +557,9 @@ static void bfin_demux_mac_status_irq(unsigned int int_err_irq,
 static inline void bfin_set_irq_handler(unsigned irq, irq_flow_handler_t handle)
 {
 #ifdef CONFIG_IPIPE
-	_set_irq_handler(irq, handle_level_irq);
-#else
-	__set_irq_handler_unlocked(irq, handle);
+	handle = handle_level_irq;
 #endif
+	__set_irq_handler_unlocked(irq, handle);
 }
 
 static DECLARE_BITMAP(gpio_enabled, MAX_BLACKFIN_GPIOS);
@@ -1392,7 +1392,7 @@ asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
 	struct ipipe_domain *this_domain = __ipipe_current_domain;
 	struct ivgx *ivg_stop = ivg7_13[vec-IVG7].istop;
 	struct ivgx *ivg = ivg7_13[vec-IVG7].ifirst;
-	int irq, s;
+	int irq, s = 0;
 
 	if (likely(vec == EVT_IVTMR_P))
 		irq = IRQ_CORETMR;
@@ -1442,6 +1442,21 @@
 		__raw_get_cpu_var(__ipipe_tick_regs).ipend |= 0x10;
 	}
 
+	/*
+	 * We don't want Linux interrupt handlers to run at the
+	 * current core priority level (i.e. < EVT15), since this
+	 * might delay other interrupts handled by a high priority
+	 * domain. Here is what we do instead:
+	 *
+	 * - we raise the SYNCDEFER bit to prevent
+	 * __ipipe_handle_irq() to sync the pipeline for the root
+	 * stage for the incoming interrupt. Upon return, that IRQ is
+	 * pending in the interrupt log.
+	 *
+	 * - we raise the TIF_IRQ_SYNC bit for the current thread, so
+	 * that _schedule_and_signal_from_int will eventually sync the
+	 * pipeline from EVT15.
+	 */
 	if (this_domain == ipipe_root_domain) {
 		s = __test_and_set_bit(IPIPE_SYNCDEFER_FLAG, &p->status);
 		barrier();
@@ -1451,6 +1466,24 @@
 	__ipipe_handle_irq(irq, regs);
 	ipipe_trace_irq_exit(irq);
 
+	if (user_mode(regs) &&
+	    !ipipe_test_foreign_stack() &&
+	    (current->ipipe_flags & PF_EVTRET) != 0) {
+		/*
+		 * Testing for user_regs() does NOT fully eliminate
+		 * foreign stack contexts, because of the forged
+		 * interrupt returns we do through
+		 * __ipipe_call_irqtail. In that case, we might have
+		 * preempted a foreign stack context in a high
+		 * priority domain, with a single interrupt level now
+		 * pending after the irqtail unwinding is done. In
+		 * which case user_mode() is now true, and the event
+		 * gets dispatched spuriously.
+		 */
+		current->ipipe_flags &= ~PF_EVTRET;
+		__ipipe_dispatch_event(IPIPE_EVENT_RETURN, regs);
+	}
+
 	if (this_domain == ipipe_root_domain) {
 		set_thread_flag(TIF_IRQ_SYNC);
 		if (!s) {
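The comment block added in the previous hunk describes a two-step
deferral of root-domain IRQ handling. As a plain-function sketch
(hypothetical: the tail of this hunk is cut off by the page, so the
final clearing of the flag when !s is an assumption inferred from the
__test_and_set_bit() pairing):

	static void root_irq_defer_sketch(int irq, struct pt_regs *regs,
					  struct ipipe_percpu_domain_data *p)
	{
		/* 1. Defer: the incoming IRQ is only logged, not synced at < EVT15. */
		int s = __test_and_set_bit(IPIPE_SYNCDEFER_FLAG, &p->status);

		__ipipe_handle_irq(irq, regs);

		/* 2. Ask _schedule_and_signal_from_int to sync the log from EVT15. */
		set_thread_flag(TIF_IRQ_SYNC);
		if (!s)	/* assumed: restore the flag only if we were the ones to set it */
			__clear_bit(IPIPE_SYNCDEFER_FLAG, &p->status);
	}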