Xtensa updates for v5.1:

 - use generic spinlock/rwlock implementations
 - clean up IPI processing
 - document boot parameter passing to the kernel
 - fix get_wchan
 - various cleanups in time.c, process.c, traps.c and thread_info.h
 -----BEGIN PGP SIGNATURE-----
 
 iQJHBAABCAAxFiEEK2eFS5jlMn3N6xfYUfnMkfg/oEQFAlyAf0ATHGpjbXZia2Jj
 QGdtYWlsLmNvbQAKCRBR+cyR+D+gRPH8D/4sKwUajbF1IKwwlUxrnOZEaofqlixD
 SnNJmg7n0rPT74Zo2phBhX2mhTSKBEJjMTkmVOFTLfE0JU0vUDlGyAFNeW9WnDlw
 RiI7iQc+Iihjb1EGnbH2zcvrKJSasXjKGpt+HrzrAIA16k0HE74o6+MHyBXE/qK/
 hHbc4P3xpLHaQYPR/slK8SWYw6m7GFMAdPRK2nIJ2qvNADGKr5Ic4V6KG3/GDVp8
 0RmdLNtK0A1iToYymmz06BsUemY62xxL3UZZCpzGSh7NWZxApKJTUzDkA/f4uRmc
 yhljHEZprSOuszJSGXe6JrmJ6Hs3unp084sjrVkRO95at/d0YoviWNtQ57bzL2d7
 lPZIKx4u/YPTUZjsf65h9SmyeZ2eGPiNLinNgaW/qDkNHRWyLpxTO2Z6LIb1OUUm
 xWjQGYr0HF/eaoNi7+7tiZgijKTJwTaKuWM9tFjjxxEWm6FAZJIMYcsCfySJHo3G
 3D/cm24kHDUK4GKd4kRPlh4KJDcSDRrcwNiHdY5lpU8jPJy6xi3F0KEGxPw8LT44
 q52ZEqx+zDXKKMy1Om+uYg4Uoyrol0cm6DUYzp5Q2VPjo+81lLN3Dy8NHbk2e7Rp
 FcsIzclBOBoAYkj5KbngisBZ2k/Gw5O413RRmUBjx570D8Sfuc43RyeryrlOPyaC
 ne2fJjYmLql2wg==
 =VHOK
 -----END PGP SIGNATURE-----

Merge tag 'xtensa-20190307' of git://github.com/jcmvbkbc/linux-xtensa

Pull xtensa updates from Max Filippov:

 - use generic spinlock/rwlock implementations

 - clean up IPI processing

 - document boot parameter passing to the kernel

 - fix get_wchan

 - various cleanups in time.c, process.c, traps.c and thread_info.h

* tag 'xtensa-20190307' of git://github.com/jcmvbkbc/linux-xtensa:
  xtensa: simplify trap_init
  xtensa: drop unused definitions
  xtensa: fix get_wchan
  xtensa: use generic spinlock/rwlock implementation
  xtensa: provide xchg for sizes 1 and 2
  xtensa: clean up arch/xtensa/kernel/time.c
  xtensa: SMP: rework IPI processing
  xtensa: document boot parameter passing
Linus Torvalds 2019-03-07 13:27:53 -08:00
commit dd1c3ed76f
11 changed files with 113 additions and 257 deletions


@ -0,0 +1,19 @@
Passing boot parameters to the kernel.
Boot parameters are represented as a TLV list in memory. Please see
arch/xtensa/include/asm/bootparam.h for the definition of the bp_tag structure
and the tag value constants. The first entry in the list must have type
BP_TAG_FIRST, and the last entry must have type BP_TAG_LAST. The address of
the first list entry is passed to the kernel in register a2. The address type
depends on the MMU type:
- For configurations without an MMU, with region protection, or with an MPU,
  the address must be a physical address.
- For configurations with a region translation MMU, or with MMUv3 and
  CONFIG_MMU=n, the address must be a valid address in the current mapping.
  The kernel will not change the mapping on its own.
- For configurations with MMUv2 the address must be a virtual address in the
  default virtual mapping (0xd0000000..0xffffffff).
- For configurations with MMUv3 and CONFIG_MMU=y the address may be either a
  virtual or a physical address. In either case it must be within the default
  virtual mapping. It is considered physical if it is within the range of
  physical addresses covered by the default KSEG mapping (XCHAL_KSEG_PADDR..
  XCHAL_KSEG_PADDR + XCHAL_KSEG_SIZE); otherwise it is considered virtual.
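
A minimal sketch of how such a TLV list can be walked, assuming a bp_tag
layout and BP_TAG_FIRST/BP_TAG_LAST values like the ones declared in
arch/xtensa/include/asm/bootparam.h (the field names and tag values below are
illustrative, not taken from this patch):

/* Illustrative layout and values only; the authoritative definitions live in
 * arch/xtensa/include/asm/bootparam.h.
 */
#define BP_TAG_FIRST	0x7B0B	/* assumed value */
#define BP_TAG_LAST	0x7E0B	/* assumed value */

struct bp_tag {
	unsigned short id;	/* tag type */
	unsigned short size;	/* payload size in bytes, header excluded */
	unsigned long data[];	/* payload follows the header */
};

/* Walk the list whose first entry the boot loader passed in register a2. */
static void walk_bootparams(const struct bp_tag *tag)
{
	if (!tag || tag->id != BP_TAG_FIRST)
		return;
	while (tag->id != BP_TAG_LAST) {
		/* dispatch on tag->id here; payload is tag->data, tag->size */
		tag = (const struct bp_tag *)
			((const char *)(tag + 1) + tag->size);
	}
}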


@ -5,6 +5,8 @@ config XTENSA
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select ARCH_NO_COHERENT_DMA_MMAP if !MMU
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USE_QUEUED_SPINLOCKS
select ARCH_WANT_FRAME_POINTERS
select ARCH_WANT_IPC_PARSE_VERSION
select BUILDTIME_EXTABLE_SORT


@ -23,6 +23,8 @@ generic-y += mm-arch-hooks.h
generic-y += param.h
generic-y += percpu.h
generic-y += preempt.h
generic-y += qrwlock.h
generic-y += qspinlock.h
generic-y += rwsem.h
generic-y += sections.h
generic-y += socket.h


@ -13,6 +13,7 @@
#ifndef __ASSEMBLY__
#include <linux/bits.h>
#include <linux/stringify.h>
/*
@ -138,6 +139,28 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
#define xchg(ptr,x) \
((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
static inline u32 xchg_small(volatile void *ptr, u32 x, int size)
{
int off = (unsigned long)ptr % sizeof(u32);
volatile u32 *p = ptr - off;
#ifdef __BIG_ENDIAN
int bitoff = (sizeof(u32) - size - off) * BITS_PER_BYTE;
#else
int bitoff = off * BITS_PER_BYTE;
#endif
u32 bitmask = ((0x1 << size * BITS_PER_BYTE) - 1) << bitoff;
u32 oldv, newv;
u32 ret;
do {
oldv = READ_ONCE(*p);
ret = (oldv & bitmask) >> bitoff;
newv = (oldv & ~bitmask) | (x << bitoff);
} while (__cmpxchg_u32(p, oldv, newv) != oldv);
return ret;
}
/*
* This only works if the compiler isn't horribly bad at optimizing.
* gcc-2.5.8 reportedly can't handle this, but I define that one to
@ -150,11 +173,16 @@ static __inline__ unsigned long
__xchg(unsigned long x, volatile void * ptr, int size)
{
switch (size) {
case 4:
return xchg_u32(ptr, x);
case 1:
return xchg_small(ptr, x, 1);
case 2:
return xchg_small(ptr, x, 2);
case 4:
return xchg_u32(ptr, x);
default:
__xchg_called_with_bad_pointer();
return x;
}
__xchg_called_with_bad_pointer();
return x;
}
#endif /* __ASSEMBLY__ */
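
The generic queued spinlocks exchange 16-bit lock fields, which is why xchg()
must now handle the 1- and 2-byte sizes: xchg_small() emulates them with a
32-bit compare-and-swap on the containing aligned word. A user-space sketch of
the same byte-lane arithmetic, with the GCC/Clang __atomic_compare_exchange_n
builtin standing in for the kernel's __cmpxchg_u32 and a little-endian layout
assumed (an illustration of the idea, not the kernel code):

#include <stdint.h>
#include <stdio.h>

static uint32_t xchg_small_sketch(volatile void *ptr, uint32_t x, int size)
{
	int off = (uintptr_t)ptr % sizeof(uint32_t);
	volatile uint32_t *p = (volatile uint32_t *)((uintptr_t)ptr - off);
	int bitoff = off * 8;				/* little-endian case */
	uint32_t mask = ((1u << (size * 8)) - 1) << bitoff;
	uint32_t oldv, newv;

	do {
		oldv = *p;
		newv = (oldv & ~mask) | ((x << bitoff) & mask);
	} while (!__atomic_compare_exchange_n(p, &oldv, newv, 0,
					      __ATOMIC_SEQ_CST,
					      __ATOMIC_SEQ_CST));

	return (oldv & mask) >> bitoff;			/* old value of the lane */
}

int main(void)
{
	_Alignas(uint32_t) uint16_t v[2] = { 0x1111, 0x2222 };
	uint32_t old = xchg_small_sketch(&v[1], 0xbeef, sizeof(v[1]));

	printf("old=%#x new=%#x\n", (unsigned)old, (unsigned)v[1]);
	/* prints: old=0x2222 new=0xbeef */
	return 0;
}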


@ -12,188 +12,9 @@
#define _XTENSA_SPINLOCK_H
#include <asm/barrier.h>
#include <asm/processor.h>
#include <asm/qrwlock.h>
#include <asm/qspinlock.h>
/*
* spinlock
*
* There is at most one owner of a spinlock. There are not different
* types of spinlock owners like there are for rwlocks (see below).
*
* When trying to obtain a spinlock, the function "spins" forever, or busy-
* waits, until the lock is obtained. When spinning, presumably some other
* owner will soon give up the spinlock making it available to others. Use
* the trylock functions to avoid spinning forever.
*
* possible values:
*
* 0 nobody owns the spinlock
* 1 somebody owns the spinlock
*/
#define arch_spin_is_locked(x) ((x)->slock != 0)
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned long tmp;
__asm__ __volatile__(
" movi %0, 0\n"
" wsr %0, scompare1\n"
"1: movi %0, 1\n"
" s32c1i %0, %1, 0\n"
" bnez %0, 1b\n"
: "=&a" (tmp)
: "a" (&lock->slock)
: "memory");
}
/* Returns 1 if the lock is obtained, 0 otherwise. */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
unsigned long tmp;
__asm__ __volatile__(
" movi %0, 0\n"
" wsr %0, scompare1\n"
" movi %0, 1\n"
" s32c1i %0, %1, 0\n"
: "=&a" (tmp)
: "a" (&lock->slock)
: "memory");
return tmp == 0 ? 1 : 0;
}
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
unsigned long tmp;
__asm__ __volatile__(
" movi %0, 0\n"
" s32ri %0, %1, 0\n"
: "=&a" (tmp)
: "a" (&lock->slock)
: "memory");
}
/*
* rwlock
*
* Read-write locks are really a more flexible spinlock. They allow
* multiple readers but only one writer. Write ownership is exclusive
* (i.e., all other readers and writers are blocked from ownership while
* there is a write owner). These rwlocks are unfair to writers. Writers
* can be starved for an indefinite time by readers.
*
* possible values:
*
* 0 nobody owns the rwlock
* >0 one or more readers own the rwlock
* (the positive value is the actual number of readers)
* 0x80000000 one writer owns the rwlock, no other writers, no readers
*/
static inline void arch_write_lock(arch_rwlock_t *rw)
{
unsigned long tmp;
__asm__ __volatile__(
" movi %0, 0\n"
" wsr %0, scompare1\n"
"1: movi %0, 1\n"
" slli %0, %0, 31\n"
" s32c1i %0, %1, 0\n"
" bnez %0, 1b\n"
: "=&a" (tmp)
: "a" (&rw->lock)
: "memory");
}
/* Returns 1 if the lock is obtained, 0 otherwise. */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
unsigned long tmp;
__asm__ __volatile__(
" movi %0, 0\n"
" wsr %0, scompare1\n"
" movi %0, 1\n"
" slli %0, %0, 31\n"
" s32c1i %0, %1, 0\n"
: "=&a" (tmp)
: "a" (&rw->lock)
: "memory");
return tmp == 0 ? 1 : 0;
}
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
unsigned long tmp;
__asm__ __volatile__(
" movi %0, 0\n"
" s32ri %0, %1, 0\n"
: "=&a" (tmp)
: "a" (&rw->lock)
: "memory");
}
static inline void arch_read_lock(arch_rwlock_t *rw)
{
unsigned long tmp;
unsigned long result;
__asm__ __volatile__(
"1: l32i %1, %2, 0\n"
" bltz %1, 1b\n"
" wsr %1, scompare1\n"
" addi %0, %1, 1\n"
" s32c1i %0, %2, 0\n"
" bne %0, %1, 1b\n"
: "=&a" (result), "=&a" (tmp)
: "a" (&rw->lock)
: "memory");
}
/* Returns 1 if the lock is obtained, 0 otherwise. */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
unsigned long result;
unsigned long tmp;
__asm__ __volatile__(
" l32i %1, %2, 0\n"
" addi %0, %1, 1\n"
" bltz %0, 1f\n"
" wsr %1, scompare1\n"
" s32c1i %0, %2, 0\n"
" sub %0, %0, %1\n"
"1:\n"
: "=&a" (result), "=&a" (tmp)
: "a" (&rw->lock)
: "memory");
return result == 0;
}
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
unsigned long tmp1, tmp2;
__asm__ __volatile__(
"1: l32i %1, %2, 0\n"
" addi %0, %1, -1\n"
" wsr %1, scompare1\n"
" s32c1i %0, %2, 0\n"
" bne %0, %1, 1b\n"
: "=&a" (tmp1), "=&a" (tmp2)
: "a" (&rw->lock)
: "memory");
}
#define smp_mb__after_spinlock() smp_mb()
#endif /* _XTENSA_SPINLOCK_H */


@ -2,20 +2,11 @@
#ifndef __ASM_SPINLOCK_TYPES_H
#define __ASM_SPINLOCK_TYPES_H
#ifndef __LINUX_SPINLOCK_TYPES_H
#if !defined(__LINUX_SPINLOCK_TYPES_H) && !defined(__ASM_SPINLOCK_H)
# error "please don't include this file directly"
#endif
typedef struct {
volatile unsigned int slock;
} arch_spinlock_t;
#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
typedef struct {
volatile unsigned int lock;
} arch_rwlock_t;
#define __ARCH_RW_LOCK_UNLOCKED { 0 }
#include <asm-generic/qspinlock_types.h>
#include <asm-generic/qrwlock_types.h>
#endif


@ -121,15 +121,6 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_WORK_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \
_TIF_SYSCALL_TRACEPOINT)
/*
* Thread-synchronous status.
*
* This is different from the flags in that nobody else
* ever touches our thread-synchronous status, so we don't
* have to worry about atomic accesses.
*/
#define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */
#define THREAD_SIZE KERNEL_STACK_SIZE
#define THREAD_SIZE_ORDER (KERNEL_STACK_SHIFT - PAGE_SHIFT)


@ -52,8 +52,6 @@
extern void ret_from_fork(void);
extern void ret_from_kernel_thread(void);
struct task_struct *current_set[NR_CPUS] = {&init_task, };
void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);
@ -321,8 +319,8 @@ unsigned long get_wchan(struct task_struct *p)
/* Stack layout: sp-4: ra, sp-3: sp' */
pc = MAKE_PC_FROM_RA(*(unsigned long*)sp - 4, sp);
sp = *(unsigned long *)sp - 3;
pc = MAKE_PC_FROM_RA(SPILL_SLOT(sp, 0), sp);
sp = SPILL_SLOT(sp, 1);
} while (count++ < 16);
return 0;
}
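
The get_wchan() bug fixed above is that the removed lines dereferenced sp and
then did the frame arithmetic on the loaded value ("*(unsigned long *)sp - 4")
instead of reading from the register spill area below sp. SPILL_SLOT()
addresses that area directly; assuming it expands to roughly the following
(the authoritative definition is in arch/xtensa/include/asm/processor.h), each
unwind step is just a read of spill slots 0 and 1:

/* Assumed expansion, for illustration: word n of the four-word register
 * spill area that sits immediately below the stack pointer sp.
 */
#define SPILL_SLOT(sp, n)	(*((unsigned long *)(sp) - 4 + (n)))

/* One unwind step in get_wchan():
 *	pc = MAKE_PC_FROM_RA(SPILL_SLOT(sp, 0), sp);	caller's return address
 *	sp = SPILL_SLOT(sp, 1);				caller's stack pointer
 */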


@ -372,8 +372,7 @@ static void send_ipi_message(const struct cpumask *callmask,
unsigned long mask = 0;
for_each_cpu(index, callmask)
if (index != smp_processor_id())
mask |= 1 << index;
mask |= 1 << index;
set_er(mask, MIPISET(msg_id));
}
@ -412,22 +411,31 @@ irqreturn_t ipi_interrupt(int irq, void *dev_id)
{
unsigned int cpu = smp_processor_id();
struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
unsigned int msg;
unsigned i;
msg = get_er(MIPICAUSE(cpu));
for (i = 0; i < IPI_MAX; i++)
if (msg & (1 << i)) {
set_er(1 << i, MIPICAUSE(cpu));
++ipi->ipi_count[i];
for (;;) {
unsigned int msg;
msg = get_er(MIPICAUSE(cpu));
set_er(msg, MIPICAUSE(cpu));
if (!msg)
break;
if (msg & (1 << IPI_CALL_FUNC)) {
++ipi->ipi_count[IPI_CALL_FUNC];
generic_smp_call_function_interrupt();
}
if (msg & (1 << IPI_RESCHEDULE))
scheduler_ipi();
if (msg & (1 << IPI_CALL_FUNC))
generic_smp_call_function_interrupt();
if (msg & (1 << IPI_CPU_STOP))
ipi_cpu_stop(cpu);
if (msg & (1 << IPI_RESCHEDULE)) {
++ipi->ipi_count[IPI_RESCHEDULE];
scheduler_ipi();
}
if (msg & (1 << IPI_CPU_STOP)) {
++ipi->ipi_count[IPI_CPU_STOP];
ipi_cpu_stop(cpu);
}
}
return IRQ_HANDLED;
}
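
The reworked ipi_interrupt() is a read-acknowledge-dispatch loop: each pass
reads MIPICAUSE, acknowledges exactly the bits it saw, handles them, and the
handler only returns once a pass finds nothing pending, so a message raised
while earlier ones are being processed is handled before returning rather than
waiting for the next interrupt. A generic user-space sketch of that pattern
(atomic_exchange stands in for the read-then-ack of MIPICAUSE, and the handler
is a placeholder):

#include <stdatomic.h>
#include <stdio.h>

enum { IPI_RESCHEDULE, IPI_CALL_FUNC, IPI_CPU_STOP, IPI_MAX };

static _Atomic unsigned int pending;		/* stand-in for MIPICAUSE */

static void handle(int type)
{
	printf("ipi %d\n", type);		/* placeholder handler */
}

static void dispatch_ipis(void)
{
	for (;;) {
		/* Fetch and acknowledge everything raised so far. */
		unsigned int msg = atomic_exchange(&pending, 0);
		int i;

		if (!msg)
			break;			/* nothing new: done */

		for (i = 0; i < IPI_MAX; i++)
			if (msg & (1u << i))
				handle(i);
		/* Anything raised while dispatching is picked up on the
		 * next pass instead of being left until the next interrupt.
		 */
	}
}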


@ -52,14 +52,11 @@ static struct clocksource ccount_clocksource = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static int ccount_timer_set_next_event(unsigned long delta,
struct clock_event_device *dev);
struct ccount_timer {
struct clock_event_device evt;
int irq_enabled;
char name[24];
};
static DEFINE_PER_CPU(struct ccount_timer, ccount_timer);
static int ccount_timer_set_next_event(unsigned long delta,
struct clock_event_device *dev)
@ -107,7 +104,30 @@ static int ccount_timer_set_oneshot(struct clock_event_device *evt)
return 0;
}
static irqreturn_t timer_interrupt(int irq, void *dev_id);
static DEFINE_PER_CPU(struct ccount_timer, ccount_timer) = {
.evt = {
.features = CLOCK_EVT_FEAT_ONESHOT,
.rating = 300,
.set_next_event = ccount_timer_set_next_event,
.set_state_shutdown = ccount_timer_shutdown,
.set_state_oneshot = ccount_timer_set_oneshot,
.tick_resume = ccount_timer_set_oneshot,
},
};
static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = &this_cpu_ptr(&ccount_timer)->evt;
set_linux_timer(get_linux_timer());
evt->event_handler(evt);
/* Allow platform to do something useful (Wdog). */
platform_heartbeat();
return IRQ_HANDLED;
}
static struct irqaction timer_irqaction = {
.handler = timer_interrupt,
.flags = IRQF_TIMER,
@ -120,14 +140,8 @@ void local_timer_setup(unsigned cpu)
struct clock_event_device *clockevent = &timer->evt;
timer->irq_enabled = 1;
clockevent->name = timer->name;
snprintf(timer->name, sizeof(timer->name), "ccount_clockevent_%u", cpu);
clockevent->features = CLOCK_EVT_FEAT_ONESHOT;
clockevent->rating = 300;
clockevent->set_next_event = ccount_timer_set_next_event;
clockevent->set_state_shutdown = ccount_timer_shutdown;
clockevent->set_state_oneshot = ccount_timer_set_oneshot;
clockevent->tick_resume = ccount_timer_set_oneshot;
clockevent->name = timer->name;
clockevent->cpumask = cpumask_of(cpu);
clockevent->irq = irq_create_mapping(NULL, LINUX_TIMER_INT);
if (WARN(!clockevent->irq, "error: can't map timer irq"))
@ -190,23 +204,6 @@ void __init time_init(void)
timer_probe();
}
/*
* The timer interrupt is called HZ times per second.
*/
irqreturn_t timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = &this_cpu_ptr(&ccount_timer)->evt;
set_linux_timer(get_linux_timer());
evt->event_handler(evt);
/* Allow platform to do something useful (Wdog). */
platform_heartbeat();
return IRQ_HANDLED;
}
#ifndef CONFIG_GENERIC_CALIBRATE_DELAY
void calibrate_delay(void)
{


@ -420,16 +420,15 @@ void __init trap_init(void)
/* Setup specific handlers. */
for(i = 0; dispatch_init_table[i].cause >= 0; i++) {
int fast = dispatch_init_table[i].fast;
int cause = dispatch_init_table[i].cause;
void *handler = dispatch_init_table[i].handler;
if (fast == 0)
set_handler(default_handler, cause, handler);
if (fast && fast & USER)
if ((fast & USER) != 0)
set_handler(fast_user_handler, cause, handler);
if (fast && fast & KRNL)
if ((fast & KRNL) != 0)
set_handler(fast_kernel_handler, cause, handler);
}