/*
 * arch/sh/kernel/cpu/init.c
 *
 * CPU init code
 *
 * Copyright (C) 2002 - 2009  Paul Mundt
 * Copyright (C) 2003  Richard Curnow
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/log2.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/system.h>
#include <asm/cacheflush.h>
#include <asm/cache.h>
#include <asm/elf.h>
#include <asm/io.h>
#include <asm/smp.h>
#ifdef CONFIG_SUPERH32
#include <asm/ubc.h>
#endif

/*
 * Generic wrapper for command line arguments to disable on-chip
 * peripherals (nofpu, nodsp, and so forth).
 */
#define onchip_setup(x)				\
static int x##_disabled __initdata = 0;		\
						\
static int __init x##_setup(char *opts)		\
{						\
	x##_disabled = 1;			\
	return 1;				\
}						\
__setup("no" __stringify(x), x##_setup);
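
/*
 * For example, onchip_setup(fpu) below expands to an fpu_disabled flag
 * plus an fpu_setup() handler registered for the "nofpu" command line
 * option; booting with "nofpu" or "nodsp" sets the corresponding
 * *_disabled flag, which sh_cpu_init() checks later on.
 */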
onchip_setup(fpu);
onchip_setup(dsp);

#ifdef CONFIG_SPECULATIVE_EXECUTION
#define CPUOPM		0xff2f0000
#define CPUOPM_RABD	(1 << 5)
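
/*
 * Note: CPUOPM is the CPU operation mode register. Going by the bit
 * name, RABD reads as a "read-ahead disable" bit, so clearing it below
 * should enable speculative accesses; consult the CPU's hardware manual
 * for the authoritative bit layout.
 */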

static void __init speculative_execution_init(void)
{
	/* Clear RABD */
	ctrl_outl(ctrl_inl(CPUOPM) & ~CPUOPM_RABD, CPUOPM);

	/* Flush the update */
	(void)ctrl_inl(CPUOPM);
	ctrl_barrier();
}
#else
#define speculative_execution_init()	do { } while (0)
#endif

#ifdef CONFIG_CPU_SH4A
#define EXPMASK			0xff2f0004
#define EXPMASK_RTEDS		(1 << 0)
#define EXPMASK_BRDSSLP		(1 << 1)
#define EXPMASK_MMCAW		(1 << 4)

static void __init expmask_init(void)
{
	unsigned long expmask = __raw_readl(EXPMASK);

	/*
	 * Future proofing.
	 *
	 * Disable support for slottable sleep instruction, non-nop
	 * instructions in the rte delay slot, and associative writes to
	 * the memory-mapped cache array.
	 */
	expmask &= ~(EXPMASK_RTEDS | EXPMASK_BRDSSLP | EXPMASK_MMCAW);

	__raw_writel(expmask, EXPMASK);
	ctrl_barrier();
}
#else
#define expmask_init()	do { } while (0)
#endif

/* 2nd-level cache init */
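/* Weak no-op default; parts that actually have an L2 override this. */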
void __uses_jump_to_uncached __attribute__ ((weak)) l2_cache_init(void)
{
}

/*
 * Generic first-level cache init
 */
#ifdef CONFIG_SUPERH32
static void __uses_jump_to_uncached cache_init(void)
{
	unsigned long ccr, flags;

	jump_to_uncached();
	ccr = ctrl_inl(CCR);

	/*
	 * At this point we don't know whether the cache is enabled or not -
	 * a bootloader may have enabled it. There are at least 2 things that
	 * could be dirty in the cache at this point:
	 *
	 *  1. kernel command line set up by boot loader
	 *  2. spilled registers from the prolog of this function
	 *
	 * => before re-initialising the cache, we must do a purge of the
	 * whole cache out to memory for safety. As long as nothing is
	 * spilled during the loop to lines that have already been done,
	 * this is safe. - RPC
	 */
	if (ccr & CCR_CACHE_ENABLE) {
		unsigned long ways, waysize, addrstart;

		waysize = current_cpu_data.dcache.sets;

#ifdef CCR_CACHE_ORA
		/*
		 * If the OC is already in RAM mode, we only have
		 * half of the entries to flush..
		 */
		if (ccr & CCR_CACHE_ORA)
			waysize >>= 1;
#endif

		waysize <<= current_cpu_data.dcache.entry_shift;

#ifdef CCR_CACHE_EMODE
		/* If EMODE is not set, we only have 1 way to flush. */
		if (!(ccr & CCR_CACHE_EMODE))
			ways = 1;
		else
#endif
			ways = current_cpu_data.dcache.ways;

		addrstart = CACHE_OC_ADDRESS_ARRAY;
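		/*
		 * Writing zeroes through the address array clears each
		 * entry's valid/dirty bits; per the purge described in the
		 * comment above, any dirty line is written back to memory
		 * before the entry is invalidated.
		 */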
		do {
			unsigned long addr;

			for (addr = addrstart;
			     addr < addrstart + waysize;
			     addr += current_cpu_data.dcache.linesz)
				ctrl_outl(0, addr);

			addrstart += current_cpu_data.dcache.way_incr;
		} while (--ways);
	}

	/*
	 * Default CCR values .. enable the caches
	 * and invalidate them immediately..
	 */
	flags = CCR_CACHE_ENABLE | CCR_CACHE_INVALIDATE;

#ifdef CCR_CACHE_EMODE
	/* Force EMODE if possible */
	if (current_cpu_data.dcache.ways > 1)
		flags |= CCR_CACHE_EMODE;
	else
		flags &= ~CCR_CACHE_EMODE;
#endif

#if defined(CONFIG_CACHE_WRITETHROUGH)
	/* Write-through */
	flags |= CCR_CACHE_WT;
#elif defined(CONFIG_CACHE_WRITEBACK)
	/* Write-back */
	flags |= CCR_CACHE_CB;
#else
	/* Off */
	flags &= ~CCR_CACHE_ENABLE;
#endif

	l2_cache_init();

	ctrl_outl(flags, CCR);
	back_to_cached();
}
#else
#define cache_init()	do { } while (0)
#endif

#define CSHAPE(totalsize, linesize, assoc) \
	((totalsize & ~0xff) | (linesize << 4) | assoc)

#define CACHE_DESC_SHAPE(desc)	\
	CSHAPE((desc).way_size * (desc).ways, ilog2((desc).linesz), (desc).ways)
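
/*
 * The shape word packs the total size, log2(line size), and associativity
 * into a single value; e.g. a 32KB 4-way cache with 32-byte lines encodes
 * as (32768 & ~0xff) | (5 << 4) | 4 == 0x8054. These values are exported
 * to userspace through the ELF auxiliary vector (the AT_L1*_CACHESHAPE
 * entries wired up via <asm/elf.h>).
 */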

static void detect_cache_shape(void)
{
	l1d_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.dcache);

	if (current_cpu_data.dcache.flags & SH_CACHE_COMBINED)
		l1i_cache_shape = l1d_cache_shape;
	else
		l1i_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.icache);

	if (current_cpu_data.flags & CPU_HAS_L2_CACHE)
		l2_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.scache);
	else
		l2_cache_shape = -1; /* No S-cache */
}

#ifdef CONFIG_SH_DSP
static void __init release_dsp(void)
{
	unsigned long sr;

	/* Clear SR.DSP bit */
	__asm__ __volatile__ (
		"stc\tsr, %0\n\t"
		"and\t%1, %0\n\t"
		"ldc\t%0, sr\n\t"
		: "=&r" (sr)
		: "r" (~SR_DSP)
	);
}

static void __init dsp_init(void)
{
	unsigned long sr;

	/*
	 * Set the SR.DSP bit, wait for one instruction, and then read
	 * back the SR value.
	 */
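	/* On a CPU with no DSP the bit will not stick and reads back as 0. */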
	__asm__ __volatile__ (
		"stc\tsr, %0\n\t"
		"or\t%1, %0\n\t"
		"ldc\t%0, sr\n\t"
		"nop\n\t"
		"stc\tsr, %0\n\t"
		: "=&r" (sr)
		: "r" (SR_DSP)
	);

	/* If the DSP bit is still set, this CPU has a DSP */
	if (sr & SR_DSP)
		current_cpu_data.flags |= CPU_HAS_DSP;

	/* Now that we've determined the DSP status, clear the DSP bit. */
	release_dsp();
}
#endif /* CONFIG_SH_DSP */

/**
 * sh_cpu_init
 *
 * This is our initial entry point for each CPU, and is invoked on the boot
 * CPU prior to calling start_kernel(). For SMP, a combination of this and
 * start_secondary() will bring up each processor to a ready state prior
 * to hand-forking the idle loop.
 *
 * We do all of the basic processor init here, including setting up the
 * caches, FPU, DSP, kicking the UBC, etc. By the time start_kernel() is
 * hit (and subsequently platform_setup()) things like determining the
 * CPU subtype and initial configuration will all be done.
 *
 * Each processor family is still responsible for doing its own probing
 * and cache configuration in detect_cpu_and_cache_system().
 */
asmlinkage void __init sh_cpu_init(void)
{
	current_thread_info()->cpu = hard_smp_processor_id();

	/* First, probe the CPU */
	detect_cpu_and_cache_system();

	if (current_cpu_data.type == CPU_SH_NONE)
		panic("Unknown CPU");

	/* First setup the rest of the I-cache info */
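	/*
	 * way_incr is the address stride between consecutive ways, so for
	 * the usual power-of-two geometries, way_incr - linesz masks off
	 * the entry (set index) bits of an address; way_size is simply
	 * sets * line size.
	 */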
	current_cpu_data.icache.entry_mask = current_cpu_data.icache.way_incr -
				      current_cpu_data.icache.linesz;

	current_cpu_data.icache.way_size = current_cpu_data.icache.sets *
				    current_cpu_data.icache.linesz;

	/* And the D-cache too */
	current_cpu_data.dcache.entry_mask = current_cpu_data.dcache.way_incr -
				      current_cpu_data.dcache.linesz;

	current_cpu_data.dcache.way_size = current_cpu_data.dcache.sets *
				    current_cpu_data.dcache.linesz;

	/* Init the cache */
	cache_init();

	if (raw_smp_processor_id() == 0) {
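		/*
		 * Shared mappings are coloured on the D-cache way size so
		 * that differently-mapped views of a page land in the same
		 * cache sets; PAGE_SIZE - 1 is the floor when the cache is
		 * no larger than a page.
		 */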
		shm_align_mask = max_t(unsigned long,
				       current_cpu_data.dcache.way_size - 1,
				       PAGE_SIZE - 1);

		/* Boot CPU sets the cache shape */
		detect_cache_shape();
	}

	/* Disable the FPU */
	if (fpu_disabled) {
		printk("FPU Disabled\n");
		current_cpu_data.flags &= ~CPU_HAS_FPU;
	}

	/* FPU initialization */
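	/*
	 * The FPU is kept disabled while running in the kernel; per-thread
	 * FPU state is only faulted back in lazily on first use, so the
	 * boot thread starts out with TS_USEDFPU clear and no math state.
	 */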
	disable_fpu();
	if ((current_cpu_data.flags & CPU_HAS_FPU)) {
		current_thread_info()->status &= ~TS_USEDFPU;
		clear_used_math();
	}

	/*
	 * Initialize the per-CPU ASID cache very early, since the
	 * TLB flushing routines depend on this being setup.
	 */
	current_cpu_data.asid_cache = NO_CONTEXT;

#ifdef CONFIG_SH_DSP
	/* Probe for DSP */
	dsp_init();

	/* Disable the DSP */
	if (dsp_disabled) {
		printk("DSP Disabled\n");
		current_cpu_data.flags &= ~CPU_HAS_DSP;
		release_dsp();
	}
#endif

	speculative_execution_init();
	expmask_init();
}