Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/gerg/m68knommu

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/gerg/m68knommu: (56 commits)
  m68k: allow ColdFire 547x and 548x CPUs to be built with MMU enabled
  m68k/Kconfig: Separate classic m68k and coldfire early
  m68k: add ColdFire with MMU enabled support to the m68k mem init code
  m68k: do not use m68k startup or interrupt code for ColdFire CPUs
  m68k: add ColdFire FPU support for the V4e ColdFire CPUs
  m68k: adjustments to stack frame for ColdFire with MMU enabled
  m68k: use non-MMU linker script for ColdFire MMU builds
  m68k: ColdFire with MMU enabled uses same clocking code as non-MMU
  m68k: add code to setup a ColdFire 54xx platform when MMU enabled
  m68k: use non-MMU entry.S code when compiling for ColdFire CPU
  m68k: create ColdFire MMU pgalloc code
  m68k: compile appropriate mm arch files for ColdFire MMU support
  m68k: ColdFire V4e MMU paging init code and miss handler
  m68k: use ColdFire MMU read/write bit flags when ioremapping
  m68k: modify cache push and clear code for ColdFire with MMU enable
  m68k: use tracehook_report_syscall_entry/exit for ColdFire MMU ptrace path
  m68k: ColdFire V4e MMU context support code
  m68k: MMU enabled ColdFire needs 8k ELF alignment
  m68k: set ColdFire MMU page size
  m68k: define PAGE_OFFSET_RAW for ColdFire CPU with MMU enabled
  ...
commit c77417132c
Linus Torvalds, 2012-01-06 17:59:33 -08:00
74 files changed, 2004 insertions(+), 701 deletions(-)


@@ -3,7 +3,6 @@ config M68K
 	default y
 	select HAVE_IDE
 	select HAVE_AOUT if MMU
-	select GENERIC_ATOMIC64 if MMU
 	select HAVE_GENERIC_HARDIRQS
 	select GENERIC_IRQ_SHOW
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS
@@ -41,12 +40,15 @@ config GENERIC_CALIBRATE_DELAY
 config GENERIC_IOMAP
 	def_bool MMU

+config GENERIC_CSUM
+	bool
+
 config TIME_LOW_RES
 	bool
 	default y

 config ARCH_USES_GETTIMEOFFSET
-	def_bool MMU
+	def_bool MMU && !COLDFIRE

 config NO_IOPORT
 	def_bool y
@@ -61,6 +63,12 @@ config ZONE_DMA
 config CPU_HAS_NO_BITFIELDS
 	bool

+config CPU_HAS_NO_MULDIV64
+	bool
+
+config CPU_HAS_ADDRESS_SPACES
+	bool
+
 config HZ
 	int
 	default 1000 if CLEOPATRA
@@ -80,9 +88,12 @@ config MMU
 config MMU_MOTOROLA
 	bool

+config MMU_COLDFIRE
+	bool
+
 config MMU_SUN3
 	bool
-	depends on MMU && !MMU_MOTOROLA
+	depends on MMU && !MMU_MOTOROLA && !MMU_COLDFIRE

 menu "Platform setup"


@@ -1,8 +1,42 @@
 comment "Processor Type"

+choice
+	prompt "CPU family support"
+	default M68KCLASSIC if MMU
+	default COLDFIRE if !MMU
+	help
+	  The Freescale (was Motorola) M68K family of processors implements
+	  the full 68000 processor instruction set.
+	  The Freescale ColdFire family of processors is a modern derivative
+	  of the 68000 processor family. They are mainly targeted at embedded
+	  applications, and are all System-On-Chip (SOC) devices, as opposed
+	  to stand alone CPUs. They implement a subset of the original 68000
+	  processor instruction set.
+	  If you anticipate running this kernel on a computer with a classic
+	  MC68xxx processor, select M68KCLASSIC.
+	  If you anticipate running this kernel on a computer with a ColdFire
+	  processor, select COLDFIRE.
+
+config M68KCLASSIC
+	bool "Classic M68K CPU family support"
+
+config COLDFIRE
+	bool "Coldfire CPU family support"
+	select GENERIC_GPIO
+	select ARCH_REQUIRE_GPIOLIB
+	select CPU_HAS_NO_BITFIELDS
+	select CPU_HAS_NO_MULDIV64
+	select GENERIC_CSUM
+
+endchoice
+
+if M68KCLASSIC
+
 config M68000
 	bool
 	select CPU_HAS_NO_BITFIELDS
+	select CPU_HAS_NO_MULDIV64
+	select GENERIC_CSUM
 	help
 	  The Freescale (was Motorola) 68000 CPU is the first generation of
 	  the well known M68K family of processors. The CPU core as well as
@@ -18,21 +52,11 @@ config MCPU32
 	  based on the 68020 processor. For the most part it is used in
 	  System-On-Chip parts, and does not contain a paging MMU.

-config COLDFIRE
-	bool
-	select GENERIC_GPIO
-	select ARCH_REQUIRE_GPIOLIB
-	select CPU_HAS_NO_BITFIELDS
-	help
-	  The Freescale ColdFire family of processors is a modern derivative
-	  of the 68000 processor family. They are mainly targeted at embedded
-	  applications, and are all System-On-Chip (SOC) devices, as opposed
-	  to stand alone CPUs. They implement a subset of the original 68000
-	  processor instruction set.
-
 config M68020
 	bool "68020 support"
 	depends on MMU
+	select GENERIC_ATOMIC64
+	select CPU_HAS_ADDRESS_SPACES
 	help
 	  If you anticipate running this kernel on a computer with a MC68020
 	  processor, say Y. Otherwise, say N. Note that the 68020 requires a
@@ -42,6 +66,8 @@ config M68020
 config M68030
 	bool "68030 support"
 	depends on MMU && !MMU_SUN3
+	select GENERIC_ATOMIC64
+	select CPU_HAS_ADDRESS_SPACES
 	help
 	  If you anticipate running this kernel on a computer with a MC68030
 	  processor, say Y. Otherwise, say N. Note that a MC68EC030 will not
@@ -50,6 +76,8 @@ config M68030
 config M68040
 	bool "68040 support"
 	depends on MMU && !MMU_SUN3
+	select GENERIC_ATOMIC64
+	select CPU_HAS_ADDRESS_SPACES
 	help
 	  If you anticipate running this kernel on a computer with a MC68LC040
 	  or MC68040 processor, say Y. Otherwise, say N. Note that an
@@ -59,6 +87,8 @@ config M68040
 config M68060
 	bool "68060 support"
 	depends on MMU && !MMU_SUN3
+	select GENERIC_ATOMIC64
+	select CPU_HAS_ADDRESS_SPACES
 	help
 	  If you anticipate running this kernel on a computer with a MC68060
 	  processor, say Y. Otherwise, say N.
@@ -91,10 +121,13 @@ config M68360
 	help
 	  Motorola 68360 processor support.

+endif # M68KCLASSIC
+
+if COLDFIRE
+
 config M5206
 	bool "MCF5206"
 	depends on !MMU
-	select COLDFIRE
 	select COLDFIRE_SW_A7
 	select HAVE_MBAR
 	help
@@ -103,7 +136,6 @@ config M5206
 config M5206e
 	bool "MCF5206e"
 	depends on !MMU
-	select COLDFIRE
 	select COLDFIRE_SW_A7
 	select HAVE_MBAR
 	help
@@ -112,7 +144,6 @@ config M5206e
 config M520x
 	bool "MCF520x"
 	depends on !MMU
-	select COLDFIRE
 	select GENERIC_CLOCKEVENTS
 	select HAVE_CACHE_SPLIT
 	help
@@ -121,7 +152,6 @@ config M520x
 config M523x
 	bool "MCF523x"
 	depends on !MMU
-	select COLDFIRE
 	select GENERIC_CLOCKEVENTS
 	select HAVE_CACHE_SPLIT
 	select HAVE_IPSBAR
@@ -131,7 +161,6 @@ config M523x
 config M5249
 	bool "MCF5249"
 	depends on !MMU
-	select COLDFIRE
 	select COLDFIRE_SW_A7
 	select HAVE_MBAR
 	help
@@ -143,7 +172,6 @@ config M527x
 config M5271
 	bool "MCF5271"
 	depends on !MMU
-	select COLDFIRE
 	select M527x
 	select HAVE_CACHE_SPLIT
 	select HAVE_IPSBAR
@@ -154,7 +182,6 @@ config M5271
 config M5272
 	bool "MCF5272"
 	depends on !MMU
-	select COLDFIRE
 	select COLDFIRE_SW_A7
 	select HAVE_MBAR
 	help
@@ -163,7 +190,6 @@ config M5272
 config M5275
 	bool "MCF5275"
 	depends on !MMU
-	select COLDFIRE
 	select M527x
 	select HAVE_CACHE_SPLIT
 	select HAVE_IPSBAR
@@ -174,7 +200,6 @@ config M5275
 config M528x
 	bool "MCF528x"
 	depends on !MMU
-	select COLDFIRE
 	select GENERIC_CLOCKEVENTS
 	select HAVE_CACHE_SPLIT
 	select HAVE_IPSBAR
@@ -184,7 +209,6 @@ config M528x
 config M5307
 	bool "MCF5307"
 	depends on !MMU
-	select COLDFIRE
 	select COLDFIRE_SW_A7
 	select HAVE_CACHE_CB
 	select HAVE_MBAR
@@ -194,7 +218,6 @@ config M5307
 config M532x
 	bool "MCF532x"
 	depends on !MMU
-	select COLDFIRE
 	select HAVE_CACHE_CB
 	help
 	  Freescale (Motorola) ColdFire 532x processor support.
@@ -202,7 +225,6 @@ config M532x
 config M5407
 	bool "MCF5407"
 	depends on !MMU
-	select COLDFIRE
 	select COLDFIRE_SW_A7
 	select HAVE_CACHE_CB
 	select HAVE_MBAR
@@ -214,9 +236,8 @@ config M54xx
 config M547x
 	bool "MCF547x"
-	depends on !MMU
-	select COLDFIRE
 	select M54xx
+	select MMU_COLDFIRE if MMU
 	select HAVE_CACHE_CB
 	select HAVE_MBAR
 	help
@@ -224,14 +245,15 @@ config M547x
 config M548x
 	bool "MCF548x"
-	depends on !MMU
-	select COLDFIRE
+	select MMU_COLDFIRE if MMU
 	select M54xx
 	select HAVE_CACHE_CB
 	select HAVE_MBAR
 	help
 	  Freescale ColdFire 5480/5481/5482/5483/5484/5485 processor support.

+endif # COLDFIRE
+
 comment "Processor Specific Options"


@@ -2,6 +2,14 @@ menu "Kernel hacking"

 source "lib/Kconfig.debug"

+config BOOTPARAM
+	bool 'Compiled-in Kernel Boot Parameter'
+
+config BOOTPARAM_STRING
+	string 'Kernel Boot Parameter'
+	default 'console=ttyS0,19200'
+	depends on BOOTPARAM
+
 if !MMU

 config FULLDEBUG
@@ -15,14 +23,6 @@ config HIGHPROFILE
 	help
 	  Use a fast secondary clock to produce profiling information.

-config BOOTPARAM
-	bool 'Compiled-in Kernel Boot Parameter'
-
-config BOOTPARAM_STRING
-	string 'Kernel Boot Parameter'
-	default 'console=ttyS0,19200'
-	depends on BOOTPARAM
-
 config NO_KERNEL_MSG
 	bool "Suppress Kernel BUG Messages"
 	help


@@ -1,5 +1,7 @@
 comment "Machine Types"

+if M68KCLASSIC
+
 config AMIGA
 	bool "Amiga support"
 	depends on MMU
@@ -130,6 +132,8 @@ config SUN3
 	  If you don't want to compile a kernel exclusively for a Sun 3, say N.

+endif # M68KCLASSIC
+
 config PILOT
 	bool


@@ -1,112 +0,0 @@
/****************************************************************************/
/*
* anchor.h -- Anchor CO-MEM Lite PCI host bridge part.
*
* (C) Copyright 2000, Moreton Bay (www.moreton.com.au)
*/
/****************************************************************************/
#ifndef anchor_h
#define anchor_h
/****************************************************************************/
/*
* Define basic addressing info.
*/
#if defined(CONFIG_M5407C3)
#define COMEM_BASE 0xFFFF0000 /* Base of CO-MEM address space */
#define COMEM_IRQ 25 /* IRQ of anchor part */
#else
#define COMEM_BASE 0x80000000 /* Base of CO-MEM address space */
#define COMEM_IRQ 25 /* IRQ of anchor part */
#endif
/****************************************************************************/
/*
* 4-byte registers of CO-MEM, so adjust register addresses for
* easy access. Handy macro for word access too.
*/
#define LREG(a) ((a) >> 2)
#define WREG(a) ((a) >> 1)
/*
* Define base addresses within CO-MEM Lite register address space.
*/
#define COMEM_I2O 0x0000 /* I2O registers */
#define COMEM_OPREGS 0x0400 /* Operation registers */
#define COMEM_PCIBUS 0x2000 /* Direct access to PCI bus */
#define COMEM_SHMEM 0x4000 /* Shared memory region */
#define COMEM_SHMEMSIZE 0x4000 /* Size of shared memory */
/*
* Define CO-MEM Registers.
*/
#define COMEM_I2OHISR 0x0030 /* I2O host interrupt status */
#define COMEM_I2OHIMR 0x0034 /* I2O host interrupt mask */
#define COMEM_I2OLISR 0x0038 /* I2O local interrupt status */
#define COMEM_I2OLIMR 0x003c /* I2O local interrupt mask */
#define COMEM_IBFPFIFO 0x0040 /* I2O inbound free/post FIFO */
#define COMEM_OBPFFIFO 0x0044 /* I2O outbound post/free FIFO */
#define COMEM_IBPFFIFO 0x0048 /* I2O inbound post/free FIFO */
#define COMEM_OBFPFIFO 0x004c /* I2O outbound free/post FIFO */
#define COMEM_DAHBASE 0x0460 /* Direct access base address */
#define COMEM_NVCMD 0x04a0 /* I2C serial command */
#define COMEM_NVREAD 0x04a4 /* I2C serial read */
#define COMEM_NVSTAT 0x04a8 /* I2C status */
#define COMEM_DMALBASE 0x04b0 /* DMA local base address */
#define COMEM_DMAHBASE 0x04b4 /* DMA host base address */
#define COMEM_DMASIZE 0x04b8 /* DMA size */
#define COMEM_DMACTL 0x04bc /* DMA control */
#define COMEM_HCTL 0x04e0 /* Host control */
#define COMEM_HINT 0x04e4 /* Host interrupt control/status */
#define COMEM_HLDATA 0x04e8 /* Host to local data mailbox */
#define COMEM_LINT 0x04f4 /* Local interrupt control status */
#define COMEM_LHDATA 0x04f8 /* Local to host data mailbox */
#define COMEM_LBUSCFG 0x04fc /* Local bus configuration */
/*
* Commands and flags for use with Direct Access Register.
*/
#define COMEM_DA_IACK 0x00000000 /* Interrupt acknowledge (read) */
#define COMEM_DA_SPCL 0x00000010 /* Special cycle (write) */
#define COMEM_DA_MEMRD 0x00000004 /* Memory read cycle */
#define COMEM_DA_MEMWR 0x00000004 /* Memory write cycle */
#define COMEM_DA_IORD 0x00000002 /* I/O read cycle */
#define COMEM_DA_IOWR 0x00000002 /* I/O write cycle */
#define COMEM_DA_CFGRD 0x00000006 /* Configuration read cycle */
#define COMEM_DA_CFGWR 0x00000006 /* Configuration write cycle */
#define COMEM_DA_ADDR(a) ((a) & 0xffffe000)
#define COMEM_DA_OFFSET(a) ((a) & 0x00001fff)
/*
* The PCI bus will be limited in what slots will actually be used.
* Define valid device numbers for different boards.
*/
#if defined(CONFIG_M5407C3)
#define COMEM_MINDEV 14 /* Minimum valid DEVICE */
#define COMEM_MAXDEV 14 /* Maximum valid DEVICE */
#define COMEM_BRIDGEDEV 15 /* Slot bridge is in */
#else
#define COMEM_MINDEV 0 /* Minimum valid DEVICE */
#define COMEM_MAXDEV 3 /* Maximum valid DEVICE */
#endif
#define COMEM_MAXPCI (COMEM_MAXDEV+1) /* Maximum PCI devices */
/****************************************************************************/
#endif /* anchor_h */


@@ -55,6 +55,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
 	return c != 0;
 }

+static inline int atomic_dec_and_test_lt(atomic_t *v)
+{
+	char c;
+	__asm__ __volatile__(
+		"subql #1,%1; slt %0"
+		: "=d" (c), "=m" (*v)
+		: "m" (*v));
+	return c != 0;
+}
+
 static inline int atomic_inc_and_test(atomic_t *v)
 {
 	char c;

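The atomic_dec_and_test_lt() helper added above underpins the ColdFire context
allocator introduced later in this merge (get_mmu_context() in mmu_context.h):
subql decrements the counter and slt latches the result, so the function
returns true once the count has gone below zero. A minimal usage sketch,
illustrative only and not part of this commit, with grab_slot() and nr_free
hypothetical names:

	static atomic_t nr_free = ATOMIC_INIT(4);	/* assumed pool of 4 slots */

	static int grab_slot(void)
	{
		if (atomic_dec_and_test_lt(&nr_free)) {
			/* went below zero: nothing was free, undo the decrement */
			atomic_inc(&nr_free);
			return 0;
		}
		return 1;	/* decremented to >= 0: one slot reserved */
	}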

@@ -2,23 +2,89 @@
 #define _M68K_CACHEFLUSH_H

 #include <linux/mm.h>
+#ifdef CONFIG_COLDFIRE
+#include <asm/mcfsim.h>
+#endif

 /* cache code */
 #define FLUSH_I_AND_D	(0x00000808)
 #define FLUSH_I		(0x00000008)

+#ifndef ICACHE_MAX_ADDR
+#define ICACHE_MAX_ADDR	0
+#define ICACHE_SET_MASK	0
+#define DCACHE_MAX_ADDR	0
+#define DCACHE_SETMASK	0
+#endif
+
+static inline void flush_cf_icache(unsigned long start, unsigned long end)
+{
+	unsigned long set;
+
+	for (set = start; set <= end; set += (0x10 - 3)) {
+		__asm__ __volatile__ (
+			"cpushl %%ic,(%0)\n\t"
+			"addq%.l #1,%0\n\t"
+			"cpushl %%ic,(%0)\n\t"
+			"addq%.l #1,%0\n\t"
+			"cpushl %%ic,(%0)\n\t"
+			"addq%.l #1,%0\n\t"
+			"cpushl %%ic,(%0)"
+			: "=a" (set)
+			: "a" (set));
+	}
+}
+
+static inline void flush_cf_dcache(unsigned long start, unsigned long end)
+{
+	unsigned long set;
+
+	for (set = start; set <= end; set += (0x10 - 3)) {
+		__asm__ __volatile__ (
+			"cpushl %%dc,(%0)\n\t"
+			"addq%.l #1,%0\n\t"
+			"cpushl %%dc,(%0)\n\t"
+			"addq%.l #1,%0\n\t"
+			"cpushl %%dc,(%0)\n\t"
+			"addq%.l #1,%0\n\t"
+			"cpushl %%dc,(%0)"
+			: "=a" (set)
+			: "a" (set));
+	}
+}
+
+static inline void flush_cf_bcache(unsigned long start, unsigned long end)
+{
+	unsigned long set;
+
+	for (set = start; set <= end; set += (0x10 - 3)) {
+		__asm__ __volatile__ (
+			"cpushl %%bc,(%0)\n\t"
+			"addq%.l #1,%0\n\t"
+			"cpushl %%bc,(%0)\n\t"
+			"addq%.l #1,%0\n\t"
+			"cpushl %%bc,(%0)\n\t"
+			"addq%.l #1,%0\n\t"
+			"cpushl %%bc,(%0)"
+			: "=a" (set)
+			: "a" (set));
+	}
+}
+
 /*
  * Cache handling functions
  */

 static inline void flush_icache(void)
 {
-	if (CPU_IS_040_OR_060)
+	if (CPU_IS_COLDFIRE) {
+		flush_cf_icache(0, ICACHE_MAX_ADDR);
+	} else if (CPU_IS_040_OR_060) {
 		asm volatile (	"nop\n"
 			"	.chip	68040\n"
 			"	cpusha	%bc\n"
 			"	.chip	68k");
-	else {
+	} else {
 		unsigned long tmp;
 		asm volatile (	"movec	%%cacr,%0\n"
 			"	or.w	%1,%0\n"
@@ -51,12 +117,14 @@ extern void cache_push_v(unsigned long vaddr, int len);
    process changes.  */
 #define __flush_cache_all()					\
 ({								\
-	if (CPU_IS_040_OR_060)					\
+	if (CPU_IS_COLDFIRE) {					\
+		flush_cf_dcache(0, DCACHE_MAX_ADDR);		\
+	} else if (CPU_IS_040_OR_060) {				\
 		__asm__ __volatile__("nop\n\t"			\
 				     ".chip 68040\n\t"		\
 				     "cpusha %dc\n\t"		\
 				     ".chip 68k");		\
-	else {							\
+	} else {						\
 		unsigned long _tmp;				\
 		__asm__ __volatile__("movec %%cacr,%0\n\t"	\
 				     "orw %1,%0\n\t"		\
@@ -112,7 +180,17 @@ static inline void flush_cache_page(struct vm_area_struct *vma, unsigned long vm
 /* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
 static inline void __flush_page_to_ram(void *vaddr)
 {
-	if (CPU_IS_040_OR_060) {
+	if (CPU_IS_COLDFIRE) {
+		unsigned long addr, start, end;
+		addr = ((unsigned long) vaddr) & ~(PAGE_SIZE - 1);
+		start = addr & ICACHE_SET_MASK;
+		end = (addr + PAGE_SIZE - 1) & ICACHE_SET_MASK;
+		if (start > end) {
+			flush_cf_bcache(0, end);
+			end = ICACHE_MAX_ADDR;
+		}
+		flush_cf_bcache(start, end);
+	} else if (CPU_IS_040_OR_060) {
 		__asm__ __volatile__("nop\n\t"
 				     ".chip 68040\n\t"
 				     "cpushp %%bc,(%0)\n\t"

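A note on the odd-looking stride in the flush_cf_*() loops above, with the
arithmetic written out: the four cpushl instructions push the four ways of one
cache set, and each addq #1 between them advances the set pointer by one, so
the loop body itself moves the pointer by 3. The for-loop increment of
(0x10 - 3) = 13 then covers the rest, so each iteration advances exactly
3 + 13 = 0x10, one cache line (CACHE_LINE_SIZE is 0x10 in m54xxacr.h, added
later in this merge).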

@@ -3,6 +3,10 @@

 #include <linux/in6.h>

+#ifdef CONFIG_GENERIC_CSUM
+#include <asm-generic/checksum.h>
+#else
+
 /*
  * computes the checksum of a memory block at buff, length len,
  * and adds in "sum" (32-bit)
@@ -34,30 +38,6 @@ extern __wsum csum_partial_copy_nocheck(const void *src,
 					void *dst, int len,
 					__wsum sum);

-#ifdef CONFIG_COLDFIRE
-
-/*
- * The ColdFire cores don't support all the 68k instructions used
- * in the optimized checksum code below. So it reverts back to using
- * more standard C coded checksums. The fast checksum code is
- * significantly larger than the optimized version, so it is not
- * inlined here.
- */
-__sum16 ip_fast_csum(const void *iph, unsigned int ihl);
-
-static inline __sum16 csum_fold(__wsum sum)
-{
-	unsigned int tmp = (__force u32)sum;
-
-	tmp = (tmp & 0xffff) + (tmp >> 16);
-	tmp = (tmp & 0xffff) + (tmp >> 16);
-
-	return (__force __sum16)~tmp;
-}
-
-#else
-
 /*
  * This is a version of ip_fast_csum() optimized for IP headers,
  * which always checksum on 4 octet boundaries.
@@ -97,8 +77,6 @@ static inline __sum16 csum_fold(__wsum sum)
 	return (__force __sum16)~sum;
 }

-#endif /* CONFIG_COLDFIRE */
-
 static inline __wsum
 csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
 		   unsigned short proto, __wsum sum)
@@ -167,4 +145,5 @@ csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr,
 	return csum_fold(sum);
 }

+#endif /* CONFIG_GENERIC_CSUM */
 #endif /* _M68K_CHECKSUM_H */

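For reference, the fold that the removed ColdFire csum_fold() performed
(broadly equivalent to what the remaining optimized asm version does) reduces
a 32-bit ones'-complement sum to 16 bits in two steps. A standalone user-space
sketch, not kernel code:

	#include <stdint.h>
	#include <stdio.h>

	static uint16_t fold32(uint32_t sum)
	{
		sum = (sum & 0xffff) + (sum >> 16);	/* first fold, may carry out */
		sum = (sum & 0xffff) + (sum >> 16);	/* second fold absorbs the carry */
		return (uint16_t)~sum;
	}

	int main(void)
	{
		/* 0x8001fffe: 0xfffe + 0x8001 = 0x17fff; 0x7fff + 1 = 0x8000 */
		printf("%04x\n", (unsigned)fold32(0x8001fffe));	/* prints 7fff */
		return 0;
	}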

@@ -1,7 +1,9 @@
 #ifndef _M68K_DIV64_H
 #define _M68K_DIV64_H

-#ifdef CONFIG_MMU
+#ifdef CONFIG_CPU_HAS_NO_MULDIV64
+#include <asm-generic/div64.h>
+#else

 #include <linux/types.h>
@@ -27,8 +29,6 @@
 		__rem;							\
 	})

-#else
-#include <asm-generic/div64.h>
-#endif /* CONFIG_MMU */
+#endif /* CONFIG_CPU_HAS_NO_MULDIV64 */

 #endif /* _M68K_DIV64_H */


@@ -59,10 +59,10 @@ typedef struct user_m68kfp_struct elf_fpregset_t;
    is actually used on ASV.  */
 #define ELF_PLAT_INIT(_r, load_addr)	_r->a1 = 0

-#ifndef CONFIG_SUN3
-#define ELF_EXEC_PAGESIZE	4096
-#else
+#if defined(CONFIG_SUN3) || defined(CONFIG_COLDFIRE)
 #define ELF_EXEC_PAGESIZE	8192
+#else
+#define ELF_EXEC_PAGESIZE	4096
 #endif

 /* This is the location that an ET_DYN program is loaded if exec'ed.  Typical


@@ -222,16 +222,24 @@
  * Non-MMU systems do not reserve %a2 in this way, and this definition is
  * not used for them.
  */
+#ifdef CONFIG_MMU
+
 #define curptr a2

 #define GET_CURRENT(tmp) get_current tmp
 .macro get_current reg=%d0
 	movel	%sp,\reg
-	andw	#-THREAD_SIZE,\reg
+	andl	#-THREAD_SIZE,\reg
 	movel	\reg,%curptr
 	movel	%curptr@,%curptr
 .endm
+
+#else
+
+#define GET_CURRENT(tmp)
+
+#endif /* CONFIG_MMU */

 #else /* C source */

 #define STR(X) STR1(X)


@@ -12,6 +12,8 @@
 #define FPSTATESIZE (96)
 #elif defined(CONFIG_M68KFPU_EMU)
 #define FPSTATESIZE (28)
+#elif defined(CONFIG_COLDFIRE) && defined(CONFIG_MMU)
+#define FPSTATESIZE (16)
 #elif defined(CONFIG_M68060)
 #define FPSTATESIZE (12)
 #else


@@ -225,7 +225,8 @@ static inline void gpio_set_value(unsigned gpio, int value)

 static inline int gpio_to_irq(unsigned gpio)
 {
-	return (gpio < MCFGPIO_IRQ_MAX) ? gpio + MCFGPIO_IRQ_VECBASE : -EINVAL;
+	return (gpio < MCFGPIO_IRQ_MAX) ? gpio + MCFGPIO_IRQ_VECBASE
+		: __gpio_to_irq(gpio);
 }

 static inline int irq_to_gpio(unsigned irq)


@@ -25,7 +25,8 @@
 #define NR_IRQS	0
 #endif

-#ifdef CONFIG_MMU
+#if defined(CONFIG_M68020) || defined(CONFIG_M68030) || \
+    defined(CONFIG_M68040) || defined(CONFIG_M68060)

 /*
  * Interrupt source definitions
@@ -80,7 +81,7 @@ extern unsigned int irq_canonicalize(unsigned int irq);

 #else
 #define irq_canonicalize(irq)	(irq)
-#endif /* CONFIG_MMU */
+#endif /* !(CONFIG_M68020 || CONFIG_M68030 || CONFIG_M68040 || CONFIG_M68060) */

 asmlinkage void do_IRQ(int irq, struct pt_regs *regs);
 extern atomic_t irq_err_count;


@@ -39,8 +39,12 @@
 #define ACR_CM_OFF_PRE	0x00000040	/* No cache, precise */
 #define ACR_CM_OFF_IMP	0x00000060	/* No cache, imprecise */
 #define ACR_CM		0x00000060	/* Cache mode mask */
+#define ACR_SP		0x00000008	/* Supervisor protect */
 #define ACR_WPROTECT	0x00000004	/* Write protect */

+#define ACR_BA(x)	((x) & 0xff000000)
+#define ACR_ADMSK(x)	((((x) - 1) & 0xff000000) >> 8)
+
 #if defined(CONFIG_M5407)

 #define ICACHE_SIZE 0x4000	/* instruction - 16k */
@@ -56,6 +60,11 @@
 #define CACHE_LINE_SIZE 0x0010	/* 16 bytes */
 #define CACHE_WAYS 4		/* 4 ways */

+#define ICACHE_SET_MASK	((ICACHE_SIZE / 64 - 1) << CACHE_WAYS)
+#define DCACHE_SET_MASK	((DCACHE_SIZE / 64 - 1) << CACHE_WAYS)
+#define ICACHE_MAX_ADDR	ICACHE_SET_MASK
+#define DCACHE_MAX_ADDR	DCACHE_SET_MASK
+
 /*
  * Version 4 cores have a true harvard style separate instruction
  * and data cache. Enable data and instruction caches, also enable write
@@ -73,6 +82,27 @@
 #else
 #define CACHE_MODE (CACR_DEC+CACR_DESB+CACR_DDCM_P+CACR_BEC+CACR_IEC+CACR_EUSP)
 #endif
+#define CACHE_INIT (CACR_DCINVA+CACR_BCINVA+CACR_ICINVA)
+
+#if defined(CONFIG_MMU)
+/*
+ * If running with the MMU enabled then we need to map the internal
+ * register region as non-cacheable. And then we map all our RAM as
+ * cacheable and supervisor access only.
+ */
+#define ACR0_MODE	(ACR_BA(CONFIG_MBAR)+ACR_ADMSK(0x1000000)+ \
+			 ACR_ENABLE+ACR_SUPER+ACR_CM_OFF_PRE+ACR_SP)
+#define ACR1_MODE	(ACR_BA(CONFIG_RAMBASE)+ACR_ADMSK(CONFIG_RAMSIZE)+ \
+			 ACR_ENABLE+ACR_SUPER+ACR_SP)
+#define ACR2_MODE	0
+#define ACR3_MODE	(ACR_BA(CONFIG_RAMBASE)+ACR_ADMSK(CONFIG_RAMSIZE)+ \
+			 ACR_ENABLE+ACR_SUPER+ACR_SP)
+
+#else
+
+/*
+ * For the non-MMU enabled case we map all of RAM as cacheable.
+ */
 #if defined(CONFIG_CACHE_COPYBACK)
 #define DATA_CACHE_MODE (ACR_ENABLE+ACR_ANY+ACR_CM_CP)
 #else
@@ -80,7 +110,6 @@
 #endif
 #define INSN_CACHE_MODE (ACR_ENABLE+ACR_ANY)

-#define CACHE_INIT	(CACR_DCINVA+CACR_BCINVA+CACR_ICINVA)
 #define CACHE_INVALIDATE  (CACHE_MODE+CACR_DCINVA+CACR_BCINVA+CACR_ICINVA)
 #define CACHE_INVALIDATEI (CACHE_MODE+CACR_BCINVA+CACR_ICINVA)
 #define CACHE_INVALIDATED (CACHE_MODE+CACR_DCINVA)
@@ -94,4 +123,5 @@
 #define CACHE_PUSH
 #endif

+#endif /* CONFIG_MMU */
 #endif	/* m54xxacr_h */

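A worked example of the new ACR helpers, assuming a hypothetical
CONFIG_RAMBASE of 0x00000000 and CONFIG_RAMSIZE of 0x20000000 (512 MB):

	ACR_ADMSK(0x20000000) = ((0x20000000 - 1) & 0xff000000) >> 8
	                      = 0x1f000000 >> 8
	                      = 0x001f0000

The address-mask field sits in bits 16-23 of the ACR, and each set bit masks
the corresponding address bit 24-31 out of the compare, so 0x001f0000 lets the
register match the whole 512 MB window above ACR_BA(CONFIG_RAMBASE). For a
16 MB region the expression collapses to 0, matching exactly one 16 MB block,
the ACR's minimum granularity.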

@@ -0,0 +1,102 @@
#ifndef M68K_MCF_PGALLOC_H
#define M68K_MCF_PGALLOC_H
#include <asm/tlb.h>
#include <asm/tlbflush.h>
extern inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
free_page((unsigned long) pte);
}
extern const char bad_pmd_string[];
extern inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
unsigned long page = __get_free_page(GFP_DMA|__GFP_REPEAT);
if (!page)
return NULL;
memset((void *)page, 0, PAGE_SIZE);
return (pte_t *) (page);
}
extern inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
{
return (pmd_t *) pgd;
}
#define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); })
#define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); })
#define pte_alloc_one_fast(mm, addr) pte_alloc_one(mm, addr)
#define pmd_populate(mm, pmd, page) (pmd_val(*pmd) = \
(unsigned long)(page_address(page)))
#define pmd_populate_kernel(mm, pmd, pte) (pmd_val(*pmd) = (unsigned long)(pte))
#define pmd_pgtable(pmd) pmd_page(pmd)
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
unsigned long address)
{
__free_page(page);
}
#define __pmd_free_tlb(tlb, pmd, address) do { } while (0)
static inline struct page *pte_alloc_one(struct mm_struct *mm,
unsigned long address)
{
struct page *page = alloc_pages(GFP_DMA|__GFP_REPEAT, 0);
pte_t *pte;
if (!page)
return NULL;
pte = kmap(page);
if (pte) {
clear_page(pte);
__flush_page_to_ram(pte);
flush_tlb_kernel_page(pte);
nocache_page(pte);
}
kunmap(page);
return page;
}
extern inline void pte_free(struct mm_struct *mm, struct page *page)
{
__free_page(page);
}
/*
* In our implementation, each pgd entry contains 1 pmd that is never allocated
* or freed. pgd_present is always 1, so this should never be called. -NL
*/
#define pmd_free(mm, pmd) BUG()
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
free_page((unsigned long) pgd);
}
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *new_pgd;
new_pgd = (pgd_t *)__get_free_page(GFP_DMA | __GFP_NOWARN);
if (!new_pgd)
return NULL;
memcpy(new_pgd, swapper_pg_dir, PAGE_SIZE);
memset(new_pgd, 0, PAGE_OFFSET >> PGDIR_SHIFT);
return new_pgd;
}
#define pgd_populate(mm, pmd, pte) BUG()
#endif /* M68K_MCF_PGALLOC_H */


@@ -0,0 +1,425 @@
#ifndef _MCF_PGTABLE_H
#define _MCF_PGTABLE_H
#include <asm/mcfmmu.h>
#include <asm/page.h>
/*
* MMUDR bits, in proper place. We write these directly into the MMUDR
* after masking from the pte.
*/
#define CF_PAGE_LOCKED MMUDR_LK /* 0x00000002 */
#define CF_PAGE_EXEC MMUDR_X /* 0x00000004 */
#define CF_PAGE_WRITABLE MMUDR_W /* 0x00000008 */
#define CF_PAGE_READABLE MMUDR_R /* 0x00000010 */
#define CF_PAGE_SYSTEM MMUDR_SP /* 0x00000020 */
#define CF_PAGE_COPYBACK MMUDR_CM_CCB /* 0x00000040 */
#define CF_PAGE_NOCACHE MMUDR_CM_NCP /* 0x00000080 */
#define CF_CACHEMASK (~MMUDR_CM_CCB)
#define CF_PAGE_MMUDR_MASK 0x000000fe
#define _PAGE_NOCACHE030 CF_PAGE_NOCACHE
/*
* MMUTR bits, need shifting down.
*/
#define CF_PAGE_MMUTR_MASK 0x00000c00
#define CF_PAGE_MMUTR_SHIFT 10
#define CF_PAGE_VALID (MMUTR_V << CF_PAGE_MMUTR_SHIFT)
#define CF_PAGE_SHARED (MMUTR_SG << CF_PAGE_MMUTR_SHIFT)
/*
* Fake bits, not implemented in CF, will get masked out before
* hitting hardware.
*/
#define CF_PAGE_DIRTY 0x00000001
#define CF_PAGE_FILE 0x00000200
#define CF_PAGE_ACCESSED 0x00001000
#define _PAGE_CACHE040 0x020 /* 68040 cache mode, cachable, copyback */
#define _PAGE_NOCACHE_S 0x040 /* 68040 no-cache mode, serialized */
#define _PAGE_NOCACHE 0x060 /* 68040 cache mode, non-serialized */
#define _PAGE_CACHE040W 0x000 /* 68040 cache mode, cachable, write-through */
#define _DESCTYPE_MASK 0x003
#define _CACHEMASK040 (~0x060)
#define _PAGE_GLOBAL040 0x400 /* 68040 global bit, used for kva descs */
/*
* Externally used page protection values.
*/
#define _PAGE_PRESENT (CF_PAGE_VALID)
#define _PAGE_ACCESSED (CF_PAGE_ACCESSED)
#define _PAGE_DIRTY (CF_PAGE_DIRTY)
#define _PAGE_READWRITE (CF_PAGE_READABLE \
| CF_PAGE_WRITABLE \
| CF_PAGE_SYSTEM \
| CF_PAGE_SHARED)
/*
* Compound page protection values.
*/
#define PAGE_NONE __pgprot(CF_PAGE_VALID \
| CF_PAGE_ACCESSED)
#define PAGE_SHARED __pgprot(CF_PAGE_VALID \
| CF_PAGE_ACCESSED \
| CF_PAGE_SHARED)
#define PAGE_INIT __pgprot(CF_PAGE_VALID \
| CF_PAGE_READABLE \
| CF_PAGE_WRITABLE \
| CF_PAGE_EXEC \
| CF_PAGE_SYSTEM)
#define PAGE_KERNEL __pgprot(CF_PAGE_VALID \
| CF_PAGE_ACCESSED \
| CF_PAGE_READABLE \
| CF_PAGE_WRITABLE \
| CF_PAGE_EXEC \
| CF_PAGE_SYSTEM)
#define PAGE_COPY __pgprot(CF_PAGE_VALID \
| CF_PAGE_ACCESSED \
| CF_PAGE_READABLE \
| CF_PAGE_DIRTY)
/*
* Page protections for initialising protection_map. See mm/mmap.c
* for use. In general, the bit positions are xwr, and P-items are
* private, the S-items are shared.
*/
#define __P000 PAGE_NONE
#define __P001 __pgprot(CF_PAGE_VALID \
| CF_PAGE_ACCESSED \
| CF_PAGE_READABLE)
#define __P010 __pgprot(CF_PAGE_VALID \
| CF_PAGE_ACCESSED \
| CF_PAGE_WRITABLE)
#define __P011 __pgprot(CF_PAGE_VALID \
| CF_PAGE_ACCESSED \
| CF_PAGE_READABLE \
| CF_PAGE_WRITABLE)
#define __P100 __pgprot(CF_PAGE_VALID \
| CF_PAGE_ACCESSED \
| CF_PAGE_EXEC)
#define __P101 __pgprot(CF_PAGE_VALID \
| CF_PAGE_ACCESSED \
| CF_PAGE_READABLE \
| CF_PAGE_EXEC)
#define __P110 __pgprot(CF_PAGE_VALID \
| CF_PAGE_ACCESSED \
| CF_PAGE_WRITABLE \
| CF_PAGE_EXEC)
#define __P111 __pgprot(CF_PAGE_VALID \
| CF_PAGE_ACCESSED \
| CF_PAGE_READABLE \
| CF_PAGE_WRITABLE \
| CF_PAGE_EXEC)
#define __S000 PAGE_NONE
#define __S001 __pgprot(CF_PAGE_VALID \
| CF_PAGE_ACCESSED \
| CF_PAGE_READABLE)
#define __S010 PAGE_SHARED
#define __S011 __pgprot(CF_PAGE_VALID \
| CF_PAGE_ACCESSED \
| CF_PAGE_SHARED \
| CF_PAGE_READABLE)
#define __S100 __pgprot(CF_PAGE_VALID \
| CF_PAGE_ACCESSED \
| CF_PAGE_EXEC)
#define __S101 __pgprot(CF_PAGE_VALID \
| CF_PAGE_ACCESSED \
| CF_PAGE_READABLE \
| CF_PAGE_EXEC)
#define __S110 __pgprot(CF_PAGE_VALID \
| CF_PAGE_ACCESSED \
| CF_PAGE_SHARED \
| CF_PAGE_EXEC)
#define __S111 __pgprot(CF_PAGE_VALID \
| CF_PAGE_ACCESSED \
| CF_PAGE_SHARED \
| CF_PAGE_READABLE \
| CF_PAGE_EXEC)
#define PTE_MASK PAGE_MASK
#define CF_PAGE_CHG_MASK (PTE_MASK | CF_PAGE_ACCESSED | CF_PAGE_DIRTY)
#ifndef __ASSEMBLY__
/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*/
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
pte_val(pte) = (pte_val(pte) & CF_PAGE_CHG_MASK) | pgprot_val(newprot);
return pte;
}
#define pmd_set(pmdp, ptep) do {} while (0)
static inline void pgd_set(pgd_t *pgdp, pmd_t *pmdp)
{
pgd_val(*pgdp) = virt_to_phys(pmdp);
}
#define __pte_page(pte) ((unsigned long) (pte_val(pte) & PAGE_MASK))
#define __pmd_page(pmd) ((unsigned long) (pmd_val(pmd)))
static inline int pte_none(pte_t pte)
{
return !pte_val(pte);
}
static inline int pte_present(pte_t pte)
{
return pte_val(pte) & CF_PAGE_VALID;
}
static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
pte_val(*ptep) = 0;
}
#define pte_pagenr(pte) ((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT)
#define pte_page(pte) virt_to_page(__pte_page(pte))
static inline int pmd_none2(pmd_t *pmd) { return !pmd_val(*pmd); }
#define pmd_none(pmd) pmd_none2(&(pmd))
static inline int pmd_bad2(pmd_t *pmd) { return 0; }
#define pmd_bad(pmd) pmd_bad2(&(pmd))
#define pmd_present(pmd) (!pmd_none2(&(pmd)))
static inline void pmd_clear(pmd_t *pmdp) { pmd_val(*pmdp) = 0; }
static inline int pgd_none(pgd_t pgd) { return 0; }
static inline int pgd_bad(pgd_t pgd) { return 0; }
static inline int pgd_present(pgd_t pgd) { return 1; }
static inline void pgd_clear(pgd_t *pgdp) {}
#define pte_ERROR(e) \
printk(KERN_ERR "%s:%d: bad pte %08lx.\n", \
__FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
printk(KERN_ERR "%s:%d: bad pmd %08lx.\n", \
__FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", \
__FILE__, __LINE__, pgd_val(e))
/*
* The following only work if pte_present() is true.
* Undefined behaviour if not...
* [we have the full set here even if they don't change from m68k]
*/
static inline int pte_read(pte_t pte)
{
return pte_val(pte) & CF_PAGE_READABLE;
}
static inline int pte_write(pte_t pte)
{
return pte_val(pte) & CF_PAGE_WRITABLE;
}
static inline int pte_exec(pte_t pte)
{
return pte_val(pte) & CF_PAGE_EXEC;
}
static inline int pte_dirty(pte_t pte)
{
return pte_val(pte) & CF_PAGE_DIRTY;
}
static inline int pte_young(pte_t pte)
{
return pte_val(pte) & CF_PAGE_ACCESSED;
}
static inline int pte_file(pte_t pte)
{
return pte_val(pte) & CF_PAGE_FILE;
}
static inline int pte_special(pte_t pte)
{
return 0;
}
static inline pte_t pte_wrprotect(pte_t pte)
{
pte_val(pte) &= ~CF_PAGE_WRITABLE;
return pte;
}
static inline pte_t pte_rdprotect(pte_t pte)
{
pte_val(pte) &= ~CF_PAGE_READABLE;
return pte;
}
static inline pte_t pte_exprotect(pte_t pte)
{
pte_val(pte) &= ~CF_PAGE_EXEC;
return pte;
}
static inline pte_t pte_mkclean(pte_t pte)
{
pte_val(pte) &= ~CF_PAGE_DIRTY;
return pte;
}
static inline pte_t pte_mkold(pte_t pte)
{
pte_val(pte) &= ~CF_PAGE_ACCESSED;
return pte;
}
static inline pte_t pte_mkwrite(pte_t pte)
{
pte_val(pte) |= CF_PAGE_WRITABLE;
return pte;
}
static inline pte_t pte_mkread(pte_t pte)
{
pte_val(pte) |= CF_PAGE_READABLE;
return pte;
}
static inline pte_t pte_mkexec(pte_t pte)
{
pte_val(pte) |= CF_PAGE_EXEC;
return pte;
}
static inline pte_t pte_mkdirty(pte_t pte)
{
pte_val(pte) |= CF_PAGE_DIRTY;
return pte;
}
static inline pte_t pte_mkyoung(pte_t pte)
{
pte_val(pte) |= CF_PAGE_ACCESSED;
return pte;
}
static inline pte_t pte_mknocache(pte_t pte)
{
pte_val(pte) |= 0x80 | (pte_val(pte) & ~0x40);
return pte;
}
static inline pte_t pte_mkcache(pte_t pte)
{
pte_val(pte) &= ~CF_PAGE_NOCACHE;
return pte;
}
static inline pte_t pte_mkspecial(pte_t pte)
{
return pte;
}
#define swapper_pg_dir kernel_pg_dir
extern pgd_t kernel_pg_dir[PTRS_PER_PGD];
/*
* Find an entry in a pagetable directory.
*/
#define pgd_index(address) ((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
/*
* Find an entry in a kernel pagetable directory.
*/
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
/*
* Find an entry in the second-level pagetable.
*/
static inline pmd_t *pmd_offset(pgd_t *pgd, unsigned long address)
{
return (pmd_t *) pgd;
}
/*
* Find an entry in the third-level pagetable.
*/
#define __pte_offset(address) ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) \
((pte_t *) __pmd_page(*(dir)) + __pte_offset(address))
/*
* Disable caching for page at given kernel virtual address.
*/
static inline void nocache_page(void *vaddr)
{
pgd_t *dir;
pmd_t *pmdp;
pte_t *ptep;
unsigned long addr = (unsigned long) vaddr;
dir = pgd_offset_k(addr);
pmdp = pmd_offset(dir, addr);
ptep = pte_offset_kernel(pmdp, addr);
*ptep = pte_mknocache(*ptep);
}
/*
* Enable caching for page at given kernel virtual address.
*/
static inline void cache_page(void *vaddr)
{
pgd_t *dir;
pmd_t *pmdp;
pte_t *ptep;
unsigned long addr = (unsigned long) vaddr;
dir = pgd_offset_k(addr);
pmdp = pmd_offset(dir, addr);
ptep = pte_offset_kernel(pmdp, addr);
*ptep = pte_mkcache(*ptep);
}
#define PTE_FILE_MAX_BITS 21
#define PTE_FILE_SHIFT 11
static inline unsigned long pte_to_pgoff(pte_t pte)
{
return pte_val(pte) >> PTE_FILE_SHIFT;
}
static inline pte_t pgoff_to_pte(unsigned pgoff)
{
return __pte((pgoff << PTE_FILE_SHIFT) + CF_PAGE_FILE);
}
/*
* Encode and de-code a swap entry (must be !pte_none(e) && !pte_present(e))
*/
#define __swp_type(x) ((x).val & 0xFF)
#define __swp_offset(x) ((x).val >> PTE_FILE_SHIFT)
#define __swp_entry(typ, off) ((swp_entry_t) { (typ) | \
(off << PTE_FILE_SHIFT) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) (__pte((x).val))
#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
#define pte_offset_map(pmdp, addr) ((pte_t *)__pmd_page(*pmdp) + \
__pte_offset(addr))
#define pte_unmap(pte) ((void) 0)
#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
#endif /* !__ASSEMBLY__ */
#endif /* _MCF_PGTABLE_H */

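The swap-entry encoding at the end of the new header packs the type into the
low eight bits and the offset above PTE_FILE_SHIFT, which leaves bit 10
(CF_PAGE_VALID) clear, so a swap entry can never look pte_present(). A
standalone round-trip check — illustrative user-space C, not kernel code:

	#include <assert.h>

	#define PTE_FILE_SHIFT	11

	int main(void)
	{
		/* __swp_entry(3, 0x1234) */
		unsigned long val = 3UL | (0x1234UL << PTE_FILE_SHIFT);

		assert((val & 0xFF) == 3);			/* __swp_type()   */
		assert((val >> PTE_FILE_SHIFT) == 0x1234);	/* __swp_offset() */
		assert((val & 0x400) == 0);			/* CF_PAGE_VALID stays clear */
		return 0;
	}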

@@ -0,0 +1,112 @@
/*
* mcfmmu.h -- definitions for the ColdFire v4e MMU
*
* (C) Copyright 2011, Greg Ungerer <gerg@uclinux.org>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
#ifndef MCFMMU_H
#define MCFMMU_H
/*
* The MMU support registers are mapped into the address space using
 * the processor MMUBASE register. We use a fixed address for the mapping;
 * there doesn't seem to be any need to make this configurable yet.
*/
#define MMUBASE 0xfe000000
/*
 * The support registers of the MMU. Names are the same as those
* used in the Freescale v4e documentation.
*/
#define MMUCR (MMUBASE + 0x00) /* Control register */
#define MMUOR (MMUBASE + 0x04) /* Operation register */
#define MMUSR (MMUBASE + 0x08) /* Status register */
#define MMUAR (MMUBASE + 0x10) /* TLB Address register */
#define MMUTR (MMUBASE + 0x14) /* TLB Tag register */
#define MMUDR (MMUBASE + 0x18) /* TLB Data register */
/*
* MMU Control register bit flags
*/
#define MMUCR_EN 0x00000001 /* Virtual mode enable */
#define MMUCR_ASM 0x00000002 /* Address space mode */
/*
* MMU Operation register.
*/
#define MMUOR_UAA 0x00000001 /* Update allocation address */
#define MMUOR_ACC 0x00000002 /* TLB access */
#define MMUOR_RD 0x00000004 /* TLB access read */
#define MMUOR_WR 0x00000000 /* TLB access write */
#define MMUOR_ADR 0x00000008 /* TLB address select */
#define MMUOR_ITLB 0x00000010 /* ITLB operation */
#define MMUOR_CAS 0x00000020 /* Clear non-locked ASID TLBs */
#define MMUOR_CNL 0x00000040 /* Clear non-locked TLBs */
#define MMUOR_CA 0x00000080 /* Clear all TLBs */
#define MMUOR_STLB 0x00000100 /* Search TLBs */
#define MMUOR_AAN 16 /* TLB allocation address */
#define MMUOR_AAMASK 0xffff0000 /* AA mask */
/*
* MMU Status register.
*/
#define MMUSR_HIT 0x00000002 /* Search TLB hit */
#define MMUSR_WF 0x00000008 /* Write access fault */
#define MMUSR_RF 0x00000010 /* Read access fault */
#define MMUSR_SPF 0x00000020 /* Supervisor protect fault */
/*
* MMU Read/Write Tag register.
*/
#define MMUTR_V 0x00000001 /* Valid */
#define MMUTR_SG 0x00000002 /* Shared global */
#define MMUTR_IDN 2 /* Address Space ID */
#define MMUTR_IDMASK 0x000003fc /* ASID mask */
#define MMUTR_VAN 10 /* Virtual Address */
#define MMUTR_VAMASK 0xfffffc00 /* VA mask */
/*
* MMU Read/Write Data register.
*/
#define MMUDR_LK 0x00000002 /* Lock entry */
#define MMUDR_X 0x00000004 /* Execute access enable */
#define MMUDR_W 0x00000008 /* Write access enable */
#define MMUDR_R 0x00000010 /* Read access enable */
#define MMUDR_SP 0x00000020 /* Supervisor access enable */
#define MMUDR_CM_CWT 0x00000000 /* Cachable write thru */
#define MMUDR_CM_CCB 0x00000040 /* Cachable copy back */
#define MMUDR_CM_NCP 0x00000080 /* Non-cachable precise */
#define MMUDR_CM_NCI 0x000000c0 /* Non-cachable imprecise */
#define MMUDR_SZ_1MB 0x00000000 /* 1MB page size */
#define MMUDR_SZ_4KB 0x00000100 /* 4kB page size */
#define MMUDR_SZ_8KB 0x00000200 /* 8kB page size */
#define MMUDR_SZ_1KB 0x00000300 /* 1kB page size */
#define MMUDR_PAN 10 /* Physical address */
#define MMUDR_PAMASK 0xfffffc00 /* PA mask */
#ifndef __ASSEMBLY__
/*
* Simple access functions for the MMU registers. Nothing fancy
* currently required, just simple 32bit access.
*/
static inline u32 mmu_read(u32 a)
{
return *((volatile u32 *) a);
}
static inline void mmu_write(u32 a, u32 v)
{
*((volatile u32 *) a) = v;
__asm__ __volatile__ ("nop");
}
int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word);
#endif
#endif /* MCFMMU_H */

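The register map above is always driven in the same three-step sequence, the
one load_ksp_mmu() in mmu_context.h and the cf_tlb_miss() handler use: write
the tag, write the data, then commit through the operation register. A hedged
sketch of that sequence, with cf_tlb_load() a hypothetical wrapper name:

	static inline void cf_tlb_load(unsigned long va, unsigned long pa,
				       int asid, unsigned long flags)
	{
		mmu_write(MMUTR, (va & MMUTR_VAMASK) | (asid << MMUTR_IDN) | MMUTR_V);
		mmu_write(MMUDR, (pa & MMUDR_PAMASK) | flags | MMUDR_SZ_8KB);
		/* load the entry and advance the allocation address */
		mmu_write(MMUOR, MMUOR_ACC | MMUOR_UAA);
	}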

@@ -8,7 +8,206 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk
 }

 #ifdef CONFIG_MMU
-#ifndef CONFIG_SUN3
+
+#if defined(CONFIG_COLDFIRE)
+
+#include <asm/atomic.h>
+#include <asm/bitops.h>
+#include <asm/mcfmmu.h>
+#include <asm/mmu.h>
+
+#define NO_CONTEXT		256
+#define LAST_CONTEXT		255
+#define FIRST_CONTEXT		1
+
+extern unsigned long context_map[];
+extern mm_context_t next_mmu_context;
+
+extern atomic_t nr_free_contexts;
+extern struct mm_struct *context_mm[LAST_CONTEXT+1];
+extern void steal_context(void);
+
+static inline void get_mmu_context(struct mm_struct *mm)
+{
+	mm_context_t ctx;
+
+	if (mm->context != NO_CONTEXT)
+		return;
+	while (atomic_dec_and_test_lt(&nr_free_contexts)) {
+		atomic_inc(&nr_free_contexts);
+		steal_context();
+	}
+	ctx = next_mmu_context;
+	while (test_and_set_bit(ctx, context_map)) {
+		ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx);
+		if (ctx > LAST_CONTEXT)
+			ctx = 0;
+	}
+	next_mmu_context = (ctx + 1) & LAST_CONTEXT;
+	mm->context = ctx;
+	context_mm[ctx] = mm;
+}
+
+/*
+ * Set up the context for a new address space.
+ */
+#define init_new_context(tsk, mm)	(((mm)->context = NO_CONTEXT), 0)
+
+/*
+ * We're finished using the context for an address space.
+ */
+static inline void destroy_context(struct mm_struct *mm)
+{
+	if (mm->context != NO_CONTEXT) {
+		clear_bit(mm->context, context_map);
+		mm->context = NO_CONTEXT;
+		atomic_inc(&nr_free_contexts);
+	}
+}
+
+static inline void set_context(mm_context_t context, pgd_t *pgd)
+{
+	__asm__ __volatile__ ("movec %0,%%asid" : : "d" (context));
+}
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+	struct task_struct *tsk)
+{
+	get_mmu_context(tsk->mm);
+	set_context(tsk->mm->context, next->pgd);
+}
+
+/*
+ * After we have set current->mm to a new value, this activates
+ * the context for the new mm so we see the new mappings.
+ */
+static inline void activate_mm(struct mm_struct *active_mm,
+	struct mm_struct *mm)
+{
+	get_mmu_context(mm);
+	set_context(mm->context, mm->pgd);
+}
+
+#define deactivate_mm(tsk, mm) do { } while (0)
+
+extern void mmu_context_init(void);
+#define prepare_arch_switch(next) load_ksp_mmu(next)
+
+static inline void load_ksp_mmu(struct task_struct *task)
+{
+	unsigned long flags;
+	struct mm_struct *mm;
+	int asid;
+	pgd_t *pgd;
+	pmd_t *pmd;
+	pte_t *pte;
+	unsigned long mmuar;
+
+	local_irq_save(flags);
+	mmuar = task->thread.ksp;
+
+	/* Search for a valid TLB entry, if one is found, don't remap */
+	mmu_write(MMUAR, mmuar);
+	mmu_write(MMUOR, MMUOR_STLB | MMUOR_ADR);
+	if (mmu_read(MMUSR) & MMUSR_HIT)
+		goto end;
+
+	if (mmuar >= PAGE_OFFSET) {
+		mm = &init_mm;
+	} else {
+		pr_info("load_ksp_mmu: non-kernel mm found: 0x%p\n", task->mm);
+		mm = task->mm;
+	}
+
+	if (!mm)
+		goto bug;
+
+	pgd = pgd_offset(mm, mmuar);
+	if (pgd_none(*pgd))
+		goto bug;
+
+	pmd = pmd_offset(pgd, mmuar);
+	if (pmd_none(*pmd))
+		goto bug;
+
+	pte = (mmuar >= PAGE_OFFSET) ? pte_offset_kernel(pmd, mmuar)
+				     : pte_offset_map(pmd, mmuar);
+	if (pte_none(*pte) || !pte_present(*pte))
+		goto bug;
+
+	set_pte(pte, pte_mkyoung(*pte));
+	asid = mm->context & 0xff;
+	if (!pte_dirty(*pte) && mmuar <= PAGE_OFFSET)
+		set_pte(pte, pte_wrprotect(*pte));
+
+	mmu_write(MMUTR, (mmuar & PAGE_MASK) | (asid << MMUTR_IDN) |
+		(((int)(pte->pte) & (int)CF_PAGE_MMUTR_MASK)
+		>> CF_PAGE_MMUTR_SHIFT) | MMUTR_V);
+
+	mmu_write(MMUDR, (pte_val(*pte) & PAGE_MASK) |
+		((pte->pte) & CF_PAGE_MMUDR_MASK) | MMUDR_SZ_8KB | MMUDR_X);
+
+	mmu_write(MMUOR, MMUOR_ACC | MMUOR_UAA);
+
+	goto end;
+
+bug:
+	pr_info("ksp load failed: mm=0x%p ksp=0x08%lx\n", mm, mmuar);
+end:
+	local_irq_restore(flags);
+}
+
+#elif defined(CONFIG_SUN3)
+#include <asm/sun3mmu.h>
+#include <linux/sched.h>
+
+extern unsigned long get_free_context(struct mm_struct *mm);
+extern void clear_context(unsigned long context);
+
+/* set the context for a new task to unmapped */
+static inline int init_new_context(struct task_struct *tsk,
+				   struct mm_struct *mm)
+{
+	mm->context = SUN3_INVALID_CONTEXT;
+	return 0;
+}
+
+/* find the context given to this process, and if it hasn't already
+   got one, go get one for it. */
+static inline void get_mmu_context(struct mm_struct *mm)
+{
+	if (mm->context == SUN3_INVALID_CONTEXT)
+		mm->context = get_free_context(mm);
+}
+
+/* flush context if allocated... */
+static inline void destroy_context(struct mm_struct *mm)
+{
+	if (mm->context != SUN3_INVALID_CONTEXT)
+		clear_context(mm->context);
+}
+
+static inline void activate_context(struct mm_struct *mm)
+{
+	get_mmu_context(mm);
+	sun3_put_context(mm->context);
+}
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+			     struct task_struct *tsk)
+{
+	activate_context(tsk->mm);
+}
+
+#define deactivate_mm(tsk, mm) do { } while (0)
+
+static inline void activate_mm(struct mm_struct *prev_mm,
+			       struct mm_struct *next_mm)
+{
+	activate_context(next_mm);
+}
+
+#else
+
 #include <asm/setup.h>
 #include <asm/page.h>
@@ -103,55 +302,8 @@ static inline void activate_mm(struct mm_struct *prev_mm,
 	switch_mm_0460(next_mm);
 }

-#else  /* CONFIG_SUN3 */
-#include <asm/sun3mmu.h>
-#include <linux/sched.h>
-
-extern unsigned long get_free_context(struct mm_struct *mm);
-extern void clear_context(unsigned long context);
-
-/* set the context for a new task to unmapped */
-static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
-{
-	mm->context = SUN3_INVALID_CONTEXT;
-	return 0;
-}
-
-/* find the context given to this process, and if it hasn't already
-   got one, go get one for it. */
-static inline void get_mmu_context(struct mm_struct *mm)
-{
-	if(mm->context == SUN3_INVALID_CONTEXT)
-		mm->context = get_free_context(mm);
-}
-
-/* flush context if allocated... */
-static inline void destroy_context(struct mm_struct *mm)
-{
-	if(mm->context != SUN3_INVALID_CONTEXT)
-		clear_context(mm->context);
-}
-
-static inline void activate_context(struct mm_struct *mm)
-{
-	get_mmu_context(mm);
-	sun3_put_context(mm->context);
-}
-
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
-{
-	activate_context(tsk->mm);
-}
-
-#define deactivate_mm(tsk,mm)	do { } while (0)
-
-static inline void activate_mm(struct mm_struct *prev_mm,
-			       struct mm_struct *next_mm)
-{
-	activate_context(next_mm);
-}
-
 #endif

 #else /* !CONFIG_MMU */

 static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm)


@@ -8,6 +8,7 @@
 #define _PAGE_PRESENT	0x001
 #define _PAGE_SHORT	0x002
 #define _PAGE_RONLY	0x004
+#define _PAGE_READWRITE	0x000
 #define _PAGE_ACCESSED	0x008
 #define _PAGE_DIRTY	0x010
 #define _PAGE_SUPER	0x080	/* 68040 supervisor only */


@@ -6,10 +6,10 @@
 #include <asm/page_offset.h>

 /* PAGE_SHIFT determines the page size */
-#ifndef CONFIG_SUN3
-#define PAGE_SHIFT	(12)
+#if defined(CONFIG_SUN3) || defined(CONFIG_COLDFIRE)
+#define PAGE_SHIFT	13
 #else
-#define PAGE_SHIFT	(13)
+#define PAGE_SHIFT	12
 #endif
 #define PAGE_SIZE	(_AC(1, UL) << PAGE_SHIFT)
 #define PAGE_MASK	(~(PAGE_SIZE-1))
@@ -36,6 +36,10 @@ typedef struct page *pgtable_t;
 #define __pgd(x)	((pgd_t) { (x) } )
 #define __pgprot(x)	((pgprot_t) { (x) } )

+extern unsigned long _rambase;
+extern unsigned long _ramstart;
+extern unsigned long _ramend;
+
 #endif /* !__ASSEMBLY__ */

 #ifdef CONFIG_MMU


@@ -5,9 +5,6 @@
 extern unsigned long memory_start;
 extern unsigned long memory_end;

-extern unsigned long _rambase;
-extern unsigned long _ramstart;
-extern unsigned long _ramend;

 #define get_user_page(vaddr)		__get_free_page(GFP_KERNEL)
 #define free_user_page(page, addr)	free_page(addr)


@@ -1,11 +1,9 @@
 /* This handles the memory map.. */

-#ifdef CONFIG_MMU
-#ifndef CONFIG_SUN3
-#define	PAGE_OFFSET_RAW		0x00000000
-#else
+#if defined(CONFIG_RAMBASE)
+#define	PAGE_OFFSET_RAW		CONFIG_RAMBASE
+#elif defined(CONFIG_SUN3)
 #define	PAGE_OFFSET_RAW		0x0E000000
-#endif
 #else
-#define	PAGE_OFFSET_RAW		CONFIG_RAMBASE
+#define	PAGE_OFFSET_RAW		0x00000000
 #endif


@@ -7,7 +7,9 @@
 #ifdef CONFIG_MMU
 #include <asm/virtconvert.h>

-#ifdef CONFIG_SUN3
+#if defined(CONFIG_COLDFIRE)
+#include <asm/mcf_pgalloc.h>
+#elif defined(CONFIG_SUN3)
 #include <asm/sun3_pgalloc.h>
 #else
 #include <asm/motorola_pgalloc.h>


@@ -40,6 +40,8 @@
 /* PGDIR_SHIFT determines what a third-level page table entry can map */
 #ifdef CONFIG_SUN3
 #define PGDIR_SHIFT 17
+#elif defined(CONFIG_COLDFIRE)
+#define PGDIR_SHIFT	22
 #else
 #define PGDIR_SHIFT	25
 #endif
@@ -54,6 +56,10 @@
 #define PTRS_PER_PTE	16
 #define PTRS_PER_PMD	1
 #define PTRS_PER_PGD	2048
+#elif defined(CONFIG_COLDFIRE)
+#define PTRS_PER_PTE	512
+#define PTRS_PER_PMD	1
+#define PTRS_PER_PGD	1024
 #else
 #define PTRS_PER_PTE	1024
 #define PTRS_PER_PMD	8
@@ -66,12 +72,22 @@
 #ifdef CONFIG_SUN3
 #define KMAP_START	0x0DC00000
 #define KMAP_END	0x0E000000
+#elif defined(CONFIG_COLDFIRE)
+#define KMAP_START	0xe0000000
+#define KMAP_END	0xf0000000
 #else
 #define KMAP_START	0xd0000000
 #define KMAP_END	0xf0000000
 #endif

-#ifndef CONFIG_SUN3
+#ifdef CONFIG_SUN3
+extern unsigned long m68k_vmalloc_end;
+#define VMALLOC_START 0x0f800000
+#define VMALLOC_END m68k_vmalloc_end
+#elif defined(CONFIG_COLDFIRE)
+#define VMALLOC_START	0xd0000000
+#define VMALLOC_END	0xe0000000
+#else
 /* Just any arbitrary offset to the start of the vmalloc VM area: the
  * current 8MB value just means that there will be a 8MB "hole" after the
  * physical memory until the kernel virtual memory starts. That means that
@@ -82,11 +98,7 @@
 #define VMALLOC_OFFSET	(8*1024*1024)
 #define VMALLOC_START (((unsigned long) high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
 #define VMALLOC_END KMAP_START
-#else
-extern unsigned long m68k_vmalloc_end;
-#define VMALLOC_START 0x0f800000
-#define VMALLOC_END m68k_vmalloc_end
-#endif /* CONFIG_SUN3 */
+#endif

 /* zero page used for uninitialized stuff */
 extern void *empty_zero_page;
@@ -130,6 +142,8 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,

 #ifdef CONFIG_SUN3
 #include <asm/sun3_pgtable.h>
+#elif defined(CONFIG_COLDFIRE)
+#include <asm/mcf_pgtable.h>
 #else
 #include <asm/motorola_pgtable.h>
 #endif
@@ -138,6 +152,9 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
 /*
  * Macro to mark a page protection value as "uncacheable".
  */
+#ifdef CONFIG_COLDFIRE
+# define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | CF_PAGE_NOCACHE))
+#else
 #ifdef SUN3_PAGE_NOCACHE
 # define __SUN3_PAGE_NOCACHE	SUN3_PAGE_NOCACHE
 #else
@@ -152,6 +169,7 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
 		? (__pgprot((pgprot_val(prot) & _CACHEMASK040) | _PAGE_NOCACHE_S))	\
 		: (prot)))

+#endif /* CONFIG_COLDFIRE */
 #include <asm-generic/pgtable.h>
 #endif /* !__ASSEMBLY__ */


@@ -48,10 +48,12 @@ static inline void wrusp(unsigned long usp)
  * so don't change it unless you know what you are doing.
  */
 #ifdef CONFIG_MMU
-#ifndef CONFIG_SUN3
-#define TASK_SIZE	(0xF0000000UL)
-#else
+#if defined(CONFIG_COLDFIRE)
+#define TASK_SIZE	(0xC0000000UL)
+#elif defined(CONFIG_SUN3)
 #define TASK_SIZE	(0x0E000000UL)
+#else
+#define TASK_SIZE	(0xF0000000UL)
 #endif
 #else
 #define TASK_SIZE	(0xFFFFFFFFUL)
@@ -66,10 +68,12 @@ static inline void wrusp(unsigned long usp)
  * space during mmap's.
  */
 #ifdef CONFIG_MMU
-#ifndef CONFIG_SUN3
-#define TASK_UNMAPPED_BASE	0xC0000000UL
-#else
+#if defined(CONFIG_COLDFIRE)
+#define TASK_UNMAPPED_BASE	0x60000000UL
+#elif defined(CONFIG_SUN3)
 #define TASK_UNMAPPED_BASE	0x0A000000UL
+#else
+#define TASK_UNMAPPED_BASE	0xC0000000UL
 #endif
 #define TASK_UNMAPPED_ALIGN(addr, off)	PAGE_ALIGN(addr)
 #else
@@ -88,14 +92,12 @@ struct thread_struct {
 	unsigned long  fp[8*3];
 	unsigned long  fpcntl[3];	/* fp control regs */
 	unsigned char  fpstate[FPSTATESIZE];	/* floating point state */
-	struct thread_info info;
 };

 #define INIT_THREAD  {							\
 	.ksp	= sizeof(init_stack) + (unsigned long) init_stack,	\
 	.sr	= PS_S,							\
 	.fs	= __KERNEL_DS,						\
-	.info	= INIT_THREAD_INFO(init_task),				\
 }

 #ifdef CONFIG_MMU
@ -22,23 +22,26 @@ typedef struct {
} mm_segment_t; } mm_segment_t;
#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) #define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
#define USER_DS MAKE_MM_SEG(__USER_DS)
#define KERNEL_DS MAKE_MM_SEG(__KERNEL_DS)
#ifdef CONFIG_CPU_HAS_ADDRESS_SPACES
/* /*
* Get/set the SFC/DFC registers for MOVES instructions * Get/set the SFC/DFC registers for MOVES instructions
*/ */
#define USER_DS MAKE_MM_SEG(__USER_DS)
#define KERNEL_DS MAKE_MM_SEG(__KERNEL_DS)
static inline mm_segment_t get_fs(void) static inline mm_segment_t get_fs(void)
{ {
#ifdef CONFIG_MMU
mm_segment_t _v; mm_segment_t _v;
__asm__ ("movec %/dfc,%0":"=r" (_v.seg):); __asm__ ("movec %/dfc,%0":"=r" (_v.seg):);
return _v; return _v;
#else }
return USER_DS;
#endif static inline void set_fs(mm_segment_t val)
{
__asm__ __volatile__ ("movec %0,%/sfc\n\t"
"movec %0,%/dfc\n\t"
: /* no outputs */ : "r" (val.seg) : "memory");
} }
static inline mm_segment_t get_ds(void) static inline mm_segment_t get_ds(void)
@ -47,14 +50,13 @@ static inline mm_segment_t get_ds(void)
return KERNEL_DS; return KERNEL_DS;
} }
static inline void set_fs(mm_segment_t val) #else
{ #define USER_DS MAKE_MM_SEG(TASK_SIZE)
#ifdef CONFIG_MMU #define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
__asm__ __volatile__ ("movec %0,%/sfc\n\t" #define get_ds() (KERNEL_DS)
"movec %0,%/dfc\n\t" #define get_fs() (current_thread_info()->addr_limit)
: /* no outputs */ : "r" (val.seg) : "memory"); #define set_fs(x) (current_thread_info()->addr_limit = (x))
#endif #endif
}
#define segment_eq(a,b) ((a).seg == (b).seg) #define segment_eq(a,b) ((a).seg == (b).seg)
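For context, the usual save/override/restore pattern built on these accessors looks like the sketch below; flush_tlb_kernel_page() later in this merge uses exactly this shape. The surrounding code is hypothetical:

	mm_segment_t old_fs = get_fs();	/* remember the current limit */
	set_fs(KERNEL_DS);		/* kernel addresses now pass access checks */
	/* ... use the user-space accessors on a kernel address ... */
	set_fs(old_fs);			/* always restore the previous limit */

On CPUs with address spaces this reprograms sfc/dfc; on ColdFire it only toggles current_thread_info()->addr_limit.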
@ -40,6 +40,7 @@
#define MACH_HP300 9 #define MACH_HP300 9
#define MACH_Q40 10 #define MACH_Q40 10
#define MACH_SUN3X 11 #define MACH_SUN3X 11
#define MACH_M54XX 12
#define COMMAND_LINE_SIZE 256 #define COMMAND_LINE_SIZE 256
@ -211,23 +212,27 @@ extern unsigned long m68k_machtype;
#define CPUB_68030 1 #define CPUB_68030 1
#define CPUB_68040 2 #define CPUB_68040 2
#define CPUB_68060 3 #define CPUB_68060 3
#define CPUB_COLDFIRE 4
#define CPU_68020 (1<<CPUB_68020) #define CPU_68020 (1<<CPUB_68020)
#define CPU_68030 (1<<CPUB_68030) #define CPU_68030 (1<<CPUB_68030)
#define CPU_68040 (1<<CPUB_68040) #define CPU_68040 (1<<CPUB_68040)
#define CPU_68060 (1<<CPUB_68060) #define CPU_68060 (1<<CPUB_68060)
#define CPU_COLDFIRE (1<<CPUB_COLDFIRE)
#define FPUB_68881 0 #define FPUB_68881 0
#define FPUB_68882 1 #define FPUB_68882 1
#define FPUB_68040 2 /* Internal FPU */ #define FPUB_68040 2 /* Internal FPU */
#define FPUB_68060 3 /* Internal FPU */ #define FPUB_68060 3 /* Internal FPU */
#define FPUB_SUNFPA 4 /* Sun-3 FPA */ #define FPUB_SUNFPA 4 /* Sun-3 FPA */
#define FPUB_COLDFIRE 5 /* ColdFire FPU */
#define FPU_68881 (1<<FPUB_68881) #define FPU_68881 (1<<FPUB_68881)
#define FPU_68882 (1<<FPUB_68882) #define FPU_68882 (1<<FPUB_68882)
#define FPU_68040 (1<<FPUB_68040) #define FPU_68040 (1<<FPUB_68040)
#define FPU_68060 (1<<FPUB_68060) #define FPU_68060 (1<<FPUB_68060)
#define FPU_SUNFPA (1<<FPUB_SUNFPA) #define FPU_SUNFPA (1<<FPUB_SUNFPA)
#define FPU_COLDFIRE (1<<FPUB_COLDFIRE)
#define MMUB_68851 0 #define MMUB_68851 0
#define MMUB_68030 1 /* Internal MMU */ #define MMUB_68030 1 /* Internal MMU */
@ -235,6 +240,7 @@ extern unsigned long m68k_machtype;
#define MMUB_68060 3 /* Internal MMU */ #define MMUB_68060 3 /* Internal MMU */
#define MMUB_APOLLO 4 /* Custom Apollo */ #define MMUB_APOLLO 4 /* Custom Apollo */
#define MMUB_SUN3 5 /* Custom Sun-3 */ #define MMUB_SUN3 5 /* Custom Sun-3 */
#define MMUB_COLDFIRE 6 /* Internal MMU */
#define MMU_68851 (1<<MMUB_68851) #define MMU_68851 (1<<MMUB_68851)
#define MMU_68030 (1<<MMUB_68030) #define MMU_68030 (1<<MMUB_68030)
@ -242,6 +248,7 @@ extern unsigned long m68k_machtype;
#define MMU_68060 (1<<MMUB_68060) #define MMU_68060 (1<<MMUB_68060)
#define MMU_SUN3 (1<<MMUB_SUN3) #define MMU_SUN3 (1<<MMUB_SUN3)
#define MMU_APOLLO (1<<MMUB_APOLLO) #define MMU_APOLLO (1<<MMUB_APOLLO)
#define MMU_COLDFIRE (1<<MMUB_COLDFIRE)
#ifdef __KERNEL__ #ifdef __KERNEL__
@ -341,6 +348,13 @@ extern int m68k_is040or060;
# endif # endif
#endif #endif
#if !defined(CONFIG_COLDFIRE)
# define CPU_IS_COLDFIRE (0)
#else
# define CPU_IS_COLDFIRE (1)
# define MMU_IS_COLDFIRE (1)
#endif
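Because CPU_IS_COLDFIRE is the literal constant 0 or 1 rather than a runtime flag, every if (CPU_IS_COLDFIRE) test added by this merge folds away at compile time on the other configuration. A minimal sketch (the helper name is hypothetical):

	if (CPU_IS_COLDFIRE)			/* literal 0 on classic m68k builds, */
		mmu_write(MMUOR, MMUOR_CNL);	/* so this branch is dead code there */
	else if (CPU_IS_040_OR_060)
		flush_040_or_060();		/* hypothetical helper, for illustration */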
#define CPU_TYPE (m68k_cputype) #define CPU_TYPE (m68k_cputype)
#ifdef CONFIG_M68KFPU_EMU #ifdef CONFIG_M68KFPU_EMU
@ -15,11 +15,7 @@ struct sigcontext {
unsigned long sc_pc; unsigned long sc_pc;
unsigned short sc_formatvec; unsigned short sc_formatvec;
#ifndef __uClinux__ #ifndef __uClinux__
# ifdef __mcoldfire__
unsigned long sc_fpregs[2][2]; /* room for two fp registers */
# else
unsigned long sc_fpregs[2*3]; /* room for two fp registers */ unsigned long sc_fpregs[2*3]; /* room for two fp registers */
# endif
unsigned long sc_fpcntl[3]; unsigned long sc_fpcntl[3];
unsigned char sc_fpstate[216]; unsigned char sc_fpstate[216];
#endif #endif
@ -3,6 +3,7 @@
#include <asm/types.h> #include <asm/types.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/segment.h>
/* /*
* On machines with 4k pages we default to an 8k thread size, though we * On machines with 4k pages we default to an 8k thread size, though we
@ -26,6 +27,7 @@ struct thread_info {
struct task_struct *task; /* main task structure */ struct task_struct *task; /* main task structure */
unsigned long flags; unsigned long flags;
struct exec_domain *exec_domain; /* execution domain */ struct exec_domain *exec_domain; /* execution domain */
mm_segment_t addr_limit; /* thread address space */
int preempt_count; /* 0 => preemptable, <0 => BUG */ int preempt_count; /* 0 => preemptable, <0 => BUG */
__u32 cpu; /* should always be 0 on m68k */ __u32 cpu; /* should always be 0 on m68k */
unsigned long tp_value; /* thread pointer */ unsigned long tp_value; /* thread pointer */
@ -39,6 +41,7 @@ struct thread_info {
{ \ { \
.task = &tsk, \ .task = &tsk, \
.exec_domain = &default_exec_domain, \ .exec_domain = &default_exec_domain, \
.addr_limit = KERNEL_DS, \
.preempt_count = INIT_PREEMPT_COUNT, \ .preempt_count = INIT_PREEMPT_COUNT, \
.restart_block = { \ .restart_block = { \
.fn = do_no_restart_syscall, \ .fn = do_no_restart_syscall, \
@ -47,34 +50,6 @@ struct thread_info {
#define init_stack (init_thread_union.stack) #define init_stack (init_thread_union.stack)
#ifdef CONFIG_MMU
#ifndef __ASSEMBLY__
#include <asm/current.h>
#endif
#ifdef ASM_OFFSETS_C
#define task_thread_info(tsk) ((struct thread_info *) NULL)
#else
#include <asm/asm-offsets.h>
#define task_thread_info(tsk) ((struct thread_info *)((char *)tsk+TASK_TINFO))
#endif
#define init_thread_info (init_task.thread.info)
#define task_stack_page(tsk) ((tsk)->stack)
#define current_thread_info() task_thread_info(current)
#define __HAVE_THREAD_FUNCTIONS
#define setup_thread_stack(p, org) ({ \
*(struct task_struct **)(p)->stack = (p); \
task_thread_info(p)->task = (p); \
})
#define end_of_stack(p) ((unsigned long *)(p)->stack + 1)
#else /* !CONFIG_MMU */
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
/* how to get the thread information struct from C */ /* how to get the thread information struct from C */
static inline struct thread_info *current_thread_info(void) static inline struct thread_info *current_thread_info(void)
@ -92,8 +67,6 @@ static inline struct thread_info *current_thread_info(void)
#define init_thread_info (init_thread_union.thread_info) #define init_thread_info (init_thread_union.thread_info)
#endif /* CONFIG_MMU */
/* entry.S relies on these definitions! /* entry.S relies on these definitions!
* bits 0-7 are tested at every exception exit * bits 0-7 are tested at every exception exit
* bits 8-15 are also tested at syscall exit * bits 8-15 are also tested at syscall exit
@ -5,10 +5,13 @@
#ifndef CONFIG_SUN3 #ifndef CONFIG_SUN3
#include <asm/current.h> #include <asm/current.h>
#include <asm/mcfmmu.h>
static inline void flush_tlb_kernel_page(void *addr) static inline void flush_tlb_kernel_page(void *addr)
{ {
if (CPU_IS_040_OR_060) { if (CPU_IS_COLDFIRE) {
mmu_write(MMUOR, MMUOR_CNL);
} else if (CPU_IS_040_OR_060) {
mm_segment_t old_fs = get_fs(); mm_segment_t old_fs = get_fs();
set_fs(KERNEL_DS); set_fs(KERNEL_DS);
__asm__ __volatile__(".chip 68040\n\t" __asm__ __volatile__(".chip 68040\n\t"
@ -25,12 +28,15 @@ static inline void flush_tlb_kernel_page(void *addr)
*/ */
static inline void __flush_tlb(void) static inline void __flush_tlb(void)
{ {
if (CPU_IS_040_OR_060) if (CPU_IS_COLDFIRE) {
mmu_write(MMUOR, MMUOR_CNL);
} else if (CPU_IS_040_OR_060) {
__asm__ __volatile__(".chip 68040\n\t" __asm__ __volatile__(".chip 68040\n\t"
"pflushan\n\t" "pflushan\n\t"
".chip 68k"); ".chip 68k");
else if (CPU_IS_020_OR_030) } else if (CPU_IS_020_OR_030) {
__asm__ __volatile__("pflush #0,#4"); __asm__ __volatile__("pflush #0,#4");
}
} }
static inline void __flush_tlb040_one(unsigned long addr) static inline void __flush_tlb040_one(unsigned long addr)
@ -43,7 +49,9 @@ static inline void __flush_tlb040_one(unsigned long addr)
static inline void __flush_tlb_one(unsigned long addr) static inline void __flush_tlb_one(unsigned long addr)
{ {
if (CPU_IS_040_OR_060) if (CPU_IS_COLDFIRE)
mmu_write(MMUOR, MMUOR_CNL);
else if (CPU_IS_040_OR_060)
__flush_tlb040_one(addr); __flush_tlb040_one(addr);
else if (CPU_IS_020_OR_030) else if (CPU_IS_020_OR_030)
__asm__ __volatile__("pflush #0,#4,(%0)" : : "a" (addr)); __asm__ __volatile__("pflush #0,#4,(%0)" : : "a" (addr));
@ -56,12 +64,15 @@ static inline void __flush_tlb_one(unsigned long addr)
*/ */
static inline void flush_tlb_all(void) static inline void flush_tlb_all(void)
{ {
if (CPU_IS_040_OR_060) if (CPU_IS_COLDFIRE) {
mmu_write(MMUOR, MMUOR_CNL);
} else if (CPU_IS_040_OR_060) {
__asm__ __volatile__(".chip 68040\n\t" __asm__ __volatile__(".chip 68040\n\t"
"pflusha\n\t" "pflusha\n\t"
".chip 68k"); ".chip 68k");
else if (CPU_IS_020_OR_030) } else if (CPU_IS_020_OR_030) {
__asm__ __volatile__("pflusha"); __asm__ __volatile__("pflusha");
}
} }
static inline void flush_tlb_mm(struct mm_struct *mm) static inline void flush_tlb_mm(struct mm_struct *mm)
@ -18,6 +18,7 @@
typedef void (*e_vector)(void); typedef void (*e_vector)(void);
extern e_vector vectors[]; extern e_vector vectors[];
extern e_vector *_ramvec;
asmlinkage void auto_inthandler(void); asmlinkage void auto_inthandler(void);
asmlinkage void user_inthandler(void); asmlinkage void user_inthandler(void);
@ -20,6 +20,22 @@ static inline int access_ok(int type, const void __user *addr,
return 1; return 1;
} }
/*
* Not all variants of the 68k family support the notion of address spaces.
* The traditional 680x0 parts do, and they use the sfc/dfc registers and
* the "moves" instruction to access user space from kernel space. Other
* family members like ColdFire don't support this, and only have a single
* address space, and use the usual "move" instruction for user space access.
*
* Outside of this difference the user space access functions are the same.
* So let's keep the code simple and just define what we need to use.
*/
#ifdef CONFIG_CPU_HAS_ADDRESS_SPACES
#define MOVES "moves"
#else
#define MOVES "move"
#endif
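Since MOVES is a string literal, adjacent-string pasting selects the opcode at compile time with no runtime cost; for example, inside the __put_user_asm() macro below:

	/* "1: "MOVES".l %2,%1\n"
	 *   -> "1: moves.l %2,%1\n" on CPUs with sfc/dfc address spaces
	 *   -> "1: move.l %2,%1\n"  on single-address-space parts (ColdFire)
	 */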
/* /*
* The exception table consists of pairs of addresses: the first is the * The exception table consists of pairs of addresses: the first is the
* address of an instruction that is allowed to fault, and the second is * address of an instruction that is allowed to fault, and the second is
@ -43,7 +59,7 @@ extern int __get_user_bad(void);
#define __put_user_asm(res, x, ptr, bwl, reg, err) \ #define __put_user_asm(res, x, ptr, bwl, reg, err) \
asm volatile ("\n" \ asm volatile ("\n" \
"1: moves."#bwl" %2,%1\n" \ "1: "MOVES"."#bwl" %2,%1\n" \
"2:\n" \ "2:\n" \
" .section .fixup,\"ax\"\n" \ " .section .fixup,\"ax\"\n" \
" .even\n" \ " .even\n" \
@ -83,8 +99,8 @@ asm volatile ("\n" \
{ \ { \
const void __user *__pu_ptr = (ptr); \ const void __user *__pu_ptr = (ptr); \
asm volatile ("\n" \ asm volatile ("\n" \
"1: moves.l %2,(%1)+\n" \ "1: "MOVES".l %2,(%1)+\n" \
"2: moves.l %R2,(%1)\n" \ "2: "MOVES".l %R2,(%1)\n" \
"3:\n" \ "3:\n" \
" .section .fixup,\"ax\"\n" \ " .section .fixup,\"ax\"\n" \
" .even\n" \ " .even\n" \
@ -115,12 +131,12 @@ asm volatile ("\n" \
#define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({ \ #define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({ \
type __gu_val; \ type __gu_val; \
asm volatile ("\n" \ asm volatile ("\n" \
"1: moves."#bwl" %2,%1\n" \ "1: "MOVES"."#bwl" %2,%1\n" \
"2:\n" \ "2:\n" \
" .section .fixup,\"ax\"\n" \ " .section .fixup,\"ax\"\n" \
" .even\n" \ " .even\n" \
"10: move.l %3,%0\n" \ "10: move.l %3,%0\n" \
" sub."#bwl" %1,%1\n" \ " sub.l %1,%1\n" \
" jra 2b\n" \ " jra 2b\n" \
" .previous\n" \ " .previous\n" \
"\n" \ "\n" \
@ -152,8 +168,8 @@ asm volatile ("\n" \
const void *__gu_ptr = (ptr); \ const void *__gu_ptr = (ptr); \
u64 __gu_val; \ u64 __gu_val; \
asm volatile ("\n" \ asm volatile ("\n" \
"1: moves.l (%2)+,%1\n" \ "1: "MOVES".l (%2)+,%1\n" \
"2: moves.l (%2),%R1\n" \ "2: "MOVES".l (%2),%R1\n" \
"3:\n" \ "3:\n" \
" .section .fixup,\"ax\"\n" \ " .section .fixup,\"ax\"\n" \
" .even\n" \ " .even\n" \
@ -188,12 +204,12 @@ unsigned long __generic_copy_to_user(void __user *to, const void *from, unsigned
#define __constant_copy_from_user_asm(res, to, from, tmp, n, s1, s2, s3)\ #define __constant_copy_from_user_asm(res, to, from, tmp, n, s1, s2, s3)\
asm volatile ("\n" \ asm volatile ("\n" \
"1: moves."#s1" (%2)+,%3\n" \ "1: "MOVES"."#s1" (%2)+,%3\n" \
" move."#s1" %3,(%1)+\n" \ " move."#s1" %3,(%1)+\n" \
"2: moves."#s2" (%2)+,%3\n" \ "2: "MOVES"."#s2" (%2)+,%3\n" \
" move."#s2" %3,(%1)+\n" \ " move."#s2" %3,(%1)+\n" \
" .ifnc \""#s3"\",\"\"\n" \ " .ifnc \""#s3"\",\"\"\n" \
"3: moves."#s3" (%2)+,%3\n" \ "3: "MOVES"."#s3" (%2)+,%3\n" \
" move."#s3" %3,(%1)+\n" \ " move."#s3" %3,(%1)+\n" \
" .endif\n" \ " .endif\n" \
"4:\n" \ "4:\n" \
@ -269,13 +285,13 @@ __constant_copy_from_user(void *to, const void __user *from, unsigned long n)
#define __constant_copy_to_user_asm(res, to, from, tmp, n, s1, s2, s3) \ #define __constant_copy_to_user_asm(res, to, from, tmp, n, s1, s2, s3) \
asm volatile ("\n" \ asm volatile ("\n" \
" move."#s1" (%2)+,%3\n" \ " move."#s1" (%2)+,%3\n" \
"11: moves."#s1" %3,(%1)+\n" \ "11: "MOVES"."#s1" %3,(%1)+\n" \
"12: move."#s2" (%2)+,%3\n" \ "12: move."#s2" (%2)+,%3\n" \
"21: moves."#s2" %3,(%1)+\n" \ "21: "MOVES"."#s2" %3,(%1)+\n" \
"22:\n" \ "22:\n" \
" .ifnc \""#s3"\",\"\"\n" \ " .ifnc \""#s3"\",\"\"\n" \
" move."#s3" (%2)+,%3\n" \ " move."#s3" (%2)+,%3\n" \
"31: moves."#s3" %3,(%1)+\n" \ "31: "MOVES"."#s3" %3,(%1)+\n" \
"32:\n" \ "32:\n" \
" .endif\n" \ " .endif\n" \
"4:\n" \ "4:\n" \
@ -7,11 +7,7 @@ typedef greg_t gregset_t[NGREG];
typedef struct fpregset { typedef struct fpregset {
int f_fpcntl[3]; int f_fpcntl[3];
#ifdef __mcoldfire__
int f_fpregs[8][2];
#else
int f_fpregs[8*3]; int f_fpregs[8*3];
#endif
} fpregset_t; } fpregset_t;
struct mcontext { struct mcontext {
@ -2,19 +2,24 @@
# Makefile for the linux kernel. # Makefile for the linux kernel.
# #
extra-$(CONFIG_MMU) := head.o extra-$(CONFIG_AMIGA) := head.o
extra-$(CONFIG_ATARI) := head.o
extra-$(CONFIG_MAC) := head.o
extra-$(CONFIG_APOLLO) := head.o
extra-$(CONFIG_VME) := head.o
extra-$(CONFIG_HP300) := head.o
extra-$(CONFIG_Q40) := head.o
extra-$(CONFIG_SUN3X) := head.o
extra-$(CONFIG_SUN3) := sun3-head.o extra-$(CONFIG_SUN3) := sun3-head.o
extra-y += vmlinux.lds extra-y += vmlinux.lds
obj-y := entry.o irq.o m68k_ksyms.o module.o process.o ptrace.o setup.o \ obj-y := entry.o init_task.o irq.o m68k_ksyms.o module.o process.o ptrace.o
signal.o sys_m68k.o syscalltable.o time.o traps.o obj-y += setup.o signal.o sys_m68k.o syscalltable.o time.o traps.o
obj-$(CONFIG_MMU) += ints.o vectors.o obj-$(CONFIG_MMU_MOTOROLA) += ints.o vectors.o
obj-$(CONFIG_MMU_SUN3) += ints.o vectors.o
ifndef CONFIG_MMU_SUN3 ifndef CONFIG_MMU_SUN3
obj-y += dma.o obj-y += dma.o
endif
ifndef CONFIG_MMU
obj-y += init_task.o
endif endif
@ -24,8 +24,7 @@ int main(void)
/* offsets into the task struct */ /* offsets into the task struct */
DEFINE(TASK_THREAD, offsetof(struct task_struct, thread)); DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
DEFINE(TASK_MM, offsetof(struct task_struct, mm)); DEFINE(TASK_MM, offsetof(struct task_struct, mm));
DEFINE(TASK_INFO, offsetof(struct task_struct, thread.info)); DEFINE(TASK_STACK, offsetof(struct task_struct, stack));
DEFINE(TASK_TINFO, offsetof(struct task_struct, thread.info));
/* offsets into the thread struct */ /* offsets into the thread struct */
DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp)); DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
@ -1,4 +1,4 @@
#ifdef CONFIG_MMU #if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
#include "entry_mm.S" #include "entry_mm.S"
#else #else
#include "entry_no.S" #include "entry_no.S"
@ -99,7 +99,8 @@ do_trace_exit:
jra .Lret_from_exception jra .Lret_from_exception
ENTRY(ret_from_signal) ENTRY(ret_from_signal)
tstb %curptr@(TASK_INFO+TINFO_FLAGS+2) movel %curptr@(TASK_STACK),%a1
tstb %a1@(TINFO_FLAGS+2)
jge 1f jge 1f
jbsr syscall_trace jbsr syscall_trace
1: RESTORE_SWITCH_STACK 1: RESTORE_SWITCH_STACK
@ -120,11 +121,13 @@ ENTRY(system_call)
SAVE_ALL_SYS SAVE_ALL_SYS
GET_CURRENT(%d1) GET_CURRENT(%d1)
movel %d1,%a1
| save top of frame | save top of frame
movel %sp,%curptr@(TASK_THREAD+THREAD_ESP0) movel %sp,%curptr@(TASK_THREAD+THREAD_ESP0)
| syscall trace? | syscall trace?
tstb %curptr@(TASK_INFO+TINFO_FLAGS+2) tstb %a1@(TINFO_FLAGS+2)
jmi do_trace_entry jmi do_trace_entry
cmpl #NR_syscalls,%d0 cmpl #NR_syscalls,%d0
jcc badsys jcc badsys
@ -133,7 +136,8 @@ syscall:
movel %d0,%sp@(PT_OFF_D0) | save the return value movel %d0,%sp@(PT_OFF_D0) | save the return value
ret_from_syscall: ret_from_syscall:
|oriw #0x0700,%sr |oriw #0x0700,%sr
movew %curptr@(TASK_INFO+TINFO_FLAGS+2),%d0 movel %curptr@(TASK_STACK),%a1
movew %a1@(TINFO_FLAGS+2),%d0
jne syscall_exit_work jne syscall_exit_work
1: RESTORE_ALL 1: RESTORE_ALL
@ -159,7 +163,8 @@ ENTRY(ret_from_exception)
andw #ALLOWINT,%sr andw #ALLOWINT,%sr
resume_userspace: resume_userspace:
moveb %curptr@(TASK_INFO+TINFO_FLAGS+3),%d0 movel %curptr@(TASK_STACK),%a1
moveb %a1@(TINFO_FLAGS+3),%d0
jne exit_work jne exit_work
1: RESTORE_ALL 1: RESTORE_ALL
@ -199,7 +204,8 @@ do_delayed_trace:
ENTRY(auto_inthandler) ENTRY(auto_inthandler)
SAVE_ALL_INT SAVE_ALL_INT
GET_CURRENT(%d0) GET_CURRENT(%d0)
addqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1) movel %d0,%a1
addqb #1,%a1@(TINFO_PREEMPT+1)
| put exception # in d0 | put exception # in d0
bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0 bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0
subw #VEC_SPUR,%d0 subw #VEC_SPUR,%d0
@ -211,7 +217,8 @@ auto_irqhandler_fixup = . + 2
addql #8,%sp | pop parameters off stack addql #8,%sp | pop parameters off stack
ret_from_interrupt: ret_from_interrupt:
subqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1) movel %curptr@(TASK_STACK),%a1
subqb #1,%a1@(TINFO_PREEMPT+1)
jeq ret_from_last_interrupt jeq ret_from_last_interrupt
2: RESTORE_ALL 2: RESTORE_ALL
@ -232,7 +239,8 @@ ret_from_last_interrupt:
ENTRY(user_inthandler) ENTRY(user_inthandler)
SAVE_ALL_INT SAVE_ALL_INT
GET_CURRENT(%d0) GET_CURRENT(%d0)
addqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1) movel %d0,%a1
addqb #1,%a1@(TINFO_PREEMPT+1)
| put exception # in d0 | put exception # in d0
bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0 bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0
user_irqvec_fixup = . + 2 user_irqvec_fixup = . + 2
@ -243,7 +251,8 @@ user_irqvec_fixup = . + 2
jsr do_IRQ | process the IRQ jsr do_IRQ | process the IRQ
addql #8,%sp | pop parameters off stack addql #8,%sp | pop parameters off stack
subqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1) movel %curptr@(TASK_STACK),%a1
subqb #1,%a1@(TINFO_PREEMPT+1)
jeq ret_from_last_interrupt jeq ret_from_last_interrupt
RESTORE_ALL RESTORE_ALL
@ -252,13 +261,15 @@ user_irqvec_fixup = . + 2
ENTRY(bad_inthandler) ENTRY(bad_inthandler)
SAVE_ALL_INT SAVE_ALL_INT
GET_CURRENT(%d0) GET_CURRENT(%d0)
addqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1) movel %d0,%a1
addqb #1,%a1@(TINFO_PREEMPT+1)
movel %sp,%sp@- movel %sp,%sp@-
jsr handle_badint jsr handle_badint
addql #4,%sp addql #4,%sp
subqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1) movel %curptr@(TASK_STACK),%a1
subqb #1,%a1@(TINFO_PREEMPT+1)
jeq ret_from_last_interrupt jeq ret_from_last_interrupt
RESTORE_ALL RESTORE_ALL
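The recurring TASK_INFO -> TASK_STACK rewrite above reflects thread_info moving out of task_struct and onto the base of the kernel stack, so the flags word must now be reached through one extra load. A C-level reading of the new two-instruction sequence (a sketch, not generated code):

	/* movel %curptr@(TASK_STACK),%a1	a1 = current->stack		*/
	/* tstb  %a1@(TINFO_FLAGS+2)		test the byte of ti->flags	*/
	/*					holding bits 8..15 (big-endian)	*/
	struct thread_info *ti = (struct thread_info *)current->stack;
	int work_pending = (ti->flags & 0xff00) != 0;	/* syscall-exit work? */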
@ -44,8 +44,7 @@
ENTRY(buserr) ENTRY(buserr)
SAVE_ALL_INT SAVE_ALL_INT
moveq #-1,%d0 GET_CURRENT(%d0)
movel %d0,%sp@(PT_OFF_ORIG_D0)
movel %sp,%sp@- /* stack frame pointer argument */ movel %sp,%sp@- /* stack frame pointer argument */
jsr buserr_c jsr buserr_c
addql #4,%sp addql #4,%sp
@ -53,8 +52,7 @@ ENTRY(buserr)
ENTRY(trap) ENTRY(trap)
SAVE_ALL_INT SAVE_ALL_INT
moveq #-1,%d0 GET_CURRENT(%d0)
movel %d0,%sp@(PT_OFF_ORIG_D0)
movel %sp,%sp@- /* stack frame pointer argument */ movel %sp,%sp@- /* stack frame pointer argument */
jsr trap_c jsr trap_c
addql #4,%sp addql #4,%sp
@ -65,8 +63,7 @@ ENTRY(trap)
.globl dbginterrupt .globl dbginterrupt
ENTRY(dbginterrupt) ENTRY(dbginterrupt)
SAVE_ALL_INT SAVE_ALL_INT
moveq #-1,%d0 GET_CURRENT(%d0)
movel %d0,%sp@(PT_OFF_ORIG_D0)
movel %sp,%sp@- /* stack frame pointer argument */ movel %sp,%sp@- /* stack frame pointer argument */
jsr dbginterrupt_c jsr dbginterrupt_c
addql #4,%sp addql #4,%sp
@ -19,7 +19,6 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
* *
* All other task structs will be allocated on slabs in fork.c * All other task structs will be allocated on slabs in fork.c
*/ */
__asm__(".align 4");
struct task_struct init_task = INIT_TASK(init_task); struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task); EXPORT_SYMBOL(init_task);
@ -27,7 +26,7 @@ EXPORT_SYMBOL(init_task);
/* /*
* Initial thread structure. * Initial thread structure.
* *
* We need to make sure that this is 8192-byte aligned due to the * We need to make sure that this is THREAD size aligned due to the
* way process stacks are handled. This is done by having a special * way process stacks are handled. This is done by having a special
* "init_task" linker map entry.. * "init_task" linker map entry..
*/ */
@ -14,7 +14,7 @@ EXPORT_SYMBOL(__ashrdi3);
EXPORT_SYMBOL(__lshrdi3); EXPORT_SYMBOL(__lshrdi3);
EXPORT_SYMBOL(__muldi3); EXPORT_SYMBOL(__muldi3);
#if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE) #if defined(CONFIG_CPU_HAS_NO_MULDIV64)
/* /*
* Simpler 68k and ColdFire parts also need a few other gcc functions. * Simpler 68k and ColdFire parts also need a few other gcc functions.
*/ */
@ -33,22 +33,6 @@
#include <asm/setup.h> #include <asm/setup.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
/*
* Initial task/thread structure. Make this a per-architecture thing,
* because different architectures tend to have different
* alignment requirements and potentially different initial
* setup.
*/
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
union thread_union init_thread_union __init_task_data
__attribute__((aligned(THREAD_SIZE))) =
{ INIT_THREAD_INFO(init_task) };
/* initial task structure */
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);
asmlinkage void ret_from_fork(void); asmlinkage void ret_from_fork(void);
@ -188,9 +172,7 @@ void flush_thread(void)
current->thread.fs = __USER_DS; current->thread.fs = __USER_DS;
if (!FPU_IS_EMU) if (!FPU_IS_EMU)
asm volatile (".chip 68k/68881\n\t" asm volatile ("frestore %0@" : : "a" (&zero) : "memory");
"frestore %0@\n\t"
".chip 68k" : : "a" (&zero));
} }
/* /*
@ -264,11 +246,28 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
/* Copy the current fpu state */ /* Copy the current fpu state */
asm volatile ("fsave %0" : : "m" (p->thread.fpstate[0]) : "memory"); asm volatile ("fsave %0" : : "m" (p->thread.fpstate[0]) : "memory");
if (!CPU_IS_060 ? p->thread.fpstate[0] : p->thread.fpstate[2]) if (!CPU_IS_060 ? p->thread.fpstate[0] : p->thread.fpstate[2]) {
asm volatile ("fmovemx %/fp0-%/fp7,%0\n\t" if (CPU_IS_COLDFIRE) {
"fmoveml %/fpiar/%/fpcr/%/fpsr,%1" asm volatile ("fmovemd %/fp0-%/fp7,%0\n\t"
: : "m" (p->thread.fp[0]), "m" (p->thread.fpcntl[0]) "fmovel %/fpiar,%1\n\t"
: "memory"); "fmovel %/fpcr,%2\n\t"
"fmovel %/fpsr,%3"
:
: "m" (p->thread.fp[0]),
"m" (p->thread.fpcntl[0]),
"m" (p->thread.fpcntl[1]),
"m" (p->thread.fpcntl[2])
: "memory");
} else {
asm volatile ("fmovemx %/fp0-%/fp7,%0\n\t"
"fmoveml %/fpiar/%/fpcr/%/fpsr,%1"
:
: "m" (p->thread.fp[0]),
"m" (p->thread.fpcntl[0])
: "memory");
}
}
/* Restore the state in case the fpu was busy */ /* Restore the state in case the fpu was busy */
asm volatile ("frestore %0" : : "m" (p->thread.fpstate[0])); asm volatile ("frestore %0" : : "m" (p->thread.fpstate[0]));
} }
@ -301,12 +300,28 @@ int dump_fpu (struct pt_regs *regs, struct user_m68kfp_struct *fpu)
if (!CPU_IS_060 ? !fpustate[0] : !fpustate[2]) if (!CPU_IS_060 ? !fpustate[0] : !fpustate[2])
return 0; return 0;
asm volatile ("fmovem %/fpiar/%/fpcr/%/fpsr,%0" if (CPU_IS_COLDFIRE) {
:: "m" (fpu->fpcntl[0]) asm volatile ("fmovel %/fpiar,%0\n\t"
: "memory"); "fmovel %/fpcr,%1\n\t"
asm volatile ("fmovemx %/fp0-%/fp7,%0" "fmovel %/fpsr,%2\n\t"
:: "m" (fpu->fpregs[0]) "fmovemd %/fp0-%/fp7,%3"
: "memory"); :
: "m" (fpu->fpcntl[0]),
"m" (fpu->fpcntl[1]),
"m" (fpu->fpcntl[2]),
"m" (fpu->fpregs[0])
: "memory");
} else {
asm volatile ("fmovem %/fpiar/%/fpcr/%/fpsr,%0"
:
: "m" (fpu->fpcntl[0])
: "memory");
asm volatile ("fmovemx %/fp0-%/fp7,%0"
:
: "m" (fpu->fpregs[0])
: "memory");
}
return 1; return 1;
} }
EXPORT_SYMBOL(dump_fpu); EXPORT_SYMBOL(dump_fpu);
@ -18,6 +18,7 @@
#include <linux/ptrace.h> #include <linux/ptrace.h>
#include <linux/user.h> #include <linux/user.h>
#include <linux/signal.h> #include <linux/signal.h>
#include <linux/tracehook.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/page.h> #include <asm/page.h>
@ -275,3 +276,20 @@ asmlinkage void syscall_trace(void)
current->exit_code = 0; current->exit_code = 0;
} }
} }
#ifdef CONFIG_COLDFIRE
asmlinkage int syscall_trace_enter(void)
{
int ret = 0;
if (test_thread_flag(TIF_SYSCALL_TRACE))
ret = tracehook_report_syscall_entry(task_pt_regs(current));
return ret;
}
asmlinkage void syscall_trace_leave(void)
{
if (test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall_exit(task_pt_regs(current), 0);
}
#endif /* CONFIG_COLDFIRE */
@ -221,7 +221,8 @@ void __init setup_arch(char **cmdline_p)
#endif #endif
/* The bootinfo is located right after the kernel bss */ /* The bootinfo is located right after the kernel bss */
m68k_parse_bootinfo((const struct bi_record *)_end); if (!CPU_IS_COLDFIRE)
m68k_parse_bootinfo((const struct bi_record *)_end);
if (CPU_IS_040) if (CPU_IS_040)
m68k_is040or060 = 4; m68k_is040or060 = 4;
@ -235,7 +236,7 @@ void __init setup_arch(char **cmdline_p)
* with them, we should add a test to check_bugs() below] */ * with them, we should add a test to check_bugs() below] */
#ifndef CONFIG_M68KFPU_EMU_ONLY #ifndef CONFIG_M68KFPU_EMU_ONLY
/* clear the fpu if we have one */ /* clear the fpu if we have one */
if (m68k_fputype & (FPU_68881|FPU_68882|FPU_68040|FPU_68060)) { if (m68k_fputype & (FPU_68881|FPU_68882|FPU_68040|FPU_68060|FPU_COLDFIRE)) {
volatile int zero = 0; volatile int zero = 0;
asm volatile ("frestore %0" : : "m" (zero)); asm volatile ("frestore %0" : : "m" (zero));
} }
@ -258,6 +259,10 @@ void __init setup_arch(char **cmdline_p)
init_mm.end_data = (unsigned long)_edata; init_mm.end_data = (unsigned long)_edata;
init_mm.brk = (unsigned long)_end; init_mm.brk = (unsigned long)_end;
#if defined(CONFIG_BOOTPARAM)
strncpy(m68k_command_line, CONFIG_BOOTPARAM_STRING, CL_SIZE);
m68k_command_line[CL_SIZE - 1] = 0;
#endif /* CONFIG_BOOTPARAM */
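CONFIG_BOOTPARAM lets platforms without a command-line-passing bootloader bake one into the kernel image; a hypothetical .config fragment (the string is illustrative, not from this merge):

	CONFIG_BOOTPARAM=y
	CONFIG_BOOTPARAM_STRING="console=ttyS0,115200 root=/dev/mtdblock0"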
*cmdline_p = m68k_command_line; *cmdline_p = m68k_command_line;
memcpy(boot_command_line, *cmdline_p, CL_SIZE); memcpy(boot_command_line, *cmdline_p, CL_SIZE);
@ -322,6 +327,11 @@ void __init setup_arch(char **cmdline_p)
case MACH_SUN3X: case MACH_SUN3X:
config_sun3x(); config_sun3x();
break; break;
#endif
#ifdef CONFIG_COLDFIRE
case MACH_M54XX:
config_BSP(NULL, 0);
break;
#endif #endif
default: default:
panic("No configuration setup"); panic("No configuration setup");
@ -384,6 +394,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
#define LOOP_CYCLES_68030 (8) #define LOOP_CYCLES_68030 (8)
#define LOOP_CYCLES_68040 (3) #define LOOP_CYCLES_68040 (3)
#define LOOP_CYCLES_68060 (1) #define LOOP_CYCLES_68060 (1)
#define LOOP_CYCLES_COLDFIRE (2)
if (CPU_IS_020) { if (CPU_IS_020) {
cpu = "68020"; cpu = "68020";
@ -397,6 +408,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
} else if (CPU_IS_060) { } else if (CPU_IS_060) {
cpu = "68060"; cpu = "68060";
clockfactor = LOOP_CYCLES_68060; clockfactor = LOOP_CYCLES_68060;
} else if (CPU_IS_COLDFIRE) {
cpu = "ColdFire";
clockfactor = LOOP_CYCLES_COLDFIRE;
} else { } else {
cpu = "680x0"; cpu = "680x0";
clockfactor = 0; clockfactor = 0;
@ -415,6 +429,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
fpu = "68060"; fpu = "68060";
else if (m68k_fputype & FPU_SUNFPA) else if (m68k_fputype & FPU_SUNFPA)
fpu = "Sun FPA"; fpu = "Sun FPA";
else if (m68k_fputype & FPU_COLDFIRE)
fpu = "ColdFire";
else else
fpu = "none"; fpu = "none";
#endif #endif
@ -431,6 +447,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
mmu = "Sun-3"; mmu = "Sun-3";
else if (m68k_mmutype & MMU_APOLLO) else if (m68k_mmutype & MMU_APOLLO)
mmu = "Apollo"; mmu = "Apollo";
else if (m68k_mmutype & MMU_COLDFIRE)
mmu = "ColdFire";
else else
mmu = "unknown"; mmu = "unknown";
@ -47,7 +47,6 @@ EXPORT_SYMBOL(memory_end);
char __initdata command_line[COMMAND_LINE_SIZE]; char __initdata command_line[COMMAND_LINE_SIZE];
/* machine dependent timer functions */ /* machine dependent timer functions */
void (*mach_gettod)(int*, int*, int*, int*, int*, int*);
int (*mach_set_clock_mmss)(unsigned long); int (*mach_set_clock_mmss)(unsigned long);
/* machine dependent reboot functions */ /* machine dependent reboot functions */
@ -56,7 +56,11 @@ static const int frame_extra_sizes[16] = {
[1] = -1, /* sizeof(((struct frame *)0)->un.fmt1), */ [1] = -1, /* sizeof(((struct frame *)0)->un.fmt1), */
[2] = sizeof(((struct frame *)0)->un.fmt2), [2] = sizeof(((struct frame *)0)->un.fmt2),
[3] = sizeof(((struct frame *)0)->un.fmt3), [3] = sizeof(((struct frame *)0)->un.fmt3),
#ifdef CONFIG_COLDFIRE
[4] = 0,
#else
[4] = sizeof(((struct frame *)0)->un.fmt4), [4] = sizeof(((struct frame *)0)->un.fmt4),
#endif
[5] = -1, /* sizeof(((struct frame *)0)->un.fmt5), */ [5] = -1, /* sizeof(((struct frame *)0)->un.fmt5), */
[6] = -1, /* sizeof(((struct frame *)0)->un.fmt6), */ [6] = -1, /* sizeof(((struct frame *)0)->un.fmt6), */
[7] = sizeof(((struct frame *)0)->un.fmt7), [7] = sizeof(((struct frame *)0)->un.fmt7),
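The sizeof(((struct frame *)0)->un.fmtN) entries rely on sizeof being a compile-time operator that never evaluates its operand, so the null pointer is never dereferenced; it is simply a way to name a member's type. A standalone illustration with a hypothetical struct:

	#include <stddef.h>

	struct demo { char pad[6]; long word; };
	size_t n = sizeof(((struct demo *)0)->word);	/* == sizeof(long), no access */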
@ -84,7 +88,11 @@ int handle_kernel_fault(struct pt_regs *regs)
regs->stkadj = frame_extra_sizes[regs->format]; regs->stkadj = frame_extra_sizes[regs->format];
tregs = (struct pt_regs *)((long)regs + regs->stkadj); tregs = (struct pt_regs *)((long)regs + regs->stkadj);
tregs->vector = regs->vector; tregs->vector = regs->vector;
#ifdef CONFIG_COLDFIRE
tregs->format = 4;
#else
tregs->format = 0; tregs->format = 0;
#endif
tregs->pc = fixup->fixup; tregs->pc = fixup->fixup;
tregs->sr = regs->sr; tregs->sr = regs->sr;
@ -195,7 +203,8 @@ static inline int restore_fpu_state(struct sigcontext *sc)
if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) { if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
/* Verify the frame format. */ /* Verify the frame format. */
if (!CPU_IS_060 && (sc->sc_fpstate[0] != fpu_version)) if (!(CPU_IS_060 || CPU_IS_COLDFIRE) &&
(sc->sc_fpstate[0] != fpu_version))
goto out; goto out;
if (CPU_IS_020_OR_030) { if (CPU_IS_020_OR_030) {
if (m68k_fputype & FPU_68881 && if (m68k_fputype & FPU_68881 &&
@ -214,19 +223,43 @@ static inline int restore_fpu_state(struct sigcontext *sc)
sc->sc_fpstate[3] == 0x60 || sc->sc_fpstate[3] == 0x60 ||
sc->sc_fpstate[3] == 0xe0)) sc->sc_fpstate[3] == 0xe0))
goto out; goto out;
} else if (CPU_IS_COLDFIRE) {
if (!(sc->sc_fpstate[0] == 0x00 ||
sc->sc_fpstate[0] == 0x05 ||
sc->sc_fpstate[0] == 0xe5))
goto out;
} else } else
goto out; goto out;
__asm__ volatile (".chip 68k/68881\n\t" if (CPU_IS_COLDFIRE) {
"fmovemx %0,%%fp0-%%fp1\n\t" __asm__ volatile ("fmovemd %0,%%fp0-%%fp1\n\t"
"fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t" "fmovel %1,%%fpcr\n\t"
".chip 68k" "fmovel %2,%%fpsr\n\t"
: /* no outputs */ "fmovel %3,%%fpiar"
: "m" (*sc->sc_fpregs), "m" (*sc->sc_fpcntl)); : /* no outputs */
: "m" (sc->sc_fpregs[0]),
"m" (sc->sc_fpcntl[0]),
"m" (sc->sc_fpcntl[1]),
"m" (sc->sc_fpcntl[2]));
} else {
__asm__ volatile (".chip 68k/68881\n\t"
"fmovemx %0,%%fp0-%%fp1\n\t"
"fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
".chip 68k"
: /* no outputs */
: "m" (*sc->sc_fpregs),
"m" (*sc->sc_fpcntl));
}
}
if (CPU_IS_COLDFIRE) {
__asm__ volatile ("frestore %0" : : "m" (*sc->sc_fpstate));
} else {
__asm__ volatile (".chip 68k/68881\n\t"
"frestore %0\n\t"
".chip 68k"
: : "m" (*sc->sc_fpstate));
} }
__asm__ volatile (".chip 68k/68881\n\t"
"frestore %0\n\t"
".chip 68k" : : "m" (*sc->sc_fpstate));
err = 0; err = 0;
out: out:
@ -241,7 +274,7 @@ out:
static inline int rt_restore_fpu_state(struct ucontext __user *uc) static inline int rt_restore_fpu_state(struct ucontext __user *uc)
{ {
unsigned char fpstate[FPCONTEXT_SIZE]; unsigned char fpstate[FPCONTEXT_SIZE];
int context_size = CPU_IS_060 ? 8 : 0; int context_size = CPU_IS_060 ? 8 : (CPU_IS_COLDFIRE ? 12 : 0);
fpregset_t fpregs; fpregset_t fpregs;
int err = 1; int err = 1;
@ -260,10 +293,11 @@ static inline int rt_restore_fpu_state(struct ucontext __user *uc)
if (__get_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate)) if (__get_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate))
goto out; goto out;
if (CPU_IS_060 ? fpstate[2] : fpstate[0]) { if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
if (!CPU_IS_060) if (!(CPU_IS_060 || CPU_IS_COLDFIRE))
context_size = fpstate[1]; context_size = fpstate[1];
/* Verify the frame format. */ /* Verify the frame format. */
if (!CPU_IS_060 && (fpstate[0] != fpu_version)) if (!(CPU_IS_060 || CPU_IS_COLDFIRE) &&
(fpstate[0] != fpu_version))
goto out; goto out;
if (CPU_IS_020_OR_030) { if (CPU_IS_020_OR_030) {
if (m68k_fputype & FPU_68881 && if (m68k_fputype & FPU_68881 &&
@ -282,26 +316,50 @@ static inline int rt_restore_fpu_state(struct ucontext __user *uc)
fpstate[3] == 0x60 || fpstate[3] == 0x60 ||
fpstate[3] == 0xe0)) fpstate[3] == 0xe0))
goto out; goto out;
} else if (CPU_IS_COLDFIRE) {
if (!(fpstate[3] == 0x00 ||
fpstate[3] == 0x05 ||
fpstate[3] == 0xe5))
goto out;
} else } else
goto out; goto out;
if (__copy_from_user(&fpregs, &uc->uc_mcontext.fpregs, if (__copy_from_user(&fpregs, &uc->uc_mcontext.fpregs,
sizeof(fpregs))) sizeof(fpregs)))
goto out; goto out;
__asm__ volatile (".chip 68k/68881\n\t"
"fmovemx %0,%%fp0-%%fp7\n\t" if (CPU_IS_COLDFIRE) {
"fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t" __asm__ volatile ("fmovemd %0,%%fp0-%%fp7\n\t"
".chip 68k" "fmovel %1,%%fpcr\n\t"
: /* no outputs */ "fmovel %2,%%fpsr\n\t"
: "m" (*fpregs.f_fpregs), "fmovel %3,%%fpiar"
"m" (*fpregs.f_fpcntl)); : /* no outputs */
: "m" (fpregs.f_fpregs[0]),
"m" (fpregs.f_fpcntl[0]),
"m" (fpregs.f_fpcntl[1]),
"m" (fpregs.f_fpcntl[2]));
} else {
__asm__ volatile (".chip 68k/68881\n\t"
"fmovemx %0,%%fp0-%%fp7\n\t"
"fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
".chip 68k"
: /* no outputs */
: "m" (*fpregs.f_fpregs),
"m" (*fpregs.f_fpcntl));
}
} }
if (context_size && if (context_size &&
__copy_from_user(fpstate + 4, (long __user *)&uc->uc_fpstate + 1, __copy_from_user(fpstate + 4, (long __user *)&uc->uc_fpstate + 1,
context_size)) context_size))
goto out; goto out;
__asm__ volatile (".chip 68k/68881\n\t"
"frestore %0\n\t" if (CPU_IS_COLDFIRE) {
".chip 68k" : : "m" (*fpstate)); __asm__ volatile ("frestore %0" : : "m" (*fpstate));
} else {
__asm__ volatile (".chip 68k/68881\n\t"
"frestore %0\n\t"
".chip 68k"
: : "m" (*fpstate));
}
err = 0; err = 0;
out: out:
@ -336,8 +394,12 @@ static int mangle_kernel_stack(struct pt_regs *regs, int formatvec,
regs->format = formatvec >> 12; regs->format = formatvec >> 12;
regs->vector = formatvec & 0xfff; regs->vector = formatvec & 0xfff;
#define frame_offset (sizeof(struct pt_regs)+sizeof(struct switch_stack)) #define frame_offset (sizeof(struct pt_regs)+sizeof(struct switch_stack))
__asm__ __volatile__ __asm__ __volatile__ (
(" movel %0,%/a0\n\t" #ifdef CONFIG_COLDFIRE
" movel %0,%/sp\n\t"
" bra ret_from_signal\n"
#else
" movel %0,%/a0\n\t"
" subl %1,%/a0\n\t" /* make room on stack */ " subl %1,%/a0\n\t" /* make room on stack */
" movel %/a0,%/sp\n\t" /* set stack pointer */ " movel %/a0,%/sp\n\t" /* set stack pointer */
/* move switch_stack and pt_regs */ /* move switch_stack and pt_regs */
@ -350,6 +412,7 @@ static int mangle_kernel_stack(struct pt_regs *regs, int formatvec,
"2: movel %4@+,%/a0@+\n\t" "2: movel %4@+,%/a0@+\n\t"
" dbra %1,2b\n\t" " dbra %1,2b\n\t"
" bral ret_from_signal\n" " bral ret_from_signal\n"
#endif
: /* no outputs, it doesn't ever return */ : /* no outputs, it doesn't ever return */
: "a" (sw), "d" (fsize), "d" (frame_offset/4-1), : "a" (sw), "d" (fsize), "d" (frame_offset/4-1),
"n" (frame_offset), "a" (buf + fsize/4) "n" (frame_offset), "a" (buf + fsize/4)
@ -516,10 +579,15 @@ static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
return; return;
} }
__asm__ volatile (".chip 68k/68881\n\t" if (CPU_IS_COLDFIRE) {
"fsave %0\n\t" __asm__ volatile ("fsave %0"
".chip 68k" : : "m" (*sc->sc_fpstate) : "memory");
: : "m" (*sc->sc_fpstate) : "memory"); } else {
__asm__ volatile (".chip 68k/68881\n\t"
"fsave %0\n\t"
".chip 68k"
: : "m" (*sc->sc_fpstate) : "memory");
}
if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) { if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
fpu_version = sc->sc_fpstate[0]; fpu_version = sc->sc_fpstate[0];
@ -530,21 +598,35 @@ static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
if (*(unsigned short *) sc->sc_fpstate == 0x1f38) if (*(unsigned short *) sc->sc_fpstate == 0x1f38)
sc->sc_fpstate[0x38] |= 1 << 3; sc->sc_fpstate[0x38] |= 1 << 3;
} }
__asm__ volatile (".chip 68k/68881\n\t"
"fmovemx %%fp0-%%fp1,%0\n\t" if (CPU_IS_COLDFIRE) {
"fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t" __asm__ volatile ("fmovemd %%fp0-%%fp1,%0\n\t"
".chip 68k" "fmovel %%fpcr,%1\n\t"
: "=m" (*sc->sc_fpregs), "fmovel %%fpsr,%2\n\t"
"=m" (*sc->sc_fpcntl) "fmovel %%fpiar,%3"
: /* no inputs */ : "=m" (sc->sc_fpregs[0]),
: "memory"); "=m" (sc->sc_fpcntl[0]),
"=m" (sc->sc_fpcntl[1]),
"=m" (sc->sc_fpcntl[2])
: /* no inputs */
: "memory");
} else {
__asm__ volatile (".chip 68k/68881\n\t"
"fmovemx %%fp0-%%fp1,%0\n\t"
"fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
".chip 68k"
: "=m" (*sc->sc_fpregs),
"=m" (*sc->sc_fpcntl)
: /* no inputs */
: "memory");
}
} }
} }
static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *regs) static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *regs)
{ {
unsigned char fpstate[FPCONTEXT_SIZE]; unsigned char fpstate[FPCONTEXT_SIZE];
int context_size = CPU_IS_060 ? 8 : 0; int context_size = CPU_IS_060 ? 8 : (CPU_IS_COLDFIRE ? 12 : 0);
int err = 0; int err = 0;
if (FPU_IS_EMU) { if (FPU_IS_EMU) {
@ -557,15 +639,19 @@ static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *
return err; return err;
} }
__asm__ volatile (".chip 68k/68881\n\t" if (CPU_IS_COLDFIRE) {
"fsave %0\n\t" __asm__ volatile ("fsave %0" : : "m" (*fpstate) : "memory");
".chip 68k" } else {
: : "m" (*fpstate) : "memory"); __asm__ volatile (".chip 68k/68881\n\t"
"fsave %0\n\t"
".chip 68k"
: : "m" (*fpstate) : "memory");
}
err |= __put_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate); err |= __put_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate);
if (CPU_IS_060 ? fpstate[2] : fpstate[0]) { if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
fpregset_t fpregs; fpregset_t fpregs;
if (!CPU_IS_060) if (!(CPU_IS_060 || CPU_IS_COLDFIRE))
context_size = fpstate[1]; context_size = fpstate[1];
fpu_version = fpstate[0]; fpu_version = fpstate[0];
if (CPU_IS_020_OR_030 && if (CPU_IS_020_OR_030 &&
@ -575,14 +661,27 @@ static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *
if (*(unsigned short *) fpstate == 0x1f38) if (*(unsigned short *) fpstate == 0x1f38)
fpstate[0x38] |= 1 << 3; fpstate[0x38] |= 1 << 3;
} }
__asm__ volatile (".chip 68k/68881\n\t" if (CPU_IS_COLDFIRE) {
"fmovemx %%fp0-%%fp7,%0\n\t" __asm__ volatile ("fmovemd %%fp0-%%fp7,%0\n\t"
"fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t" "fmovel %%fpcr,%1\n\t"
".chip 68k" "fmovel %%fpsr,%2\n\t"
: "=m" (*fpregs.f_fpregs), "fmovel %%fpiar,%3"
"=m" (*fpregs.f_fpcntl) : "=m" (fpregs.f_fpregs[0]),
: /* no inputs */ "=m" (fpregs.f_fpcntl[0]),
: "memory"); "=m" (fpregs.f_fpcntl[1]),
"=m" (fpregs.f_fpcntl[2])
: /* no inputs */
: "memory");
} else {
__asm__ volatile (".chip 68k/68881\n\t"
"fmovemx %%fp0-%%fp7,%0\n\t"
"fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
".chip 68k"
: "=m" (*fpregs.f_fpregs),
"=m" (*fpregs.f_fpcntl)
: /* no inputs */
: "memory");
}
err |= copy_to_user(&uc->uc_mcontext.fpregs, &fpregs, err |= copy_to_user(&uc->uc_mcontext.fpregs, &fpregs,
sizeof(fpregs)); sizeof(fpregs));
} }
@ -679,8 +778,7 @@ static inline void push_cache (unsigned long vaddr)
"cpushl %%bc,(%0)\n\t" "cpushl %%bc,(%0)\n\t"
".chip 68k" ".chip 68k"
: : "a" (temp)); : : "a" (temp));
} } else if (!CPU_IS_COLDFIRE) {
else {
/* /*
* 68030/68020 have no writeback cache; * 68030/68020 have no writeback cache;
* still need to clear icache. * still need to clear icache.
@ -1,4 +1,4 @@
#ifdef CONFIG_MMU #if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
#include "time_mm.c" #include "time_mm.c"
#else #else
#include "time_no.c" #include "time_no.c"
@ -26,6 +26,9 @@
#define TICK_SIZE (tick_nsec / 1000) #define TICK_SIZE (tick_nsec / 1000)
/* machine dependent timer functions */
void (*mach_gettod)(int*, int*, int*, int*, int*, int*);
static inline int set_rtc_mmss(unsigned long nowtime) static inline int set_rtc_mmss(unsigned long nowtime)
{ {
if (mach_set_clock_mmss) if (mach_set_clock_mmss)
@ -706,6 +706,88 @@ create_atc_entry:
#endif /* CPU_M68020_OR_M68030 */ #endif /* CPU_M68020_OR_M68030 */
#endif /* !CONFIG_SUN3 */ #endif /* !CONFIG_SUN3 */
#if defined(CONFIG_COLDFIRE) && defined(CONFIG_MMU)
#include <asm/mcfmmu.h>
/*
* The following table converts the FS encoding of a ColdFire
* exception stack frame into the error_code value needed by
* do_fault.
*/
static const unsigned char fs_err_code[] = {
0, /* 0000 */
0, /* 0001 */
0, /* 0010 */
0, /* 0011 */
1, /* 0100 */
0, /* 0101 */
0, /* 0110 */
0, /* 0111 */
2, /* 1000 */
3, /* 1001 */
2, /* 1010 */
0, /* 1011 */
1, /* 1100 */
1, /* 1101 */
0, /* 1110 */
0 /* 1111 */
};
static inline void access_errorcf(unsigned int fs, struct frame *fp)
{
unsigned long mmusr, addr;
unsigned int err_code;
int need_page_fault;
mmusr = mmu_read(MMUSR);
addr = mmu_read(MMUAR);
/*
* error_code:
* bit 0 == 0 means no page found, 1 means protection fault
* bit 1 == 0 means read, 1 means write
*/
switch (fs) {
case 5: /* 0101 TLB opword X miss */
need_page_fault = cf_tlb_miss(&fp->ptregs, 0, 0, 0);
addr = fp->ptregs.pc;
break;
case 6: /* 0110 TLB extension word X miss */
need_page_fault = cf_tlb_miss(&fp->ptregs, 0, 0, 1);
addr = fp->ptregs.pc + sizeof(long);
break;
case 10: /* 1010 TLB W miss */
need_page_fault = cf_tlb_miss(&fp->ptregs, 1, 1, 0);
break;
case 14: /* 1110 TLB R miss */
need_page_fault = cf_tlb_miss(&fp->ptregs, 0, 1, 0);
break;
default:
/* 0000 Normal */
/* 0001 Reserved */
/* 0010 Interrupt during debug service routine */
/* 0011 Reserved */
/* 0100 X Protection */
/* 0111 IFP in emulator mode */
/* 1000 W Protection */
/* 1001 Write error */
/* 1011 Reserved */
/* 1100 R Protection */
/* 1101 R Protection */
/* 1111 OEP in emulator mode */
need_page_fault = 1;
break;
}
if (need_page_fault) {
err_code = fs_err_code[fs];
if ((fs == 13) && (mmusr & MMUSR_WF)) /* rd-mod-wr access */
err_code |= 2; /* bit1 - write, bit0 - protection */
do_page_fault(&fp->ptregs, addr, err_code);
}
}
#endif /* CONFIG_COLDFIRE CONFIG_MMU */
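A worked decode of the paths above (values taken from the table and code, the scenarios are illustrative): for FS = 10 (binary 1010, a TLB miss on a write), cf_tlb_miss() is asked to fill the TLB and, if that fails, err_code = fs_err_code[10] = 2, i.e. bit 1 set (write) and bit 0 clear (no page found). For FS = 13 (binary 1101, a read protection fault handled by the default case), err_code = fs_err_code[13] = 1; if MMUSR_WF is also set the access was a read-modify-write, so err_code |= 2 yields 3 (write + protection fault).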
asmlinkage void buserr_c(struct frame *fp) asmlinkage void buserr_c(struct frame *fp)
{ {
/* Only set esp0 if coming from user mode */ /* Only set esp0 if coming from user mode */
@ -716,6 +798,28 @@ asmlinkage void buserr_c(struct frame *fp)
printk ("*** Bus Error *** Format is %x\n", fp->ptregs.format); printk ("*** Bus Error *** Format is %x\n", fp->ptregs.format);
#endif #endif
#if defined(CONFIG_COLDFIRE) && defined(CONFIG_MMU)
if (CPU_IS_COLDFIRE) {
unsigned int fs;
fs = (fp->ptregs.vector & 0x3) |
((fp->ptregs.vector & 0xc00) >> 8);
switch (fs) {
case 0x5:
case 0x6:
case 0x7:
case 0x9:
case 0xa:
case 0xd:
case 0xe:
case 0xf:
access_errorcf(fs, fp);
return;
default:
break;
}
}
#endif /* CONFIG_COLDFIRE && CONFIG_MMU */
switch (fp->ptregs.format) { switch (fp->ptregs.format) {
#if defined (CONFIG_M68060) #if defined (CONFIG_M68060)
case 4: /* 68060 access error */ case 4: /* 68060 access error */
@ -69,6 +69,7 @@ SECTIONS {
SCHED_TEXT SCHED_TEXT
LOCK_TEXT LOCK_TEXT
*(.text..lock) *(.text..lock)
*(.fixup)
. = ALIGN(16); /* Exception table */ . = ALIGN(16); /* Exception table */
__start___ex_table = .; __start___ex_table = .;
@ -161,6 +162,13 @@ SECTIONS {
_edata = . ; _edata = . ;
} > DATA } > DATA
.m68k_fixup : {
__start_fixup = .;
*(.m68k_fixup)
__stop_fixup = .;
} > DATA
NOTES > DATA
.init.text : { .init.text : {
. = ALIGN(PAGE_SIZE); . = ALIGN(PAGE_SIZE);
__init_begin = .; __init_begin = .;
@ -31,7 +31,9 @@ SECTIONS
RW_DATA_SECTION(16, PAGE_SIZE, THREAD_SIZE) RW_DATA_SECTION(16, PAGE_SIZE, THREAD_SIZE)
_sbss = .;
BSS_SECTION(0, 0, 0) BSS_SECTION(0, 0, 0)
_ebss = .;
_edata = .; /* End of data section */ _edata = .; /* End of data section */
@ -44,7 +44,9 @@ __init_begin = .;
. = ALIGN(PAGE_SIZE); . = ALIGN(PAGE_SIZE);
__init_end = .; __init_end = .;
_sbss = .;
BSS_SECTION(0, 0, 0) BSS_SECTION(0, 0, 0)
_ebss = .;
_end = . ; _end = . ;
@ -1,5 +1,14 @@
#ifdef CONFIG_MMU #if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
#include "vmlinux.lds_mm.S" PHDRS
{
text PT_LOAD FILEHDR PHDRS FLAGS (7);
data PT_LOAD FLAGS (7);
}
#ifdef CONFIG_SUN3
#include "vmlinux-sun3.lds"
#else #else
#include "vmlinux.lds_no.S" #include "vmlinux-std.lds"
#endif
#else
#include "vmlinux-nommu.lds"
#endif #endif
@ -1,10 +0,0 @@
PHDRS
{
text PT_LOAD FILEHDR PHDRS FLAGS (7);
data PT_LOAD FLAGS (7);
}
#ifdef CONFIG_SUN3
#include "vmlinux-sun3.lds"
#else
#include "vmlinux-std.lds"
#endif
@ -6,9 +6,11 @@
lib-y := ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \ lib-y := ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \
memcpy.o memset.o memmove.o memcpy.o memset.o memmove.o
ifdef CONFIG_MMU lib-$(CONFIG_MMU) += string.o uaccess.o
lib-y += string.o uaccess.o checksum_mm.o lib-$(CONFIG_CPU_HAS_NO_MULDIV64) += mulsi3.o divsi3.o udivsi3.o
else lib-$(CONFIG_CPU_HAS_NO_MULDIV64) += modsi3.o umodsi3.o
lib-y += mulsi3.o divsi3.o udivsi3.o modsi3.o umodsi3.o checksum_no.o
ifndef CONFIG_GENERIC_CSUM
lib-y += checksum.o
endif endif
@ -1,156 +0,0 @@
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* IP/TCP/UDP checksumming routines
*
* Authors: Jorge Cwik, <jorge@laser.satlink.net>
* Arnt Gulbrandsen, <agulbra@nvg.unit.no>
* Tom May, <ftom@netcom.com>
* Andreas Schwab, <schwab@issan.informatik.uni-dortmund.de>
* Lots of code moved from tcp.c and ip.c; see those files
* for more names.
*
* 03/02/96 Jes Sorensen, Andreas Schwab, Roman Hodek:
* Fixed some nasty bugs, causing some horrible crashes.
* A: At some points, the sum (%0) was used as
* length-counter instead of the length counter
* (%1). Thanks to Roman Hodek for pointing this out.
* B: GCC seems to mess up if one uses too many
* data-registers to hold input values and one tries to
* specify d0 and d1 as scratch registers. Letting gcc choose these
* registers itself solves the problem.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
/* Revised by Kenneth Albanowski for m68knommu. Basic problem: unaligned access kills, so most
of the assembly has to go. */
#include <linux/module.h>
#include <net/checksum.h>
static inline unsigned short from32to16(unsigned long x)
{
/* add up 16-bit and 16-bit for 16+c bit */
x = (x & 0xffff) + (x >> 16);
/* add up carry.. */
x = (x & 0xffff) + (x >> 16);
return x;
}
static unsigned long do_csum(const unsigned char * buff, int len)
{
int odd, count;
unsigned long result = 0;
if (len <= 0)
goto out;
odd = 1 & (unsigned long) buff;
if (odd) {
result = *buff;
len--;
buff++;
}
count = len >> 1; /* nr of 16-bit words.. */
if (count) {
if (2 & (unsigned long) buff) {
result += *(unsigned short *) buff;
count--;
len -= 2;
buff += 2;
}
count >>= 1; /* nr of 32-bit words.. */
if (count) {
unsigned long carry = 0;
do {
unsigned long w = *(unsigned long *) buff;
count--;
buff += 4;
result += carry;
result += w;
carry = (w > result);
} while (count);
result += carry;
result = (result & 0xffff) + (result >> 16);
}
if (len & 2) {
result += *(unsigned short *) buff;
buff += 2;
}
}
if (len & 1)
result += (*buff << 8);
result = from32to16(result);
if (odd)
result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
out:
return result;
}
#ifdef CONFIG_COLDFIRE
/*
* This is a version of ip_compute_csum() optimized for IP headers,
* which always checksum on 4 octet boundaries.
*/
__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
return (__force __sum16)~do_csum(iph,ihl*4);
}
EXPORT_SYMBOL(ip_fast_csum);
#endif
/*
* computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
*
* returns a 32-bit number suitable for feeding into itself
* or csum_tcpudp_magic
*
* this function must be called with even lengths, except
* for the last fragment, which may be odd
*
* it's best to have buff aligned on a 32-bit boundary
*/
__wsum csum_partial(const void *buff, int len, __wsum sum)
{
unsigned int result = do_csum(buff, len);
/* add in old sum, and carry.. */
result += (__force u32)sum;
if ((__force u32)sum > result)
result += 1;
return (__force __wsum)result;
}
EXPORT_SYMBOL(csum_partial);
/*
* copy from fs while checksumming, otherwise like csum_partial
*/
__wsum
csum_partial_copy_from_user(const void __user *src, void *dst,
int len, __wsum sum, int *csum_err)
{
if (csum_err) *csum_err = 0;
memcpy(dst, (__force const void *)src, len);
return csum_partial(dst, len, sum);
}
EXPORT_SYMBOL(csum_partial_copy_from_user);
/*
* copy from ds while checksumming, otherwise like csum_partial
*/
__wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
{
memcpy(dst, src, len);
return csum_partial(dst, len, sum);
}
EXPORT_SYMBOL(csum_partial_copy_nocheck);
@ -15,17 +15,17 @@ unsigned long __generic_copy_from_user(void *to, const void __user *from,
asm volatile ("\n" asm volatile ("\n"
" tst.l %0\n" " tst.l %0\n"
" jeq 2f\n" " jeq 2f\n"
"1: moves.l (%1)+,%3\n" "1: "MOVES".l (%1)+,%3\n"
" move.l %3,(%2)+\n" " move.l %3,(%2)+\n"
" subq.l #1,%0\n" " subq.l #1,%0\n"
" jne 1b\n" " jne 1b\n"
"2: btst #1,%5\n" "2: btst #1,%5\n"
" jeq 4f\n" " jeq 4f\n"
"3: moves.w (%1)+,%3\n" "3: "MOVES".w (%1)+,%3\n"
" move.w %3,(%2)+\n" " move.w %3,(%2)+\n"
"4: btst #0,%5\n" "4: btst #0,%5\n"
" jeq 6f\n" " jeq 6f\n"
"5: moves.b (%1)+,%3\n" "5: "MOVES".b (%1)+,%3\n"
" move.b %3,(%2)+\n" " move.b %3,(%2)+\n"
"6:\n" "6:\n"
" .section .fixup,\"ax\"\n" " .section .fixup,\"ax\"\n"
@ -68,17 +68,17 @@ unsigned long __generic_copy_to_user(void __user *to, const void *from,
" tst.l %0\n" " tst.l %0\n"
" jeq 4f\n" " jeq 4f\n"
"1: move.l (%1)+,%3\n" "1: move.l (%1)+,%3\n"
"2: moves.l %3,(%2)+\n" "2: "MOVES".l %3,(%2)+\n"
"3: subq.l #1,%0\n" "3: subq.l #1,%0\n"
" jne 1b\n" " jne 1b\n"
"4: btst #1,%5\n" "4: btst #1,%5\n"
" jeq 6f\n" " jeq 6f\n"
" move.w (%1)+,%3\n" " move.w (%1)+,%3\n"
"5: moves.w %3,(%2)+\n" "5: "MOVES".w %3,(%2)+\n"
"6: btst #0,%5\n" "6: btst #0,%5\n"
" jeq 8f\n" " jeq 8f\n"
" move.b (%1)+,%3\n" " move.b (%1)+,%3\n"
"7: moves.b %3,(%2)+\n" "7: "MOVES".b %3,(%2)+\n"
"8:\n" "8:\n"
" .section .fixup,\"ax\"\n" " .section .fixup,\"ax\"\n"
" .even\n" " .even\n"
@ -115,7 +115,7 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
return count; return count;
asm volatile ("\n" asm volatile ("\n"
"1: moves.b (%2)+,%4\n" "1: "MOVES".b (%2)+,%4\n"
" move.b %4,(%1)+\n" " move.b %4,(%1)+\n"
" jeq 2f\n" " jeq 2f\n"
" subq.l #1,%3\n" " subq.l #1,%3\n"
@ -152,7 +152,7 @@ long strnlen_user(const char __user *src, long n)
asm volatile ("\n" asm volatile ("\n"
"1: subq.l #1,%1\n" "1: subq.l #1,%1\n"
" jmi 3f\n" " jmi 3f\n"
"2: moves.b (%0)+,%2\n" "2: "MOVES".b (%0)+,%2\n"
" tst.b %2\n" " tst.b %2\n"
" jne 1b\n" " jne 1b\n"
" jra 4f\n" " jra 4f\n"
@ -188,15 +188,15 @@ unsigned long __clear_user(void __user *to, unsigned long n)
asm volatile ("\n" asm volatile ("\n"
" tst.l %0\n" " tst.l %0\n"
" jeq 3f\n" " jeq 3f\n"
"1: moves.l %2,(%1)+\n" "1: "MOVES".l %2,(%1)+\n"
"2: subq.l #1,%0\n" "2: subq.l #1,%0\n"
" jne 1b\n" " jne 1b\n"
"3: btst #1,%4\n" "3: btst #1,%4\n"
" jeq 5f\n" " jeq 5f\n"
"4: moves.w %2,(%1)+\n" "4: "MOVES".w %2,(%1)+\n"
"5: btst #0,%4\n" "5: btst #0,%4\n"
" jeq 7f\n" " jeq 7f\n"
"6: moves.b %2,(%1)\n" "6: "MOVES".b %2,(%1)\n"
"7:\n" "7:\n"
" .section .fixup,\"ax\"\n" " .section .fixup,\"ax\"\n"
" .even\n" " .even\n"
@ -4,6 +4,8 @@
obj-y := init.o obj-y := init.o
obj-$(CONFIG_MMU) += cache.o fault.o hwtest.o obj-$(CONFIG_MMU) += cache.o fault.o
obj-$(CONFIG_MMU_MOTOROLA) += kmap.o memory.o motorola.o obj-$(CONFIG_MMU_MOTOROLA) += kmap.o memory.o motorola.o hwtest.o
obj-$(CONFIG_MMU_SUN3) += sun3kmap.o sun3mmu.o obj-$(CONFIG_MMU_SUN3) += sun3kmap.o sun3mmu.o hwtest.o
obj-$(CONFIG_MMU_COLDFIRE) += kmap.o memory.o mcfmmu.o
@ -74,8 +74,16 @@ static unsigned long virt_to_phys_slow(unsigned long vaddr)
/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */ /* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
void flush_icache_range(unsigned long address, unsigned long endaddr) void flush_icache_range(unsigned long address, unsigned long endaddr)
{ {
if (CPU_IS_COLDFIRE) {
if (CPU_IS_040_OR_060) { unsigned long start, end;
start = address & ICACHE_SET_MASK;
end = endaddr & ICACHE_SET_MASK;
if (start > end) {
flush_cf_icache(0, end);
end = ICACHE_MAX_ADDR;
}
flush_cf_icache(start, end);
} else if (CPU_IS_040_OR_060) {
address &= PAGE_MASK; address &= PAGE_MASK;
do { do {
@ -100,7 +108,17 @@ EXPORT_SYMBOL(flush_icache_range);
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page, void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
unsigned long addr, int len) unsigned long addr, int len)
{ {
if (CPU_IS_040_OR_060) { if (CPU_IS_COLDFIRE) {
unsigned long start, end;
start = addr & ICACHE_SET_MASK;
end = (addr + len) & ICACHE_SET_MASK;
if (start > end) {
flush_cf_icache(0, end);
end = ICACHE_MAX_ADDR;
}
flush_cf_icache(start, end);
} else if (CPU_IS_040_OR_060) {
asm volatile ("nop\n\t" asm volatile ("nop\n\t"
".chip 68040\n\t" ".chip 68040\n\t"
"cpushp %%bc,(%0)\n\t" "cpushp %%bc,(%0)\n\t"

View File

@ -24,6 +24,7 @@
#include <asm/page.h> #include <asm/page.h>
#include <asm/pgalloc.h> #include <asm/pgalloc.h>
#include <asm/system.h> #include <asm/system.h>
#include <asm/traps.h>
#include <asm/machdep.h> #include <asm/machdep.h>
#include <asm/io.h> #include <asm/io.h>
#ifdef CONFIG_ATARI #ifdef CONFIG_ATARI
@ -75,6 +76,38 @@ extern void init_pointer_table(unsigned long ptable);
extern pmd_t *zero_pgtable; extern pmd_t *zero_pgtable;
#if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
#define VECTORS &vectors[0]
#else
#define VECTORS _ramvec
#endif
void __init print_memmap(void)
{
#define UL(x) ((unsigned long) (x))
#define MLK(b, t) UL(b), UL(t), (UL(t) - UL(b)) >> 10
#define MLM(b, t) UL(b), UL(t), (UL(t) - UL(b)) >> 20
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), 1024)
pr_notice("Virtual kernel memory layout:\n"
" vector : 0x%08lx - 0x%08lx (%4ld KiB)\n"
" kmap : 0x%08lx - 0x%08lx (%4ld MiB)\n"
" vmalloc : 0x%08lx - 0x%08lx (%4ld MiB)\n"
" lowmem : 0x%08lx - 0x%08lx (%4ld MiB)\n"
" .init : 0x%p" " - 0x%p" " (%4d KiB)\n"
" .text : 0x%p" " - 0x%p" " (%4d KiB)\n"
" .data : 0x%p" " - 0x%p" " (%4d KiB)\n"
" .bss : 0x%p" " - 0x%p" " (%4d KiB)\n",
MLK(VECTORS, VECTORS + 256),
MLM(KMAP_START, KMAP_END),
MLM(VMALLOC_START, VMALLOC_END),
MLM(PAGE_OFFSET, (unsigned long)high_memory),
MLK_ROUNDUP(__init_begin, __init_end),
MLK_ROUNDUP(_stext, _etext),
MLK_ROUNDUP(_sdata, _edata),
MLK_ROUNDUP(_sbss, _ebss));
}
void __init mem_init(void) void __init mem_init(void)
{ {
pg_data_t *pgdat; pg_data_t *pgdat;
@ -106,7 +139,7 @@ void __init mem_init(void)
} }
} }
#ifndef CONFIG_SUN3 #if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
/* insert pointer tables allocated so far into the tablelist */ /* insert pointer tables allocated so far into the tablelist */
init_pointer_table((unsigned long)kernel_pg_dir); init_pointer_table((unsigned long)kernel_pg_dir);
for (i = 0; i < PTRS_PER_PGD; i++) { for (i = 0; i < PTRS_PER_PGD; i++) {
@ -125,6 +158,7 @@ void __init mem_init(void)
codepages << (PAGE_SHIFT-10), codepages << (PAGE_SHIFT-10),
datapages << (PAGE_SHIFT-10), datapages << (PAGE_SHIFT-10),
initpages << (PAGE_SHIFT-10)); initpages << (PAGE_SHIFT-10));
print_memmap();
} }
#ifdef CONFIG_BLK_DEV_INITRD #ifdef CONFIG_BLK_DEV_INITRD
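The new print_memmap() relies on three small macros that each expand to a (base, top, size) triple for the printk format string; the size conversion is a plain shift, >> 10 for KiB and >> 20 for MiB. A worked expansion with hypothetical addresses:

/* MLM(0xc0000000, 0xc0800000) expands to
 *   0xc0000000UL, 0xc0800000UL, (0xc0800000UL - 0xc0000000UL) >> 20
 * i.e. base, top and 8, which the "%4ld MiB" format prints as 8 MiB.
 */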

View File

@ -171,7 +171,8 @@ void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cachefla
break; break;
} }
} else { } else {
physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY); physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED |
_PAGE_DIRTY | _PAGE_READWRITE);
switch (cacheflag) { switch (cacheflag) {
case IOMAP_NOCACHE_SER: case IOMAP_NOCACHE_SER:
case IOMAP_NOCACHE_NONSER: case IOMAP_NOCACHE_NONSER:
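The extra _PAGE_READWRITE in the else branch reflects a ColdFire MMU difference: its page descriptors carry explicit read and write enable bits, so a mapping built without them faults on access, while the classic Motorola MMUs treat a page as writable unless a read-only bit is set. One way the same line stays portable is for the non-ColdFire headers to define the flag as a no-op; a sketch under that assumption (the guard and the zero definition here are illustrative, not lifted from the tree):

#ifdef CONFIG_MMU_COLDFIRE
#define _PAGE_READWRITE	(CF_PAGE_READABLE | CF_PAGE_WRITABLE)
#else
#define _PAGE_READWRITE	0	/* writable by default, nothing to add */
#endif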

198
arch/m68k/mm/mcfmmu.c Normal file
View File

@ -0,0 +1,198 @@
/*
* Based upon linux/arch/m68k/mm/sun3mmu.c
* Based upon linux/arch/ppc/mm/mmu_context.c
*
* Implementations of mm routines specific to the ColdFire MMU.
*
* Copyright (c) 2008 Freescale Semiconductor, Inc.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/mcf_pgalloc.h>
#include <asm/tlbflush.h>
#define KMAPAREA(x) ((x >= VMALLOC_START) && (x < KMAP_END))
mm_context_t next_mmu_context;
unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
atomic_t nr_free_contexts;
struct mm_struct *context_mm[LAST_CONTEXT+1];
extern unsigned long num_pages;
void free_initmem(void)
{
}
/*
* ColdFire paging_init derived from sun3.
*/
void __init paging_init(void)
{
pgd_t *pg_dir;
pte_t *pg_table;
unsigned long address, size;
unsigned long next_pgtable, bootmem_end;
unsigned long zones_size[MAX_NR_ZONES];
enum zone_type zone;
int i;
empty_zero_page = (void *) alloc_bootmem_pages(PAGE_SIZE);
memset((void *) empty_zero_page, 0, PAGE_SIZE);
pg_dir = swapper_pg_dir;
memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
size = num_pages * sizeof(pte_t);
size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
next_pgtable = (unsigned long) alloc_bootmem_pages(size);
bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;
pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;
address = PAGE_OFFSET;
while (address < (unsigned long)high_memory) {
pg_table = (pte_t *) next_pgtable;
next_pgtable += PTRS_PER_PTE * sizeof(pte_t);
pgd_val(*pg_dir) = (unsigned long) pg_table;
pg_dir++;
/* now change pg_table to kernel virtual addresses */
for (i = 0; i < PTRS_PER_PTE; ++i, ++pg_table) {
pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);
if (address >= (unsigned long) high_memory)
pte_val(pte) = 0;
set_pte(pg_table, pte);
address += PAGE_SIZE;
}
}
current->mm = NULL;
for (zone = 0; zone < MAX_NR_ZONES; zone++)
zones_size[zone] = 0x0;
zones_size[ZONE_DMA] = num_pages;
free_area_init(zones_size);
}
int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
{
unsigned long flags, mmuar;
struct mm_struct *mm;
pgd_t *pgd;
pmd_t *pmd;
pte_t *pte;
int asid;
local_irq_save(flags);
mmuar = (dtlb) ? mmu_read(MMUAR) :
regs->pc + (extension_word * sizeof(long));
mm = (!user_mode(regs) && KMAPAREA(mmuar)) ? &init_mm : current->mm;
if (!mm) {
local_irq_restore(flags);
return -1;
}
pgd = pgd_offset(mm, mmuar);
if (pgd_none(*pgd)) {
local_irq_restore(flags);
return -1;
}
pmd = pmd_offset(pgd, mmuar);
if (pmd_none(*pmd)) {
local_irq_restore(flags);
return -1;
}
pte = (KMAPAREA(mmuar)) ? pte_offset_kernel(pmd, mmuar)
: pte_offset_map(pmd, mmuar);
if (pte_none(*pte) || !pte_present(*pte)) {
local_irq_restore(flags);
return -1;
}
if (write) {
if (!pte_write(*pte)) {
local_irq_restore(flags);
return -1;
}
set_pte(pte, pte_mkdirty(*pte));
}
set_pte(pte, pte_mkyoung(*pte));
asid = mm->context & 0xff;
if (!pte_dirty(*pte) && !KMAPAREA(mmuar))
set_pte(pte, pte_wrprotect(*pte));
mmu_write(MMUTR, (mmuar & PAGE_MASK) | (asid << MMUTR_IDN) |
(((int)(pte->pte) & (int)CF_PAGE_MMUTR_MASK)
>> CF_PAGE_MMUTR_SHIFT) | MMUTR_V);
mmu_write(MMUDR, (pte_val(*pte) & PAGE_MASK) |
((pte->pte) & CF_PAGE_MMUDR_MASK) | MMUDR_SZ_8KB | MMUDR_X);
if (dtlb)
mmu_write(MMUOR, MMUOR_ACC | MMUOR_UAA);
else
mmu_write(MMUOR, MMUOR_ITLB | MMUOR_ACC | MMUOR_UAA);
local_irq_restore(flags);
return 0;
}
/*
* Initialize the context management stuff.
* The following was taken from arch/ppc/mmu_context.c
*/
void __init mmu_context_init(void)
{
/*
* Some processors have too few contexts to reserve one for
* init_mm, and require using context 0 for a normal task.
* Other processors reserve the use of context zero for the kernel.
* This code assumes FIRST_CONTEXT < 32.
*/
context_map[0] = (1 << FIRST_CONTEXT) - 1;
next_mmu_context = FIRST_CONTEXT;
atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
}
/*
* Steal a context from a task that has one at the moment.
* This is only used on 8xx and 4xx and we presently assume that
* they don't do SMP. If they do then this will have to check
* whether the MM we steal is in use.
* We also assume that this is only used on systems that don't
* use an MMU hash table - this is true for 8xx and 4xx.
* This isn't an LRU system, it just frees up each context in
* turn (sort-of pseudo-random replacement :). This would be the
* place to implement an LRU scheme if anyone was motivated to do it.
* -- paulus
*/
void steal_context(void)
{
struct mm_struct *mm;
/*
* free up context `next_mmu_context'
* if we shouldn't free context 0, don't...
*/
if (next_mmu_context < FIRST_CONTEXT)
next_mmu_context = FIRST_CONTEXT;
mm = context_mm[next_mmu_context];
flush_tlb_mm(mm);
destroy_context(mm);
}
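For context, the allocation side that pairs with steal_context() hands out ASIDs round-robin from context_map and steals one only when the free count hits zero. Roughly, after the ppc code this file credits (the tree's actual helper sits in the m68k mmu_context header and may differ in detail):

static inline void get_mmu_context(struct mm_struct *mm)
{
	mm_context_t ctx;

	if (mm->context != NO_CONTEXT)
		return;
	while (atomic_dec_if_positive(&nr_free_contexts) < 0)
		steal_context();		/* evict another mm's ASID */
	ctx = next_mmu_context;
	while (test_and_set_bit(ctx, context_map)) {
		ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx);
		if (ctx > LAST_CONTEXT)
			ctx = 0;
	}
	next_mmu_context = (ctx + 1) & LAST_CONTEXT;
	mm->context = ctx;
	context_mm[ctx] = mm;
}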

View File

@ -203,7 +203,9 @@ static inline void pushcl040(unsigned long paddr)
void cache_clear (unsigned long paddr, int len) void cache_clear (unsigned long paddr, int len)
{ {
if (CPU_IS_040_OR_060) { if (CPU_IS_COLDFIRE) {
flush_cf_bcache(0, DCACHE_MAX_ADDR);
} else if (CPU_IS_040_OR_060) {
int tmp; int tmp;
/* /*
@ -250,7 +252,9 @@ EXPORT_SYMBOL(cache_clear);
void cache_push (unsigned long paddr, int len) void cache_push (unsigned long paddr, int len)
{ {
if (CPU_IS_040_OR_060) { if (CPU_IS_COLDFIRE) {
flush_cf_bcache(0, DCACHE_MAX_ADDR);
} else if (CPU_IS_040_OR_060) {
int tmp = PAGE_SIZE; int tmp = PAGE_SIZE;
/* /*
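Both hunks in this file make the same trade: the ColdFire path ignores paddr and len and writes back and invalidates the whole branch cache, coarser than the ranged 040/060 cpushp loops but simple, since a cheap per-line push by physical address is not on offer here. The shape of the change:

void cache_push(unsigned long paddr, int len)
{
	if (CPU_IS_COLDFIRE) {
		/* coarse but simple: push and invalidate everything */
		flush_cf_bcache(0, DCACHE_MAX_ADDR);
	} else if (CPU_IS_040_OR_060) {
		/* ... existing ranged cpushp loop, unchanged ... */
	}
}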

View File

@ -13,11 +13,17 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/io.h> #include <linux/io.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <asm/pgalloc.h>
#include <asm/machdep.h> #include <asm/machdep.h>
#include <asm/coldfire.h> #include <asm/coldfire.h>
#include <asm/m54xxsim.h> #include <asm/m54xxsim.h>
#include <asm/mcfuart.h> #include <asm/mcfuart.h>
#include <asm/m54xxgpt.h> #include <asm/m54xxgpt.h>
#ifdef CONFIG_MMU
#include <asm/mmu_context.h>
#endif
/***************************************************************************/ /***************************************************************************/
@ -95,8 +101,49 @@ static void mcf54xx_reset(void)
/***************************************************************************/ /***************************************************************************/
#ifdef CONFIG_MMU
unsigned long num_pages;
static void __init mcf54xx_bootmem_alloc(void)
{
unsigned long start_pfn;
unsigned long memstart;
/* _rambase and _ramend will be naturally page aligned */
m68k_memory[0].addr = _rambase;
m68k_memory[0].size = _ramend - _rambase;
/* compute total pages in system */
num_pages = (_ramend - _rambase) >> PAGE_SHIFT;
/* page numbers */
memstart = PAGE_ALIGN(_ramstart);
min_low_pfn = _rambase >> PAGE_SHIFT;
start_pfn = memstart >> PAGE_SHIFT;
max_low_pfn = _ramend >> PAGE_SHIFT;
high_memory = (void *)_ramend;
m68k_virt_to_node_shift = fls(_ramend - _rambase - 1) - 6;
module_fixup(NULL, __start_fixup, __stop_fixup);
/* setup bootmem data */
m68k_setup_node(0);
memstart += init_bootmem_node(NODE_DATA(0), start_pfn,
min_low_pfn, max_low_pfn);
free_bootmem_node(NODE_DATA(0), memstart, _ramend - memstart);
}
#endif /* CONFIG_MMU */
/***************************************************************************/
void __init config_BSP(char *commandp, int size) void __init config_BSP(char *commandp, int size)
{ {
#ifdef CONFIG_MMU
mcf54xx_bootmem_alloc();
mmu_context_init();
#endif
mach_reset = mcf54xx_reset; mach_reset = mcf54xx_reset;
m54xx_uarts_init(); m54xx_uarts_init();
} }
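To make the pfn arithmetic in mcf54xx_bootmem_alloc() concrete, a worked example with hypothetical values: a 64 MiB part based at 0x40000000, with the 8 KiB ColdFire MMU page size (PAGE_SHIFT = 13) that this series sets elsewhere:

/*
 * _rambase = 0x40000000, _ramend = 0x44000000:
 *
 *   num_pages   = (0x44000000 - 0x40000000) >> 13 = 8192 pages
 *   min_low_pfn = 0x40000000 >> 13 = 0x20000
 *   max_low_pfn = 0x44000000 >> 13 = 0x22000
 *
 * init_bootmem_node() places its allocation bitmap at memstart and
 * returns the bitmap's size, which is why memstart is advanced by the
 * return value before the remaining RAM goes to free_bootmem_node().
 */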

View File

@ -14,12 +14,8 @@ obj-$(CONFIG_M68328) += config.o
obj-$(CONFIG_ROM) += romvec.o obj-$(CONFIG_ROM) += romvec.o
extra-y := head.o extra-y := head.o
extra-$(CONFIG_M68328) += bootlogo.rh head.o
$(obj)/bootlogo.rh: $(src)/bootlogo.h
perl $(src)/bootlogo.pl < $(src)/bootlogo.h > $(obj)/bootlogo.rh
$(obj)/head.o: $(obj)/$(head-y) $(obj)/head.o: $(obj)/$(head-y)
ln -sf $(head-y) $(obj)/head.o ln -sf $(head-y) $(obj)/head.o
clean-files := $(obj)/bootlogo.rh $(obj)/head.o $(head-y) clean-files := $(obj)/head.o $(head-y)

View File

@ -1,6 +1,6 @@
#define bootlogo_width 160 #define bootlogo_width 160
#define bootlogo_height 160 #define bootlogo_height 160
static unsigned char bootlogo_bits[] = { unsigned char __attribute__ ((aligned(16))) bootlogo_bits[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x01, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x40, 0x55, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x55, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,

View File

@ -1,10 +0,0 @@
$_ = join("", <>);
s/(0x[0-9a-f]{2})/sprintf("0x%.2x",ord(pack("b8",unpack("B8",chr(hex($1))))))/gei;
s/^ / .byte /gm;
s/[,};]+$//gm;
s/^static.*//gm;
print $_;
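Besides reflowing the data into .byte directives, the deleted bootlogo.pl performed one real transformation: pack("b8", unpack("B8", ...)) reverses the bit order inside every byte, turning the LSB-first XBM pixel layout into MSB-first scan order. For reference, the same transform in C (a standalone helper, not something the tree adds):

/* Reverse the bit order within a byte, as the perl one-liner did:
 * swap nibbles, then bit pairs, then adjacent bits.
 */
static unsigned char revbits(unsigned char b)
{
	b = (b & 0xf0) >> 4 | (b & 0x0f) << 4;
	b = (b & 0xcc) >> 2 | (b & 0x33) << 2;
	b = (b & 0xaa) >> 1 | (b & 0x55) << 1;
	return b;
}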

View File

@ -20,6 +20,9 @@
#include <asm/system.h> #include <asm/system.h>
#include <asm/machdep.h> #include <asm/machdep.h>
#include <asm/MC68328.h> #include <asm/MC68328.h>
#if defined(CONFIG_PILOT) || defined(CONFIG_INIT_LCD)
#include "bootlogo.h"
#endif
/***************************************************************************/ /***************************************************************************/

View File

@ -24,19 +24,7 @@
.global _ramstart .global _ramstart
.global _ramend .global _ramend
.global penguin_bits .global bootlogo_bits
#ifdef CONFIG_PILOT
#define IMR 0xFFFFF304
.data
.align 16
penguin_bits:
#include "bootlogo.rh"
#endif
/*****************************************************************************/ /*****************************************************************************/
@ -185,9 +173,6 @@ L3:
moveq #79, %d7 moveq #79, %d7
movel %d0, _ramend movel %d0, _ramend
movel %a3, %d0
movel %d0, rom_length
pea 0 pea 0
pea env pea env
pea %sp@(4) pea %sp@(4)
@ -196,7 +181,7 @@ L3:
DBG_PUTC('H') DBG_PUTC('H')
#ifdef CONFIG_PILOT #ifdef CONFIG_PILOT
movel #penguin_bits, 0xFFFFFA00 movel #bootlogo_bits, 0xFFFFFA00
moveb #10, 0xFFFFFA05 moveb #10, 0xFFFFFA05
movew #160, 0xFFFFFA08 movew #160, 0xFFFFFA08
movew #160, 0xFFFFFA0A movew #160, 0xFFFFFA0A

View File

@ -8,7 +8,7 @@
.global _ramend .global _ramend
#ifdef CONFIG_INIT_LCD #ifdef CONFIG_INIT_LCD
.global splash_bits .global bootlogo_bits
#endif #endif
.data .data
@ -29,16 +29,11 @@ _ramend:
#define RAMEND (CONFIG_RAMBASE + CONFIG_RAMSIZE) #define RAMEND (CONFIG_RAMBASE + CONFIG_RAMSIZE)
#ifdef CONFIG_INIT_LCD
splash_bits:
#include "bootlogo.rh"
#endif
.text .text
_start: _start:
_stext: movew #0x2700,%sr _stext: movew #0x2700,%sr
#ifdef CONFIG_INIT_LCD #ifdef CONFIG_INIT_LCD
movel #splash_bits, 0xfffffA00 /* LSSA */ movel #bootlogo_bits, 0xfffffA00 /* LSSA */
moveb #0x28, 0xfffffA05 /* LVPW */ moveb #0x28, 0xfffffA05 /* LVPW */
movew #0x280, 0xFFFFFa08 /* LXMAX */ movew #0x280, 0xFFFFFa08 /* LXMAX */
movew #0x1df, 0xFFFFFa0a /* LYMAX */ movew #0x1df, 0xFFFFFa0a /* LYMAX */

View File

@ -54,7 +54,6 @@ sw_usp:
.globl ret_from_signal .globl ret_from_signal
.globl sys_call_table .globl sys_call_table
.globl inthandler .globl inthandler
.globl fasthandler
enosys: enosys:
mov.l #sys_ni_syscall,%d3 mov.l #sys_ni_syscall,%d3
@ -63,6 +62,7 @@ enosys:
ENTRY(system_call) ENTRY(system_call)
SAVE_ALL_SYS SAVE_ALL_SYS
move #0x2000,%sr /* enable intrs again */ move #0x2000,%sr /* enable intrs again */
GET_CURRENT(%d2)
cmpl #NR_syscalls,%d0 cmpl #NR_syscalls,%d0
jcc enosys jcc enosys
@ -166,6 +166,7 @@ Lsignal_return:
*/ */
ENTRY(inthandler) ENTRY(inthandler)
SAVE_ALL_INT SAVE_ALL_INT
GET_CURRENT(%d2)
movew %sp@(PT_OFF_FORMATVEC),%d0 /* put exception # in d0 */ movew %sp@(PT_OFF_FORMATVEC),%d0 /* put exception # in d0 */
andl #0x03fc,%d0 /* mask out vector only */ andl #0x03fc,%d0 /* mask out vector only */
@ -191,7 +192,9 @@ ENTRY(resume)
movel %sp,%a0@(TASK_THREAD+THREAD_KSP) /* save kernel stack pointer */ movel %sp,%a0@(TASK_THREAD+THREAD_KSP) /* save kernel stack pointer */
RDUSP /* movel %usp,%a3 */ RDUSP /* movel %usp,%a3 */
movel %a3,%a0@(TASK_THREAD+THREAD_USP) /* save thread user stack */ movel %a3,%a0@(TASK_THREAD+THREAD_USP) /* save thread user stack */
#ifdef CONFIG_MMU
movel %a1,%a2 /* set new current */
#endif
movel %a1@(TASK_THREAD+THREAD_USP),%a3 /* restore thread user stack */ movel %a1@(TASK_THREAD+THREAD_USP),%a3 /* restore thread user stack */
WRUSP /* movel %a3,%usp */ WRUSP /* movel %a3,%usp */
movel %a1@(TASK_THREAD+THREAD_KSP),%sp /* restore new kernel stack */ movel %a1@(TASK_THREAD+THREAD_KSP),%sp /* restore new kernel stack */

View File

@ -3,7 +3,7 @@
/* /*
* head.S -- common startup code for ColdFire CPUs. * head.S -- common startup code for ColdFire CPUs.
* *
* (C) Copyright 1999-2010, Greg Ungerer <gerg@snapgear.com>. * (C) Copyright 1999-2011, Greg Ungerer <gerg@snapgear.com>.
*/ */
/*****************************************************************************/ /*****************************************************************************/
@ -13,6 +13,7 @@
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#include <asm/coldfire.h> #include <asm/coldfire.h>
#include <asm/mcfsim.h> #include <asm/mcfsim.h>
#include <asm/mcfmmu.h>
#include <asm/thread_info.h> #include <asm/thread_info.h>
/*****************************************************************************/ /*****************************************************************************/
@ -135,6 +136,14 @@ _init_sp:
__HEAD __HEAD
#ifdef CONFIG_MMU
_start0:
jmp _start
.global kernel_pg_dir
.equ kernel_pg_dir,_start0
.equ .,_start0+0x1000
#endif
/* /*
* This is the codes first entry point. This is where it all * This is the codes first entry point. This is where it all
* begins... * begins...
@ -143,6 +152,9 @@ __HEAD
_start: _start:
nop /* filler */ nop /* filler */
movew #0x2700, %sr /* no interrupts */ movew #0x2700, %sr /* no interrupts */
movel #CACHE_INIT,%d0 /* disable cache */
movec %d0,%CACR
nop
#if defined(CONFIG_UBOOT) #if defined(CONFIG_UBOOT)
movel %sp,_init_sp /* save initial stack pointer */ movel %sp,_init_sp /* save initial stack pointer */
#endif #endif
@ -176,9 +188,6 @@ _start:
* it is very similar. Define the exact settings in the headers * it is very similar. Define the exact settings in the headers
* then the code here is the same for all. * then the code here is the same for all.
*/ */
movel #CACHE_INIT,%d0 /* invalidate whole cache */
movec %d0,%CACR
nop
movel #ACR0_MODE,%d0 /* set RAM region for caching */ movel #ACR0_MODE,%d0 /* set RAM region for caching */
movec %d0,%ACR0 movec %d0,%ACR0
movel #ACR1_MODE,%d0 /* anything else to cache? */ movel #ACR1_MODE,%d0 /* anything else to cache? */
@ -193,6 +202,26 @@ _start:
movec %d0,%CACR movec %d0,%CACR
nop nop
#ifdef CONFIG_MMU
/*
* Identity mapping for the kernel region.
*/
movel #(MMUBASE+1),%d0 /* enable MMUBAR registers */
movec %d0,%MMUBAR
movel #MMUOR_CA,%d0 /* clear TLB entries */
movel %d0,MMUOR
movel #0,%d0 /* set ASID to 0 */
movec %d0,%asid
movel #MMUCR_EN,%d0 /* Enable the identity map */
movel %d0,MMUCR
nop /* sync i-pipeline */
movel #_vstart,%a0 /* jump to "virtual" space */
jmp %a0@
_vstart:
#endif /* CONFIG_MMU */
#ifdef CONFIG_ROMFS_FS #ifdef CONFIG_ROMFS_FS
/* /*
* Move ROM filesystem above bss :-) * Move ROM filesystem above bss :-)
@ -238,6 +267,22 @@ _clear_bss:
lea init_thread_union,%a0 lea init_thread_union,%a0
lea THREAD_SIZE(%a0),%sp lea THREAD_SIZE(%a0),%sp
#ifdef CONFIG_MMU
.global m68k_cputype
.global m68k_mmutype
.global m68k_fputype
.global m68k_machtype
movel #CPU_COLDFIRE,%d0
movel %d0,m68k_cputype /* Mark us as a ColdFire */
movel #MMU_COLDFIRE,%d0
movel %d0,m68k_mmutype
movel #FPU_COLDFIRE,%d0
movel %d0,m68k_fputype
movel #MACH_M54XX,%d0
movel %d0,m68k_machtype /* Mark us as a 54xx machine */
lea init_task,%a2 /* Set "current" init task */
#endif
/* /*
* Assembler start up done, start code proper. * Assembler start up done, start code proper.
*/ */
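An annotated map of the two #ifdef CONFIG_MMU additions above, since the ordering is the point; the control registers are written with movec, so this stays a comment rather than C:

/*
 * ColdFire head.S MMU bring-up, in order:
 *
 *  1. MMUBAR <- MMUBASE+1   map and enable the MMU register block
 *  2. MMUOR  <- MMUOR_CA    invalidate every TLB entry
 *  3. asid   <- 0           kernel address space runs as ASID 0
 *  4. MMUCR  <- MMUCR_EN    enable translation via the identity map;
 *                           later misses are filled by cf_tlb_miss()
 *  5. nop; jmp _vstart      drain the pipeline, continue at the
 *                           virtual alias of the next instruction
 *
 * The page reserved at _start0 (".equ .,_start0+0x1000") doubles as
 * kernel_pg_dir, so the kernel page directory lives in the image itself.
 */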

View File

@ -98,16 +98,19 @@ static struct irqaction mcfslt_timer_irq = {
static cycle_t mcfslt_read_clk(struct clocksource *cs) static cycle_t mcfslt_read_clk(struct clocksource *cs)
{ {
unsigned long flags; unsigned long flags;
u32 cycles; u32 cycles, scnt;
u16 scnt;
local_irq_save(flags); local_irq_save(flags);
scnt = __raw_readl(TA(MCFSLT_SCNT)); scnt = __raw_readl(TA(MCFSLT_SCNT));
cycles = mcfslt_cnt; cycles = mcfslt_cnt;
if (__raw_readl(TA(MCFSLT_SSR)) & MCFSLT_SSR_TE) {
cycles += mcfslt_cycles_per_jiffy;
scnt = __raw_readl(TA(MCFSLT_SCNT));
}
local_irq_restore(flags); local_irq_restore(flags);
/* subtract because slice timers count down */ /* subtract because slice timers count down */
return cycles - scnt; return cycles + ((mcfslt_cycles_per_jiffy - 1) - scnt);
} }
static struct clocksource mcfslt_clk = { static struct clocksource mcfslt_clk = {
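The clocksource fix above corrects two things at once: SCNT is a 32-bit register (it was being truncated through a u16), and the slice timer counts down, so the elapsed part of the current tick is (cycles_per_jiffy - 1) - SCNT rather than the raw counter value being subtracted. The new MCFSLT_SSR_TE test covers the window where the timer has expired but the tick interrupt that adds a full period to mcfslt_cnt has not yet run. The pattern in the abstract, with hypothetical hw_* accessors standing in for the register reads:

static u64 down_counter_read(u64 soft_count, u32 cycles_per_jiffy)
{
	u32 scnt = hw_read_counter();		/* counts N-1 .. 0 */

	if (hw_rollover_pending()) {		/* expired, IRQ not yet run */
		soft_count += cycles_per_jiffy;	/* count the whole period */
		scnt = hw_read_counter();	/* reloaded: sample again */
	}
	/* down-counter: elapsed within period = (N - 1) - current */
	return soft_count + ((cycles_per_jiffy - 1) - scnt);
}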