arch: remove score port

The Sunplus S+core architecture was added in 2009 by Chen Liqin, who
has been co-maintaining it with Lennox Wu <lennox.wu@gmail.com> since
then. After they both left the company, nobody else has shown any
interest in the port, and it has seen almost no activity other than
tree-wide changes.

The gcc port was removed a few years ago due to the same inactivity.

While the Sunplus website still advertises products with unspecified
RISC cores that might be S+core-based, it's very clear that the Linux
port is completely abandoned at this point.

This removes all files related to the architecture.

Acked-by: Lennox Wu <lennox.wu@gmail.com>
Link: http://www.sunplus.com/
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Commit b8c9c8f019 (parent 553b085c20)
Author: Arnd Bergmann
Date: 2018-03-07 21:46:52 +01:00
130 files changed, 0 insertions(+), 7716 deletions(-)

MAINTAINERS

@@ -12235,13 +12235,6 @@ F: include/linux/sched.h
F: include/uapi/linux/sched.h
F: include/linux/wait.h
SCORE ARCHITECTURE
M: Chen Liqin <liqin.linux@gmail.com>
M: Lennox Wu <lennox.wu@gmail.com>
W: http://www.sunplus.com
S: Supported
F: arch/score/
SCR24X CHIP CARD INTERFACE DRIVER
M: Lubomir Rintel <lkundrak@v3.sk>
S: Supported

arch/score/Kconfig

@ -1,108 +0,0 @@
# SPDX-License-Identifier: GPL-2.0
menu "Machine selection"
config SCORE
def_bool y
select GENERIC_IRQ_SHOW
select GENERIC_IOMAP
select GENERIC_ATOMIC64
select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
select ARCH_DISCARD_MEMBLOCK
select GENERIC_CPU_DEVICES
select GENERIC_CLOCKEVENTS
select HAVE_MOD_ARCH_SPECIFIC
select VIRT_TO_BUS
select MODULES_USE_ELF_REL
select CLONE_BACKWARDS
select CPU_NO_EFFICIENT_FFS
choice
prompt "System type"
default MACH_SPCT6600
config ARCH_SCORE7
bool "SCORE7 processor"
select SYS_SUPPORTS_32BIT_KERNEL
config MACH_SPCT6600
bool "SPCT6600 series based machines"
select SYS_SUPPORTS_32BIT_KERNEL
config SCORE_SIM
bool "Score simulator"
select SYS_SUPPORTS_32BIT_KERNEL
endchoice
endmenu
config NO_DMA
bool
default y
config RWSEM_GENERIC_SPINLOCK
def_bool y
config GENERIC_HWEIGHT
def_bool y
config GENERIC_CALIBRATE_DELAY
def_bool y
menu "Kernel type"
config 32BIT
def_bool y
config ARCH_FLATMEM_ENABLE
def_bool y
source "mm/Kconfig"
config MEMORY_START
hex
default 0xa0000000
source "kernel/Kconfig.hz"
source "kernel/Kconfig.preempt"
endmenu
config RWSEM_GENERIC_SPINLOCK
def_bool y
config LOCKDEP_SUPPORT
def_bool y
config STACKTRACE_SUPPORT
def_bool y
source "init/Kconfig"
source "kernel/Kconfig.freezer"
config MMU
def_bool y
menu "Executable file formats"
source "fs/Kconfig.binfmt"
endmenu
source "net/Kconfig"
source "drivers/Kconfig"
source "fs/Kconfig"
source "arch/score/Kconfig.debug"
source "security/Kconfig"
source "crypto/Kconfig"
source "lib/Kconfig"
config NO_IOMEM
def_bool y

arch/score/Kconfig.debug

@ -1,29 +0,0 @@
# SPDX-License-Identifier: GPL-2.0
menu "Kernel hacking"
config TRACE_IRQFLAGS_SUPPORT
bool
default y
source "lib/Kconfig.debug"
config CMDLINE
string "Default kernel command string"
default ""
help
On some platforms, there is currently no way for the boot loader to
pass arguments to the kernel. For these platforms, you can supply
some command-line options at build time by entering them here. In
other cases you can specify kernel args so that you don't have
to set them up in board prom initialization routines.
config RUNTIME_DEBUG
bool "Enable run-time debugging"
depends on DEBUG_KERNEL
help
If you say Y here, some debugging macros will do run-time checking.
If you say N here, those macros will mostly turn to no-ops. See
include/asm-score/debug.h for debugging macros.
If unsure, say N.
endmenu

arch/score/Makefile

@ -1,44 +0,0 @@
#
# arch/score/Makefile
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
KBUILD_DEFCONFIG := spct6600_defconfig
CROSS_COMPILE := score-linux-
#
# CPU-dependent compiler/assembler options for optimization.
#
cflags-y += -G0 -pipe -mel -mnhwloop -D__SCOREEL__ \
-D__linux__ -ffunction-sections -ffreestanding
#
# Board-dependent options and extra files
#
KBUILD_AFLAGS += $(cflags-y)
KBUILD_CFLAGS += $(cflags-y)
KBUILD_AFLAGS_MODULE +=
KBUILD_CFLAGS_MODULE +=
LDFLAGS += --oformat elf32-littlescore
LDFLAGS_vmlinux += -G0 -static -nostdlib
head-y := arch/score/kernel/head.o
libs-y += arch/score/lib/
core-y += arch/score/kernel/ arch/score/mm/
boot := arch/score/boot
vmlinux.bin: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
archclean:
@$(MAKE) $(clean)=$(boot)
define archhelp
echo ' vmlinux.bin - Raw binary boot image'
echo
echo ' These will be default as appropriate for a configured platform.'
endef

arch/score/boot/Makefile

@ -1,15 +0,0 @@
#
# arch/score/boot/Makefile
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
targets := vmlinux.bin
$(obj)/vmlinux.bin: vmlinux FORCE
$(call if_changed,objcopy)
@echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
clean-files += vmlinux.bin

arch/score/configs/spct6600_defconfig

@ -1,84 +0,0 @@
CONFIG_HZ_100=y
CONFIG_PREEMPT_VOLUNTARY=y
CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_LOG_BUF_SHIFT=12
CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
# CONFIG_HOTPLUG is not set
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_FORCE_LOAD=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_BINFMT_MISC=y
CONFIG_NET=y
CONFIG_UNIX=y
CONFIG_NET_KEY=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_ARPD=y
# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_CRYPTOLOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=1
# CONFIG_MISC_DEVICES is not set
CONFIG_NETDEVICES=y
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
CONFIG_SERIAL_NONSTANDARD=y
CONFIG_STALDRV=y
# CONFIG_HW_RANDOM is not set
CONFIG_RAW_DRIVER=y
CONFIG_MAX_RAW_DEVS=8192
# CONFIG_HWMON is not set
# CONFIG_VGA_CONSOLE is not set
# CONFIG_HID_SUPPORT is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_EXT3_FS_POSIX_ACL=y
CONFIG_AUTOFS_FS=y
CONFIG_AUTOFS4_FS=y
CONFIG_PROC_KCORE=y
# CONFIG_PROC_PAGE_MONITOR is not set
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=y
CONFIG_NFSD=y
CONFIG_NFSD_V3_ACL=y
CONFIG_NFSD_V4=y
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
CONFIG_CRYPTO_NULL=y
CONFIG_CRYPTO_CRYPTD=y
CONFIG_CRYPTO_SEQIV=y
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_MICHAEL_MIC=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC_CCITT=y
CONFIG_CRC16=y
CONFIG_LIBCRC32C=y

arch/score/include/asm/Kbuild

@ -1,13 +0,0 @@
generic-y += barrier.h
generic-y += current.h
generic-y += extable.h
generic-y += irq_work.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
generic-y += preempt.h
generic-y += sections.h
generic-y += trace_clock.h
generic-y += xor.h
generic-y += serial.h
generic-y += word-at-a-time.h
generic-y += kprobes.h

arch/score/include/asm/asm-offsets.h

@ -1 +0,0 @@
#include <generated/asm-offsets.h>

arch/score/include/asm/asmmacro.h

@ -1,162 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SCORE_ASMMACRO_H
#define _ASM_SCORE_ASMMACRO_H
#include <asm/asm-offsets.h>
#ifdef __ASSEMBLY__
.macro SAVE_ALL
mfcr r30, cr0
mv r31, r0
nop
/* if UMs == 1, change stack. */
slli.c r30, r30, 28
bpl 1f
la r31, kernelsp
lw r31, [r31]
1:
mv r30, r0
addri r0, r31, -PT_SIZE
sw r30, [r0, PT_R0]
.set r1
sw r1, [r0, PT_R1]
.set nor1
sw r2, [r0, PT_R2]
sw r3, [r0, PT_R3]
sw r4, [r0, PT_R4]
sw r5, [r0, PT_R5]
sw r6, [r0, PT_R6]
sw r7, [r0, PT_R7]
sw r8, [r0, PT_R8]
sw r9, [r0, PT_R9]
sw r10, [r0, PT_R10]
sw r11, [r0, PT_R11]
sw r12, [r0, PT_R12]
sw r13, [r0, PT_R13]
sw r14, [r0, PT_R14]
sw r15, [r0, PT_R15]
sw r16, [r0, PT_R16]
sw r17, [r0, PT_R17]
sw r18, [r0, PT_R18]
sw r19, [r0, PT_R19]
sw r20, [r0, PT_R20]
sw r21, [r0, PT_R21]
sw r22, [r0, PT_R22]
sw r23, [r0, PT_R23]
sw r24, [r0, PT_R24]
sw r25, [r0, PT_R25]
sw r25, [r0, PT_R25]
sw r26, [r0, PT_R26]
sw r27, [r0, PT_R27]
sw r28, [r0, PT_R28]
sw r29, [r0, PT_R29]
orri r28, r0, 0x1fff
li r31, 0x00001fff
xor r28, r28, r31
mfcehl r30, r31
sw r30, [r0, PT_CEH]
sw r31, [r0, PT_CEL]
mfcr r31, cr0
sw r31, [r0, PT_PSR]
mfcr r31, cr1
sw r31, [r0, PT_CONDITION]
mfcr r31, cr2
sw r31, [r0, PT_ECR]
mfcr r31, cr5
srli r31, r31, 1
slli r31, r31, 1
sw r31, [r0, PT_EPC]
.endm
.macro RESTORE_ALL_AND_RET
mfcr r30, cr0
srli r30, r30, 1
slli r30, r30, 1
mtcr r30, cr0
nop
nop
nop
nop
nop
.set r1
ldis r1, 0x00ff
and r30, r30, r1
not r1, r1
lw r31, [r0, PT_PSR]
and r31, r31, r1
.set nor1
or r31, r31, r30
mtcr r31, cr0
nop
nop
nop
nop
nop
lw r30, [r0, PT_CONDITION]
mtcr r30, cr1
nop
nop
nop
nop
nop
lw r30, [r0, PT_CEH]
lw r31, [r0, PT_CEL]
mtcehl r30, r31
.set r1
lw r1, [r0, PT_R1]
.set nor1
lw r2, [r0, PT_R2]
lw r3, [r0, PT_R3]
lw r4, [r0, PT_R4]
lw r5, [r0, PT_R5]
lw r6, [r0, PT_R6]
lw r7, [r0, PT_R7]
lw r8, [r0, PT_R8]
lw r9, [r0, PT_R9]
lw r10, [r0, PT_R10]
lw r11, [r0, PT_R11]
lw r12, [r0, PT_R12]
lw r13, [r0, PT_R13]
lw r14, [r0, PT_R14]
lw r15, [r0, PT_R15]
lw r16, [r0, PT_R16]
lw r17, [r0, PT_R17]
lw r18, [r0, PT_R18]
lw r19, [r0, PT_R19]
lw r20, [r0, PT_R20]
lw r21, [r0, PT_R21]
lw r22, [r0, PT_R22]
lw r23, [r0, PT_R23]
lw r24, [r0, PT_R24]
lw r25, [r0, PT_R25]
lw r26, [r0, PT_R26]
lw r27, [r0, PT_R27]
lw r28, [r0, PT_R28]
lw r29, [r0, PT_R29]
lw r30, [r0, PT_EPC]
lw r0, [r0, PT_R0]
mtcr r30, cr5
rte
.endm
#endif /* __ASSEMBLY__ */
#endif /* _ASM_SCORE_ASMMACRO_H */

arch/score/include/asm/atomic.h

@ -1,8 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SCORE_ATOMIC_H
#define _ASM_SCORE_ATOMIC_H
#include <asm/cmpxchg.h>
#include <asm-generic/atomic.h>
#endif /* _ASM_SCORE_ATOMIC_H */

arch/score/include/asm/bitops.h

@ -1,11 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SCORE_BITOPS_H
#define _ASM_SCORE_BITOPS_H
#include <asm/byteorder.h> /* swab32 */
#include <asm/barrier.h>
#include <asm-generic/bitops.h>
#include <asm-generic/bitops/__fls.h>
#endif /* _ASM_SCORE_BITOPS_H */

arch/score/include/asm/bug.h

@ -1,18 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SCORE_BUG_H
#define _ASM_SCORE_BUG_H
#include <asm-generic/bug.h>
struct pt_regs;
extern void __die(const char *, struct pt_regs *, const char *,
const char *, unsigned long) __attribute__((noreturn));
extern void __die_if_kernel(const char *, struct pt_regs *, const char *,
const char *, unsigned long);
#define die(msg, regs) \
__die(msg, regs, __FILE__ ":", __func__, __LINE__)
#define die_if_kernel(msg, regs) \
__die_if_kernel(msg, regs, __FILE__ ":", __func__, __LINE__)
#endif /* _ASM_SCORE_BUG_H */

arch/score/include/asm/bugs.h

@ -1,7 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SCORE_BUGS_H
#define _ASM_SCORE_BUGS_H
#include <asm-generic/bugs.h>
#endif /* _ASM_SCORE_BUGS_H */

arch/score/include/asm/cache.h

@ -1,8 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SCORE_CACHE_H
#define _ASM_SCORE_CACHE_H
#define L1_CACHE_SHIFT 4
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
#endif /* _ASM_SCORE_CACHE_H */

arch/score/include/asm/cacheflush.h

@ -1,49 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SCORE_CACHEFLUSH_H
#define _ASM_SCORE_CACHEFLUSH_H
/* Keep includes the same across arches. */
#include <linux/mm.h>
extern void flush_cache_all(void);
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma,
unsigned long page, unsigned long pfn);
extern void flush_cache_sigtramp(unsigned long addr);
extern void flush_icache_all(void);
extern void flush_icache_range(unsigned long start, unsigned long end);
extern void flush_dcache_range(unsigned long start, unsigned long end);
extern void flush_dcache_page(struct page *page);
#define PG_dcache_dirty PG_arch_1
#define flush_cache_dup_mm(mm) do {} while (0)
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#define flush_dcache_mmap_lock(mapping) do {} while (0)
#define flush_dcache_mmap_unlock(mapping) do {} while (0)
#define flush_cache_vmap(start, end) do {} while (0)
#define flush_cache_vunmap(start, end) do {} while (0)
static inline void flush_icache_page(struct vm_area_struct *vma,
struct page *page)
{
if (vma->vm_flags & VM_EXEC) {
void *v = page_address(page);
flush_icache_range((unsigned long) v,
(unsigned long) v + PAGE_SIZE);
}
}
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
memcpy(dst, src, len)
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
memcpy(dst, src, len); \
if ((vma->vm_flags & VM_EXEC)) \
flush_cache_page(vma, vaddr, page_to_pfn(page));\
} while (0)
#endif /* _ASM_SCORE_CACHEFLUSH_H */

arch/score/include/asm/checksum.h

@ -1,244 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SCORE_CHECKSUM_H
#define _ASM_SCORE_CHECKSUM_H
#include <linux/in6.h>
#include <linux/uaccess.h>
/*
* computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
*
* returns a 32-bit number suitable for feeding into itself
* or csum_tcpudp_magic
*
* this function must be called with even lengths, except
* for the last fragment, which may be odd
*
* it's best to have buff aligned on a 32-bit boundary
*/
unsigned int csum_partial(const void *buff, int len, __wsum sum);
unsigned int csum_partial_copy_from_user(const char *src, char *dst, int len,
unsigned int sum, int *csum_err);
unsigned int csum_partial_copy(const char *src, char *dst,
int len, unsigned int sum);
/*
* this is a new version of the above that records errors it finds in *errp,
* but continues and zeros the rest of the buffer.
*/
/*
* Copy and checksum to user
*/
#define HAVE_CSUM_COPY_USER
static inline
__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
__wsum sum, int *err_ptr)
{
sum = csum_partial(src, len, sum);
if (copy_to_user(dst, src, len)) {
*err_ptr = -EFAULT;
return (__force __wsum) -1; /* invalid checksum */
}
return sum;
}
#define csum_partial_copy_nocheck csum_partial_copy
/*
* Fold a partial checksum without adding pseudo headers
*/
static inline __sum16 csum_fold(__wsum sum)
{
/* the while loop is unnecessary really, it's always enough with two
iterations */
__asm__ __volatile__(
".set volatile\n\t"
".set\tr1\n\t"
"slli\tr1,%0, 16\n\t"
"add\t%0,%0, r1\n\t"
"cmp.c\tr1, %0\n\t"
"srli\t%0, %0, 16\n\t"
"bleu\t1f\n\t"
"addi\t%0, 0x1\n\t"
"1:ldi\tr30, 0xffff\n\t"
"xor\t%0, %0, r30\n\t"
"slli\t%0, %0, 16\n\t"
"srli\t%0, %0, 16\n\t"
".set\tnor1\n\t"
".set optimize\n\t"
: "=r" (sum)
: "0" (sum));
return sum;
}
/*
* This is a version of ip_compute_csum() optimized for IP headers,
* which always checksum on 4 octet boundaries.
*
* By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by
* Arnt Gulbrandsen.
*/
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
unsigned int sum;
unsigned long dummy;
__asm__ __volatile__(
".set volatile\n\t"
".set\tnor1\n\t"
"lw\t%0, [%1]\n\t"
"subri\t%2, %2, 4\n\t"
"slli\t%2, %2, 2\n\t"
"lw\t%3, [%1, 4]\n\t"
"add\t%2, %2, %1\n\t"
"add\t%0, %0, %3\n\t"
"cmp.c\t%3, %0\n\t"
"lw\t%3, [%1, 8]\n\t"
"bleu\t1f\n\t"
"addi\t%0, 0x1\n\t"
"1:\n\t"
"add\t%0, %0, %3\n\t"
"cmp.c\t%3, %0\n\t"
"lw\t%3, [%1, 12]\n\t"
"bleu\t1f\n\t"
"addi\t%0, 0x1\n\t"
"1:add\t%0, %0, %3\n\t"
"cmp.c\t%3, %0\n\t"
"bleu\t1f\n\t"
"addi\t%0, 0x1\n"
"1:\tlw\t%3, [%1, 16]\n\t"
"addi\t%1, 4\n\t"
"add\t%0, %0, %3\n\t"
"cmp.c\t%3, %0\n\t"
"bleu\t2f\n\t"
"addi\t%0, 0x1\n"
"2:cmp.c\t%2, %1\n\t"
"bne\t1b\n\t"
".set\tr1\n\t"
".set optimize\n\t"
: "=&r" (sum), "=&r" (iph), "=&r" (ihl), "=&r" (dummy)
: "1" (iph), "2" (ihl));
return csum_fold(sum);
}
static inline __wsum
csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
__u8 proto, __wsum sum)
{
unsigned long tmp = (len + proto) << 8;
__asm__ __volatile__(
".set volatile\n\t"
"add\t%0, %0, %2\n\t"
"cmp.c\t%2, %0\n\t"
"bleu\t1f\n\t"
"addi\t%0, 0x1\n\t"
"1:\n\t"
"add\t%0, %0, %3\n\t"
"cmp.c\t%3, %0\n\t"
"bleu\t1f\n\t"
"addi\t%0, 0x1\n\t"
"1:\n\t"
"add\t%0, %0, %4\n\t"
"cmp.c\t%4, %0\n\t"
"bleu\t1f\n\t"
"addi\t%0, 0x1\n\t"
"1:\n\t"
".set optimize\n\t"
: "=r" (sum)
: "0" (daddr), "r"(saddr),
"r" (tmp),
"r" (sum));
return sum;
}
/*
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented
*/
static inline __sum16
csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
__u8 proto, __wsum sum)
{
return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
/*
* this routine is used for miscellaneous IP-like checksums, mainly
* in icmp.c
*/
static inline unsigned short ip_compute_csum(const void *buff, int len)
{
return csum_fold(csum_partial(buff, len, 0));
}
#define _HAVE_ARCH_IPV6_CSUM
static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
const struct in6_addr *daddr,
__u32 len, __u8 proto, __wsum sum)
{
__asm__ __volatile__(
".set\tvolatile\t\t\t# csum_ipv6_magic\n\t"
"add\t%0, %0, %5\t\t\t# proto (long in network byte order)\n\t"
"cmp.c\t%5, %0\n\t"
"bleu 1f\n\t"
"addi\t%0, 0x1\n\t"
"1:add\t%0, %0, %6\t\t\t# csum\n\t"
"cmp.c\t%6, %0\n\t"
"lw\t%1, [%2, 0]\t\t\t# four words source address\n\t"
"bleu 1f\n\t"
"addi\t%0, 0x1\n\t"
"1:add\t%0, %0, %1\n\t"
"cmp.c\t%1, %0\n\t"
"1:lw\t%1, [%2, 4]\n\t"
"bleu 1f\n\t"
"addi\t%0, 0x1\n\t"
"1:add\t%0, %0, %1\n\t"
"cmp.c\t%1, %0\n\t"
"lw\t%1, [%2,8]\n\t"
"bleu 1f\n\t"
"addi\t%0, 0x1\n\t"
"1:add\t%0, %0, %1\n\t"
"cmp.c\t%1, %0\n\t"
"lw\t%1, [%2, 12]\n\t"
"bleu 1f\n\t"
"addi\t%0, 0x1\n\t"
"1:add\t%0, %0,%1\n\t"
"cmp.c\t%1, %0\n\t"
"lw\t%1, [%3, 0]\n\t"
"bleu 1f\n\t"
"addi\t%0, 0x1\n\t"
"1:add\t%0, %0, %1\n\t"
"cmp.c\t%1, %0\n\t"
"lw\t%1, [%3, 4]\n\t"
"bleu 1f\n\t"
"addi\t%0, 0x1\n\t"
"1:add\t%0, %0, %1\n\t"
"cmp.c\t%1, %0\n\t"
"lw\t%1, [%3, 8]\n\t"
"bleu 1f\n\t"
"addi\t%0, 0x1\n\t"
"1:add\t%0, %0, %1\n\t"
"cmp.c\t%1, %0\n\t"
"lw\t%1, [%3, 12]\n\t"
"bleu 1f\n\t"
"addi\t%0, 0x1\n\t"
"1:add\t%0, %0, %1\n\t"
"cmp.c\t%1, %0\n\t"
"bleu 1f\n\t"
"addi\t%0, 0x1\n\t"
"1:\n\t"
".set\toptimize"
: "=r" (sum), "=r" (proto)
: "r" (saddr), "r" (daddr),
"0" (htonl(len)), "1" (htonl(proto)), "r" (sum));
return csum_fold(sum);
}
#endif /* _ASM_SCORE_CHECKSUM_H */

arch/score/include/asm/cmpxchg.h

@ -1,48 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SCORE_CMPXCHG_H
#define _ASM_SCORE_CMPXCHG_H
#include <linux/irqflags.h>
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))
static inline
unsigned long __xchg(volatile unsigned long *m, unsigned long val)
{
unsigned long retval;
unsigned long flags;
local_irq_save(flags);
retval = *m;
*m = val;
local_irq_restore(flags);
return retval;
}
#define xchg(ptr, v) \
((__typeof__(*(ptr))) __xchg((unsigned long *)(ptr), \
(unsigned long)(v)))
static inline unsigned long __cmpxchg(volatile unsigned long *m,
unsigned long old, unsigned long new)
{
unsigned long retval;
unsigned long flags;
local_irq_save(flags);
retval = *m;
if (retval == old)
*m = new;
local_irq_restore(flags);
return retval;
}
#define cmpxchg(ptr, o, n) \
((__typeof__(*(ptr))) __cmpxchg((unsigned long *)(ptr), \
(unsigned long)(o), \
(unsigned long)(n)))
#include <asm-generic/cmpxchg-local.h>
#endif /* _ASM_SCORE_CMPXCHG_H */

arch/score/include/asm/delay.h

@ -1,29 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SCORE_DELAY_H
#define _ASM_SCORE_DELAY_H
#include <asm-generic/param.h>
static inline void __delay(unsigned long loops)
{
/* 3 cycles per loop. */
__asm__ __volatile__ (
"1:\tsubi\t%0, 3\n\t"
"cmpz.c\t%0\n\t"
"ble\t1b\n\t"
: "=r" (loops)
: "0" (loops));
}
static inline void __udelay(unsigned long usecs)
{
unsigned long loops_per_usec;
loops_per_usec = (loops_per_jiffy * HZ) / 1000000;
__delay(usecs * loops_per_usec);
}
#define udelay(usecs) __udelay(usecs)
#endif /* _ASM_SCORE_DELAY_H */

arch/score/include/asm/device.h

@ -1,7 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SCORE_DEVICE_H
#define _ASM_SCORE_DEVICE_H
#include <asm-generic/device.h>
#endif /* _ASM_SCORE_DEVICE_H */

arch/score/include/asm/div64.h

@ -1,7 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SCORE_DIV64_H
#define _ASM_SCORE_DIV64_H
#include <asm-generic/div64.h>
#endif /* _ASM_SCORE_DIV64_H */

arch/score/include/asm/dma.h

@ -1,9 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SCORE_DMA_H
#define _ASM_SCORE_DMA_H
#include <asm/io.h>
#define MAX_DMA_ADDRESS (0)
#endif /* _ASM_SCORE_DMA_H */

arch/score/include/asm/elf.h

@ -1,98 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SCORE_ELF_H
#define _ASM_SCORE_ELF_H
#include <linux/ptrace.h>
#define EM_SCORE7 135
/* Relocation types. */
#define R_SCORE_NONE 0
#define R_SCORE_HI16 1
#define R_SCORE_LO16 2
#define R_SCORE_BCMP 3
#define R_SCORE_24 4
#define R_SCORE_PC19 5
#define R_SCORE16_11 6
#define R_SCORE16_PC8 7
#define R_SCORE_ABS32 8
#define R_SCORE_ABS16 9
#define R_SCORE_DUMMY2 10
#define R_SCORE_GP15 11
#define R_SCORE_GNU_VTINHERIT 12
#define R_SCORE_GNU_VTENTRY 13
#define R_SCORE_GOT15 14
#define R_SCORE_GOT_LO16 15
#define R_SCORE_CALL15 16
#define R_SCORE_GPREL32 17
#define R_SCORE_REL32 18
#define R_SCORE_DUMMY_HI16 19
#define R_SCORE_IMM30 20
#define R_SCORE_IMM32 21
/* ELF register definitions */
typedef unsigned long elf_greg_t;
#define ELF_NGREG (sizeof(struct pt_regs) / sizeof(elf_greg_t))
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
/* Score does not have fp regs. */
typedef double elf_fpreg_t;
typedef elf_fpreg_t elf_fpregset_t;
#define elf_check_arch(x) ((x)->e_machine == EM_SCORE7)
/*
* These are used to set parameters in the core dumps.
*/
#define ELF_CLASS ELFCLASS32
/*
* These are used to set parameters in the core dumps.
*/
#define ELF_DATA ELFDATA2LSB
#define ELF_ARCH EM_SCORE7
struct task_struct;
struct pt_regs;
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE PAGE_SIZE
/* This yields a mask that user programs can use to figure out what
instruction set this cpu supports. This could be done in userspace,
but it's not easy, and we've already done it here. */
#define ELF_HWCAP (0)
/* This yields a string that ld.so will use to load implementation
specific libraries for optimization. This is more specific in
intent than poking at uname or /proc/cpuinfo.
For the moment, we have only optimizations for the Intel generations,
but that could change... */
#define ELF_PLATFORM (NULL)
#define ELF_PLAT_INIT(_r, load_addr) \
do { \
_r->regs[1] = _r->regs[2] = _r->regs[3] = _r->regs[4] = 0; \
_r->regs[5] = _r->regs[6] = _r->regs[7] = _r->regs[8] = 0; \
_r->regs[9] = _r->regs[10] = _r->regs[11] = _r->regs[12] = 0; \
_r->regs[13] = _r->regs[14] = _r->regs[15] = _r->regs[16] = 0; \
_r->regs[17] = _r->regs[18] = _r->regs[19] = _r->regs[20] = 0; \
_r->regs[21] = _r->regs[22] = _r->regs[23] = _r->regs[24] = 0; \
_r->regs[25] = _r->regs[26] = _r->regs[27] = _r->regs[28] = 0; \
_r->regs[30] = _r->regs[31] = 0; \
} while (0)
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
use of this is to invoke "./ld.so someprog" to test out a new version of
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. */
#ifndef ELF_ET_DYN_BASE
#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
#endif
#endif /* _ASM_SCORE_ELF_H */

arch/score/include/asm/emergency-restart.h

@ -1,7 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SCORE_EMERGENCY_RESTART_H
#define _ASM_SCORE_EMERGENCY_RESTART_H
#include <asm-generic/emergency-restart.h>
#endif /* _ASM_SCORE_EMERGENCY_RESTART_H */

arch/score/include/asm/exec.h

@ -1,7 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SCORE_EXEC_H
#define _ASM_SCORE_EXEC_H
extern unsigned long arch_align_stack(unsigned long sp);
#endif /* _ASM_SCORE_EXEC_H */

arch/score/include/asm/fixmap.h

@ -1,83 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SCORE_FIXMAP_H
#define _ASM_SCORE_FIXMAP_H
#include <asm/page.h>
#define PHY_RAM_BASE 0x00000000
#define PHY_IO_BASE 0x10000000
#define VIRTUAL_RAM_BASE 0xa0000000
#define VIRTUAL_IO_BASE 0xb0000000
#define RAM_SPACE_SIZE 0x10000000
#define IO_SPACE_SIZE 0x10000000
/* Kernel unmapped, cached 512MB */
#define KSEG1 0xa0000000
/*
* Here we define all the compile-time 'special' virtual
* addresses. The point is to have a constant address at
* compile time, but to set the physical address only
* in the boot process. We allocate these special addresses
* from the end of virtual memory (0xfffff000) backwards.
* Also this lets us do fail-safe vmalloc(), we
* can guarantee that these special addresses and
* vmalloc()-ed addresses never overlap.
*
* these 'compile-time allocated' memory buffers are
* fixed-size 4k pages. (or larger if used with an increment
* highger than 1) use fixmap_set(idx,phys) to associate
* physical memory with fixmap indices.
*
* TLB entries of such buffers will not be flushed across
* task switches.
*/
/*
* on UP currently we will have no trace of the fixmap mechanizm,
* no page table allocations, etc. This might change in the
* future, say framebuffers for the console driver(s) could be
* fix-mapped?
*/
enum fixed_addresses {
#define FIX_N_COLOURS 8
FIX_CMAP_BEGIN,
FIX_CMAP_END = FIX_CMAP_BEGIN + FIX_N_COLOURS,
__end_of_fixed_addresses
};
/*
* used by vmalloc.c.
*
* Leave one empty page between vmalloc'ed areas and
* the start of the fixmap, and leave one page empty
* at the top of mem..
*/
#define FIXADDR_TOP ((unsigned long)(long)(int)0xfefe0000)
#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
#define __virt_to_fix(x) \
((FIXADDR_TOP - ((x) & PAGE_MASK)) >> PAGE_SHIFT)
extern void __this_fixmap_does_not_exist(void);
/*
* 'index to address' translation. If anyone tries to use the idx
* directly without tranlation, we catch the bug with a NULL-deference
* kernel oops. Illegal ranges of incoming indices are caught too.
*/
static inline unsigned long fix_to_virt(const unsigned int idx)
{
return __fix_to_virt(idx);
}
static inline unsigned long virt_to_fix(const unsigned long vaddr)
{
return __virt_to_fix(vaddr);
}
#endif /* _ASM_SCORE_FIXMAP_H */

arch/score/include/asm/ftrace.h

@ -1,4 +0,0 @@
#ifndef _ASM_SCORE_FTRACE_H
#define _ASM_SCORE_FTRACE_H
#endif /* _ASM_SCORE_FTRACE_H */

arch/score/include/asm/futex.h

@ -1,7 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SCORE_FUTEX_H
#define _ASM_SCORE_FUTEX_H
#include <asm-generic/futex.h>
#endif /* _ASM_SCORE_FUTEX_H */

arch/score/include/asm/hardirq.h

@ -1,7 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SCORE_HARDIRQ_H
#define _ASM_SCORE_HARDIRQ_H
#include <asm-generic/hardirq.h>
#endif /* _ASM_SCORE_HARDIRQ_H */

arch/score/include/asm/hw_irq.h

@ -1,4 +0,0 @@
#ifndef _ASM_SCORE_HW_IRQ_H
#define _ASM_SCORE_HW_IRQ_H
#endif /* _ASM_SCORE_HW_IRQ_H */

arch/score/include/asm/io.h

@ -1,9 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SCORE_IO_H
#define _ASM_SCORE_IO_H
#include <asm-generic/io.h>
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
#endif /* _ASM_SCORE_IO_H */

arch/score/include/asm/irq.h

@ -1,26 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SCORE_IRQ_H
#define _ASM_SCORE_IRQ_H
#define EXCEPTION_VECTOR_BASE_ADDR 0xa0000000
#define VECTOR_ADDRESS_OFFSET_MODE4 0
#define VECTOR_ADDRESS_OFFSET_MODE16 1
#define DEBUG_VECTOR_SIZE (0x4)
#define DEBUG_VECTOR_BASE_ADDR ((EXCEPTION_VECTOR_BASE_ADDR) + 0x1fc)
#define GENERAL_VECTOR_SIZE (0x10)
#define GENERAL_VECTOR_BASE_ADDR ((EXCEPTION_VECTOR_BASE_ADDR) + 0x200)
#define NR_IRQS 64
#define IRQ_VECTOR_SIZE (0x10)
#define IRQ_VECTOR_BASE_ADDR ((EXCEPTION_VECTOR_BASE_ADDR) + 0x210)
#define IRQ_VECTOR_END_ADDR ((EXCEPTION_VECTOR_BASE_ADDR) + 0x5f0)
#define irq_canonicalize(irq) (irq)
#define IRQ_TIMER (7) /* Timer IRQ number of SPCT6600 */
extern void interrupt_exception_vector(void);
#endif /* _ASM_SCORE_IRQ_H */

arch/score/include/asm/irq_regs.h

@ -1,12 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SCORE_IRQ_REGS_H
#define _ASM_SCORE_IRQ_REGS_H
#include <linux/thread_info.h>
static inline struct pt_regs *get_irq_regs(void)
{
return current_thread_info()->regs;
}
#endif /* _ASM_SCORE_IRQ_REGS_H */

arch/score/include/asm/irqflags.h

@ -1,121 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SCORE_IRQFLAGS_H
#define _ASM_SCORE_IRQFLAGS_H
#ifndef __ASSEMBLY__
#include <linux/types.h>
static inline unsigned long arch_local_save_flags(void)
{
unsigned long flags;
asm volatile(
" mfcr r8, cr0 \n"
" nop \n"
" nop \n"
" mv %0, r8 \n"
" nop \n"
" nop \n"
" nop \n"
" nop \n"
" nop \n"
" ldi r9, 0x1 \n"
" and %0, %0, r9 \n"
: "=r" (flags)
:
: "r8", "r9");
return flags;
}
static inline unsigned long arch_local_irq_save(void)
{
unsigned long flags;
asm volatile(
" mfcr r8, cr0 \n"
" li r9, 0xfffffffe \n"
" nop \n"
" mv %0, r8 \n"
" and r8, r8, r9 \n"
" mtcr r8, cr0 \n"
" nop \n"
" nop \n"
" nop \n"
" nop \n"
" nop \n"
: "=r" (flags)
:
: "r8", "r9", "memory");
return flags;
}
static inline void arch_local_irq_restore(unsigned long flags)
{
asm volatile(
" mfcr r8, cr0 \n"
" ldi r9, 0x1 \n"
" and %0, %0, r9 \n"
" or r8, r8, %0 \n"
" mtcr r8, cr0 \n"
" nop \n"
" nop \n"
" nop \n"
" nop \n"
" nop \n"
:
: "r"(flags)
: "r8", "r9", "memory");
}
static inline void arch_local_irq_enable(void)
{
asm volatile(
" mfcr r8,cr0 \n"
" nop \n"
" nop \n"
" ori r8,0x1 \n"
" mtcr r8,cr0 \n"
" nop \n"
" nop \n"
" nop \n"
" nop \n"
" nop \n"
:
:
: "r8", "memory");
}
static inline void arch_local_irq_disable(void)
{
asm volatile(
" mfcr r8,cr0 \n"
" nop \n"
" nop \n"
" srli r8,r8,1 \n"
" slli r8,r8,1 \n"
" mtcr r8,cr0 \n"
" nop \n"
" nop \n"
" nop \n"
" nop \n"
" nop \n"
:
:
: "r8", "memory");
}
static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
return !(flags & 1);
}
static inline bool arch_irqs_disabled(void)
{
return arch_irqs_disabled_flags(arch_local_save_flags());
}
#endif /* __ASSEMBLY__ */
#endif /* _ASM_SCORE_IRQFLAGS_H */

arch/score/include/asm/kdebug.h

@ -1,7 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SCORE_KDEBUG_H
#define _ASM_SCORE_KDEBUG_H
#include <asm-generic/kdebug.h>
#endif /* _ASM_SCORE_KDEBUG_H */

arch/score/include/asm/kmap_types.h

@ -1,7 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SCORE_KMAP_TYPES_H
#define _ASM_SCORE_KMAP_TYPES_H
#include <asm-generic/kmap_types.h>
#endif /* _ASM_SCORE_KMAP_TYPES_H */

arch/score/include/asm/linkage.h

@ -1,8 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SCORE_LINKAGE_H
#define _ASM_SCORE_LINKAGE_H
#define __ALIGN .align 2
#define __ALIGN_STR ".align 2"
#endif /* _ASM_SCORE_LINKAGE_H */

arch/score/include/asm/local.h

@ -1,7 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SCORE_LOCAL_H
#define _ASM_SCORE_LOCAL_H
#include <asm-generic/local.h>
#endif /* _ASM_SCORE_LOCAL_H */

arch/score/include/asm/local64.h

@ -1 +0,0 @@
#include <asm-generic/local64.h>

arch/score/include/asm/mmu.h

@ -1,7 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SCORE_MMU_H
#define _ASM_SCORE_MMU_H
typedef unsigned long mm_context_t;
#endif /* _ASM_SCORE_MMU_H */

arch/score/include/asm/mmu_context.h

@ -1,116 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SCORE_MMU_CONTEXT_H
#define _ASM_SCORE_MMU_CONTEXT_H
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/slab.h>
#include <asm-generic/mm_hooks.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/scoreregs.h>
/*
* For the fast tlb miss handlers, we keep a per cpu array of pointers
* to the current pgd for each processor. Also, the proc. id is stuffed
* into the context register.
*/
extern unsigned long asid_cache;
extern unsigned long pgd_current;
#define TLBMISS_HANDLER_SETUP_PGD(pgd) (pgd_current = (unsigned long)(pgd))
#define TLBMISS_HANDLER_SETUP() \
do { \
write_c0_context(0); \
TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir) \
} while (0)
/*
* All unused by hardware upper bits will be considered
* as a software asid extension.
*/
#define ASID_VERSION_MASK 0xfffff000
#define ASID_FIRST_VERSION 0x1000
/* PEVN --------- VPN ---------- --ASID--- -NA- */
/* binary: 0000 0000 0000 0000 0000 0000 0001 0000 */
/* binary: 0000 0000 0000 0000 0000 1111 1111 0000 */
#define ASID_INC 0x10
#define ASID_MASK 0xff0
static inline void enter_lazy_tlb(struct mm_struct *mm,
struct task_struct *tsk)
{}
static inline void
get_new_mmu_context(struct mm_struct *mm)
{
unsigned long asid = asid_cache + ASID_INC;
if (!(asid & ASID_MASK)) {
local_flush_tlb_all(); /* start new asid cycle */
if (!asid) /* fix version if needed */
asid = ASID_FIRST_VERSION;
}
mm->context = asid;
asid_cache = asid;
}
/*
* Initialize the context related info for a new mm_struct
* instance.
*/
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
mm->context = 0;
return 0;
}
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
unsigned long flags;
local_irq_save(flags);
if ((next->context ^ asid_cache) & ASID_VERSION_MASK)
get_new_mmu_context(next);
pevn_set(next->context);
TLBMISS_HANDLER_SETUP_PGD(next->pgd);
local_irq_restore(flags);
}
/*
* Destroy context related info for an mm_struct that is about
* to be put to rest.
*/
static inline void destroy_context(struct mm_struct *mm)
{}
static inline void
deactivate_mm(struct task_struct *task, struct mm_struct *mm)
{}
/*
* After we have set current->mm to a new value, this activates
* the context for the new mm so we see the new mappings.
*/
static inline void
activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
unsigned long flags;
local_irq_save(flags);
get_new_mmu_context(next);
pevn_set(next->context);
TLBMISS_HANDLER_SETUP_PGD(next->pgd);
local_irq_restore(flags);
}
#endif /* _ASM_SCORE_MMU_CONTEXT_H */

arch/score/include/asm/module.h

@ -1,36 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SCORE_MODULE_H
#define _ASM_SCORE_MODULE_H
#include <linux/list.h>
#include <asm/extable.h>
#include <asm-generic/module.h>
struct mod_arch_specific {
/* Data Bus Error exception tables */
struct list_head dbe_list;
const struct exception_table_entry *dbe_start;
const struct exception_table_entry *dbe_end;
};
typedef uint8_t Elf64_Byte; /* Type for a 8-bit quantity. */
/* Given an address, look for it in the exception tables. */
#ifdef CONFIG_MODULES
const struct exception_table_entry *search_module_dbetables(unsigned long addr);
#else
static inline const struct exception_table_entry
*search_module_dbetables(unsigned long addr)
{
return NULL;
}
#endif
#define MODULE_PROC_FAMILY "SCORE7"
#define MODULE_KERNEL_TYPE "32BIT "
#define MODULE_KERNEL_SMTC ""
#define MODULE_ARCH_VERMAGIC \
MODULE_PROC_FAMILY MODULE_KERNEL_TYPE MODULE_KERNEL_SMTC
#endif /* _ASM_SCORE_MODULE_H */

arch/score/include/asm/page.h

@ -1,94 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SCORE_PAGE_H
#define _ASM_SCORE_PAGE_H
#include <linux/pfn.h>
#include <linux/const.h>
/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT (12)
#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
#define PAGE_UP(addr) (((addr)+((PAGE_SIZE)-1))&(~((PAGE_SIZE)-1)))
#define PAGE_DOWN(addr) ((addr)&(~((PAGE_SIZE)-1)))
/* align addr on a size boundary - adjust address up/down if needed */
#define _ALIGN_UP(addr, size) (((addr)+((size)-1))&(~((size)-1)))
#define _ALIGN_DOWN(addr, size) ((addr)&(~((size)-1)))
/* align addr on a size boundary - adjust address up if needed */
#define _ALIGN(addr, size) _ALIGN_UP(addr, size)
/*
* PAGE_OFFSET -- the first address of the first page of memory. When not
* using MMU this corresponds to the first free page in physical memory (aligned
* on a page boundary).
*/
#define PAGE_OFFSET (0xA0000000UL)
#define clear_page(pgaddr) memset((pgaddr), 0, PAGE_SIZE)
#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
#define clear_user_page(pgaddr, vaddr, page) memset((pgaddr), 0, PAGE_SIZE)
#define copy_user_page(vto, vfrom, vaddr, topg) \
memcpy((vto), (vfrom), PAGE_SIZE)
/*
* These are used to make use of C type-checking..
*/
typedef struct { unsigned long pte; } pte_t; /* page table entry */
typedef struct { unsigned long pgd; } pgd_t; /* PGD table entry */
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct page *pgtable_t;
#define pte_val(x) ((x).pte)
#define pgd_val(x) ((x).pgd)
#define pgprot_val(x) ((x).pgprot)
#define __pte(x) ((pte_t) { (x) })
#define __pgd(x) ((pgd_t) { (x) })
#define __pgprot(x) ((pgprot_t) { (x) })
extern unsigned long max_low_pfn;
extern unsigned long min_low_pfn;
extern unsigned long max_pfn;
#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET)
#define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET))
#define phys_to_pfn(phys) (PFN_DOWN(phys))
#define pfn_to_phys(pfn) (PFN_PHYS(pfn))
#define virt_to_pfn(vaddr) (phys_to_pfn((__pa(vaddr))))
#define pfn_to_virt(pfn) __va(pfn_to_phys((pfn)))
#define virt_to_page(vaddr) (pfn_to_page(virt_to_pfn(vaddr)))
#define page_to_virt(page) (pfn_to_virt(page_to_pfn(page)))
#define page_to_phys(page) (pfn_to_phys(page_to_pfn(page)))
#define page_to_bus(page) (page_to_phys(page))
#define phys_to_page(paddr) (pfn_to_page(phys_to_pfn(paddr)))
#define pfn_valid(pfn) (((pfn) >= min_low_pfn) && ((pfn) < max_low_pfn))
#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
#endif /* __ASSEMBLY__ */
#define virt_addr_valid(vaddr) (pfn_valid(virt_to_pfn(vaddr)))
#endif /* __KERNEL__ */
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>
#endif /* _ASM_SCORE_PAGE_H */

arch/score/include/asm/pci.h

@ -1,4 +0,0 @@
#ifndef _ASM_SCORE_PCI_H
#define _ASM_SCORE_PCI_H
#endif /* _ASM_SCORE_PCI_H */

arch/score/include/asm/percpu.h

@ -1,7 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SCORE_PERCPU_H
#define _ASM_SCORE_PERCPU_H
#include <asm-generic/percpu.h>
#endif /* _ASM_SCORE_PERCPU_H */

arch/score/include/asm/pgalloc.h

@ -1,86 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SCORE_PGALLOC_H
#define _ASM_SCORE_PGALLOC_H
#include <linux/mm.h>
#include <linux/highmem.h>
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
pte_t *pte)
{
set_pmd(pmd, __pmd((unsigned long)pte));
}
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
pgtable_t pte)
{
set_pmd(pmd, __pmd((unsigned long)page_address(pte)));
}
#define pmd_pgtable(pmd) pmd_page(pmd)
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *ret, *init;
ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
if (ret) {
init = pgd_offset(&init_mm, 0UL);
pgd_init((unsigned long)ret);
memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
}
return ret;
}
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
free_pages((unsigned long)pgd, PGD_ORDER);
}
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
pte_t *pte;
pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_ZERO, PTE_ORDER);
return pte;
}
static inline struct page *pte_alloc_one(struct mm_struct *mm,
unsigned long address)
{
struct page *pte;
pte = alloc_pages(GFP_KERNEL, PTE_ORDER);
if (!pte)
return NULL;
clear_highpage(pte);
if (!pgtable_page_ctor(pte)) {
__free_page(pte);
return NULL;
}
return pte;
}
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
free_pages((unsigned long)pte, PTE_ORDER);
}
static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
pgtable_page_dtor(pte);
__free_pages(pte, PTE_ORDER);
}
#define __pte_free_tlb(tlb, pte, buf) \
do { \
pgtable_page_dtor(pte); \
tlb_remove_page((tlb), pte); \
} while (0)
#define check_pgt_cache() do {} while (0)
#endif /* _ASM_SCORE_PGALLOC_H */

arch/score/include/asm/pgtable-bits.h

@ -1,25 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SCORE_PGTABLE_BITS_H
#define _ASM_SCORE_PGTABLE_BITS_H
#define _PAGE_ACCESSED (1<<5) /* implemented in software */
#define _PAGE_READ (1<<6) /* implemented in software */
#define _PAGE_WRITE (1<<7) /* implemented in software */
#define _PAGE_PRESENT (1<<9) /* implemented in software */
#define _PAGE_MODIFIED (1<<10) /* implemented in software */
#define _PAGE_GLOBAL (1<<0)
#define _PAGE_VALID (1<<1)
#define _PAGE_SILENT_READ (1<<1) /* synonym */
#define _PAGE_DIRTY (1<<2) /* Write bit */
#define _PAGE_SILENT_WRITE (1<<2)
#define _PAGE_CACHE (1<<3) /* cache */
#define _CACHE_MASK (1<<3)
#define _PAGE_BUFFERABLE (1<<4) /*Fallow Spec. */
#define __READABLE (_PAGE_READ | _PAGE_SILENT_READ | _PAGE_ACCESSED)
#define __WRITEABLE (_PAGE_WRITE | _PAGE_SILENT_WRITE | _PAGE_MODIFIED)
#define _PAGE_CHG_MASK \
(PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_CACHE)
#endif /* _ASM_SCORE_PGTABLE_BITS_H */

arch/score/include/asm/pgtable.h

@ -1,270 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SCORE_PGTABLE_H
#define _ASM_SCORE_PGTABLE_H
#include <linux/const.h>
#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
#include <asm/fixmap.h>
#include <asm/setup.h>
#include <asm/pgtable-bits.h>
extern void load_pgd(unsigned long pg_dir);
extern pte_t invalid_pte_table[PAGE_SIZE/sizeof(pte_t)];
/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT 22
#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE - 1))
/*
* Entries per page directory level: we use two-level, so
* we don't really have any PUD/PMD directory physically.
*/
#define PGD_ORDER 0
#define PTE_ORDER 0
#define PTRS_PER_PGD 1024
#define PTRS_PER_PTE 1024
#define USER_PTRS_PER_PGD (0x80000000UL/PGDIR_SIZE)
#define FIRST_USER_ADDRESS 0UL
#define VMALLOC_START (0xc0000000UL)
#define PKMAP_BASE (0xfd000000UL)
#define VMALLOC_END (FIXADDR_START - 2*PAGE_SIZE)
#define pte_ERROR(e) \
printk(KERN_ERR "%s:%d: bad pte %08lx.\n", \
__FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", \
__FILE__, __LINE__, pgd_val(e))
/*
* Empty pgd/pmd entries point to the invalid_pte_table.
*/
static inline int pmd_none(pmd_t pmd)
{
return pmd_val(pmd) == (unsigned long) invalid_pte_table;
}
#define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK)
static inline int pmd_present(pmd_t pmd)
{
return pmd_val(pmd) != (unsigned long) invalid_pte_table;
}
static inline void pmd_clear(pmd_t *pmdp)
{
pmd_val(*pmdp) = ((unsigned long) invalid_pte_table);
}
#define pte_page(x) pfn_to_page(pte_pfn(x))
#define pte_pfn(x) ((unsigned long)((x).pte >> PAGE_SHIFT))
#define pfn_pte(pfn, prot) \
__pte(((unsigned long long)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define __pgd_offset(address) pgd_index(address)
#define __pud_offset(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define __pmd_offset(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
/* to find an entry in a page-table-directory */
#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
/* Find an entry in the third-level page table.. */
#define __pte_offset(address) \
(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset(dir, address) \
((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
#define pte_offset_kernel(dir, address) \
((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
#define pte_offset_map(dir, address) \
((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
#define pte_unmap(pte) ((void)(pte))
#define __pte_to_swp_entry(pte) \
((swp_entry_t) { pte_val(pte)})
#define __swp_entry_to_pte(x) ((pte_t) {(x).val})
#define pmd_phys(pmd) __pa((void *)pmd_val(pmd))
#define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
#define pte_clear(mm, addr, xp) \
do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
/*
* The "pgd_xxx()" functions here are trivial for a folded two-level
* setup: the pgd is never bad, and a pmd always exists (as it's folded
* into the pgd entry)
*/
#define pgd_present(pgd) (1)
#define pgd_none(pgd) (0)
#define pgd_bad(pgd) (0)
#define pgd_clear(pgdp) do { } while (0)
#define kern_addr_valid(addr) (1)
#define pmd_page_vaddr(pmd) pmd_val(pmd)
#define pte_none(pte) (!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_CACHE)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
_PAGE_CACHE)
#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_CACHE)
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_CACHE)
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
_PAGE_GLOBAL | _PAGE_CACHE)
#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
__WRITEABLE | _PAGE_GLOBAL & ~_PAGE_CACHE)
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY
#define __P010 PAGE_COPY
#define __P011 PAGE_COPY
#define __P100 PAGE_READONLY
#define __P101 PAGE_READONLY
#define __P110 PAGE_COPY
#define __P111 PAGE_COPY
#define __S000 PAGE_NONE
#define __S001 PAGE_READONLY
#define __S010 PAGE_SHARED
#define __S011 PAGE_SHARED
#define __S100 PAGE_READONLY
#define __S101 PAGE_READONLY
#define __S110 PAGE_SHARED
#define __S111 PAGE_SHARED
#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
unsigned long prot = pgprot_val(_prot);
prot = (prot & ~_CACHE_MASK);
return __pgprot(prot);
}
#define __swp_type(x) ((x).val & 0x1f)
#define __swp_offset(x) ((x).val >> 10)
#define __swp_entry(type, offset) ((swp_entry_t){(type) | ((offset) << 10)})
extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;
#define ZERO_PAGE(vaddr) \
(virt_to_page((void *)(empty_zero_page + \
(((unsigned long)(vaddr)) & zero_page_mask))))
#define pgtable_cache_init() do {} while (0)
#define arch_enter_lazy_cpu_mode() do {} while (0)
static inline int pte_write(pte_t pte)
{
return pte_val(pte) & _PAGE_WRITE;
}
static inline int pte_dirty(pte_t pte)
{
return pte_val(pte) & _PAGE_MODIFIED;
}
static inline int pte_young(pte_t pte)
{
return pte_val(pte) & _PAGE_ACCESSED;
}
#define pte_special(pte) (0)
static inline pte_t pte_wrprotect(pte_t pte)
{
pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
return pte;
}
static inline pte_t pte_mkclean(pte_t pte)
{
pte_val(pte) &= ~(_PAGE_MODIFIED|_PAGE_SILENT_WRITE);
return pte;
}
static inline pte_t pte_mkold(pte_t pte)
{
pte_val(pte) &= ~(_PAGE_ACCESSED|_PAGE_SILENT_READ);
return pte;
}
static inline pte_t pte_mkwrite(pte_t pte)
{
pte_val(pte) |= _PAGE_WRITE;
if (pte_val(pte) & _PAGE_MODIFIED)
pte_val(pte) |= _PAGE_SILENT_WRITE;
return pte;
}
static inline pte_t pte_mkdirty(pte_t pte)
{
pte_val(pte) |= _PAGE_MODIFIED;
if (pte_val(pte) & _PAGE_WRITE)
pte_val(pte) |= _PAGE_SILENT_WRITE;
return pte;
}
static inline pte_t pte_mkyoung(pte_t pte)
{
pte_val(pte) |= _PAGE_ACCESSED;
if (pte_val(pte) & _PAGE_READ)
pte_val(pte) |= _PAGE_SILENT_READ;
return pte;
}
#define set_pmd(pmdptr, pmdval) \
do { *(pmdptr) = (pmdval); } while (0)
#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
extern unsigned long pgd_current;
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init(void);
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
extern void __update_tlb(struct vm_area_struct *vma,
unsigned long address, pte_t pte);
extern void __update_cache(struct vm_area_struct *vma,
unsigned long address, pte_t pte);
static inline void update_mmu_cache(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{
pte_t pte = *ptep;
__update_tlb(vma, address, pte);
__update_cache(vma, address, pte);
}
#ifndef __ASSEMBLY__
#include <asm-generic/pgtable.h>
void setup_memory(void);
#endif /* __ASSEMBLY__ */
#endif /* _ASM_SCORE_PGTABLE_H */

arch/score/include/asm/processor.h

@ -1,104 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SCORE_PROCESSOR_H
#define _ASM_SCORE_PROCESSOR_H
#include <linux/cpumask.h>
#include <linux/threads.h>
#include <asm/segment.h>
struct task_struct;
/*
* System setup and hardware flags..
*/
extern void (*cpu_wait)(void);
extern void start_thread(struct pt_regs *regs,
unsigned long pc, unsigned long sp);
extern unsigned long get_wchan(struct task_struct *p);
/*
* Return current * instruction pointer ("program counter").
*/
#define current_text_addr() ({ __label__ _l; _l: &&_l; })
#define cpu_relax() barrier()
#define release_thread(thread) do {} while (0)
/*
* User space process size: 2GB. This is hardcoded into a few places,
* so don't change it unless you know what you are doing.
*/
#define TASK_SIZE 0x7fff8000UL
/*
* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
*/
#define TASK_UNMAPPED_BASE ((TASK_SIZE / 3) & ~(PAGE_SIZE))
#ifdef __KERNEL__
#define STACK_TOP TASK_SIZE
#define STACK_TOP_MAX TASK_SIZE
#endif
/*
* If you change thread_struct remember to change the #defines below too!
*/
struct thread_struct {
unsigned long reg0, reg2, reg3;
unsigned long reg12, reg13, reg14, reg15, reg16;
unsigned long reg17, reg18, reg19, reg20, reg21;
unsigned long cp0_psr;
unsigned long cp0_ema; /* Last user fault */
unsigned long cp0_badvaddr; /* Last user fault */
unsigned long cp0_baduaddr; /* Last kernel fault accessing USEG */
unsigned long error_code;
unsigned long trap_no;
unsigned long mflags;
unsigned long reg29;
unsigned long single_step;
unsigned long ss_nextcnt;
unsigned long insn1_type;
unsigned long addr1;
unsigned long insn1;
unsigned long insn2_type;
unsigned long addr2;
unsigned long insn2;
mm_segment_t current_ds;
};
#define INIT_THREAD { \
.reg0 = 0, \
.reg2 = 0, \
.reg3 = 0, \
.reg12 = 0, \
.reg13 = 0, \
.reg14 = 0, \
.reg15 = 0, \
.reg16 = 0, \
.reg17 = 0, \
.reg18 = 0, \
.reg19 = 0, \
.reg20 = 0, \
.reg21 = 0, \
.cp0_psr = 0, \
.error_code = 0, \
.trap_no = 0, \
}
#define kstk_tos(tsk) \
((unsigned long)task_stack_page(tsk) + THREAD_SIZE - 32)
#define task_pt_regs(tsk) ((struct pt_regs *)kstk_tos(tsk) - 1)
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->cp0_epc)
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[29])
#endif /* _ASM_SCORE_PROCESSOR_H */

arch/score/include/asm/ptrace.h

@ -1,26 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SCORE_PTRACE_H
#define _ASM_SCORE_PTRACE_H
#include <uapi/asm/ptrace.h>
struct task_struct;
/*
* Does the process account for user or for system time?
*/
#define user_mode(regs) ((regs->cp0_psr & 8) == 8)
#define instruction_pointer(regs) ((unsigned long)(regs)->cp0_epc)
#define profile_pc(regs) instruction_pointer(regs)
#define user_stack_pointer(r) ((unsigned long)(r)->regs[0])
extern void do_syscall_trace(struct pt_regs *regs, int entryexit);
extern int read_tsk_long(struct task_struct *, unsigned long, unsigned long *);
extern int read_tsk_short(struct task_struct *, unsigned long,
unsigned short *);
#define arch_has_single_step() (1)
#endif /* _ASM_SCORE_PTRACE_H */

arch/score/include/asm/scoreregs.h

@ -1,52 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SCORE_SCOREREGS_H
#define _ASM_SCORE_SCOREREGS_H
#include <linux/linkage.h>
/* TIMER register */
#define TIME0BASE 0x96080000
#define P_TIMER0_CTRL (TIME0BASE + 0x00)
#define P_TIMER0_CPP_CTRL (TIME0BASE + 0x04)
#define P_TIMER0_PRELOAD (TIME0BASE + 0x08)
#define P_TIMER0_CPP_REG (TIME0BASE + 0x0C)
#define P_TIMER0_UPCNT (TIME0BASE + 0x10)
/* Timer Controller Register */
/* bit 0 Timer enable */
#define TMR_DISABLE 0x0000
#define TMR_ENABLE 0x0001
/* bit 1 Interrupt enable */
#define TMR_IE_DISABLE 0x0000
#define TMR_IE_ENABLE 0x0002
/* bit 2 Output enable */
#define TMR_OE_DISABLE 0x0004
#define TMR_OE_ENABLE 0x0000
/* bit4 Up/Down counting selection */
#define TMR_UD_DOWN 0x0000
#define TMR_UD_UP 0x0010
/* bit5 Up/Down counting control selection */
#define TMR_UDS_UD 0x0000
#define TMR_UDS_EXTUD 0x0020
/* bit6 Time output mode */
#define TMR_OM_TOGGLE 0x0000
#define TMR_OM_PILSE 0x0040
/* bit 8..9 External input active edge selection */
#define TMR_ES_PE 0x0000
#define TMR_ES_NE 0x0100
#define TMR_ES_BOTH 0x0200
/* bit 10..11 Operating mode */
#define TMR_M_FREE 0x0000 /* free running timer mode */
#define TMR_M_PERIODIC 0x0400 /* periodic timer mode */
#define TMR_M_FC 0x0800 /* free running counter mode */
#define TMR_M_PC 0x0c00 /* periodic counter mode */
#define SYSTEM_CLOCK (27*1000000/4) /* 27 MHz */
#endif /* _ASM_SCORE_SCOREREGS_H */

arch/score/include/asm/segment.h

@ -1,22 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SCORE_SEGMENT_H
#define _ASM_SCORE_SEGMENT_H
#ifndef __ASSEMBLY__
typedef struct {
unsigned long seg;
} mm_segment_t;
#define KERNEL_DS ((mm_segment_t){0})
#define USER_DS KERNEL_DS
# define get_ds() (KERNEL_DS)
# define get_fs() (current_thread_info()->addr_limit)
# define set_fs(x) \
do { current_thread_info()->addr_limit = (x); } while (0)
# define segment_eq(a, b) ((a).seg == (b).seg)
# endif /* __ASSEMBLY__ */
#endif /* _ASM_SCORE_SEGMENT_H */

arch/score/include/asm/setup.h

@ -1,37 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SCORE_SETUP_H
#define _ASM_SCORE_SETUP_H
#include <uapi/asm/setup.h>
extern void pagetable_init(void);
extern void pgd_init(unsigned long page);
extern void setup_early_printk(void);
extern void cpu_cache_init(void);
extern void tlb_init(void);
extern void handle_nmi(void);
extern void handle_adelinsn(void);
extern void handle_adedata(void);
extern void handle_ibe(void);
extern void handle_pel(void);
extern void handle_sys(void);
extern void handle_ccu(void);
extern void handle_ri(void);
extern void handle_tr(void);
extern void handle_ades(void);
extern void handle_cee(void);
extern void handle_cpe(void);
extern void handle_dve(void);
extern void handle_dbe(void);
extern void handle_reserved(void);
extern void handle_tlb_refill(void);
extern void handle_tlb_invaild(void);
extern void handle_mod(void);
extern void debug_exception_vector(void);
extern void general_exception_vector(void);
extern void interrupt_exception_vector(void);
#endif /* _ASM_SCORE_SETUP_H */

arch/score/include/asm/shmparam.h

@ -1,7 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SCORE_SHMPARAM_H
#define _ASM_SCORE_SHMPARAM_H
#include <asm-generic/shmparam.h>
#endif /* _ASM_SCORE_SHMPARAM_H */

arch/score/include/asm/string.h

@ -1,9 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SCORE_STRING_H
#define _ASM_SCORE_STRING_H
extern void *memset(void *__s, int __c, size_t __count);
extern void *memcpy(void *__to, __const__ void *__from, size_t __n);
extern void *memmove(void *__dest, __const__ void *__src, size_t __n);
#endif /* _ASM_SCORE_STRING_H */

arch/score/include/asm/switch_to.h

@ -1,12 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SCORE_SWITCH_TO_H
#define _ASM_SCORE_SWITCH_TO_H
extern void *resume(void *last, void *next, void *next_ti);
#define switch_to(prev, next, last) \
do { \
(last) = resume(prev, next, task_thread_info(next)); \
} while (0)
#endif /* _ASM_SCORE_SWITCH_TO_H */

arch/score/include/asm/syscalls.h

@ -1,9 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SCORE_SYSCALLS_H
#define _ASM_SCORE_SYSCALLS_H
asmlinkage long score_rt_sigreturn(struct pt_regs *regs);
#include <asm-generic/syscalls.h>
#endif /* _ASM_SCORE_SYSCALLS_H */

arch/score/include/asm/thread_info.h

@ -1,90 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SCORE_THREAD_INFO_H
#define _ASM_SCORE_THREAD_INFO_H
#ifdef __KERNEL__
#define KU_MASK 0x08
#define KU_USER 0x08
#define KU_KERN 0x00
#include <asm/page.h>
#include <linux/const.h>
/* thread information allocation */
#define THREAD_SIZE_ORDER (1)
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
#define THREAD_MASK (THREAD_SIZE - _AC(1,UL))
#ifndef __ASSEMBLY__
#include <asm/processor.h>
/*
* low level task data that entry.S needs immediate access to
* - this struct should fit entirely inside of one cache line
* - this struct shares the supervisor stack pages
* - if the contents of this structure are changed, the assembly constants
* must also be changed
*/
struct thread_info {
struct task_struct *task; /* main task structure */
unsigned long flags; /* low level flags */
unsigned long tp_value; /* thread pointer */
__u32 cpu; /* current CPU */
/* 0 => preemptable, < 0 => BUG */
int preempt_count;
/*
* thread address space:
* 0-0xBFFFFFFF for user-thead
* 0-0xFFFFFFFF for kernel-thread
*/
mm_segment_t addr_limit;
struct pt_regs *regs;
};
/*
* macros/functions for gaining access to the thread information structure
*
* preempt_count needs to be 1 initially, until the scheduler is functional.
*/
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
.cpu = 0, \
.preempt_count = 1, \
.addr_limit = KERNEL_DS, \
}
/* How to get the thread information struct from C. */
register struct thread_info *__current_thread_info __asm__("r28");
#define current_thread_info() __current_thread_info
#endif /* !__ASSEMBLY__ */
/*
* thread information flags
* - these are process state flags that various assembly files may need to
* access
* - pending work-to-be-done flags are in LSW
* - other flags in MSW
*/
#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
#define TIF_SIGPENDING 1 /* signal pending */
#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
#define TIF_NOTIFY_RESUME 5 /* callback before returning to user */
#define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
#define _TIF_WORK_MASK (0x0000ffff)
#endif /* __KERNEL__ */
#endif /* _ASM_SCORE_THREAD_INFO_H */
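Assuming the usual 4 KiB page size, THREAD_SIZE_ORDER of 1 gives each task an 8 KiB kernel stack, with the thread_info reachable through register r28 as defined above. A hedged sketch of the common flag-test pattern follows; the helper name is hypothetical and not part of the removed port.

/* Hypothetical helper showing how exit-to-user paths typically test a work
 * flag through current_thread_info(); illustrative only. */
static inline int example_signal_pending_self(void)
{
	return (current_thread_info()->flags & _TIF_SIGPENDING) != 0;
}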

View File

@ -1,9 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SCORE_TIMEX_H
#define _ASM_SCORE_TIMEX_H
#define CLOCK_TICK_RATE 27000000 /* Timer input freq. */
#include <asm-generic/timex.h>
#endif /* _ASM_SCORE_TIMEX_H */

View File

@ -1,18 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SCORE_TLB_H
#define _ASM_SCORE_TLB_H
/*
* SCORE doesn't need any special per-pte or per-vma handling, except
* that we need to flush the cache for the area being unmapped.
*/
#define tlb_start_vma(tlb, vma) do {} while (0)
#define tlb_end_vma(tlb, vma) do {} while (0)
#define __tlb_remove_tlb_entry(tlb, ptep, address) do {} while (0)
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
extern void score7_FTLB_refill_Handler(void);
#include <asm-generic/tlb.h>
#endif /* _ASM_SCORE_TLB_H */
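A rough sketch (name hypothetical, details simplified) of where the tlb_flush() hook defined above fires: the generic mmu_gather code invokes it once the page-table entries for a region have been torn down.

/* Illustrative only: the shape of an unmap path ending in the hook above. */
static void example_finish_unmap(struct mmu_gather *tlb)
{
	/* ... PTEs already cleared and the freed pages queued on @tlb ... */
	tlb_flush(tlb);		/* expands to flush_tlb_mm(tlb->mm) here */
}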

View File

@ -1,143 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SCORE_TLBFLUSH_H
#define _ASM_SCORE_TLBFLUSH_H
#include <linux/mm.h>
/*
* TLB flushing:
*
* - flush_tlb_all() flushes all processes' TLB entries
* - flush_tlb_mm(mm) flushes the specified mm context TLB entries
* - flush_tlb_page(vma, vmaddr) flushes one page
* - flush_tlb_range(vma, start, end) flushes a range of pages
* - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
*/
extern void local_flush_tlb_all(void);
extern void local_flush_tlb_mm(struct mm_struct *mm);
extern void local_flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
extern void local_flush_tlb_kernel_range(unsigned long start,
unsigned long end);
extern void local_flush_tlb_page(struct vm_area_struct *vma,
unsigned long page);
extern void local_flush_tlb_one(unsigned long vaddr);
#define flush_tlb_all() local_flush_tlb_all()
#define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
#define flush_tlb_range(vma, vmaddr, end) \
local_flush_tlb_range(vma, vmaddr, end)
#define flush_tlb_kernel_range(vmaddr, end) \
local_flush_tlb_kernel_range(vmaddr, end)
#define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page)
#define flush_tlb_one(vaddr) local_flush_tlb_one(vaddr)
#ifndef __ASSEMBLY__
static inline unsigned long pevn_get(void)
{
unsigned long val;
__asm__ __volatile__(
"mfcr %0, cr11\n"
"nop\nnop\n"
: "=r" (val));
return val;
}
static inline void pevn_set(unsigned long val)
{
__asm__ __volatile__(
"mtcr %0, cr11\n"
"nop\nnop\nnop\nnop\nnop\n"
: : "r" (val));
}
static inline void pectx_set(unsigned long val)
{
__asm__ __volatile__(
"mtcr %0, cr12\n"
"nop\nnop\nnop\nnop\nnop\n"
: : "r" (val));
}
static inline unsigned long pectx_get(void)
{
unsigned long val;
__asm__ __volatile__(
"mfcr %0, cr12\n"
"nop\nnop\n"
: "=r" (val));
return val;
}
static inline unsigned long tlblock_get(void)
{
unsigned long val;
__asm__ __volatile__(
"mfcr %0, cr7\n"
"nop\nnop\n"
: "=r" (val));
return val;
}
static inline void tlblock_set(unsigned long val)
{
__asm__ __volatile__(
"mtcr %0, cr7\n"
"nop\nnop\nnop\nnop\nnop\n"
: : "r" (val));
}
static inline void tlbpt_set(unsigned long val)
{
__asm__ __volatile__(
"mtcr %0, cr8\n"
"nop\nnop\nnop\nnop\nnop\n"
: : "r" (val));
}
static inline long tlbpt_get(void)
{
long val;
__asm__ __volatile__(
"mfcr %0, cr8\n"
"nop\nnop\n"
: "=r" (val));
return val;
}
static inline void peaddr_set(unsigned long val)
{
__asm__ __volatile__(
"mtcr %0, cr9\n"
"nop\nnop\nnop\nnop\nnop\n"
: : "r" (val));
}
/* TLB operations. */
static inline void tlb_probe(void)
{
__asm__ __volatile__("stlb;nop;nop;nop;nop;nop");
}
static inline void tlb_read(void)
{
__asm__ __volatile__("mftlb;nop;nop;nop;nop;nop");
}
static inline void tlb_write_indexed(void)
{
__asm__ __volatile__("mtptlb;nop;nop;nop;nop;nop");
}
static inline void tlb_write_random(void)
{
__asm__ __volatile__("mtrtlb;nop;nop;nop;nop;nop");
}
#endif /* Not __ASSEMBLY__ */
#endif /* _ASM_SCORE_TLBFLUSH_H */
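A hedged usage sketch of the flush interfaces documented at the top of this file; the function below is illustrative and not part of the removed port. Note that the flush_tlb_* wrappers here all forward to the local_flush_tlb_* implementations, since the port has no SMP support.

/* Illustrative: invalidate the stale translation after a PTE update. */
static inline void example_after_pte_update(struct vm_area_struct *vma,
					    unsigned long addr)
{
	/* ... the PTE for @addr has just been rewritten ... */
	flush_tlb_page(vma, addr);
}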

View File

@ -1,7 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SCORE_TOPOLOGY_H
#define _ASM_SCORE_TOPOLOGY_H
#include <asm-generic/topology.h>
#endif /* _ASM_SCORE_TOPOLOGY_H */

View File

@ -1,373 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __SCORE_UACCESS_H
#define __SCORE_UACCESS_H
#include <linux/kernel.h>
#include <asm/extable.h>
#define get_ds() (KERNEL_DS)
#define get_fs() (current_thread_info()->addr_limit)
#define segment_eq(a, b) ((a).seg == (b).seg)
/*
* Is an address valid? This does a straightforward calculation rather
* than tests.
*
* Address valid if:
* - "addr" doesn't have any high-bits set
* - AND "size" doesn't have any high-bits set
* - AND "addr+size" doesn't have any high-bits set
* - OR we are in kernel mode.
*
* __ua_size() is a trick to avoid runtime checking of positive constant
* sizes; for those we already know at compile time that the size is ok.
*/
#define __ua_size(size) \
((__builtin_constant_p(size) && (signed long) (size) > 0) ? 0 : (size))
/*
* access_ok: - Checks if a user space pointer is valid
* @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that
* %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
* to write to a block, it is always safe to read from it.
* @addr: User space pointer to start of block to check
* @size: Size of block to check
*
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* Checks if a pointer to a block of memory in user space is valid.
*
* Returns true (nonzero) if the memory block may be valid, false (zero)
* if it is definitely invalid.
*
* Note that, depending on architecture, this function probably just
* checks that the pointer is in the user space range - after calling
* this function, memory access functions may still return -EFAULT.
*/
#define __access_ok(addr, size) \
(((long)((get_fs().seg) & \
((addr) | ((addr) + (size)) | \
__ua_size(size)))) == 0)
#define access_ok(type, addr, size) \
likely(__access_ok((unsigned long)(addr), (size)))
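/*
 * Illustrative note, not part of the original header: get_fs().seg acts as a
 * mask of address bits that a valid user pointer must not have set (typically
 * 0 for KERNEL_DS, so kernel-mode callers always pass).  ORing together addr,
 * addr + size and __ua_size(size), then ANDing with that mask, yields zero
 * only when the whole block stays inside the allowed range, so the check
 * reduces to a few ALU operations and a single compare against zero.
 */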
/*
* put_user: - Write a simple value into user space.
* @x: Value to copy to user space.
* @ptr: Destination address, in user space.
*
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* This macro copies a single simple value from kernel space to user
* space. It supports simple types like char and int, but not larger
* data types like structures or arrays.
*
* @ptr must have pointer-to-simple-variable type, and @x must be assignable
* to the result of dereferencing @ptr.
*
* Returns zero on success, or -EFAULT on error.
*/
#define put_user(x, ptr) __put_user_check((x), (ptr), sizeof(*(ptr)))
/*
* get_user: - Get a simple variable from user space.
* @x: Variable to store result.
* @ptr: Source address, in user space.
*
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* This macro copies a single simple variable from user space to kernel
* space. It supports simple types like char and int, but not larger
* data types like structures or arrays.
*
* @ptr must have pointer-to-simple-variable type, and the result of
* dereferencing @ptr must be assignable to @x without a cast.
*
* Returns zero on success, or -EFAULT on error.
* On error, the variable @x is set to zero.
*/
#define get_user(x, ptr) __get_user_check((x), (ptr), sizeof(*(ptr)))
/*
* __put_user: - Write a simple value into user space, with less checking.
* @x: Value to copy to user space.
* @ptr: Destination address, in user space.
*
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* This macro copies a single simple value from kernel space to user
* space. It supports simple types like char and int, but not larger
* data types like structures or arrays.
*
* @ptr must have pointer-to-simple-variable type, and @x must be assignable
* to the result of dereferencing @ptr.
*
* Caller must check the pointer with access_ok() before calling this
* function.
*
* Returns zero on success, or -EFAULT on error.
*/
#define __put_user(x, ptr) __put_user_nocheck((x), (ptr), sizeof(*(ptr)))
/*
* __get_user: - Get a simple variable from user space, with less checking.
* @x: Variable to store result.
* @ptr: Source address, in user space.
*
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* This macro copies a single simple variable from user space to kernel
* space. It supports simple types like char and int, but not larger
* data types like structures or arrays.
*
* @ptr must have pointer-to-simple-variable type, and the result of
* dereferencing @ptr must be assignable to @x without a cast.
*
* Caller must check the pointer with access_ok() before calling this
* function.
*
* Returns zero on success, or -EFAULT on error.
* On error, the variable @x is set to zero.
*/
#define __get_user(x, ptr) __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))
/*
* Yuck. We need two variants, one for 64bit operation and one
* for 32 bit mode and old iron.
*/
extern void __get_user_unknown(void);
#define __get_user_common(val, size, ptr) \
do { \
switch (size) { \
case 1: \
__get_user_asm(val, "lb", ptr); \
break; \
case 2: \
__get_user_asm(val, "lh", ptr); \
break; \
case 4: \
__get_user_asm(val, "lw", ptr); \
break; \
case 8: \
if (__copy_from_user((void *)&val, ptr, 8) == 0) \
__gu_err = 0; \
else \
__gu_err = -EFAULT; \
break; \
default: \
__get_user_unknown(); \
break; \
} \
} while (0)
#define __get_user_nocheck(x, ptr, size) \
({ \
long __gu_err = 0; \
__get_user_common((x), size, ptr); \
__gu_err; \
})
#define __get_user_check(x, ptr, size) \
({ \
long __gu_err = -EFAULT; \
const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
\
if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) \
__get_user_common((x), size, __gu_ptr); \
else \
(x) = 0; \
\
__gu_err; \
})
#define __get_user_asm(val, insn, addr) \
{ \
long __gu_tmp; \
\
__asm__ __volatile__( \
"1:" insn " %1, %3\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3:li %0, %4\n" \
"li %1, 0\n" \
"j 2b\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
".word 1b, 3b\n" \
".previous\n" \
: "=r" (__gu_err), "=r" (__gu_tmp) \
: "0" (0), "o" (__m(addr)), "i" (-EFAULT)); \
\
(val) = (__typeof__(*(addr))) __gu_tmp; \
}
/*
* Yuck. We need two variants, one for 64bit operation and one
* for 32 bit mode and old iron.
*/
#define __put_user_nocheck(val, ptr, size) \
({ \
__typeof__(*(ptr)) __pu_val; \
long __pu_err = 0; \
\
__pu_val = (val); \
switch (size) { \
case 1: \
__put_user_asm("sb", ptr); \
break; \
case 2: \
__put_user_asm("sh", ptr); \
break; \
case 4: \
__put_user_asm("sw", ptr); \
break; \
case 8: \
if ((__copy_to_user((void *)ptr, &__pu_val, 8)) == 0) \
__pu_err = 0; \
else \
__pu_err = -EFAULT; \
break; \
default: \
__put_user_unknown(); \
break; \
} \
__pu_err; \
})
#define __put_user_check(val, ptr, size) \
({ \
__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
__typeof__(*(ptr)) __pu_val = (val); \
long __pu_err = -EFAULT; \
\
if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) { \
switch (size) { \
case 1: \
__put_user_asm("sb", __pu_addr); \
break; \
case 2: \
__put_user_asm("sh", __pu_addr); \
break; \
case 4: \
__put_user_asm("sw", __pu_addr); \
break; \
case 8: \
if ((__copy_to_user((void *)__pu_addr, &__pu_val, 8)) == 0)\
__pu_err = 0; \
else \
__pu_err = -EFAULT; \
break; \
default: \
__put_user_unknown(); \
break; \
} \
} \
__pu_err; \
})
#define __put_user_asm(insn, ptr) \
__asm__ __volatile__( \
"1:" insn " %2, %3\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3:li %0, %4\n" \
"j 2b\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
".word 1b, 3b\n" \
".previous\n" \
: "=r" (__pu_err) \
: "0" (0), "r" (__pu_val), "o" (__m(ptr)), \
"i" (-EFAULT));
extern void __put_user_unknown(void);
extern int __copy_tofrom_user(void *to, const void *from, unsigned long len);
static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long len)
{
return __copy_tofrom_user(to, (__force const void *)from, len);
}
static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long len)
{
return __copy_tofrom_user((__force void *)to, from, len);
}
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
/*
* __clear_user: - Zero a block of memory in user space, with less checking.
* @to: Destination address, in user space.
* @n: Number of bytes to zero.
*
* Zero a block of memory in user space. Caller must check
* the specified block with access_ok() before calling this function.
*
* Returns number of bytes that could not be cleared.
* On success, this will be zero.
*/
extern unsigned long __clear_user(void __user *src, unsigned long size);
static inline unsigned long clear_user(char *src, unsigned long size)
{
if (access_ok(VERIFY_WRITE, src, size))
return __clear_user(src, size);
return -EFAULT;
}
/*
* __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
* @dst: Destination address, in kernel space. This buffer must be at
* least @count bytes long.
* @src: Source address, in user space.
* @count: Maximum number of bytes to copy, including the trailing NUL.
*
* Copies a NUL-terminated string from userspace to kernel space.
* Caller must check the specified block with access_ok() before calling
* this function.
*
* On success, returns the length of the string (not including the trailing
* NUL).
*
* If access to userspace fails, returns -EFAULT (some data may have been
* copied).
*
* If @count is smaller than the length of the string, copies @count bytes
* and returns @count.
*/
extern int __strncpy_from_user(char *dst, const char *src, long len);
static inline int strncpy_from_user(char *dst, const char *src, long len)
{
if (access_ok(VERIFY_READ, src, 1))
return __strncpy_from_user(dst, src, len);
return -EFAULT;
}
extern int __strnlen_user(const char *str, long len);
static inline long strnlen_user(const char __user *str, long len)
{
if (!access_ok(VERIFY_READ, str, 0))
return 0;
else
return __strnlen_user(str, len);
}
#endif /* __SCORE_UACCESS_H */
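To tie the interfaces documented in this header together, a hedged usage sketch follows: get_user()/put_user() return 0 or -EFAULT, strncpy_from_user() returns a length or -EFAULT, and clear_user() returns the number of bytes it could not zero. The function and buffer names are hypothetical and not taken from the removed port.

/* Hypothetical example of the calling conventions documented above. */
static long example_uaccess_usage(int __user *uptr, char __user *ubuf,
				  unsigned long len)
{
	char name[32];
	int val;

	if (get_user(val, uptr))			/* 0 or -EFAULT */
		return -EFAULT;
	if (strncpy_from_user(name, (__force const char *)ubuf,
			      sizeof(name) - 1) < 0)
		return -EFAULT;
	name[sizeof(name) - 1] = '\0';
	if (put_user(val + 1, uptr))			/* write it back */
		return -EFAULT;
	if (clear_user((__force char *)ubuf, len))	/* nonzero => failure */
		return -EFAULT;
	return 0;
}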

View File

@ -1 +0,0 @@
#include <asm-generic/ucontext.h>

View File

@ -1,7 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SCORE_UNALIGNED_H
#define _ASM_SCORE_UNALIGNED_H
#include <asm-generic/unaligned.h>
#endif /* _ASM_SCORE_UNALIGNED_H */

View File

@ -1,22 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SCORE_USER_H
#define _ASM_SCORE_USER_H
struct user_regs_struct {
unsigned long regs[32];
unsigned long cel;
unsigned long ceh;
unsigned long sr0; /* cnt */
unsigned long sr1; /* lcr */
unsigned long sr2; /* scr */
unsigned long cp0_epc;
unsigned long cp0_ema;
unsigned long cp0_psr;
unsigned long cp0_ecr;
unsigned long cp0_condition;
};
#endif /* _ASM_SCORE_USER_H */

View File

@ -1,6 +0,0 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
generic-y += bpf_perf_event.h
generic-y += poll.h
generic-y += siginfo.h

View File

@ -1,4 +0,0 @@
#ifndef _ASM_SCORE_AUXVEC_H
#define _ASM_SCORE_AUXVEC_H
#endif /* _ASM_SCORE_AUXVEC_H */

View File

@ -1,7 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _ASM_SCORE_BITSPERLONG_H
#define _ASM_SCORE_BITSPERLONG_H
#include <asm-generic/bitsperlong.h>
#endif /* _ASM_SCORE_BITSPERLONG_H */

View File

@ -1,7 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _ASM_SCORE_BYTEORDER_H
#define _ASM_SCORE_BYTEORDER_H
#include <linux/byteorder/little_endian.h>
#endif /* _ASM_SCORE_BYTEORDER_H */

View File

@ -1,7 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _ASM_SCORE_ERRNO_H
#define _ASM_SCORE_ERRNO_H
#include <asm-generic/errno.h>
#endif /* _ASM_SCORE_ERRNO_H */

View File

@ -1,7 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _ASM_SCORE_FCNTL_H
#define _ASM_SCORE_FCNTL_H
#include <asm-generic/fcntl.h>
#endif /* _ASM_SCORE_FCNTL_H */

View File

@ -1,7 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _ASM_SCORE_IOCTL_H
#define _ASM_SCORE_IOCTL_H
#include <asm-generic/ioctl.h>
#endif /* _ASM_SCORE_IOCTL_H */

View File

@ -1,7 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _ASM_SCORE_IOCTLS_H
#define _ASM_SCORE_IOCTLS_H
#include <asm-generic/ioctls.h>
#endif /* _ASM_SCORE_IOCTLS_H */

View File

@ -1,7 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _ASM_SCORE_IPCBUF_H
#define _ASM_SCORE_IPCBUF_H
#include <asm-generic/ipcbuf.h>
#endif /* _ASM_SCORE_IPCBUF_H */

View File

@ -1,2 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#include <asm-generic/kvm_para.h>

View File

@ -1,7 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _ASM_SCORE_MMAN_H
#define _ASM_SCORE_MMAN_H
#include <asm-generic/mman.h>
#endif /* _ASM_SCORE_MMAN_H */

View File

@ -1,7 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _ASM_SCORE_MSGBUF_H
#define _ASM_SCORE_MSGBUF_H
#include <asm-generic/msgbuf.h>
#endif /* _ASM_SCORE_MSGBUF_H */

View File

@ -1,7 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _ASM_SCORE_PARAM_H
#define _ASM_SCORE_PARAM_H
#include <asm-generic/param.h>
#endif /* _ASM_SCORE_PARAM_H */

View File

@ -1,7 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _ASM_SCORE_POSIX_TYPES_H
#define _ASM_SCORE_POSIX_TYPES_H
#include <asm-generic/posix_types.h>
#endif /* _ASM_SCORE_POSIX_TYPES_H */

View File

@ -1,66 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _UAPI_ASM_SCORE_PTRACE_H
#define _UAPI_ASM_SCORE_PTRACE_H
#define PTRACE_GETREGS 12
#define PTRACE_SETREGS 13
#define SINGLESTEP16_INSN 0x7006
#define SINGLESTEP32_INSN 0x840C8000
#define BREAKPOINT16_INSN 0x7002 /* work on SPG300 */
#define BREAKPOINT32_INSN 0x84048000 /* work on SPG300 */
/* Define instruction mask */
#define INSN32_MASK 0x80008000
#define J32 0x88008000 /* 1_00010_0000000000_1_000000000000000 */
#define J32M 0xFC008000 /* 1_11111_0000000000_1_000000000000000 */
#define B32 0x90008000 /* 1_00100_0000000000_1_000000000000000 */
#define B32M 0xFC008000
#define BL32 0x90008001 /* 1_00100_0000000000_1_000000000000001 */
#define BL32M B32
#define BR32 0x80008008 /* 1_00000_0000000000_1_00000000_000100_0 */
#define BR32M 0xFFE0807E
#define BRL32 0x80008009 /* 1_00000_0000000000_1_00000000_000100_1 */
#define BRL32M BR32M
#define B32_SET (J32 | B32 | BL32 | BR32 | BRL32)
#define J16 0x3000 /* 0_011_....... */
#define J16M 0xF000
#define B16 0x4000 /* 0_100_....... */
#define B16M 0xF000
#define BR16 0x0004 /* 0_000.......0100 */
#define BR16M 0xF00F
#define B16_SET (J16 | B16 | BR16)
/*
* This struct defines the way the registers are stored on the stack during a
* system call/exception. As usual the registers k0/k1 aren't being saved.
*/
struct pt_regs {
unsigned long pad0[6]; /* stack arguments */
unsigned long orig_r4;
unsigned long orig_r7;
long is_syscall;
unsigned long regs[32];
unsigned long cel;
unsigned long ceh;
unsigned long sr0; /* cnt */
unsigned long sr1; /* lcr */
unsigned long sr2; /* scr */
unsigned long cp0_epc;
unsigned long cp0_ema;
unsigned long cp0_psr;
unsigned long cp0_ecr;
unsigned long cp0_condition;
};
#endif /* _UAPI_ASM_SCORE_PTRACE_H */
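A hedged sketch of how a single-step or breakpoint path might classify a fetched opcode with the value/mask pairs above; the helper is illustrative, its name is hypothetical, and it assumes (as the masks themselves suggest) that the bits in INSN32_MASK distinguish the 32-bit encodings from the 16-bit ones. The branch-and-link variants are omitted for brevity.

/* Illustrative classifier built from the value/mask pairs above. */
static int example_is_branch_insn(unsigned int insn)
{
	if ((insn & INSN32_MASK) == INSN32_MASK) {	/* 32-bit encoding */
		return (insn & J32M) == J32 ||
		       (insn & B32M) == B32 ||
		       (insn & BR32M) == BR32 ||
		       (insn & BRL32M) == BRL32;
	}
	/* 16-bit encoding */
	return (insn & J16M) == J16 ||
	       (insn & B16M) == B16 ||
	       (insn & BR16M) == BR16;
}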

View File

@ -1,7 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _ASM_SCORE_RESOURCE_H
#define _ASM_SCORE_RESOURCE_H
#include <asm-generic/resource.h>
#endif /* _ASM_SCORE_RESOURCE_H */

View File

@ -1,7 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _ASM_SCORE_SEMBUF_H
#define _ASM_SCORE_SEMBUF_H
#include <asm-generic/sembuf.h>
#endif /* _ASM_SCORE_SEMBUF_H */

View File

@ -1,10 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _UAPI_ASM_SCORE_SETUP_H
#define _UAPI_ASM_SCORE_SETUP_H
#define COMMAND_LINE_SIZE 256
#define MEMORY_START 0
#define MEMORY_SIZE 0x2000000
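/* 0x2000000 bytes = 32 MiB */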
#endif /* _UAPI_ASM_SCORE_SETUP_H */

View File

@ -1,7 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _ASM_SCORE_SHMBUF_H
#define _ASM_SCORE_SHMBUF_H
#include <asm-generic/shmbuf.h>
#endif /* _ASM_SCORE_SHMBUF_H */

View File

@ -1,23 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _ASM_SCORE_SIGCONTEXT_H
#define _ASM_SCORE_SIGCONTEXT_H
/*
* Keep this struct definition in sync with the sigcontext fragment
* in arch/score/tools/offset.c
*/
struct sigcontext {
unsigned int sc_regmask;
unsigned int sc_psr;
unsigned int sc_condition;
unsigned long sc_pc;
unsigned long sc_regs[32];
unsigned int sc_ssflags;
unsigned int sc_mdceh;
unsigned int sc_mdcel;
unsigned int sc_ecr;
unsigned long sc_ema;
unsigned long sc_sigset[4];
};
#endif /* _ASM_SCORE_SIGCONTEXT_H */

View File

@ -1,7 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _ASM_SCORE_SIGNAL_H
#define _ASM_SCORE_SIGNAL_H
#include <asm-generic/signal.h>
#endif /* _ASM_SCORE_SIGNAL_H */

View File

@ -1,7 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _ASM_SCORE_SOCKET_H
#define _ASM_SCORE_SOCKET_H
#include <asm-generic/socket.h>
#endif /* _ASM_SCORE_SOCKET_H */

View File

@ -1,7 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _ASM_SCORE_SOCKIOS_H
#define _ASM_SCORE_SOCKIOS_H
#include <asm-generic/sockios.h>
#endif /* _ASM_SCORE_SOCKIOS_H */

View File

@ -1,7 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _ASM_SCORE_STAT_H
#define _ASM_SCORE_STAT_H
#include <asm-generic/stat.h>
#endif /* _ASM_SCORE_STAT_H */

View File

@ -1,7 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _ASM_SCORE_STATFS_H
#define _ASM_SCORE_STATFS_H
#include <asm-generic/statfs.h>
#endif /* _ASM_SCORE_STATFS_H */

View File

@ -1,7 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _ASM_SCORE_SWAB_H
#define _ASM_SCORE_SWAB_H
#include <asm-generic/swab.h>
#endif /* _ASM_SCORE_SWAB_H */

View File

@ -1,7 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _ASM_SCORE_TERMBITS_H
#define _ASM_SCORE_TERMBITS_H
#include <asm-generic/termbits.h>
#endif /* _ASM_SCORE_TERMBITS_H */

View File

@ -1,7 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _ASM_SCORE_TERMIOS_H
#define _ASM_SCORE_TERMIOS_H
#include <asm-generic/termios.h>
#endif /* _ASM_SCORE_TERMIOS_H */

View File

@ -1,7 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _ASM_SCORE_TYPES_H
#define _ASM_SCORE_TYPES_H
#include <asm-generic/types.h>
#endif /* _ASM_SCORE_TYPES_H */

View File

@ -1,13 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#define __ARCH_HAVE_MMU
#define __ARCH_WANT_RENAMEAT
#define __ARCH_WANT_SYSCALL_NO_AT
#define __ARCH_WANT_SYSCALL_NO_FLAGS
#define __ARCH_WANT_SYSCALL_OFF_T
#define __ARCH_WANT_SYSCALL_DEPRECATED
#define __ARCH_WANT_SYS_CLONE
#define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_SYS_VFORK
#include <asm-generic/unistd.h>

View File

@ -1,12 +0,0 @@
# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Linux/SCORE kernel.
#
extra-y := head.o vmlinux.lds
obj-y += entry.o irq.o process.o ptrace.o \
setup.o signal.o sys_score.o time.o traps.o \
sys_call_table.o
obj-$(CONFIG_MODULES) += module.o

View File

@ -1,214 +0,0 @@
/*
* arch/score/kernel/asm-offsets.c
*
* Score Processor version.
*
* Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
* Chen Liqin <liqin.chen@sunplusct.com>
* Lennox Wu <lennox.wu@sunplusct.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see the file COPYING, or write
* to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/kbuild.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <asm-generic/cmpxchg-local.h>
void output_ptreg_defines(void)
{
COMMENT("SCORE pt_regs offsets.");
OFFSET(PT_R0, pt_regs, regs[0]);
OFFSET(PT_R1, pt_regs, regs[1]);
OFFSET(PT_R2, pt_regs, regs[2]);
OFFSET(PT_R3, pt_regs, regs[3]);
OFFSET(PT_R4, pt_regs, regs[4]);
OFFSET(PT_R5, pt_regs, regs[5]);
OFFSET(PT_R6, pt_regs, regs[6]);
OFFSET(PT_R7, pt_regs, regs[7]);
OFFSET(PT_R8, pt_regs, regs[8]);
OFFSET(PT_R9, pt_regs, regs[9]);
OFFSET(PT_R10, pt_regs, regs[10]);
OFFSET(PT_R11, pt_regs, regs[11]);
OFFSET(PT_R12, pt_regs, regs[12]);
OFFSET(PT_R13, pt_regs, regs[13]);
OFFSET(PT_R14, pt_regs, regs[14]);
OFFSET(PT_R15, pt_regs, regs[15]);
OFFSET(PT_R16, pt_regs, regs[16]);
OFFSET(PT_R17, pt_regs, regs[17]);
OFFSET(PT_R18, pt_regs, regs[18]);
OFFSET(PT_R19, pt_regs, regs[19]);
OFFSET(PT_R20, pt_regs, regs[20]);
OFFSET(PT_R21, pt_regs, regs[21]);
OFFSET(PT_R22, pt_regs, regs[22]);
OFFSET(PT_R23, pt_regs, regs[23]);
OFFSET(PT_R24, pt_regs, regs[24]);
OFFSET(PT_R25, pt_regs, regs[25]);
OFFSET(PT_R26, pt_regs, regs[26]);
OFFSET(PT_R27, pt_regs, regs[27]);
OFFSET(PT_R28, pt_regs, regs[28]);
OFFSET(PT_R29, pt_regs, regs[29]);
OFFSET(PT_R30, pt_regs, regs[30]);
OFFSET(PT_R31, pt_regs, regs[31]);
OFFSET(PT_ORIG_R4, pt_regs, orig_r4);
OFFSET(PT_ORIG_R7, pt_regs, orig_r7);
OFFSET(PT_CEL, pt_regs, cel);
OFFSET(PT_CEH, pt_regs, ceh);
OFFSET(PT_SR0, pt_regs, sr0);
OFFSET(PT_SR1, pt_regs, sr1);
OFFSET(PT_SR2, pt_regs, sr2);
OFFSET(PT_EPC, pt_regs, cp0_epc);
OFFSET(PT_EMA, pt_regs, cp0_ema);
OFFSET(PT_PSR, pt_regs, cp0_psr);
OFFSET(PT_ECR, pt_regs, cp0_ecr);
OFFSET(PT_CONDITION, pt_regs, cp0_condition);
OFFSET(PT_IS_SYSCALL, pt_regs, is_syscall);
DEFINE(PT_SIZE, sizeof(struct pt_regs));
BLANK();
}
void output_task_defines(void)
{
COMMENT("SCORE task_struct offsets.");
OFFSET(TASK_STATE, task_struct, state);
OFFSET(TASK_THREAD_INFO, task_struct, stack);
OFFSET(TASK_FLAGS, task_struct, flags);
OFFSET(TASK_MM, task_struct, mm);
OFFSET(TASK_PID, task_struct, pid);
DEFINE(TASK_STRUCT_SIZE, sizeof(struct task_struct));
BLANK();
}
void output_thread_info_defines(void)
{
COMMENT("SCORE thread_info offsets.");
OFFSET(TI_TASK, thread_info, task);
OFFSET(TI_FLAGS, thread_info, flags);
OFFSET(TI_TP_VALUE, thread_info, tp_value);
OFFSET(TI_CPU, thread_info, cpu);
OFFSET(TI_PRE_COUNT, thread_info, preempt_count);
OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit);
OFFSET(TI_REGS, thread_info, regs);
DEFINE(KERNEL_STACK_SIZE, THREAD_SIZE);
DEFINE(KERNEL_STACK_MASK, THREAD_MASK);
BLANK();
}
void output_thread_defines(void)
{
COMMENT("SCORE specific thread_struct offsets.");
OFFSET(THREAD_REG0, task_struct, thread.reg0);
OFFSET(THREAD_REG2, task_struct, thread.reg2);
OFFSET(THREAD_REG3, task_struct, thread.reg3);
OFFSET(THREAD_REG12, task_struct, thread.reg12);
OFFSET(THREAD_REG13, task_struct, thread.reg13);
OFFSET(THREAD_REG14, task_struct, thread.reg14);
OFFSET(THREAD_REG15, task_struct, thread.reg15);
OFFSET(THREAD_REG16, task_struct, thread.reg16);
OFFSET(THREAD_REG17, task_struct, thread.reg17);
OFFSET(THREAD_REG18, task_struct, thread.reg18);
OFFSET(THREAD_REG19, task_struct, thread.reg19);
OFFSET(THREAD_REG20, task_struct, thread.reg20);
OFFSET(THREAD_REG21, task_struct, thread.reg21);
OFFSET(THREAD_REG29, task_struct, thread.reg29);
OFFSET(THREAD_PSR, task_struct, thread.cp0_psr);
OFFSET(THREAD_EMA, task_struct, thread.cp0_ema);
OFFSET(THREAD_BADUADDR, task_struct, thread.cp0_baduaddr);
OFFSET(THREAD_ECODE, task_struct, thread.error_code);
OFFSET(THREAD_TRAPNO, task_struct, thread.trap_no);
BLANK();
}
void output_mm_defines(void)
{
COMMENT("Size of struct page");
DEFINE(STRUCT_PAGE_SIZE, sizeof(struct page));
BLANK();
COMMENT("Linux mm_struct offsets.");
OFFSET(MM_USERS, mm_struct, mm_users);
OFFSET(MM_PGD, mm_struct, pgd);
OFFSET(MM_CONTEXT, mm_struct, context);
BLANK();
DEFINE(_PAGE_SIZE, PAGE_SIZE);
DEFINE(_PAGE_SHIFT, PAGE_SHIFT);
BLANK();
DEFINE(_PGD_T_SIZE, sizeof(pgd_t));
DEFINE(_PTE_T_SIZE, sizeof(pte_t));
BLANK();
DEFINE(_PGD_ORDER, PGD_ORDER);
DEFINE(_PTE_ORDER, PTE_ORDER);
BLANK();
DEFINE(_PGDIR_SHIFT, PGDIR_SHIFT);
BLANK();
DEFINE(_PTRS_PER_PGD, PTRS_PER_PGD);
DEFINE(_PTRS_PER_PTE, PTRS_PER_PTE);
BLANK();
}
void output_sc_defines(void)
{
COMMENT("Linux sigcontext offsets.");
OFFSET(SC_REGS, sigcontext, sc_regs);
OFFSET(SC_MDCEH, sigcontext, sc_mdceh);
OFFSET(SC_MDCEL, sigcontext, sc_mdcel);
OFFSET(SC_PC, sigcontext, sc_pc);
OFFSET(SC_PSR, sigcontext, sc_psr);
OFFSET(SC_ECR, sigcontext, sc_ecr);
OFFSET(SC_EMA, sigcontext, sc_ema);
BLANK();
}
void output_signal_defined(void)
{
COMMENT("Linux signal numbers.");
DEFINE(_SIGHUP, SIGHUP);
DEFINE(_SIGINT, SIGINT);
DEFINE(_SIGQUIT, SIGQUIT);
DEFINE(_SIGILL, SIGILL);
DEFINE(_SIGTRAP, SIGTRAP);
DEFINE(_SIGIOT, SIGIOT);
DEFINE(_SIGABRT, SIGABRT);
DEFINE(_SIGFPE, SIGFPE);
DEFINE(_SIGKILL, SIGKILL);
DEFINE(_SIGBUS, SIGBUS);
DEFINE(_SIGSEGV, SIGSEGV);
DEFINE(_SIGSYS, SIGSYS);
DEFINE(_SIGPIPE, SIGPIPE);
DEFINE(_SIGALRM, SIGALRM);
DEFINE(_SIGTERM, SIGTERM);
DEFINE(_SIGUSR1, SIGUSR1);
DEFINE(_SIGUSR2, SIGUSR2);
DEFINE(_SIGCHLD, SIGCHLD);
DEFINE(_SIGPWR, SIGPWR);
DEFINE(_SIGWINCH, SIGWINCH);
DEFINE(_SIGURG, SIGURG);
DEFINE(_SIGIO, SIGIO);
DEFINE(_SIGSTOP, SIGSTOP);
DEFINE(_SIGTSTP, SIGTSTP);
DEFINE(_SIGCONT, SIGCONT);
DEFINE(_SIGTTIN, SIGTTIN);
DEFINE(_SIGTTOU, SIGTTOU);
DEFINE(_SIGVTALRM, SIGVTALRM);
DEFINE(_SIGPROF, SIGPROF);
DEFINE(_SIGXCPU, SIGXCPU);
DEFINE(_SIGXFSZ, SIGXFSZ);
BLANK();
}
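Kbuild compiles this file and turns every OFFSET()/DEFINE() invocation into a plain #define in the generated asm-offsets.h, which the assembly sources pull in (head.S below includes <asm/asm-offsets.h> directly). A hedged illustration of that generated output follows; the numeric values depend on the actual structure layouts and are shown only as plausible examples.

/* Illustrative excerpt of the generated header; values are examples only. */
#define TI_FLAGS 4		/* offsetof(struct thread_info, flags) */
#define TI_REGS 24		/* offsetof(struct thread_info, regs) */
#define PT_EPC 184		/* offsetof(struct pt_regs, cp0_epc) */
#define KERNEL_STACK_SIZE 8192	/* THREAD_SIZE */

entry.S consumes these constants directly, for example in "lw r6, [r28, TI_FLAGS]" on its exit-to-user path.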

View File

@ -1,493 +0,0 @@
/*
* arch/score/kernel/entry.S
*
* Score Processor version.
*
* Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
* Chen Liqin <liqin.chen@sunplusct.com>
* Lennox Wu <lennox.wu@sunplusct.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see the file COPYING, or write
* to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/err.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/asmmacro.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
/*
* disable interrupts.
*/
.macro disable_irq
mfcr r8, cr0
srli r8, r8, 1
slli r8, r8, 1
mtcr r8, cr0
nop
nop
nop
nop
nop
.endm
/*
* enable interrupts.
*/
.macro enable_irq
mfcr r8, cr0
ori r8, 1
mtcr r8, cr0
nop
nop
nop
nop
nop
.endm
__INIT
ENTRY(debug_exception_vector)
nop!
nop!
nop!
nop!
nop!
nop!
nop!
nop!
ENTRY(general_exception_vector) # should move to addr 0x200
j general_exception
nop!
nop!
nop!
nop!
nop!
nop!
ENTRY(interrupt_exception_vector) # should move to addr 0x210
j interrupt_exception
nop!
nop!
nop!
nop!
nop!
nop!
.section ".text", "ax"
.align 2;
general_exception:
mfcr r31, cr2
nop
la r30, exception_handlers
andi r31, 0x1f # get ecr.exc_code
slli r31, r31, 2
add r30, r30, r31
lw r30, [r30]
br r30
interrupt_exception:
SAVE_ALL
mfcr r4, cr2
nop
lw r16, [r28, TI_REGS]
sw r0, [r28, TI_REGS]
la r3, ret_from_irq
srli r4, r4, 18 # get ecr.ip[7:2], interrupt No.
mv r5, r0
j do_IRQ
ENTRY(handle_nmi) # NMI #1
SAVE_ALL
mv r4, r0
la r8, nmi_exception_handler
brl r8
j restore_all
ENTRY(handle_adelinsn) # AdEL-instruction #2
SAVE_ALL
mfcr r8, cr6
nop
nop
sw r8, [r0, PT_EMA]
mv r4, r0
la r8, do_adelinsn
brl r8
mv r4, r0
j ret_from_exception
nop
ENTRY(handle_ibe) # BusEL-instruction #5
SAVE_ALL
mv r4, r0
la r8, do_be
brl r8
mv r4, r0
j ret_from_exception
nop
ENTRY(handle_pel) # P-EL #6
SAVE_ALL
mv r4, r0
la r8, do_pel
brl r8
mv r4, r0
j ret_from_exception
nop
ENTRY(handle_ccu) # CCU #8
SAVE_ALL
mv r4, r0
la r8, do_ccu
brl r8
mv r4, r0
j ret_from_exception
nop
ENTRY(handle_ri) # RI #9
SAVE_ALL
mv r4, r0
la r8, do_ri
brl r8
mv r4, r0
j ret_from_exception
nop
ENTRY(handle_tr) # Trap #10
SAVE_ALL
mv r4, r0
la r8, do_tr
brl r8
mv r4, r0
j ret_from_exception
nop
ENTRY(handle_adedata) # AdES-instruction #12
SAVE_ALL
mfcr r8, cr6
nop
nop
sw r8, [r0, PT_EMA]
mv r4, r0
la r8, do_adedata
brl r8
mv r4, r0
j ret_from_exception
nop
ENTRY(handle_cee) # CeE #16
SAVE_ALL
mv r4, r0
la r8, do_cee
brl r8
mv r4, r0
j ret_from_exception
nop
ENTRY(handle_cpe) # CpE #17
SAVE_ALL
mv r4, r0
la r8, do_cpe
brl r8
mv r4, r0
j ret_from_exception
nop
ENTRY(handle_dbe) # BusEL-data #18
SAVE_ALL
mv r4, r0
la r8, do_be
brl r8
mv r4, r0
j ret_from_exception
nop
ENTRY(handle_reserved) # others
SAVE_ALL
mv r4, r0
la r8, do_reserved
brl r8
mv r4, r0
j ret_from_exception
nop
#ifndef CONFIG_PREEMPT
#define resume_kernel restore_all
#else
#define __ret_from_irq ret_from_exception
#endif
.align 2
#ifndef CONFIG_PREEMPT
ENTRY(ret_from_exception)
disable_irq # preempt stop
nop
j __ret_from_irq
nop
#endif
ENTRY(ret_from_irq)
sw r16, [r28, TI_REGS]
ENTRY(__ret_from_irq)
lw r8, [r0, PT_PSR] # returning to kernel mode?
andri.c r8, r8, KU_USER
beq resume_kernel
resume_userspace:
disable_irq
lw r6, [r28, TI_FLAGS] # current->work
li r8, _TIF_WORK_MASK
and.c r8, r8, r6 # ignoring syscall_trace
bne work_pending
nop
j restore_all
nop
#ifdef CONFIG_PREEMPT
resume_kernel:
disable_irq
lw r8, [r28, TI_PRE_COUNT]
cmpz.c r8
bne restore_all
need_resched:
lw r8, [r28, TI_FLAGS]
andri.c r9, r8, _TIF_NEED_RESCHED
beq restore_all
lw r8, [r28, PT_PSR] # Interrupts off?
andri.c r8, r8, 1
beq restore_all
bl preempt_schedule_irq
nop
j need_resched
nop
#endif
ENTRY(ret_from_kernel_thread)
bl schedule_tail # r4=struct task_struct *prev
nop
mv r4, r13
brl r12
j syscall_exit
ENTRY(ret_from_fork)
bl schedule_tail # r4=struct task_struct *prev
ENTRY(syscall_exit)
nop
disable_irq
lw r6, [r28, TI_FLAGS] # current->work
li r8, _TIF_WORK_MASK
and.c r8, r6, r8
bne syscall_exit_work
ENTRY(restore_all) # restore full frame
RESTORE_ALL_AND_RET
work_pending:
andri.c r8, r6, _TIF_NEED_RESCHED # r6 is preloaded with TI_FLAGS
beq work_notifysig
work_resched:
bl schedule
nop
disable_irq
lw r6, [r28, TI_FLAGS]
li r8, _TIF_WORK_MASK
and.c r8, r6, r8 # is there any work to be done
# other than syscall tracing?
beq restore_all
andri.c r8, r6, _TIF_NEED_RESCHED
bne work_resched
work_notifysig:
mv r4, r0
li r5, 0
bl do_notify_resume # r6 already loaded
nop
j resume_userspace
nop
ENTRY(syscall_exit_work)
li r8, _TIF_SYSCALL_TRACE
and.c r8, r8, r6 # r6 is preloaded with TI_FLAGS
beq work_pending # trace bit set?
nop
enable_irq
mv r4, r0
li r5, 1
bl do_syscall_trace
nop
b resume_userspace
nop
.macro save_context reg
sw r12, [\reg, THREAD_REG12];
sw r13, [\reg, THREAD_REG13];
sw r14, [\reg, THREAD_REG14];
sw r15, [\reg, THREAD_REG15];
sw r16, [\reg, THREAD_REG16];
sw r17, [\reg, THREAD_REG17];
sw r18, [\reg, THREAD_REG18];
sw r19, [\reg, THREAD_REG19];
sw r20, [\reg, THREAD_REG20];
sw r21, [\reg, THREAD_REG21];
sw r29, [\reg, THREAD_REG29];
sw r2, [\reg, THREAD_REG2];
sw r0, [\reg, THREAD_REG0]
.endm
.macro restore_context reg
lw r12, [\reg, THREAD_REG12];
lw r13, [\reg, THREAD_REG13];
lw r14, [\reg, THREAD_REG14];
lw r15, [\reg, THREAD_REG15];
lw r16, [\reg, THREAD_REG16];
lw r17, [\reg, THREAD_REG17];
lw r18, [\reg, THREAD_REG18];
lw r19, [\reg, THREAD_REG19];
lw r20, [\reg, THREAD_REG20];
lw r21, [\reg, THREAD_REG21];
lw r29, [\reg, THREAD_REG29];
lw r0, [\reg, THREAD_REG0];
lw r2, [\reg, THREAD_REG2];
lw r3, [\reg, THREAD_REG3]
.endm
/*
* task_struct *resume(task_struct *prev, task_struct *next,
* struct thread_info *next_ti)
*/
ENTRY(resume)
mfcr r9, cr0
nop
nop
sw r9, [r4, THREAD_PSR]
save_context r4
sw r3, [r4, THREAD_REG3]
mv r28, r6
restore_context r5
mv r8, r6
addi r8, KERNEL_STACK_SIZE
subi r8, 32
la r9, kernelsp;
sw r8, [r9];
mfcr r9, cr0
ldis r7, 0x00ff
nop
and r9, r9, r7
lw r6, [r5, THREAD_PSR]
not r7, r7
and r6, r6, r7
or r6, r6, r9
mtcr r6, cr0
nop; nop; nop; nop; nop
br r3
ENTRY(handle_sys)
SAVE_ALL
sw r8, [r0, 16] # argument 5 from user r8
sw r9, [r0, 20] # argument 6 from user r9
enable_irq
sw r4, [r0, PT_ORIG_R4] #for restart syscall
sw r7, [r0, PT_ORIG_R7] #for restart syscall
sw r27, [r0, PT_IS_SYSCALL] # it from syscall
lw r9, [r0, PT_EPC] # skip syscall on return
addi r9, 4
sw r9, [r0, PT_EPC]
cmpi.c r27, __NR_syscalls # check syscall number
bcs illegal_syscall
slli r8, r27, 2 # get syscall routine
la r11, sys_call_table
add r11, r11, r8
lw r10, [r11] # get syscall entry
cmpz.c r10
beq illegal_syscall
lw r8, [r28, TI_FLAGS]
li r9, _TIF_SYSCALL_TRACE
and.c r8, r8, r9
bne syscall_trace_entry
brl r10 # Do The Real system call
cmpi.c r4, 0
blt 1f
ldi r8, 0
sw r8, [r0, PT_R7]
b 2f
1:
cmpi.c r4, -MAX_ERRNO - 1
ble 2f
ldi r8, 0x1;
sw r8, [r0, PT_R7]
neg r4, r4
2:
sw r4, [r0, PT_R4] # save result
syscall_return:
disable_irq
lw r6, [r28, TI_FLAGS] # current->work
li r8, _TIF_WORK_MASK
and.c r8, r6, r8
bne syscall_return_work
j restore_all
syscall_return_work:
j syscall_exit_work
syscall_trace_entry:
mv r16, r10
mv r4, r0
li r5, 0
bl do_syscall_trace
mv r8, r16
lw r4, [r0, PT_R4] # Restore argument registers
lw r5, [r0, PT_R5]
lw r6, [r0, PT_R6]
lw r7, [r0, PT_R7]
brl r8
li r8, -MAX_ERRNO - 1
sw r8, [r0, PT_R7] # set error flag
neg r4, r4 # error
sw r4, [r0, PT_R0] # set flag for syscall
# restarting
1: sw r4, [r0, PT_R2] # result
j syscall_exit
illegal_syscall:
ldi r4, -ENOSYS # error
sw r4, [r0, PT_ORIG_R4]
sw r4, [r0, PT_R4]
ldi r9, 1 # set error flag
sw r9, [r0, PT_R7]
j syscall_return
ENTRY(sys_rt_sigreturn)
mv r4, r0
la r8, score_rt_sigreturn
br r8

View File

@ -1,70 +0,0 @@
/*
* arch/score/kernel/head.S
*
* Score Processor version.
*
* Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
* Chen Liqin <liqin.chen@sunplusct.com>
* Lennox Wu <lennox.wu@sunplusct.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see the file COPYING, or write
* to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
.extern start_kernel
.global init_thread_union
.global kernelsp
__INIT
ENTRY(_stext)
la r30, __bss_start /* initialize BSS segment. */
la r31, _end
xor r8, r8, r8
1: cmp.c r31, r30
beq 2f
sw r8, [r30] /* clean memory. */
addi r30, 4
b 1b
2: la r28, init_thread_union /* set kernel stack. */
mv r0, r28
addi r0, KERNEL_STACK_SIZE - 32
la r30, kernelsp
sw r0, [r30]
subi r0, 4*4
xor r30, r30, r30
ori r30, 0x02 /* enable MMU. */
mtcr r30, cr4
nop
nop
nop
nop
nop
nop
nop
/* there is no parameter */
xor r4, r4, r4
xor r5, r5, r5
xor r6, r6, r6
xor r7, r7, r7
la r30, start_kernel /* jump to start_kernel */
br r30

View File

@ -1,111 +0,0 @@
/*
* arch/score/kernel/irq.c
*
* Score Processor version.
*
* Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
* Chen Liqin <liqin.chen@sunplusct.com>
* Lennox Wu <lennox.wu@sunplusct.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see the file COPYING, or write
* to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <asm/io.h>
/* the interrupt controller is hardcoded at this address */
#define SCORE_PIC ((u32 __iomem __force *)0x95F50000)
#define INT_PNDL 0
#define INT_PNDH 1
#define INT_PRIORITY_M 2
#define INT_PRIORITY_SG0 4
#define INT_PRIORITY_SG1 5
#define INT_PRIORITY_SG2 6
#define INT_PRIORITY_SG3 7
#define INT_MASKL 8
#define INT_MASKH 9
/*
* handles all normal device IRQs
*/
asmlinkage void do_IRQ(int irq)
{
irq_enter();
generic_handle_irq(irq);
irq_exit();
}
static void score_mask(struct irq_data *d)
{
unsigned int irq_source = 63 - d->irq;
if (irq_source < 32)
__raw_writel((__raw_readl(SCORE_PIC + INT_MASKL) | \
(1 << irq_source)), SCORE_PIC + INT_MASKL);
else
__raw_writel((__raw_readl(SCORE_PIC + INT_MASKH) | \
(1 << (irq_source - 32))), SCORE_PIC + INT_MASKH);
}
static void score_unmask(struct irq_data *d)
{
unsigned int irq_source = 63 - d->irq;
if (irq_source < 32)
__raw_writel((__raw_readl(SCORE_PIC + INT_MASKL) & \
~(1 << irq_source)), SCORE_PIC + INT_MASKL);
else
__raw_writel((__raw_readl(SCORE_PIC + INT_MASKH) & \
~(1 << (irq_source - 32))), SCORE_PIC + INT_MASKH);
}
struct irq_chip score_irq_chip = {
.name = "Score7-level",
.irq_mask = score_mask,
.irq_mask_ack = score_mask,
.irq_unmask = score_unmask,
};
/*
* initialise the interrupt system
*/
void __init init_IRQ(void)
{
int index;
unsigned long target_addr;
for (index = 0; index < NR_IRQS; ++index)
irq_set_chip_and_handler(index, &score_irq_chip,
handle_level_irq);
for (target_addr = IRQ_VECTOR_BASE_ADDR;
target_addr <= IRQ_VECTOR_END_ADDR;
target_addr += IRQ_VECTOR_SIZE)
memcpy((void *)target_addr, \
interrupt_exception_vector, IRQ_VECTOR_SIZE);
__raw_writel(0xffffffff, SCORE_PIC + INT_MASKL);
__raw_writel(0xffffffff, SCORE_PIC + INT_MASKH);
__asm__ __volatile__(
"mtcr %0, cr3\n\t"
: : "r" (EXCEPTION_VECTOR_BASE_ADDR | \
VECTOR_ADDRESS_OFFSET_MODE16));
}
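As a hedged illustration of the mask arithmetic used by score_mask()/score_unmask() above: Linux IRQ numbers map to hardware sources as 63 - irq, with sources below 32 controlled through INT_MASKL and the rest through INT_MASKH. The helper below only restates that mapping; its name is hypothetical.

/* Illustrative only: which PIC mask register and bit a given IRQ uses. */
static void example_pic_bit(unsigned int irq, unsigned int *reg, unsigned int *bit)
{
	unsigned int irq_source = 63 - irq;	/* hardware source number */

	*reg = (irq_source < 32) ? INT_MASKL : INT_MASKH;
	*bit = irq_source & 31;			/* == irq_source - 32 when high */
}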

Some files were not shown because too many files have changed in this diff.