Merge git://git.linux-xtensa.org/kernel/xtensa-feed

* git://git.linux-xtensa.org/kernel/xtensa-feed:
  [patch 1/2] Xtensa: enable arbitrary tty speed setting ioctls
  [patch 2/2] xtensa console.c: remove duplicate #include
  [XTENSA] Add support for cache-aliasing
  [XTENSA] Add kernel module support
  [XTENSA] Add support for executable/non-executable feature in the mmu
  [XTENSA] Use the generic version of get_order
  [XTENSA] Initialize semaphore_wake_lock
  [XTENSA] Add typecast macro for constants
  [XTENSA] Fix timer instabilities.
  [XTENSA] Fix fadvise64_64
  [XTENSA] Remove extraneous include statement
  [XTENSA] Move string-io functions to io.c from pci.c
  [XTENSA] Move pre-initialized structures to init_task.c
  [XTENSA] Add freestanding option to CFLAGS
  [XTENSA] Add getpgrp system-call to unistd.h
  [XTENSA] add missing system calls
  [XTENSA] fix wrong usage of __init and __initdata in traps.c
Commit 2605a103ca by Linus Torvalds, 2007-09-14 17:07:33 -07:00
36 changed files with 1522 additions and 711 deletions


@ -27,7 +27,12 @@ platform-$(CONFIG_XTENSA_PLATFORM_ISS) := iss
PLATFORM = $(platform-y)
export PLATFORM
CFLAGS += -pipe -mlongcalls # temporarily until string.h is fixed
cflags-y += -ffreestanding
cflags-y += -pipe -mlongcalls
CFLAGS += $(cflags-y)
KBUILD_DEFCONFIG := iss_defconfig


@ -7,7 +7,7 @@ extra-y := head.o vmlinux.lds
obj-y := align.o entry.o irq.o coprocessor.o process.o ptrace.o semaphore.o \
setup.o signal.o syscall.o time.o traps.o vectors.o platform.o \
pci-dma.o pci-dma.o init_task.o io.o
## windowspill.o


@ -18,12 +18,13 @@
#include <linux/stddef.h> #include <linux/stddef.h>
#include <linux/thread_info.h> #include <linux/thread_info.h>
#include <linux/ptrace.h> #include <linux/ptrace.h>
#include <linux/mm.h>
#include <asm/ptrace.h> #include <asm/ptrace.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#define DEFINE(sym, val) asm volatile("\n->" #sym " %0 " #val : : "i" (val)) #define DEFINE(sym, val) asm volatile("\n->" #sym " %0 " #val : : "i" (val))
#define BLANK() asm volatile("\n->" : : )
int main(void) int main(void)
{ {
@ -63,7 +64,6 @@ int main(void)
DEFINE(PT_SIZE, sizeof(struct pt_regs)); DEFINE(PT_SIZE, sizeof(struct pt_regs));
DEFINE(PT_AREG_END, offsetof (struct pt_regs, areg[XCHAL_NUM_AREGS])); DEFINE(PT_AREG_END, offsetof (struct pt_regs, areg[XCHAL_NUM_AREGS]));
DEFINE(PT_USER_SIZE, offsetof(struct pt_regs, areg[XCHAL_NUM_AREGS])); DEFINE(PT_USER_SIZE, offsetof(struct pt_regs, areg[XCHAL_NUM_AREGS]));
BLANK();
/* struct task_struct */ /* struct task_struct */
DEFINE(TASK_PTRACE, offsetof (struct task_struct, ptrace)); DEFINE(TASK_PTRACE, offsetof (struct task_struct, ptrace));
@ -73,27 +73,26 @@ int main(void)
DEFINE(TASK_THREAD, offsetof (struct task_struct, thread)); DEFINE(TASK_THREAD, offsetof (struct task_struct, thread));
DEFINE(TASK_THREAD_INFO, offsetof (struct task_struct, stack)); DEFINE(TASK_THREAD_INFO, offsetof (struct task_struct, stack));
DEFINE(TASK_STRUCT_SIZE, sizeof (struct task_struct)); DEFINE(TASK_STRUCT_SIZE, sizeof (struct task_struct));
BLANK();
/* struct thread_info (offset from start_struct) */ /* struct thread_info (offset from start_struct) */
DEFINE(THREAD_RA, offsetof (struct task_struct, thread.ra)); DEFINE(THREAD_RA, offsetof (struct task_struct, thread.ra));
DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp)); DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp));
DEFINE(THREAD_CP_SAVE, offsetof (struct task_struct, thread.cp_save)); DEFINE(THREAD_CP_SAVE, offsetof (struct task_struct, thread.cp_save));
DEFINE(THREAD_CURRENT_DS, offsetof (struct task_struct, thread.current_ds)); DEFINE(THREAD_CURRENT_DS, offsetof (struct task_struct, thread.current_ds));
BLANK();
/* struct mm_struct */ /* struct mm_struct */
DEFINE(MM_USERS, offsetof(struct mm_struct, mm_users)); DEFINE(MM_USERS, offsetof(struct mm_struct, mm_users));
DEFINE(MM_PGD, offsetof (struct mm_struct, pgd)); DEFINE(MM_PGD, offsetof (struct mm_struct, pgd));
DEFINE(MM_CONTEXT, offsetof (struct mm_struct, context)); DEFINE(MM_CONTEXT, offsetof (struct mm_struct, context));
BLANK();
DEFINE(PT_SINGLESTEP_BIT, PT_SINGLESTEP_BIT); /* struct page */
DEFINE(PAGE_FLAGS, offsetof(struct page, flags));
/* constants */ /* constants */
DEFINE(_CLONE_VM, CLONE_VM); DEFINE(_CLONE_VM, CLONE_VM);
DEFINE(_CLONE_UNTRACED, CLONE_UNTRACED); DEFINE(_CLONE_UNTRACED, CLONE_UNTRACED);
DEFINE(PG_ARCH_1, PG_arch_1);
return 0; return 0;
} }
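For readers unfamiliar with the asm-offsets machinery this file feeds, here is a minimal standalone sketch (the struct and symbol names are invented for the demo). Kbuild compiles the file to assembly and a sed script turns each "->" marker line into a #define in a generated header, which the assembly sources then include to get structure offsets as plain numbers.

/* asm_offsets_demo.c - compile with "gcc -S asm_offsets_demo.c" and look for
 * the "->" marker lines in the generated assembly; each one carries the
 * numeric value that kbuild would turn into "#define DEMO_B <offset>".
 */
#include <stddef.h>

#define DEFINE(sym, val) asm volatile("\n->" #sym " %0 " #val : : "i" (val))

struct demo {
	int a;
	int b;
};

int main(void)
{
	DEFINE(DEMO_B, offsetof(struct demo, b));
	return 0;
}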


@ -7,7 +7,7 @@
* License. See the file "COPYING" in the main directory of this archive * License. See the file "COPYING" in the main directory of this archive
* for more details. * for more details.
* *
* Copyright (C) 2004-2005 by Tensilica Inc. * Copyright (C) 2004-2007 by Tensilica Inc.
* *
* Chris Zankel <chris@zankel.net> * Chris Zankel <chris@zankel.net>
* *
@ -169,7 +169,7 @@ _user_exception:
* We have to save all registers up to the first '1' from * We have to save all registers up to the first '1' from
* the right, except the current frame (bit 0). * the right, except the current frame (bit 0).
* Assume a2 is: 001001000110001 * Assume a2 is: 001001000110001
* All regiser frames starting from the top fiel to the marked '1' * All register frames starting from the top field to the marked '1'
* must be saved. * must be saved.
*/ */
@ -1572,10 +1572,12 @@ ENTRY(fast_second_level_miss)
l32i a0, a1, TASK_MM # tsk->mm l32i a0, a1, TASK_MM # tsk->mm
beqz a0, 9f beqz a0, 9f
8: rsr a1, EXCVADDR # fault address
_PGD_OFFSET(a0, a1, a1) /* We deliberately destroy a3 that holds the exception table. */
8: rsr a3, EXCVADDR # fault address
_PGD_OFFSET(a0, a3, a1)
l32i a0, a0, 0 # read pmdval l32i a0, a0, 0 # read pmdval
//beqi a0, _PAGE_USER, 2f
beqz a0, 2f beqz a0, 2f
/* Read ptevaddr and convert to top of page-table page. /* Read ptevaddr and convert to top of page-table page.
@ -1588,7 +1590,7 @@ ENTRY(fast_second_level_miss)
* The messy computation for 'pteval' above really simplifies * The messy computation for 'pteval' above really simplifies
* into the following: * into the following:
* *
* pteval = ((pmdval - PAGE_OFFSET) & PAGE_MASK) | PAGE_KERNEL * pteval = ((pmdval - PAGE_OFFSET) & PAGE_MASK) | PAGE_DIRECTORY
*/ */
movi a1, -PAGE_OFFSET movi a1, -PAGE_OFFSET
@ -1596,20 +1598,34 @@ ENTRY(fast_second_level_miss)
extui a1, a0, 0, PAGE_SHIFT # ... & PAGE_MASK extui a1, a0, 0, PAGE_SHIFT # ... & PAGE_MASK
xor a0, a0, a1 xor a0, a0, a1
movi a1, _PAGE_DIRECTORY
movi a1, PAGE_DIRECTORY
or a0, a0, a1 # ... | PAGE_DIRECTORY or a0, a0, a1 # ... | PAGE_DIRECTORY
rsr a1, PTEVADDR /*
srli a1, a1, PAGE_SHIFT * We utilize all three wired-ways (7-9) to hold pmd translations.
slli a1, a1, PAGE_SHIFT # ptevaddr & PAGE_MASK * Memory regions are mapped to the DTLBs according to bits 28 and 29.
addi a1, a1, DTLB_WAY_PGD # ... + way_number * This allows to map the three most common regions to three different
* DTLBs:
* 0,1 -> way 7 program (0040.0000) and virtual (c000.0000)
* 2 -> way 8 shared libraries (2000.0000)
* 3 -> way 0 stack (3000.0000)
*/
wdtlb a0, a1 extui a3, a3, 28, 2 # addr. bit 28 and 29 0,1,2,3
rsr a1, PTEVADDR
addx2 a3, a3, a3 # -> 0,3,6,9
srli a1, a1, PAGE_SHIFT
extui a3, a3, 2, 2 # -> 0,0,1,2
slli a1, a1, PAGE_SHIFT # ptevaddr & PAGE_MASK
addi a3, a3, DTLB_WAY_PGD
add a1, a1, a3 # ... + way_number
3: wdtlb a0, a1
dsync dsync
/* Exit critical section. */ /* Exit critical section. */
4: movi a3, exc_table # restore a3
movi a0, 0 movi a0, 0
s32i a0, a3, EXC_TABLE_FIXUP s32i a0, a3, EXC_TABLE_FIXUP
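The extui/addx2/extui sequence above turns address bits 28-29 into a wired DTLB way index. A C rendition of that arithmetic, assuming DTLB_WAY_PGD is 7 as the comment's way numbering implies (regions 0-3 then land in ways 7, 7, 8 and 9):

/* Sketch only; the real work is done in the assembly above. */
static unsigned int pgd_dtlb_way(unsigned long vaddr)
{
	unsigned int region = (vaddr >> 28) & 3;   /* extui a3, a3, 28, 2 -> 0..3  */
	unsigned int idx = (region * 3) >> 2;      /* addx2 + extui -> 0, 0, 1, 2  */

	return idx + 7;                            /* + DTLB_WAY_PGD (assumed 7)   */
}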
@ -1636,8 +1652,76 @@ ENTRY(fast_second_level_miss)
9: l32i a0, a1, TASK_ACTIVE_MM # unlikely case mm == 0 9: l32i a0, a1, TASK_ACTIVE_MM # unlikely case mm == 0
j 8b j 8b
#if (DCACHE_WAY_SIZE > PAGE_SIZE)
2: /* Special case for cache aliasing.
* We (should) only get here if a clear_user_page, copy_user_page
* or the aliased cache flush functions got preemptively interrupted
* by another task. Re-establish temporary mapping to the
* TLBTEMP_BASE areas.
*/
/* We shouldn't be in a double exception */
l32i a0, a2, PT_DEPC
bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 2f
/* Make sure the exception originated in the special functions */
movi a0, __tlbtemp_mapping_start
rsr a3, EPC_1
bltu a3, a0, 2f
movi a0, __tlbtemp_mapping_end
bgeu a3, a0, 2f
/* Check if excvaddr was in one of the TLBTEMP_BASE areas. */
movi a3, TLBTEMP_BASE_1
rsr a0, EXCVADDR
bltu a0, a3, 2f
addi a1, a0, -(2 << (DCACHE_ALIAS_ORDER + PAGE_SHIFT))
bgeu a1, a3, 2f
/* Check if we have to restore an ITLB mapping. */
movi a1, __tlbtemp_mapping_itlb
rsr a3, EPC_1
sub a3, a3, a1
/* Calculate VPN */
movi a1, PAGE_MASK
and a1, a1, a0
/* Jump for ITLB entry */
bgez a3, 1f
/* We can use up to two TLBTEMP areas, one for src and one for dst. */
extui a3, a0, PAGE_SHIFT + DCACHE_ALIAS_ORDER, 1
add a1, a3, a1
/* PPN is in a6 for the first TLBTEMP area and in a7 for the second. */
mov a0, a6
movnez a0, a7, a3
j 3b
/* ITLB entry. We only use dst in a6. */
1: witlb a6, a1
isync
j 4b
#endif // DCACHE_WAY_SIZE > PAGE_SIZE
2: /* Invalid PGD, default exception handling */ 2: /* Invalid PGD, default exception handling */
movi a3, exc_table
rsr a1, DEPC rsr a1, DEPC
xsr a3, EXCSAVE_1 xsr a3, EXCSAVE_1
s32i a1, a2, PT_AREG2 s32i a1, a2, PT_AREG2
@ -1682,15 +1766,15 @@ ENTRY(fast_store_prohibited)
8: rsr a1, EXCVADDR # fault address 8: rsr a1, EXCVADDR # fault address
_PGD_OFFSET(a0, a1, a4) _PGD_OFFSET(a0, a1, a4)
l32i a0, a0, 0 l32i a0, a0, 0
//beqi a0, _PAGE_USER, 2f # FIXME use _PAGE_INVALID
beqz a0, 2f beqz a0, 2f
/* Note that we assume _PAGE_WRITABLE_BIT is only set if pte is valid.*/
_PTE_OFFSET(a0, a1, a4) _PTE_OFFSET(a0, a1, a4)
l32i a4, a0, 0 # read pteval l32i a4, a0, 0 # read pteval
movi a1, _PAGE_VALID | _PAGE_RW bbci.l a4, _PAGE_WRITABLE_BIT, 2f
bnall a4, a1, 2f
movi a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_WRENABLE movi a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE
or a4, a4, a1 or a4, a4, a1
rsr a1, EXCVADDR rsr a1, EXCVADDR
s32i a4, a0, 0 s32i a4, a0, 0
@ -1700,10 +1784,7 @@ ENTRY(fast_store_prohibited)
dhwb a0, 0 dhwb a0, 0
#endif #endif
pdtlb a0, a1 pdtlb a0, a1
beqz a0, 1f
idtlb a0 // FIXME do we need this?
wdtlb a4, a0 wdtlb a4, a0
1:
/* Exit critical section. */ /* Exit critical section. */
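The rewritten fast_store_prohibited path above is software dirty/accessed tracking: a store to a page that Linux marks writable but that is not yet hardware-writable gets its PTE upgraded on the spot and the stale DTLB entry refreshed. A rough standalone C model (the bit values and TLB helper are invented stand-ins, not the real asm/pgtable.h definitions):

#include <stdint.h>

/* Illustrative bit values only. */
#define _PAGE_WRITABLE  (1u << 8)
#define _PAGE_ACCESSED  (1u << 4)
#define _PAGE_DIRTY     (1u << 7)
#define _PAGE_HW_WRITE  (1u << 1)

/* Stand-in for the pdtlb/wdtlb sequence at the end of the handler. */
static void update_dtlb_entry(uintptr_t vaddr, uint32_t pteval) { (void)vaddr; (void)pteval; }

static int fast_store_fixup(uint32_t *pte, uintptr_t fault_addr)
{
	uint32_t pteval = *pte;

	if (!(pteval & _PAGE_WRITABLE))          /* bbci.l ..., _PAGE_WRITABLE_BIT, 2f */
		return 0;                        /* genuine protection fault */

	pteval |= _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE;
	*pte = pteval;                           /* s32i a4, a0, 0 */

	update_dtlb_entry(fault_addr, pteval);
	return 1;
}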


@ -0,0 +1,38 @@
/*
* arch/xtensa/kernel/init_task.c
*
* Xtensa Processor version.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2007 Tensilica Inc.
*
* Chris Zankel <chris@zankel.net>
*/
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/init_task.h>
#include <linux/module.h>
#include <linux/mqueue.h>
#include <asm/uaccess.h>
static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
struct mm_struct init_mm = INIT_MM(init_mm);
EXPORT_SYMBOL(init_mm);
union thread_union init_thread_union
__attribute__((__section__(".data.init_task"))) =
{ INIT_THREAD_INFO(init_task) };
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);

arch/xtensa/kernel/io.c (new file, 75 lines)

@ -0,0 +1,75 @@
/*
* arch/xtensa/io.c
*
* IO primitives
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* Copied from sparc.
*
* Chris Zankel <chris@zankel.net>
*
*/
#include <asm/io.h>
#include <asm/byteorder.h>
void outsb(unsigned long addr, const void *src, unsigned long count) {
while (count) {
count -= 1;
writeb(*(const char *)src, addr);
src += 1;
addr += 1;
}
}
void outsw(unsigned long addr, const void *src, unsigned long count) {
while (count) {
count -= 2;
writew(*(const short *)src, addr);
src += 2;
addr += 2;
}
}
void outsl(unsigned long addr, const void *src, unsigned long count) {
while (count) {
count -= 4;
writel(*(const long *)src, addr);
src += 4;
addr += 4;
}
}
void insb(unsigned long addr, void *dst, unsigned long count) {
while (count) {
count -= 1;
*(unsigned char *)dst = readb(addr);
dst += 1;
addr += 1;
}
}
void insw(unsigned long addr, void *dst, unsigned long count) {
while (count) {
count -= 2;
*(unsigned short *)dst = readw(addr);
dst += 2;
addr += 2;
}
}
void insl(unsigned long addr, void *dst, unsigned long count) {
while (count) {
count -= 4;
/*
* XXX I am sure we are in for an unaligned trap here.
*/
*(unsigned long *)dst = readl(addr);
dst += 4;
addr += 4;
}
}
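A hypothetical caller of the helpers above. Note that, as written, the loops advance both the bus address and the buffer and consume two bytes of count per 16-bit transfer, so count is effectively a byte count and the routines behave like a 16-bit-wide copy from a memory-mapped window rather than repeated reads of one port; "win_base" is an invented example address.

static void fetch_packet(unsigned long win_base, void *buf, unsigned long len)
{
	insw(win_base, buf, len & ~1UL);   /* len rounded down to whole halfwords */
}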


@ -7,7 +7,7 @@
* License. See the file "COPYING" in the main directory of this archive * License. See the file "COPYING" in the main directory of this archive
* for more details. * for more details.
* *
* Copyright (C) 2001 - 2005 Tensilica Inc. * Copyright (C) 2001 - 2006 Tensilica Inc.
* *
* Chris Zankel <chris@zankel.net> * Chris Zankel <chris@zankel.net>
* *
@ -22,57 +22,216 @@
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/cache.h> #include <linux/cache.h>
LIST_HEAD(module_buf_list); #undef DEBUG_RELOCATE
void *module_alloc(unsigned long size) void *module_alloc(unsigned long size)
{ {
panic("module_alloc not implemented"); if (size == 0)
return NULL;
return vmalloc(size);
} }
void module_free(struct module *mod, void *module_region) void module_free(struct module *mod, void *module_region)
{ {
panic("module_free not implemented"); vfree(module_region);
/* FIXME: If module_region == mod->init_region, trim exception
table entries. */
} }
int module_frob_arch_sections(Elf32_Ehdr *hdr, int module_frob_arch_sections(Elf32_Ehdr *hdr,
Elf32_Shdr *sechdrs, Elf32_Shdr *sechdrs,
char *secstrings, char *secstrings,
struct module *me) struct module *mod)
{ {
panic("module_frob_arch_sections not implemented"); return 0;
}
static int
decode_calln_opcode (unsigned char *location)
{
#ifdef __XTENSA_EB__
return (location[0] & 0xf0) == 0x50;
#endif
#ifdef __XTENSA_EL__
return (location[0] & 0xf) == 0x5;
#endif
}
static int
decode_l32r_opcode (unsigned char *location)
{
#ifdef __XTENSA_EB__
return (location[0] & 0xf0) == 0x10;
#endif
#ifdef __XTENSA_EL__
return (location[0] & 0xf) == 0x1;
#endif
} }
int apply_relocate(Elf32_Shdr *sechdrs, int apply_relocate(Elf32_Shdr *sechdrs,
const char *strtab, const char *strtab,
unsigned int symindex, unsigned int symindex,
unsigned int relsec, unsigned int relsec,
struct module *module) struct module *mod)
{ {
panic ("apply_relocate not implemented"); printk(KERN_ERR "module %s: REL RELOCATION unsupported\n",
mod->name);
return -ENOEXEC;
} }
int apply_relocate_add(Elf32_Shdr *sechdrs, int apply_relocate_add(Elf32_Shdr *sechdrs,
const char *strtab, const char *strtab,
unsigned int symindex, unsigned int symindex,
unsigned int relsec, unsigned int relsec,
struct module *module) struct module *mod)
{ {
panic("apply_relocate_add not implemented"); unsigned int i;
Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr;
Elf32_Sym *sym;
unsigned char *location;
uint32_t value;
#ifdef DEBUG_RELOCATE
printk("Applying relocate section %u to %u\n", relsec,
sechdrs[relsec].sh_info);
#endif
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) {
location = (char *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ rela[i].r_offset;
sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+ ELF32_R_SYM(rela[i].r_info);
value = sym->st_value + rela[i].r_addend;
switch (ELF32_R_TYPE(rela[i].r_info)) {
case R_XTENSA_NONE:
case R_XTENSA_DIFF8:
case R_XTENSA_DIFF16:
case R_XTENSA_DIFF32:
case R_XTENSA_ASM_EXPAND:
break;
case R_XTENSA_32:
case R_XTENSA_PLT:
*(uint32_t *)location += value;
break;
case R_XTENSA_SLOT0_OP:
if (decode_calln_opcode(location)) {
value -= ((unsigned long)location & -4) + 4;
if ((value & 3) != 0 ||
((value + (1 << 19)) >> 20) != 0) {
printk("%s: relocation out of range, "
"section %d reloc %d "
"sym '%s'\n",
mod->name, relsec, i,
strtab + sym->st_name);
return -ENOEXEC;
}
value = (signed int)value >> 2;
#ifdef __XTENSA_EB__
location[0] = ((location[0] & ~0x3) |
((value >> 16) & 0x3));
location[1] = (value >> 8) & 0xff;
location[2] = value & 0xff;
#endif
#ifdef __XTENSA_EL__
location[0] = ((location[0] & ~0xc0) |
((value << 6) & 0xc0));
location[1] = (value >> 2) & 0xff;
location[2] = (value >> 10) & 0xff;
#endif
} else if (decode_l32r_opcode(location)) {
value -= (((unsigned long)location + 3) & -4);
if ((value & 3) != 0 ||
(signed int)value >> 18 != -1) {
printk("%s: relocation out of range, "
"section %d reloc %d "
"sym '%s'\n",
mod->name, relsec, i,
strtab + sym->st_name);
return -ENOEXEC;
}
value = (signed int)value >> 2;
#ifdef __XTENSA_EB__
location[1] = (value >> 8) & 0xff;
location[2] = value & 0xff;
#endif
#ifdef __XTENSA_EL__
location[1] = value & 0xff;
location[2] = (value >> 8) & 0xff;
#endif
}
/* FIXME: Ignore any other opcodes. The Xtensa
assembler currently assumes that the linker will
always do relaxation and so all PC-relative
operands need relocations. (The assembler also
writes out the tentative PC-relative values,
assuming no link-time relaxation, so it is usually
safe to ignore the relocations.) If the
assembler's "--no-link-relax" flag can be made to
work, and if all kernel modules can be assembled
with that flag, then unexpected relocations could
be detected here. */
break;
case R_XTENSA_SLOT1_OP:
case R_XTENSA_SLOT2_OP:
case R_XTENSA_SLOT3_OP:
case R_XTENSA_SLOT4_OP:
case R_XTENSA_SLOT5_OP:
case R_XTENSA_SLOT6_OP:
case R_XTENSA_SLOT7_OP:
case R_XTENSA_SLOT8_OP:
case R_XTENSA_SLOT9_OP:
case R_XTENSA_SLOT10_OP:
case R_XTENSA_SLOT11_OP:
case R_XTENSA_SLOT12_OP:
case R_XTENSA_SLOT13_OP:
case R_XTENSA_SLOT14_OP:
printk("%s: unexpected FLIX relocation: %u\n",
mod->name,
ELF32_R_TYPE(rela[i].r_info));
return -ENOEXEC;
case R_XTENSA_SLOT0_ALT:
case R_XTENSA_SLOT1_ALT:
case R_XTENSA_SLOT2_ALT:
case R_XTENSA_SLOT3_ALT:
case R_XTENSA_SLOT4_ALT:
case R_XTENSA_SLOT5_ALT:
case R_XTENSA_SLOT6_ALT:
case R_XTENSA_SLOT7_ALT:
case R_XTENSA_SLOT8_ALT:
case R_XTENSA_SLOT9_ALT:
case R_XTENSA_SLOT10_ALT:
case R_XTENSA_SLOT11_ALT:
case R_XTENSA_SLOT12_ALT:
case R_XTENSA_SLOT13_ALT:
case R_XTENSA_SLOT14_ALT:
printk("%s: unexpected ALT relocation: %u\n",
mod->name,
ELF32_R_TYPE(rela[i].r_info));
return -ENOEXEC;
default:
printk("%s: unexpected relocation: %u\n",
mod->name,
ELF32_R_TYPE(rela[i].r_info));
return -ENOEXEC;
}
}
return 0;
} }
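The CALLn case of R_XTENSA_SLOT0_OP encodes a word-aligned PC-relative displacement measured from the call site rounded down to a word boundary plus 4, and the check above rejects anything that does not fit the 18-bit signed word offset left after the ">> 2". A standalone sketch of that range check (addresses in main are invented):

#include <stdint.h>
#include <stdio.h>

static int calln_displacement(uint32_t location, uint32_t target, int32_t *out)
{
	uint32_t value = target - ((location & ~3u) + 4);

	/* Must be word aligned and fit in 20 signed bits before the shift. */
	if ((value & 3) != 0 || ((value + (1u << 19)) >> 20) != 0)
		return -1;

	*out = (int32_t)value >> 2;   /* encoded word offset */
	return 0;
}

int main(void)
{
	int32_t off;

	if (calln_displacement(0x10000000, 0x10000400, &off) == 0)
		printf("encoded offset: %d words\n", off);   /* prints 255 */
	return 0;
}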
int module_finalize(const Elf_Ehdr *hdr, int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs, const Elf_Shdr *sechdrs,
struct module *me) struct module *mod)
{ {
panic ("module_finalize not implemented"); return 0;
} }
void module_arch_cleanup(struct module *mod) void module_arch_cleanup(struct module *mod)
{ {
panic("module_arch_cleanup not implemented");
}
struct bug_entry *module_find_bug(unsigned long bugaddr)
{
panic("module_find_bug not implemented");
} }


@ -394,72 +394,3 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
return ret; return ret;
} }
/*
* This probably belongs here rather than ioport.c because
* we do not want this crud linked into SBus kernels.
* Also, think for a moment about likes of floppy.c that
* include architecture specific parts. They may want to redefine ins/outs.
*
* We do not use horrible macros here because we want to
* advance pointer by sizeof(size).
*/
void outsb(unsigned long addr, const void *src, unsigned long count) {
while (count) {
count -= 1;
writeb(*(const char *)src, addr);
src += 1;
addr += 1;
}
}
void outsw(unsigned long addr, const void *src, unsigned long count) {
while (count) {
count -= 2;
writew(*(const short *)src, addr);
src += 2;
addr += 2;
}
}
void outsl(unsigned long addr, const void *src, unsigned long count) {
while (count) {
count -= 4;
writel(*(const long *)src, addr);
src += 4;
addr += 4;
}
}
void insb(unsigned long addr, void *dst, unsigned long count) {
while (count) {
count -= 1;
*(unsigned char *)dst = readb(addr);
dst += 1;
addr += 1;
}
}
void insw(unsigned long addr, void *dst, unsigned long count) {
while (count) {
count -= 2;
*(unsigned short *)dst = readw(addr);
dst += 2;
addr += 2;
}
}
void insl(unsigned long addr, void *dst, unsigned long count) {
while (count) {
count -= 4;
/*
* XXX I am sure we are in for an unaligned trap here.
*/
*(unsigned long *)dst = readl(addr);
dst += 4;
addr += 4;
}
}


@ -46,20 +46,6 @@
extern void ret_from_fork(void); extern void ret_from_fork(void);
static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
struct mm_struct init_mm = INIT_MM(init_mm);
EXPORT_SYMBOL(init_mm);
union thread_union init_thread_union
__attribute__((__section__(".data.init_task"))) =
{ INIT_THREAD_INFO(init_task) };
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);
struct task_struct *current_set[NR_CPUS] = {&init_task, }; struct task_struct *current_set[NR_CPUS] = {&init_task, };
void (*pm_power_off)(void) = NULL; void (*pm_power_off)(void) = NULL;


@ -100,7 +100,7 @@ static __inline__ int waking_non_zero_trylock(struct semaphore *sem)
return ret;
}
spinlock_t semaphore_wake_lock; DEFINE_SPINLOCK(semaphore_wake_lock);
/*
* Semaphores are implemented using a two-way counter:


@ -93,3 +93,8 @@ asmlinkage long xtensa_shmat(int shmid, char __user *shmaddr, int shmflg)
return (long)ret;
}
asmlinkage long xtensa_fadvise64_64(int fd, int advice, unsigned long long offset, unsigned long long len)
{
return sys_fadvise64_64(fd, offset, len, advice);
}


@ -32,12 +32,20 @@ EXPORT_SYMBOL(rtc_lock);
#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT #ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
unsigned long ccount_per_jiffy; /* per 1/HZ */ unsigned long ccount_per_jiffy; /* per 1/HZ */
unsigned long ccount_nsec; /* nsec per ccount increment */ unsigned long nsec_per_ccount; /* nsec per ccount increment */
#endif #endif
unsigned int last_ccount_stamp;
static long last_rtc_update = 0; static long last_rtc_update = 0;
/*
* Scheduler clock - returns current time in nanosec units.
*/
unsigned long long sched_clock(void)
{
return (unsigned long long)jiffies * (1000000000 / HZ);
}
static irqreturn_t timer_interrupt(int irq, void *dev_id); static irqreturn_t timer_interrupt(int irq, void *dev_id);
static struct irqaction timer_irqaction = { static struct irqaction timer_irqaction = {
.handler = timer_interrupt, .handler = timer_interrupt,
@ -69,7 +77,6 @@ void __init time_init(void)
xtime.tv_nsec = 0; xtime.tv_nsec = 0;
last_rtc_update = xtime.tv_sec = sec_n; last_rtc_update = xtime.tv_sec = sec_n;
last_ccount_stamp = get_ccount();
set_normalized_timespec(&wall_to_monotonic, set_normalized_timespec(&wall_to_monotonic,
-xtime.tv_sec, -xtime.tv_nsec); -xtime.tv_sec, -xtime.tv_nsec);
@ -85,7 +92,7 @@ int do_settimeofday(struct timespec *tv)
{ {
time_t wtm_sec, sec = tv->tv_sec; time_t wtm_sec, sec = tv->tv_sec;
long wtm_nsec, nsec = tv->tv_nsec; long wtm_nsec, nsec = tv->tv_nsec;
unsigned long ccount; unsigned long delta;
if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
return -EINVAL; return -EINVAL;
@ -97,8 +104,10 @@ int do_settimeofday(struct timespec *tv)
* wall time. Discover what correction gettimeofday() would have * wall time. Discover what correction gettimeofday() would have
* made, and then undo it! * made, and then undo it!
*/ */
ccount = get_ccount();
nsec -= (ccount - last_ccount_stamp) * CCOUNT_NSEC; delta = CCOUNT_PER_JIFFY;
delta += get_ccount() - get_linux_timer();
nsec -= delta * NSEC_PER_CCOUNT;
wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec); wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec); wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
@ -117,17 +126,21 @@ EXPORT_SYMBOL(do_settimeofday);
void do_gettimeofday(struct timeval *tv) void do_gettimeofday(struct timeval *tv)
{ {
unsigned long flags; unsigned long flags;
unsigned long sec, usec, delta, seq; unsigned long volatile sec, usec, delta, seq;
do { do {
seq = read_seqbegin_irqsave(&xtime_lock, flags); seq = read_seqbegin_irqsave(&xtime_lock, flags);
delta = get_ccount() - last_ccount_stamp;
sec = xtime.tv_sec; sec = xtime.tv_sec;
usec = (xtime.tv_nsec / NSEC_PER_USEC); usec = (xtime.tv_nsec / NSEC_PER_USEC);
delta = get_linux_timer() - get_ccount();
} while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
usec += (delta * CCOUNT_NSEC) / NSEC_PER_USEC; usec += (((unsigned long) CCOUNT_PER_JIFFY - delta)
* (unsigned long) NSEC_PER_CCOUNT) / NSEC_PER_USEC;
for (; usec >= 1000000; sec++, usec -= 1000000) for (; usec >= 1000000; sec++, usec -= 1000000)
; ;
@ -158,9 +171,12 @@ again:
write_seqlock(&xtime_lock); write_seqlock(&xtime_lock);
last_ccount_stamp = next; do_timer(1); /* Linux handler in kernel/timer.c */
/* Note that writing CCOMPARE clears the interrupt. */
next += CCOUNT_PER_JIFFY; next += CCOUNT_PER_JIFFY;
do_timer (1); /* Linux handler in kernel/timer.c */ set_linux_timer(next);
if (ntp_synced() && if (ntp_synced() &&
xtime.tv_sec - last_rtc_update >= 659 && xtime.tv_sec - last_rtc_update >= 659 &&
@ -175,19 +191,15 @@ again:
write_sequnlock(&xtime_lock); write_sequnlock(&xtime_lock);
} }
/* NOTE: writing CCOMPAREn clears the interrupt. */ /* Allow platform to do something useful (Wdog). */
set_linux_timer (next); platform_heartbeat();
/* Make sure we didn't miss any tick... */ /* Make sure we didn't miss any tick... */
if ((signed long)(get_ccount() - next) > 0) if ((signed long)(get_ccount() - next) > 0)
goto again; goto again;
/* Allow platform to do something useful (Wdog). */
platform_heartbeat();
return IRQ_HANDLED; return IRQ_HANDLED;
} }
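The reworked do_gettimeofday() above interpolates within the current jiffy using CCOUNT: get_linux_timer() returns the CCOMPARE value programmed for the next tick, so the cycles already spent in this jiffy are CCOUNT_PER_JIFFY minus the distance to that compare value. A small userspace-style model of that computation (the constants are illustrative, standing in for the platform's CCOUNT calibration):

#include <stdio.h>

#define CCOUNT_PER_JIFFY  100000UL   /* example: 10 MHz clock, HZ = 100 */
#define NSEC_PER_CCOUNT   100UL      /* 1e9 / 1e7 */
#define NSEC_PER_USEC     1000UL

/* Microseconds elapsed since the last timer tick. */
static unsigned long usec_since_tick(unsigned long ccount, unsigned long ccompare_next)
{
	unsigned long delta = ccompare_next - ccount;   /* cycles still to go */

	return ((CCOUNT_PER_JIFFY - delta) * NSEC_PER_CCOUNT) / NSEC_PER_USEC;
}

int main(void)
{
	/* 40000 cycles into a 100000-cycle jiffy -> 4000 us into the tick. */
	printf("%lu us\n", usec_since_tick(140000UL, 200000UL));
	return 0;
}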


@ -83,7 +83,7 @@ typedef struct {
void* handler; void* handler;
} dispatch_init_table_t; } dispatch_init_table_t;
dispatch_init_table_t __init dispatch_init_table[] = { static dispatch_init_table_t __initdata dispatch_init_table[] = {
{ EXCCAUSE_ILLEGAL_INSTRUCTION, 0, do_illegal_instruction}, { EXCCAUSE_ILLEGAL_INSTRUCTION, 0, do_illegal_instruction},
{ EXCCAUSE_SYSTEM_CALL, KRNL, fast_syscall_kernel }, { EXCCAUSE_SYSTEM_CALL, KRNL, fast_syscall_kernel },
@ -305,7 +305,7 @@ do_debug(struct pt_regs *regs)
#define set_handler(idx,handler) (exc_table[idx] = (unsigned long) (handler)) #define set_handler(idx,handler) (exc_table[idx] = (unsigned long) (handler))
void trap_init(void) void __init trap_init(void)
{ {
int i; int i;


@ -5,9 +5,5 @@
# removes any old dependencies. DON'T put your own dependencies here
# unless it's something special (ie not a .c file).
#
# Note 2! The CFLAGS definition is now in the main makefile...
obj-y := init.o fault.o tlb.o misc.o obj-y := init.o fault.o tlb.o misc.o cache.o
obj-m :=
obj-n :=
obj- :=

arch/xtensa/mm/cache.c (new file, 256 lines)

@ -0,0 +1,256 @@
/*
* arch/xtensa/mm/cache.c
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001-2006 Tensilica Inc.
*
* Chris Zankel <chris@zankel.net>
* Joe Taylor
* Marc Gauthier
*
*/
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/bootmem.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <asm/pgtable.h>
#include <asm/bootparam.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
//#define printd(x...) printk(x)
#define printd(x...) do { } while(0)
/*
* Note:
* The kernel provides one architecture bit PG_arch_1 in the page flags that
* can be used for cache coherency.
*
* I$-D$ coherency.
*
* The Xtensa architecture doesn't keep the instruction cache coherent with
* the data cache. We use the architecture bit to indicate if the caches
* are coherent. The kernel clears this bit whenever a page is added to the
* page cache. At that time, the caches might not be in sync. We, therefore,
* define this flag as 'clean' if set.
*
* D-cache aliasing.
*
* With cache aliasing, we have to always flush the cache when pages are
* unmapped (see tlb_start_vma()). So, we use this flag to indicate a dirty
* page.
*
*
*
*/
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
/*
* Any time the kernel writes to a user page cache page, or it is about to
* read from a page cache page this routine is called.
*
*/
void flush_dcache_page(struct page *page)
{
struct address_space *mapping = page_mapping(page);
/*
* If we have a mapping but the page is not mapped to user-space
* yet, we simply mark this page dirty and defer flushing the
* caches until update_mmu().
*/
if (mapping && !mapping_mapped(mapping)) {
if (!test_bit(PG_arch_1, &page->flags))
set_bit(PG_arch_1, &page->flags);
return;
} else {
unsigned long phys = page_to_phys(page);
unsigned long temp = page->index << PAGE_SHIFT;
unsigned long alias = !(DCACHE_ALIAS_EQ(temp, phys));
unsigned long virt;
/*
* Flush the page in kernel space and user space.
* Note that we can omit that step if aliasing is not
* an issue, but we do have to synchronize I$ and D$
* if we have a mapping.
*/
if (!alias && !mapping)
return;
__flush_invalidate_dcache_page((long)page_address(page));
virt = TLBTEMP_BASE_1 + (temp & DCACHE_ALIAS_MASK);
if (alias)
__flush_invalidate_dcache_page_alias(virt, phys);
if (mapping)
__invalidate_icache_page_alias(virt, phys);
}
/* There shouldn't be an entry in the cache for this page anymore. */
}
/*
* For now, flush the whole cache. FIXME??
*/
void flush_cache_range(struct vm_area_struct* vma,
unsigned long start, unsigned long end)
{
__flush_invalidate_dcache_all();
__invalidate_icache_all();
}
/*
* Remove any entry in the cache for this page.
*
* Note that this function is only called for user pages, so use the
* alias versions of the cache flush functions.
*/
void flush_cache_page(struct vm_area_struct* vma, unsigned long address,
unsigned long pfn)
{
/* Note that we have to use the 'alias' address to avoid multi-hit */
unsigned long phys = page_to_phys(pfn_to_page(pfn));
unsigned long virt = TLBTEMP_BASE_1 + (address & DCACHE_ALIAS_MASK);
__flush_invalidate_dcache_page_alias(virt, phys);
__invalidate_icache_page_alias(virt, phys);
}
#endif
void
update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t pte)
{
unsigned long pfn = pte_pfn(pte);
struct page *page;
if (!pfn_valid(pfn))
return;
page = pfn_to_page(pfn);
/* Invalidate old entry in TLBs */
invalidate_itlb_mapping(addr);
invalidate_dtlb_mapping(addr);
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) {
unsigned long vaddr = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
unsigned long paddr = (unsigned long) page_address(page);
unsigned long phys = page_to_phys(page);
__flush_invalidate_dcache_page(paddr);
__flush_invalidate_dcache_page_alias(vaddr, phys);
__invalidate_icache_page_alias(vaddr, phys);
clear_bit(PG_arch_1, &page->flags);
}
#else
if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)
&& (vma->vm_flags & VM_EXEC) != 0) {
unsigned long vaddr = addr & PAGE_MASK;
__flush_dcache_page(vaddr);
__invalidate_icache_page(vaddr);
set_bit(PG_arch_1, &page->flags);
}
#endif
}
/*
* access_process_vm() has called get_user_pages(), which has done a
* flush_dcache_page() on the page.
*/
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long vaddr, void *dst, const void *src,
unsigned long len)
{
unsigned long phys = page_to_phys(page);
unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));
/* Flush and invalidate user page if aliased. */
if (alias) {
unsigned long temp = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
__flush_invalidate_dcache_page_alias(temp, phys);
}
/* Copy data */
memcpy(dst, src, len);
/*
* Flush and invalidate kernel page if aliased and synchronize
* data and instruction caches for executable pages.
*/
if (alias) {
unsigned long temp = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
__flush_invalidate_dcache_range((unsigned long) dst, len);
if ((vma->vm_flags & VM_EXEC) != 0) {
__invalidate_icache_page_alias(temp, phys);
}
} else if ((vma->vm_flags & VM_EXEC) != 0) {
__flush_dcache_range((unsigned long)dst,len);
__invalidate_icache_range((unsigned long) dst, len);
}
}
extern void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long vaddr, void *dst, const void *src,
unsigned long len)
{
unsigned long phys = page_to_phys(page);
unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));
/*
* Flush user page if aliased.
* (Note: a simple flush would be sufficient)
*/
if (alias) {
unsigned long temp = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
__flush_invalidate_dcache_page_alias(temp, phys);
}
memcpy(dst, src, len);
}
#endif


@ -24,6 +24,8 @@
unsigned long asid_cache = ASID_USER_FIRST; unsigned long asid_cache = ASID_USER_FIRST;
void bad_page_fault(struct pt_regs*, unsigned long, int); void bad_page_fault(struct pt_regs*, unsigned long, int);
#undef DEBUG_PAGE_FAULT
/* /*
* This routine handles page faults. It determines the address, * This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate * and the problem, and then passes it off to one of the appropriate
@ -64,7 +66,7 @@ void do_page_fault(struct pt_regs *regs)
exccause == EXCCAUSE_ITLB_MISS || exccause == EXCCAUSE_ITLB_MISS ||
exccause == EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0; exccause == EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0;
#if 0 #ifdef DEBUG_PAGE_FAULT
printk("[%s:%d:%08x:%d:%08x:%s%s]\n", current->comm, current->pid, printk("[%s:%d:%08x:%d:%08x:%s%s]\n", current->comm, current->pid,
address, exccause, regs->pc, is_write? "w":"", is_exec? "x":""); address, exccause, regs->pc, is_write? "w":"", is_exec? "x":"");
#endif #endif
@ -219,7 +221,7 @@ bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
/* Are we prepared to handle this kernel fault? */ /* Are we prepared to handle this kernel fault? */
if ((entry = search_exception_tables(regs->pc)) != NULL) { if ((entry = search_exception_tables(regs->pc)) != NULL) {
#if 1 #ifdef DEBUG_PAGE_FAULT
printk(KERN_DEBUG "%s: Exception at pc=%#010lx (%lx)\n", printk(KERN_DEBUG "%s: Exception at pc=%#010lx (%lx)\n",
current->comm, regs->pc, entry->fixup); current->comm, regs->pc, entry->fixup);
#endif #endif


@ -15,40 +15,24 @@
* Kevin Chea * Kevin Chea
*/ */
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/bootmem.h> #include <linux/bootmem.h>
#include <linux/swap.h> #include <linux/swap.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/bootparam.h> #include <asm/bootparam.h>
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
#include <asm/tlb.h> #include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/pgalloc.h> #include <asm/pgalloc.h>
#include <asm/pgtable.h>
#define DEBUG 0
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
//static DEFINE_SPINLOCK(tlb_lock);
/*
* This flag is used to indicate that the page was mapped and modified in
* kernel space, so the cache is probably dirty at that address.
* If cache aliasing is enabled and the page color mismatches, update_mmu_cache
* synchronizes the caches if this bit is set.
*/
#define PG_cache_clean PG_arch_1
/* References to section boundaries */ /* References to section boundaries */
@ -323,228 +307,22 @@ void show_mem(void)
printk("%d free pages\n", free); printk("%d free pages\n", free);
} }
/* ------------------------------------------------------------------------- */ struct kmem_cache *pgtable_cache __read_mostly;
#if (DCACHE_WAY_SIZE > PAGE_SIZE) static void pgd_ctor(void *addr, struct kmem_cache *cache, unsigned long flags)
/*
* With cache aliasing, the page color of the page in kernel space and user
* space might mismatch. We temporarily map the page to a different virtual
* address with the same color and clear the page there.
*/
void clear_user_page(void *kaddr, unsigned long vaddr, struct page* page)
{ {
pte_t* ptep = (pte_t*)addr;
int i;
/* There shouldn't be any entries for this page. */ for (i = 0; i < 1024; i++, ptep++)
pte_clear(NULL, 0, ptep);
__flush_invalidate_dcache_page_phys(__pa(page_address(page)));
if (!PAGE_COLOR_EQ(vaddr, kaddr)) {
unsigned long v, p;
/* Temporarily map page to DTLB_WAY_DCACHE_ALIAS0. */
spin_lock(&tlb_lock);
p = (unsigned long)pte_val((mk_pte(page,PAGE_KERNEL)));
kaddr = (void*)PAGE_COLOR_MAP0(vaddr);
v = (unsigned long)kaddr | DTLB_WAY_DCACHE_ALIAS0;
__asm__ __volatile__("wdtlb %0,%1; dsync" : :"a" (p), "a" (v));
clear_page(kaddr);
spin_unlock(&tlb_lock);
} else {
clear_page(kaddr);
}
/* We need to make sure that i$ and d$ are coherent. */
clear_bit(PG_cache_clean, &page->flags);
} }
/* void __init pgtable_cache_init(void)
* With cache aliasing, we have to make sure that the page color of the page
* in kernel space matches that of the virtual user address before we read
* the page. If the page color differ, we create a temporary DTLB entry with
* the corrent page color and use this 'temporary' address as the source.
* We then use the same approach as in clear_user_page and copy the data
* to the kernel space and clear the PG_cache_clean bit to synchronize caches
* later.
*
* Note:
* Instead of using another 'way' for the temporary DTLB entry, we could
* probably use the same entry that points to the kernel address (after
* saving the original value and restoring it when we are done).
*/
void copy_user_page(void* to, void* from, unsigned long vaddr,
struct page* to_page)
{ {
/* There shouldn't be any entries for the new page. */ pgtable_cache = kmem_cache_create("pgd",
PAGE_SIZE, PAGE_SIZE,
__flush_invalidate_dcache_page_phys(__pa(page_address(to_page))); SLAB_HWCACHE_ALIGN,
pgd_ctor);
spin_lock(&tlb_lock);
if (!PAGE_COLOR_EQ(vaddr, from)) {
unsigned long v, p, t;
__asm__ __volatile__ ("pdtlb %1,%2; rdtlb1 %0,%1"
: "=a"(p), "=a"(t) : "a"(from));
from = (void*)PAGE_COLOR_MAP0(vaddr);
v = (unsigned long)from | DTLB_WAY_DCACHE_ALIAS0;
__asm__ __volatile__ ("wdtlb %0,%1; dsync" ::"a" (p), "a" (v));
}
if (!PAGE_COLOR_EQ(vaddr, to)) {
unsigned long v, p;
p = (unsigned long)pte_val((mk_pte(to_page,PAGE_KERNEL)));
to = (void*)PAGE_COLOR_MAP1(vaddr);
v = (unsigned long)to | DTLB_WAY_DCACHE_ALIAS1;
__asm__ __volatile__ ("wdtlb %0,%1; dsync" ::"a" (p), "a" (v));
}
copy_page(to, from);
spin_unlock(&tlb_lock);
/* We need to make sure that i$ and d$ are coherent. */
clear_bit(PG_cache_clean, &to_page->flags);
} }
/*
* Any time the kernel writes to a user page cache page, or it is about to
* read from a page cache page this routine is called.
*
* Note:
* The kernel currently only provides one architecture bit in the page
* flags that we use for I$/D$ coherency. Maybe, in future, we can
* use a sepearte bit for deferred dcache aliasing:
* If the page is not mapped yet, we only need to set a flag,
* if mapped, we need to invalidate the page.
*/
// FIXME: we probably need this for WB caches not only for Page Coloring..
void flush_dcache_page(struct page *page)
{
unsigned long addr = __pa(page_address(page));
struct address_space *mapping = page_mapping(page);
__flush_invalidate_dcache_page_phys(addr);
if (!test_bit(PG_cache_clean, &page->flags))
return;
/* If this page hasn't been mapped, yet, handle I$/D$ coherency later.*/
#if 0
if (mapping && !mapping_mapped(mapping))
clear_bit(PG_cache_clean, &page->flags);
else
#endif
__invalidate_icache_page_phys(addr);
}
void flush_cache_range(struct vm_area_struct* vma, unsigned long s,
unsigned long e)
{
__flush_invalidate_cache_all();
}
void flush_cache_page(struct vm_area_struct* vma, unsigned long address,
unsigned long pfn)
{
struct page *page = pfn_to_page(pfn);
/* Remove any entry for the old mapping. */
if (current->active_mm == vma->vm_mm) {
unsigned long addr = __pa(page_address(page));
__flush_invalidate_dcache_page_phys(addr);
if ((vma->vm_flags & VM_EXEC) != 0)
__invalidate_icache_page_phys(addr);
} else {
BUG();
}
}
#endif /* (DCACHE_WAY_SIZE > PAGE_SIZE) */
pte_t* pte_alloc_one_kernel (struct mm_struct* mm, unsigned long addr)
{
pte_t* pte = (pte_t*)__get_free_pages(GFP_KERNEL|__GFP_REPEAT, 0);
if (likely(pte)) {
pte_t* ptep = (pte_t*)(pte_val(*pte) + PAGE_OFFSET);
int i;
for (i = 0; i < 1024; i++, ptep++)
pte_clear(mm, addr, ptep);
}
return pte;
}
struct page* pte_alloc_one(struct mm_struct *mm, unsigned long addr)
{
struct page *page;
page = alloc_pages(GFP_KERNEL | __GFP_REPEAT, 0);
if (likely(page)) {
pte_t* ptep = kmap_atomic(page, KM_USER0);
int i;
for (i = 0; i < 1024; i++, ptep++)
pte_clear(mm, addr, ptep);
kunmap_atomic(ptep, KM_USER0);
}
return page;
}
/*
* Handle D$/I$ coherency.
*
* Note:
* We only have one architecture bit for the page flags, so we cannot handle
* cache aliasing, yet.
*/
void
update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t pte)
{
unsigned long pfn = pte_pfn(pte);
struct page *page;
unsigned long vaddr = addr & PAGE_MASK;
if (!pfn_valid(pfn))
return;
page = pfn_to_page(pfn);
invalidate_itlb_mapping(addr);
invalidate_dtlb_mapping(addr);
/* We have a new mapping. Use it. */
write_dtlb_entry(pte, dtlb_probe(addr));
/* If the processor can execute from this page, synchronize D$/I$. */
if ((vma->vm_flags & VM_EXEC) != 0) {
write_itlb_entry(pte, itlb_probe(addr));
/* Synchronize caches, if not clean. */
if (!test_and_set_bit(PG_cache_clean, &page->flags)) {
__flush_dcache_page(vaddr);
__invalidate_icache_page(vaddr);
}
}
}


@ -7,29 +7,33 @@
* License. See the file "COPYING" in the main directory of this archive * License. See the file "COPYING" in the main directory of this archive
* for more details. * for more details.
* *
* Copyright (C) 2001 - 2005 Tensilica Inc. * Copyright (C) 2001 - 2007 Tensilica Inc.
* *
* Chris Zankel <chris@zankel.net> * Chris Zankel <chris@zankel.net>
*/ */
/* Note: we might want to implement some of the loops as zero-overhead-loops,
* where applicable and if supported by the processor.
*/
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/asmmacro.h> #include <asm/asmmacro.h>
#include <asm/cacheasm.h> #include <asm/cacheasm.h>
#include <asm/tlbflush.h>
/* clear_page (page) */
/*
* clear_page and clear_user_page are the same for non-cache-aliased configs.
*
* clear_page (unsigned long page)
* a2
*/
ENTRY(clear_page) ENTRY(clear_page)
entry a1, 16 entry a1, 16
addi a4, a2, PAGE_SIZE
movi a3, 0
1: s32i a3, a2, 0 movi a3, 0
__loopi a2, a7, PAGE_SIZE, 32
s32i a3, a2, 0
s32i a3, a2, 4 s32i a3, a2, 4
s32i a3, a2, 8 s32i a3, a2, 8
s32i a3, a2, 12 s32i a3, a2, 12
@ -37,42 +41,277 @@ ENTRY(clear_page)
s32i a3, a2, 20 s32i a3, a2, 20
s32i a3, a2, 24 s32i a3, a2, 24
s32i a3, a2, 28 s32i a3, a2, 28
addi a2, a2, 32 __endla a2, a7, 32
blt a2, a4, 1b
retw retw
/* /*
* copy_page and copy_user_page are the same for non-cache-aliased configs.
*
* copy_page (void *to, void *from) * copy_page (void *to, void *from)
* a2 a3 * a2 a3
*/ */
ENTRY(copy_page) ENTRY(copy_page)
entry a1, 16 entry a1, 16
addi a4, a2, PAGE_SIZE
1: l32i a5, a3, 0 __loopi a2, a4, PAGE_SIZE, 32
l32i a6, a3, 4
l32i a7, a3, 8 l32i a8, a3, 0
s32i a5, a2, 0 l32i a9, a3, 4
s32i a6, a2, 4 s32i a8, a2, 0
s32i a7, a2, 8 s32i a9, a2, 4
l32i a5, a3, 12
l32i a6, a3, 16 l32i a8, a3, 8
l32i a7, a3, 20 l32i a9, a3, 12
s32i a5, a2, 12 s32i a8, a2, 8
s32i a6, a2, 16 s32i a9, a2, 12
s32i a7, a2, 20
l32i a5, a3, 24 l32i a8, a3, 16
l32i a6, a3, 28 l32i a9, a3, 20
s32i a5, a2, 24 s32i a8, a2, 16
s32i a6, a2, 28 s32i a9, a2, 20
addi a2, a2, 32
addi a3, a3, 32 l32i a8, a3, 24
blt a2, a4, 1b l32i a9, a3, 28
s32i a8, a2, 24
s32i a9, a2, 28
addi a2, a2, 32
addi a3, a3, 32
__endl a2, a4
retw retw
/*
* If we have to deal with cache aliasing, we use temporary memory mappings
* to ensure that the source and destination pages have the same color as
* the virtual address. We use way 0 and 1 for temporary mappings in such cases.
*
* The temporary DTLB entries shouldn't be flushed by interrupts, but are
* flushed by preemptive task switches. Special code in the
* fast_second_level_miss handler re-establishes the temporary mapping.
* It requires that the PPNs for the destination and source addresses are
* in a6, and a7, respectively.
*/
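The "color" these temporary mappings preserve is the part of the virtual address that selects the cache set beyond the page offset; two mappings of the same physical page conflict only when those bits differ. A self-contained illustration (the way and page sizes are example values, not a statement about any particular Xtensa configuration):

#include <stdio.h>

#define PAGE_SHIFT        12
#define DCACHE_WAY_SHIFT  14   /* example: 16 KiB per way, 4 KiB pages -> 4 colors */
#define DCACHE_ALIAS_MASK (((1UL << DCACHE_WAY_SHIFT) - 1) & ~((1UL << PAGE_SHIFT) - 1))

static unsigned long color(unsigned long addr)
{
	return (addr & DCACHE_ALIAS_MASK) >> PAGE_SHIFT;
}

int main(void)
{
	unsigned long user_vaddr = 0x2000f000;    /* color 3 */
	unsigned long kernel_vaddr = 0xd0002000;  /* color 2 */

	/* Different colors: the kernel must go through a TLBTEMP_BASE mapping
	 * with the user color, as the wdtlb sequences below do. */
	printf("aliases: %s\n", color(user_vaddr) != color(kernel_vaddr) ? "yes" : "no");
	return 0;
}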
/* TLB miss exceptions are treated special in the following region */
ENTRY(__tlbtemp_mapping_start)
#if (DCACHE_WAY_SIZE > PAGE_SIZE)
/*
* clear_user_page (void *addr, unsigned long vaddr, struct page *page)
* a2 a3 a4
*/
ENTRY(clear_user_page)
entry a1, 32
/* Mark page dirty and determine alias. */
movi a7, (1 << PG_ARCH_1)
l32i a5, a4, PAGE_FLAGS
xor a6, a2, a3
extui a3, a3, PAGE_SHIFT, DCACHE_ALIAS_ORDER
extui a6, a6, PAGE_SHIFT, DCACHE_ALIAS_ORDER
or a5, a5, a7
slli a3, a3, PAGE_SHIFT
s32i a5, a4, PAGE_FLAGS
/* Skip setting up a temporary DTLB if not aliased. */
beqz a6, 1f
/* Invalidate kernel page. */
mov a10, a2
call8 __invalidate_dcache_page
/* Setup a temporary DTLB with the color of the VPN */
movi a4, -PAGE_OFFSET + (PAGE_KERNEL | _PAGE_HW_WRITE)
movi a5, TLBTEMP_BASE_1 # virt
add a6, a2, a4 # ppn
add a2, a5, a3 # add 'color'
wdtlb a6, a2
dsync
1: movi a3, 0
__loopi a2, a7, PAGE_SIZE, 32
s32i a3, a2, 0
s32i a3, a2, 4
s32i a3, a2, 8
s32i a3, a2, 12
s32i a3, a2, 16
s32i a3, a2, 20
s32i a3, a2, 24
s32i a3, a2, 28
__endla a2, a7, 32
bnez a6, 1f
retw
/* We need to invalidate the temporary idtlb entry, if any. */
1: addi a2, a2, -PAGE_SIZE
idtlb a2
dsync
retw
/*
* copy_user_page (void *to, void *from, unsigned long vaddr, struct page *page)
* a2 a3 a4 a5
*/
ENTRY(copy_user_page)
entry a1, 32
/* Mark page dirty and determine alias for destination. */
movi a8, (1 << PG_ARCH_1)
l32i a9, a5, PAGE_FLAGS
xor a6, a2, a4
xor a7, a3, a4
extui a4, a4, PAGE_SHIFT, DCACHE_ALIAS_ORDER
extui a6, a6, PAGE_SHIFT, DCACHE_ALIAS_ORDER
extui a7, a7, PAGE_SHIFT, DCACHE_ALIAS_ORDER
or a9, a9, a8
slli a4, a4, PAGE_SHIFT
s32i a9, a5, PAGE_FLAGS
movi a5, -PAGE_OFFSET + (PAGE_KERNEL | _PAGE_HW_WRITE)
beqz a6, 1f
/* Invalidate dcache */
mov a10, a2
call8 __invalidate_dcache_page
/* Setup a temporary DTLB with a matching color. */
movi a8, TLBTEMP_BASE_1 # base
add a6, a2, a5 # ppn
add a2, a8, a4 # add 'color'
wdtlb a6, a2
dsync
/* Skip setting up a temporary DTLB for destination if not aliased. */
1: beqz a7, 1f
/* Setup a temporary DTLB with a matching color. */
movi a8, TLBTEMP_BASE_2 # base
add a7, a3, a5 # ppn
add a3, a8, a4
addi a8, a3, 1 # way1
wdtlb a7, a8
dsync
1: __loopi a2, a4, PAGE_SIZE, 32
l32i a8, a3, 0
l32i a9, a3, 4
s32i a8, a2, 0
s32i a9, a2, 4
l32i a8, a3, 8
l32i a9, a3, 12
s32i a8, a2, 8
s32i a9, a2, 12
l32i a8, a3, 16
l32i a9, a3, 20
s32i a8, a2, 16
s32i a9, a2, 20
l32i a8, a3, 24
l32i a9, a3, 28
s32i a8, a2, 24
s32i a9, a2, 28
addi a2, a2, 32
addi a3, a3, 32
__endl a2, a4
/* We need to invalidate any temporary mapping! */
bnez a6, 1f
bnez a7, 2f
retw
1: addi a2, a2, -PAGE_SIZE
idtlb a2
dsync
bnez a7, 2f
retw
2: addi a3, a3, -PAGE_SIZE+1
idtlb a3
dsync
retw
#endif
#if (DCACHE_WAY_SIZE > PAGE_SIZE)
/*
* void __flush_invalidate_dcache_page_alias (addr, phys)
* a2 a3
*/
ENTRY(__flush_invalidate_dcache_page_alias)
entry sp, 16
movi a7, 0 # required for exception handler
addi a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE)
mov a4, a2
wdtlb a6, a2
dsync
___flush_invalidate_dcache_page a2 a3
idtlb a4
dsync
retw
#endif
ENTRY(__tlbtemp_mapping_itlb)
#if (ICACHE_WAY_SIZE > PAGE_SIZE)
ENTRY(__invalidate_icache_page_alias)
entry sp, 16
addi a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE)
mov a4, a2
witlb a6, a2
isync
___invalidate_icache_page a2 a3
iitlb a4
isync
retw
#endif
/* End of special treatment in tlb miss exception */
ENTRY(__tlbtemp_mapping_end)
/* /*
* void __invalidate_icache_page(ulong start) * void __invalidate_icache_page(ulong start)
*/ */
@ -121,8 +360,6 @@ ENTRY(__flush_dcache_page)
dsync dsync
retw retw
/* /*
* void __invalidate_icache_range(ulong start, ulong size) * void __invalidate_icache_range(ulong start, ulong size)
*/ */
@ -168,7 +405,6 @@ ENTRY(__invalidate_dcache_range)
___invalidate_dcache_range a2 a3 a4 ___invalidate_dcache_range a2 a3 a4
retw retw
/* /*


@ -20,7 +20,6 @@
#include <linux/param.h>
#include <linux/serial.h>
#include <linux/serialP.h>
#include <linux/console.h>
#include <asm/uaccess.h>
#include <asm/irq.h>


@ -13,10 +13,6 @@
#ifndef _XTENSA_BUGS_H
#define _XTENSA_BUGS_H
#include <asm/processor.h> static void check_bugs(void) { }
static void __init check_bugs(void)
{
}
#endif /* _XTENSA_BUGS_H */


@ -19,6 +19,15 @@
#define DCACHE_WAY_SIZE (XCHAL_DCACHE_SIZE/XCHAL_DCACHE_WAYS)
#define ICACHE_WAY_SIZE (XCHAL_ICACHE_SIZE/XCHAL_ICACHE_WAYS)
#define DCACHE_WAY_SHIFT (XCHAL_DCACHE_SETWIDTH + XCHAL_DCACHE_LINEWIDTH)
#define ICACHE_WAY_SHIFT (XCHAL_ICACHE_SETWIDTH + XCHAL_ICACHE_LINEWIDTH)
/* Maximum cache size per way. */
#if DCACHE_WAY_SIZE >= ICACHE_WAY_SIZE
# define CACHE_WAY_SIZE DCACHE_WAY_SIZE
#else
# define CACHE_WAY_SIZE ICACHE_WAY_SIZE
#endif
#endif /* _XTENSA_CACHE_H */


@ -5,7 +5,7 @@
* License. See the file "COPYING" in the main directory of this archive * License. See the file "COPYING" in the main directory of this archive
* for more details. * for more details.
* *
* (C) 2001 - 2006 Tensilica Inc. * (C) 2001 - 2007 Tensilica Inc.
*/ */
#ifndef _XTENSA_CACHEFLUSH_H #ifndef _XTENSA_CACHEFLUSH_H
@ -18,10 +18,7 @@
#include <asm/page.h> #include <asm/page.h>
/* /*
* flush and invalidate data cache, invalidate instruction cache: * Lo-level routines for cache flushing.
*
* __flush_invalidate_cache_all()
* __flush_invalidate_cache_range(from,sze)
* *
* invalidate data or instruction cache: * invalidate data or instruction cache:
* *
@ -40,26 +37,39 @@
* __flush_invalidate_dcache_all() * __flush_invalidate_dcache_all()
* __flush_invalidate_dcache_page(adr) * __flush_invalidate_dcache_page(adr)
* __flush_invalidate_dcache_range(from,size) * __flush_invalidate_dcache_range(from,size)
*
* specials for cache aliasing:
*
* __flush_invalidate_dcache_page_alias(vaddr,paddr)
* __invalidate_icache_page_alias(vaddr,paddr)
*/ */
extern void __flush_invalidate_cache_all(void); extern void __invalidate_dcache_all(void);
extern void __flush_invalidate_cache_range(unsigned long, unsigned long);
extern void __flush_invalidate_dcache_all(void);
extern void __invalidate_icache_all(void); extern void __invalidate_icache_all(void);
extern void __invalidate_dcache_page(unsigned long); extern void __invalidate_dcache_page(unsigned long);
extern void __invalidate_icache_page(unsigned long); extern void __invalidate_icache_page(unsigned long);
extern void __invalidate_icache_range(unsigned long, unsigned long); extern void __invalidate_icache_range(unsigned long, unsigned long);
extern void __invalidate_dcache_range(unsigned long, unsigned long); extern void __invalidate_dcache_range(unsigned long, unsigned long);
#if XCHAL_DCACHE_IS_WRITEBACK #if XCHAL_DCACHE_IS_WRITEBACK
extern void __flush_invalidate_dcache_all(void);
extern void __flush_dcache_page(unsigned long); extern void __flush_dcache_page(unsigned long);
extern void __flush_dcache_range(unsigned long, unsigned long);
extern void __flush_invalidate_dcache_page(unsigned long); extern void __flush_invalidate_dcache_page(unsigned long);
extern void __flush_invalidate_dcache_range(unsigned long, unsigned long); extern void __flush_invalidate_dcache_range(unsigned long, unsigned long);
#else #else
# define __flush_dcache_page(p) do { } while(0) # define __flush_dcache_range(p,s) do { } while(0)
# define __flush_invalidate_dcache_page(p) do { } while(0) # define __flush_dcache_page(p) do { } while(0)
# define __flush_invalidate_dcache_range(p,s) do { } while(0) # define __flush_invalidate_dcache_page(p) __invalidate_dcache_page(p)
# define __flush_invalidate_dcache_range(p,s) __invalidate_dcache_range(p,s)
#endif
#if (DCACHE_WAY_SIZE > PAGE_SIZE)
extern void __flush_invalidate_dcache_page_alias(unsigned long, unsigned long);
#endif
#if (ICACHE_WAY_SIZE > PAGE_SIZE)
extern void __invalidate_icache_page_alias(unsigned long, unsigned long);
#endif #endif
/* /*
@ -71,17 +81,21 @@ extern void __flush_invalidate_dcache_range(unsigned long, unsigned long);
* (see also Documentation/cachetlb.txt) * (see also Documentation/cachetlb.txt)
*/ */
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK #if (DCACHE_WAY_SIZE > PAGE_SIZE)
#define flush_cache_all() __flush_invalidate_cache_all(); #define flush_cache_all() \
#define flush_cache_mm(mm) __flush_invalidate_cache_all(); do { \
#define flush_cache_dup_mm(mm) __flush_invalidate_cache_all(); __flush_invalidate_dcache_all(); \
__invalidate_icache_all(); \
} while (0)
#define flush_cache_vmap(start,end) __flush_invalidate_cache_all(); #define flush_cache_mm(mm) flush_cache_all()
#define flush_cache_vunmap(start,end) __flush_invalidate_cache_all(); #define flush_cache_dup_mm(mm) flush_cache_mm(mm)
#define flush_cache_vmap(start,end) flush_cache_all()
#define flush_cache_vunmap(start,end) flush_cache_all()
extern void flush_dcache_page(struct page*); extern void flush_dcache_page(struct page*);
extern void flush_cache_range(struct vm_area_struct*, ulong, ulong); extern void flush_cache_range(struct vm_area_struct*, ulong, ulong);
extern void flush_cache_page(struct vm_area_struct*, unsigned long, unsigned long); extern void flush_cache_page(struct vm_area_struct*, unsigned long, unsigned long);
@ -101,24 +115,39 @@ extern void flush_cache_page(struct vm_area_struct*, unsigned long, unsigned lon
#endif #endif
/* Ensure consistency between data and instruction cache. */
#define flush_icache_range(start,end) \ #define flush_icache_range(start,end) \
__invalidate_icache_range(start,(end)-(start)) do { \
__flush_dcache_range(start, (end) - (start)); \
__invalidate_icache_range(start,(end) - (start)); \
} while (0)
/* This is not required, see Documentation/cachetlb.txt */ /* This is not required, see Documentation/cachetlb.txt */
#define flush_icache_page(vma,page) do { } while (0)
#define flush_icache_page(vma,page) do { } while(0)
#define flush_dcache_mmap_lock(mapping) do { } while (0) #define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0) #define flush_dcache_mmap_unlock(mapping) do { } while (0)
#if (DCACHE_WAY_SIZE > PAGE_SIZE)
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ extern void copy_to_user_page(struct vm_area_struct*, struct page*,
memcpy(dst, src, len) unsigned long, void*, const void*, unsigned long);
extern void copy_from_user_page(struct vm_area_struct*, struct page*,
unsigned long, void*, const void*, unsigned long);
#else
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
memcpy(dst, src, len); \
__flush_dcache_range((unsigned long) dst, len); \
__invalidate_icache_range((unsigned long) dst, len); \
} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
memcpy(dst, src, len) memcpy(dst, src, len)
#endif
#endif /* __KERNEL__ */
#endif /* _XTENSA_CACHEFLUSH_H */
View File
@@ -20,6 +20,56 @@
#define EM_XTENSA	94
#define EM_XTENSA_OLD	0xABC7
/* Xtensa relocations defined by the ABIs */
#define R_XTENSA_NONE 0
#define R_XTENSA_32 1
#define R_XTENSA_RTLD 2
#define R_XTENSA_GLOB_DAT 3
#define R_XTENSA_JMP_SLOT 4
#define R_XTENSA_RELATIVE 5
#define R_XTENSA_PLT 6
#define R_XTENSA_OP0 8
#define R_XTENSA_OP1 9
#define R_XTENSA_OP2 10
#define R_XTENSA_ASM_EXPAND 11
#define R_XTENSA_ASM_SIMPLIFY 12
#define R_XTENSA_GNU_VTINHERIT 15
#define R_XTENSA_GNU_VTENTRY 16
#define R_XTENSA_DIFF8 17
#define R_XTENSA_DIFF16 18
#define R_XTENSA_DIFF32 19
#define R_XTENSA_SLOT0_OP 20
#define R_XTENSA_SLOT1_OP 21
#define R_XTENSA_SLOT2_OP 22
#define R_XTENSA_SLOT3_OP 23
#define R_XTENSA_SLOT4_OP 24
#define R_XTENSA_SLOT5_OP 25
#define R_XTENSA_SLOT6_OP 26
#define R_XTENSA_SLOT7_OP 27
#define R_XTENSA_SLOT8_OP 28
#define R_XTENSA_SLOT9_OP 29
#define R_XTENSA_SLOT10_OP 30
#define R_XTENSA_SLOT11_OP 31
#define R_XTENSA_SLOT12_OP 32
#define R_XTENSA_SLOT13_OP 33
#define R_XTENSA_SLOT14_OP 34
#define R_XTENSA_SLOT0_ALT 35
#define R_XTENSA_SLOT1_ALT 36
#define R_XTENSA_SLOT2_ALT 37
#define R_XTENSA_SLOT3_ALT 38
#define R_XTENSA_SLOT4_ALT 39
#define R_XTENSA_SLOT5_ALT 40
#define R_XTENSA_SLOT6_ALT 41
#define R_XTENSA_SLOT7_ALT 42
#define R_XTENSA_SLOT8_ALT 43
#define R_XTENSA_SLOT9_ALT 44
#define R_XTENSA_SLOT10_ALT 45
#define R_XTENSA_SLOT11_ALT 46
#define R_XTENSA_SLOT12_ALT 47
#define R_XTENSA_SLOT13_ALT 48
#define R_XTENSA_SLOT14_ALT 49
/* ELF register definitions. This is needed for core dump support. */
/*
View File
@ -14,6 +14,7 @@
#ifdef __KERNEL__
#include <asm/byteorder.h>
#include <asm/page.h>
+#include <linux/kernel.h>
#include <linux/types.h>
View File
@ -91,6 +91,10 @@
#define TIOCSBRK	_IO('T', 39)	     /* BSD compatibility */
#define TIOCCBRK	_IO('T', 40)	     /* BSD compatibility */
#define TIOCGSID	_IOR('T', 41, pid_t) /* Return the session ID of FD */
+#define TCGETS2		_IOR('T', 42, struct termios2)
+#define TCSETS2		_IOW('T', 43, struct termios2)
+#define TCSETSW2	_IOW('T', 44, struct termios2)
+#define TCSETSF2	_IOW('T', 45, struct termios2)
#define TIOCGPTN	_IOR('T', 0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
#define TIOCSPTLCK	_IOW('T', 0x31, int)	      /* Lock/unlock Pty */
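
The termios2 ioctls above let user space request a line speed that has no Bxxx constant, together with BOTHER from termbits.h further below. A minimal user-space sketch (illustrative, not part of the patch; the function name and the 74880 baud figure are assumptions):

#include <sys/ioctl.h>
#include <asm/termbits.h>	/* struct termios2, TCGETS2/TCSETS2, BOTHER */

int set_arbitrary_speed(int fd, int speed)	/* e.g. speed = 74880 */
{
	struct termios2 tio;

	if (ioctl(fd, TCGETS2, &tio) < 0)
		return -1;
	tio.c_cflag &= ~CBAUD;		/* drop the legacy Bxxx code */
	tio.c_cflag |= BOTHER;		/* take the rate from c_ispeed/c_ospeed */
	tio.c_ispeed = speed;
	tio.c_ospeed = speed;
	return ioctl(fd, TCSETS2, &tio);
}
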
View File
@ -1,11 +1,11 @@
/* /*
* linux/include/asm-xtensa/page.h * include/asm-xtensa/page.h
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version2 as * it under the terms of the GNU General Public License version2 as
* published by the Free Software Foundation. * published by the Free Software Foundation.
* *
* Copyright (C) 2001 - 2005 Tensilica Inc. * Copyright (C) 2001 - 2007 Tensilica Inc.
*/ */
#ifndef _XTENSA_PAGE_H #ifndef _XTENSA_PAGE_H
@ -14,6 +14,12 @@
#ifdef __KERNEL__ #ifdef __KERNEL__
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/types.h>
#include <asm/cache.h>
/*
* Fixed TLB translations in the processor.
*/
#define XCHAL_KSEG_CACHED_VADDR 0xd0000000 #define XCHAL_KSEG_CACHED_VADDR 0xd0000000
#define XCHAL_KSEG_BYPASS_VADDR 0xd8000000 #define XCHAL_KSEG_BYPASS_VADDR 0xd8000000
@ -26,13 +32,60 @@
*/ */
#define PAGE_SHIFT		12
-#define PAGE_SIZE		(1 << PAGE_SHIFT)
+#define PAGE_SIZE		(__XTENSA_UL_CONST(1) << PAGE_SHIFT)
#define PAGE_MASK		(~(PAGE_SIZE-1))
#define PAGE_ALIGN(addr)	(((addr)+PAGE_SIZE - 1) & PAGE_MASK)

#define PAGE_OFFSET		XCHAL_KSEG_CACHED_VADDR
#define MAX_MEM_PFN		XCHAL_KSEG_SIZE
#define PGTABLE_START		0x80000000
/*
* Cache aliasing:
*
* If the cache size for one way is greater than the page size, we have to
* deal with cache aliasing. The cache index is wider than the page size:
*
* | |cache| cache index
* | pfn |off| virtual address
* |xxxx:X|zzz|
* | : | |
* | \ / | |
* |trans.| |
* | / \ | |
* |yyyy:Y|zzz| physical address
*
* When the page number is translated to the physical page address, the lowest
* bit(s) (X) that are part of the cache index are also translated (Y).
 * If this translation changes bit(s) (X), the cache index is also affected,
* thus resulting in a different cache line than before.
* The kernel does not provide a mechanism to ensure that the page color
* (represented by this bit) remains the same when allocated or when pages
* are remapped. When user pages are mapped into kernel space, the color of
* the page might also change.
*
* We use the address space VMALLOC_END ... VMALLOC_END + DCACHE_WAY_SIZE * 2
* to temporarily map a patch so we can match the color.
*/
#if DCACHE_WAY_SIZE > PAGE_SIZE
# define DCACHE_ALIAS_ORDER (DCACHE_WAY_SHIFT - PAGE_SHIFT)
# define DCACHE_ALIAS_MASK (PAGE_MASK & (DCACHE_WAY_SIZE - 1))
# define DCACHE_ALIAS(a) (((a) & DCACHE_ALIAS_MASK) >> PAGE_SHIFT)
# define DCACHE_ALIAS_EQ(a,b) ((((a) ^ (b)) & DCACHE_ALIAS_MASK) == 0)
#else
# define DCACHE_ALIAS_ORDER 0
#endif
#if ICACHE_WAY_SIZE > PAGE_SIZE
# define ICACHE_ALIAS_ORDER (ICACHE_WAY_SHIFT - PAGE_SHIFT)
# define ICACHE_ALIAS_MASK (PAGE_MASK & (ICACHE_WAY_SIZE - 1))
# define ICACHE_ALIAS(a) (((a) & ICACHE_ALIAS_MASK) >> PAGE_SHIFT)
# define ICACHE_ALIAS_EQ(a,b) ((((a) ^ (b)) & ICACHE_ALIAS_MASK) == 0)
#else
# define ICACHE_ALIAS_ORDER 0
#endif
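
A small worked example of the color computation above (illustrative, not part of the patch; the 8 KiB way size is an assumed configuration):

	/* With DCACHE_WAY_SIZE == 8192 and PAGE_SIZE == 4096:
	 *   DCACHE_ALIAS_MASK = PAGE_MASK & (8192 - 1) = 0x1000
	 *   DCACHE_ALIAS(0xd0001000) = (0xd0001000 & 0x1000) >> 12 = 1
	 *   DCACHE_ALIAS(0xd0002000) = (0xd0002000 & 0x1000) >> 12 = 0
	 * Two mappings of the same physical page whose virtual addresses
	 * differ in that bit index different cache lines, so one mapping can
	 * serve stale data until the aliases are flushed.
	 */
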
#ifdef __ASSEMBLY__ #ifdef __ASSEMBLY__
@@ -58,34 +111,23 @@ typedef struct { unsigned long pgprot; } pgprot_t;
 /*
  * Pure 2^n version of get_order
+ * Use 'nsau' instructions if supported by the processor or the generic version.
  */
-static inline int get_order(unsigned long size)
+
+#if XCHAL_HAVE_NSA
+
+static inline __attribute_const__ int get_order(unsigned long size)
 {
-	int order;
-#ifndef XCHAL_HAVE_NSU
-	unsigned long x1, x2, x4, x8, x16;
-	size = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	x1 = size & 0xAAAAAAAA;
-	x2 = size & 0xCCCCCCCC;
-	x4 = size & 0xF0F0F0F0;
-	x8 = size & 0xFF00FF00;
-	x16 = size & 0xFFFF0000;
-	order = x2 ? 2 : 0;
-	order += (x16 != 0) * 16;
-	order += (x8 != 0) * 8;
-	order += (x4 != 0) * 4;
-	order += (x1 != 0);
-	return order;
-#else
-	size = (size - 1) >> PAGE_SHIFT;
-	asm ("nsau %0, %1" : "=r" (order) : "r" (size));
-	return 32 - order;
-#endif
+	int lz;
+	asm ("nsau %0, %1" : "=r" (lz) : "r" ((size - 1) >> PAGE_SHIFT));
+	return 32 - lz;
 }
+
+#else
+
+# include <asm-generic/page.h>
+
+#endif
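
For reference, a portable sketch of what the nsau-based get_order() computes (illustrative, not part of the patch and not the asm-generic implementation):

static inline int get_order_portable(unsigned long size)
{
	int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {		/* position of the highest set bit */
		order++;
		size >>= 1;
	}
	/* e.g. PAGE_SIZE -> 0, PAGE_SIZE + 1 -> 1, 4 * PAGE_SIZE -> 2,
	 * matching 32 - nsau((size - 1) >> PAGE_SHIFT) for size >= 1. */
	return order;
}
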
struct page; struct page;
extern void clear_page(void *page); extern void clear_page(void *page);
@ -96,11 +138,11 @@ extern void copy_page(void *to, void *from);
* some extra work * some extra work
*/ */
#if (DCACHE_WAY_SIZE > PAGE_SIZE) #if DCACHE_WAY_SIZE > PAGE_SIZE
void clear_user_page(void *addr, unsigned long vaddr, struct page* page); extern void clear_user_page(void*, unsigned long, struct page*);
void copy_user_page(void *to,void* from,unsigned long vaddr,struct page* page); extern void copy_user_page(void*, void*, unsigned long, struct page*);
#else #else
# define clear_user_page(page,vaddr,pg) clear_page(page) # define clear_user_page(page, vaddr, pg) clear_page(page)
# define copy_user_page(to, from, vaddr, pg) copy_page(to, from) # define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
#endif #endif
View File
@ -1,11 +1,11 @@
/* /*
* linux/include/asm-xtensa/pgalloc.h * include/asm-xtensa/pgalloc.h
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as * it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation. * published by the Free Software Foundation.
* *
* Copyright (C) 2001-2005 Tensilica Inc. * Copyright (C) 2001-2007 Tensilica Inc.
*/ */
#ifndef _XTENSA_PGALLOC_H #ifndef _XTENSA_PGALLOC_H
@ -13,103 +13,54 @@
#ifdef __KERNEL__ #ifdef __KERNEL__
#include <linux/threads.h>
#include <linux/highmem.h> #include <linux/highmem.h>
#include <asm/processor.h>
#include <asm/cacheflush.h>
/* Cache aliasing:
*
* If the cache size for one way is greater than the page size, we have to
* deal with cache aliasing. The cache index is wider than the page size:
*
* |cache |
* |pgnum |page| virtual address
* |xxxxxX|zzzz|
* | | |
* \ / | |
* trans.| |
* / \ | |
* |yyyyyY|zzzz| physical address
*
* When the page number is translated to the physical page address, the lowest
* bit(s) (X) that are also part of the cache index are also translated (Y).
* If this translation changes this bit (X), the cache index is also afected,
* thus resulting in a different cache line than before.
* The kernel does not provide a mechanism to ensure that the page color
* (represented by this bit) remains the same when allocated or when pages
* are remapped. When user pages are mapped into kernel space, the color of
* the page might also change.
*
* We use the address space VMALLOC_END ... VMALLOC_END + DCACHE_WAY_SIZE * 2
* to temporarily map a patch so we can match the color.
*/
#if (DCACHE_WAY_SIZE > PAGE_SIZE)
# define PAGE_COLOR_MASK (PAGE_MASK & (DCACHE_WAY_SIZE-1))
# define PAGE_COLOR(a) \
(((unsigned long)(a)&PAGE_COLOR_MASK) >> PAGE_SHIFT)
# define PAGE_COLOR_EQ(a,b) \
((((unsigned long)(a) ^ (unsigned long)(b)) & PAGE_COLOR_MASK) == 0)
# define PAGE_COLOR_MAP0(v) \
(VMALLOC_END + ((unsigned long)(v) & PAGE_COLOR_MASK))
# define PAGE_COLOR_MAP1(v) \
(VMALLOC_END + ((unsigned long)(v) & PAGE_COLOR_MASK) + DCACHE_WAY_SIZE)
#endif
/* /*
* Allocating and freeing a pmd is trivial: the 1-entry pmd is * Allocating and freeing a pmd is trivial: the 1-entry pmd is
* inside the pgd, so has no extra memory associated with it. * inside the pgd, so has no extra memory associated with it.
*/ */
#define pgd_free(pgd) free_page((unsigned long)(pgd)) #define pmd_populate_kernel(mm, pmdp, ptep) \
(pmd_val(*(pmdp)) = ((unsigned long)ptep))
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK #define pmd_populate(mm, pmdp, page) \
(pmd_val(*(pmdp)) = ((unsigned long)page_to_virt(page)))
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *pte)
{
pmd_val(*(pmdp)) = (unsigned long)(pte);
__asm__ __volatile__ ("memw; dhwb %0, 0; dsync" :: "a" (pmdp));
}
static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *page)
{
pmd_val(*(pmdp)) = (unsigned long)page_to_virt(page);
__asm__ __volatile__ ("memw; dhwb %0, 0; dsync" :: "a" (pmdp));
}
#else
# define pmd_populate_kernel(mm, pmdp, pte) \
(pmd_val(*(pmdp)) = (unsigned long)(pte))
# define pmd_populate(mm, pmdp, page) \
(pmd_val(*(pmdp)) = (unsigned long)page_to_virt(page))
#endif
static inline pgd_t* static inline pgd_t*
pgd_alloc(struct mm_struct *mm) pgd_alloc(struct mm_struct *mm)
{ {
pgd_t *pgd; return (pgd_t*) __get_free_pages(GFP_KERNEL | __GFP_ZERO, PGD_ORDER);
pgd = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, PGD_ORDER);
if (likely(pgd != NULL))
__flush_dcache_page((unsigned long)pgd);
return pgd;
} }
extern pte_t* pte_alloc_one_kernel(struct mm_struct* mm, unsigned long addr); static inline void pgd_free(pgd_t *pgd)
extern struct page* pte_alloc_one(struct mm_struct* mm, unsigned long addr); {
free_page((unsigned long)pgd);
}
#define pte_free_kernel(pte) free_page((unsigned long)pte) /* Use a slab cache for the pte pages (see also sparc64 implementation) */
#define pte_free(pte) __free_page(pte)
extern struct kmem_cache *pgtable_cache;
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
return kmem_cache_alloc(pgtable_cache, GFP_KERNEL|__GFP_REPEAT);
}
static inline struct page *pte_alloc_one(struct mm_struct *mm,
unsigned long addr)
{
return virt_to_page(pte_alloc_one_kernel(mm, addr));
}
static inline void pte_free_kernel(pte_t *pte)
{
kmem_cache_free(pgtable_cache, pte);
}
static inline void pte_free(struct page *page)
{
kmem_cache_free(pgtable_cache, page_address(page));
}
#endif /* __KERNEL__ */
#endif /* _XTENSA_PGALLOC_H */
View File
@ -1,11 +1,11 @@
/* /*
* linux/include/asm-xtensa/pgtable.h * include/asm-xtensa/pgtable.h
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version2 as * it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation. * published by the Free Software Foundation.
* *
* Copyright (C) 2001 - 2005 Tensilica Inc. * Copyright (C) 2001 - 2007 Tensilica Inc.
*/ */
#ifndef _XTENSA_PGTABLE_H #ifndef _XTENSA_PGTABLE_H
@ -23,7 +23,7 @@
/* /*
* The Xtensa architecture port of Linux has a two-level page table system, * The Xtensa architecture port of Linux has a two-level page table system,
* i.e. the logical three-level Linux page table layout are folded. * i.e. the logical three-level Linux page table layout is folded.
* Each task has the following memory page tables: * Each task has the following memory page tables:
* *
* PGD table (page directory), ie. 3rd-level page table: * PGD table (page directory), ie. 3rd-level page table:
@ -43,6 +43,7 @@
* *
* The individual pages are 4 kB big with special pages for the empty_zero_page. * The individual pages are 4 kB big with special pages for the empty_zero_page.
*/ */
#define PGDIR_SHIFT 22 #define PGDIR_SHIFT 22
#define PGDIR_SIZE (1UL << PGDIR_SHIFT) #define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1)) #define PGDIR_MASK (~(PGDIR_SIZE-1))
@ -53,24 +54,26 @@
*/ */
#define PTRS_PER_PTE 1024 #define PTRS_PER_PTE 1024
#define PTRS_PER_PTE_SHIFT 10 #define PTRS_PER_PTE_SHIFT 10
#define PTRS_PER_PMD 1
#define PTRS_PER_PGD 1024 #define PTRS_PER_PGD 1024
#define PGD_ORDER 0 #define PGD_ORDER 0
#define PMD_ORDER 0
#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE) #define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
#define FIRST_USER_ADDRESS 0 #define FIRST_USER_ADDRESS 0
#define FIRST_USER_PGD_NR (FIRST_USER_ADDRESS >> PGDIR_SHIFT) #define FIRST_USER_PGD_NR (FIRST_USER_ADDRESS >> PGDIR_SHIFT)
/* virtual memory area. We keep a distance to other memory regions to be /*
* Virtual memory area. We keep a distance to other memory regions to be
* on the safe side. We also use this area for cache aliasing. * on the safe side. We also use this area for cache aliasing.
*/ */
// FIXME: virtual memory area must be configuration-dependent
#define VMALLOC_START 0xC0000000 #define VMALLOC_START 0xC0000000
#define VMALLOC_END 0xC7FF0000 #define VMALLOC_END 0xC6FEFFFF
#define TLBTEMP_BASE_1 0xC6FF0000
#define TLBTEMP_BASE_2 0xC6FF8000
#define MODULE_START 0xC7000000
#define MODULE_END 0xC7FFFFFF
/* Xtensa Linux config PTE layout (when present): /*
* Xtensa Linux config PTE layout (when present):
* 31-12: PPN * 31-12: PPN
* 11-6: Software * 11-6: Software
* 5-4: RING * 5-4: RING
@ -86,47 +89,55 @@
* See further below for PTE layout for swapped-out pages. * See further below for PTE layout for swapped-out pages.
*/ */
#define _PAGE_VALID (1<<0) /* hardware: page is accessible */ #define _PAGE_HW_EXEC (1<<0) /* hardware: page is executable */
#define _PAGE_WRENABLE (1<<1) /* hardware: page is writable */ #define _PAGE_HW_WRITE (1<<1) /* hardware: page is writable */
#define _PAGE_FILE (1<<1) /* non-linear mapping, if !present */
#define _PAGE_PROTNONE (3<<0) /* special case for VM_PROT_NONE */
/* None of these cache modes include MP coherency: */ /* None of these cache modes include MP coherency: */
#define _PAGE_NO_CACHE (0<<2) /* bypass, non-speculative */ #define _PAGE_CA_BYPASS (0<<2) /* bypass, non-speculative */
#if XCHAL_DCACHE_IS_WRITEBACK #define _PAGE_CA_WB (1<<2) /* write-back */
# define _PAGE_WRITEBACK (1<<2) /* write back */ #define _PAGE_CA_WT (2<<2) /* write-through */
# define _PAGE_WRITETHRU (2<<2) /* write through */ #define _PAGE_CA_MASK (3<<2)
#else #define _PAGE_INVALID (3<<2)
# define _PAGE_WRITEBACK (1<<2) /* assume write through */
# define _PAGE_WRITETHRU (1<<2)
#endif
#define _PAGE_NOALLOC (3<<2) /* don't allocate cache,if not cached */
#define _CACHE_MASK (3<<2)
#define _PAGE_USER (1<<4) /* user access (ring=1) */ #define _PAGE_USER (1<<4) /* user access (ring=1) */
#define _PAGE_KERNEL (0<<4) /* kernel access (ring=0) */
/* Software */ /* Software */
#define _PAGE_RW (1<<6) /* software: page writable */ #define _PAGE_WRITABLE_BIT 6
#define _PAGE_WRITABLE (1<<6) /* software: page writable */
#define _PAGE_DIRTY (1<<7) /* software: page dirty */ #define _PAGE_DIRTY (1<<7) /* software: page dirty */
#define _PAGE_ACCESSED (1<<8) /* software: page accessed (read) */ #define _PAGE_ACCESSED (1<<8) /* software: page accessed (read) */
#define _PAGE_FILE (1<<9) /* nonlinear file mapping*/
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _CACHE_MASK | _PAGE_DIRTY) /* On older HW revisions, we always have to set bit 0 */
#define _PAGE_PRESENT ( _PAGE_VALID | _PAGE_WRITEBACK | _PAGE_ACCESSED) #if XCHAL_HW_VERSION_MAJOR < 2000
# define _PAGE_VALID (1<<0)
#else
# define _PAGE_VALID 0
#endif
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_PRESENT (_PAGE_VALID | _PAGE_CA_WB | _PAGE_ACCESSED)
#ifdef CONFIG_MMU #ifdef CONFIG_MMU
# define PAGE_NONE __pgprot(_PAGE_PRESENT) #define PAGE_NONE __pgprot(_PAGE_INVALID | _PAGE_USER | _PAGE_PROTNONE)
# define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_RW) #define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER)
# define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER) #define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_HW_EXEC)
# define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER) #define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER)
# define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_KERNEL | _PAGE_WRENABLE) #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_HW_EXEC)
# define PAGE_INVALID __pgprot(_PAGE_USER) #define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITABLE)
#define PAGE_SHARED_EXEC \
__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITABLE | _PAGE_HW_EXEC)
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_HW_WRITE)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT|_PAGE_HW_WRITE|_PAGE_HW_EXEC)
# if (DCACHE_WAY_SIZE > PAGE_SIZE) #if (DCACHE_WAY_SIZE > PAGE_SIZE)
# define PAGE_DIRECTORY __pgprot(_PAGE_VALID | _PAGE_ACCESSED | _PAGE_KERNEL) # define _PAGE_DIRECTORY (_PAGE_VALID | _PAGE_ACCESSED)
# else #else
# define PAGE_DIRECTORY __pgprot(_PAGE_PRESENT | _PAGE_KERNEL) # define _PAGE_DIRECTORY (_PAGE_VALID | _PAGE_ACCESSED | _PAGE_CA_WB)
# endif #endif
#else /* no mmu */ #else /* no mmu */
@ -145,23 +156,23 @@
* What follows is the closest we can get by reasonable means.. * What follows is the closest we can get by reasonable means..
* See linux/mm/mmap.c for protection_map[] array that uses these definitions. * See linux/mm/mmap.c for protection_map[] array that uses these definitions.
*/ */
#define __P000 PAGE_NONE /* private --- */ #define __P000 PAGE_NONE /* private --- */
#define __P001 PAGE_READONLY /* private --r */ #define __P001 PAGE_READONLY /* private --r */
#define __P010 PAGE_COPY /* private -w- */ #define __P010 PAGE_COPY /* private -w- */
#define __P011 PAGE_COPY /* private -wr */ #define __P011 PAGE_COPY /* private -wr */
#define __P100 PAGE_READONLY /* private x-- */ #define __P100 PAGE_READONLY_EXEC /* private x-- */
#define __P101 PAGE_READONLY /* private x-r */ #define __P101 PAGE_READONLY_EXEC /* private x-r */
#define __P110 PAGE_COPY /* private xw- */ #define __P110 PAGE_COPY_EXEC /* private xw- */
#define __P111 PAGE_COPY /* private xwr */ #define __P111 PAGE_COPY_EXEC /* private xwr */
#define __S000 PAGE_NONE /* shared --- */ #define __S000 PAGE_NONE /* shared --- */
#define __S001 PAGE_READONLY /* shared --r */ #define __S001 PAGE_READONLY /* shared --r */
#define __S010 PAGE_SHARED /* shared -w- */ #define __S010 PAGE_SHARED /* shared -w- */
#define __S011 PAGE_SHARED /* shared -wr */ #define __S011 PAGE_SHARED /* shared -wr */
#define __S100 PAGE_READONLY /* shared x-- */ #define __S100 PAGE_READONLY_EXEC /* shared x-- */
#define __S101 PAGE_READONLY /* shared x-r */ #define __S101 PAGE_READONLY_EXEC /* shared x-r */
#define __S110 PAGE_SHARED /* shared xw- */ #define __S110 PAGE_SHARED_EXEC /* shared xw- */
#define __S111 PAGE_SHARED /* shared xwr */ #define __S111 PAGE_SHARED_EXEC /* shared xwr */
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
@ -183,35 +194,42 @@ extern pgd_t swapper_pg_dir[PAGE_SIZE/sizeof(pgd_t)];
#define pmd_page(pmd) virt_to_page(pmd_val(pmd)) #define pmd_page(pmd) virt_to_page(pmd_val(pmd))
/* /*
* The following only work if pte_present() is true. * pte status.
*/ */
#define pte_none(pte) (!(pte_val(pte) ^ _PAGE_USER)) #define pte_none(pte) (pte_val(pte) == _PAGE_INVALID)
#define pte_present(pte) (pte_val(pte) & _PAGE_VALID) #define pte_present(pte) \
(((pte_val(pte) & _PAGE_CA_MASK) != _PAGE_INVALID) \
|| ((pte_val(pte) & _PAGE_PROTNONE) == _PAGE_PROTNONE))
#define pte_clear(mm,addr,ptep) \ #define pte_clear(mm,addr,ptep) \
do { update_pte(ptep, __pte(_PAGE_USER)); } while(0) do { update_pte(ptep, __pte(_PAGE_INVALID)); } while(0)
#define pmd_none(pmd) (!pmd_val(pmd)) #define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_present(pmd) (pmd_val(pmd) & PAGE_MASK) #define pmd_present(pmd) (pmd_val(pmd) & PAGE_MASK)
#define pmd_clear(pmdp) do { set_pmd(pmdp, __pmd(0)); } while (0)
#define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK) #define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK)
#define pmd_clear(pmdp) do { set_pmd(pmdp, __pmd(0)); } while (0)
/* Note: We use the _PAGE_USER bit to indicate write-protect kernel memory */ static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITABLE; }
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= ~(_PAGE_RW | _PAGE_WRENABLE); return pte; } static inline pte_t pte_wrprotect(pte_t pte)
static inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; } { pte_val(pte) &= ~(_PAGE_WRITABLE | _PAGE_HW_WRITE); return pte; }
static inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; } static inline pte_t pte_mkclean(pte_t pte)
static inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= _PAGE_DIRTY; return pte; } { pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HW_WRITE); return pte; }
static inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; } static inline pte_t pte_mkold(pte_t pte)
static inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_RW; return pte; } { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkdirty(pte_t pte)
{ pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte)
{ pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte)
{ pte_val(pte) |= _PAGE_WRITABLE; return pte; }
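
Spelled out for a few concrete values (illustrative, not part of the patch), the pte_none()/pte_present() tests above behave as follows:

	/*   pte_none(__pte(_PAGE_INVALID))                                  -> 1
	 *	(exactly the value that pte_clear() writes)
	 *   pte_present(__pte(_PAGE_PRESENT))                               -> 1
	 *	(cache-attribute field is _PAGE_CA_WB, not _PAGE_INVALID)
	 *   pte_present(__pte(_PAGE_INVALID | _PAGE_USER | _PAGE_PROTNONE)) -> 1
	 *	(PAGE_NONE: invalid for the hardware, still mapped for Linux)
	 *   pte_present(__pte(_PAGE_INVALID))                               -> 0
	 *	(cleared or swapped entries carry neither a usable cache
	 *	 attribute nor the PROTNONE marker)
	 */
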
/* /*
* Conversion functions: convert a page and protection to a page entry, * Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to. * and a page entry and page directory to the page they refer to.
*/ */
#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT) #define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
#define pte_same(a,b) (pte_val(a) == pte_val(b)) #define pte_same(a,b) (pte_val(a) == pte_val(b))
#define pte_page(x) pfn_to_page(pte_pfn(x)) #define pte_page(x) pfn_to_page(pte_pfn(x))
@ -232,8 +250,9 @@ static inline void update_pte(pte_t *ptep, pte_t pteval)
{ {
*ptep = pteval; *ptep = pteval;
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK #if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
__asm__ __volatile__ ("memw; dhwb %0, 0; dsync" :: "a" (ptep)); __asm__ __volatile__ ("dhwb %0, 0" :: "a" (ptep));
#endif #endif
} }
struct mm_struct; struct mm_struct;
@ -249,9 +268,6 @@ static inline void
set_pmd(pmd_t *pmdp, pmd_t pmdval) set_pmd(pmd_t *pmdp, pmd_t pmdval)
{ {
*pmdp = pmdval; *pmdp = pmdval;
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
__asm__ __volatile__ ("memw; dhwb %0, 0; dsync" :: "a" (pmdp));
#endif
} }
struct vm_area_struct; struct vm_area_struct;
@ -306,52 +322,34 @@ ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
/* /*
* Encode and decode a swap entry. * Encode and decode a swap entry.
* Each PTE in a process VM's page table is either:
* "present" -- valid and not swapped out, protection bits are meaningful;
* "not present" -- which further subdivides in these two cases:
* "none" -- no mapping at all; identified by pte_none(), set by pte_clear(
* "swapped out" -- the page is swapped out, and the SWP macros below
* are used to store swap file info in the PTE itself.
* *
* In the Xtensa processor MMU, any PTE entries in user space (or anywhere * Format of swap pte:
* in virtual memory that can map differently across address spaces) * bit 0 MBZ
* must have a correct ring value that represents the RASID field that * bit 1 page-file (must be zero)
* is changed when switching address spaces. Eg. such PTE entries cannot * bits 2 - 3 page hw access mode (must be 11: _PAGE_INVALID)
* be set to ring zero, because that can cause a (global) kernel ASID * bits 4 - 5 ring protection (must be 01: _PAGE_USER)
* entry to be created in the TLBs (even with invalid cache attribute), * bits 6 - 10 swap type (5 bits -> 32 types)
* potentially causing a multihit exception when going back to another * bits 11 - 31 swap offset / PAGE_SIZE (21 bits -> 8GB)
* address space that mapped the same virtual address at another ring.
* * Format of file pte:
* SO: we avoid using ring bits (_PAGE_RING_MASK) in "not present" PTEs. * bit 0 MBZ
* We also avoid using the _PAGE_VALID bit which must be zero for non-present * bit 1 page-file (must be one: _PAGE_FILE)
* pages. * bits 2 - 3 page hw access mode (must be 11: _PAGE_INVALID)
* * bits 4 - 5 ring protection (must be 01: _PAGE_USER)
* We end up with the following available bits: 1..3 and 7..31. * bits 6 - 31 file offset / PAGE_SIZE
* We don't bother with 1..3 for now (we can use them later if needed),
* and chose to allocate 6 bits for SWP_TYPE and the remaining 19 bits
* for SWP_OFFSET. At least 5 bits are needed for SWP_TYPE, because it
* is currently implemented as an index into swap_info[MAX_SWAPFILES]
* and MAX_SWAPFILES is currently defined as 32 in <linux/swap.h>.
* However, for some reason all other architectures in the 2.4 kernel
* reserve either 6, 7, or 8 bits so I'll not detract from that for now. :)
* SWP_OFFSET is an offset into the swap file in page-size units, so
* with 4 kB pages, 19 bits supports a maximum swap file size of 2 GB.
*
* FIXME: 2 GB isn't very big. Other bits can be used to allow
* larger swap sizes. In the meantime, it appears relatively easy to get
* around the 2 GB limitation by simply using multiple swap files.
*/ */
-#define __swp_type(entry)	(((entry).val >> 7) & 0x3f)
-#define __swp_offset(entry)	((entry).val >> 13)
-#define __swp_entry(type,offs)	((swp_entry_t) {((type) << 7) | ((offs) << 13)})
+#define __swp_type(entry)	(((entry).val >> 6) & 0x1f)
+#define __swp_offset(entry)	((entry).val >> 11)
+#define __swp_entry(type,offs)	\
+	((swp_entry_t) {((type) << 6) | ((offs) << 11) | _PAGE_INVALID})
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

-#define PTE_FILE_MAX_BITS	29
-#define pte_to_pgoff(pte)	(pte_val(pte) >> 3)
-#define pgoff_to_pte(off)	((pte_t) { ((off) << 3) | _PAGE_FILE })
+#define PTE_FILE_MAX_BITS	28
+#define pte_to_pgoff(pte)	(pte_val(pte) >> 4)
+#define pgoff_to_pte(off)	\
+	((pte_t) { ((off) << 4) | _PAGE_INVALID | _PAGE_FILE })
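
A worked round trip through the swap-entry encoding (illustrative, not part of the patch):

	/* __swp_entry(3, 0x123).val
	 *	= (3 << 6) | (0x123 << 11) | _PAGE_INVALID = 0x918cc
	 * __swp_type(...)   = (0x918cc >> 6) & 0x1f = 3
	 * __swp_offset(...) = 0x918cc >> 11        = 0x123
	 * The low bits keep the _PAGE_INVALID cache-attribute value, so the
	 * MMU never treats a swapped-out entry as a valid translation.
	 */
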
#endif /* !defined (__ASSEMBLY__) */ #endif /* !defined (__ASSEMBLY__) */
@ -394,13 +392,12 @@ extern void update_mmu_cache(struct vm_area_struct * vma,
* remap a physical page `pfn' of size `size' with page protection `prot' * remap a physical page `pfn' of size `size' with page protection `prot'
* into virtual address `from' * into virtual address `from'
*/ */
#define io_remap_pfn_range(vma,from,pfn,size,prot) \ #define io_remap_pfn_range(vma,from,pfn,size,prot) \
remap_pfn_range(vma, from, pfn, size, prot) remap_pfn_range(vma, from, pfn, size, prot)
/* No page table caches to init */ extern void pgtable_cache_init(void);
#define pgtable_cache_init() do { } while (0)
typedef pte_t *pte_addr_t; typedef pte_t *pte_addr_t;
View File
@ -33,7 +33,7 @@
* the 1 GB requirement applies to the stack as well. * the 1 GB requirement applies to the stack as well.
*/ */
#define TASK_SIZE 0x40000000 #define TASK_SIZE __XTENSA_UL_CONST(0x40000000)
/* /*
* General exception cause assigned to debug exceptions. Debug exceptions go * General exception cause assigned to debug exceptions. Debug exceptions go
View File
@ -1,3 +1,13 @@
/*
* include/asm-xtensa/syscall.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2007 Tensilica Inc.
*/
struct pt_regs; struct pt_regs;
struct sigaction; struct sigaction;
asmlinkage long xtensa_execve(char*, char**, char**, struct pt_regs*); asmlinkage long xtensa_execve(char*, char**, char**, struct pt_regs*);
@ -17,4 +27,16 @@ asmlinkage long sys_rt_sigaction(int,
const struct sigaction __user *, const struct sigaction __user *,
struct sigaction __user *, struct sigaction __user *,
size_t); size_t);
asmlinkage long xtensa_shmat(int shmid, char __user *shmaddr, int shmflg); asmlinkage long xtensa_shmat(int, char __user *, int);
asmlinkage long xtensa_fadvise64_64(int, int,
unsigned long long, unsigned long long);
/* Should probably move to linux/syscalls.h */
struct pollfd;
asmlinkage long sys_pselect6(int n, fd_set __user *inp, fd_set __user *outp,
fd_set __user *exp, struct timespec __user *tsp, void __user *sig);
asmlinkage long sys_ppoll(struct pollfd __user *ufds, unsigned int nfds,
struct timespec __user *tsp, const sigset_t __user *sigmask,
size_t sigsetsize);
View File
@ -157,6 +157,7 @@ struct ktermios {
#define HUPCL 0002000 #define HUPCL 0002000
#define CLOCAL 0004000 #define CLOCAL 0004000
#define CBAUDEX 0010000 #define CBAUDEX 0010000
#define BOTHER 0010000
#define B57600 0010001 #define B57600 0010001
#define B115200 0010002 #define B115200 0010002
#define B230400 0010003 #define B230400 0010003
@ -172,10 +173,12 @@ struct ktermios {
#define B3000000 0010015 #define B3000000 0010015
#define B3500000 0010016 #define B3500000 0010016
#define B4000000 0010017 #define B4000000 0010017
#define CIBAUD 002003600000 /* input baud rate (not used) */ #define CIBAUD 002003600000 /* input baud rate */
#define CMSPAR 010000000000 /* mark or space (stick) parity */ #define CMSPAR 010000000000 /* mark or space (stick) parity */
#define CRTSCTS 020000000000 /* flow control */ #define CRTSCTS 020000000000 /* flow control */
#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
/* c_lflag bits */ /* c_lflag bits */
#define ISIG 0000001 #define ISIG 0000001
View File
@ -95,8 +95,10 @@ struct termio {
copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \ copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \
}) })
#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios)) #define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2))
#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios)) #define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2))
#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios))
#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios))
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
View File
@ -41,10 +41,10 @@
extern unsigned long ccount_per_jiffy; extern unsigned long ccount_per_jiffy;
extern unsigned long ccount_nsec; extern unsigned long ccount_nsec;
#define CCOUNT_PER_JIFFY ccount_per_jiffy #define CCOUNT_PER_JIFFY ccount_per_jiffy
#define CCOUNT_NSEC ccount_nsec #define NSEC_PER_CCOUNT ccount_nsec
#else #else
#define CCOUNT_PER_JIFFY (CONFIG_XTENSA_CPU_CLOCK*(1000000UL/HZ)) #define CCOUNT_PER_JIFFY (CONFIG_XTENSA_CPU_CLOCK*(1000000UL/HZ))
#define CCOUNT_NSEC (1000000000UL / CONFIG_XTENSA_CPU_CLOCK) #define NSEC_PER_CCOUNT (1000UL / CONFIG_XTENSA_CPU_CLOCK)
#endif #endif
View File
@ -11,14 +11,36 @@
#ifndef _XTENSA_TLB_H #ifndef _XTENSA_TLB_H
#define _XTENSA_TLB_H #define _XTENSA_TLB_H
#define tlb_start_vma(tlb,vma) do { } while (0) #include <asm/cache.h>
#define tlb_end_vma(tlb,vma) do { } while (0) #include <asm/page.h>
#define __tlb_remove_tlb_entry(tlb,pte,addr) do { } while (0)
#if (DCACHE_WAY_SIZE <= PAGE_SIZE)
/* Note, read http://lkml.org/lkml/2004/1/15/6 */
# define tlb_start_vma(tlb,vma) do { } while (0)
# define tlb_end_vma(tlb,vma) do { } while (0)
#else
# define tlb_start_vma(tlb, vma) \
do { \
if (!tlb->fullmm) \
flush_cache_range(vma, vma->vm_start, vma->vm_end); \
} while(0)
# define tlb_end_vma(tlb, vma) \
do { \
if (!tlb->fullmm) \
flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
} while(0)
#endif
#define __tlb_remove_tlb_entry(tlb,pte,addr) do { } while (0)
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) #define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
#include <asm-generic/tlb.h> #include <asm-generic/tlb.h>
#include <asm/page.h>
#define __pte_free_tlb(tlb,pte) pte_free(pte) #define __pte_free_tlb(tlb,pte) pte_free(pte)
View File
@ -11,6 +11,15 @@
#ifndef _XTENSA_TYPES_H #ifndef _XTENSA_TYPES_H
#define _XTENSA_TYPES_H #define _XTENSA_TYPES_H
#ifdef __ASSEMBLY__
# define __XTENSA_UL(x) (x)
# define __XTENSA_UL_CONST(x) x
#else
# define __XTENSA_UL(x) ((unsigned long)(x))
# define __XTENSA_UL_CONST(x) x##UL
#endif
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
typedef unsigned short umode_t; typedef unsigned short umode_t;
View File
@ -151,7 +151,7 @@ __SYSCALL( 61, sys_fcntl64, 3)
#define __NR_available62 62 #define __NR_available62 62
__SYSCALL( 62, sys_ni_syscall, 0) __SYSCALL( 62, sys_ni_syscall, 0)
#define __NR_fadvise64_64 63 #define __NR_fadvise64_64 63
__SYSCALL( 63, sys_fadvise64_64, 6) __SYSCALL( 63, xtensa_fadvise64_64, 6)
#define __NR_utime 64 /* glibc 2.3.3 ?? */ #define __NR_utime 64 /* glibc 2.3.3 ?? */
__SYSCALL( 64, sys_utime, 2) __SYSCALL( 64, sys_utime, 2)
#define __NR_utimes 65 #define __NR_utimes 65
@ -339,8 +339,8 @@ __SYSCALL(148, sys_setpgid, 2)
__SYSCALL(149, sys_getpgid, 1) __SYSCALL(149, sys_getpgid, 1)
#define __NR_getppid 150 #define __NR_getppid 150
__SYSCALL(150, sys_getppid, 0) __SYSCALL(150, sys_getppid, 0)
#define __NR_available151 151 #define __NR_getpgrp 151
__SYSCALL(151, sys_ni_syscall, 0) __SYSCALL(151, sys_getpgrp, 0)
#define __NR_reserved152 152 /* set_thread_area */ #define __NR_reserved152 152 /* set_thread_area */
__SYSCALL(152, sys_ni_syscall, 0) __SYSCALL(152, sys_ni_syscall, 0)
@ -577,7 +577,112 @@ __SYSCALL(258, sys_keyctl, 5)
#define __NR_available259 259 #define __NR_available259 259
__SYSCALL(259, sys_ni_syscall, 0) __SYSCALL(259, sys_ni_syscall, 0)
#define __NR_syscall_count 261
#define __NR_readahead 260
__SYSCALL(260, sys_readahead, 5)
#define __NR_remap_file_pages 261
__SYSCALL(261, sys_remap_file_pages, 5)
#define __NR_migrate_pages 262
__SYSCALL(262, sys_migrate_pages, 0)
#define __NR_mbind 263
__SYSCALL(263, sys_mbind, 6)
#define __NR_get_mempolicy 264
__SYSCALL(264, sys_get_mempolicy, 5)
#define __NR_set_mempolicy 265
__SYSCALL(265, sys_set_mempolicy, 3)
#define __NR_unshare 266
__SYSCALL(266, sys_unshare, 1)
#define __NR_move_pages 267
__SYSCALL(267, sys_move_pages, 0)
#define __NR_splice 268
__SYSCALL(268, sys_splice, 0)
#define __NR_tee 269
__SYSCALL(269, sys_tee, 0)
#define __NR_vmsplice 270
__SYSCALL(270, sys_vmsplice, 0)
#define __NR_available271 271
__SYSCALL(271, sys_ni_syscall, 0)
#define __NR_pselect6 272
__SYSCALL(272, sys_pselect6, 0)
#define __NR_ppoll 273
__SYSCALL(273, sys_ppoll, 0)
#define __NR_epoll_pwait 274
__SYSCALL(274, sys_epoll_pwait, 0)
#define __NR_available275 275
__SYSCALL(275, sys_ni_syscall, 0)
#define __NR_inotify_init 276
__SYSCALL(276, sys_inotify_init, 0)
#define __NR_inotify_add_watch 277
__SYSCALL(277, sys_inotify_add_watch, 3)
#define __NR_inotify_rm_watch 278
__SYSCALL(278, sys_inotify_rm_watch, 2)
#define __NR_available279 279
__SYSCALL(279, sys_ni_syscall, 0)
#define __NR_getcpu 280
__SYSCALL(280, sys_getcpu, 0)
#define __NR_kexec_load 281
__SYSCALL(281, sys_ni_syscall, 0)
#define __NR_ioprio_set 282
__SYSCALL(282, sys_ioprio_set, 2)
#define __NR_ioprio_get 283
__SYSCALL(283, sys_ioprio_get, 3)
#define __NR_set_robust_list 284
__SYSCALL(284, sys_set_robust_list, 3)
#define __NR_get_robust_list 285
__SYSCALL(285, sys_get_robust_list, 3)
#define __NR_reserved286 286 /* sync_file_rangeX */
__SYSCALL(286, sys_ni_syscall, 3)
#define __NR_available287 287
__SYSCALL(287, sys_faccessat, 0)
/* Relative File Operations */
#define __NR_openat 288
__SYSCALL(288, sys_openat, 4)
#define __NR_mkdirat 289
__SYSCALL(289, sys_mkdirat, 3)
#define __NR_mknodat 290
__SYSCALL(290, sys_mknodat, 4)
#define __NR_unlinkat 291
__SYSCALL(291, sys_unlinkat, 3)
#define __NR_renameat 292
__SYSCALL(292, sys_renameat, 4)
#define __NR_linkat 293
__SYSCALL(293, sys_linkat, 5)
#define __NR_symlinkat 294
__SYSCALL(294, sys_symlinkat, 3)
#define __NR_readlinkat 295
__SYSCALL(295, sys_readlinkat, 4)
#define __NR_utimensat 296
__SYSCALL(296, sys_utimensat, 0)
#define __NR_fchownat 297
__SYSCALL(297, sys_fchownat, 5)
#define __NR_futimesat 298
__SYSCALL(298, sys_futimesat, 4)
#define __NR_fstatat64 299
__SYSCALL(299, sys_fstatat64, 0)
#define __NR_fchmodat 300
__SYSCALL(300, sys_fchmodat, 4)
#define __NR_faccessat 301
__SYSCALL(301, sys_faccessat, 4)
#define __NR_available302 302
__SYSCALL(302, sys_ni_syscall, 0)
#define __NR_available303 303
__SYSCALL(303, sys_ni_syscall, 0)
#define __NR_signalfd 304
__SYSCALL(304, sys_signalfd, 3)
#define __NR_timerfd 305
__SYSCALL(305, sys_timerfd, 4)
#define __NR_eventfd 306
__SYSCALL(306, sys_eventfd, 1)
#define __NR_syscall_count 307
/* /*
* sysxtensa syscall handler * sysxtensa syscall handler
@ -612,8 +717,19 @@ __SYSCALL(259, sys_ni_syscall, 0)
#define __ARCH_WANT_SYS_LLSEEK #define __ARCH_WANT_SYS_LLSEEK
#define __ARCH_WANT_SYS_RT_SIGACTION #define __ARCH_WANT_SYS_RT_SIGACTION
#define __ARCH_WANT_SYS_RT_SIGSUSPEND #define __ARCH_WANT_SYS_RT_SIGSUSPEND
#define __ARCH_WANT_SYS_GETPGRP
/*
* Ignore legacy system calls in the checksyscalls.sh script
*/
#define __IGNORE_fork /* use clone */
#define __IGNORE_time
#define __IGNORE_alarm /* use setitimer */
#define __IGNORE_pause
#define __IGNORE_mmap /* use mmap2 */
#define __IGNORE_vfork /* use clone */
#define __IGNORE_fadvise64 /* use fadvise64_64 */
#endif /* __KERNEL__ */
#endif /* _XTENSA_UNISTD_H */