arch: remove tile port
The Tile architecture port was added by Chris Metcalf in 2010 and
maintained until early 2018, when he orphaned it because of his
departure from Mellanox; nobody else stepped up to maintain it. The
product line is still around in the form of the BlueField SoC, but that
no longer uses the Tile architecture. There are also still products for
sale with Tile-Gx SoCs, notably the Mikrotik CCR router family, but
they all run old (linux-3.3) kernels with many patches and will not be
upgraded by their manufacturers. There have been efforts to port both
OpenWRT and Debian to these machines, but both projects have stalled
and are very unlikely to be continued.

Given that we are reasonably sure that nobody is still using the port
with an upstream kernel, it seems better to remove it now, while it is
still in good shape, than to let it bitrot for a few years first.

Cc: Chris Metcalf <chris.d.metcalf@gmail.com>
Cc: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
Link: http://www.mellanox.com/page/npu_multicore_overview
Link: https://jenkins.debian.net/view/rebootstrap/job/rebootstrap_tilegx_gcc7/
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
commit bb9d812643
parent 4ba66a9760
@@ -305,7 +305,6 @@ Code  Seq#(hex)	Include File		Comments
 0xA0	all	linux/sdp/sdp.h		Industrial Device Project
 					<mailto:kenji@bitgate.com>
 0xA1	0	linux/vtpm_proxy.h	TPM Emulator Proxy Driver
-0xA2	00-0F	arch/tile/include/asm/hardwall.h
 0xA3	80-8F	Port ACL		in development:
 					<mailto:tlewis@mindspring.com>
 0xA3	90-9F	linux/dtlk.h
 MAINTAINERS | 13 -------------
@@ -13840,19 +13840,6 @@ S:	Orphan
 F:	drivers/net/wireless/ti/
 F:	include/linux/wl12xx.h
 
-TILE ARCHITECTURE
-W:	http://www.mellanox.com/repository/solutions/tile-scm/
-S:	Orphan
-F:	arch/tile/
-F:	drivers/char/tile-srom.c
-F:	drivers/edac/tile_edac.c
-F:	drivers/net/ethernet/tile/
-F:	drivers/rtc/rtc-tile.c
-F:	drivers/tty/hvc/hvc_tile.c
-F:	drivers/tty/serial/tilegx.c
-F:	drivers/usb/host/*-tilegx.c
-F:	include/linux/usb/tilegx.h
-
 TIMEKEEPING, CLOCKSOURCE CORE, NTP, ALARMTIMER
 M:	John Stultz <john.stultz@linaro.org>
 M:	Thomas Gleixner <tglx@linutronix.de>
 Makefile | 8 --------
@@ -339,14 +339,6 @@ ifeq ($(ARCH),sh64)
        SRCARCH := sh
 endif
 
-# Additional ARCH settings for tile
-ifeq ($(ARCH),tilepro)
-       SRCARCH := tile
-endif
-ifeq ($(ARCH),tilegx)
-       SRCARCH := tile
-endif
-
 KCONFIG_CONFIG	?= .config
 export KCONFIG_CONFIG
 
@@ -1,3 +0,0 @@
obj-y += kernel/
obj-y += mm/
@@ -1,481 +0,0 @@
# SPDX-License-Identifier: GPL-2.0
# For a description of the syntax of this configuration file,
# see Documentation/kbuild/kconfig-language.txt.

config TILE
	def_bool y
	select ARCH_HAS_DEVMEM_IS_ALLOWED
	select ARCH_HAVE_NMI_SAFE_CMPXCHG
	select ARCH_WANT_FRAME_POINTERS
	select CC_OPTIMIZE_FOR_SIZE
	select EDAC_SUPPORT
	select GENERIC_CLOCKEVENTS
	select GENERIC_FIND_FIRST_BIT
	select GENERIC_IRQ_PROBE
	select GENERIC_IRQ_SHOW
	select GENERIC_PENDING_IRQ if SMP
	select GENERIC_STRNCPY_FROM_USER
	select GENERIC_STRNLEN_USER
	select HAVE_ARCH_SECCOMP_FILTER
	select HAVE_ARCH_TRACEHOOK
	select HAVE_CONTEXT_TRACKING
	select HAVE_DEBUG_BUGVERBOSE
	select HAVE_DEBUG_KMEMLEAK
	select HAVE_DEBUG_STACKOVERFLOW
	select HAVE_DMA_API_DEBUG
	select HAVE_EXIT_THREAD
	select HAVE_KVM if !TILEGX
	select HAVE_NMI if USE_PMC
	select HAVE_PERF_EVENTS
	select HAVE_SYSCALL_TRACEPOINTS
	select MODULES_USE_ELF_RELA
	select SYSCTL_EXCEPTION_TRACE
	select SYS_HYPERVISOR
	select USER_STACKTRACE_SUPPORT
	select USE_PMC if PERF_EVENTS
	select VIRT_TO_BUS

config MMU
	def_bool y

config GENERIC_CSUM
	def_bool y

config HAVE_ARCH_ALLOC_REMAP
	def_bool y

config HAVE_SETUP_PER_CPU_AREA
	def_bool y

config NEED_PER_CPU_PAGE_FIRST_CHUNK
	def_bool y

config SYS_SUPPORTS_HUGETLBFS
	def_bool y

# Support for additional huge page sizes besides HPAGE_SIZE.
# The software support is currently only present in the TILE-Gx
# hypervisor. TILEPro in any case does not support page sizes
# larger than the default HPAGE_SIZE.
config HUGETLB_SUPER_PAGES
	depends on HUGETLB_PAGE && TILEGX
	def_bool y

config GENERIC_TIME_VSYSCALL
	def_bool y

# Enable PMC if PERF_EVENTS, OPROFILE, or WATCHPOINTS are enabled.
config USE_PMC
	bool

# FIXME: tilegx can implement a more efficient rwsem.
config RWSEM_GENERIC_SPINLOCK
	def_bool y

# We only support gcc 4.4 and above, so this should work.
config ARCH_SUPPORTS_OPTIMIZED_INLINING
	def_bool y

config ARCH_PHYS_ADDR_T_64BIT
	def_bool y

config ARCH_DMA_ADDR_T_64BIT
	def_bool y

config NEED_DMA_MAP_STATE
	def_bool y

config ARCH_HAS_DMA_SET_COHERENT_MASK
	bool

config LOCKDEP_SUPPORT
	def_bool y

config STACKTRACE_SUPPORT
	def_bool y
	select STACKTRACE

# We use discontigmem for now; at some point we may want to switch
# to sparsemem (Tilera bug 7996).
config ARCH_DISCONTIGMEM_ENABLE
	def_bool y

config ARCH_DISCONTIGMEM_DEFAULT
	def_bool y

config TRACE_IRQFLAGS_SUPPORT
	def_bool y

# SMP is required for Tilera Linux.
config SMP
	def_bool y

config HVC_TILE
	depends on TTY
	select HVC_DRIVER
	select HVC_IRQ if TILEGX
	def_bool y

# Building with ARCH=tilegx (or ARCH=tile) implies using the
# 64-bit TILE-Gx toolchain, so force CONFIG_TILEGX on.
config TILEGX
	def_bool ARCH != "tilepro"
	select ARCH_SUPPORTS_ATOMIC_RMW
	select GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
	select HAVE_ARCH_JUMP_LABEL
	select HAVE_ARCH_KGDB
	select HAVE_DYNAMIC_FTRACE
	select HAVE_FTRACE_MCOUNT_RECORD
	select HAVE_FUNCTION_GRAPH_TRACER
	select HAVE_FUNCTION_TRACER
	select HAVE_KPROBES
	select HAVE_KRETPROBES
	select SPARSE_IRQ

config TILEPRO
	def_bool !TILEGX

config 64BIT
	def_bool TILEGX

config ARCH_DEFCONFIG
	string
	default "arch/tile/configs/tilepro_defconfig" if !TILEGX
	default "arch/tile/configs/tilegx_defconfig" if TILEGX

config PGTABLE_LEVELS
	int
	default 3 if 64BIT
	default 2

source "init/Kconfig"

source "kernel/Kconfig.freezer"

menu "Tilera-specific configuration"

config NR_CPUS
	int "Maximum number of tiles (2-255)"
	range 2 255
	depends on SMP
	default "64"
	---help---
	  Building with 64 is the recommended value, but a slightly
	  smaller kernel memory footprint results from using a smaller
	  value on chips with fewer tiles.

choice
	prompt "Kernel page size"
	default PAGE_SIZE_64KB
	help
	  This lets you select the page size of the kernel. For best
	  performance on memory-intensive applications, a page size of 64KB
	  is recommended. For workloads involving many small files, many
	  connections, etc., it may be better to select 16KB, which uses
	  memory more efficiently at some cost in TLB performance.

	  Note that for TILEPro, you must also rebuild the hypervisor
	  with a matching page size.

config PAGE_SIZE_4KB
	bool "4KB" if TILEPRO

config PAGE_SIZE_16KB
	bool "16KB"

config PAGE_SIZE_64KB
	bool "64KB"

endchoice

source "kernel/Kconfig.hz"

config KEXEC
	bool "kexec system call"
	select KEXEC_CORE
	---help---
	  kexec is a system call that implements the ability to shutdown your
	  current kernel, and to start another kernel. It is like a reboot
	  but it is independent of the system firmware. It is used
	  to implement the "mboot" Tilera booter.

	  The name comes from the similarity to the exec system call.

config COMPAT
	bool "Support 32-bit TILE-Gx binaries in addition to 64-bit"
	depends on TILEGX
	select COMPAT_BINFMT_ELF
	default y
	---help---
	  If enabled, the kernel will support running TILE-Gx binaries
	  that were built with the -m32 option.

config SECCOMP
	bool "Enable seccomp to safely compute untrusted bytecode"
	depends on PROC_FS
	help
	  This kernel feature is useful for number crunching applications
	  that may need to compute untrusted bytecode during their
	  execution. By using pipes or other transports made available to
	  the process as file descriptors supporting the read/write
	  syscalls, it's possible to isolate those applications in
	  their own address space using seccomp. Once seccomp is
	  enabled via prctl, it cannot be disabled and the task is only
	  allowed to execute a few safe syscalls defined by each seccomp
	  mode.

	  If unsure, say N.

config SYSVIPC_COMPAT
	def_bool y
	depends on COMPAT && SYSVIPC

# We do not currently support disabling HIGHMEM on tilepro.
config HIGHMEM
	bool # "Support for more than 512 MB of RAM"
	default !TILEGX
	---help---
	  Linux can use the full amount of RAM in the system by
	  default. However, the address space of TILE processors is
	  only 4 Gigabytes large. That means that, if you have a large
	  amount of physical memory, not all of it can be "permanently
	  mapped" by the kernel. The physical memory that's not
	  permanently mapped is called "high memory".

	  If you are compiling a kernel which will never run on a
	  machine with more than 512 MB total physical RAM, answer
	  "false" here. This will result in the kernel mapping all of
	  physical memory into the top 1 GB of virtual memory space.

	  If unsure, say "true".

config ZONE_DMA32
	def_bool y

config IOMMU_HELPER
	bool

config NEED_SG_DMA_LENGTH
	bool

config SWIOTLB
	bool
	default TILEGX
	select DMA_DIRECT_OPS
	select IOMMU_HELPER
	select NEED_SG_DMA_LENGTH
	select ARCH_HAS_DMA_SET_COHERENT_MASK

# We do not currently support disabling NUMA.
config NUMA
	bool # "NUMA Memory Allocation and Scheduler Support"
	depends on SMP && DISCONTIGMEM
	default y
	---help---
	  NUMA memory allocation is required for TILE processors
	  unless booting with memory striping enabled in the
	  hypervisor, or with only a single memory controller.
	  It is recommended that this option always be enabled.

config NODES_SHIFT
	int "Log base 2 of the max number of memory controllers"
	default 2
	depends on NEED_MULTIPLE_NODES
	---help---
	  By default, 2, i.e. 2^2 == 4 DDR2 controllers.
	  In a system with more controllers, this value should be raised.

choice
	depends on !TILEGX
	prompt "Memory split" if EXPERT
	default VMSPLIT_3G
	---help---
	  Select the desired split between kernel and user memory.

	  If the address range available to the kernel is less than the
	  physical memory installed, the remaining memory will be available
	  as "high memory". Accessing high memory is a little more costly
	  than low memory, as it needs to be mapped into the kernel first.
	  Note that increasing the kernel address space limits the range
	  available to user programs, making the address space there
	  tighter. Selecting anything other than the default 3G/1G split
	  will also likely make your kernel incompatible with binary-only
	  kernel modules.

	  If you are not absolutely sure what you are doing, leave this
	  option alone!

	config VMSPLIT_3_75G
		bool "3.75G/0.25G user/kernel split (no kernel networking)"
	config VMSPLIT_3_5G
		bool "3.5G/0.5G user/kernel split"
	config VMSPLIT_3G
		bool "3G/1G user/kernel split"
	config VMSPLIT_2_75G
		bool "2.75G/1.25G user/kernel split (for full 1G low memory)"
	config VMSPLIT_2_5G
		bool "2.5G/1.5G user/kernel split"
	config VMSPLIT_2_25G
		bool "2.25G/1.75G user/kernel split"
	config VMSPLIT_2G
		bool "2G/2G user/kernel split"
	config VMSPLIT_1G
		bool "1G/3G user/kernel split"
endchoice

config PAGE_OFFSET
	hex
	depends on !64BIT
	default 0xF0000000 if VMSPLIT_3_75G
	default 0xE0000000 if VMSPLIT_3_5G
	default 0xB0000000 if VMSPLIT_2_75G
	default 0xA0000000 if VMSPLIT_2_5G
	default 0x90000000 if VMSPLIT_2_25G
	default 0x80000000 if VMSPLIT_2G
	default 0x40000000 if VMSPLIT_1G
	default 0xC0000000

source "mm/Kconfig"

source "kernel/Kconfig.preempt"

config CMDLINE_BOOL
	bool "Built-in kernel command line"
	default n
	---help---
	  Allow for specifying boot arguments to the kernel at
	  build time. On some systems (e.g. embedded ones), it is
	  necessary or convenient to provide some or all of the
	  kernel boot arguments with the kernel itself (that is,
	  to not rely on the boot loader to provide them.)

	  To compile command line arguments into the kernel,
	  set this option to 'Y', then fill in the
	  boot arguments in CONFIG_CMDLINE.

	  Systems with fully functional boot loaders (e.g. mboot, or
	  if booting over PCI) should leave this option set to 'N'.

config CMDLINE
	string "Built-in kernel command string"
	depends on CMDLINE_BOOL
	default ""
	---help---
	  Enter arguments here that should be compiled into the kernel
	  image and used at boot time. If the boot loader provides a
	  command line at boot time, it is appended to this string to
	  form the full kernel command line, when the system boots.

	  However, you can use the CONFIG_CMDLINE_OVERRIDE option to
	  change this behavior.

	  In most cases, the command line (whether built-in or provided
	  by the boot loader) should specify the device for the root
	  file system.

config CMDLINE_OVERRIDE
	bool "Built-in command line overrides boot loader arguments"
	default n
	depends on CMDLINE_BOOL
	---help---
	  Set this option to 'Y' to have the kernel ignore the boot loader
	  command line, and use ONLY the built-in command line.

	  This is used to work around broken boot loaders. This should
	  be set to 'N' under normal conditions.

config VMALLOC_RESERVE
	hex
	default 0x2000000

config HARDWALL
	bool "Hardwall support to allow access to user dynamic network"
	default y

config KERNEL_PL
	int "Processor protection level for kernel"
	range 1 2
	default 2 if TILEGX
	default 1 if !TILEGX
	---help---
	  Since MDE 4.2, the Tilera hypervisor runs the kernel
	  at PL2 by default. If running under an older hypervisor,
	  or as a KVM guest, you must run at PL1. (The current
	  hypervisor may also be recompiled with "make HV_PL=2" to
	  allow it to run a kernel at PL1, but clients running at PL1
	  are not expected to be supported indefinitely.)

	  If you're not sure, don't change the default.

source "arch/tile/gxio/Kconfig"

endmenu  # Tilera-specific configuration

menu "Bus options"

config PCI
	bool "PCI support"
	default y
	select PCI_DOMAINS
	select GENERIC_PCI_IOMAP
	select TILE_GXIO_TRIO if TILEGX
	select PCI_MSI if TILEGX
	---help---
	  Enable PCI root complex support, so PCIe endpoint devices can
	  be attached to the Tile chip. Many, but not all, PCI devices
	  are supported under Tilera's root complex driver.

config PCI_DOMAINS
	bool

config NO_IOMEM
	def_bool !PCI

config NO_IOPORT_MAP
	def_bool !PCI

config TILE_PCI_IO
	bool "PCI I/O space support"
	default n
	depends on PCI
	depends on TILEGX
	---help---
	  Enable PCI I/O space support on TILEGx. Since the PCI I/O space
	  is used by few modern PCIe endpoint devices, its support is disabled
	  by default to save the TRIO PIO Region resource for other purposes.

source "drivers/pci/Kconfig"

config TILE_USB
	tristate "Tilera USB host adapter support"
	default y
	depends on USB
	depends on TILEGX
	select TILE_GXIO_USB_HOST
	---help---
	  Provides USB host adapter support for the built-in EHCI and OHCI
	  interfaces on TILE-Gx chips.

endmenu

menu "Executable file formats"

source "fs/Kconfig.binfmt"

endmenu

source "net/Kconfig"

source "drivers/Kconfig"

source "fs/Kconfig"

source "arch/tile/Kconfig.debug"

source "security/Kconfig"

source "crypto/Kconfig"

source "lib/Kconfig"

source "arch/tile/kvm/Kconfig"
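The PAGE_OFFSET table in the Kconfig above is what the 32-bit TILEPro "Memory split" choice boils down to: each VMSPLIT option just moves the boundary between the user and kernel halves of the 4 GB virtual address space. The stand-alone sketch below illustrates what that boundary means for the default 3G/1G split; the 0xC0000000 constant mirrors the Kconfig default, while the helper and program structure are hypothetical and only for illustration, not taken from the tile code.

#include <stdio.h>

/* Mirrors the Kconfig default: VMSPLIT_3G gives 3 GB user / 1 GB kernel. */
#define PAGE_OFFSET 0xC0000000u

/* Illustrative helper (hypothetical name): addresses at or above
 * PAGE_OFFSET belong to the kernel mapping on a 32-bit kernel. */
static int is_kernel_address(unsigned int vaddr)
{
	return vaddr >= PAGE_OFFSET;
}

int main(void)
{
	printf("user space:   0x00000000 - 0x%08x\n", PAGE_OFFSET - 1);
	printf("kernel space: 0x%08x - 0xffffffff (%u MB)\n",
	       PAGE_OFFSET, (0u - PAGE_OFFSET) / (1024 * 1024));
	printf("0xB0000000 is %s address\n",
	       is_kernel_address(0xB0000000u) ? "a kernel" : "a user");
	return 0;
}

Picking VMSPLIT_2_75G instead would set PAGE_OFFSET to 0xB0000000, trading user address space for a full gigabyte of directly addressable low memory, as the option text above notes.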
@@ -1,26 +0,0 @@
# SPDX-License-Identifier: GPL-2.0
menu "Kernel hacking"

source "lib/Kconfig.debug"

config EARLY_PRINTK
	bool "Early printk" if EXPERT && DEBUG_KERNEL
	default y
	help
	  Write kernel log output directly via the hypervisor console.

	  This is useful for kernel debugging when your machine crashes very
	  early before the console code is initialized. For normal operation
	  it is not recommended because it looks ugly and doesn't cooperate
	  with klogd/syslogd. You should normally say N here,
	  unless you want to debug such a crash.

config TILE_HVGLUE_TRACE
	bool "Provide wrapper functions for hypervisor ABI calls"
	default n
	help
	  Provide wrapper functions for the hypervisor ABI calls
	  defined in arch/tile/kernel/hvglue.S. This allows tracing
	  mechanisms, etc., to have visibility into those calls.

endmenu
@@ -1,77 +0,0 @@
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
# This file is included by the global makefile so that you can add your own
# architecture-specific flags and dependencies. Remember to do have actions
# for "archclean" and "archdep" for cleaning up and making dependencies for
# this architecture

# If building with TILERA_ROOT set (i.e. using the Tilera Multicore
# Development Environment) we can set CROSS_COMPILE based on that.
# If we're not cross-compiling, make sure we're on the right architecture.
# Only bother to test for a few common targets, to avoid useless errors.
ifeq ($(CROSS_COMPILE),)
  ifdef TILERA_ROOT
    CROSS_COMPILE := $(TILERA_ROOT)/bin/tile-
  else
    goals := $(if $(MAKECMDGOALS), $(MAKECMDGOALS), all)
    ifneq ($(strip $(filter vmlinux modules all,$(goals))),)
      HOST_ARCH := $(shell uname -m)
      ifneq ($(HOST_ARCH),$(ARCH))
$(error Set TILERA_ROOT or CROSS_COMPILE when building $(ARCH) on $(HOST_ARCH))
      endif
    endif
  endif
endif

# The tile compiler may emit .eh_frame information for backtracing.
# In kernel modules, this causes load failures due to unsupported relocations.
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables

LIBGCC_PATH := \
  $(shell $(CC) $(KBUILD_CFLAGS) $(KCFLAGS) -print-libgcc-file-name)

# Provide the path to use for "make defconfig".
# We default to the newer TILE-Gx architecture if only "tile" is given.
ifeq ($(ARCH),tile)
  KBUILD_DEFCONFIG := tilegx_defconfig
else
  KBUILD_DEFCONFIG := $(ARCH)_defconfig
endif

# Used as a file extension when useful, e.g. head_$(BITS).o
# Not needed for (e.g.) "$(CC) -m32" since the compiler automatically
# uses the right default anyway.
export BITS
ifeq ($(CONFIG_TILEGX),y)
BITS := 64
else
BITS := 32
endif

CHECKFLAGS += -m$(BITS)

head-y := arch/tile/kernel/head_$(BITS).o

libs-y += arch/tile/lib/
libs-y += $(LIBGCC_PATH)

# See arch/tile/Kbuild for content of core part of the kernel
core-y += arch/tile/

core-$(CONFIG_TILE_GXIO) += arch/tile/gxio/

ifdef TILERA_ROOT
INSTALL_PATH ?= $(TILERA_ROOT)/tile/boot
endif

install:
	install -D -m 755 vmlinux $(INSTALL_PATH)/vmlinux-$(KERNELRELEASE)
	install -D -m 644 .config $(INSTALL_PATH)/config-$(KERNELRELEASE)
	install -D -m 644 System.map $(INSTALL_PATH)/System.map-$(KERNELRELEASE)

define archhelp
	echo '  install         - install kernel into $(INSTALL_PATH)'
endef
@@ -1,411 +0,0 @@
|
||||||
CONFIG_TILEGX=y
|
|
||||||
CONFIG_SYSVIPC=y
|
|
||||||
CONFIG_POSIX_MQUEUE=y
|
|
||||||
CONFIG_FHANDLE=y
|
|
||||||
CONFIG_AUDIT=y
|
|
||||||
CONFIG_NO_HZ=y
|
|
||||||
CONFIG_BSD_PROCESS_ACCT=y
|
|
||||||
CONFIG_BSD_PROCESS_ACCT_V3=y
|
|
||||||
CONFIG_TASKSTATS=y
|
|
||||||
CONFIG_TASK_DELAY_ACCT=y
|
|
||||||
CONFIG_TASK_XACCT=y
|
|
||||||
CONFIG_TASK_IO_ACCOUNTING=y
|
|
||||||
CONFIG_LOG_BUF_SHIFT=19
|
|
||||||
CONFIG_CGROUPS=y
|
|
||||||
CONFIG_CGROUP_DEBUG=y
|
|
||||||
CONFIG_CGROUP_DEVICE=y
|
|
||||||
CONFIG_CPUSETS=y
|
|
||||||
CONFIG_CGROUP_CPUACCT=y
|
|
||||||
CONFIG_CGROUP_SCHED=y
|
|
||||||
CONFIG_RT_GROUP_SCHED=y
|
|
||||||
CONFIG_BLK_CGROUP=y
|
|
||||||
CONFIG_NAMESPACES=y
|
|
||||||
CONFIG_RELAY=y
|
|
||||||
CONFIG_BLK_DEV_INITRD=y
|
|
||||||
CONFIG_RD_XZ=y
|
|
||||||
CONFIG_SYSCTL_SYSCALL=y
|
|
||||||
CONFIG_EMBEDDED=y
|
|
||||||
# CONFIG_COMPAT_BRK is not set
|
|
||||||
CONFIG_PROFILING=y
|
|
||||||
CONFIG_KPROBES=y
|
|
||||||
CONFIG_MODULES=y
|
|
||||||
CONFIG_MODULE_FORCE_LOAD=y
|
|
||||||
CONFIG_MODULE_UNLOAD=y
|
|
||||||
CONFIG_BLK_DEV_INTEGRITY=y
|
|
||||||
CONFIG_PARTITION_ADVANCED=y
|
|
||||||
CONFIG_OSF_PARTITION=y
|
|
||||||
CONFIG_AMIGA_PARTITION=y
|
|
||||||
CONFIG_MAC_PARTITION=y
|
|
||||||
CONFIG_BSD_DISKLABEL=y
|
|
||||||
CONFIG_MINIX_SUBPARTITION=y
|
|
||||||
CONFIG_SOLARIS_X86_PARTITION=y
|
|
||||||
CONFIG_UNIXWARE_DISKLABEL=y
|
|
||||||
CONFIG_SGI_PARTITION=y
|
|
||||||
CONFIG_SUN_PARTITION=y
|
|
||||||
CONFIG_KARMA_PARTITION=y
|
|
||||||
CONFIG_CFQ_GROUP_IOSCHED=y
|
|
||||||
CONFIG_NR_CPUS=100
|
|
||||||
CONFIG_HZ_100=y
|
|
||||||
# CONFIG_COMPACTION is not set
|
|
||||||
CONFIG_PREEMPT_VOLUNTARY=y
|
|
||||||
CONFIG_TILE_PCI_IO=y
|
|
||||||
CONFIG_PCI_DEBUG=y
|
|
||||||
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
|
|
||||||
CONFIG_BINFMT_MISC=y
|
|
||||||
CONFIG_NET=y
|
|
||||||
CONFIG_PACKET=y
|
|
||||||
CONFIG_UNIX=y
|
|
||||||
CONFIG_XFRM_USER=y
|
|
||||||
CONFIG_XFRM_SUB_POLICY=y
|
|
||||||
CONFIG_XFRM_STATISTICS=y
|
|
||||||
CONFIG_NET_KEY=m
|
|
||||||
CONFIG_NET_KEY_MIGRATE=y
|
|
||||||
CONFIG_INET=y
|
|
||||||
CONFIG_IP_MULTICAST=y
|
|
||||||
CONFIG_IP_ADVANCED_ROUTER=y
|
|
||||||
CONFIG_IP_MULTIPLE_TABLES=y
|
|
||||||
CONFIG_IP_ROUTE_MULTIPATH=y
|
|
||||||
CONFIG_IP_ROUTE_VERBOSE=y
|
|
||||||
CONFIG_NET_IPIP=m
|
|
||||||
CONFIG_IP_MROUTE=y
|
|
||||||
CONFIG_IP_PIMSM_V1=y
|
|
||||||
CONFIG_IP_PIMSM_V2=y
|
|
||||||
CONFIG_SYN_COOKIES=y
|
|
||||||
CONFIG_INET_AH=m
|
|
||||||
CONFIG_INET_ESP=m
|
|
||||||
CONFIG_INET_IPCOMP=m
|
|
||||||
CONFIG_INET_XFRM_MODE_TRANSPORT=m
|
|
||||||
CONFIG_INET_XFRM_MODE_TUNNEL=m
|
|
||||||
CONFIG_INET_XFRM_MODE_BEET=m
|
|
||||||
CONFIG_INET_DIAG=m
|
|
||||||
CONFIG_TCP_CONG_ADVANCED=y
|
|
||||||
CONFIG_TCP_CONG_HSTCP=m
|
|
||||||
CONFIG_TCP_CONG_HYBLA=m
|
|
||||||
CONFIG_TCP_CONG_SCALABLE=m
|
|
||||||
CONFIG_TCP_CONG_LP=m
|
|
||||||
CONFIG_TCP_CONG_VENO=m
|
|
||||||
CONFIG_TCP_CONG_YEAH=m
|
|
||||||
CONFIG_TCP_CONG_ILLINOIS=m
|
|
||||||
CONFIG_TCP_MD5SIG=y
|
|
||||||
CONFIG_IPV6=y
|
|
||||||
CONFIG_IPV6_ROUTER_PREF=y
|
|
||||||
CONFIG_IPV6_ROUTE_INFO=y
|
|
||||||
CONFIG_IPV6_OPTIMISTIC_DAD=y
|
|
||||||
CONFIG_INET6_AH=m
|
|
||||||
CONFIG_INET6_ESP=m
|
|
||||||
CONFIG_INET6_IPCOMP=m
|
|
||||||
CONFIG_IPV6_MIP6=m
|
|
||||||
CONFIG_INET6_XFRM_MODE_TRANSPORT=m
|
|
||||||
CONFIG_INET6_XFRM_MODE_TUNNEL=m
|
|
||||||
CONFIG_INET6_XFRM_MODE_BEET=m
|
|
||||||
CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
|
|
||||||
CONFIG_IPV6_SIT=m
|
|
||||||
CONFIG_IPV6_TUNNEL=m
|
|
||||||
CONFIG_IPV6_MULTIPLE_TABLES=y
|
|
||||||
CONFIG_IPV6_MROUTE=y
|
|
||||||
CONFIG_IPV6_PIMSM_V2=y
|
|
||||||
CONFIG_NETLABEL=y
|
|
||||||
CONFIG_RDS=m
|
|
||||||
CONFIG_RDS_TCP=m
|
|
||||||
CONFIG_BRIDGE=m
|
|
||||||
CONFIG_VLAN_8021Q=m
|
|
||||||
CONFIG_VLAN_8021Q_GVRP=y
|
|
||||||
CONFIG_PHONET=m
|
|
||||||
CONFIG_NET_SCHED=y
|
|
||||||
CONFIG_NET_SCH_CBQ=m
|
|
||||||
CONFIG_NET_SCH_HTB=m
|
|
||||||
CONFIG_NET_SCH_HFSC=m
|
|
||||||
CONFIG_NET_SCH_PRIO=m
|
|
||||||
CONFIG_NET_SCH_MULTIQ=m
|
|
||||||
CONFIG_NET_SCH_RED=m
|
|
||||||
CONFIG_NET_SCH_SFQ=m
|
|
||||||
CONFIG_NET_SCH_TEQL=m
|
|
||||||
CONFIG_NET_SCH_TBF=m
|
|
||||||
CONFIG_NET_SCH_GRED=m
|
|
||||||
CONFIG_NET_SCH_DSMARK=m
|
|
||||||
CONFIG_NET_SCH_NETEM=m
|
|
||||||
CONFIG_NET_SCH_DRR=m
|
|
||||||
CONFIG_NET_SCH_INGRESS=m
|
|
||||||
CONFIG_NET_CLS_BASIC=m
|
|
||||||
CONFIG_NET_CLS_TCINDEX=m
|
|
||||||
CONFIG_NET_CLS_ROUTE4=m
|
|
||||||
CONFIG_NET_CLS_FW=m
|
|
||||||
CONFIG_NET_CLS_U32=m
|
|
||||||
CONFIG_CLS_U32_PERF=y
|
|
||||||
CONFIG_CLS_U32_MARK=y
|
|
||||||
CONFIG_NET_CLS_RSVP=m
|
|
||||||
CONFIG_NET_CLS_RSVP6=m
|
|
||||||
CONFIG_NET_CLS_FLOW=m
|
|
||||||
CONFIG_NET_CLS_CGROUP=y
|
|
||||||
CONFIG_NET_EMATCH=y
|
|
||||||
CONFIG_NET_EMATCH_CMP=m
|
|
||||||
CONFIG_NET_EMATCH_NBYTE=m
|
|
||||||
CONFIG_NET_EMATCH_U32=m
|
|
||||||
CONFIG_NET_EMATCH_META=m
|
|
||||||
CONFIG_NET_EMATCH_TEXT=m
|
|
||||||
CONFIG_NET_CLS_ACT=y
|
|
||||||
CONFIG_NET_ACT_POLICE=m
|
|
||||||
CONFIG_NET_ACT_GACT=m
|
|
||||||
CONFIG_GACT_PROB=y
|
|
||||||
CONFIG_NET_ACT_MIRRED=m
|
|
||||||
CONFIG_NET_ACT_NAT=m
|
|
||||||
CONFIG_NET_ACT_PEDIT=m
|
|
||||||
CONFIG_NET_ACT_SIMP=m
|
|
||||||
CONFIG_NET_ACT_SKBEDIT=m
|
|
||||||
CONFIG_NET_CLS_IND=y
|
|
||||||
CONFIG_DCB=y
|
|
||||||
CONFIG_DNS_RESOLVER=y
|
|
||||||
# CONFIG_WIRELESS is not set
|
|
||||||
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
|
|
||||||
CONFIG_DEVTMPFS=y
|
|
||||||
CONFIG_DEVTMPFS_MOUNT=y
|
|
||||||
CONFIG_CONNECTOR=y
|
|
||||||
CONFIG_BLK_DEV_LOOP=y
|
|
||||||
CONFIG_BLK_DEV_CRYPTOLOOP=m
|
|
||||||
CONFIG_BLK_DEV_SX8=m
|
|
||||||
CONFIG_BLK_DEV_RAM=y
|
|
||||||
CONFIG_BLK_DEV_RAM_SIZE=16384
|
|
||||||
CONFIG_ATA_OVER_ETH=m
|
|
||||||
CONFIG_RAID_ATTRS=m
|
|
||||||
CONFIG_BLK_DEV_SD=y
|
|
||||||
CONFIG_SCSI_CONSTANTS=y
|
|
||||||
CONFIG_SCSI_LOGGING=y
|
|
||||||
CONFIG_SCSI_SAS_ATA=y
|
|
||||||
CONFIG_ISCSI_TCP=m
|
|
||||||
CONFIG_SCSI_MVSAS=y
|
|
||||||
# CONFIG_SCSI_MVSAS_DEBUG is not set
|
|
||||||
CONFIG_SCSI_MVSAS_TASKLET=y
|
|
||||||
CONFIG_ATA=y
|
|
||||||
CONFIG_SATA_AHCI=y
|
|
||||||
CONFIG_SATA_SIL24=y
|
|
||||||
# CONFIG_ATA_SFF is not set
|
|
||||||
CONFIG_MD=y
|
|
||||||
CONFIG_BLK_DEV_MD=y
|
|
||||||
CONFIG_MD_LINEAR=m
|
|
||||||
CONFIG_MD_RAID0=m
|
|
||||||
CONFIG_MD_RAID1=m
|
|
||||||
CONFIG_MD_RAID10=m
|
|
||||||
CONFIG_MD_RAID456=m
|
|
||||||
CONFIG_MD_FAULTY=m
|
|
||||||
CONFIG_BLK_DEV_DM=m
|
|
||||||
CONFIG_DM_DEBUG=y
|
|
||||||
CONFIG_DM_CRYPT=m
|
|
||||||
CONFIG_DM_SNAPSHOT=m
|
|
||||||
CONFIG_DM_MIRROR=m
|
|
||||||
CONFIG_DM_LOG_USERSPACE=m
|
|
||||||
CONFIG_DM_ZERO=m
|
|
||||||
CONFIG_DM_MULTIPATH=m
|
|
||||||
CONFIG_DM_MULTIPATH_QL=m
|
|
||||||
CONFIG_DM_MULTIPATH_ST=m
|
|
||||||
CONFIG_DM_DELAY=m
|
|
||||||
CONFIG_DM_UEVENT=y
|
|
||||||
CONFIG_TARGET_CORE=m
|
|
||||||
CONFIG_TCM_IBLOCK=m
|
|
||||||
CONFIG_TCM_FILEIO=m
|
|
||||||
CONFIG_TCM_PSCSI=m
|
|
||||||
CONFIG_LOOPBACK_TARGET=m
|
|
||||||
CONFIG_ISCSI_TARGET=m
|
|
||||||
CONFIG_FUSION=y
|
|
||||||
CONFIG_FUSION_SAS=y
|
|
||||||
CONFIG_NETDEVICES=y
|
|
||||||
CONFIG_BONDING=m
|
|
||||||
CONFIG_DUMMY=m
|
|
||||||
CONFIG_IFB=m
|
|
||||||
CONFIG_MACVLAN=m
|
|
||||||
CONFIG_MACVTAP=m
|
|
||||||
CONFIG_NETCONSOLE=m
|
|
||||||
CONFIG_NETCONSOLE_DYNAMIC=y
|
|
||||||
CONFIG_TUN=y
|
|
||||||
CONFIG_VETH=m
|
|
||||||
CONFIG_NET_DSA_MV88E6060=y
|
|
||||||
CONFIG_NET_DSA_MV88E6XXX=y
|
|
||||||
CONFIG_SKY2=y
|
|
||||||
CONFIG_PTP_1588_CLOCK_TILEGX=y
|
|
||||||
# CONFIG_WLAN is not set
|
|
||||||
# CONFIG_INPUT_MOUSEDEV is not set
|
|
||||||
# CONFIG_INPUT_KEYBOARD is not set
|
|
||||||
# CONFIG_INPUT_MOUSE is not set
|
|
||||||
# CONFIG_SERIO is not set
|
|
||||||
# CONFIG_VT is not set
|
|
||||||
# CONFIG_LEGACY_PTYS is not set
|
|
||||||
CONFIG_SERIAL_TILEGX=y
|
|
||||||
CONFIG_HW_RANDOM=y
|
|
||||||
CONFIG_HW_RANDOM_TIMERIOMEM=m
|
|
||||||
CONFIG_I2C=y
|
|
||||||
CONFIG_I2C_CHARDEV=y
|
|
||||||
# CONFIG_HWMON is not set
|
|
||||||
CONFIG_WATCHDOG=y
|
|
||||||
CONFIG_WATCHDOG_NOWAYOUT=y
|
|
||||||
# CONFIG_VGA_ARB is not set
|
|
||||||
CONFIG_DRM=m
|
|
||||||
CONFIG_DRM_TDFX=m
|
|
||||||
CONFIG_DRM_R128=m
|
|
||||||
CONFIG_DRM_MGA=m
|
|
||||||
CONFIG_DRM_VIA=m
|
|
||||||
CONFIG_DRM_SAVAGE=m
|
|
||||||
CONFIG_USB=y
|
|
||||||
CONFIG_USB_EHCI_HCD=y
|
|
||||||
CONFIG_USB_OHCI_HCD=y
|
|
||||||
CONFIG_USB_STORAGE=y
|
|
||||||
CONFIG_EDAC=y
|
|
||||||
CONFIG_RTC_CLASS=y
|
|
||||||
CONFIG_RTC_DRV_TILE=y
|
|
||||||
CONFIG_EXT2_FS=y
|
|
||||||
CONFIG_EXT2_FS_XATTR=y
|
|
||||||
CONFIG_EXT2_FS_POSIX_ACL=y
|
|
||||||
CONFIG_EXT2_FS_SECURITY=y
|
|
||||||
CONFIG_EXT2_FS_XIP=y
|
|
||||||
CONFIG_EXT3_FS=y
|
|
||||||
CONFIG_EXT3_FS_POSIX_ACL=y
|
|
||||||
CONFIG_EXT3_FS_SECURITY=y
|
|
||||||
CONFIG_EXT4_FS=y
|
|
||||||
CONFIG_EXT4_FS_POSIX_ACL=y
|
|
||||||
CONFIG_EXT4_FS_SECURITY=y
|
|
||||||
CONFIG_XFS_FS=y
|
|
||||||
CONFIG_XFS_QUOTA=y
|
|
||||||
CONFIG_XFS_POSIX_ACL=y
|
|
||||||
CONFIG_GFS2_FS=m
|
|
||||||
CONFIG_GFS2_FS_LOCKING_DLM=y
|
|
||||||
CONFIG_BTRFS_FS=m
|
|
||||||
CONFIG_BTRFS_FS_POSIX_ACL=y
|
|
||||||
CONFIG_QUOTA=y
|
|
||||||
CONFIG_QUOTA_NETLINK_INTERFACE=y
|
|
||||||
# CONFIG_PRINT_QUOTA_WARNING is not set
|
|
||||||
CONFIG_QFMT_V2=y
|
|
||||||
CONFIG_AUTOFS4_FS=m
|
|
||||||
CONFIG_FUSE_FS=y
|
|
||||||
CONFIG_CUSE=m
|
|
||||||
CONFIG_FSCACHE=m
|
|
||||||
CONFIG_FSCACHE_STATS=y
|
|
||||||
CONFIG_CACHEFILES=m
|
|
||||||
CONFIG_ISO9660_FS=m
|
|
||||||
CONFIG_JOLIET=y
|
|
||||||
CONFIG_ZISOFS=y
|
|
||||||
CONFIG_UDF_FS=m
|
|
||||||
CONFIG_MSDOS_FS=m
|
|
||||||
CONFIG_VFAT_FS=m
|
|
||||||
CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
|
|
||||||
CONFIG_PROC_KCORE=y
|
|
||||||
CONFIG_TMPFS=y
|
|
||||||
CONFIG_TMPFS_POSIX_ACL=y
|
|
||||||
CONFIG_HUGETLBFS=y
|
|
||||||
CONFIG_ECRYPT_FS=m
|
|
||||||
CONFIG_CRAMFS=m
|
|
||||||
CONFIG_SQUASHFS=m
|
|
||||||
CONFIG_NFS_FS=m
|
|
||||||
CONFIG_NFS_V3_ACL=y
|
|
||||||
CONFIG_NFS_V4=m
|
|
||||||
CONFIG_NFS_V4_1=y
|
|
||||||
CONFIG_NFS_FSCACHE=y
|
|
||||||
CONFIG_NFSD=m
|
|
||||||
CONFIG_NFSD_V3_ACL=y
|
|
||||||
CONFIG_NFSD_V4=y
|
|
||||||
CONFIG_CIFS=m
|
|
||||||
CONFIG_CIFS_STATS=y
|
|
||||||
CONFIG_CIFS_WEAK_PW_HASH=y
|
|
||||||
CONFIG_CIFS_UPCALL=y
|
|
||||||
CONFIG_CIFS_XATTR=y
|
|
||||||
CONFIG_CIFS_POSIX=y
|
|
||||||
CONFIG_CIFS_DFS_UPCALL=y
|
|
||||||
CONFIG_CIFS_FSCACHE=y
|
|
||||||
CONFIG_NLS_DEFAULT="utf8"
|
|
||||||
CONFIG_NLS_CODEPAGE_437=y
|
|
||||||
CONFIG_NLS_CODEPAGE_737=m
|
|
||||||
CONFIG_NLS_CODEPAGE_775=m
|
|
||||||
CONFIG_NLS_CODEPAGE_850=m
|
|
||||||
CONFIG_NLS_CODEPAGE_852=m
|
|
||||||
CONFIG_NLS_CODEPAGE_855=m
|
|
||||||
CONFIG_NLS_CODEPAGE_857=m
|
|
||||||
CONFIG_NLS_CODEPAGE_860=m
|
|
||||||
CONFIG_NLS_CODEPAGE_861=m
|
|
||||||
CONFIG_NLS_CODEPAGE_862=m
|
|
||||||
CONFIG_NLS_CODEPAGE_863=m
|
|
||||||
CONFIG_NLS_CODEPAGE_864=m
|
|
||||||
CONFIG_NLS_CODEPAGE_865=m
|
|
||||||
CONFIG_NLS_CODEPAGE_866=m
|
|
||||||
CONFIG_NLS_CODEPAGE_869=m
|
|
||||||
CONFIG_NLS_CODEPAGE_936=m
|
|
||||||
CONFIG_NLS_CODEPAGE_950=m
|
|
||||||
CONFIG_NLS_CODEPAGE_932=m
|
|
||||||
CONFIG_NLS_CODEPAGE_949=m
|
|
||||||
CONFIG_NLS_CODEPAGE_874=m
|
|
||||||
CONFIG_NLS_ISO8859_8=m
|
|
||||||
CONFIG_NLS_CODEPAGE_1250=m
|
|
||||||
CONFIG_NLS_CODEPAGE_1251=m
|
|
||||||
CONFIG_NLS_ASCII=y
|
|
||||||
CONFIG_NLS_ISO8859_1=m
|
|
||||||
CONFIG_NLS_ISO8859_2=m
|
|
||||||
CONFIG_NLS_ISO8859_3=m
|
|
||||||
CONFIG_NLS_ISO8859_4=m
|
|
||||||
CONFIG_NLS_ISO8859_5=m
|
|
||||||
CONFIG_NLS_ISO8859_6=m
|
|
||||||
CONFIG_NLS_ISO8859_7=m
|
|
||||||
CONFIG_NLS_ISO8859_9=m
|
|
||||||
CONFIG_NLS_ISO8859_13=m
|
|
||||||
CONFIG_NLS_ISO8859_14=m
|
|
||||||
CONFIG_NLS_ISO8859_15=m
|
|
||||||
CONFIG_NLS_KOI8_R=m
|
|
||||||
CONFIG_NLS_KOI8_U=m
|
|
||||||
CONFIG_NLS_UTF8=m
|
|
||||||
CONFIG_DLM=m
|
|
||||||
CONFIG_DLM_DEBUG=y
|
|
||||||
CONFIG_DYNAMIC_DEBUG=y
|
|
||||||
CONFIG_DEBUG_INFO=y
|
|
||||||
CONFIG_DEBUG_INFO_REDUCED=y
|
|
||||||
# CONFIG_ENABLE_WARN_DEPRECATED is not set
|
|
||||||
CONFIG_STRIP_ASM_SYMS=y
|
|
||||||
CONFIG_DEBUG_FS=y
|
|
||||||
CONFIG_HEADERS_CHECK=y
|
|
||||||
# CONFIG_FRAME_POINTER is not set
|
|
||||||
CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
|
|
||||||
CONFIG_DEBUG_VM=y
|
|
||||||
CONFIG_DEBUG_MEMORY_INIT=y
|
|
||||||
CONFIG_DEBUG_STACKOVERFLOW=y
|
|
||||||
CONFIG_LOCKUP_DETECTOR=y
|
|
||||||
CONFIG_SCHEDSTATS=y
|
|
||||||
CONFIG_TIMER_STATS=y
|
|
||||||
CONFIG_DEBUG_LIST=y
|
|
||||||
CONFIG_DEBUG_CREDENTIALS=y
|
|
||||||
CONFIG_RCU_CPU_STALL_TIMEOUT=60
|
|
||||||
CONFIG_ASYNC_RAID6_TEST=m
|
|
||||||
CONFIG_KGDB=y
|
|
||||||
CONFIG_SECURITY=y
|
|
||||||
CONFIG_SECURITYFS=y
|
|
||||||
CONFIG_SECURITY_NETWORK=y
|
|
||||||
CONFIG_SECURITY_NETWORK_XFRM=y
|
|
||||||
CONFIG_SECURITY_SELINUX=y
|
|
||||||
CONFIG_SECURITY_SELINUX_BOOTPARAM=y
|
|
||||||
CONFIG_SECURITY_SELINUX_DISABLE=y
|
|
||||||
CONFIG_CRYPTO_PCRYPT=m
|
|
||||||
CONFIG_CRYPTO_CRYPTD=m
|
|
||||||
CONFIG_CRYPTO_TEST=m
|
|
||||||
CONFIG_CRYPTO_CCM=m
|
|
||||||
CONFIG_CRYPTO_GCM=m
|
|
||||||
CONFIG_CRYPTO_CTS=m
|
|
||||||
CONFIG_CRYPTO_LRW=m
|
|
||||||
CONFIG_CRYPTO_PCBC=m
|
|
||||||
CONFIG_CRYPTO_XTS=m
|
|
||||||
CONFIG_CRYPTO_HMAC=y
|
|
||||||
CONFIG_CRYPTO_XCBC=m
|
|
||||||
CONFIG_CRYPTO_VMAC=m
|
|
||||||
CONFIG_CRYPTO_MICHAEL_MIC=m
|
|
||||||
CONFIG_CRYPTO_RMD128=m
|
|
||||||
CONFIG_CRYPTO_RMD160=m
|
|
||||||
CONFIG_CRYPTO_RMD256=m
|
|
||||||
CONFIG_CRYPTO_RMD320=m
|
|
||||||
CONFIG_CRYPTO_SHA1=y
|
|
||||||
CONFIG_CRYPTO_SHA512=m
|
|
||||||
CONFIG_CRYPTO_TGR192=m
|
|
||||||
CONFIG_CRYPTO_WP512=m
|
|
||||||
CONFIG_CRYPTO_ANUBIS=m
|
|
||||||
CONFIG_CRYPTO_BLOWFISH=m
|
|
||||||
CONFIG_CRYPTO_CAMELLIA=m
|
|
||||||
CONFIG_CRYPTO_CAST5=m
|
|
||||||
CONFIG_CRYPTO_CAST6=m
|
|
||||||
CONFIG_CRYPTO_FCRYPT=m
|
|
||||||
CONFIG_CRYPTO_KHAZAD=m
|
|
||||||
CONFIG_CRYPTO_SEED=m
|
|
||||||
CONFIG_CRYPTO_SERPENT=m
|
|
||||||
CONFIG_CRYPTO_TEA=m
|
|
||||||
CONFIG_CRYPTO_TWOFISH=m
|
|
||||||
CONFIG_CRYPTO_LZO=m
|
|
|
@@ -1,524 +0,0 @@
|
||||||
CONFIG_SYSVIPC=y
|
|
||||||
CONFIG_POSIX_MQUEUE=y
|
|
||||||
CONFIG_AUDIT=y
|
|
||||||
CONFIG_NO_HZ=y
|
|
||||||
CONFIG_HIGH_RES_TIMERS=y
|
|
||||||
CONFIG_BSD_PROCESS_ACCT=y
|
|
||||||
CONFIG_BSD_PROCESS_ACCT_V3=y
|
|
||||||
CONFIG_TASKSTATS=y
|
|
||||||
CONFIG_TASK_DELAY_ACCT=y
|
|
||||||
CONFIG_TASK_XACCT=y
|
|
||||||
CONFIG_TASK_IO_ACCOUNTING=y
|
|
||||||
CONFIG_LOG_BUF_SHIFT=19
|
|
||||||
CONFIG_CGROUPS=y
|
|
||||||
CONFIG_CGROUP_DEBUG=y
|
|
||||||
CONFIG_CGROUP_DEVICE=y
|
|
||||||
CONFIG_CPUSETS=y
|
|
||||||
CONFIG_CGROUP_CPUACCT=y
|
|
||||||
CONFIG_CGROUP_SCHED=y
|
|
||||||
CONFIG_RT_GROUP_SCHED=y
|
|
||||||
CONFIG_BLK_CGROUP=y
|
|
||||||
CONFIG_NAMESPACES=y
|
|
||||||
CONFIG_RELAY=y
|
|
||||||
CONFIG_BLK_DEV_INITRD=y
|
|
||||||
CONFIG_RD_XZ=y
|
|
||||||
CONFIG_SYSCTL_SYSCALL=y
|
|
||||||
CONFIG_EMBEDDED=y
|
|
||||||
# CONFIG_COMPAT_BRK is not set
|
|
||||||
CONFIG_PROFILING=y
|
|
||||||
CONFIG_MODULES=y
|
|
||||||
CONFIG_MODULE_FORCE_LOAD=y
|
|
||||||
CONFIG_MODULE_UNLOAD=y
|
|
||||||
CONFIG_BLK_DEV_INTEGRITY=y
|
|
||||||
CONFIG_PARTITION_ADVANCED=y
|
|
||||||
CONFIG_OSF_PARTITION=y
|
|
||||||
CONFIG_AMIGA_PARTITION=y
|
|
||||||
CONFIG_MAC_PARTITION=y
|
|
||||||
CONFIG_BSD_DISKLABEL=y
|
|
||||||
CONFIG_MINIX_SUBPARTITION=y
|
|
||||||
CONFIG_SOLARIS_X86_PARTITION=y
|
|
||||||
CONFIG_UNIXWARE_DISKLABEL=y
|
|
||||||
CONFIG_SGI_PARTITION=y
|
|
||||||
CONFIG_SUN_PARTITION=y
|
|
||||||
CONFIG_KARMA_PARTITION=y
|
|
||||||
CONFIG_CFQ_GROUP_IOSCHED=y
|
|
||||||
CONFIG_HZ_100=y
|
|
||||||
# CONFIG_COMPACTION is not set
|
|
||||||
CONFIG_PREEMPT_VOLUNTARY=y
|
|
||||||
CONFIG_PCI_DEBUG=y
|
|
||||||
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
|
|
||||||
CONFIG_BINFMT_MISC=y
|
|
||||||
CONFIG_NET=y
|
|
||||||
CONFIG_PACKET=y
|
|
||||||
CONFIG_UNIX=y
|
|
||||||
CONFIG_XFRM_USER=y
|
|
||||||
CONFIG_XFRM_SUB_POLICY=y
|
|
||||||
CONFIG_XFRM_STATISTICS=y
|
|
||||||
CONFIG_NET_KEY=m
|
|
||||||
CONFIG_NET_KEY_MIGRATE=y
|
|
||||||
CONFIG_INET=y
|
|
||||||
CONFIG_IP_MULTICAST=y
|
|
||||||
CONFIG_IP_ADVANCED_ROUTER=y
|
|
||||||
CONFIG_IP_MULTIPLE_TABLES=y
|
|
||||||
CONFIG_IP_ROUTE_MULTIPATH=y
|
|
||||||
CONFIG_IP_ROUTE_VERBOSE=y
|
|
||||||
CONFIG_NET_IPIP=m
|
|
||||||
CONFIG_IP_MROUTE=y
|
|
||||||
CONFIG_IP_PIMSM_V1=y
|
|
||||||
CONFIG_IP_PIMSM_V2=y
|
|
||||||
CONFIG_SYN_COOKIES=y
|
|
||||||
CONFIG_INET_AH=m
|
|
||||||
CONFIG_INET_ESP=m
|
|
||||||
CONFIG_INET_IPCOMP=m
|
|
||||||
CONFIG_INET_XFRM_MODE_TRANSPORT=m
|
|
||||||
CONFIG_INET_XFRM_MODE_TUNNEL=m
|
|
||||||
CONFIG_INET_XFRM_MODE_BEET=m
|
|
||||||
CONFIG_INET_DIAG=m
|
|
||||||
CONFIG_TCP_CONG_ADVANCED=y
|
|
||||||
CONFIG_TCP_CONG_HSTCP=m
|
|
||||||
CONFIG_TCP_CONG_HYBLA=m
|
|
||||||
CONFIG_TCP_CONG_SCALABLE=m
|
|
||||||
CONFIG_TCP_CONG_LP=m
|
|
||||||
CONFIG_TCP_CONG_VENO=m
|
|
||||||
CONFIG_TCP_CONG_YEAH=m
|
|
||||||
CONFIG_TCP_CONG_ILLINOIS=m
|
|
||||||
CONFIG_TCP_MD5SIG=y
|
|
||||||
CONFIG_IPV6=y
|
|
||||||
CONFIG_IPV6_ROUTER_PREF=y
|
|
||||||
CONFIG_IPV6_ROUTE_INFO=y
|
|
||||||
CONFIG_IPV6_OPTIMISTIC_DAD=y
|
|
||||||
CONFIG_INET6_AH=m
|
|
||||||
CONFIG_INET6_ESP=m
|
|
||||||
CONFIG_INET6_IPCOMP=m
|
|
||||||
CONFIG_IPV6_MIP6=m
|
|
||||||
CONFIG_INET6_XFRM_MODE_TRANSPORT=m
|
|
||||||
CONFIG_INET6_XFRM_MODE_TUNNEL=m
|
|
||||||
CONFIG_INET6_XFRM_MODE_BEET=m
|
|
||||||
CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
|
|
||||||
CONFIG_IPV6_SIT=m
|
|
||||||
CONFIG_IPV6_TUNNEL=m
|
|
||||||
CONFIG_IPV6_MULTIPLE_TABLES=y
|
|
||||||
CONFIG_IPV6_MROUTE=y
|
|
||||||
CONFIG_IPV6_PIMSM_V2=y
|
|
||||||
CONFIG_NETLABEL=y
|
|
||||||
CONFIG_NETFILTER=y
|
|
||||||
CONFIG_NF_CONNTRACK=m
|
|
||||||
CONFIG_NF_CONNTRACK_SECMARK=y
|
|
||||||
CONFIG_NF_CONNTRACK_ZONES=y
|
|
||||||
CONFIG_NF_CONNTRACK_EVENTS=y
|
|
||||||
CONFIG_NF_CT_PROTO_DCCP=m
|
|
||||||
CONFIG_NF_CT_PROTO_UDPLITE=m
|
|
||||||
CONFIG_NF_CONNTRACK_AMANDA=m
|
|
||||||
CONFIG_NF_CONNTRACK_FTP=m
|
|
||||||
CONFIG_NF_CONNTRACK_H323=m
|
|
||||||
CONFIG_NF_CONNTRACK_IRC=m
|
|
||||||
CONFIG_NF_CONNTRACK_NETBIOS_NS=m
|
|
||||||
CONFIG_NF_CONNTRACK_PPTP=m
|
|
||||||
CONFIG_NF_CONNTRACK_SANE=m
|
|
||||||
CONFIG_NF_CONNTRACK_SIP=m
|
|
||||||
CONFIG_NF_CONNTRACK_TFTP=m
|
|
||||||
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
|
|
||||||
CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
|
|
||||||
CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
|
|
||||||
CONFIG_NETFILTER_XT_TARGET_DSCP=m
|
|
||||||
CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
|
|
||||||
CONFIG_NETFILTER_XT_TARGET_MARK=m
|
|
||||||
CONFIG_NETFILTER_XT_TARGET_NFLOG=m
|
|
||||||
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
|
|
||||||
CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
|
|
||||||
CONFIG_NETFILTER_XT_TARGET_TEE=m
|
|
||||||
CONFIG_NETFILTER_XT_TARGET_TPROXY=m
|
|
||||||
CONFIG_NETFILTER_XT_TARGET_TRACE=m
|
|
||||||
CONFIG_NETFILTER_XT_TARGET_SECMARK=m
|
|
||||||
CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
|
|
||||||
CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
|
|
||||||
CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
|
|
||||||
CONFIG_NETFILTER_XT_MATCH_COMMENT=m
|
|
||||||
CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
|
|
||||||
CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
|
|
||||||
CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
|
|
||||||
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
|
|
||||||
CONFIG_NETFILTER_XT_MATCH_DCCP=m
|
|
||||||
CONFIG_NETFILTER_XT_MATCH_DSCP=m
|
|
||||||
CONFIG_NETFILTER_XT_MATCH_ESP=m
|
|
||||||
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
|
|
||||||
CONFIG_NETFILTER_XT_MATCH_HELPER=m
|
|
||||||
CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
|
|
||||||
CONFIG_NETFILTER_XT_MATCH_IPVS=m
|
|
||||||
CONFIG_NETFILTER_XT_MATCH_LENGTH=m
|
|
||||||
CONFIG_NETFILTER_XT_MATCH_LIMIT=m
|
|
||||||
CONFIG_NETFILTER_XT_MATCH_MAC=m
|
|
||||||
CONFIG_NETFILTER_XT_MATCH_MARK=m
|
|
||||||
CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
|
|
||||||
CONFIG_NETFILTER_XT_MATCH_OSF=m
|
|
||||||
CONFIG_NETFILTER_XT_MATCH_OWNER=m
|
|
||||||
CONFIG_NETFILTER_XT_MATCH_POLICY=m
|
|
||||||
CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
|
|
||||||
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
|
|
||||||
CONFIG_NETFILTER_XT_MATCH_QUOTA=m
|
|
||||||
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
|
|
||||||
CONFIG_NETFILTER_XT_MATCH_REALM=m
|
|
||||||
CONFIG_NETFILTER_XT_MATCH_RECENT=m
|
|
||||||
CONFIG_NETFILTER_XT_MATCH_SOCKET=m
|
|
||||||
CONFIG_NETFILTER_XT_MATCH_STATE=m
|
|
||||||
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
|
|
||||||
CONFIG_NETFILTER_XT_MATCH_STRING=m
|
|
||||||
CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
|
|
||||||
CONFIG_NETFILTER_XT_MATCH_TIME=m
|
|
||||||
CONFIG_NETFILTER_XT_MATCH_U32=m
|
|
||||||
CONFIG_IP_VS=m
|
|
||||||
CONFIG_IP_VS_IPV6=y
|
|
||||||
CONFIG_IP_VS_PROTO_TCP=y
|
|
||||||
CONFIG_IP_VS_PROTO_UDP=y
|
|
||||||
CONFIG_IP_VS_PROTO_ESP=y
|
|
||||||
CONFIG_IP_VS_PROTO_AH=y
|
|
||||||
CONFIG_IP_VS_PROTO_SCTP=y
|
|
||||||
CONFIG_IP_VS_RR=m
|
|
||||||
CONFIG_IP_VS_WRR=m
|
|
||||||
CONFIG_IP_VS_LC=m
|
|
||||||
CONFIG_IP_VS_WLC=m
|
|
||||||
CONFIG_IP_VS_LBLC=m
|
|
||||||
CONFIG_IP_VS_LBLCR=m
|
|
||||||
CONFIG_IP_VS_SED=m
|
|
||||||
CONFIG_IP_VS_NQ=m
|
|
||||||
CONFIG_NF_CONNTRACK_IPV4=m
|
|
||||||
# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
|
|
||||||
CONFIG_IP_NF_IPTABLES=y
|
|
||||||
CONFIG_IP_NF_MATCH_AH=m
|
|
||||||
CONFIG_IP_NF_MATCH_ECN=m
|
|
||||||
CONFIG_IP_NF_MATCH_TTL=m
|
|
||||||
CONFIG_IP_NF_FILTER=y
|
|
||||||
CONFIG_IP_NF_TARGET_REJECT=y
|
|
||||||
CONFIG_IP_NF_MANGLE=m
|
|
||||||
CONFIG_IP_NF_TARGET_ECN=m
|
|
||||||
CONFIG_IP_NF_TARGET_TTL=m
|
|
||||||
CONFIG_IP_NF_RAW=m
|
|
||||||
CONFIG_IP_NF_SECURITY=m
|
|
||||||
CONFIG_IP_NF_ARPTABLES=m
|
|
||||||
CONFIG_IP_NF_ARPFILTER=m
|
|
||||||
CONFIG_IP_NF_ARP_MANGLE=m
|
|
||||||
CONFIG_NF_CONNTRACK_IPV6=m
|
|
||||||
CONFIG_IP6_NF_MATCH_AH=m
|
|
||||||
CONFIG_IP6_NF_MATCH_EUI64=m
|
|
||||||
CONFIG_IP6_NF_MATCH_FRAG=m
|
|
||||||
CONFIG_IP6_NF_MATCH_OPTS=m
|
|
||||||
CONFIG_IP6_NF_MATCH_HL=m
|
|
||||||
CONFIG_IP6_NF_MATCH_IPV6HEADER=m
|
|
||||||
CONFIG_IP6_NF_MATCH_MH=m
|
|
||||||
CONFIG_IP6_NF_MATCH_RT=m
|
|
||||||
CONFIG_IP6_NF_TARGET_HL=m
|
|
||||||
CONFIG_IP6_NF_FILTER=m
|
|
||||||
CONFIG_IP6_NF_TARGET_REJECT=m
|
|
||||||
CONFIG_IP6_NF_MANGLE=m
|
|
||||||
CONFIG_IP6_NF_RAW=m
|
|
||||||
CONFIG_IP6_NF_SECURITY=m
|
|
||||||
CONFIG_BRIDGE_NF_EBTABLES=m
|
|
||||||
CONFIG_BRIDGE_EBT_BROUTE=m
|
|
||||||
CONFIG_BRIDGE_EBT_T_FILTER=m
|
|
||||||
CONFIG_BRIDGE_EBT_T_NAT=m
|
|
||||||
CONFIG_BRIDGE_EBT_802_3=m
|
|
||||||
CONFIG_BRIDGE_EBT_AMONG=m
|
|
||||||
CONFIG_BRIDGE_EBT_ARP=m
|
|
||||||
CONFIG_BRIDGE_EBT_IP=m
|
|
||||||
CONFIG_BRIDGE_EBT_IP6=m
|
|
||||||
CONFIG_BRIDGE_EBT_LIMIT=m
|
|
||||||
CONFIG_BRIDGE_EBT_MARK=m
|
|
||||||
CONFIG_BRIDGE_EBT_PKTTYPE=m
|
|
||||||
CONFIG_BRIDGE_EBT_STP=m
|
|
||||||
CONFIG_BRIDGE_EBT_VLAN=m
|
|
||||||
CONFIG_BRIDGE_EBT_ARPREPLY=m
|
|
||||||
CONFIG_BRIDGE_EBT_DNAT=m
|
|
||||||
CONFIG_BRIDGE_EBT_MARK_T=m
|
|
||||||
CONFIG_BRIDGE_EBT_REDIRECT=m
|
|
||||||
CONFIG_BRIDGE_EBT_SNAT=m
|
|
||||||
CONFIG_BRIDGE_EBT_LOG=m
|
|
||||||
CONFIG_BRIDGE_EBT_ULOG=m
|
|
||||||
CONFIG_BRIDGE_EBT_NFLOG=m
|
|
||||||
CONFIG_RDS=m
|
|
||||||
CONFIG_RDS_TCP=m
|
|
||||||
CONFIG_BRIDGE=m
|
|
||||||
CONFIG_VLAN_8021Q=m
|
|
||||||
CONFIG_VLAN_8021Q_GVRP=y
|
|
||||||
CONFIG_PHONET=m
|
|
||||||
CONFIG_NET_SCHED=y
|
|
||||||
CONFIG_NET_SCH_CBQ=m
|
|
||||||
CONFIG_NET_SCH_HTB=m
|
|
||||||
CONFIG_NET_SCH_HFSC=m
|
|
||||||
CONFIG_NET_SCH_PRIO=m
|
|
||||||
CONFIG_NET_SCH_MULTIQ=m
|
|
||||||
CONFIG_NET_SCH_RED=m
|
|
||||||
CONFIG_NET_SCH_SFQ=m
|
|
||||||
CONFIG_NET_SCH_TEQL=m
|
|
||||||
CONFIG_NET_SCH_TBF=m
|
|
||||||
CONFIG_NET_SCH_GRED=m
|
|
||||||
CONFIG_NET_SCH_DSMARK=m
|
|
||||||
CONFIG_NET_SCH_NETEM=m
|
|
||||||
CONFIG_NET_SCH_DRR=m
|
|
||||||
CONFIG_NET_SCH_INGRESS=m
|
|
||||||
CONFIG_NET_CLS_BASIC=m
|
|
||||||
CONFIG_NET_CLS_TCINDEX=m
|
|
||||||
CONFIG_NET_CLS_ROUTE4=m
|
|
||||||
CONFIG_NET_CLS_FW=m
|
|
||||||
CONFIG_NET_CLS_U32=m
|
|
||||||
CONFIG_CLS_U32_PERF=y
|
|
||||||
CONFIG_CLS_U32_MARK=y
|
|
||||||
CONFIG_NET_CLS_RSVP=m
|
|
||||||
CONFIG_NET_CLS_RSVP6=m
|
|
||||||
CONFIG_NET_CLS_FLOW=m
|
|
||||||
CONFIG_NET_CLS_CGROUP=y
|
|
||||||
CONFIG_NET_EMATCH=y
|
|
||||||
CONFIG_NET_EMATCH_CMP=m
|
|
||||||
CONFIG_NET_EMATCH_NBYTE=m
|
|
||||||
CONFIG_NET_EMATCH_U32=m
|
|
||||||
CONFIG_NET_EMATCH_META=m
|
|
||||||
CONFIG_NET_EMATCH_TEXT=m
|
|
||||||
CONFIG_NET_CLS_ACT=y
|
|
||||||
CONFIG_NET_ACT_POLICE=m
|
|
||||||
CONFIG_NET_ACT_GACT=m
|
|
||||||
CONFIG_GACT_PROB=y
|
|
||||||
CONFIG_NET_ACT_MIRRED=m
|
|
||||||
CONFIG_NET_ACT_IPT=m
|
|
||||||
CONFIG_NET_ACT_NAT=m
|
|
||||||
CONFIG_NET_ACT_PEDIT=m
|
|
||||||
CONFIG_NET_ACT_SIMP=m
|
|
||||||
CONFIG_NET_ACT_SKBEDIT=m
|
|
||||||
CONFIG_NET_CLS_IND=y
|
|
||||||
CONFIG_DCB=y
|
|
||||||
CONFIG_DNS_RESOLVER=y
|
|
||||||
# CONFIG_WIRELESS is not set
|
|
||||||
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
|
|
||||||
CONFIG_DEVTMPFS=y
|
|
||||||
CONFIG_DEVTMPFS_MOUNT=y
|
|
||||||
CONFIG_CONNECTOR=y
|
|
||||||
CONFIG_BLK_DEV_LOOP=y
|
|
||||||
CONFIG_BLK_DEV_CRYPTOLOOP=m
|
|
||||||
CONFIG_BLK_DEV_SX8=m
|
|
||||||
CONFIG_BLK_DEV_RAM=y
|
|
||||||
CONFIG_BLK_DEV_RAM_SIZE=16384
|
|
||||||
CONFIG_ATA_OVER_ETH=m
|
|
||||||
CONFIG_RAID_ATTRS=m
|
|
||||||
CONFIG_BLK_DEV_SD=y
|
|
||||||
CONFIG_SCSI_CONSTANTS=y
|
|
||||||
CONFIG_SCSI_LOGGING=y
|
|
||||||
CONFIG_ATA=y
|
|
||||||
CONFIG_SATA_SIL24=y
|
|
||||||
# CONFIG_ATA_SFF is not set
|
|
||||||
CONFIG_MD=y
|
|
||||||
CONFIG_BLK_DEV_MD=y
|
|
||||||
CONFIG_MD_LINEAR=m
|
|
||||||
CONFIG_MD_RAID0=m
|
|
||||||
CONFIG_MD_RAID1=m
|
|
||||||
CONFIG_MD_RAID10=m
|
|
||||||
CONFIG_MD_RAID456=m
|
|
||||||
CONFIG_MD_FAULTY=m
|
|
||||||
CONFIG_BLK_DEV_DM=m
|
|
||||||
CONFIG_DM_DEBUG=y
|
|
||||||
CONFIG_DM_CRYPT=m
|
|
||||||
CONFIG_DM_SNAPSHOT=m
|
|
||||||
CONFIG_DM_MIRROR=m
|
|
||||||
CONFIG_DM_LOG_USERSPACE=m
|
|
||||||
CONFIG_DM_ZERO=m
|
|
||||||
CONFIG_DM_MULTIPATH=m
|
|
||||||
CONFIG_DM_MULTIPATH_QL=m
|
|
||||||
CONFIG_DM_MULTIPATH_ST=m
|
|
||||||
CONFIG_DM_DELAY=m
|
|
||||||
CONFIG_DM_UEVENT=y
|
|
||||||
CONFIG_FUSION=y
|
|
||||||
CONFIG_FUSION_SAS=y
|
|
||||||
CONFIG_NETDEVICES=y
|
|
||||||
CONFIG_BONDING=m
|
|
||||||
CONFIG_DUMMY=m
|
|
||||||
CONFIG_IFB=m
|
|
||||||
CONFIG_MACVLAN=m
|
|
||||||
CONFIG_MACVTAP=m
|
|
||||||
CONFIG_NETCONSOLE=m
|
|
||||||
CONFIG_NETCONSOLE_DYNAMIC=y
|
|
||||||
CONFIG_TUN=y
|
|
||||||
CONFIG_VETH=m
|
|
||||||
CONFIG_NET_DSA_MV88E6060=y
|
|
||||||
CONFIG_NET_DSA_MV88E6XXX=y
|
|
||||||
# CONFIG_NET_VENDOR_3COM is not set
|
|
||||||
CONFIG_E1000E=y
|
|
||||||
# CONFIG_WLAN is not set
|
|
||||||
# CONFIG_INPUT_MOUSEDEV is not set
|
|
||||||
# CONFIG_INPUT_KEYBOARD is not set
|
|
||||||
# CONFIG_INPUT_MOUSE is not set
|
|
||||||
# CONFIG_SERIO is not set
|
|
||||||
# CONFIG_VT is not set
|
|
||||||
# CONFIG_LEGACY_PTYS is not set
|
|
||||||
CONFIG_HW_RANDOM=y
|
|
||||||
CONFIG_HW_RANDOM_TIMERIOMEM=m
|
|
||||||
CONFIG_I2C=y
|
|
||||||
CONFIG_I2C_CHARDEV=y
|
|
||||||
# CONFIG_HWMON is not set
|
|
||||||
CONFIG_WATCHDOG=y
|
|
||||||
CONFIG_WATCHDOG_NOWAYOUT=y
|
|
||||||
# CONFIG_VGA_ARB is not set
|
|
||||||
# CONFIG_USB_SUPPORT is not set
|
|
||||||
CONFIG_EDAC=y
|
|
||||||
CONFIG_RTC_CLASS=y
|
|
||||||
CONFIG_RTC_DRV_TILE=y
|
|
||||||
CONFIG_EXT2_FS=y
|
|
||||||
CONFIG_EXT2_FS_XATTR=y
|
|
||||||
CONFIG_EXT2_FS_POSIX_ACL=y
|
|
||||||
CONFIG_EXT2_FS_SECURITY=y
|
|
||||||
CONFIG_EXT2_FS_XIP=y
|
|
||||||
CONFIG_EXT3_FS=y
|
|
||||||
CONFIG_EXT3_FS_POSIX_ACL=y
|
|
||||||
CONFIG_EXT3_FS_SECURITY=y
|
|
||||||
CONFIG_EXT4_FS=y
|
|
||||||
CONFIG_EXT4_FS_POSIX_ACL=y
|
|
||||||
CONFIG_EXT4_FS_SECURITY=y
|
|
||||||
CONFIG_XFS_FS=y
|
|
||||||
CONFIG_XFS_QUOTA=y
|
|
||||||
CONFIG_XFS_POSIX_ACL=y
|
|
||||||
CONFIG_GFS2_FS=m
|
|
||||||
CONFIG_GFS2_FS_LOCKING_DLM=y
|
|
||||||
CONFIG_BTRFS_FS=m
|
|
||||||
CONFIG_BTRFS_FS_POSIX_ACL=y
|
|
||||||
CONFIG_QUOTA=y
|
|
||||||
CONFIG_QUOTA_NETLINK_INTERFACE=y
|
|
||||||
# CONFIG_PRINT_QUOTA_WARNING is not set
|
|
||||||
CONFIG_QFMT_V2=y
|
|
||||||
CONFIG_AUTOFS4_FS=m
|
|
||||||
CONFIG_FUSE_FS=y
|
|
||||||
CONFIG_CUSE=m
|
|
||||||
CONFIG_FSCACHE=m
|
|
||||||
CONFIG_FSCACHE_STATS=y
|
|
||||||
CONFIG_CACHEFILES=m
|
|
||||||
CONFIG_ISO9660_FS=m
|
|
||||||
CONFIG_JOLIET=y
|
|
||||||
CONFIG_ZISOFS=y
|
|
||||||
CONFIG_UDF_FS=m
|
|
||||||
CONFIG_MSDOS_FS=m
|
|
||||||
CONFIG_VFAT_FS=m
|
|
||||||
CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
|
|
||||||
CONFIG_PROC_KCORE=y
|
|
||||||
CONFIG_TMPFS=y
|
|
||||||
CONFIG_TMPFS_POSIX_ACL=y
|
|
||||||
CONFIG_HUGETLBFS=y
|
|
||||||
CONFIG_CONFIGFS_FS=m
|
|
||||||
CONFIG_ECRYPT_FS=m
|
|
||||||
CONFIG_CRAMFS=m
|
|
||||||
CONFIG_SQUASHFS=m
|
|
||||||
CONFIG_NFS_FS=m
|
|
||||||
CONFIG_NFS_V3_ACL=y
|
|
||||||
CONFIG_NFS_V4=m
|
|
||||||
CONFIG_NFS_V4_1=y
|
|
||||||
CONFIG_NFS_FSCACHE=y
|
|
||||||
CONFIG_NFSD=m
|
|
||||||
CONFIG_NFSD_V3_ACL=y
|
|
||||||
CONFIG_NFSD_V4=y
|
|
||||||
CONFIG_CIFS=m
|
|
||||||
CONFIG_CIFS_STATS=y
|
|
||||||
CONFIG_CIFS_WEAK_PW_HASH=y
|
|
||||||
CONFIG_CIFS_UPCALL=y
|
|
||||||
CONFIG_CIFS_XATTR=y
|
|
||||||
CONFIG_CIFS_POSIX=y
|
|
||||||
CONFIG_CIFS_DFS_UPCALL=y
|
|
||||||
CONFIG_CIFS_FSCACHE=y
|
|
||||||
CONFIG_NLS=y
|
|
||||||
CONFIG_NLS_DEFAULT="utf8"
|
|
||||||
CONFIG_NLS_CODEPAGE_437=y
|
|
||||||
CONFIG_NLS_CODEPAGE_737=m
|
|
||||||
CONFIG_NLS_CODEPAGE_775=m
|
|
||||||
CONFIG_NLS_CODEPAGE_850=m
|
|
||||||
CONFIG_NLS_CODEPAGE_852=m
|
|
||||||
CONFIG_NLS_CODEPAGE_855=m
|
|
||||||
CONFIG_NLS_CODEPAGE_857=m
|
|
||||||
CONFIG_NLS_CODEPAGE_860=m
|
|
||||||
CONFIG_NLS_CODEPAGE_861=m
|
|
||||||
CONFIG_NLS_CODEPAGE_862=m
|
|
||||||
CONFIG_NLS_CODEPAGE_863=m
|
|
||||||
CONFIG_NLS_CODEPAGE_864=m
|
|
||||||
CONFIG_NLS_CODEPAGE_865=m
|
|
||||||
CONFIG_NLS_CODEPAGE_866=m
|
|
||||||
CONFIG_NLS_CODEPAGE_869=m
|
|
||||||
CONFIG_NLS_CODEPAGE_936=m
|
|
||||||
CONFIG_NLS_CODEPAGE_950=m
|
|
||||||
CONFIG_NLS_CODEPAGE_932=m
|
|
||||||
CONFIG_NLS_CODEPAGE_949=m
|
|
||||||
CONFIG_NLS_CODEPAGE_874=m
|
|
||||||
CONFIG_NLS_ISO8859_8=m
|
|
||||||
CONFIG_NLS_CODEPAGE_1250=m
|
|
||||||
CONFIG_NLS_CODEPAGE_1251=m
|
|
||||||
CONFIG_NLS_ASCII=y
|
|
||||||
CONFIG_NLS_ISO8859_1=m
|
|
||||||
CONFIG_NLS_ISO8859_2=m
|
|
||||||
CONFIG_NLS_ISO8859_3=m
|
|
||||||
CONFIG_NLS_ISO8859_4=m
|
|
||||||
CONFIG_NLS_ISO8859_5=m
|
|
||||||
CONFIG_NLS_ISO8859_6=m
|
|
||||||
CONFIG_NLS_ISO8859_7=m
|
|
||||||
CONFIG_NLS_ISO8859_9=m
|
|
||||||
CONFIG_NLS_ISO8859_13=m
|
|
||||||
CONFIG_NLS_ISO8859_14=m
|
|
||||||
CONFIG_NLS_ISO8859_15=m
|
|
||||||
CONFIG_NLS_KOI8_R=m
|
|
||||||
CONFIG_NLS_KOI8_U=m
|
|
||||||
CONFIG_NLS_UTF8=m
|
|
||||||
CONFIG_DLM=m
|
|
||||||
CONFIG_DLM_DEBUG=y
|
|
||||||
CONFIG_DYNAMIC_DEBUG=y
|
|
||||||
CONFIG_DEBUG_INFO=y
|
|
||||||
CONFIG_DEBUG_INFO_REDUCED=y
|
|
||||||
# CONFIG_ENABLE_WARN_DEPRECATED is not set
|
|
||||||
CONFIG_FRAME_WARN=2048
|
|
||||||
CONFIG_STRIP_ASM_SYMS=y
|
|
||||||
CONFIG_DEBUG_FS=y
|
|
||||||
CONFIG_HEADERS_CHECK=y
|
|
||||||
# CONFIG_FRAME_POINTER is not set
|
|
||||||
CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
|
|
||||||
CONFIG_MAGIC_SYSRQ=y
|
|
||||||
CONFIG_DEBUG_VM=y
|
|
||||||
CONFIG_DEBUG_MEMORY_INIT=y
|
|
||||||
CONFIG_DEBUG_STACKOVERFLOW=y
|
|
||||||
CONFIG_LOCKUP_DETECTOR=y
|
|
||||||
CONFIG_SCHEDSTATS=y
|
|
||||||
CONFIG_TIMER_STATS=y
|
|
||||||
CONFIG_DEBUG_LIST=y
|
|
||||||
CONFIG_DEBUG_CREDENTIALS=y
|
|
||||||
CONFIG_RCU_CPU_STALL_TIMEOUT=60
|
|
||||||
CONFIG_ASYNC_RAID6_TEST=m
|
|
||||||
CONFIG_SECURITY=y
|
|
||||||
CONFIG_SECURITYFS=y
|
|
||||||
CONFIG_SECURITY_NETWORK=y
|
|
||||||
CONFIG_SECURITY_NETWORK_XFRM=y
|
|
||||||
CONFIG_SECURITY_SELINUX=y
|
|
||||||
CONFIG_SECURITY_SELINUX_BOOTPARAM=y
|
|
||||||
CONFIG_SECURITY_SELINUX_DISABLE=y
|
|
||||||
CONFIG_CRYPTO_PCRYPT=m
|
|
||||||
CONFIG_CRYPTO_CRYPTD=m
|
|
||||||
CONFIG_CRYPTO_TEST=m
|
|
||||||
CONFIG_CRYPTO_CCM=m
|
|
||||||
CONFIG_CRYPTO_GCM=m
|
|
||||||
CONFIG_CRYPTO_CTS=m
|
|
||||||
CONFIG_CRYPTO_LRW=m
|
|
||||||
CONFIG_CRYPTO_PCBC=m
|
|
||||||
CONFIG_CRYPTO_XTS=m
|
|
||||||
CONFIG_CRYPTO_HMAC=y
|
|
||||||
CONFIG_CRYPTO_XCBC=m
|
|
||||||
CONFIG_CRYPTO_VMAC=m
|
|
||||||
CONFIG_CRYPTO_MICHAEL_MIC=m
|
|
||||||
CONFIG_CRYPTO_RMD128=m
|
|
||||||
CONFIG_CRYPTO_RMD160=m
|
|
||||||
CONFIG_CRYPTO_RMD256=m
|
|
||||||
CONFIG_CRYPTO_RMD320=m
|
|
||||||
CONFIG_CRYPTO_SHA1=y
|
|
||||||
CONFIG_CRYPTO_SHA512=m
|
|
||||||
CONFIG_CRYPTO_TGR192=m
|
|
||||||
CONFIG_CRYPTO_WP512=m
|
|
||||||
CONFIG_CRYPTO_ANUBIS=m
|
|
||||||
CONFIG_CRYPTO_BLOWFISH=m
|
|
||||||
CONFIG_CRYPTO_CAMELLIA=m
|
|
||||||
CONFIG_CRYPTO_CAST5=m
|
|
||||||
CONFIG_CRYPTO_CAST6=m
|
|
||||||
CONFIG_CRYPTO_FCRYPT=m
|
|
||||||
CONFIG_CRYPTO_KHAZAD=m
|
|
||||||
CONFIG_CRYPTO_SEED=m
|
|
||||||
CONFIG_CRYPTO_SERPENT=m
|
|
||||||
CONFIG_CRYPTO_TEA=m
|
|
||||||
CONFIG_CRYPTO_TWOFISH=m
|
|
||||||
CONFIG_CRYPTO_LZO=m
|
|
||||||
CONFIG_CRC_CCITT=m
|
|
||||||
CONFIG_CRC7=m
|
|
|
@ -1,34 +0,0 @@
|
||||||
# SPDX-License-Identifier: GPL-2.0
|
|
||||||
# Support direct access to TILE-Gx hardware from user space, via the
|
|
||||||
# gxio library, or from kernel space, via kernel IORPC support.
|
|
||||||
config TILE_GXIO
|
|
||||||
bool
|
|
||||||
depends on TILEGX
|
|
||||||
|
|
||||||
# Support direct access to the common I/O DMA facility within the
|
|
||||||
# TILE-Gx mPIPE and Trio hardware from kernel space.
|
|
||||||
config TILE_GXIO_DMA
|
|
||||||
bool
|
|
||||||
select TILE_GXIO
|
|
||||||
|
|
||||||
# Support direct access to the TILE-Gx mPIPE hardware from kernel space.
|
|
||||||
config TILE_GXIO_MPIPE
|
|
||||||
bool
|
|
||||||
select TILE_GXIO
|
|
||||||
select TILE_GXIO_DMA
|
|
||||||
|
|
||||||
# Support direct access to the TILE-Gx TRIO hardware from kernel space.
|
|
||||||
config TILE_GXIO_TRIO
|
|
||||||
bool
|
|
||||||
select TILE_GXIO
|
|
||||||
select TILE_GXIO_DMA
|
|
||||||
|
|
||||||
# Support direct access to the TILE-Gx USB hardware from kernel space.
|
|
||||||
config TILE_GXIO_USB_HOST
|
|
||||||
bool
|
|
||||||
select TILE_GXIO
|
|
||||||
|
|
||||||
# Support direct access to the TILE-Gx UART hardware from kernel space.
|
|
||||||
config TILE_GXIO_UART
|
|
||||||
bool
|
|
||||||
select TILE_GXIO
|
|
|
@ -1,11 +0,0 @@
|
||||||
# SPDX-License-Identifier: GPL-2.0
|
|
||||||
#
|
|
||||||
# Makefile for the Tile-Gx device access support.
|
|
||||||
#
|
|
||||||
|
|
||||||
obj-$(CONFIG_TILE_GXIO) += iorpc_globals.o kiorpc.o
|
|
||||||
obj-$(CONFIG_TILE_GXIO_DMA) += dma_queue.o
|
|
||||||
obj-$(CONFIG_TILE_GXIO_MPIPE) += mpipe.o iorpc_mpipe.o iorpc_mpipe_info.o
|
|
||||||
obj-$(CONFIG_TILE_GXIO_TRIO) += trio.o iorpc_trio.o
|
|
||||||
obj-$(CONFIG_TILE_GXIO_UART) += uart.o iorpc_uart.o
|
|
||||||
obj-$(CONFIG_TILE_GXIO_USB_HOST) += usb_host.o iorpc_usb_host.o
|
|
|
@ -1,176 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright 2012 Tilera Corporation. All Rights Reserved.
|
|
||||||
*
|
|
||||||
* This program is free software; you can redistribute it and/or
|
|
||||||
* modify it under the terms of the GNU General Public License
|
|
||||||
* as published by the Free Software Foundation, version 2.
|
|
||||||
*
|
|
||||||
* This program is distributed in the hope that it will be useful, but
|
|
||||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
|
||||||
* NON INFRINGEMENT. See the GNU General Public License for
|
|
||||||
* more details.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#include <linux/io.h>
|
|
||||||
#include <linux/atomic.h>
|
|
||||||
#include <linux/module.h>
|
|
||||||
#include <gxio/dma_queue.h>
|
|
||||||
|
|
||||||
/* Wait for a memory read to complete. */
|
|
||||||
#define wait_for_value(val) \
|
|
||||||
__asm__ __volatile__("move %0, %0" :: "r"(val))
|
|
||||||
|
|
||||||
/* The index is in the low 16. */
|
|
||||||
#define DMA_QUEUE_INDEX_MASK ((1 << 16) - 1)
|
|
||||||
|
|
||||||
/*
|
|
||||||
* The hardware descriptor-ring type.
|
|
||||||
* This matches the types used by mpipe (MPIPE_EDMA_POST_REGION_VAL_t)
|
|
||||||
* and trio (TRIO_PUSH_DMA_REGION_VAL_t or TRIO_PULL_DMA_REGION_VAL_t).
|
|
||||||
* See those types for more documentation on the individual fields.
|
|
||||||
*/
|
|
||||||
typedef union {
|
|
||||||
struct {
|
|
||||||
#ifndef __BIG_ENDIAN__
|
|
||||||
uint64_t ring_idx:16;
|
|
||||||
uint64_t count:16;
|
|
||||||
uint64_t gen:1;
|
|
||||||
uint64_t __reserved:31;
|
|
||||||
#else
|
|
||||||
uint64_t __reserved:31;
|
|
||||||
uint64_t gen:1;
|
|
||||||
uint64_t count:16;
|
|
||||||
uint64_t ring_idx:16;
|
|
||||||
#endif
|
|
||||||
};
|
|
||||||
uint64_t word;
|
|
||||||
} __gxio_ring_t;
|
|
||||||
|
|
||||||
void __gxio_dma_queue_init(__gxio_dma_queue_t *dma_queue,
|
|
||||||
void *post_region_addr, unsigned int num_entries)
|
|
||||||
{
|
|
||||||
/*
|
|
||||||
* Limit 65536 entry rings to 65535 credits because we only have a
|
|
||||||
* 16 bit completion counter.
|
|
||||||
*/
|
|
||||||
int64_t credits = (num_entries < 65536) ? num_entries : 65535;
|
|
||||||
|
|
||||||
memset(dma_queue, 0, sizeof(*dma_queue));
|
|
||||||
|
|
||||||
dma_queue->post_region_addr = post_region_addr;
|
|
||||||
dma_queue->hw_complete_count = 0;
|
|
||||||
dma_queue->credits_and_next_index = credits << DMA_QUEUE_CREDIT_SHIFT;
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL_GPL(__gxio_dma_queue_init);
|
|
||||||
|
|
||||||
void __gxio_dma_queue_update_credits(__gxio_dma_queue_t *dma_queue)
|
|
||||||
{
|
|
||||||
__gxio_ring_t val;
|
|
||||||
uint64_t count;
|
|
||||||
uint64_t delta;
|
|
||||||
uint64_t new_count;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Read the 64-bit completion count without touching the cache, so
|
|
||||||
* we later avoid having to evict any sharers of this cache line
|
|
||||||
* when we update it below.
|
|
||||||
*/
|
|
||||||
uint64_t orig_hw_complete_count =
|
|
||||||
cmpxchg(&dma_queue->hw_complete_count,
|
|
||||||
-1, -1);
|
|
||||||
|
|
||||||
/* Make sure the load completes before we access the hardware. */
|
|
||||||
wait_for_value(orig_hw_complete_count);
|
|
||||||
|
|
||||||
/* Read the 16-bit count of how many packets it has completed. */
|
|
||||||
val.word = __gxio_mmio_read(dma_queue->post_region_addr);
|
|
||||||
count = val.count;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Calculate the number of completions since we last updated the
|
|
||||||
* 64-bit counter. It's safe to ignore the high bits because the
|
|
||||||
* maximum credit value is 65535.
|
|
||||||
*/
|
|
||||||
delta = (count - orig_hw_complete_count) & 0xffff;
|
|
||||||
if (delta == 0)
|
|
||||||
return;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Try to write back the count, advanced by delta. If we race with
|
|
||||||
* another thread, this might fail, in which case we return
|
|
||||||
* immediately on the assumption that some credits are (or at least
|
|
||||||
* were) available.
|
|
||||||
*/
|
|
||||||
new_count = orig_hw_complete_count + delta;
|
|
||||||
if (cmpxchg(&dma_queue->hw_complete_count,
|
|
||||||
orig_hw_complete_count,
|
|
||||||
new_count) != orig_hw_complete_count)
|
|
||||||
return;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* We succeeded in advancing the completion count; add back the
|
|
||||||
* corresponding number of egress credits.
|
|
||||||
*/
|
|
||||||
__insn_fetchadd(&dma_queue->credits_and_next_index,
|
|
||||||
(delta << DMA_QUEUE_CREDIT_SHIFT));
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL_GPL(__gxio_dma_queue_update_credits);
|
|
||||||
|
|
||||||
/*
|
|
||||||
* A separate 'blocked' method for put() so that backtraces and
|
|
||||||
* profiles will clearly indicate that we're wasting time spinning on
|
|
||||||
* egress availability rather than actually posting commands.
|
|
||||||
*/
|
|
||||||
int64_t __gxio_dma_queue_wait_for_credits(__gxio_dma_queue_t *dma_queue,
|
|
||||||
int64_t modifier)
|
|
||||||
{
|
|
||||||
int backoff = 16;
|
|
||||||
int64_t old;
|
|
||||||
|
|
||||||
do {
|
|
||||||
int i;
|
|
||||||
/* Back off to avoid spamming memory networks. */
|
|
||||||
for (i = backoff; i > 0; i--)
|
|
||||||
__insn_mfspr(SPR_PASS);
|
|
||||||
|
|
||||||
/* Check credits again. */
|
|
||||||
__gxio_dma_queue_update_credits(dma_queue);
|
|
||||||
old = __insn_fetchaddgez(&dma_queue->credits_and_next_index,
|
|
||||||
modifier);
|
|
||||||
|
|
||||||
/* Calculate bounded exponential backoff for next iteration. */
|
|
||||||
if (backoff < 256)
|
|
||||||
backoff *= 2;
|
|
||||||
} while (old + modifier < 0);
|
|
||||||
|
|
||||||
return old;
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL_GPL(__gxio_dma_queue_wait_for_credits);
|
|
||||||
|
|
||||||
int64_t __gxio_dma_queue_reserve_aux(__gxio_dma_queue_t *dma_queue,
|
|
||||||
unsigned int num, int wait)
|
|
||||||
{
|
|
||||||
return __gxio_dma_queue_reserve(dma_queue, num, wait != 0, true);
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL_GPL(__gxio_dma_queue_reserve_aux);
|
|
||||||
|
|
||||||
int __gxio_dma_queue_is_complete(__gxio_dma_queue_t *dma_queue,
|
|
||||||
int64_t completion_slot, int update)
|
|
||||||
{
|
|
||||||
if (update) {
|
|
||||||
if (READ_ONCE(dma_queue->hw_complete_count) >
|
|
||||||
completion_slot)
|
|
||||||
return 1;
|
|
||||||
|
|
||||||
__gxio_dma_queue_update_credits(dma_queue);
|
|
||||||
}
|
|
||||||
|
|
||||||
return READ_ONCE(dma_queue->hw_complete_count) > completion_slot;
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL_GPL(__gxio_dma_queue_is_complete);
|
|
|
@ -1,89 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright 2012 Tilera Corporation. All Rights Reserved.
|
|
||||||
*
|
|
||||||
* This program is free software; you can redistribute it and/or
|
|
||||||
* modify it under the terms of the GNU General Public License
|
|
||||||
* as published by the Free Software Foundation, version 2.
|
|
||||||
*
|
|
||||||
* This program is distributed in the hope that it will be useful, but
|
|
||||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
|
||||||
* NON INFRINGEMENT. See the GNU General Public License for
|
|
||||||
* more details.
|
|
||||||
*/
|
|
||||||
|
|
||||||
/* This file is machine-generated; DO NOT EDIT! */
|
|
||||||
#include "gxio/iorpc_globals.h"
|
|
||||||
|
|
||||||
struct arm_pollfd_param {
|
|
||||||
union iorpc_pollfd pollfd;
|
|
||||||
};
|
|
||||||
|
|
||||||
int __iorpc_arm_pollfd(int fd, int pollfd_cookie)
|
|
||||||
{
|
|
||||||
struct arm_pollfd_param temp;
|
|
||||||
struct arm_pollfd_param *params = &temp;
|
|
||||||
|
|
||||||
params->pollfd.kernel.cookie = pollfd_cookie;
|
|
||||||
|
|
||||||
return hv_dev_pwrite(fd, 0, (HV_VirtAddr) params, sizeof(*params),
|
|
||||||
IORPC_OP_ARM_POLLFD);
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL(__iorpc_arm_pollfd);
|
|
||||||
|
|
||||||
struct close_pollfd_param {
|
|
||||||
union iorpc_pollfd pollfd;
|
|
||||||
};
|
|
||||||
|
|
||||||
int __iorpc_close_pollfd(int fd, int pollfd_cookie)
|
|
||||||
{
|
|
||||||
struct close_pollfd_param temp;
|
|
||||||
struct close_pollfd_param *params = &temp;
|
|
||||||
|
|
||||||
params->pollfd.kernel.cookie = pollfd_cookie;
|
|
||||||
|
|
||||||
return hv_dev_pwrite(fd, 0, (HV_VirtAddr) params, sizeof(*params),
|
|
||||||
IORPC_OP_CLOSE_POLLFD);
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL(__iorpc_close_pollfd);
|
|
||||||
|
|
||||||
struct get_mmio_base_param {
|
|
||||||
HV_PTE base;
|
|
||||||
};
|
|
||||||
|
|
||||||
int __iorpc_get_mmio_base(int fd, HV_PTE *base)
|
|
||||||
{
|
|
||||||
int __result;
|
|
||||||
struct get_mmio_base_param temp;
|
|
||||||
struct get_mmio_base_param *params = &temp;
|
|
||||||
|
|
||||||
__result =
|
|
||||||
hv_dev_pread(fd, 0, (HV_VirtAddr) params, sizeof(*params),
|
|
||||||
IORPC_OP_GET_MMIO_BASE);
|
|
||||||
*base = params->base;
|
|
||||||
|
|
||||||
return __result;
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL(__iorpc_get_mmio_base);
|
|
||||||
|
|
||||||
struct check_mmio_offset_param {
|
|
||||||
unsigned long offset;
|
|
||||||
unsigned long size;
|
|
||||||
};
|
|
||||||
|
|
||||||
int __iorpc_check_mmio_offset(int fd, unsigned long offset, unsigned long size)
|
|
||||||
{
|
|
||||||
struct check_mmio_offset_param temp;
|
|
||||||
struct check_mmio_offset_param *params = &temp;
|
|
||||||
|
|
||||||
params->offset = offset;
|
|
||||||
params->size = size;
|
|
||||||
|
|
||||||
return hv_dev_pwrite(fd, 0, (HV_VirtAddr) params, sizeof(*params),
|
|
||||||
IORPC_OP_CHECK_MMIO_OFFSET);
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL(__iorpc_check_mmio_offset);
|
|
|
@ -1,593 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright 2012 Tilera Corporation. All Rights Reserved.
|
|
||||||
*
|
|
||||||
* This program is free software; you can redistribute it and/or
|
|
||||||
* modify it under the terms of the GNU General Public License
|
|
||||||
* as published by the Free Software Foundation, version 2.
|
|
||||||
*
|
|
||||||
* This program is distributed in the hope that it will be useful, but
|
|
||||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
|
||||||
* NON INFRINGEMENT. See the GNU General Public License for
|
|
||||||
* more details.
|
|
||||||
*/
|
|
||||||
|
|
||||||
/* This file is machine-generated; DO NOT EDIT! */
|
|
||||||
#include "gxio/iorpc_mpipe.h"
|
|
||||||
|
|
||||||
struct alloc_buffer_stacks_param {
|
|
||||||
unsigned int count;
|
|
||||||
unsigned int first;
|
|
||||||
unsigned int flags;
|
|
||||||
};
|
|
||||||
|
|
||||||
int gxio_mpipe_alloc_buffer_stacks(gxio_mpipe_context_t *context,
|
|
||||||
unsigned int count, unsigned int first,
|
|
||||||
unsigned int flags)
|
|
||||||
{
|
|
||||||
struct alloc_buffer_stacks_param temp;
|
|
||||||
struct alloc_buffer_stacks_param *params = &temp;
|
|
||||||
|
|
||||||
params->count = count;
|
|
||||||
params->first = first;
|
|
||||||
params->flags = flags;
|
|
||||||
|
|
||||||
return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
|
|
||||||
sizeof(*params),
|
|
||||||
GXIO_MPIPE_OP_ALLOC_BUFFER_STACKS);
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL(gxio_mpipe_alloc_buffer_stacks);
|
|
||||||
|
|
||||||
struct init_buffer_stack_aux_param {
|
|
||||||
union iorpc_mem_buffer buffer;
|
|
||||||
unsigned int stack;
|
|
||||||
unsigned int buffer_size_enum;
|
|
||||||
};
|
|
||||||
|
|
||||||
int gxio_mpipe_init_buffer_stack_aux(gxio_mpipe_context_t *context,
|
|
||||||
void *mem_va, size_t mem_size,
|
|
||||||
unsigned int mem_flags, unsigned int stack,
|
|
||||||
unsigned int buffer_size_enum)
|
|
||||||
{
|
|
||||||
int __result;
|
|
||||||
unsigned long long __cpa;
|
|
||||||
pte_t __pte;
|
|
||||||
struct init_buffer_stack_aux_param temp;
|
|
||||||
struct init_buffer_stack_aux_param *params = &temp;
|
|
||||||
|
|
||||||
__result = va_to_cpa_and_pte(mem_va, &__cpa, &__pte);
|
|
||||||
if (__result != 0)
|
|
||||||
return __result;
|
|
||||||
params->buffer.kernel.cpa = __cpa;
|
|
||||||
params->buffer.kernel.size = mem_size;
|
|
||||||
params->buffer.kernel.pte = __pte;
|
|
||||||
params->buffer.kernel.flags = mem_flags;
|
|
||||||
params->stack = stack;
|
|
||||||
params->buffer_size_enum = buffer_size_enum;
|
|
||||||
|
|
||||||
return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
|
|
||||||
sizeof(*params),
|
|
||||||
GXIO_MPIPE_OP_INIT_BUFFER_STACK_AUX);
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL(gxio_mpipe_init_buffer_stack_aux);
|
|
||||||
|
|
||||||
|
|
||||||
struct alloc_notif_rings_param {
|
|
||||||
unsigned int count;
|
|
||||||
unsigned int first;
|
|
||||||
unsigned int flags;
|
|
||||||
};
|
|
||||||
|
|
||||||
int gxio_mpipe_alloc_notif_rings(gxio_mpipe_context_t *context,
|
|
||||||
unsigned int count, unsigned int first,
|
|
||||||
unsigned int flags)
|
|
||||||
{
|
|
||||||
struct alloc_notif_rings_param temp;
|
|
||||||
struct alloc_notif_rings_param *params = &temp;
|
|
||||||
|
|
||||||
params->count = count;
|
|
||||||
params->first = first;
|
|
||||||
params->flags = flags;
|
|
||||||
|
|
||||||
return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
|
|
||||||
sizeof(*params), GXIO_MPIPE_OP_ALLOC_NOTIF_RINGS);
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL(gxio_mpipe_alloc_notif_rings);
|
|
||||||
|
|
||||||
struct init_notif_ring_aux_param {
|
|
||||||
union iorpc_mem_buffer buffer;
|
|
||||||
unsigned int ring;
|
|
||||||
};
|
|
||||||
|
|
||||||
int gxio_mpipe_init_notif_ring_aux(gxio_mpipe_context_t *context, void *mem_va,
|
|
||||||
size_t mem_size, unsigned int mem_flags,
|
|
||||||
unsigned int ring)
|
|
||||||
{
|
|
||||||
int __result;
|
|
||||||
unsigned long long __cpa;
|
|
||||||
pte_t __pte;
|
|
||||||
struct init_notif_ring_aux_param temp;
|
|
||||||
struct init_notif_ring_aux_param *params = &temp;
|
|
||||||
|
|
||||||
__result = va_to_cpa_and_pte(mem_va, &__cpa, &__pte);
|
|
||||||
if (__result != 0)
|
|
||||||
return __result;
|
|
||||||
params->buffer.kernel.cpa = __cpa;
|
|
||||||
params->buffer.kernel.size = mem_size;
|
|
||||||
params->buffer.kernel.pte = __pte;
|
|
||||||
params->buffer.kernel.flags = mem_flags;
|
|
||||||
params->ring = ring;
|
|
||||||
|
|
||||||
return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
|
|
||||||
sizeof(*params),
|
|
||||||
GXIO_MPIPE_OP_INIT_NOTIF_RING_AUX);
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL(gxio_mpipe_init_notif_ring_aux);
|
|
||||||
|
|
||||||
struct request_notif_ring_interrupt_param {
|
|
||||||
union iorpc_interrupt interrupt;
|
|
||||||
unsigned int ring;
|
|
||||||
};
|
|
||||||
|
|
||||||
int gxio_mpipe_request_notif_ring_interrupt(gxio_mpipe_context_t *context,
|
|
||||||
int inter_x, int inter_y,
|
|
||||||
int inter_ipi, int inter_event,
|
|
||||||
unsigned int ring)
|
|
||||||
{
|
|
||||||
struct request_notif_ring_interrupt_param temp;
|
|
||||||
struct request_notif_ring_interrupt_param *params = &temp;
|
|
||||||
|
|
||||||
params->interrupt.kernel.x = inter_x;
|
|
||||||
params->interrupt.kernel.y = inter_y;
|
|
||||||
params->interrupt.kernel.ipi = inter_ipi;
|
|
||||||
params->interrupt.kernel.event = inter_event;
|
|
||||||
params->ring = ring;
|
|
||||||
|
|
||||||
return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
|
|
||||||
sizeof(*params),
|
|
||||||
GXIO_MPIPE_OP_REQUEST_NOTIF_RING_INTERRUPT);
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL(gxio_mpipe_request_notif_ring_interrupt);
|
|
||||||
|
|
||||||
struct enable_notif_ring_interrupt_param {
|
|
||||||
unsigned int ring;
|
|
||||||
};
|
|
||||||
|
|
||||||
int gxio_mpipe_enable_notif_ring_interrupt(gxio_mpipe_context_t *context,
|
|
||||||
unsigned int ring)
|
|
||||||
{
|
|
||||||
struct enable_notif_ring_interrupt_param temp;
|
|
||||||
struct enable_notif_ring_interrupt_param *params = &temp;
|
|
||||||
|
|
||||||
params->ring = ring;
|
|
||||||
|
|
||||||
return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
|
|
||||||
sizeof(*params),
|
|
||||||
GXIO_MPIPE_OP_ENABLE_NOTIF_RING_INTERRUPT);
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL(gxio_mpipe_enable_notif_ring_interrupt);
|
|
||||||
|
|
||||||
struct alloc_notif_groups_param {
|
|
||||||
unsigned int count;
|
|
||||||
unsigned int first;
|
|
||||||
unsigned int flags;
|
|
||||||
};
|
|
||||||
|
|
||||||
int gxio_mpipe_alloc_notif_groups(gxio_mpipe_context_t *context,
|
|
||||||
unsigned int count, unsigned int first,
|
|
||||||
unsigned int flags)
|
|
||||||
{
|
|
||||||
struct alloc_notif_groups_param temp;
|
|
||||||
struct alloc_notif_groups_param *params = &temp;
|
|
||||||
|
|
||||||
params->count = count;
|
|
||||||
params->first = first;
|
|
||||||
params->flags = flags;
|
|
||||||
|
|
||||||
return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
|
|
||||||
sizeof(*params), GXIO_MPIPE_OP_ALLOC_NOTIF_GROUPS);
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL(gxio_mpipe_alloc_notif_groups);
|
|
||||||
|
|
||||||
struct init_notif_group_param {
|
|
||||||
unsigned int group;
|
|
||||||
gxio_mpipe_notif_group_bits_t bits;
|
|
||||||
};
|
|
||||||
|
|
||||||
int gxio_mpipe_init_notif_group(gxio_mpipe_context_t *context,
|
|
||||||
unsigned int group,
|
|
||||||
gxio_mpipe_notif_group_bits_t bits)
|
|
||||||
{
|
|
||||||
struct init_notif_group_param temp;
|
|
||||||
struct init_notif_group_param *params = &temp;
|
|
||||||
|
|
||||||
params->group = group;
|
|
||||||
params->bits = bits;
|
|
||||||
|
|
||||||
return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
|
|
||||||
sizeof(*params), GXIO_MPIPE_OP_INIT_NOTIF_GROUP);
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL(gxio_mpipe_init_notif_group);
|
|
||||||
|
|
||||||
struct alloc_buckets_param {
|
|
||||||
unsigned int count;
|
|
||||||
unsigned int first;
|
|
||||||
unsigned int flags;
|
|
||||||
};
|
|
||||||
|
|
||||||
int gxio_mpipe_alloc_buckets(gxio_mpipe_context_t *context, unsigned int count,
|
|
||||||
unsigned int first, unsigned int flags)
|
|
||||||
{
|
|
||||||
struct alloc_buckets_param temp;
|
|
||||||
struct alloc_buckets_param *params = &temp;
|
|
||||||
|
|
||||||
params->count = count;
|
|
||||||
params->first = first;
|
|
||||||
params->flags = flags;
|
|
||||||
|
|
||||||
return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
|
|
||||||
sizeof(*params), GXIO_MPIPE_OP_ALLOC_BUCKETS);
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL(gxio_mpipe_alloc_buckets);
|
|
||||||
|
|
||||||
struct init_bucket_param {
|
|
||||||
unsigned int bucket;
|
|
||||||
MPIPE_LBL_INIT_DAT_BSTS_TBL_t bucket_info;
|
|
||||||
};
|
|
||||||
|
|
||||||
int gxio_mpipe_init_bucket(gxio_mpipe_context_t *context, unsigned int bucket,
|
|
||||||
MPIPE_LBL_INIT_DAT_BSTS_TBL_t bucket_info)
|
|
||||||
{
|
|
||||||
struct init_bucket_param temp;
|
|
||||||
struct init_bucket_param *params = &temp;
|
|
||||||
|
|
||||||
params->bucket = bucket;
|
|
||||||
params->bucket_info = bucket_info;
|
|
||||||
|
|
||||||
return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
|
|
||||||
sizeof(*params), GXIO_MPIPE_OP_INIT_BUCKET);
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL(gxio_mpipe_init_bucket);
|
|
||||||
|
|
||||||
struct alloc_edma_rings_param {
|
|
||||||
unsigned int count;
|
|
||||||
unsigned int first;
|
|
||||||
unsigned int flags;
|
|
||||||
};
|
|
||||||
|
|
||||||
int gxio_mpipe_alloc_edma_rings(gxio_mpipe_context_t *context,
|
|
||||||
unsigned int count, unsigned int first,
|
|
||||||
unsigned int flags)
|
|
||||||
{
|
|
||||||
struct alloc_edma_rings_param temp;
|
|
||||||
struct alloc_edma_rings_param *params = &temp;
|
|
||||||
|
|
||||||
params->count = count;
|
|
||||||
params->first = first;
|
|
||||||
params->flags = flags;
|
|
||||||
|
|
||||||
return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
|
|
||||||
sizeof(*params), GXIO_MPIPE_OP_ALLOC_EDMA_RINGS);
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL(gxio_mpipe_alloc_edma_rings);
|
|
||||||
|
|
||||||
struct init_edma_ring_aux_param {
|
|
||||||
union iorpc_mem_buffer buffer;
|
|
||||||
unsigned int ring;
|
|
||||||
unsigned int channel;
|
|
||||||
};
|
|
||||||
|
|
||||||
int gxio_mpipe_init_edma_ring_aux(gxio_mpipe_context_t *context, void *mem_va,
|
|
||||||
size_t mem_size, unsigned int mem_flags,
|
|
||||||
unsigned int ring, unsigned int channel)
|
|
||||||
{
|
|
||||||
int __result;
|
|
||||||
unsigned long long __cpa;
|
|
||||||
pte_t __pte;
|
|
||||||
struct init_edma_ring_aux_param temp;
|
|
||||||
struct init_edma_ring_aux_param *params = &temp;
|
|
||||||
|
|
||||||
__result = va_to_cpa_and_pte(mem_va, &__cpa, &__pte);
|
|
||||||
if (__result != 0)
|
|
||||||
return __result;
|
|
||||||
params->buffer.kernel.cpa = __cpa;
|
|
||||||
params->buffer.kernel.size = mem_size;
|
|
||||||
params->buffer.kernel.pte = __pte;
|
|
||||||
params->buffer.kernel.flags = mem_flags;
|
|
||||||
params->ring = ring;
|
|
||||||
params->channel = channel;
|
|
||||||
|
|
||||||
return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
|
|
||||||
sizeof(*params), GXIO_MPIPE_OP_INIT_EDMA_RING_AUX);
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL(gxio_mpipe_init_edma_ring_aux);
|
|
||||||
|
|
||||||
|
|
||||||
int gxio_mpipe_commit_rules(gxio_mpipe_context_t *context, const void *blob,
|
|
||||||
size_t blob_size)
|
|
||||||
{
|
|
||||||
const void *params = blob;
|
|
||||||
|
|
||||||
return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, blob_size,
|
|
||||||
GXIO_MPIPE_OP_COMMIT_RULES);
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL(gxio_mpipe_commit_rules);
|
|
||||||
|
|
||||||
struct register_client_memory_param {
|
|
||||||
unsigned int iotlb;
|
|
||||||
HV_PTE pte;
|
|
||||||
unsigned int flags;
|
|
||||||
};
|
|
||||||
|
|
||||||
int gxio_mpipe_register_client_memory(gxio_mpipe_context_t *context,
|
|
||||||
unsigned int iotlb, HV_PTE pte,
|
|
||||||
unsigned int flags)
|
|
||||||
{
|
|
||||||
struct register_client_memory_param temp;
|
|
||||||
struct register_client_memory_param *params = &temp;
|
|
||||||
|
|
||||||
params->iotlb = iotlb;
|
|
||||||
params->pte = pte;
|
|
||||||
params->flags = flags;
|
|
||||||
|
|
||||||
return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
|
|
||||||
sizeof(*params),
|
|
||||||
GXIO_MPIPE_OP_REGISTER_CLIENT_MEMORY);
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL(gxio_mpipe_register_client_memory);
|
|
||||||
|
|
||||||
struct link_open_aux_param {
|
|
||||||
_gxio_mpipe_link_name_t name;
|
|
||||||
unsigned int flags;
|
|
||||||
};
|
|
||||||
|
|
||||||
int gxio_mpipe_link_open_aux(gxio_mpipe_context_t *context,
|
|
||||||
_gxio_mpipe_link_name_t name, unsigned int flags)
|
|
||||||
{
|
|
||||||
struct link_open_aux_param temp;
|
|
||||||
struct link_open_aux_param *params = &temp;
|
|
||||||
|
|
||||||
params->name = name;
|
|
||||||
params->flags = flags;
|
|
||||||
|
|
||||||
return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
|
|
||||||
sizeof(*params), GXIO_MPIPE_OP_LINK_OPEN_AUX);
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL(gxio_mpipe_link_open_aux);
|
|
||||||
|
|
||||||
struct link_close_aux_param {
|
|
||||||
int mac;
|
|
||||||
};
|
|
||||||
|
|
||||||
int gxio_mpipe_link_close_aux(gxio_mpipe_context_t *context, int mac)
|
|
||||||
{
|
|
||||||
struct link_close_aux_param temp;
|
|
||||||
struct link_close_aux_param *params = &temp;
|
|
||||||
|
|
||||||
params->mac = mac;
|
|
||||||
|
|
||||||
return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
|
|
||||||
sizeof(*params), GXIO_MPIPE_OP_LINK_CLOSE_AUX);
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL(gxio_mpipe_link_close_aux);
|
|
||||||
|
|
||||||
struct link_set_attr_aux_param {
|
|
||||||
int mac;
|
|
||||||
uint32_t attr;
|
|
||||||
int64_t val;
|
|
||||||
};
|
|
||||||
|
|
||||||
int gxio_mpipe_link_set_attr_aux(gxio_mpipe_context_t *context, int mac,
|
|
||||||
uint32_t attr, int64_t val)
|
|
||||||
{
|
|
||||||
struct link_set_attr_aux_param temp;
|
|
||||||
struct link_set_attr_aux_param *params = &temp;
|
|
||||||
|
|
||||||
params->mac = mac;
|
|
||||||
params->attr = attr;
|
|
||||||
params->val = val;
|
|
||||||
|
|
||||||
return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
|
|
||||||
sizeof(*params), GXIO_MPIPE_OP_LINK_SET_ATTR_AUX);
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL(gxio_mpipe_link_set_attr_aux);
|
|
||||||
|
|
||||||
struct get_timestamp_aux_param {
|
|
||||||
uint64_t sec;
|
|
||||||
uint64_t nsec;
|
|
||||||
uint64_t cycles;
|
|
||||||
};
|
|
||||||
|
|
||||||
int gxio_mpipe_get_timestamp_aux(gxio_mpipe_context_t *context, uint64_t *sec,
|
|
||||||
uint64_t *nsec, uint64_t *cycles)
|
|
||||||
{
|
|
||||||
int __result;
|
|
||||||
struct get_timestamp_aux_param temp;
|
|
||||||
struct get_timestamp_aux_param *params = &temp;
|
|
||||||
|
|
||||||
__result =
|
|
||||||
hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params),
|
|
||||||
GXIO_MPIPE_OP_GET_TIMESTAMP_AUX);
|
|
||||||
*sec = params->sec;
|
|
||||||
*nsec = params->nsec;
|
|
||||||
*cycles = params->cycles;
|
|
||||||
|
|
||||||
return __result;
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL(gxio_mpipe_get_timestamp_aux);
|
|
||||||
|
|
||||||
struct set_timestamp_aux_param {
|
|
||||||
uint64_t sec;
|
|
||||||
uint64_t nsec;
|
|
||||||
uint64_t cycles;
|
|
||||||
};
|
|
||||||
|
|
||||||
int gxio_mpipe_set_timestamp_aux(gxio_mpipe_context_t *context, uint64_t sec,
|
|
||||||
uint64_t nsec, uint64_t cycles)
|
|
||||||
{
|
|
||||||
struct set_timestamp_aux_param temp;
|
|
||||||
struct set_timestamp_aux_param *params = &temp;
|
|
||||||
|
|
||||||
params->sec = sec;
|
|
||||||
params->nsec = nsec;
|
|
||||||
params->cycles = cycles;
|
|
||||||
|
|
||||||
return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
|
|
||||||
sizeof(*params), GXIO_MPIPE_OP_SET_TIMESTAMP_AUX);
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL(gxio_mpipe_set_timestamp_aux);
|
|
||||||
|
|
||||||
struct adjust_timestamp_aux_param {
|
|
||||||
int64_t nsec;
|
|
||||||
};
|
|
||||||
|
|
||||||
int gxio_mpipe_adjust_timestamp_aux(gxio_mpipe_context_t *context, int64_t nsec)
|
|
||||||
{
|
|
||||||
struct adjust_timestamp_aux_param temp;
|
|
||||||
struct adjust_timestamp_aux_param *params = &temp;
|
|
||||||
|
|
||||||
params->nsec = nsec;
|
|
||||||
|
|
||||||
return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
|
|
||||||
sizeof(*params),
|
|
||||||
GXIO_MPIPE_OP_ADJUST_TIMESTAMP_AUX);
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL(gxio_mpipe_adjust_timestamp_aux);
|
|
||||||
|
|
||||||
struct config_edma_ring_blks_param {
|
|
||||||
unsigned int ering;
|
|
||||||
unsigned int max_blks;
|
|
||||||
unsigned int min_snf_blks;
|
|
||||||
unsigned int db;
|
|
||||||
};
|
|
||||||
|
|
||||||
int gxio_mpipe_config_edma_ring_blks(gxio_mpipe_context_t *context,
|
|
||||||
unsigned int ering, unsigned int max_blks,
|
|
||||||
unsigned int min_snf_blks, unsigned int db)
|
|
||||||
{
|
|
||||||
struct config_edma_ring_blks_param temp;
|
|
||||||
struct config_edma_ring_blks_param *params = &temp;
|
|
||||||
|
|
||||||
params->ering = ering;
|
|
||||||
params->max_blks = max_blks;
|
|
||||||
params->min_snf_blks = min_snf_blks;
|
|
||||||
params->db = db;
|
|
||||||
|
|
||||||
return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
|
|
||||||
sizeof(*params),
|
|
||||||
GXIO_MPIPE_OP_CONFIG_EDMA_RING_BLKS);
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL(gxio_mpipe_config_edma_ring_blks);
|
|
||||||
|
|
||||||
struct adjust_timestamp_freq_param {
|
|
||||||
int32_t ppb;
|
|
||||||
};
|
|
||||||
|
|
||||||
int gxio_mpipe_adjust_timestamp_freq(gxio_mpipe_context_t *context, int32_t ppb)
|
|
||||||
{
|
|
||||||
struct adjust_timestamp_freq_param temp;
|
|
||||||
struct adjust_timestamp_freq_param *params = &temp;
|
|
||||||
|
|
||||||
params->ppb = ppb;
|
|
||||||
|
|
||||||
return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
|
|
||||||
sizeof(*params),
|
|
||||||
GXIO_MPIPE_OP_ADJUST_TIMESTAMP_FREQ);
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL(gxio_mpipe_adjust_timestamp_freq);
|
|
||||||
|
|
||||||
struct arm_pollfd_param {
|
|
||||||
union iorpc_pollfd pollfd;
|
|
||||||
};
|
|
||||||
|
|
||||||
int gxio_mpipe_arm_pollfd(gxio_mpipe_context_t *context, int pollfd_cookie)
|
|
||||||
{
|
|
||||||
struct arm_pollfd_param temp;
|
|
||||||
struct arm_pollfd_param *params = &temp;
|
|
||||||
|
|
||||||
params->pollfd.kernel.cookie = pollfd_cookie;
|
|
||||||
|
|
||||||
return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
|
|
||||||
sizeof(*params), GXIO_MPIPE_OP_ARM_POLLFD);
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL(gxio_mpipe_arm_pollfd);
|
|
||||||
|
|
||||||
struct close_pollfd_param {
|
|
||||||
union iorpc_pollfd pollfd;
|
|
||||||
};
|
|
||||||
|
|
||||||
int gxio_mpipe_close_pollfd(gxio_mpipe_context_t *context, int pollfd_cookie)
|
|
||||||
{
|
|
||||||
struct close_pollfd_param temp;
|
|
||||||
struct close_pollfd_param *params = &temp;
|
|
||||||
|
|
||||||
params->pollfd.kernel.cookie = pollfd_cookie;
|
|
||||||
|
|
||||||
return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
|
|
||||||
sizeof(*params), GXIO_MPIPE_OP_CLOSE_POLLFD);
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL(gxio_mpipe_close_pollfd);
|
|
||||||
|
|
||||||
struct get_mmio_base_param {
|
|
||||||
HV_PTE base;
|
|
||||||
};
|
|
||||||
|
|
||||||
int gxio_mpipe_get_mmio_base(gxio_mpipe_context_t *context, HV_PTE *base)
|
|
||||||
{
|
|
||||||
int __result;
|
|
||||||
struct get_mmio_base_param temp;
|
|
||||||
struct get_mmio_base_param *params = &temp;
|
|
||||||
|
|
||||||
__result =
|
|
||||||
hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params),
|
|
||||||
GXIO_MPIPE_OP_GET_MMIO_BASE);
|
|
||||||
*base = params->base;
|
|
||||||
|
|
||||||
return __result;
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL(gxio_mpipe_get_mmio_base);
|
|
||||||
|
|
||||||
struct check_mmio_offset_param {
|
|
||||||
unsigned long offset;
|
|
||||||
unsigned long size;
|
|
||||||
};
|
|
||||||
|
|
||||||
int gxio_mpipe_check_mmio_offset(gxio_mpipe_context_t *context,
|
|
||||||
unsigned long offset, unsigned long size)
|
|
||||||
{
|
|
||||||
struct check_mmio_offset_param temp;
|
|
||||||
struct check_mmio_offset_param *params = &temp;
|
|
||||||
|
|
||||||
params->offset = offset;
|
|
||||||
params->size = size;
|
|
||||||
|
|
||||||
return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
|
|
||||||
sizeof(*params), GXIO_MPIPE_OP_CHECK_MMIO_OFFSET);
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL(gxio_mpipe_check_mmio_offset);
|
|
|
@ -1,102 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright 2012 Tilera Corporation. All Rights Reserved.
|
|
||||||
*
|
|
||||||
* This program is free software; you can redistribute it and/or
|
|
||||||
* modify it under the terms of the GNU General Public License
|
|
||||||
* as published by the Free Software Foundation, version 2.
|
|
||||||
*
|
|
||||||
* This program is distributed in the hope that it will be useful, but
|
|
||||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
|
||||||
* NON INFRINGEMENT. See the GNU General Public License for
|
|
||||||
* more details.
|
|
||||||
*/
|
|
||||||
|
|
||||||
/* This file is machine-generated; DO NOT EDIT! */
|
|
||||||
#include "gxio/iorpc_mpipe_info.h"
|
|
||||||
|
|
||||||
struct instance_aux_param {
|
|
||||||
_gxio_mpipe_link_name_t name;
|
|
||||||
};
|
|
||||||
|
|
||||||
int gxio_mpipe_info_instance_aux(gxio_mpipe_info_context_t *context,
|
|
||||||
_gxio_mpipe_link_name_t name)
|
|
||||||
{
|
|
||||||
struct instance_aux_param temp;
|
|
||||||
struct instance_aux_param *params = &temp;
|
|
||||||
|
|
||||||
params->name = name;
|
|
||||||
|
|
||||||
return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
|
|
||||||
sizeof(*params), GXIO_MPIPE_INFO_OP_INSTANCE_AUX);
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL(gxio_mpipe_info_instance_aux);
|
|
||||||
|
|
||||||
struct enumerate_aux_param {
|
|
||||||
_gxio_mpipe_link_name_t name;
|
|
||||||
_gxio_mpipe_link_mac_t mac;
|
|
||||||
};
|
|
||||||
|
|
||||||
int gxio_mpipe_info_enumerate_aux(gxio_mpipe_info_context_t *context,
|
|
||||||
unsigned int idx,
|
|
||||||
_gxio_mpipe_link_name_t *name,
|
|
||||||
_gxio_mpipe_link_mac_t *mac)
|
|
||||||
{
|
|
||||||
int __result;
|
|
||||||
struct enumerate_aux_param temp;
|
|
||||||
struct enumerate_aux_param *params = &temp;
|
|
||||||
|
|
||||||
__result =
|
|
||||||
hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params),
|
|
||||||
(((uint64_t)idx << 32) |
|
|
||||||
GXIO_MPIPE_INFO_OP_ENUMERATE_AUX));
|
|
||||||
*name = params->name;
|
|
||||||
*mac = params->mac;
|
|
||||||
|
|
||||||
return __result;
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL(gxio_mpipe_info_enumerate_aux);
|
|
||||||
|
|
||||||
struct get_mmio_base_param {
|
|
||||||
HV_PTE base;
|
|
||||||
};
|
|
||||||
|
|
||||||
int gxio_mpipe_info_get_mmio_base(gxio_mpipe_info_context_t *context,
|
|
||||||
HV_PTE *base)
|
|
||||||
{
|
|
||||||
int __result;
|
|
||||||
struct get_mmio_base_param temp;
|
|
||||||
struct get_mmio_base_param *params = &temp;
|
|
||||||
|
|
||||||
__result =
|
|
||||||
hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params),
|
|
||||||
GXIO_MPIPE_INFO_OP_GET_MMIO_BASE);
|
|
||||||
*base = params->base;
|
|
||||||
|
|
||||||
return __result;
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL(gxio_mpipe_info_get_mmio_base);
|
|
||||||
|
|
||||||
struct check_mmio_offset_param {
|
|
||||||
unsigned long offset;
|
|
||||||
unsigned long size;
|
|
||||||
};
|
|
||||||
|
|
||||||
int gxio_mpipe_info_check_mmio_offset(gxio_mpipe_info_context_t *context,
|
|
||||||
unsigned long offset, unsigned long size)
|
|
||||||
{
|
|
||||||
struct check_mmio_offset_param temp;
|
|
||||||
struct check_mmio_offset_param *params = &temp;
|
|
||||||
|
|
||||||
params->offset = offset;
|
|
||||||
params->size = size;
|
|
||||||
|
|
||||||
return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
|
|
||||||
sizeof(*params),
|
|
||||||
GXIO_MPIPE_INFO_OP_CHECK_MMIO_OFFSET);
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL(gxio_mpipe_info_check_mmio_offset);
|
|
|
@ -1,350 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright 2012 Tilera Corporation. All Rights Reserved.
|
|
||||||
*
|
|
||||||
* This program is free software; you can redistribute it and/or
|
|
||||||
* modify it under the terms of the GNU General Public License
|
|
||||||
* as published by the Free Software Foundation, version 2.
|
|
||||||
*
|
|
||||||
* This program is distributed in the hope that it will be useful, but
|
|
||||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
|
||||||
* NON INFRINGEMENT. See the GNU General Public License for
|
|
||||||
* more details.
|
|
||||||
*/
|
|
||||||
|
|
||||||
/* This file is machine-generated; DO NOT EDIT! */
|
|
||||||
#include "gxio/iorpc_trio.h"
|
|
||||||
|
|
||||||
struct alloc_asids_param {
|
|
||||||
unsigned int count;
|
|
||||||
unsigned int first;
|
|
||||||
unsigned int flags;
|
|
||||||
};
|
|
||||||
|
|
||||||
int gxio_trio_alloc_asids(gxio_trio_context_t *context, unsigned int count,
|
|
||||||
unsigned int first, unsigned int flags)
|
|
||||||
{
|
|
||||||
struct alloc_asids_param temp;
|
|
||||||
struct alloc_asids_param *params = &temp;
|
|
||||||
|
|
||||||
params->count = count;
|
|
||||||
params->first = first;
|
|
||||||
params->flags = flags;
|
|
||||||
|
|
||||||
return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
|
|
||||||
sizeof(*params), GXIO_TRIO_OP_ALLOC_ASIDS);
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL(gxio_trio_alloc_asids);
|
|
||||||
|
|
||||||
|
|
||||||
struct alloc_memory_maps_param {
|
|
||||||
unsigned int count;
|
|
||||||
unsigned int first;
|
|
||||||
unsigned int flags;
|
|
||||||
};
|
|
||||||
|
|
||||||
int gxio_trio_alloc_memory_maps(gxio_trio_context_t *context,
|
|
||||||
unsigned int count, unsigned int first,
|
|
||||||
unsigned int flags)
|
|
||||||
{
|
|
||||||
struct alloc_memory_maps_param temp;
|
|
||||||
struct alloc_memory_maps_param *params = &temp;
|
|
||||||
|
|
||||||
params->count = count;
|
|
||||||
params->first = first;
|
|
||||||
params->flags = flags;
|
|
||||||
|
|
||||||
return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
|
|
||||||
sizeof(*params), GXIO_TRIO_OP_ALLOC_MEMORY_MAPS);
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL(gxio_trio_alloc_memory_maps);
|
|
||||||
|
|
||||||
struct alloc_scatter_queues_param {
|
|
||||||
unsigned int count;
|
|
||||||
unsigned int first;
|
|
||||||
unsigned int flags;
|
|
||||||
};
|
|
||||||
|
|
||||||
int gxio_trio_alloc_scatter_queues(gxio_trio_context_t *context,
|
|
||||||
unsigned int count, unsigned int first,
|
|
||||||
unsigned int flags)
|
|
||||||
{
|
|
||||||
struct alloc_scatter_queues_param temp;
|
|
||||||
struct alloc_scatter_queues_param *params = &temp;
|
|
||||||
|
|
||||||
params->count = count;
|
|
||||||
params->first = first;
|
|
||||||
params->flags = flags;
|
|
||||||
|
|
||||||
return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
|
|
||||||
sizeof(*params),
|
|
||||||
GXIO_TRIO_OP_ALLOC_SCATTER_QUEUES);
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL(gxio_trio_alloc_scatter_queues);
|
|
||||||
|
|
||||||
struct alloc_pio_regions_param {
|
|
||||||
unsigned int count;
|
|
||||||
unsigned int first;
|
|
||||||
unsigned int flags;
|
|
||||||
};
|
|
||||||
|
|
||||||
int gxio_trio_alloc_pio_regions(gxio_trio_context_t *context,
|
|
||||||
unsigned int count, unsigned int first,
|
|
||||||
unsigned int flags)
|
|
||||||
{
|
|
||||||
struct alloc_pio_regions_param temp;
|
|
||||||
struct alloc_pio_regions_param *params = &temp;
|
|
||||||
|
|
||||||
params->count = count;
|
|
||||||
params->first = first;
|
|
||||||
params->flags = flags;
|
|
||||||
|
|
||||||
return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
|
|
||||||
sizeof(*params), GXIO_TRIO_OP_ALLOC_PIO_REGIONS);
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL(gxio_trio_alloc_pio_regions);
|
|
||||||
|
|
||||||
struct init_pio_region_aux_param {
|
|
||||||
unsigned int pio_region;
|
|
||||||
unsigned int mac;
|
|
||||||
uint32_t bus_address_hi;
|
|
||||||
unsigned int flags;
|
|
||||||
};
|
|
||||||
|
|
||||||
int gxio_trio_init_pio_region_aux(gxio_trio_context_t *context,
|
|
||||||
unsigned int pio_region, unsigned int mac,
|
|
||||||
uint32_t bus_address_hi, unsigned int flags)
|
|
||||||
{
|
|
||||||
struct init_pio_region_aux_param temp;
|
|
||||||
struct init_pio_region_aux_param *params = &temp;
|
|
||||||
|
|
||||||
params->pio_region = pio_region;
|
|
||||||
params->mac = mac;
|
|
||||||
params->bus_address_hi = bus_address_hi;
|
|
||||||
params->flags = flags;
|
|
||||||
|
|
||||||
return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
|
|
||||||
sizeof(*params), GXIO_TRIO_OP_INIT_PIO_REGION_AUX);
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL(gxio_trio_init_pio_region_aux);
|
|
||||||
|
|
||||||
|
|
||||||
struct init_memory_map_mmu_aux_param {
|
|
||||||
unsigned int map;
|
|
||||||
unsigned long va;
|
|
||||||
uint64_t size;
|
|
||||||
unsigned int asid;
|
|
||||||
unsigned int mac;
|
|
||||||
uint64_t bus_address;
|
|
||||||
unsigned int node;
|
|
||||||
unsigned int order_mode;
|
|
||||||
};
|
|
||||||
|
|
||||||
int gxio_trio_init_memory_map_mmu_aux(gxio_trio_context_t *context,
|
|
||||||
unsigned int map, unsigned long va,
|
|
||||||
uint64_t size, unsigned int asid,
|
|
||||||
unsigned int mac, uint64_t bus_address,
|
|
||||||
unsigned int node,
|
|
||||||
unsigned int order_mode)
|
|
||||||
{
|
|
||||||
struct init_memory_map_mmu_aux_param temp;
|
|
||||||
struct init_memory_map_mmu_aux_param *params = &temp;
|
|
||||||
|
|
||||||
params->map = map;
|
|
||||||
params->va = va;
|
|
||||||
params->size = size;
|
|
||||||
params->asid = asid;
|
|
||||||
params->mac = mac;
|
|
||||||
params->bus_address = bus_address;
|
|
||||||
params->node = node;
|
|
||||||
params->order_mode = order_mode;
|
|
||||||
|
|
||||||
return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
|
|
||||||
sizeof(*params),
|
|
||||||
GXIO_TRIO_OP_INIT_MEMORY_MAP_MMU_AUX);
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL(gxio_trio_init_memory_map_mmu_aux);
|
|
||||||
|
|
||||||
struct get_port_property_param {
|
|
||||||
struct pcie_trio_ports_property trio_ports;
|
|
||||||
};
|
|
||||||
|
|
||||||
int gxio_trio_get_port_property(gxio_trio_context_t *context,
|
|
||||||
struct pcie_trio_ports_property *trio_ports)
|
|
||||||
{
|
|
||||||
int __result;
|
|
||||||
struct get_port_property_param temp;
|
|
||||||
struct get_port_property_param *params = &temp;
|
|
||||||
|
|
||||||
__result =
|
|
||||||
hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params),
|
|
||||||
GXIO_TRIO_OP_GET_PORT_PROPERTY);
|
|
||||||
*trio_ports = params->trio_ports;
|
|
||||||
|
|
||||||
return __result;
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL(gxio_trio_get_port_property);
|
|
||||||
|
|
||||||
struct config_legacy_intr_param {
|
|
||||||
union iorpc_interrupt interrupt;
|
|
||||||
unsigned int mac;
|
|
||||||
unsigned int intx;
|
|
||||||
};
|
|
||||||
|
|
||||||
int gxio_trio_config_legacy_intr(gxio_trio_context_t *context, int inter_x,
|
|
||||||
int inter_y, int inter_ipi, int inter_event,
|
|
||||||
unsigned int mac, unsigned int intx)
|
|
||||||
{
|
|
||||||
struct config_legacy_intr_param temp;
|
|
||||||
struct config_legacy_intr_param *params = &temp;
|
|
||||||
|
|
||||||
params->interrupt.kernel.x = inter_x;
|
|
||||||
params->interrupt.kernel.y = inter_y;
|
|
||||||
params->interrupt.kernel.ipi = inter_ipi;
|
|
||||||
params->interrupt.kernel.event = inter_event;
|
|
||||||
params->mac = mac;
|
|
||||||
params->intx = intx;
|
|
||||||
|
|
||||||
return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
|
|
||||||
sizeof(*params), GXIO_TRIO_OP_CONFIG_LEGACY_INTR);
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL(gxio_trio_config_legacy_intr);
|
|
||||||
|
|
||||||
struct config_msi_intr_param {
|
|
||||||
union iorpc_interrupt interrupt;
|
|
||||||
unsigned int mac;
|
|
||||||
unsigned int mem_map;
|
|
||||||
uint64_t mem_map_base;
|
|
||||||
uint64_t mem_map_limit;
|
|
||||||
unsigned int asid;
|
|
||||||
};
|
|
||||||
|
|
||||||
int gxio_trio_config_msi_intr(gxio_trio_context_t *context, int inter_x,
|
|
||||||
int inter_y, int inter_ipi, int inter_event,
|
|
||||||
unsigned int mac, unsigned int mem_map,
|
|
||||||
uint64_t mem_map_base, uint64_t mem_map_limit,
|
|
||||||
unsigned int asid)
|
|
||||||
{
|
|
||||||
struct config_msi_intr_param temp;
|
|
||||||
struct config_msi_intr_param *params = &temp;
|
|
||||||
|
|
||||||
params->interrupt.kernel.x = inter_x;
|
|
||||||
params->interrupt.kernel.y = inter_y;
|
|
||||||
params->interrupt.kernel.ipi = inter_ipi;
|
|
||||||
params->interrupt.kernel.event = inter_event;
|
|
||||||
params->mac = mac;
|
|
||||||
params->mem_map = mem_map;
|
|
||||||
params->mem_map_base = mem_map_base;
|
|
||||||
params->mem_map_limit = mem_map_limit;
|
|
||||||
params->asid = asid;
|
|
||||||
|
|
||||||
return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
|
|
||||||
sizeof(*params), GXIO_TRIO_OP_CONFIG_MSI_INTR);
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL(gxio_trio_config_msi_intr);
|
|
||||||
|
|
||||||
|
|
||||||
struct set_mps_mrs_param {
|
|
||||||
uint16_t mps;
|
|
||||||
uint16_t mrs;
|
|
||||||
unsigned int mac;
|
|
||||||
};
|
|
||||||
|
|
||||||
int gxio_trio_set_mps_mrs(gxio_trio_context_t *context, uint16_t mps,
|
|
||||||
uint16_t mrs, unsigned int mac)
|
|
||||||
{
|
|
||||||
struct set_mps_mrs_param temp;
|
|
||||||
struct set_mps_mrs_param *params = &temp;
|
|
||||||
|
|
||||||
params->mps = mps;
|
|
||||||
params->mrs = mrs;
|
|
||||||
params->mac = mac;
|
|
||||||
|
|
||||||
return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
|
|
||||||
sizeof(*params), GXIO_TRIO_OP_SET_MPS_MRS);
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL(gxio_trio_set_mps_mrs);
|
|
||||||
|
|
||||||
struct force_rc_link_up_param {
|
|
||||||
unsigned int mac;
|
|
||||||
};
|
|
||||||
|
|
||||||
int gxio_trio_force_rc_link_up(gxio_trio_context_t *context, unsigned int mac)
|
|
||||||
{
|
|
||||||
struct force_rc_link_up_param temp;
|
|
||||||
struct force_rc_link_up_param *params = &temp;
|
|
||||||
|
|
||||||
params->mac = mac;
|
|
||||||
|
|
||||||
return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
|
|
||||||
sizeof(*params), GXIO_TRIO_OP_FORCE_RC_LINK_UP);
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL(gxio_trio_force_rc_link_up);
|
|
||||||
|
|
||||||
struct force_ep_link_up_param {
|
|
||||||
unsigned int mac;
|
|
||||||
};
|
|
||||||
|
|
||||||
int gxio_trio_force_ep_link_up(gxio_trio_context_t *context, unsigned int mac)
|
|
||||||
{
|
|
||||||
struct force_ep_link_up_param temp;
|
|
||||||
struct force_ep_link_up_param *params = &temp;
|
|
||||||
|
|
||||||
params->mac = mac;
|
|
||||||
|
|
||||||
return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
|
|
||||||
sizeof(*params), GXIO_TRIO_OP_FORCE_EP_LINK_UP);
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL(gxio_trio_force_ep_link_up);
|
|
||||||
|
|
||||||
struct get_mmio_base_param {
|
|
||||||
HV_PTE base;
|
|
||||||
};
|
|
||||||
|
|
||||||
int gxio_trio_get_mmio_base(gxio_trio_context_t *context, HV_PTE *base)
|
|
||||||
{
|
|
||||||
int __result;
|
|
||||||
struct get_mmio_base_param temp;
|
|
||||||
struct get_mmio_base_param *params = &temp;
|
|
||||||
|
|
||||||
__result =
|
|
||||||
hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params),
|
|
||||||
GXIO_TRIO_OP_GET_MMIO_BASE);
|
|
||||||
*base = params->base;
|
|
||||||
|
|
||||||
return __result;
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL(gxio_trio_get_mmio_base);
|
|
||||||
|
|
||||||
struct check_mmio_offset_param {
|
|
||||||
unsigned long offset;
|
|
||||||
unsigned long size;
|
|
||||||
};
|
|
||||||
|
|
||||||
int gxio_trio_check_mmio_offset(gxio_trio_context_t *context,
|
|
||||||
unsigned long offset, unsigned long size)
|
|
||||||
{
|
|
||||||
struct check_mmio_offset_param temp;
|
|
||||||
struct check_mmio_offset_param *params = &temp;
|
|
||||||
|
|
||||||
params->offset = offset;
|
|
||||||
params->size = size;
|
|
||||||
|
|
||||||
return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
|
|
||||||
sizeof(*params), GXIO_TRIO_OP_CHECK_MMIO_OFFSET);
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL(gxio_trio_check_mmio_offset);
|
|
|
@ -1,77 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright 2013 Tilera Corporation. All Rights Reserved.
|
|
||||||
*
|
|
||||||
* This program is free software; you can redistribute it and/or
|
|
||||||
* modify it under the terms of the GNU General Public License
|
|
||||||
* as published by the Free Software Foundation, version 2.
|
|
||||||
*
|
|
||||||
* This program is distributed in the hope that it will be useful, but
|
|
||||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
|
||||||
* NON INFRINGEMENT. See the GNU General Public License for
|
|
||||||
* more details.
|
|
||||||
*/
|
|
||||||
|
|
||||||
/* This file is machine-generated; DO NOT EDIT! */
|
|
||||||
#include "gxio/iorpc_uart.h"
|
|
||||||
|
|
||||||
struct cfg_interrupt_param {
|
|
||||||
union iorpc_interrupt interrupt;
|
|
||||||
};
|
|
||||||
|
|
||||||
int gxio_uart_cfg_interrupt(gxio_uart_context_t *context, int inter_x,
|
|
||||||
int inter_y, int inter_ipi, int inter_event)
|
|
||||||
{
|
|
||||||
struct cfg_interrupt_param temp;
|
|
||||||
struct cfg_interrupt_param *params = &temp;
|
|
||||||
|
|
||||||
params->interrupt.kernel.x = inter_x;
|
|
||||||
params->interrupt.kernel.y = inter_y;
|
|
||||||
params->interrupt.kernel.ipi = inter_ipi;
|
|
||||||
params->interrupt.kernel.event = inter_event;
|
|
||||||
|
|
||||||
return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
|
|
||||||
sizeof(*params), GXIO_UART_OP_CFG_INTERRUPT);
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL(gxio_uart_cfg_interrupt);
|
|
||||||
|
|
||||||
struct get_mmio_base_param {
|
|
||||||
HV_PTE base;
|
|
||||||
};
|
|
||||||
|
|
||||||
int gxio_uart_get_mmio_base(gxio_uart_context_t *context, HV_PTE *base)
|
|
||||||
{
|
|
||||||
int __result;
|
|
||||||
struct get_mmio_base_param temp;
|
|
||||||
struct get_mmio_base_param *params = &temp;
|
|
||||||
|
|
||||||
__result =
|
|
||||||
hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params),
|
|
||||||
GXIO_UART_OP_GET_MMIO_BASE);
|
|
||||||
*base = params->base;
|
|
||||||
|
|
||||||
return __result;
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL(gxio_uart_get_mmio_base);
|
|
||||||
|
|
||||||
struct check_mmio_offset_param {
|
|
||||||
unsigned long offset;
|
|
||||||
unsigned long size;
|
|
||||||
};
|
|
||||||
|
|
||||||
int gxio_uart_check_mmio_offset(gxio_uart_context_t *context,
|
|
||||||
unsigned long offset, unsigned long size)
|
|
||||||
{
|
|
||||||
struct check_mmio_offset_param temp;
|
|
||||||
struct check_mmio_offset_param *params = &temp;
|
|
||||||
|
|
||||||
params->offset = offset;
|
|
||||||
params->size = size;
|
|
||||||
|
|
||||||
return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
|
|
||||||
sizeof(*params), GXIO_UART_OP_CHECK_MMIO_OFFSET);
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL(gxio_uart_check_mmio_offset);
|
|
|
@ -1,99 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright 2012 Tilera Corporation. All Rights Reserved.
|
|
||||||
*
|
|
||||||
* This program is free software; you can redistribute it and/or
|
|
||||||
* modify it under the terms of the GNU General Public License
|
|
||||||
* as published by the Free Software Foundation, version 2.
|
|
||||||
*
|
|
||||||
* This program is distributed in the hope that it will be useful, but
|
|
||||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
|
||||||
* NON INFRINGEMENT. See the GNU General Public License for
|
|
||||||
* more details.
|
|
||||||
*/
|
|
||||||
|
|
||||||
/* This file is machine-generated; DO NOT EDIT! */
|
|
||||||
#include "gxio/iorpc_usb_host.h"
|
|
||||||
|
|
||||||
struct cfg_interrupt_param {
|
|
||||||
union iorpc_interrupt interrupt;
|
|
||||||
};
|
|
||||||
|
|
||||||
int gxio_usb_host_cfg_interrupt(gxio_usb_host_context_t *context, int inter_x,
|
|
||||||
int inter_y, int inter_ipi, int inter_event)
|
|
||||||
{
|
|
||||||
struct cfg_interrupt_param temp;
|
|
||||||
struct cfg_interrupt_param *params = &temp;
|
|
||||||
|
|
||||||
params->interrupt.kernel.x = inter_x;
|
|
||||||
params->interrupt.kernel.y = inter_y;
|
|
||||||
params->interrupt.kernel.ipi = inter_ipi;
|
|
||||||
params->interrupt.kernel.event = inter_event;
|
|
||||||
|
|
||||||
return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
|
|
||||||
sizeof(*params), GXIO_USB_HOST_OP_CFG_INTERRUPT);
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL(gxio_usb_host_cfg_interrupt);
|
|
||||||
|
|
||||||
struct register_client_memory_param {
|
|
||||||
HV_PTE pte;
|
|
||||||
unsigned int flags;
|
|
||||||
};
|
|
||||||
|
|
||||||
int gxio_usb_host_register_client_memory(gxio_usb_host_context_t *context,
|
|
||||||
HV_PTE pte, unsigned int flags)
|
|
||||||
{
|
|
||||||
struct register_client_memory_param temp;
|
|
||||||
struct register_client_memory_param *params = &temp;
|
|
||||||
|
|
||||||
params->pte = pte;
|
|
||||||
params->flags = flags;
|
|
||||||
|
|
||||||
return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
|
|
||||||
sizeof(*params),
|
|
||||||
GXIO_USB_HOST_OP_REGISTER_CLIENT_MEMORY);
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL(gxio_usb_host_register_client_memory);
|
|
||||||
|
|
||||||
struct get_mmio_base_param {
|
|
||||||
HV_PTE base;
|
|
||||||
};
|
|
||||||
|
|
||||||
int gxio_usb_host_get_mmio_base(gxio_usb_host_context_t *context, HV_PTE *base)
|
|
||||||
{
|
|
||||||
int __result;
|
|
||||||
struct get_mmio_base_param temp;
|
|
||||||
struct get_mmio_base_param *params = &temp;
|
|
||||||
|
|
||||||
__result =
|
|
||||||
hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params),
|
|
||||||
GXIO_USB_HOST_OP_GET_MMIO_BASE);
|
|
||||||
*base = params->base;
|
|
||||||
|
|
||||||
return __result;
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL(gxio_usb_host_get_mmio_base);
|
|
||||||
|
|
||||||
struct check_mmio_offset_param {
|
|
||||||
unsigned long offset;
|
|
||||||
unsigned long size;
|
|
||||||
};
|
|
||||||
|
|
||||||
int gxio_usb_host_check_mmio_offset(gxio_usb_host_context_t *context,
|
|
||||||
unsigned long offset, unsigned long size)
|
|
||||||
{
|
|
||||||
struct check_mmio_offset_param temp;
|
|
||||||
struct check_mmio_offset_param *params = &temp;
|
|
||||||
|
|
||||||
params->offset = offset;
|
|
||||||
params->size = size;
|
|
||||||
|
|
||||||
return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
|
|
||||||
sizeof(*params),
|
|
||||||
GXIO_USB_HOST_OP_CHECK_MMIO_OFFSET);
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL(gxio_usb_host_check_mmio_offset);
|
|
|
@ -1,61 +0,0 @@
/*
 * Copyright 2012 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 *
 * TILE-Gx IORPC support for kernel I/O drivers.
 */

#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/io.h>
#include <gxio/iorpc_globals.h>
#include <gxio/kiorpc.h>

#ifdef DEBUG_IORPC
#define TRACE(FMT, ...) pr_info(SIMPLE_MSG_LINE FMT, ## __VA_ARGS__)
#else
#define TRACE(...)
#endif

/* Create kernel-VA-space MMIO mapping for an on-chip IO device. */
void __iomem *iorpc_ioremap(int hv_fd, resource_size_t offset,
			    unsigned long size)
{
	pgprot_t mmio_base, prot = { 0 };
	unsigned long pfn;
	int err;

	/* Look up the shim's lotar and base PA. */
	err = __iorpc_get_mmio_base(hv_fd, &mmio_base);
	if (err) {
		TRACE("get_mmio_base() failure: %d\n", err);
		return NULL;
	}

	/* Make sure the HV driver approves of our offset and size. */
	err = __iorpc_check_mmio_offset(hv_fd, offset, size);
	if (err) {
		TRACE("check_mmio_offset() failure: %d\n", err);
		return NULL;
	}

	/*
	 * mmio_base contains a base pfn and homing coordinates. Turn
	 * it into an MMIO pgprot and offset pfn.
	 */
	prot = hv_pte_set_lotar(prot, hv_pte_get_lotar(mmio_base));
	pfn = pte_pfn(mmio_base) + PFN_DOWN(offset);

	return ioremap_prot(PFN_PHYS(pfn), size, prot);
}

EXPORT_SYMBOL(iorpc_ioremap);
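The drivers removed later in this commit (gxio_uart_init(), gxio_usb_host_init(), gxio_mpipe_init()) consume iorpc_ioremap() through the same open/map/unwind sequence: open the per-device IORPC file, map its MMIO window, and close the file again if the mapping fails. A condensed, stand-alone sketch of that sequence follows; the demo_* helpers are hypothetical stand-ins for hv_dev_open(), iorpc_ioremap() and hv_dev_close(), not real kernel APIs.

/*
 * Condensed sketch of the open/map/unwind sequence used by the gxio
 * drivers in this commit.  All demo_* helpers are illustrative stand-ins.
 */
#include <stdio.h>

static int demo_hv_dev_open(const char *path)
{
	printf("open %s\n", path);
	return 3;			/* pretend the hypervisor gave us fd 3 */
}

static void demo_hv_dev_close(int fd)
{
	printf("close %d\n", fd);
}

static void *demo_iorpc_ioremap(int fd, unsigned long off, unsigned long size)
{
	printf("map fd=%d off=%#lx size=%#lx\n", fd, off, size);
	return (void *)0x1;		/* pretend the mapping succeeded */
}

struct demo_context {
	int fd;
	void *mmio_base;
};

/* Open the IORPC device, map its MMIO window, and unwind on failure. */
static int demo_init(struct demo_context *ctx, const char *path)
{
	ctx->fd = demo_hv_dev_open(path);
	if (ctx->fd < 0)
		return -1;

	ctx->mmio_base = demo_iorpc_ioremap(ctx->fd, 0, 0x10000);
	if (ctx->mmio_base == NULL) {
		demo_hv_dev_close(ctx->fd);
		ctx->fd = -1;
		return -1;
	}

	return 0;
}

int main(void)
{
	struct demo_context ctx;

	return demo_init(&ctx, "uart/0/iorpc");
}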
@ -1,584 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright 2012 Tilera Corporation. All Rights Reserved.
|
|
||||||
*
|
|
||||||
* This program is free software; you can redistribute it and/or
|
|
||||||
* modify it under the terms of the GNU General Public License
|
|
||||||
* as published by the Free Software Foundation, version 2.
|
|
||||||
*
|
|
||||||
* This program is distributed in the hope that it will be useful, but
|
|
||||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
|
||||||
* NON INFRINGEMENT. See the GNU General Public License for
|
|
||||||
* more details.
|
|
||||||
*/
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Implementation of mpipe gxio calls.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#include <linux/errno.h>
|
|
||||||
#include <linux/io.h>
|
|
||||||
#include <linux/module.h>
|
|
||||||
#include <linux/string.h>
|
|
||||||
|
|
||||||
#include <gxio/iorpc_globals.h>
|
|
||||||
#include <gxio/iorpc_mpipe.h>
|
|
||||||
#include <gxio/iorpc_mpipe_info.h>
|
|
||||||
#include <gxio/kiorpc.h>
|
|
||||||
#include <gxio/mpipe.h>
|
|
||||||
|
|
||||||
/* HACK: Avoid pointless "shadow" warnings. */
|
|
||||||
#define link link_shadow
|
|
||||||
|
|
||||||
int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index)
|
|
||||||
{
|
|
||||||
char file[32];
|
|
||||||
|
|
||||||
int fd;
|
|
||||||
int i;
|
|
||||||
|
|
||||||
if (mpipe_index >= GXIO_MPIPE_INSTANCE_MAX)
|
|
||||||
return -EINVAL;
|
|
||||||
|
|
||||||
snprintf(file, sizeof(file), "mpipe/%d/iorpc", mpipe_index);
|
|
||||||
fd = hv_dev_open((HV_VirtAddr) file, 0);
|
|
||||||
|
|
||||||
context->fd = fd;
|
|
||||||
|
|
||||||
if (fd < 0) {
|
|
||||||
if (fd >= GXIO_ERR_MIN && fd <= GXIO_ERR_MAX)
|
|
||||||
return fd;
|
|
||||||
else
|
|
||||||
return -ENODEV;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Map in the MMIO space. */
|
|
||||||
context->mmio_cfg_base = (void __force *)
|
|
||||||
iorpc_ioremap(fd, HV_MPIPE_CONFIG_MMIO_OFFSET,
|
|
||||||
HV_MPIPE_CONFIG_MMIO_SIZE);
|
|
||||||
if (context->mmio_cfg_base == NULL)
|
|
||||||
goto cfg_failed;
|
|
||||||
|
|
||||||
context->mmio_fast_base = (void __force *)
|
|
||||||
iorpc_ioremap(fd, HV_MPIPE_FAST_MMIO_OFFSET,
|
|
||||||
HV_MPIPE_FAST_MMIO_SIZE);
|
|
||||||
if (context->mmio_fast_base == NULL)
|
|
||||||
goto fast_failed;
|
|
||||||
|
|
||||||
/* Initialize the stacks. */
|
|
||||||
for (i = 0; i < 8; i++)
|
|
||||||
context->__stacks.stacks[i] = 255;
|
|
||||||
|
|
||||||
context->instance = mpipe_index;
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
|
|
||||||
fast_failed:
|
|
||||||
iounmap((void __force __iomem *)(context->mmio_cfg_base));
|
|
||||||
cfg_failed:
|
|
||||||
hv_dev_close(context->fd);
|
|
||||||
context->fd = -1;
|
|
||||||
return -ENODEV;
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL_GPL(gxio_mpipe_init);
|
|
||||||
|
|
||||||
int gxio_mpipe_destroy(gxio_mpipe_context_t *context)
|
|
||||||
{
|
|
||||||
iounmap((void __force __iomem *)(context->mmio_cfg_base));
|
|
||||||
iounmap((void __force __iomem *)(context->mmio_fast_base));
|
|
||||||
return hv_dev_close(context->fd);
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL_GPL(gxio_mpipe_destroy);
|
|
||||||
|
|
||||||
static int16_t gxio_mpipe_buffer_sizes[8] =
|
|
||||||
{ 128, 256, 512, 1024, 1664, 4096, 10368, 16384 };
|
|
||||||
|
|
||||||
gxio_mpipe_buffer_size_enum_t gxio_mpipe_buffer_size_to_buffer_size_enum(size_t
|
|
||||||
size)
|
|
||||||
{
|
|
||||||
int i;
|
|
||||||
for (i = 0; i < 7; i++)
|
|
||||||
if (size <= gxio_mpipe_buffer_sizes[i])
|
|
||||||
break;
|
|
||||||
return i;
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL_GPL(gxio_mpipe_buffer_size_to_buffer_size_enum);
|
|
||||||
|
|
||||||
size_t gxio_mpipe_buffer_size_enum_to_buffer_size(gxio_mpipe_buffer_size_enum_t
|
|
||||||
buffer_size_enum)
|
|
||||||
{
|
|
||||||
if (buffer_size_enum > 7)
|
|
||||||
buffer_size_enum = 7;
|
|
||||||
|
|
||||||
return gxio_mpipe_buffer_sizes[buffer_size_enum];
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL_GPL(gxio_mpipe_buffer_size_enum_to_buffer_size);
|
|
||||||
|
|
||||||
size_t gxio_mpipe_calc_buffer_stack_bytes(unsigned long buffers)
|
|
||||||
{
|
|
||||||
const int BUFFERS_PER_LINE = 12;
|
|
||||||
|
|
||||||
/* Count the number of cachelines. */
|
|
||||||
unsigned long lines =
|
|
||||||
(buffers + BUFFERS_PER_LINE - 1) / BUFFERS_PER_LINE;
|
|
||||||
|
|
||||||
/* Convert to bytes. */
|
|
||||||
return lines * CHIP_L2_LINE_SIZE();
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL_GPL(gxio_mpipe_calc_buffer_stack_bytes);
|
|
||||||
|
|
||||||
int gxio_mpipe_init_buffer_stack(gxio_mpipe_context_t *context,
|
|
||||||
unsigned int stack,
|
|
||||||
gxio_mpipe_buffer_size_enum_t
|
|
||||||
buffer_size_enum, void *mem, size_t mem_size,
|
|
||||||
unsigned int mem_flags)
|
|
||||||
{
|
|
||||||
int result;
|
|
||||||
|
|
||||||
memset(mem, 0, mem_size);
|
|
||||||
|
|
||||||
result = gxio_mpipe_init_buffer_stack_aux(context, mem, mem_size,
|
|
||||||
mem_flags, stack,
|
|
||||||
buffer_size_enum);
|
|
||||||
if (result < 0)
|
|
||||||
return result;
|
|
||||||
|
|
||||||
/* Save the stack. */
|
|
||||||
context->__stacks.stacks[buffer_size_enum] = stack;
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL_GPL(gxio_mpipe_init_buffer_stack);
|
|
||||||
|
|
||||||
int gxio_mpipe_init_notif_ring(gxio_mpipe_context_t *context,
|
|
||||||
unsigned int ring,
|
|
||||||
void *mem, size_t mem_size,
|
|
||||||
unsigned int mem_flags)
|
|
||||||
{
|
|
||||||
return gxio_mpipe_init_notif_ring_aux(context, mem, mem_size,
|
|
||||||
mem_flags, ring);
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL_GPL(gxio_mpipe_init_notif_ring);
|
|
||||||
|
|
||||||
int gxio_mpipe_init_notif_group_and_buckets(gxio_mpipe_context_t *context,
|
|
||||||
unsigned int group,
|
|
||||||
unsigned int ring,
|
|
||||||
unsigned int num_rings,
|
|
||||||
unsigned int bucket,
|
|
||||||
unsigned int num_buckets,
|
|
||||||
gxio_mpipe_bucket_mode_t mode)
|
|
||||||
{
|
|
||||||
int i;
|
|
||||||
int result;
|
|
||||||
|
|
||||||
gxio_mpipe_bucket_info_t bucket_info = { {
|
|
||||||
.group = group,
|
|
||||||
.mode = mode,
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
gxio_mpipe_notif_group_bits_t bits = { {0} };
|
|
||||||
|
|
||||||
for (i = 0; i < num_rings; i++)
|
|
||||||
gxio_mpipe_notif_group_add_ring(&bits, ring + i);
|
|
||||||
|
|
||||||
result = gxio_mpipe_init_notif_group(context, group, bits);
|
|
||||||
if (result != 0)
|
|
||||||
return result;
|
|
||||||
|
|
||||||
for (i = 0; i < num_buckets; i++) {
|
|
||||||
bucket_info.notifring = ring + (i % num_rings);
|
|
||||||
|
|
||||||
result = gxio_mpipe_init_bucket(context, bucket + i,
|
|
||||||
bucket_info);
|
|
||||||
if (result != 0)
|
|
||||||
return result;
|
|
||||||
}
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL_GPL(gxio_mpipe_init_notif_group_and_buckets);
|
|
||||||
|
|
||||||
int gxio_mpipe_init_edma_ring(gxio_mpipe_context_t *context,
|
|
||||||
unsigned int ring, unsigned int channel,
|
|
||||||
void *mem, size_t mem_size,
|
|
||||||
unsigned int mem_flags)
|
|
||||||
{
|
|
||||||
memset(mem, 0, mem_size);
|
|
||||||
|
|
||||||
return gxio_mpipe_init_edma_ring_aux(context, mem, mem_size, mem_flags,
|
|
||||||
ring, channel);
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL_GPL(gxio_mpipe_init_edma_ring);
|
|
||||||
|
|
||||||
void gxio_mpipe_rules_init(gxio_mpipe_rules_t *rules,
|
|
||||||
gxio_mpipe_context_t *context)
|
|
||||||
{
|
|
||||||
rules->context = context;
|
|
||||||
memset(&rules->list, 0, sizeof(rules->list));
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL_GPL(gxio_mpipe_rules_init);
|
|
||||||
|
|
||||||
int gxio_mpipe_rules_begin(gxio_mpipe_rules_t *rules,
|
|
||||||
unsigned int bucket, unsigned int num_buckets,
|
|
||||||
gxio_mpipe_rules_stacks_t *stacks)
|
|
||||||
{
|
|
||||||
int i;
|
|
||||||
int stack = 255;
|
|
||||||
|
|
||||||
gxio_mpipe_rules_list_t *list = &rules->list;
|
|
||||||
|
|
||||||
/* Current rule. */
|
|
||||||
gxio_mpipe_rules_rule_t *rule =
|
|
||||||
(gxio_mpipe_rules_rule_t *) (list->rules + list->head);
|
|
||||||
|
|
||||||
unsigned int head = list->tail;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Align next rule properly.
|
|
||||||
*Note that "dmacs_and_vlans" will also be aligned.
|
|
||||||
*/
|
|
||||||
unsigned int pad = 0;
|
|
||||||
while (((head + pad) % __alignof__(gxio_mpipe_rules_rule_t)) != 0)
|
|
||||||
pad++;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Verify room.
|
|
||||||
* ISSUE: Mark rules as broken on error?
|
|
||||||
*/
|
|
||||||
if (head + pad + sizeof(*rule) >= sizeof(list->rules))
|
|
||||||
return GXIO_MPIPE_ERR_RULES_FULL;
|
|
||||||
|
|
||||||
/* Verify num_buckets is a power of 2. */
|
|
||||||
if (__builtin_popcount(num_buckets) != 1)
|
|
||||||
return GXIO_MPIPE_ERR_RULES_INVALID;
|
|
||||||
|
|
||||||
/* Add padding to previous rule. */
|
|
||||||
rule->size += pad;
|
|
||||||
|
|
||||||
/* Start a new rule. */
|
|
||||||
list->head = head + pad;
|
|
||||||
|
|
||||||
rule = (gxio_mpipe_rules_rule_t *) (list->rules + list->head);
|
|
||||||
|
|
||||||
/* Default some values. */
|
|
||||||
rule->headroom = 2;
|
|
||||||
rule->tailroom = 0;
|
|
||||||
rule->capacity = 16384;
|
|
||||||
|
|
||||||
/* Save the bucket info. */
|
|
||||||
rule->bucket_mask = num_buckets - 1;
|
|
||||||
rule->bucket_first = bucket;
|
|
||||||
|
|
||||||
for (i = 8 - 1; i >= 0; i--) {
|
|
||||||
int maybe =
|
|
||||||
stacks ? stacks->stacks[i] : rules->context->__stacks.
|
|
||||||
stacks[i];
|
|
||||||
if (maybe != 255)
|
|
||||||
stack = maybe;
|
|
||||||
rule->stacks.stacks[i] = stack;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (stack == 255)
|
|
||||||
return GXIO_MPIPE_ERR_RULES_INVALID;
|
|
||||||
|
|
||||||
/* NOTE: Only entries at the end of the array can be 255. */
|
|
||||||
for (i = 8 - 1; i > 0; i--) {
|
|
||||||
if (rule->stacks.stacks[i] == 255) {
|
|
||||||
rule->stacks.stacks[i] = stack;
|
|
||||||
rule->capacity =
|
|
||||||
gxio_mpipe_buffer_size_enum_to_buffer_size(i -
|
|
||||||
1);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
rule->size = sizeof(*rule);
|
|
||||||
list->tail = list->head + rule->size;
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL_GPL(gxio_mpipe_rules_begin);
|
|
||||||
|
|
||||||
int gxio_mpipe_rules_add_channel(gxio_mpipe_rules_t *rules,
|
|
||||||
unsigned int channel)
|
|
||||||
{
|
|
||||||
gxio_mpipe_rules_list_t *list = &rules->list;
|
|
||||||
|
|
||||||
gxio_mpipe_rules_rule_t *rule =
|
|
||||||
(gxio_mpipe_rules_rule_t *) (list->rules + list->head);
|
|
||||||
|
|
||||||
/* Verify channel. */
|
|
||||||
if (channel >= 32)
|
|
||||||
return GXIO_MPIPE_ERR_RULES_INVALID;
|
|
||||||
|
|
||||||
/* Verify begun. */
|
|
||||||
if (list->tail == 0)
|
|
||||||
return GXIO_MPIPE_ERR_RULES_EMPTY;
|
|
||||||
|
|
||||||
rule->channel_bits |= (1UL << channel);
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL_GPL(gxio_mpipe_rules_add_channel);
|
|
||||||
|
|
||||||
int gxio_mpipe_rules_set_headroom(gxio_mpipe_rules_t *rules, uint8_t headroom)
|
|
||||||
{
|
|
||||||
gxio_mpipe_rules_list_t *list = &rules->list;
|
|
||||||
|
|
||||||
gxio_mpipe_rules_rule_t *rule =
|
|
||||||
(gxio_mpipe_rules_rule_t *) (list->rules + list->head);
|
|
||||||
|
|
||||||
/* Verify begun. */
|
|
||||||
if (list->tail == 0)
|
|
||||||
return GXIO_MPIPE_ERR_RULES_EMPTY;
|
|
||||||
|
|
||||||
rule->headroom = headroom;
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL_GPL(gxio_mpipe_rules_set_headroom);
|
|
||||||
|
|
||||||
int gxio_mpipe_rules_commit(gxio_mpipe_rules_t *rules)
|
|
||||||
{
|
|
||||||
gxio_mpipe_rules_list_t *list = &rules->list;
|
|
||||||
unsigned int size =
|
|
||||||
offsetof(gxio_mpipe_rules_list_t, rules) + list->tail;
|
|
||||||
return gxio_mpipe_commit_rules(rules->context, list, size);
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL_GPL(gxio_mpipe_rules_commit);
|
|
||||||
|
|
||||||
int gxio_mpipe_iqueue_init(gxio_mpipe_iqueue_t *iqueue,
|
|
||||||
gxio_mpipe_context_t *context,
|
|
||||||
unsigned int ring,
|
|
||||||
void *mem, size_t mem_size, unsigned int mem_flags)
|
|
||||||
{
|
|
||||||
/* The init call below will verify that "mem_size" is legal. */
|
|
||||||
unsigned int num_entries = mem_size / sizeof(gxio_mpipe_idesc_t);
|
|
||||||
|
|
||||||
iqueue->context = context;
|
|
||||||
iqueue->idescs = (gxio_mpipe_idesc_t *)mem;
|
|
||||||
iqueue->ring = ring;
|
|
||||||
iqueue->num_entries = num_entries;
|
|
||||||
iqueue->mask_num_entries = num_entries - 1;
|
|
||||||
iqueue->log2_num_entries = __builtin_ctz(num_entries);
|
|
||||||
iqueue->head = 1;
|
|
||||||
#ifdef __BIG_ENDIAN__
|
|
||||||
iqueue->swapped = 0;
|
|
||||||
#endif
|
|
||||||
|
|
||||||
/* Initialize the "tail". */
|
|
||||||
__gxio_mmio_write(mem, iqueue->head);
|
|
||||||
|
|
||||||
return gxio_mpipe_init_notif_ring(context, ring, mem, mem_size,
|
|
||||||
mem_flags);
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL_GPL(gxio_mpipe_iqueue_init);
|
|
||||||
|
|
||||||
int gxio_mpipe_equeue_init(gxio_mpipe_equeue_t *equeue,
|
|
||||||
gxio_mpipe_context_t *context,
|
|
||||||
unsigned int ering,
|
|
||||||
unsigned int channel,
|
|
||||||
void *mem, unsigned int mem_size,
|
|
||||||
unsigned int mem_flags)
|
|
||||||
{
|
|
||||||
/* The init call below will verify that "mem_size" is legal. */
|
|
||||||
unsigned int num_entries = mem_size / sizeof(gxio_mpipe_edesc_t);
|
|
||||||
|
|
||||||
/* Offset used to read number of completed commands. */
|
|
||||||
MPIPE_EDMA_POST_REGION_ADDR_t offset;
|
|
||||||
|
|
||||||
int result = gxio_mpipe_init_edma_ring(context, ering, channel,
|
|
||||||
mem, mem_size, mem_flags);
|
|
||||||
if (result < 0)
|
|
||||||
return result;
|
|
||||||
|
|
||||||
memset(equeue, 0, sizeof(*equeue));
|
|
||||||
|
|
||||||
offset.word = 0;
|
|
||||||
offset.region =
|
|
||||||
MPIPE_MMIO_ADDR__REGION_VAL_EDMA -
|
|
||||||
MPIPE_MMIO_ADDR__REGION_VAL_IDMA;
|
|
||||||
offset.ring = ering;
|
|
||||||
|
|
||||||
__gxio_dma_queue_init(&equeue->dma_queue,
|
|
||||||
context->mmio_fast_base + offset.word,
|
|
||||||
num_entries);
|
|
||||||
equeue->edescs = mem;
|
|
||||||
equeue->mask_num_entries = num_entries - 1;
|
|
||||||
equeue->log2_num_entries = __builtin_ctz(num_entries);
|
|
||||||
equeue->context = context;
|
|
||||||
equeue->ering = ering;
|
|
||||||
equeue->channel = channel;
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL_GPL(gxio_mpipe_equeue_init);
|
|
||||||
|
|
||||||
int gxio_mpipe_set_timestamp(gxio_mpipe_context_t *context,
|
|
||||||
const struct timespec64 *ts)
|
|
||||||
{
|
|
||||||
cycles_t cycles = get_cycles();
|
|
||||||
return gxio_mpipe_set_timestamp_aux(context, (uint64_t)ts->tv_sec,
|
|
||||||
(uint64_t)ts->tv_nsec,
|
|
||||||
(uint64_t)cycles);
|
|
||||||
}
|
|
||||||
EXPORT_SYMBOL_GPL(gxio_mpipe_set_timestamp);
|
|
||||||
|
|
||||||
int gxio_mpipe_get_timestamp(gxio_mpipe_context_t *context,
|
|
||||||
struct timespec64 *ts)
|
|
||||||
{
|
|
||||||
int ret;
|
|
||||||
cycles_t cycles_prev, cycles_now, clock_rate;
|
|
||||||
cycles_prev = get_cycles();
|
|
||||||
ret = gxio_mpipe_get_timestamp_aux(context, (uint64_t *)&ts->tv_sec,
|
|
||||||
(uint64_t *)&ts->tv_nsec,
|
|
||||||
(uint64_t *)&cycles_now);
|
|
||||||
if (ret < 0) {
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
clock_rate = get_clock_rate();
|
|
||||||
ts->tv_nsec -= (cycles_now - cycles_prev) * 1000000000LL / clock_rate;
|
|
||||||
if (ts->tv_nsec < 0) {
|
|
||||||
ts->tv_nsec += 1000000000LL;
|
|
||||||
ts->tv_sec -= 1;
|
|
||||||
}
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
EXPORT_SYMBOL_GPL(gxio_mpipe_get_timestamp);
|
|
||||||
|
|
||||||
int gxio_mpipe_adjust_timestamp(gxio_mpipe_context_t *context, int64_t delta)
|
|
||||||
{
|
|
||||||
return gxio_mpipe_adjust_timestamp_aux(context, delta);
|
|
||||||
}
|
|
||||||
EXPORT_SYMBOL_GPL(gxio_mpipe_adjust_timestamp);
|
|
||||||
|
|
||||||
/* Get our internal context used for link name access. This context is
|
|
||||||
* special in that it is not associated with an mPIPE service domain.
|
|
||||||
*/
|
|
||||||
static gxio_mpipe_context_t *_gxio_get_link_context(void)
|
|
||||||
{
|
|
||||||
static gxio_mpipe_context_t context;
|
|
||||||
static gxio_mpipe_context_t *contextp;
|
|
||||||
static int tried_open = 0;
|
|
||||||
static DEFINE_MUTEX(mutex);
|
|
||||||
|
|
||||||
mutex_lock(&mutex);
|
|
||||||
|
|
||||||
if (!tried_open) {
|
|
||||||
int i = 0;
|
|
||||||
tried_open = 1;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* "4" here is the maximum possible number of mPIPE shims; it's
|
|
||||||
* an exaggeration but we shouldn't ever go beyond 2 anyway.
|
|
||||||
*/
|
|
||||||
for (i = 0; i < 4; i++) {
|
|
||||||
char file[80];
|
|
||||||
|
|
||||||
snprintf(file, sizeof(file), "mpipe/%d/iorpc_info", i);
|
|
||||||
context.fd = hv_dev_open((HV_VirtAddr) file, 0);
|
|
||||||
if (context.fd < 0)
|
|
||||||
continue;
|
|
||||||
|
|
||||||
contextp = &context;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
mutex_unlock(&mutex);
|
|
||||||
|
|
||||||
return contextp;
|
|
||||||
}
|
|
||||||
|
|
||||||
int gxio_mpipe_link_instance(const char *link_name)
|
|
||||||
{
|
|
||||||
_gxio_mpipe_link_name_t name;
|
|
||||||
gxio_mpipe_context_t *context = _gxio_get_link_context();
|
|
||||||
|
|
||||||
if (!context)
|
|
||||||
return GXIO_ERR_NO_DEVICE;
|
|
||||||
|
|
||||||
if (strscpy(name.name, link_name, sizeof(name.name)) < 0)
|
|
||||||
return GXIO_ERR_NO_DEVICE;
|
|
||||||
|
|
||||||
return gxio_mpipe_info_instance_aux(context, name);
|
|
||||||
}
|
|
||||||
EXPORT_SYMBOL_GPL(gxio_mpipe_link_instance);
|
|
||||||
|
|
||||||
int gxio_mpipe_link_enumerate_mac(int idx, char *link_name, uint8_t *link_mac)
|
|
||||||
{
|
|
||||||
int rv;
|
|
||||||
_gxio_mpipe_link_name_t name;
|
|
||||||
_gxio_mpipe_link_mac_t mac;
|
|
||||||
|
|
||||||
gxio_mpipe_context_t *context = _gxio_get_link_context();
|
|
||||||
if (!context)
|
|
||||||
return GXIO_ERR_NO_DEVICE;
|
|
||||||
|
|
||||||
rv = gxio_mpipe_info_enumerate_aux(context, idx, &name, &mac);
|
|
||||||
if (rv >= 0) {
|
|
||||||
if (strscpy(link_name, name.name, sizeof(name.name)) < 0)
|
|
||||||
return GXIO_ERR_INVAL_MEMORY_SIZE;
|
|
||||||
memcpy(link_mac, mac.mac, sizeof(mac.mac));
|
|
||||||
}
|
|
||||||
|
|
||||||
return rv;
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL_GPL(gxio_mpipe_link_enumerate_mac);
|
|
||||||
|
|
||||||
int gxio_mpipe_link_open(gxio_mpipe_link_t *link,
|
|
||||||
gxio_mpipe_context_t *context, const char *link_name,
|
|
||||||
unsigned int flags)
|
|
||||||
{
|
|
||||||
_gxio_mpipe_link_name_t name;
|
|
||||||
int rv;
|
|
||||||
|
|
||||||
if (strscpy(name.name, link_name, sizeof(name.name)) < 0)
|
|
||||||
return GXIO_ERR_NO_DEVICE;
|
|
||||||
|
|
||||||
rv = gxio_mpipe_link_open_aux(context, name, flags);
|
|
||||||
if (rv < 0)
|
|
||||||
return rv;
|
|
||||||
|
|
||||||
link->context = context;
|
|
||||||
link->channel = rv >> 8;
|
|
||||||
link->mac = rv & 0xFF;
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL_GPL(gxio_mpipe_link_open);
|
|
||||||
|
|
||||||
int gxio_mpipe_link_close(gxio_mpipe_link_t *link)
|
|
||||||
{
|
|
||||||
return gxio_mpipe_link_close_aux(link->context, link->mac);
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL_GPL(gxio_mpipe_link_close);
|
|
||||||
|
|
||||||
int gxio_mpipe_link_set_attr(gxio_mpipe_link_t *link, uint32_t attr,
|
|
||||||
int64_t val)
|
|
||||||
{
|
|
||||||
return gxio_mpipe_link_set_attr_aux(link->context, link->mac, attr,
|
|
||||||
val);
|
|
||||||
}
|
|
||||||
|
|
||||||
EXPORT_SYMBOL_GPL(gxio_mpipe_link_set_attr);
|
|
|
@ -1,49 +0,0 @@
/*
 * Copyright 2012 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

/*
 * Implementation of trio gxio calls.
 */

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/module.h>

#include <gxio/trio.h>
#include <gxio/iorpc_globals.h>
#include <gxio/iorpc_trio.h>
#include <gxio/kiorpc.h>

int gxio_trio_init(gxio_trio_context_t *context, unsigned int trio_index)
{
	char file[32];
	int fd;

	snprintf(file, sizeof(file), "trio/%d/iorpc", trio_index);
	fd = hv_dev_open((HV_VirtAddr) file, 0);
	if (fd < 0) {
		context->fd = -1;

		if (fd >= GXIO_ERR_MIN && fd <= GXIO_ERR_MAX)
			return fd;
		else
			return -ENODEV;
	}

	context->fd = fd;

	return 0;
}

EXPORT_SYMBOL_GPL(gxio_trio_init);
@ -1,87 +0,0 @@
/*
 * Copyright 2013 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

/*
 * Implementation of UART gxio calls.
 */

#include <linux/io.h>
#include <linux/errno.h>
#include <linux/module.h>

#include <gxio/uart.h>
#include <gxio/iorpc_globals.h>
#include <gxio/iorpc_uart.h>
#include <gxio/kiorpc.h>

int gxio_uart_init(gxio_uart_context_t *context, int uart_index)
{
	char file[32];
	int fd;

	snprintf(file, sizeof(file), "uart/%d/iorpc", uart_index);
	fd = hv_dev_open((HV_VirtAddr) file, 0);
	if (fd < 0) {
		if (fd >= GXIO_ERR_MIN && fd <= GXIO_ERR_MAX)
			return fd;
		else
			return -ENODEV;
	}

	context->fd = fd;

	/* Map in the MMIO space. */
	context->mmio_base = (void __force *)
		iorpc_ioremap(fd, HV_UART_MMIO_OFFSET, HV_UART_MMIO_SIZE);

	if (context->mmio_base == NULL) {
		hv_dev_close(context->fd);
		context->fd = -1;
		return -ENODEV;
	}

	return 0;
}

EXPORT_SYMBOL_GPL(gxio_uart_init);

int gxio_uart_destroy(gxio_uart_context_t *context)
{
	iounmap((void __force __iomem *)(context->mmio_base));
	hv_dev_close(context->fd);

	context->mmio_base = NULL;
	context->fd = -1;

	return 0;
}

EXPORT_SYMBOL_GPL(gxio_uart_destroy);

/* UART register write wrapper. */
void gxio_uart_write(gxio_uart_context_t *context, uint64_t offset,
		     uint64_t word)
{
	__gxio_mmio_write(context->mmio_base + offset, word);
}

EXPORT_SYMBOL_GPL(gxio_uart_write);

/* UART register read wrapper. */
uint64_t gxio_uart_read(gxio_uart_context_t *context, uint64_t offset)
{
	return __gxio_mmio_read(context->mmio_base + offset);
}

EXPORT_SYMBOL_GPL(gxio_uart_read);
@ -1,91 +0,0 @@
/*
 * Copyright 2012 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

/*
 *
 * Implementation of USB gxio calls.
 */

#include <linux/io.h>
#include <linux/errno.h>
#include <linux/module.h>

#include <gxio/iorpc_globals.h>
#include <gxio/iorpc_usb_host.h>
#include <gxio/kiorpc.h>
#include <gxio/usb_host.h>

int gxio_usb_host_init(gxio_usb_host_context_t *context, int usb_index,
		       int is_ehci)
{
	char file[32];
	int fd;

	if (is_ehci)
		snprintf(file, sizeof(file), "usb_host/%d/iorpc/ehci",
			 usb_index);
	else
		snprintf(file, sizeof(file), "usb_host/%d/iorpc/ohci",
			 usb_index);

	fd = hv_dev_open((HV_VirtAddr) file, 0);
	if (fd < 0) {
		if (fd >= GXIO_ERR_MIN && fd <= GXIO_ERR_MAX)
			return fd;
		else
			return -ENODEV;
	}

	context->fd = fd;

	// Map in the MMIO space.
	context->mmio_base =
		(void __force *)iorpc_ioremap(fd, 0, HV_USB_HOST_MMIO_SIZE);

	if (context->mmio_base == NULL) {
		hv_dev_close(context->fd);
		return -ENODEV;
	}

	return 0;
}

EXPORT_SYMBOL_GPL(gxio_usb_host_init);

int gxio_usb_host_destroy(gxio_usb_host_context_t *context)
{
	iounmap((void __force __iomem *)(context->mmio_base));
	hv_dev_close(context->fd);

	context->mmio_base = NULL;
	context->fd = -1;

	return 0;
}

EXPORT_SYMBOL_GPL(gxio_usb_host_destroy);

void *gxio_usb_host_get_reg_start(gxio_usb_host_context_t *context)
{
	return context->mmio_base;
}

EXPORT_SYMBOL_GPL(gxio_usb_host_get_reg_start);

size_t gxio_usb_host_get_reg_len(gxio_usb_host_context_t *context)
{
	return HV_USB_HOST_MMIO_SIZE;
}

EXPORT_SYMBOL_GPL(gxio_usb_host_get_reg_len);
@ -1,371 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright 2012 Tilera Corporation. All Rights Reserved.
|
|
||||||
*
|
|
||||||
* This program is free software; you can redistribute it and/or
|
|
||||||
* modify it under the terms of the GNU General Public License
|
|
||||||
* as published by the Free Software Foundation, version 2.
|
|
||||||
*
|
|
||||||
* This program is distributed in the hope that it will be useful, but
|
|
||||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
|
||||||
* NON INFRINGEMENT. See the GNU General Public License for
|
|
||||||
* more details.
|
|
||||||
*/
|
|
||||||
|
|
||||||
/* Machine-generated file; do not edit. */
|
|
||||||
|
|
||||||
#ifndef __ARCH_MPIPE_H__
|
|
||||||
#define __ARCH_MPIPE_H__
|
|
||||||
|
|
||||||
#include <arch/abi.h>
|
|
||||||
#include <arch/mpipe_def.h>
|
|
||||||
|
|
||||||
#ifndef __ASSEMBLER__
|
|
||||||
|
|
||||||
/*
|
|
||||||
* MMIO Ingress DMA Release Region Address.
|
|
||||||
* This is a description of the physical addresses used to manipulate ingress
|
|
||||||
* credit counters. Accesses to this address space should use an address of
|
|
||||||
* this form and a value like that specified in IDMA_RELEASE_REGION_VAL.
|
|
||||||
*/
|
|
||||||
|
|
||||||
__extension__
|
|
||||||
typedef union
|
|
||||||
{
|
|
||||||
struct
|
|
||||||
{
|
|
||||||
#ifndef __BIG_ENDIAN__
|
|
||||||
/* Reserved. */
|
|
||||||
uint_reg_t __reserved_0 : 3;
|
|
||||||
/* NotifRing to be released */
|
|
||||||
uint_reg_t ring : 8;
|
|
||||||
/* Bucket to be released */
|
|
||||||
uint_reg_t bucket : 13;
|
|
||||||
/* Enable NotifRing release */
|
|
||||||
uint_reg_t ring_enable : 1;
|
|
||||||
/* Enable Bucket release */
|
|
||||||
uint_reg_t bucket_enable : 1;
|
|
||||||
/*
|
|
||||||
* This field of the address selects the region (address space) to be
|
|
||||||
* accessed. For the iDMA release region, this field must be 4.
|
|
||||||
*/
|
|
||||||
uint_reg_t region : 3;
|
|
||||||
/* Reserved. */
|
|
||||||
uint_reg_t __reserved_1 : 6;
|
|
||||||
/* This field of the address indexes the 32 entry service domain table. */
|
|
||||||
uint_reg_t svc_dom : 5;
|
|
||||||
/* Reserved. */
|
|
||||||
uint_reg_t __reserved_2 : 24;
|
|
||||||
#else /* __BIG_ENDIAN__ */
|
|
||||||
uint_reg_t __reserved_2 : 24;
|
|
||||||
uint_reg_t svc_dom : 5;
|
|
||||||
uint_reg_t __reserved_1 : 6;
|
|
||||||
uint_reg_t region : 3;
|
|
||||||
uint_reg_t bucket_enable : 1;
|
|
||||||
uint_reg_t ring_enable : 1;
|
|
||||||
uint_reg_t bucket : 13;
|
|
||||||
uint_reg_t ring : 8;
|
|
||||||
uint_reg_t __reserved_0 : 3;
|
|
||||||
#endif
|
|
||||||
};
|
|
||||||
|
|
||||||
uint_reg_t word;
|
|
||||||
} MPIPE_IDMA_RELEASE_REGION_ADDR_t;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* MMIO Ingress DMA Release Region Value - Release NotifRing and/or Bucket.
|
|
||||||
* Provides release of the associated NotifRing. The address of the MMIO
|
|
||||||
* operation is described in IDMA_RELEASE_REGION_ADDR.
|
|
||||||
*/
|
|
||||||
|
|
||||||
__extension__
|
|
||||||
typedef union
|
|
||||||
{
|
|
||||||
struct
|
|
||||||
{
|
|
||||||
#ifndef __BIG_ENDIAN__
|
|
||||||
/*
|
|
||||||
* Number of packets being released. The load balancer's count of
|
|
||||||
* inflight packets will be decremented by this amount for the associated
|
|
||||||
* Bucket and/or NotifRing
|
|
||||||
*/
|
|
||||||
uint_reg_t count : 16;
|
|
||||||
/* Reserved. */
|
|
||||||
uint_reg_t __reserved : 48;
|
|
||||||
#else /* __BIG_ENDIAN__ */
|
|
||||||
uint_reg_t __reserved : 48;
|
|
||||||
uint_reg_t count : 16;
|
|
||||||
#endif
|
|
||||||
};
|
|
||||||
|
|
||||||
uint_reg_t word;
|
|
||||||
} MPIPE_IDMA_RELEASE_REGION_VAL_t;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* MMIO Buffer Stack Manager Region Address.
|
|
||||||
* This MMIO region is used for posting or fetching buffers to/from the
|
|
||||||
* buffer stack manager. On an MMIO load, this pops a buffer descriptor from
|
|
||||||
* the top of stack if one is available. On an MMIO store, this pushes a
|
|
||||||
* buffer to the stack. The value read or written is described in
|
|
||||||
* BSM_REGION_VAL.
|
|
||||||
*/
|
|
||||||
|
|
||||||
__extension__
|
|
||||||
typedef union
|
|
||||||
{
|
|
||||||
struct
|
|
||||||
{
|
|
||||||
#ifndef __BIG_ENDIAN__
|
|
||||||
/* Reserved. */
|
|
||||||
uint_reg_t __reserved_0 : 3;
|
|
||||||
/* BufferStack being accessed. */
|
|
||||||
uint_reg_t stack : 5;
|
|
||||||
/* Reserved. */
|
|
||||||
uint_reg_t __reserved_1 : 18;
|
|
||||||
/*
|
|
||||||
* This field of the address selects the region (address space) to be
|
|
||||||
* accessed. For the buffer stack manager region, this field must be 6.
|
|
||||||
*/
|
|
||||||
uint_reg_t region : 3;
|
|
||||||
/* Reserved. */
|
|
||||||
uint_reg_t __reserved_2 : 6;
|
|
||||||
/* This field of the address indexes the 32 entry service domain table. */
|
|
||||||
uint_reg_t svc_dom : 5;
|
|
||||||
/* Reserved. */
|
|
||||||
uint_reg_t __reserved_3 : 24;
|
|
||||||
#else /* __BIG_ENDIAN__ */
|
|
||||||
uint_reg_t __reserved_3 : 24;
|
|
||||||
uint_reg_t svc_dom : 5;
|
|
||||||
uint_reg_t __reserved_2 : 6;
|
|
||||||
uint_reg_t region : 3;
|
|
||||||
uint_reg_t __reserved_1 : 18;
|
|
||||||
uint_reg_t stack : 5;
|
|
||||||
uint_reg_t __reserved_0 : 3;
|
|
||||||
#endif
|
|
||||||
};
|
|
||||||
|
|
||||||
uint_reg_t word;
|
|
||||||
} MPIPE_BSM_REGION_ADDR_t;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* MMIO Buffer Stack Manager Region Value.
|
|
||||||
* This MMIO region is used for posting or fetching buffers to/from the
|
|
||||||
* buffer stack manager. On an MMIO load, this pops a buffer descriptor from
|
|
||||||
* the top of stack if one is available. On an MMIO store, this pushes a
|
|
||||||
* buffer to the stack. The address of the MMIO operation is described in
|
|
||||||
* BSM_REGION_ADDR.
|
|
||||||
*/
|
|
||||||
|
|
||||||
__extension__
|
|
||||||
typedef union
|
|
||||||
{
|
|
||||||
struct
|
|
||||||
{
|
|
||||||
#ifndef __BIG_ENDIAN__
|
|
||||||
/* Reserved. */
|
|
||||||
uint_reg_t __reserved_0 : 7;
|
|
||||||
/*
|
|
||||||
* Base virtual address of the buffer. Must be sign extended by consumer.
|
|
||||||
*/
|
|
||||||
int_reg_t va : 35;
|
|
||||||
/* Reserved. */
|
|
||||||
uint_reg_t __reserved_1 : 6;
|
|
||||||
/*
|
|
||||||
* Index of the buffer stack to which this buffer belongs. Ignored on
|
|
||||||
* writes since the offset bits specify the stack being accessed.
|
|
||||||
*/
|
|
||||||
uint_reg_t stack_idx : 5;
|
|
||||||
/* Reserved. */
|
|
||||||
uint_reg_t __reserved_2 : 3;
|
|
||||||
/*
|
|
||||||
* Instance ID. For devices that support automatic buffer return between
|
|
||||||
* mPIPE instances, this field indicates the buffer owner. If the INST
|
|
||||||
* field does not match the mPIPE's instance number when a packet is
|
|
||||||
* egressed, buffers with HWB set will be returned to the other mPIPE
|
|
||||||
* instance. Note that not all devices support multi-mPIPE buffer
|
|
||||||
* return. The MPIPE_EDMA_INFO.REMOTE_BUFF_RTN_SUPPORT bit indicates
|
|
||||||
* whether the INST field in the buffer descriptor is populated by iDMA
|
|
||||||
* hardware. This field is ignored on writes.
|
|
||||||
*/
|
|
||||||
uint_reg_t inst : 2;
|
|
||||||
/*
|
|
||||||
* Reads as one to indicate that this is a hardware managed buffer.
|
|
||||||
* Ignored on writes since all buffers on a given stack are the same size.
|
|
||||||
*/
|
|
||||||
uint_reg_t hwb : 1;
|
|
||||||
/*
|
|
||||||
* Encoded size of buffer (ignored on writes):
|
|
||||||
* 0 = 128 bytes
|
|
||||||
* 1 = 256 bytes
|
|
||||||
* 2 = 512 bytes
|
|
||||||
* 3 = 1024 bytes
|
|
||||||
* 4 = 1664 bytes
|
|
||||||
* 5 = 4096 bytes
|
|
||||||
* 6 = 10368 bytes
|
|
||||||
* 7 = 16384 bytes
|
|
||||||
*/
|
|
||||||
uint_reg_t size : 3;
|
|
||||||
/*
|
|
||||||
* Valid indication for the buffer. Ignored on writes.
|
|
||||||
* 0 : Valid buffer descriptor popped from stack.
|
|
||||||
* 3 : Could not pop a buffer from the stack. Either the stack is empty,
|
|
||||||
* or the hardware's prefetch buffer is empty for this stack.
|
|
||||||
*/
|
|
||||||
uint_reg_t c : 2;
|
|
||||||
#else /* __BIG_ENDIAN__ */
|
|
||||||
uint_reg_t c : 2;
|
|
||||||
uint_reg_t size : 3;
|
|
||||||
uint_reg_t hwb : 1;
|
|
||||||
uint_reg_t inst : 2;
|
|
||||||
uint_reg_t __reserved_2 : 3;
|
|
||||||
uint_reg_t stack_idx : 5;
|
|
||||||
uint_reg_t __reserved_1 : 6;
|
|
||||||
int_reg_t va : 35;
|
|
||||||
uint_reg_t __reserved_0 : 7;
|
|
||||||
#endif
|
|
||||||
};
|
|
||||||
|
|
||||||
uint_reg_t word;
|
|
||||||
} MPIPE_BSM_REGION_VAL_t;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* MMIO Egress DMA Post Region Address.
|
|
||||||
* Used to post descriptor locations to the eDMA descriptor engine. The
|
|
||||||
* value to be written is described in EDMA_POST_REGION_VAL
|
|
||||||
*/
|
|
||||||
|
|
||||||
__extension__
|
|
||||||
typedef union
|
|
||||||
{
|
|
||||||
struct
|
|
||||||
{
|
|
||||||
#ifndef __BIG_ENDIAN__
|
|
||||||
/* Reserved. */
|
|
||||||
uint_reg_t __reserved_0 : 3;
|
|
||||||
/* eDMA ring being accessed */
|
|
||||||
uint_reg_t ring : 6;
|
|
||||||
/* Reserved. */
|
|
||||||
uint_reg_t __reserved_1 : 17;
|
|
||||||
/*
|
|
||||||
* This field of the address selects the region (address space) to be
|
|
||||||
* accessed. For the egress DMA post region, this field must be 5.
|
|
||||||
*/
|
|
||||||
uint_reg_t region : 3;
|
|
||||||
/* Reserved. */
|
|
||||||
uint_reg_t __reserved_2 : 6;
|
|
||||||
/* This field of the address indexes the 32 entry service domain table. */
|
|
||||||
uint_reg_t svc_dom : 5;
|
|
||||||
/* Reserved. */
|
|
||||||
uint_reg_t __reserved_3 : 24;
|
|
||||||
#else /* __BIG_ENDIAN__ */
|
|
||||||
uint_reg_t __reserved_3 : 24;
|
|
||||||
uint_reg_t svc_dom : 5;
|
|
||||||
uint_reg_t __reserved_2 : 6;
|
|
||||||
uint_reg_t region : 3;
|
|
||||||
uint_reg_t __reserved_1 : 17;
|
|
||||||
uint_reg_t ring : 6;
|
|
||||||
uint_reg_t __reserved_0 : 3;
|
|
||||||
#endif
|
|
||||||
};
|
|
||||||
|
|
||||||
uint_reg_t word;
|
|
||||||
} MPIPE_EDMA_POST_REGION_ADDR_t;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* MMIO Egress DMA Post Region Value.
|
|
||||||
* Used to post descriptor locations to the eDMA descriptor engine. The
|
|
||||||
* address is described in EDMA_POST_REGION_ADDR.
|
|
||||||
*/
|
|
||||||
|
|
||||||
__extension__
|
|
||||||
typedef union
|
|
||||||
{
|
|
||||||
struct
|
|
||||||
{
|
|
||||||
#ifndef __BIG_ENDIAN__
|
|
||||||
/*
|
|
||||||
* For writes, this specifies the current ring tail pointer prior to any
|
|
||||||
* post. For example, to post 1 or more descriptors starting at location
|
|
||||||
* 23, this would contain 23 (not 24). On writes, this index must be
|
|
||||||
* masked based on the ring size. The new tail pointer after this post
|
|
||||||
* is COUNT+RING_IDX (masked by the ring size).
|
|
||||||
*
|
|
||||||
* For reads, this provides the hardware descriptor fetcher's head
|
|
||||||
* pointer. The descriptors prior to the head pointer, however, may not
|
|
||||||
* yet have been processed so this indicator is only used to determine
|
|
||||||
* how full the ring is and if software may post more descriptors.
|
|
||||||
*/
|
|
||||||
uint_reg_t ring_idx : 16;
|
|
||||||
/*
|
|
||||||
* For writes, this specifies number of contiguous descriptors that are
|
|
||||||
* being posted. Software may post up to RingSize descriptors with a
|
|
||||||
* single MMIO store. A zero in this field on a write will "wake up" an
|
|
||||||
* eDMA ring and cause it fetch descriptors regardless of the hardware's
|
|
||||||
* current view of the state of the tail pointer.
|
|
||||||
*
|
|
||||||
* For reads, this field provides a rolling count of the number of
|
|
||||||
* descriptors that have been completely processed. This may be used by
|
|
||||||
* software to determine when buffers associated with a descriptor may be
|
|
||||||
* returned or reused. When the ring's flush bit is cleared by software
|
|
||||||
* (after having been set by HW or SW), the COUNT will be cleared.
|
|
||||||
*/
|
|
||||||
uint_reg_t count : 16;
|
|
||||||
/*
|
|
||||||
* For writes, this specifies the generation number of the tail being
|
|
||||||
* posted. Note that if tail+cnt wraps to the beginning of the ring, the
|
|
||||||
* eDMA hardware assumes that the descriptors posted at the beginning of
|
|
||||||
* the ring are also valid so it is okay to post around the wrap point.
|
|
||||||
*
|
|
||||||
* For reads, this is the current generation number. Valid descriptors
|
|
||||||
* will have the inverse of this generation number.
|
|
||||||
*/
|
|
||||||
uint_reg_t gen : 1;
|
|
||||||
/* Reserved. */
|
|
||||||
uint_reg_t __reserved : 31;
|
|
||||||
#else /* __BIG_ENDIAN__ */
|
|
||||||
uint_reg_t __reserved : 31;
|
|
||||||
uint_reg_t gen : 1;
|
|
||||||
uint_reg_t count : 16;
|
|
||||||
uint_reg_t ring_idx : 16;
|
|
||||||
#endif
|
|
||||||
};
|
|
||||||
|
|
||||||
uint_reg_t word;
|
|
||||||
} MPIPE_EDMA_POST_REGION_VAL_t;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Load Balancer Bucket Status Data.
|
|
||||||
* Read/Write data for load balancer Bucket-Status Table. 4160 entries
|
|
||||||
* indexed by LBL_INIT_CTL.IDX when LBL_INIT_CTL.STRUCT_SEL is BSTS_TBL
|
|
||||||
*/
|
|
||||||
|
|
||||||
__extension__
|
|
||||||
typedef union
|
|
||||||
{
|
|
||||||
struct
|
|
||||||
{
|
|
||||||
#ifndef __BIG_ENDIAN__
|
|
||||||
/* NotifRing currently assigned to this bucket. */
|
|
||||||
uint_reg_t notifring : 8;
|
|
||||||
/* Current reference count. */
|
|
||||||
uint_reg_t count : 16;
|
|
||||||
/* Group associated with this bucket. */
|
|
||||||
uint_reg_t group : 5;
|
|
||||||
/* Mode select for this bucket. */
|
|
||||||
uint_reg_t mode : 3;
|
|
||||||
/* Reserved. */
|
|
||||||
uint_reg_t __reserved : 32;
|
|
||||||
#else /* __BIG_ENDIAN__ */
|
|
||||||
uint_reg_t __reserved : 32;
|
|
||||||
uint_reg_t mode : 3;
|
|
||||||
uint_reg_t group : 5;
|
|
||||||
uint_reg_t count : 16;
|
|
||||||
uint_reg_t notifring : 8;
|
|
||||||
#endif
|
|
||||||
};
|
|
||||||
|
|
||||||
uint_reg_t word;
|
|
||||||
} MPIPE_LBL_INIT_DAT_BSTS_TBL_t;
|
|
||||||
#endif /* !defined(__ASSEMBLER__) */
|
|
||||||
|
|
||||||
#endif /* !defined(__ARCH_MPIPE_H__) */
|
|
|
@ -1,42 +0,0 @@
/*
 * Copyright 2012 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */


#ifndef __ARCH_MPIPE_CONSTANTS_H__
#define __ARCH_MPIPE_CONSTANTS_H__

#define MPIPE_NUM_CLASSIFIERS 16
#define MPIPE_CLS_MHZ 1200

#define MPIPE_NUM_EDMA_RINGS 64

#define MPIPE_NUM_SGMII_MACS 16
#define MPIPE_NUM_XAUI_MACS 16
#define MPIPE_NUM_LOOPBACK_CHANNELS 4
#define MPIPE_NUM_NON_LB_CHANNELS 28

#define MPIPE_NUM_IPKT_BLOCKS 1536

#define MPIPE_NUM_BUCKETS 4160

#define MPIPE_NUM_NOTIF_RINGS 256

#define MPIPE_NUM_NOTIF_GROUPS 32

#define MPIPE_NUM_TLBS_PER_ASID 16
#define MPIPE_TLB_IDX_WIDTH 4

#define MPIPE_MMIO_NUM_SVC_DOM 32

#endif /* __ARCH_MPIPE_CONSTANTS_H__ */
@ -1,39 +0,0 @@
/*
 * Copyright 2012 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

/* Machine-generated file; do not edit. */

#ifndef __ARCH_MPIPE_DEF_H__
#define __ARCH_MPIPE_DEF_H__
#define MPIPE_MMIO_ADDR__REGION_SHIFT 26
#define MPIPE_MMIO_ADDR__REGION_VAL_CFG 0x0
#define MPIPE_MMIO_ADDR__REGION_VAL_IDMA 0x4
#define MPIPE_MMIO_ADDR__REGION_VAL_EDMA 0x5
#define MPIPE_MMIO_ADDR__REGION_VAL_BSM 0x6
#define MPIPE_BSM_REGION_VAL__VA_SHIFT 7
#define MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_128 0x0
#define MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_256 0x1
#define MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_512 0x2
#define MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_1024 0x3
#define MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_1664 0x4
#define MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_4096 0x5
#define MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_10368 0x6
#define MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_16384 0x7
#define MPIPE_LBL_INIT_DAT_BSTS_TBL__MODE_VAL_DFA 0x0
#define MPIPE_LBL_INIT_DAT_BSTS_TBL__MODE_VAL_FIXED 0x1
#define MPIPE_LBL_INIT_DAT_BSTS_TBL__MODE_VAL_ALWAYS_PICK 0x2
#define MPIPE_LBL_INIT_DAT_BSTS_TBL__MODE_VAL_STICKY 0x3
#define MPIPE_LBL_INIT_DAT_BSTS_TBL__MODE_VAL_STICKY_RAND 0x7
#define MPIPE_LBL_NR_STATE__FIRST_WORD 0x2138
#endif /* !defined(__ARCH_MPIPE_DEF_H__) */
@ -1,521 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright 2012 Tilera Corporation. All Rights Reserved.
|
|
||||||
*
|
|
||||||
* This program is free software; you can redistribute it and/or
|
|
||||||
* modify it under the terms of the GNU General Public License
|
|
||||||
* as published by the Free Software Foundation, version 2.
|
|
||||||
*
|
|
||||||
* This program is distributed in the hope that it will be useful, but
|
|
||||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
|
||||||
* NON INFRINGEMENT. See the GNU General Public License for
|
|
||||||
* more details.
|
|
||||||
*/
|
|
||||||
|
|
||||||
/* Machine-generated file; do not edit. */
|
|
||||||
|
|
||||||
|
|
||||||
#ifndef __ARCH_MPIPE_SHM_H__
|
|
||||||
#define __ARCH_MPIPE_SHM_H__
|
|
||||||
|
|
||||||
#include <arch/abi.h>
|
|
||||||
#include <arch/mpipe_shm_def.h>
|
|
||||||
|
|
||||||
#ifndef __ASSEMBLER__
|
|
||||||
/**
|
|
||||||
* MPIPE eDMA Descriptor.
|
|
||||||
* The eDMA descriptor is written by software and consumed by hardware. It
|
|
||||||
* is used to specify the location of egress packet data to be sent out of
|
|
||||||
* the chip via one of the packet interfaces.
|
|
||||||
*/
|
|
||||||
|
|
||||||
__extension__
|
|
||||||
typedef union
|
|
||||||
{
|
|
||||||
struct
|
|
||||||
{
|
|
||||||
/* Word 0 */
|
|
||||||
|
|
||||||
#ifndef __BIG_ENDIAN__
|
|
||||||
/**
|
|
||||||
* Generation number. Used to indicate a valid descriptor in ring. When
|
|
||||||
* a new descriptor is written into the ring, software must toggle this
|
|
||||||
* bit. The net effect is that the GEN bit being written into new
|
|
||||||
* descriptors toggles each time the ring tail pointer wraps.
|
|
||||||
*/
|
|
||||||
uint_reg_t gen : 1;
|
|
||||||
/**
|
|
||||||
* For devices with EDMA reorder support, this field allows the
|
|
||||||
* descriptor to select the egress FIFO. The associated DMA ring must
|
|
||||||
* have ALLOW_EFIFO_SEL enabled.
|
|
||||||
*/
|
|
||||||
uint_reg_t efifo_sel : 6;
|
|
||||||
/** Reserved. Must be zero. */
|
|
||||||
uint_reg_t r0 : 1;
|
|
||||||
/** Checksum generation enabled for this transfer. */
|
|
||||||
uint_reg_t csum : 1;
|
|
||||||
/**
|
|
||||||
* Nothing to be sent. Used, for example, when software has dropped a
|
|
||||||
* packet but still wishes to return all of the associated buffers.
|
|
||||||
*/
|
|
||||||
uint_reg_t ns : 1;
|
|
||||||
/**
|
|
||||||
* Notification interrupt will be delivered when packet has been egressed.
|
|
||||||
*/
|
|
||||||
uint_reg_t notif : 1;
|
|
||||||
/**
|
|
||||||
* Boundary indicator. When 1, this transfer includes the EOP for this
|
|
||||||
* command. Must be clear on all but the last descriptor for an egress
|
|
||||||
* packet.
|
|
||||||
*/
|
|
||||||
uint_reg_t bound : 1;
|
|
||||||
/** Reserved. Must be zero. */
|
|
||||||
uint_reg_t r1 : 4;
|
|
||||||
/**
|
|
||||||
* Number of bytes to be sent for this descriptor. When zero, no data
|
|
||||||
* will be moved and the buffer descriptor will be ignored. If the
|
|
||||||
* buffer descriptor indicates that it is chained, the low 7 bits of the
|
|
||||||
* VA indicate the offset within the first buffer (e.g. 127 bytes is the
|
|
||||||
* maximum offset into the first buffer). If the size exceeds a single
|
|
||||||
* buffer, subsequent buffer descriptors will be fetched prior to
|
|
||||||
* processing the next eDMA descriptor in the ring.
|
|
||||||
*/
|
|
||||||
uint_reg_t xfer_size : 14;
|
|
||||||
/** Reserved. Must be zero. */
|
|
||||||
uint_reg_t r2 : 2;
|
|
||||||
/**
|
|
||||||
* Destination of checksum relative to CSUM_START relative to the first
|
|
||||||
* byte moved by this descriptor. Must be zero if CSUM=0 in this
|
|
||||||
* descriptor. Must be less than XFER_SIZE (e.g. the first byte of the
|
|
||||||
* CSUM_DEST must be within the span of this descriptor).
|
|
||||||
*/
|
|
||||||
uint_reg_t csum_dest : 8;
|
|
||||||
/**
|
|
||||||
* Start byte of checksum relative to the first byte moved by this
|
|
||||||
* descriptor. If this is not the first descriptor for the egress
|
|
||||||
* packet, CSUM_START is still relative to the first byte in this
|
|
||||||
* descriptor. Must be zero if CSUM=0 in this descriptor.
|
|
||||||
*/
|
|
||||||
uint_reg_t csum_start : 8;
|
|
||||||
/**
|
|
||||||
* Initial value for 16-bit 1's compliment checksum if enabled via CSUM.
|
|
||||||
* Specified in network order. That is, bits[7:0] will be added to the
|
|
||||||
* byte pointed to by CSUM_START and bits[15:8] will be added to the byte
|
|
||||||
* pointed to by CSUM_START+1 (with appropriate 1's compliment carries).
|
|
||||||
* Must be zero if CSUM=0 in this descriptor.
|
|
||||||
*/
|
|
||||||
uint_reg_t csum_seed : 16;
|
|
||||||
#else /* __BIG_ENDIAN__ */
|
|
||||||
uint_reg_t csum_seed : 16;
|
|
||||||
uint_reg_t csum_start : 8;
|
|
||||||
uint_reg_t csum_dest : 8;
|
|
||||||
uint_reg_t r2 : 2;
|
|
||||||
uint_reg_t xfer_size : 14;
|
|
||||||
uint_reg_t r1 : 4;
|
|
||||||
uint_reg_t bound : 1;
|
|
||||||
uint_reg_t notif : 1;
|
|
||||||
uint_reg_t ns : 1;
|
|
||||||
uint_reg_t csum : 1;
|
|
||||||
uint_reg_t r0 : 1;
|
|
||||||
uint_reg_t efifo_sel : 6;
|
|
||||||
uint_reg_t gen : 1;
|
|
||||||
#endif
|
|
||||||
|
|
||||||
/* Word 1 */
|
|
||||||
|
|
||||||
#ifndef __BIG_ENDIAN__
|
|
||||||
/** Virtual address. Must be sign extended by consumer. */
|
|
||||||
int_reg_t va : 42;
|
|
||||||
/** Reserved. */
|
|
||||||
uint_reg_t __reserved_0 : 6;
|
|
||||||
/** Index of the buffer stack to which this buffer belongs. */
|
|
||||||
uint_reg_t stack_idx : 5;
|
|
||||||
/** Reserved. */
|
|
||||||
uint_reg_t __reserved_1 : 3;
|
|
||||||
/**
|
|
||||||
* Instance ID. For devices that support automatic buffer return between
|
|
||||||
* mPIPE instances, this field indicates the buffer owner. If the INST
|
|
||||||
* field does not match the mPIPE's instance number when a packet is
|
|
||||||
* egressed, buffers with HWB set will be returned to the other mPIPE
|
|
||||||
* instance. Note that not all devices support multi-mPIPE buffer
|
|
||||||
* return. The MPIPE_EDMA_INFO.REMOTE_BUFF_RTN_SUPPORT bit indicates
|
|
||||||
* whether the INST field in the buffer descriptor is populated by iDMA
|
|
||||||
* hardware.
|
|
||||||
*/
|
|
||||||
uint_reg_t inst : 2;
|
|
||||||
/**
|
|
||||||
* Always set to one by hardware in iDMA packet descriptors. For eDMA,
|
|
||||||
* indicates whether the buffer will be released to the buffer stack
|
|
||||||
* manager. When 0, software is responsible for releasing the buffer.
|
|
||||||
*/
|
|
||||||
uint_reg_t hwb : 1;
|
|
||||||
/**
|
|
||||||
* Encoded size of buffer. Set by the ingress hardware for iDMA packet
|
|
||||||
* descriptors. For eDMA descriptors, indicates the buffer size if .c
|
|
||||||
* indicates a chained packet. If an eDMA descriptor is not chained and
* the .hwb bit is not set, this field is ignored and the size is
* specified by the .xfer_size field.
* 0 = 128 bytes
* 1 = 256 bytes
* 2 = 512 bytes
* 3 = 1024 bytes
* 4 = 1664 bytes
* 5 = 4096 bytes
* 6 = 10368 bytes
* 7 = 16384 bytes
*/
uint_reg_t size : 3;
/**
* Chaining configuration for the buffer. Indicates that an ingress
* packet or egress command is chained across multiple buffers, with each
* buffer's size indicated by the .size field.
*/
uint_reg_t c : 2;
#else /* __BIG_ENDIAN__ */
uint_reg_t c : 2;
uint_reg_t size : 3;
uint_reg_t hwb : 1;
uint_reg_t inst : 2;
uint_reg_t __reserved_1 : 3;
uint_reg_t stack_idx : 5;
uint_reg_t __reserved_0 : 6;
int_reg_t va : 42;
#endif

};

/** Word access */
uint_reg_t words[2];
} MPIPE_EDMA_DESC_t;

/**
* MPIPE Packet Descriptor.
* The packet descriptor is filled by the mPIPE's classification,
* load-balancing, and buffer management services. Some fields are consumed
* by mPIPE hardware, and others are consumed by Tile software.
*/

__extension__
typedef union
{
struct
{
/* Word 0 */

#ifndef __BIG_ENDIAN__
/**
* Notification ring into which this packet descriptor is written.
* Typically written by load balancer, but can be overridden by
* classification program if NR is asserted.
*/
uint_reg_t notif_ring : 8;
/** Source channel for this packet. Written by mPIPE DMA hardware. */
uint_reg_t channel : 5;
/** Reserved. */
uint_reg_t __reserved_0 : 1;
/**
* MAC Error.
* Generated by the MAC interface. Asserted if there was an overrun of
* the MAC's receive FIFO. This condition generally only occurs if the
* mPIPE clock is running too slowly.
*/
uint_reg_t me : 1;
/**
* Truncation Error.
* Written by the iDMA hardware. Asserted if packet was truncated due to
* insufficient space in iPkt buffer
*/
uint_reg_t tr : 1;
/**
* Written by the iDMA hardware. Indicates the number of bytes written
* to Tile memory. In general, this is the actual size of the packet as
* received from the MAC. But if the packet is truncated due to running
* out of buffers or due to the iPkt buffer filling up, then the L2_SIZE
* will be reduced to reflect the actual number of valid bytes written to
* Tile memory.
*/
uint_reg_t l2_size : 14;
/**
* CRC Error.
* Generated by the MAC. Asserted if MAC indicated an L2 CRC error or
* other L2 error (bad length etc.) on the packet.
*/
uint_reg_t ce : 1;
/**
* Cut Through.
* Written by the iDMA hardware. Asserted if packet was not completely
* received before being sent to classifier. L2_Size will indicate
* number of bytes received so far.
*/
uint_reg_t ct : 1;
/**
* Written by the classification program. Used by the load balancer to
* select the ring into which this packet descriptor is written.
*/
uint_reg_t bucket_id : 13;
/** Reserved. */
uint_reg_t __reserved_1 : 3;
/**
* Checksum.
* Written by classification program. When 1, the checksum engine will
* perform checksum based on the CSUM_SEED, CSUM_START, and CSUM_BYTES
* fields. The result will be placed in CSUM_VAL.
*/
uint_reg_t cs : 1;
/**
* Notification Ring Select.
* Written by the classification program. When 1, the NotifRingIDX is
* set by classification program rather than being set by load balancer.
*/
uint_reg_t nr : 1;
/**
* Written by classification program. Indicates whether packet and
* descriptor should both be dropped, both be delivered, or only the
* descriptor should be delivered.
*/
uint_reg_t dest : 2;
/**
* General Purpose Sequence Number Enable.
* Written by the classification program. When 1, the GP_SQN_SEL field
* contains the sequence number selector and the GP_SQN field will be
* replaced with the associated sequence number. When clear, the GP_SQN
* field is left intact and be used as "Custom" bytes.
*/
uint_reg_t sq : 1;
/**
* TimeStamp Enable.
* Enable TimeStamp insertion. When clear, timestamp field may be filled
* with custom data by classifier. When set, hardware inserts the
* timestamp when the start of packet is received from the MAC.
*/
uint_reg_t ts : 1;
/**
* Packet Sequence Number Enable.
* Enable PacketSQN insertion. When clear, PacketSQN field may be filled
* with custom data by classifier. When set, hardware inserts the packet
* sequence number when the packet descriptor is written to a
* notification ring.
*/
uint_reg_t ps : 1;
/**
* Buffer Error.
* Written by the iDMA hardware. Asserted if iDMA ran out of buffers
* while writing the packet. Software must still return any buffer
* descriptors whose C field indicates a valid descriptor was consumed.
*/
uint_reg_t be : 1;
/**
* Written by the classification program. The associated counter is
* incremented when the packet is sent.
*/
uint_reg_t ctr0 : 5;
/** Reserved. */
uint_reg_t __reserved_2 : 3;
#else /* __BIG_ENDIAN__ */
uint_reg_t __reserved_2 : 3;
uint_reg_t ctr0 : 5;
uint_reg_t be : 1;
uint_reg_t ps : 1;
uint_reg_t ts : 1;
uint_reg_t sq : 1;
uint_reg_t dest : 2;
uint_reg_t nr : 1;
uint_reg_t cs : 1;
uint_reg_t __reserved_1 : 3;
uint_reg_t bucket_id : 13;
uint_reg_t ct : 1;
uint_reg_t ce : 1;
uint_reg_t l2_size : 14;
uint_reg_t tr : 1;
uint_reg_t me : 1;
uint_reg_t __reserved_0 : 1;
uint_reg_t channel : 5;
uint_reg_t notif_ring : 8;
#endif

/* Word 1 */

#ifndef __BIG_ENDIAN__
/**
* Written by the classification program. The associated counter is
* incremented when the packet is sent.
*/
uint_reg_t ctr1 : 5;
/** Reserved. */
uint_reg_t __reserved_3 : 3;
/**
* Written by classification program. Indicates the start byte for
* checksum. Relative to 1st byte received from MAC.
*/
uint_reg_t csum_start : 8;
/**
* Checksum seed written by classification program. Overwritten with
* resultant checksum if CS bit is asserted. The endianness of the CSUM
* value bits when viewed by Tile software match the packet byte order.
* That is, bits[7:0] of the resulting checksum value correspond to
* earlier (more significant) bytes in the packet. To avoid classifier
* software from having to byte swap the CSUM_SEED, the iDMA checksum
* engine byte swaps the classifier's result before seeding the checksum
* calculation. Thus, the CSUM_START byte of packet data is added to
* bits[15:8] of the CSUM_SEED field generated by the classifier. This
* byte swap will be visible to Tile software if the CS bit is clear.
*/
uint_reg_t csum_seed_val : 16;
/**
* Written by the classification program. Not interpreted by mPIPE
* hardware.
*/
uint_reg_t custom0 : 32;
#else /* __BIG_ENDIAN__ */
uint_reg_t custom0 : 32;
uint_reg_t csum_seed_val : 16;
uint_reg_t csum_start : 8;
uint_reg_t __reserved_3 : 3;
uint_reg_t ctr1 : 5;
#endif

/* Word 2 */

#ifndef __BIG_ENDIAN__
/**
* Written by the classification program. Not interpreted by mPIPE
* hardware.
*/
uint_reg_t custom1 : 64;
#else /* __BIG_ENDIAN__ */
uint_reg_t custom1 : 64;
#endif

/* Word 3 */

#ifndef __BIG_ENDIAN__
/**
* Written by the classification program. Not interpreted by mPIPE
* hardware.
*/
uint_reg_t custom2 : 64;
#else /* __BIG_ENDIAN__ */
uint_reg_t custom2 : 64;
#endif

/* Word 4 */

#ifndef __BIG_ENDIAN__
/**
* Written by the classification program. Not interpreted by mPIPE
* hardware.
*/
uint_reg_t custom3 : 64;
#else /* __BIG_ENDIAN__ */
uint_reg_t custom3 : 64;
#endif

/* Word 5 */

#ifndef __BIG_ENDIAN__
/**
* Sequence number applied when packet is distributed. Classifier
* selects which sequence number is to be applied by writing the 13-bit
* SQN-selector into this field. For devices that support EXT_SQN (as
* indicated in IDMA_INFO.EXT_SQN_SUPPORT), the GP_SQN can be extended to
* 32-bits via the IDMA_CTL.EXT_SQN register. In this case the
* PACKET_SQN will be reduced to 32 bits.
*/
uint_reg_t gp_sqn : 16;
/**
* Written by notification hardware. The packet sequence number is
* incremented for each packet that wasn't dropped.
*/
uint_reg_t packet_sqn : 48;
#else /* __BIG_ENDIAN__ */
uint_reg_t packet_sqn : 48;
uint_reg_t gp_sqn : 16;
#endif

/* Word 6 */

#ifndef __BIG_ENDIAN__
/**
* Written by hardware when the start-of-packet is received by the mPIPE
* from the MAC. This is the nanoseconds part of the packet timestamp.
*/
uint_reg_t time_stamp_ns : 32;
/**
* Written by hardware when the start-of-packet is received by the mPIPE
* from the MAC. This is the seconds part of the packet timestamp.
*/
uint_reg_t time_stamp_sec : 32;
#else /* __BIG_ENDIAN__ */
uint_reg_t time_stamp_sec : 32;
uint_reg_t time_stamp_ns : 32;
#endif

/* Word 7 */

#ifndef __BIG_ENDIAN__
/** Virtual address. Must be sign extended by consumer. */
int_reg_t va : 42;
/** Reserved. */
uint_reg_t __reserved_4 : 6;
/** Index of the buffer stack to which this buffer belongs. */
uint_reg_t stack_idx : 5;
/** Reserved. */
uint_reg_t __reserved_5 : 3;
/**
* Instance ID. For devices that support automatic buffer return between
* mPIPE instances, this field indicates the buffer owner. If the INST
* field does not match the mPIPE's instance number when a packet is
* egressed, buffers with HWB set will be returned to the other mPIPE
* instance. Note that not all devices support multi-mPIPE buffer
* return. The MPIPE_EDMA_INFO.REMOTE_BUFF_RTN_SUPPORT bit indicates
* whether the INST field in the buffer descriptor is populated by iDMA
* hardware.
*/
uint_reg_t inst : 2;
/**
* Always set to one by hardware in iDMA packet descriptors. For eDMA,
* indicates whether the buffer will be released to the buffer stack
* manager. When 0, software is responsible for releasing the buffer.
*/
uint_reg_t hwb : 1;
/**
* Encoded size of buffer. Set by the ingress hardware for iDMA packet
* descriptors. For eDMA descriptors, indicates the buffer size if .c
* indicates a chained packet. If an eDMA descriptor is not chained and
* the .hwb bit is not set, this field is ignored and the size is
* specified by the .xfer_size field.
* 0 = 128 bytes
* 1 = 256 bytes
* 2 = 512 bytes
* 3 = 1024 bytes
* 4 = 1664 bytes
* 5 = 4096 bytes
* 6 = 10368 bytes
* 7 = 16384 bytes
*/
uint_reg_t size : 3;
/**
* Chaining configuration for the buffer. Indicates that an ingress
* packet or egress command is chained across multiple buffers, with each
* buffer's size indicated by the .size field.
*/
uint_reg_t c : 2;
#else /* __BIG_ENDIAN__ */
uint_reg_t c : 2;
uint_reg_t size : 3;
uint_reg_t hwb : 1;
uint_reg_t inst : 2;
uint_reg_t __reserved_5 : 3;
uint_reg_t stack_idx : 5;
uint_reg_t __reserved_4 : 6;
int_reg_t va : 42;
#endif

};

/** Word access */
uint_reg_t words[8];
} MPIPE_PDESC_t;
#endif /* !defined(__ASSEMBLER__) */

#endif /* !defined(__ARCH_MPIPE_SHM_H__) */
@ -1,23 +0,0 @@
/*
* Copyright 2012 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/

/* Machine-generated file; do not edit. */

#ifndef __ARCH_MPIPE_SHM_DEF_H__
#define __ARCH_MPIPE_SHM_DEF_H__
#define MPIPE_EDMA_DESC_WORD1__C_VAL_UNCHAINED 0x0
#define MPIPE_EDMA_DESC_WORD1__C_VAL_CHAINED 0x1
#define MPIPE_EDMA_DESC_WORD1__C_VAL_NOT_RDY 0x2
#define MPIPE_EDMA_DESC_WORD1__C_VAL_INVALID 0x3
#endif /* !defined(__ARCH_MPIPE_SHM_DEF_H__) */
@ -1,109 +0,0 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef __ARCH_SPR_DEF_H__
#define __ARCH_SPR_DEF_H__

#include <uapi/arch/spr_def.h>

/*
* In addition to including the proper base SPR definition file, depending
* on machine architecture, this file defines several macros which allow
* kernel code to use protection-level dependent SPRs without worrying
* about which PL it's running at. In these macros, the PL that the SPR
* or interrupt number applies to is replaced by K.
*/

#if CONFIG_KERNEL_PL != 1 && CONFIG_KERNEL_PL != 2
#error CONFIG_KERNEL_PL must be 1 or 2
#endif

/* Concatenate 4 strings. */
#define __concat4(a, b, c, d) a ## b ## c ## d
#define _concat4(a, b, c, d) __concat4(a, b, c, d)

#ifdef __tilegx__

/* TILE-Gx dependent, protection-level dependent SPRs. */

#define SPR_INTERRUPT_MASK_K \
_concat4(SPR_INTERRUPT_MASK_, CONFIG_KERNEL_PL,,)
#define SPR_INTERRUPT_MASK_SET_K \
_concat4(SPR_INTERRUPT_MASK_SET_, CONFIG_KERNEL_PL,,)
#define SPR_INTERRUPT_MASK_RESET_K \
_concat4(SPR_INTERRUPT_MASK_RESET_, CONFIG_KERNEL_PL,,)
#define SPR_INTERRUPT_VECTOR_BASE_K \
_concat4(SPR_INTERRUPT_VECTOR_BASE_, CONFIG_KERNEL_PL,,)

#define SPR_IPI_MASK_K \
_concat4(SPR_IPI_MASK_, CONFIG_KERNEL_PL,,)
#define SPR_IPI_MASK_RESET_K \
_concat4(SPR_IPI_MASK_RESET_, CONFIG_KERNEL_PL,,)
#define SPR_IPI_MASK_SET_K \
_concat4(SPR_IPI_MASK_SET_, CONFIG_KERNEL_PL,,)
#define SPR_IPI_EVENT_K \
_concat4(SPR_IPI_EVENT_, CONFIG_KERNEL_PL,,)
#define SPR_IPI_EVENT_RESET_K \
_concat4(SPR_IPI_EVENT_RESET_, CONFIG_KERNEL_PL,,)
#define SPR_IPI_EVENT_SET_K \
_concat4(SPR_IPI_EVENT_SET_, CONFIG_KERNEL_PL,,)
#define INT_IPI_K \
_concat4(INT_IPI_, CONFIG_KERNEL_PL,,)

#define SPR_SINGLE_STEP_CONTROL_K \
_concat4(SPR_SINGLE_STEP_CONTROL_, CONFIG_KERNEL_PL,,)
#define SPR_SINGLE_STEP_EN_K_K \
_concat4(SPR_SINGLE_STEP_EN_, CONFIG_KERNEL_PL, _, CONFIG_KERNEL_PL)
#define INT_SINGLE_STEP_K \
_concat4(INT_SINGLE_STEP_, CONFIG_KERNEL_PL,,)

#else

/* TILEPro dependent, protection-level dependent SPRs. */

#define SPR_INTERRUPT_MASK_K_0 \
_concat4(SPR_INTERRUPT_MASK_, CONFIG_KERNEL_PL, _0,)
#define SPR_INTERRUPT_MASK_K_1 \
_concat4(SPR_INTERRUPT_MASK_, CONFIG_KERNEL_PL, _1,)
#define SPR_INTERRUPT_MASK_SET_K_0 \
_concat4(SPR_INTERRUPT_MASK_SET_, CONFIG_KERNEL_PL, _0,)
#define SPR_INTERRUPT_MASK_SET_K_1 \
_concat4(SPR_INTERRUPT_MASK_SET_, CONFIG_KERNEL_PL, _1,)
#define SPR_INTERRUPT_MASK_RESET_K_0 \
_concat4(SPR_INTERRUPT_MASK_RESET_, CONFIG_KERNEL_PL, _0,)
#define SPR_INTERRUPT_MASK_RESET_K_1 \
_concat4(SPR_INTERRUPT_MASK_RESET_, CONFIG_KERNEL_PL, _1,)

#endif

/* Generic protection-level dependent SPRs. */

#define SPR_SYSTEM_SAVE_K_0 \
_concat4(SPR_SYSTEM_SAVE_, CONFIG_KERNEL_PL, _0,)
#define SPR_SYSTEM_SAVE_K_1 \
_concat4(SPR_SYSTEM_SAVE_, CONFIG_KERNEL_PL, _1,)
#define SPR_SYSTEM_SAVE_K_2 \
_concat4(SPR_SYSTEM_SAVE_, CONFIG_KERNEL_PL, _2,)
#define SPR_SYSTEM_SAVE_K_3 \
_concat4(SPR_SYSTEM_SAVE_, CONFIG_KERNEL_PL, _3,)
#define SPR_EX_CONTEXT_K_0 \
_concat4(SPR_EX_CONTEXT_, CONFIG_KERNEL_PL, _0,)
#define SPR_EX_CONTEXT_K_1 \
_concat4(SPR_EX_CONTEXT_, CONFIG_KERNEL_PL, _1,)
#define SPR_INTCTRL_K_STATUS \
_concat4(SPR_INTCTRL_, CONFIG_KERNEL_PL, _STATUS,)
#define INT_INTCTRL_K \
_concat4(INT_INTCTRL_, CONFIG_KERNEL_PL,,)

#endif /* __ARCH_SPR_DEF_H__ */
@ -1,111 +0,0 @@
/*
* Copyright 2012 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/

/* Machine-generated file; do not edit. */

#ifndef __ARCH_TRIO_H__
#define __ARCH_TRIO_H__

#include <arch/abi.h>
#include <arch/trio_def.h>

#ifndef __ASSEMBLER__

/*
* Map SQ Doorbell Format.
* This describes the format of the write-only doorbell register that exists
* in the last 8-bytes of the MAP_SQ_BASE/LIM range. This register is only
* writable from PCIe space. Writes to this register will not be written to
* Tile memory space and thus no IO VA translation is required if the last
* page of the BASE/LIM range is not otherwise written.
*/

__extension__
typedef union
{
struct
{
#ifndef __BIG_ENDIAN__
/*
* When written with a 1, the associated MAP_SQ region's doorbell
* interrupt will be triggered once all previous writes are visible to
* Tile software.
*/
uint_reg_t doorbell : 1;
/*
* When written with a 1, the descriptor at the head of the associated
* MAP_SQ's FIFO will be dequeued.
*/
uint_reg_t pop : 1;
/* Reserved. */
uint_reg_t __reserved : 62;
#else /* __BIG_ENDIAN__ */
uint_reg_t __reserved : 62;
uint_reg_t pop : 1;
uint_reg_t doorbell : 1;
#endif
};

uint_reg_t word;
} TRIO_MAP_SQ_DOORBELL_FMT_t;

/*
* Tile PIO Region Configuration - CFG Address Format.
* This register describes the address format for PIO accesses when the
* associated region is setup with TYPE=CFG.
*/

__extension__
typedef union
{
struct
{
#ifndef __BIG_ENDIAN__
/* Register Address (full byte address). */
uint_reg_t reg_addr : 12;
/* Function Number */
uint_reg_t fn : 3;
/* Device Number */
uint_reg_t dev : 5;
/* BUS Number */
uint_reg_t bus : 8;
/* Config Type: 0 for access to directly-attached device. 1 otherwise. */
uint_reg_t type : 1;
/* Reserved. */
uint_reg_t __reserved_0 : 1;
/*
* MAC select. This must match the configuration in
* TILE_PIO_REGION_SETUP.MAC.
*/
uint_reg_t mac : 2;
/* Reserved. */
uint_reg_t __reserved_1 : 32;
#else /* __BIG_ENDIAN__ */
uint_reg_t __reserved_1 : 32;
uint_reg_t mac : 2;
uint_reg_t __reserved_0 : 1;
uint_reg_t type : 1;
uint_reg_t bus : 8;
uint_reg_t dev : 5;
uint_reg_t fn : 3;
uint_reg_t reg_addr : 12;
#endif
};

uint_reg_t word;
} TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR_t;
#endif /* !defined(__ASSEMBLER__) */

#endif /* !defined(__ARCH_TRIO_H__) */
@ -1,36 +0,0 @@
/*
* Copyright 2012 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/

#ifndef __ARCH_TRIO_CONSTANTS_H__
#define __ARCH_TRIO_CONSTANTS_H__

#define TRIO_NUM_ASIDS 32
#define TRIO_NUM_TLBS_PER_ASID 16

#define TRIO_NUM_TPIO_REGIONS 8
#define TRIO_LOG2_NUM_TPIO_REGIONS 3

#define TRIO_NUM_MAP_MEM_REGIONS 32
#define TRIO_LOG2_NUM_MAP_MEM_REGIONS 5
#define TRIO_NUM_MAP_SQ_REGIONS 8
#define TRIO_LOG2_NUM_MAP_SQ_REGIONS 3

#define TRIO_LOG2_NUM_SQ_FIFO_ENTRIES 6

#define TRIO_NUM_PUSH_DMA_RINGS 64

#define TRIO_NUM_PULL_DMA_RINGS 64

#endif /* __ARCH_TRIO_CONSTANTS_H__ */
@ -1,41 +0,0 @@
/*
* Copyright 2012 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/

/* Machine-generated file; do not edit. */

#ifndef __ARCH_TRIO_DEF_H__
#define __ARCH_TRIO_DEF_H__
#define TRIO_CFG_REGION_ADDR__REG_SHIFT 0
#define TRIO_CFG_REGION_ADDR__INTFC_SHIFT 16
#define TRIO_CFG_REGION_ADDR__INTFC_VAL_TRIO 0x0
#define TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE 0x1
#define TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD 0x2
#define TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_PROTECTED 0x3
#define TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT 18
#define TRIO_CFG_REGION_ADDR__PROT_SHIFT 20
#define TRIO_PIO_REGIONS_ADDR__REGION_SHIFT 32
#define TRIO_MAP_MEM_REG_INT0 0x1000000000
#define TRIO_MAP_MEM_REG_INT1 0x1000000008
#define TRIO_MAP_MEM_REG_INT2 0x1000000010
#define TRIO_MAP_MEM_REG_INT3 0x1000000018
#define TRIO_MAP_MEM_REG_INT4 0x1000000020
#define TRIO_MAP_MEM_REG_INT5 0x1000000028
#define TRIO_MAP_MEM_REG_INT6 0x1000000030
#define TRIO_MAP_MEM_REG_INT7 0x1000000038
#define TRIO_MAP_MEM_LIM__ADDR_SHIFT 12
#define TRIO_MAP_MEM_SETUP__ORDER_MODE_VAL_UNORDERED 0x0
#define TRIO_MAP_MEM_SETUP__ORDER_MODE_VAL_STRICT 0x1
#define TRIO_MAP_MEM_SETUP__ORDER_MODE_VAL_REL_ORD 0x2
#define TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT 30
#endif /* !defined(__ARCH_TRIO_DEF_H__) */
@ -1,229 +0,0 @@
/*
* Copyright 2012 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/

/* Machine-generated file; do not edit. */

#ifndef __ARCH_TRIO_PCIE_INTFC_H__
#define __ARCH_TRIO_PCIE_INTFC_H__

#include <arch/abi.h>
#include <arch/trio_pcie_intfc_def.h>

#ifndef __ASSEMBLER__

/*
* Port Configuration.
* Configuration of the PCIe Port
*/

__extension__
typedef union
{
struct
{
#ifndef __BIG_ENDIAN__
/* Provides the state of the strapping pins for this port. */
uint_reg_t strap_state : 3;
/* Reserved. */
uint_reg_t __reserved_0 : 1;
/*
* When 1, the device type will be overridden using OVD_DEV_TYPE_VAL.
* When 0, the device type is determined based on the STRAP_STATE.
*/
uint_reg_t ovd_dev_type : 1;
/* Provides the device type when OVD_DEV_TYPE is 1. */
uint_reg_t ovd_dev_type_val : 4;
/* Determines how link is trained. */
uint_reg_t train_mode : 2;
/* Reserved. */
uint_reg_t __reserved_1 : 1;
/*
* For PCIe, used to flip physical RX lanes that were not properly wired.
* This is not the same as lane reversal which is handled automatically
* during link training. When 0, RX Lane0 must be wired to the link
* partner (either to its Lane0 or it's LaneN). When RX_LANE_FLIP is 1,
* the highest numbered lane for this port becomes Lane0 and Lane0 does
* NOT have to be wired to the link partner.
*/
uint_reg_t rx_lane_flip : 1;
/*
* For PCIe, used to flip physical TX lanes that were not properly wired.
* This is not the same as lane reversal which is handled automatically
* during link training. When 0, TX Lane0 must be wired to the link
* partner (either to its Lane0 or it's LaneN). When TX_LANE_FLIP is 1,
* the highest numbered lane for this port becomes Lane0 and Lane0 does
* NOT have to be wired to the link partner.
*/
uint_reg_t tx_lane_flip : 1;
/*
* For StreamIO port, configures the width of the port when TRAIN_MODE is
* not STRAP.
*/
uint_reg_t stream_width : 2;
/*
* For StreamIO port, configures the rate of the port when TRAIN_MODE is
* not STRAP.
*/
uint_reg_t stream_rate : 2;
/* Reserved. */
uint_reg_t __reserved_2 : 46;
#else /* __BIG_ENDIAN__ */
uint_reg_t __reserved_2 : 46;
uint_reg_t stream_rate : 2;
uint_reg_t stream_width : 2;
uint_reg_t tx_lane_flip : 1;
uint_reg_t rx_lane_flip : 1;
uint_reg_t __reserved_1 : 1;
uint_reg_t train_mode : 2;
uint_reg_t ovd_dev_type_val : 4;
uint_reg_t ovd_dev_type : 1;
uint_reg_t __reserved_0 : 1;
uint_reg_t strap_state : 3;
#endif
};

uint_reg_t word;
} TRIO_PCIE_INTFC_PORT_CONFIG_t;

/*
* Port Status.
* Status of the PCIe Port. This register applies to the StreamIO port when
* StreamIO is enabled.
*/

__extension__
typedef union
{
struct
{
#ifndef __BIG_ENDIAN__
/*
* Indicates the DL state of the port. When 1, the port is up and ready
* to receive traffic.
*/
uint_reg_t dl_up : 1;
/*
* Indicates the number of times the link has gone down. Clears on read.
*/
uint_reg_t dl_down_cnt : 7;
/* Indicates the SERDES PLL has spun up and is providing a valid clock. */
uint_reg_t clock_ready : 1;
/* Reserved. */
uint_reg_t __reserved_0 : 7;
/* Device revision ID. */
uint_reg_t device_rev : 8;
/* Link state (PCIe). */
uint_reg_t ltssm_state : 6;
/* Link power management state (PCIe). */
uint_reg_t pm_state : 3;
/* Reserved. */
uint_reg_t __reserved_1 : 31;
#else /* __BIG_ENDIAN__ */
uint_reg_t __reserved_1 : 31;
uint_reg_t pm_state : 3;
uint_reg_t ltssm_state : 6;
uint_reg_t device_rev : 8;
uint_reg_t __reserved_0 : 7;
uint_reg_t clock_ready : 1;
uint_reg_t dl_down_cnt : 7;
uint_reg_t dl_up : 1;
#endif
};

uint_reg_t word;
} TRIO_PCIE_INTFC_PORT_STATUS_t;

/*
* Transmit FIFO Control.
* Contains TX FIFO thresholds. These registers are for diagnostics purposes
* only. Changing these values causes undefined behavior.
*/

__extension__
typedef union
{
struct
{
#ifndef __BIG_ENDIAN__
/*
* Almost-Empty level for TX0 data. Typically set to at least
* roundup(38.0*M/N) where N=tclk frequency and M=MAC symbol rate in MHz
* for a x4 port (250MHz).
*/
uint_reg_t tx0_data_ae_lvl : 7;
/* Reserved. */
uint_reg_t __reserved_0 : 1;
/* Almost-Empty level for TX1 data. */
uint_reg_t tx1_data_ae_lvl : 7;
/* Reserved. */
uint_reg_t __reserved_1 : 1;
/* Almost-Full level for TX0 data. */
uint_reg_t tx0_data_af_lvl : 7;
/* Reserved. */
uint_reg_t __reserved_2 : 1;
/* Almost-Full level for TX1 data. */
uint_reg_t tx1_data_af_lvl : 7;
/* Reserved. */
uint_reg_t __reserved_3 : 1;
/* Almost-Full level for TX0 info. */
uint_reg_t tx0_info_af_lvl : 5;
/* Reserved. */
uint_reg_t __reserved_4 : 3;
/* Almost-Full level for TX1 info. */
uint_reg_t tx1_info_af_lvl : 5;
/* Reserved. */
uint_reg_t __reserved_5 : 3;
/*
* This register provides performance adjustment for high bandwidth
* flows. The MAC will assert almost-full to TRIO if non-posted credits
* fall below this level. Note that setting this larger than the initial
* PORT_CREDIT.NPH value will cause READS to never be sent. If the
* initial credit value from the link partner is smaller than this value
* when the link comes up, the value will be reset to the initial credit
* value to prevent lockup.
*/
uint_reg_t min_np_credits : 8;
/*
* This register provides performance adjustment for high bandwidth
* flows. The MAC will assert almost-full to TRIO if posted credits fall
* below this level. Note that setting this larger than the initial
* PORT_CREDIT.PH value will cause WRITES to never be sent. If the
* initial credit value from the link partner is smaller than this value
* when the link comes up, the value will be reset to the initial credit
* value to prevent lockup.
*/
uint_reg_t min_p_credits : 8;
#else /* __BIG_ENDIAN__ */
uint_reg_t min_p_credits : 8;
uint_reg_t min_np_credits : 8;
uint_reg_t __reserved_5 : 3;
uint_reg_t tx1_info_af_lvl : 5;
uint_reg_t __reserved_4 : 3;
uint_reg_t tx0_info_af_lvl : 5;
uint_reg_t __reserved_3 : 1;
uint_reg_t tx1_data_af_lvl : 7;
uint_reg_t __reserved_2 : 1;
uint_reg_t tx0_data_af_lvl : 7;
uint_reg_t __reserved_1 : 1;
uint_reg_t tx1_data_ae_lvl : 7;
uint_reg_t __reserved_0 : 1;
uint_reg_t tx0_data_ae_lvl : 7;
#endif
};

uint_reg_t word;
} TRIO_PCIE_INTFC_TX_FIFO_CTL_t;
#endif /* !defined(__ASSEMBLER__) */

#endif /* !defined(__ARCH_TRIO_PCIE_INTFC_H__) */
@ -1,32 +0,0 @@
/*
* Copyright 2012 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/

/* Machine-generated file; do not edit. */

#ifndef __ARCH_TRIO_PCIE_INTFC_DEF_H__
#define __ARCH_TRIO_PCIE_INTFC_DEF_H__
#define TRIO_PCIE_INTFC_MAC_INT_STS 0x0000
#define TRIO_PCIE_INTFC_MAC_INT_STS__INT_LEVEL_MASK 0xf000
#define TRIO_PCIE_INTFC_PORT_CONFIG 0x0018
#define TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_DISABLED 0x0
#define TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_ENDPOINT 0x1
#define TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_RC 0x2
#define TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_ENDPOINT_G1 0x3
#define TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_RC_G1 0x4
#define TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_XLINK 0x5
#define TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_STREAM_X1 0x6
#define TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_STREAM_X4 0x7
#define TRIO_PCIE_INTFC_PORT_STATUS 0x0020
#define TRIO_PCIE_INTFC_TX_FIFO_CTL 0x0050
#endif /* !defined(__ARCH_TRIO_PCIE_INTFC_DEF_H__) */
@ -1,156 +0,0 @@
/*
* Copyright 2012 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/

/* Machine-generated file; do not edit. */

#ifndef __ARCH_TRIO_PCIE_RC_H__
#define __ARCH_TRIO_PCIE_RC_H__

#include <arch/abi.h>
#include <arch/trio_pcie_rc_def.h>

#ifndef __ASSEMBLER__

/* Device Capabilities Register. */

__extension__
typedef union
{
struct
{
#ifndef __BIG_ENDIAN__
/*
* Max_Payload_Size Supported, writablethrough the MAC_STANDARD interface
*/
uint_reg_t mps_sup : 3;
/*
* This field is writable through the MAC_STANDARD interface. However,
* Phantom Function is not supported. Therefore, the application must
* not write any value other than 0x0 to this field.
*/
uint_reg_t phantom_function_supported : 2;
/* This bit is writable through the MAC_STANDARD interface. */
uint_reg_t ext_tag_field_supported : 1;
/* Reserved. */
uint_reg_t __reserved_0 : 3;
/* Endpoint L1 Acceptable Latency Must be 0x0 for non-Endpoint devices. */
uint_reg_t l1_lat : 3;
/*
* Undefined since PCI Express 1.1 (Was Attention Button Present for PCI
* Express 1.0a)
*/
uint_reg_t r1 : 1;
/*
* Undefined since PCI Express 1.1 (Was Attention Indicator Present for
* PCI Express 1.0a)
*/
uint_reg_t r2 : 1;
/*
* Undefined since PCI Express 1.1 (Was Power Indicator Present for PCI
* Express 1.0a)
*/
uint_reg_t r3 : 1;
/*
* Role-Based Error Reporting, writable through the MAC_STANDARD
* interface. Required to be set for device compliant to 1.1 spec and
* later.
*/
uint_reg_t rer : 1;
/* Reserved. */
uint_reg_t __reserved_1 : 2;
/* Captured Slot Power Limit Value Upstream port only. */
uint_reg_t slot_pwr_lim : 8;
/* Captured Slot Power Limit Scale Upstream port only. */
uint_reg_t slot_pwr_scale : 2;
/* Reserved. */
uint_reg_t __reserved_2 : 4;
/* Endpoint L0s Acceptable LatencyMust be 0x0 for non-Endpoint devices. */
uint_reg_t l0s_lat : 1;
/* Reserved. */
uint_reg_t __reserved_3 : 31;
#else /* __BIG_ENDIAN__ */
uint_reg_t __reserved_3 : 31;
uint_reg_t l0s_lat : 1;
uint_reg_t __reserved_2 : 4;
uint_reg_t slot_pwr_scale : 2;
uint_reg_t slot_pwr_lim : 8;
uint_reg_t __reserved_1 : 2;
uint_reg_t rer : 1;
uint_reg_t r3 : 1;
uint_reg_t r2 : 1;
uint_reg_t r1 : 1;
uint_reg_t l1_lat : 3;
uint_reg_t __reserved_0 : 3;
uint_reg_t ext_tag_field_supported : 1;
uint_reg_t phantom_function_supported : 2;
uint_reg_t mps_sup : 3;
#endif
};

uint_reg_t word;
} TRIO_PCIE_RC_DEVICE_CAP_t;

/* Device Control Register. */

__extension__
typedef union
{
struct
{
#ifndef __BIG_ENDIAN__
/* Correctable Error Reporting Enable */
uint_reg_t cor_err_ena : 1;
/* Non-Fatal Error Reporting Enable */
uint_reg_t nf_err_ena : 1;
/* Fatal Error Reporting Enable */
uint_reg_t fatal_err_ena : 1;
/* Unsupported Request Reporting Enable */
uint_reg_t ur_ena : 1;
/* Relaxed orderring enable */
uint_reg_t ro_ena : 1;
/* Max Payload Size */
uint_reg_t max_payload_size : 3;
/* Extended Tag Field Enable */
uint_reg_t ext_tag : 1;
/* Phantom Function Enable */
uint_reg_t ph_fn_ena : 1;
/* AUX Power PM Enable */
uint_reg_t aux_pm_ena : 1;
/* Enable NoSnoop */
uint_reg_t no_snoop : 1;
/* Max read request size */
uint_reg_t max_read_req_sz : 3;
/* Reserved. */
uint_reg_t __reserved : 49;
#else /* __BIG_ENDIAN__ */
uint_reg_t __reserved : 49;
uint_reg_t max_read_req_sz : 3;
uint_reg_t no_snoop : 1;
uint_reg_t aux_pm_ena : 1;
uint_reg_t ph_fn_ena : 1;
uint_reg_t ext_tag : 1;
uint_reg_t max_payload_size : 3;
uint_reg_t ro_ena : 1;
uint_reg_t ur_ena : 1;
uint_reg_t fatal_err_ena : 1;
uint_reg_t nf_err_ena : 1;
uint_reg_t cor_err_ena : 1;
#endif
};

uint_reg_t word;
} TRIO_PCIE_RC_DEVICE_CONTROL_t;
#endif /* !defined(__ASSEMBLER__) */

#endif /* !defined(__ARCH_TRIO_PCIE_RC_H__) */
@ -1,24 +0,0 @@
/*
* Copyright 2012 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/

/* Machine-generated file; do not edit. */

#ifndef __ARCH_TRIO_PCIE_RC_DEF_H__
#define __ARCH_TRIO_PCIE_RC_DEF_H__
#define TRIO_PCIE_RC_DEVICE_CAP 0x0074
#define TRIO_PCIE_RC_DEVICE_CONTROL 0x0078
#define TRIO_PCIE_RC_DEVICE_ID_VEN_ID 0x0000
#define TRIO_PCIE_RC_DEVICE_ID_VEN_ID__DEV_ID_SHIFT 16
#define TRIO_PCIE_RC_REVISION_ID 0x0008
#endif /* !defined(__ARCH_TRIO_PCIE_RC_DEF_H__) */
@ -1,125 +0,0 @@
/*
* Copyright 2012 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/

/* Machine-generated file; do not edit. */

#ifndef __ARCH_TRIO_SHM_H__
#define __ARCH_TRIO_SHM_H__

#include <arch/abi.h>
#include <arch/trio_shm_def.h>

#ifndef __ASSEMBLER__
/**
* TRIO DMA Descriptor.
* The TRIO DMA descriptor is written by software and consumed by hardware.
* It is used to specify the location of transaction data in the IO and Tile
* domains.
*/

__extension__
typedef union
{
struct
{
/* Word 0 */

#ifndef __BIG_ENDIAN__
/** Tile side virtual address. */
int_reg_t va : 42;
/**
* Encoded size of buffer used on push DMA when C=1:
* 0 = 128 bytes
* 1 = 256 bytes
* 2 = 512 bytes
* 3 = 1024 bytes
* 4 = 1664 bytes
* 5 = 4096 bytes
* 6 = 10368 bytes
* 7 = 16384 bytes
*/
uint_reg_t bsz : 3;
/**
* Chaining designation. Always zero for pull DMA
* 0 : Unchained buffer pointer
* 1 : Chained buffer pointer. Next buffer descriptor (e.g. VA) stored
* in 1st 8-bytes in buffer. For chained buffers, first 8-bytes of each
* buffer contain the next buffer descriptor formatted exactly like a PDE
* buffer descriptor. This allows a chained PDE buffer to be sent using
* push DMA.
*/
uint_reg_t c : 1;
/**
* Notification interrupt will be delivered when the transaction has
* completed (all data has been read from or written to the Tile-side
* buffer).
*/
uint_reg_t notif : 1;
/**
* When 0, the XSIZE field specifies the total byte count for the
* transaction. When 1, the XSIZE field is encoded as 2^(N+14) for N in
* {0..6}:
* 0 = 16KB
* 1 = 32KB
* 2 = 64KB
* 3 = 128KB
* 4 = 256KB
* 5 = 512KB
* 6 = 1MB
* All other encodings of the XSIZE field are reserved when SMOD=1
*/
uint_reg_t smod : 1;
/**
* Total number of bytes to move for this transaction. When SMOD=1,
* this field is encoded - see SMOD description.
*/
uint_reg_t xsize : 14;
/** Reserved. */
uint_reg_t __reserved_0 : 1;
/**
* Generation number. Used to indicate a valid descriptor in ring. When
* a new descriptor is written into the ring, software must toggle this
* bit. The net effect is that the GEN bit being written into new
* descriptors toggles each time the ring tail pointer wraps.
*/
uint_reg_t gen : 1;
#else /* __BIG_ENDIAN__ */
uint_reg_t gen : 1;
uint_reg_t __reserved_0 : 1;
uint_reg_t xsize : 14;
uint_reg_t smod : 1;
uint_reg_t notif : 1;
uint_reg_t c : 1;
uint_reg_t bsz : 3;
int_reg_t va : 42;
#endif

/* Word 1 */

#ifndef __BIG_ENDIAN__
/** IO-side address */
uint_reg_t io_address : 64;
#else /* __BIG_ENDIAN__ */
uint_reg_t io_address : 64;
#endif

};

/** Word access */
uint_reg_t words[2];
} TRIO_DMA_DESC_t;
#endif /* !defined(__ASSEMBLER__) */

#endif /* !defined(__ARCH_TRIO_SHM_H__) */
@ -1,19 +0,0 @@
/*
* Copyright 2012 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/

/* Machine-generated file; do not edit. */

#ifndef __ARCH_TRIO_SHM_DEF_H__
#define __ARCH_TRIO_SHM_DEF_H__
#endif /* !defined(__ARCH_TRIO_SHM_DEF_H__) */
@ -1,300 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright 2013 Tilera Corporation. All Rights Reserved.
|
|
||||||
*
|
|
||||||
* This program is free software; you can redistribute it and/or
|
|
||||||
* modify it under the terms of the GNU General Public License
|
|
||||||
* as published by the Free Software Foundation, version 2.
|
|
||||||
*
|
|
||||||
* This program is distributed in the hope that it will be useful, but
|
|
||||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
|
||||||
* NON INFRINGEMENT. See the GNU General Public License for
|
|
||||||
* more details.
|
|
||||||
*/
|
|
||||||
|
|
||||||
/* Machine-generated file; do not edit. */
|
|
||||||
|
|
||||||
#ifndef __ARCH_UART_H__
|
|
||||||
#define __ARCH_UART_H__
|
|
||||||
|
|
||||||
#include <arch/abi.h>
|
|
||||||
#include <arch/uart_def.h>
|
|
||||||
|
|
||||||
#ifndef __ASSEMBLER__
|
|
||||||
|
|
||||||
/* Divisor. */
|
|
||||||
|
|
||||||
__extension__
|
|
||||||
typedef union
|
|
||||||
{
|
|
||||||
struct
|
|
||||||
{
|
|
||||||
#ifndef __BIG_ENDIAN__
|
|
||||||
/*
|
|
||||||
* Baud Rate Divisor. Desired_baud_rate = REF_CLK frequency / (baud *
|
|
||||||
* 16).
|
|
||||||
* Note: REF_CLK is always 125 MHz, the default
|
|
||||||
* divisor = 68, baud rate = 125M/(68*16) = 115200 baud.
|
|
||||||
*/
|
|
||||||
uint_reg_t divisor : 12;
|
|
||||||
/* Reserved. */
|
|
||||||
uint_reg_t __reserved : 52;
|
|
||||||
#else /* __BIG_ENDIAN__ */
|
|
||||||
uint_reg_t __reserved : 52;
|
|
||||||
uint_reg_t divisor : 12;
|
|
||||||
#endif
|
|
||||||
};
|
|
||||||
|
|
||||||
uint_reg_t word;
|
|
||||||
} UART_DIVISOR_t;
|
|
||||||
|
|
||||||
/* FIFO Count. */
|
|
||||||
|
|
||||||
__extension__
|
|
||||||
typedef union
|
|
||||||
{
|
|
||||||
struct
|
|
||||||
{
|
|
||||||
#ifndef __BIG_ENDIAN__
|
|
||||||
/*
|
|
||||||
* n: n active entries in the receive FIFO (max is 2**8). Each entry has
|
|
||||||
* 8 bits.
|
|
||||||
* 0: no active entry in the receive FIFO (that is empty).
|
|
||||||
*/
|
|
||||||
uint_reg_t rfifo_count : 9;
|
|
||||||
/* Reserved. */
|
|
||||||
uint_reg_t __reserved_0 : 7;
|
|
||||||
/*
|
|
||||||
* n: n active entries in the transmit FIFO (max is 2**8). Each entry has
|
|
||||||
* 8 bits.
|
|
||||||
* 0: no active entry in the transmit FIFO (that is empty).
|
|
||||||
*/
|
|
||||||
uint_reg_t tfifo_count : 9;
|
|
||||||
/* Reserved. */
|
|
||||||
uint_reg_t __reserved_1 : 7;
|
|
||||||
/*
|
|
||||||
* n: n active entries in the write FIFO (max is 2**2). Each entry has 8
|
|
||||||
* bits.
|
|
||||||
* 0: no active entry in the write FIFO (that is empty).
|
|
||||||
*/
|
|
||||||
uint_reg_t wfifo_count : 3;
|
|
||||||
/* Reserved. */
|
|
||||||
uint_reg_t __reserved_2 : 29;
|
|
||||||
#else /* __BIG_ENDIAN__ */
|
|
||||||
uint_reg_t __reserved_2 : 29;
|
|
||||||
uint_reg_t wfifo_count : 3;
|
|
||||||
uint_reg_t __reserved_1 : 7;
|
|
||||||
uint_reg_t tfifo_count : 9;
|
|
||||||
uint_reg_t __reserved_0 : 7;
|
|
||||||
uint_reg_t rfifo_count : 9;
|
|
||||||
#endif
|
|
||||||
};
|
|
||||||
|
|
||||||
uint_reg_t word;
|
|
||||||
} UART_FIFO_COUNT_t;
|
|
||||||
|
|
||||||
/* FLAG. */
|
|
||||||
|
|
||||||
__extension__
|
|
||||||
typedef union
|
|
||||||
{
|
|
||||||
struct
|
|
||||||
{
|
|
||||||
#ifndef __BIG_ENDIAN__
|
|
||||||
/* Reserved. */
|
|
||||||
uint_reg_t __reserved_0 : 1;
|
|
||||||
/* 1: receive FIFO is empty */
|
|
||||||
uint_reg_t rfifo_empty : 1;
|
|
||||||
/* 1: write FIFO is empty. */
|
|
||||||
uint_reg_t wfifo_empty : 1;
|
|
||||||
/* 1: transmit FIFO is empty. */
|
|
||||||
uint_reg_t tfifo_empty : 1;
|
|
||||||
/* 1: receive FIFO is full. */
|
|
||||||
uint_reg_t rfifo_full : 1;
|
|
||||||
/* 1: write FIFO is full. */
|
|
||||||
uint_reg_t wfifo_full : 1;
|
|
||||||
/* 1: transmit FIFO is full. */
|
|
||||||
uint_reg_t tfifo_full : 1;
|
|
||||||
/* Reserved. */
|
|
||||||
uint_reg_t __reserved_1 : 57;
|
|
||||||
#else /* __BIG_ENDIAN__ */
|
|
||||||
uint_reg_t __reserved_1 : 57;
|
|
||||||
uint_reg_t tfifo_full : 1;
|
|
||||||
uint_reg_t wfifo_full : 1;
|
|
||||||
uint_reg_t rfifo_full : 1;
|
|
||||||
uint_reg_t tfifo_empty : 1;
|
|
||||||
uint_reg_t wfifo_empty : 1;
|
|
||||||
uint_reg_t rfifo_empty : 1;
|
|
||||||
uint_reg_t __reserved_0 : 1;
|
|
||||||
#endif
|
|
||||||
};
|
|
||||||
|
|
||||||
uint_reg_t word;
|
|
||||||
} UART_FLAG_t;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Interrupt Vector Mask.
|
|
||||||
* Each bit in this register corresponds to a specific interrupt. When set,
|
|
||||||
* the associated interrupt will not be dispatched.
|
|
||||||
*/
|
|
||||||
|
|
||||||
__extension__
|
|
||||||
typedef union
|
|
||||||
{
|
|
||||||
struct
|
|
||||||
{
|
|
||||||
#ifndef __BIG_ENDIAN__
|
|
||||||
/* Read data FIFO read and no data available */
|
|
||||||
uint_reg_t rdat_err : 1;
|
|
||||||
/* Write FIFO was written but it was full */
|
|
||||||
uint_reg_t wdat_err : 1;
|
|
||||||
/* Stop bit not found when current data was received */
|
|
||||||
uint_reg_t frame_err : 1;
|
|
||||||
/* Parity error was detected when current data was received */
|
|
||||||
uint_reg_t parity_err : 1;
|
|
||||||
/* Data was received but the receive FIFO was full */
|
|
||||||
uint_reg_t rfifo_overflow : 1;
|
|
||||||
/*
|
|
||||||
* An almost full event is reached when data is to be written to the
|
|
||||||
* receive FIFO, and the receive FIFO has more than or equal to
|
|
||||||
* BUFFER_THRESHOLD.RFIFO_AFULL bytes.
|
|
||||||
*/
|
|
||||||
uint_reg_t rfifo_afull : 1;
|
|
||||||
/* Reserved. */
|
|
||||||
uint_reg_t __reserved_0 : 1;
|
|
||||||
/* An entry in the transmit FIFO was popped */
|
|
||||||
uint_reg_t tfifo_re : 1;
|
|
||||||
/* An entry has been pushed into the receive FIFO */
|
|
||||||
uint_reg_t rfifo_we : 1;
|
|
||||||
/* An entry of the write FIFO has been popped */
|
|
||||||
uint_reg_t wfifo_re : 1;
|
|
||||||
/* Rshim read receive FIFO in protocol mode */
|
|
||||||
uint_reg_t rfifo_err : 1;
|
|
||||||
/*
|
|
||||||
* An almost empty event is reached when data is to be read from the
|
|
||||||
* transmit FIFO, and the transmit FIFO has less than or equal to
|
|
||||||
* BUFFER_THRESHOLD.TFIFO_AEMPTY bytes.
|
|
||||||
*/
|
|
||||||
uint_reg_t tfifo_aempty : 1;
|
|
||||||
/* Reserved. */
|
|
||||||
uint_reg_t __reserved_1 : 52;
|
|
||||||
#else /* __BIG_ENDIAN__ */
|
|
||||||
uint_reg_t __reserved_1 : 52;
|
|
||||||
uint_reg_t tfifo_aempty : 1;
|
|
||||||
uint_reg_t rfifo_err : 1;
|
|
||||||
uint_reg_t wfifo_re : 1;
|
|
||||||
uint_reg_t rfifo_we : 1;
|
|
||||||
uint_reg_t tfifo_re : 1;
|
|
||||||
uint_reg_t __reserved_0 : 1;
|
|
||||||
uint_reg_t rfifo_afull : 1;
|
|
||||||
uint_reg_t rfifo_overflow : 1;
|
|
||||||
uint_reg_t parity_err : 1;
|
|
||||||
uint_reg_t frame_err : 1;
|
|
||||||
uint_reg_t wdat_err : 1;
|
|
||||||
uint_reg_t rdat_err : 1;
|
|
||||||
#endif
|
|
||||||
};
|
|
||||||
|
|
||||||
uint_reg_t word;
|
|
||||||
} UART_INTERRUPT_MASK_t;
|
|
||||||
|
|
||||||
/*
 * Interrupt vector, write-one-to-clear.
 * Each bit in this register corresponds to a specific interrupt. Hardware
 * sets the bit when the associated condition has occurred. Writing a 1
 * clears the status bit.
 */

__extension__
typedef union
{
  struct
  {
#ifndef __BIG_ENDIAN__
    /* Read data FIFO read and no data available */
    uint_reg_t rdat_err       : 1;
    /* Write FIFO was written but it was full */
    uint_reg_t wdat_err       : 1;
    /* Stop bit not found when current data was received */
    uint_reg_t frame_err      : 1;
    /* Parity error was detected when current data was received */
    uint_reg_t parity_err     : 1;
    /* Data was received but the receive FIFO was full */
    uint_reg_t rfifo_overflow : 1;
    /*
     * Data was received and the receive FIFO is now almost full (more than
     * BUFFER_THRESHOLD.RFIFO_AFULL bytes in it)
     */
    uint_reg_t rfifo_afull    : 1;
    /* Reserved. */
    uint_reg_t __reserved_0   : 1;
    /* An entry in the transmit FIFO was popped */
    uint_reg_t tfifo_re       : 1;
    /* An entry has been pushed into the receive FIFO */
    uint_reg_t rfifo_we       : 1;
    /* An entry of the write FIFO has been popped */
    uint_reg_t wfifo_re       : 1;
    /* Rshim read receive FIFO in protocol mode */
    uint_reg_t rfifo_err      : 1;
    /*
     * Data was read from the transmit FIFO and now it is almost empty (less
     * than or equal to BUFFER_THRESHOLD.TFIFO_AEMPTY bytes in it).
     */
    uint_reg_t tfifo_aempty   : 1;
    /* Reserved. */
    uint_reg_t __reserved_1   : 52;
#else   /* __BIG_ENDIAN__ */
    uint_reg_t __reserved_1   : 52;
    uint_reg_t tfifo_aempty   : 1;
    uint_reg_t rfifo_err      : 1;
    uint_reg_t wfifo_re       : 1;
    uint_reg_t rfifo_we       : 1;
    uint_reg_t tfifo_re       : 1;
    uint_reg_t __reserved_0   : 1;
    uint_reg_t rfifo_afull    : 1;
    uint_reg_t rfifo_overflow : 1;
    uint_reg_t parity_err     : 1;
    uint_reg_t frame_err      : 1;
    uint_reg_t wdat_err       : 1;
    uint_reg_t rdat_err       : 1;
#endif
  };

  uint_reg_t word;
} UART_INTERRUPT_STATUS_t;
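Since the register is write-one-to-clear, acknowledging an interrupt means writing back a word with only the bit of interest set. A minimal sketch of that pattern; the ioremap()ed register base and the readq()/writeq() accessors are assumptions for illustration, not code from the removed driver:

#include <linux/io.h>

/* Hypothetical: uart_regs points at the ioremap()ed UART MMIO block. */
static void uart_ack_rfifo_overflow(void __iomem *uart_regs)
{
	UART_INTERRUPT_STATUS_t status;

	status.word = readq(uart_regs + UART_INTERRUPT_STATUS);
	if (status.rfifo_overflow) {
		UART_INTERRUPT_STATUS_t ack = { .word = 0 };

		ack.rfifo_overflow = 1;	/* writing 1 clears only this bit */
		writeq(ack.word, uart_regs + UART_INTERRUPT_STATUS);
	}
}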

/* Type. */

__extension__
typedef union
{
  struct
  {
#ifndef __BIG_ENDIAN__
    /* Number of stop bits, rx and tx */
    uint_reg_t sbits        : 1;
    /* Reserved. */
    uint_reg_t __reserved_0 : 1;
    /* Data word size, rx and tx */
    uint_reg_t dbits        : 1;
    /* Reserved. */
    uint_reg_t __reserved_1 : 1;
    /* Parity selection, rx and tx */
    uint_reg_t ptype        : 3;
    /* Reserved. */
    uint_reg_t __reserved_2 : 57;
#else   /* __BIG_ENDIAN__ */
    uint_reg_t __reserved_2 : 57;
    uint_reg_t ptype        : 3;
    uint_reg_t __reserved_1 : 1;
    uint_reg_t dbits        : 1;
    uint_reg_t __reserved_0 : 1;
    uint_reg_t sbits        : 1;
#endif
  };

  uint_reg_t word;
} UART_TYPE_t;
#endif /* !defined(__ASSEMBLER__) */

#endif /* !defined(__ARCH_UART_H__) */
@@ -1,120 +0,0 @@
/*
 * Copyright 2013 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

/* Machine-generated file; do not edit. */

#ifndef __ARCH_UART_DEF_H__
#define __ARCH_UART_DEF_H__
#define UART_DIVISOR 0x0158
#define UART_FIFO_COUNT 0x0110
#define UART_FLAG 0x0108
#define UART_INTERRUPT_MASK 0x0208
#define UART_INTERRUPT_MASK__RDAT_ERR_SHIFT 0
#define UART_INTERRUPT_MASK__RDAT_ERR_WIDTH 1
#define UART_INTERRUPT_MASK__RDAT_ERR_RESET_VAL 1
#define UART_INTERRUPT_MASK__RDAT_ERR_RMASK 0x1
#define UART_INTERRUPT_MASK__RDAT_ERR_MASK 0x1
#define UART_INTERRUPT_MASK__RDAT_ERR_FIELD 0,0
#define UART_INTERRUPT_MASK__WDAT_ERR_SHIFT 1
#define UART_INTERRUPT_MASK__WDAT_ERR_WIDTH 1
#define UART_INTERRUPT_MASK__WDAT_ERR_RESET_VAL 1
#define UART_INTERRUPT_MASK__WDAT_ERR_RMASK 0x1
#define UART_INTERRUPT_MASK__WDAT_ERR_MASK 0x2
#define UART_INTERRUPT_MASK__WDAT_ERR_FIELD 1,1
#define UART_INTERRUPT_MASK__FRAME_ERR_SHIFT 2
#define UART_INTERRUPT_MASK__FRAME_ERR_WIDTH 1
#define UART_INTERRUPT_MASK__FRAME_ERR_RESET_VAL 1
#define UART_INTERRUPT_MASK__FRAME_ERR_RMASK 0x1
#define UART_INTERRUPT_MASK__FRAME_ERR_MASK 0x4
#define UART_INTERRUPT_MASK__FRAME_ERR_FIELD 2,2
#define UART_INTERRUPT_MASK__PARITY_ERR_SHIFT 3
#define UART_INTERRUPT_MASK__PARITY_ERR_WIDTH 1
#define UART_INTERRUPT_MASK__PARITY_ERR_RESET_VAL 1
#define UART_INTERRUPT_MASK__PARITY_ERR_RMASK 0x1
#define UART_INTERRUPT_MASK__PARITY_ERR_MASK 0x8
#define UART_INTERRUPT_MASK__PARITY_ERR_FIELD 3,3
#define UART_INTERRUPT_MASK__RFIFO_OVERFLOW_SHIFT 4
#define UART_INTERRUPT_MASK__RFIFO_OVERFLOW_WIDTH 1
#define UART_INTERRUPT_MASK__RFIFO_OVERFLOW_RESET_VAL 1
#define UART_INTERRUPT_MASK__RFIFO_OVERFLOW_RMASK 0x1
#define UART_INTERRUPT_MASK__RFIFO_OVERFLOW_MASK 0x10
#define UART_INTERRUPT_MASK__RFIFO_OVERFLOW_FIELD 4,4
#define UART_INTERRUPT_MASK__RFIFO_AFULL_SHIFT 5
#define UART_INTERRUPT_MASK__RFIFO_AFULL_WIDTH 1
#define UART_INTERRUPT_MASK__RFIFO_AFULL_RESET_VAL 1
#define UART_INTERRUPT_MASK__RFIFO_AFULL_RMASK 0x1
#define UART_INTERRUPT_MASK__RFIFO_AFULL_MASK 0x20
#define UART_INTERRUPT_MASK__RFIFO_AFULL_FIELD 5,5
#define UART_INTERRUPT_MASK__TFIFO_RE_SHIFT 7
#define UART_INTERRUPT_MASK__TFIFO_RE_WIDTH 1
#define UART_INTERRUPT_MASK__TFIFO_RE_RESET_VAL 1
#define UART_INTERRUPT_MASK__TFIFO_RE_RMASK 0x1
#define UART_INTERRUPT_MASK__TFIFO_RE_MASK 0x80
#define UART_INTERRUPT_MASK__TFIFO_RE_FIELD 7,7
#define UART_INTERRUPT_MASK__RFIFO_WE_SHIFT 8
#define UART_INTERRUPT_MASK__RFIFO_WE_WIDTH 1
#define UART_INTERRUPT_MASK__RFIFO_WE_RESET_VAL 1
#define UART_INTERRUPT_MASK__RFIFO_WE_RMASK 0x1
#define UART_INTERRUPT_MASK__RFIFO_WE_MASK 0x100
#define UART_INTERRUPT_MASK__RFIFO_WE_FIELD 8,8
#define UART_INTERRUPT_MASK__WFIFO_RE_SHIFT 9
#define UART_INTERRUPT_MASK__WFIFO_RE_WIDTH 1
#define UART_INTERRUPT_MASK__WFIFO_RE_RESET_VAL 1
#define UART_INTERRUPT_MASK__WFIFO_RE_RMASK 0x1
#define UART_INTERRUPT_MASK__WFIFO_RE_MASK 0x200
#define UART_INTERRUPT_MASK__WFIFO_RE_FIELD 9,9
#define UART_INTERRUPT_MASK__RFIFO_ERR_SHIFT 10
#define UART_INTERRUPT_MASK__RFIFO_ERR_WIDTH 1
#define UART_INTERRUPT_MASK__RFIFO_ERR_RESET_VAL 1
#define UART_INTERRUPT_MASK__RFIFO_ERR_RMASK 0x1
#define UART_INTERRUPT_MASK__RFIFO_ERR_MASK 0x400
#define UART_INTERRUPT_MASK__RFIFO_ERR_FIELD 10,10
#define UART_INTERRUPT_MASK__TFIFO_AEMPTY_SHIFT 11
#define UART_INTERRUPT_MASK__TFIFO_AEMPTY_WIDTH 1
#define UART_INTERRUPT_MASK__TFIFO_AEMPTY_RESET_VAL 1
#define UART_INTERRUPT_MASK__TFIFO_AEMPTY_RMASK 0x1
#define UART_INTERRUPT_MASK__TFIFO_AEMPTY_MASK 0x800
#define UART_INTERRUPT_MASK__TFIFO_AEMPTY_FIELD 11,11
#define UART_INTERRUPT_STATUS 0x0200
#define UART_RECEIVE_DATA 0x0148
#define UART_TRANSMIT_DATA 0x0140
#define UART_TYPE 0x0160
#define UART_TYPE__SBITS_SHIFT 0
#define UART_TYPE__SBITS_WIDTH 1
#define UART_TYPE__SBITS_RESET_VAL 1
#define UART_TYPE__SBITS_RMASK 0x1
#define UART_TYPE__SBITS_MASK 0x1
#define UART_TYPE__SBITS_FIELD 0,0
#define UART_TYPE__SBITS_VAL_ONE_SBITS 0x0
#define UART_TYPE__SBITS_VAL_TWO_SBITS 0x1
#define UART_TYPE__DBITS_SHIFT 2
#define UART_TYPE__DBITS_WIDTH 1
#define UART_TYPE__DBITS_RESET_VAL 0
#define UART_TYPE__DBITS_RMASK 0x1
#define UART_TYPE__DBITS_MASK 0x4
#define UART_TYPE__DBITS_FIELD 2,2
#define UART_TYPE__DBITS_VAL_EIGHT_DBITS 0x0
#define UART_TYPE__DBITS_VAL_SEVEN_DBITS 0x1
#define UART_TYPE__PTYPE_SHIFT 4
#define UART_TYPE__PTYPE_WIDTH 3
#define UART_TYPE__PTYPE_RESET_VAL 3
#define UART_TYPE__PTYPE_RMASK 0x7
#define UART_TYPE__PTYPE_MASK 0x70
#define UART_TYPE__PTYPE_FIELD 4,6
#define UART_TYPE__PTYPE_VAL_NONE 0x0
#define UART_TYPE__PTYPE_VAL_MARK 0x1
#define UART_TYPE__PTYPE_VAL_SPACE 0x2
#define UART_TYPE__PTYPE_VAL_EVEN 0x3
#define UART_TYPE__PTYPE_VAL_ODD 0x4
#endif /* !defined(__ARCH_UART_DEF_H__) */
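For reference, the SHIFT/RMASK pairs generated above are normally used to crack a raw register word into fields; a small sketch, assuming `type` was read from the UART_TYPE register:

/* Decode the parity selection from a raw UART_TYPE register value. */
static unsigned int uart_parity_from_type(unsigned long type)
{
	/* Compare the result against UART_TYPE__PTYPE_VAL_EVEN, _VAL_ODD, etc. */
	return (type >> UART_TYPE__PTYPE_SHIFT) & UART_TYPE__PTYPE_RMASK;
}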
@@ -1,26 +0,0 @@
/*
 * Copyright 2012 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

/* Machine-generated file; do not edit. */

#ifndef __ARCH_USB_HOST_H__
#define __ARCH_USB_HOST_H__

#include <arch/abi.h>
#include <arch/usb_host_def.h>

#ifndef __ASSEMBLER__
#endif /* !defined(__ASSEMBLER__) */

#endif /* !defined(__ARCH_USB_HOST_H__) */
@@ -1,19 +0,0 @@
/*
 * Copyright 2012 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

/* Machine-generated file; do not edit. */

#ifndef __ARCH_USB_HOST_DEF_H__
#define __ARCH_USB_HOST_DEF_H__
#endif /* !defined(__ARCH_USB_HOST_DEF_H__) */
@@ -1,18 +0,0 @@
generic-y += bug.h
generic-y += bugs.h
generic-y += emergency-restart.h
generic-y += exec.h
generic-y += extable.h
generic-y += fb.h
generic-y += hw_irq.h
generic-y += irq_regs.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
generic-y += parport.h
generic-y += preempt.h
generic-y += seccomp.h
generic-y += serial.h
generic-y += trace_clock.h
generic-y += xor.h
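Each generic-y line tells Kbuild to emit a one-line wrapper header so the architecture picks up the asm-generic implementation; roughly, the generated file looks like the snippet below (illustrative of the mechanism, not a file from the removed tree):

/* include/generated/asm/local.h, produced by "generic-y += local.h" */
#include <asm-generic/local.h>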
@@ -1 +0,0 @@
#include <generated/asm-offsets.h>
@@ -1,210 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
|
||||||
*
|
|
||||||
* This program is free software; you can redistribute it and/or
|
|
||||||
* modify it under the terms of the GNU General Public License
|
|
||||||
* as published by the Free Software Foundation, version 2.
|
|
||||||
*
|
|
||||||
* This program is distributed in the hope that it will be useful, but
|
|
||||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
|
||||||
* NON INFRINGEMENT. See the GNU General Public License for
|
|
||||||
* more details.
|
|
||||||
*
|
|
||||||
* Atomic primitives.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#ifndef _ASM_TILE_ATOMIC_H
|
|
||||||
#define _ASM_TILE_ATOMIC_H
|
|
||||||
|
|
||||||
#include <asm/cmpxchg.h>
|
|
||||||
|
|
||||||
#ifndef __ASSEMBLY__
|
|
||||||
|
|
||||||
#include <linux/compiler.h>
|
|
||||||
#include <linux/types.h>
|
|
||||||
|
|
||||||
#define ATOMIC_INIT(i) { (i) }
|
|
||||||
|
|
||||||
/**
|
|
||||||
* atomic_read - read atomic variable
|
|
||||||
* @v: pointer of type atomic_t
|
|
||||||
*
|
|
||||||
* Atomically reads the value of @v.
|
|
||||||
*/
|
|
||||||
static inline int atomic_read(const atomic_t *v)
|
|
||||||
{
|
|
||||||
return READ_ONCE(v->counter);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* atomic_sub_return - subtract integer and return
|
|
||||||
* @v: pointer of type atomic_t
|
|
||||||
* @i: integer value to subtract
|
|
||||||
*
|
|
||||||
* Atomically subtracts @i from @v and returns @v - @i
|
|
||||||
*/
|
|
||||||
#define atomic_sub_return(i, v) atomic_add_return((int)(-(i)), (v))
|
|
||||||
|
|
||||||
#define atomic_fetch_sub(i, v) atomic_fetch_add(-(int)(i), (v))
|
|
||||||
|
|
||||||
/**
|
|
||||||
* atomic_sub - subtract integer from atomic variable
|
|
||||||
* @i: integer value to subtract
|
|
||||||
* @v: pointer of type atomic_t
|
|
||||||
*
|
|
||||||
* Atomically subtracts @i from @v.
|
|
||||||
*/
|
|
||||||
#define atomic_sub(i, v) atomic_add((int)(-(i)), (v))
|
|
||||||
|
|
||||||
/**
|
|
||||||
* atomic_sub_and_test - subtract value from variable and test result
|
|
||||||
* @i: integer value to subtract
|
|
||||||
* @v: pointer of type atomic_t
|
|
||||||
*
|
|
||||||
* Atomically subtracts @i from @v and returns true if the result is
|
|
||||||
* zero, or false for all other cases.
|
|
||||||
*/
|
|
||||||
#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)
|
|
||||||
|
|
||||||
/**
|
|
||||||
* atomic_inc_return - increment memory and return
|
|
||||||
* @v: pointer of type atomic_t
|
|
||||||
*
|
|
||||||
* Atomically increments @v by 1 and returns the new value.
|
|
||||||
*/
|
|
||||||
#define atomic_inc_return(v) atomic_add_return(1, (v))
|
|
||||||
|
|
||||||
/**
|
|
||||||
* atomic_dec_return - decrement memory and return
|
|
||||||
* @v: pointer of type atomic_t
|
|
||||||
*
|
|
||||||
* Atomically decrements @v by 1 and returns the new value.
|
|
||||||
*/
|
|
||||||
#define atomic_dec_return(v) atomic_sub_return(1, (v))
|
|
||||||
|
|
||||||
/**
|
|
||||||
* atomic_inc - increment atomic variable
|
|
||||||
* @v: pointer of type atomic_t
|
|
||||||
*
|
|
||||||
* Atomically increments @v by 1.
|
|
||||||
*/
|
|
||||||
#define atomic_inc(v) atomic_add(1, (v))
|
|
||||||
|
|
||||||
/**
|
|
||||||
* atomic_dec - decrement atomic variable
|
|
||||||
* @v: pointer of type atomic_t
|
|
||||||
*
|
|
||||||
* Atomically decrements @v by 1.
|
|
||||||
*/
|
|
||||||
#define atomic_dec(v) atomic_sub(1, (v))
|
|
||||||
|
|
||||||
/**
|
|
||||||
* atomic_dec_and_test - decrement and test
|
|
||||||
* @v: pointer of type atomic_t
|
|
||||||
*
|
|
||||||
* Atomically decrements @v by 1 and returns true if the result is 0.
|
|
||||||
*/
|
|
||||||
#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
|
|
||||||
|
|
||||||
/**
|
|
||||||
* atomic_inc_and_test - increment and test
|
|
||||||
* @v: pointer of type atomic_t
|
|
||||||
*
|
|
||||||
* Atomically increments @v by 1 and returns true if the result is 0.
|
|
||||||
*/
|
|
||||||
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
|
|
||||||
|
|
||||||
/**
|
|
||||||
* atomic_xchg - atomically exchange contents of memory with a new value
|
|
||||||
* @v: pointer of type atomic_t
|
|
||||||
* @i: integer value to store in memory
|
|
||||||
*
|
|
||||||
* Atomically sets @v to @i and returns old @v
|
|
||||||
*/
|
|
||||||
static inline int atomic_xchg(atomic_t *v, int n)
|
|
||||||
{
|
|
||||||
return xchg(&v->counter, n);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* atomic_cmpxchg - atomically exchange contents of memory if it matches
|
|
||||||
* @v: pointer of type atomic_t
|
|
||||||
* @o: old value that memory should have
|
|
||||||
* @n: new value to write to memory if it matches
|
|
||||||
*
|
|
||||||
* Atomically checks if @v holds @o and replaces it with @n if so.
|
|
||||||
* Returns the old value at @v.
|
|
||||||
*/
|
|
||||||
static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
|
|
||||||
{
|
|
||||||
return cmpxchg(&v->counter, o, n);
|
|
||||||
}
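A common use of atomic_cmpxchg() is a claim-once flag, where exactly one caller observes the 0 -> 1 transition; a minimal sketch with an invented flag variable:

static atomic_t init_done = ATOMIC_INIT(0);

static bool claim_init(void)
{
	/* Only the caller that swaps 0 for 1 gets true. */
	return atomic_cmpxchg(&init_done, 0, 1) == 0;
}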
|
|
||||||
|
|
||||||
/**
|
|
||||||
* atomic_add_negative - add and test if negative
|
|
||||||
* @v: pointer of type atomic_t
|
|
||||||
* @i: integer value to add
|
|
||||||
*
|
|
||||||
* Atomically adds @i to @v and returns true if the result is
|
|
||||||
* negative, or false when result is greater than or equal to zero.
|
|
||||||
*/
|
|
||||||
#define atomic_add_negative(i, v) (atomic_add_return((i), (v)) < 0)
|
|
||||||
|
|
||||||
#endif /* __ASSEMBLY__ */
|
|
||||||
|
|
||||||
#ifndef __tilegx__
|
|
||||||
#include <asm/atomic_32.h>
|
|
||||||
#else
|
|
||||||
#include <asm/atomic_64.h>
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#ifndef __ASSEMBLY__
|
|
||||||
|
|
||||||
/**
|
|
||||||
* atomic64_xchg - atomically exchange contents of memory with a new value
|
|
||||||
* @v: pointer of type atomic64_t
|
|
||||||
* @i: integer value to store in memory
|
|
||||||
*
|
|
||||||
* Atomically sets @v to @i and returns old @v
|
|
||||||
*/
|
|
||||||
static inline long long atomic64_xchg(atomic64_t *v, long long n)
|
|
||||||
{
|
|
||||||
return xchg64(&v->counter, n);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* atomic64_cmpxchg - atomically exchange contents of memory if it matches
|
|
||||||
* @v: pointer of type atomic64_t
|
|
||||||
* @o: old value that memory should have
|
|
||||||
* @n: new value to write to memory if it matches
|
|
||||||
*
|
|
||||||
* Atomically checks if @v holds @o and replaces it with @n if so.
|
|
||||||
* Returns the old value at @v.
|
|
||||||
*/
|
|
||||||
static inline long long atomic64_cmpxchg(atomic64_t *v, long long o,
|
|
||||||
long long n)
|
|
||||||
{
|
|
||||||
return cmpxchg64(&v->counter, o, n);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline long long atomic64_dec_if_positive(atomic64_t *v)
|
|
||||||
{
|
|
||||||
long long c, old, dec;
|
|
||||||
|
|
||||||
c = atomic64_read(v);
|
|
||||||
for (;;) {
|
|
||||||
dec = c - 1;
|
|
||||||
if (unlikely(dec < 0))
|
|
||||||
break;
|
|
||||||
old = atomic64_cmpxchg((v), c, dec);
|
|
||||||
if (likely(old == c))
|
|
||||||
break;
|
|
||||||
c = old;
|
|
||||||
}
|
|
||||||
return dec;
|
|
||||||
}
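The retry loop above never lets the value drop below zero, which makes it handy for credit-style counters; a sketch with made-up names:

static atomic64_t tx_credits = ATOMIC64_INIT(16);

static bool consume_tx_credit(void)
{
	/* Returns the decremented value, or -1 when no credit was left. */
	return atomic64_dec_if_positive(&tx_credits) >= 0;
}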
|
|
||||||
|
|
||||||
#endif /* __ASSEMBLY__ */
|
|
||||||
|
|
||||||
#endif /* _ASM_TILE_ATOMIC_H */
|
|
|
@@ -1,297 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
|
||||||
*
|
|
||||||
* This program is free software; you can redistribute it and/or
|
|
||||||
* modify it under the terms of the GNU General Public License
|
|
||||||
* as published by the Free Software Foundation, version 2.
|
|
||||||
*
|
|
||||||
* This program is distributed in the hope that it will be useful, but
|
|
||||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
|
||||||
* NON INFRINGEMENT. See the GNU General Public License for
|
|
||||||
* more details.
|
|
||||||
*
|
|
||||||
* Do not include directly; use <linux/atomic.h>.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#ifndef _ASM_TILE_ATOMIC_32_H
|
|
||||||
#define _ASM_TILE_ATOMIC_32_H
|
|
||||||
|
|
||||||
#include <asm/barrier.h>
|
|
||||||
#include <arch/chip.h>
|
|
||||||
|
|
||||||
#ifndef __ASSEMBLY__
|
|
||||||
|
|
||||||
/**
|
|
||||||
* atomic_add - add integer to atomic variable
|
|
||||||
* @i: integer value to add
|
|
||||||
* @v: pointer of type atomic_t
|
|
||||||
*
|
|
||||||
* Atomically adds @i to @v.
|
|
||||||
*/
|
|
||||||
static inline void atomic_add(int i, atomic_t *v)
|
|
||||||
{
|
|
||||||
_atomic_xchg_add(&v->counter, i);
|
|
||||||
}
|
|
||||||
|
|
||||||
#define ATOMIC_OPS(op) \
|
|
||||||
unsigned long _atomic_fetch_##op(volatile unsigned long *p, unsigned long mask); \
|
|
||||||
static inline void atomic_##op(int i, atomic_t *v) \
|
|
||||||
{ \
|
|
||||||
_atomic_fetch_##op((unsigned long *)&v->counter, i); \
|
|
||||||
} \
|
|
||||||
static inline int atomic_fetch_##op(int i, atomic_t *v) \
|
|
||||||
{ \
|
|
||||||
smp_mb(); \
|
|
||||||
return _atomic_fetch_##op((unsigned long *)&v->counter, i); \
|
|
||||||
}
|
|
||||||
|
|
||||||
ATOMIC_OPS(and)
|
|
||||||
ATOMIC_OPS(or)
|
|
||||||
ATOMIC_OPS(xor)
|
|
||||||
|
|
||||||
#undef ATOMIC_OPS
|
|
||||||
|
|
||||||
static inline int atomic_fetch_add(int i, atomic_t *v)
|
|
||||||
{
|
|
||||||
smp_mb();
|
|
||||||
return _atomic_xchg_add(&v->counter, i);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* atomic_add_return - add integer and return
|
|
||||||
* @v: pointer of type atomic_t
|
|
||||||
* @i: integer value to add
|
|
||||||
*
|
|
||||||
* Atomically adds @i to @v and returns @i + @v
|
|
||||||
*/
|
|
||||||
static inline int atomic_add_return(int i, atomic_t *v)
|
|
||||||
{
|
|
||||||
smp_mb(); /* barrier for proper semantics */
|
|
||||||
return _atomic_xchg_add(&v->counter, i) + i;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* __atomic_add_unless - add unless the number is already a given value
|
|
||||||
* @v: pointer of type atomic_t
|
|
||||||
* @a: the amount to add to v...
|
|
||||||
* @u: ...unless v is equal to u.
|
|
||||||
*
|
|
||||||
* Atomically adds @a to @v, so long as @v was not already @u.
|
|
||||||
* Returns the old value of @v.
|
|
||||||
*/
|
|
||||||
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
|
|
||||||
{
|
|
||||||
smp_mb(); /* barrier for proper semantics */
|
|
||||||
return _atomic_xchg_add_unless(&v->counter, a, u);
|
|
||||||
}
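This primitive is what atomic_inc_not_zero()-style reference grabbing is built on: add only while the counter is not already the forbidden value. A sketch with a hypothetical refcounted object:

struct obj {
	atomic_t refcnt;
};

static bool obj_tryget(struct obj *o)
{
	/* The old value is returned, so 0 means the object was already dead. */
	return __atomic_add_unless(&o->refcnt, 1, 0) != 0;
}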
|
|
||||||
|
|
||||||
/**
|
|
||||||
* atomic_set - set atomic variable
|
|
||||||
* @v: pointer of type atomic_t
|
|
||||||
* @i: required value
|
|
||||||
*
|
|
||||||
* Atomically sets the value of @v to @i.
|
|
||||||
*
|
|
||||||
* atomic_set() can't be just a raw store, since it would be lost if it
|
|
||||||
* fell between the load and store of one of the other atomic ops.
|
|
||||||
*/
|
|
||||||
static inline void atomic_set(atomic_t *v, int n)
|
|
||||||
{
|
|
||||||
_atomic_xchg(&v->counter, n);
|
|
||||||
}
|
|
||||||
|
|
||||||
#define atomic_set_release(v, i) atomic_set((v), (i))
|
|
||||||
|
|
||||||
/* A 64bit atomic type */
|
|
||||||
|
|
||||||
typedef struct {
|
|
||||||
long long counter;
|
|
||||||
} atomic64_t;
|
|
||||||
|
|
||||||
#define ATOMIC64_INIT(val) { (val) }
|
|
||||||
|
|
||||||
/**
|
|
||||||
* atomic64_read - read atomic variable
|
|
||||||
* @v: pointer of type atomic64_t
|
|
||||||
*
|
|
||||||
* Atomically reads the value of @v.
|
|
||||||
*/
|
|
||||||
static inline long long atomic64_read(const atomic64_t *v)
|
|
||||||
{
|
|
||||||
/*
|
|
||||||
* Requires an atomic op to read both 32-bit parts consistently.
|
|
||||||
* Casting away const is safe since the atomic support routines
|
|
||||||
* do not write to memory if the value has not been modified.
|
|
||||||
*/
|
|
||||||
return _atomic64_xchg_add((long long *)&v->counter, 0);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* atomic64_add - add integer to atomic variable
|
|
||||||
* @i: integer value to add
|
|
||||||
* @v: pointer of type atomic64_t
|
|
||||||
*
|
|
||||||
* Atomically adds @i to @v.
|
|
||||||
*/
|
|
||||||
static inline void atomic64_add(long long i, atomic64_t *v)
|
|
||||||
{
|
|
||||||
_atomic64_xchg_add(&v->counter, i);
|
|
||||||
}
|
|
||||||
|
|
||||||
#define ATOMIC64_OPS(op) \
|
|
||||||
long long _atomic64_fetch_##op(long long *v, long long n); \
|
|
||||||
static inline void atomic64_##op(long long i, atomic64_t *v) \
|
|
||||||
{ \
|
|
||||||
_atomic64_fetch_##op(&v->counter, i); \
|
|
||||||
} \
|
|
||||||
static inline long long atomic64_fetch_##op(long long i, atomic64_t *v) \
|
|
||||||
{ \
|
|
||||||
smp_mb(); \
|
|
||||||
return _atomic64_fetch_##op(&v->counter, i); \
|
|
||||||
}
|
|
||||||
|
|
||||||
ATOMIC64_OPS(and)
|
|
||||||
ATOMIC64_OPS(or)
|
|
||||||
ATOMIC64_OPS(xor)
|
|
||||||
|
|
||||||
#undef ATOMIC64_OPS
|
|
||||||
|
|
||||||
static inline long long atomic64_fetch_add(long long i, atomic64_t *v)
|
|
||||||
{
|
|
||||||
smp_mb();
|
|
||||||
return _atomic64_xchg_add(&v->counter, i);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* atomic64_add_return - add integer and return
|
|
||||||
* @v: pointer of type atomic64_t
|
|
||||||
* @i: integer value to add
|
|
||||||
*
|
|
||||||
* Atomically adds @i to @v and returns @i + @v
|
|
||||||
*/
|
|
||||||
static inline long long atomic64_add_return(long long i, atomic64_t *v)
|
|
||||||
{
|
|
||||||
smp_mb(); /* barrier for proper semantics */
|
|
||||||
return _atomic64_xchg_add(&v->counter, i) + i;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* atomic64_add_unless - add unless the number is already a given value
|
|
||||||
* @v: pointer of type atomic64_t
|
|
||||||
* @a: the amount to add to v...
|
|
||||||
* @u: ...unless v is equal to u.
|
|
||||||
*
|
|
||||||
* Atomically adds @a to @v, so long as @v was not already @u.
|
|
||||||
* Returns non-zero if @v was not @u, and zero otherwise.
|
|
||||||
*/
|
|
||||||
static inline long long atomic64_add_unless(atomic64_t *v, long long a,
|
|
||||||
long long u)
|
|
||||||
{
|
|
||||||
smp_mb(); /* barrier for proper semantics */
|
|
||||||
return _atomic64_xchg_add_unless(&v->counter, a, u) != u;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* atomic64_set - set atomic variable
|
|
||||||
* @v: pointer of type atomic64_t
|
|
||||||
* @i: required value
|
|
||||||
*
|
|
||||||
* Atomically sets the value of @v to @i.
|
|
||||||
*
|
|
||||||
* atomic64_set() can't be just a raw store, since it would be lost if it
|
|
||||||
* fell between the load and store of one of the other atomic ops.
|
|
||||||
*/
|
|
||||||
static inline void atomic64_set(atomic64_t *v, long long n)
|
|
||||||
{
|
|
||||||
_atomic64_xchg(&v->counter, n);
|
|
||||||
}
|
|
||||||
|
|
||||||
#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
|
|
||||||
#define atomic64_inc(v) atomic64_add(1LL, (v))
|
|
||||||
#define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
|
|
||||||
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
|
|
||||||
#define atomic64_sub_return(i, v) atomic64_add_return(-(i), (v))
|
|
||||||
#define atomic64_fetch_sub(i, v) atomic64_fetch_add(-(i), (v))
|
|
||||||
#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
|
|
||||||
#define atomic64_sub(i, v) atomic64_add(-(i), (v))
|
|
||||||
#define atomic64_dec(v) atomic64_sub(1LL, (v))
|
|
||||||
#define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
|
|
||||||
#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
|
|
||||||
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
|
|
||||||
|
|
||||||
#endif /* !__ASSEMBLY__ */
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Internal definitions only beyond this point.
|
|
||||||
*/
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Number of atomic locks in atomic_locks[]. Must be a power of two.
|
|
||||||
* There is no reason for more than PAGE_SIZE / 8 entries, since that
|
|
||||||
* is the maximum number of pointer bits we can use to index this.
|
|
||||||
* And we cannot have more than PAGE_SIZE / 4, since this has to
|
|
||||||
* fit on a single page and each entry takes 4 bytes.
|
|
||||||
*/
|
|
||||||
#define ATOMIC_HASH_SHIFT (PAGE_SHIFT - 3)
|
|
||||||
#define ATOMIC_HASH_SIZE (1 << ATOMIC_HASH_SHIFT)
|
|
||||||
|
|
||||||
#ifndef __ASSEMBLY__
|
|
||||||
extern int atomic_locks[];
|
|
||||||
#endif
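In outline, the scheme described above hashes the atomic variable's address into this fixed table of locks; a rough sketch of the idea (a simplification, not the exact bit selection done by __atomic_hashed_lock() in assembly):

/* Illustrative only: derive a lock slot from low-order pointer bits. */
static inline int *atomic_lock_for(volatile void *v)
{
	unsigned long slot = ((unsigned long)v >> 3) & (ATOMIC_HASH_SIZE - 1);

	return &atomic_locks[slot];
}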
|
|
||||||
|
|
||||||
/*
|
|
||||||
* All the code that may fault while holding an atomic lock must
|
|
||||||
* place the pointer to the lock in ATOMIC_LOCK_REG so the fault code
|
|
||||||
* can correctly release and reacquire the lock. Note that we
|
|
||||||
* mention the register number in a comment in "lib/atomic_asm.S" to keep
* assembly coders from using this register by mistake, so if it
|
|
||||||
* is changed here, change that comment as well.
|
|
||||||
*/
|
|
||||||
#define ATOMIC_LOCK_REG 20
|
|
||||||
#define ATOMIC_LOCK_REG_NAME r20
|
|
||||||
|
|
||||||
#ifndef __ASSEMBLY__
|
|
||||||
/* Called from setup to initialize a hash table to point to per_cpu locks. */
|
|
||||||
void __init_atomic_per_cpu(void);
|
|
||||||
|
|
||||||
#ifdef CONFIG_SMP
|
|
||||||
/* Support releasing the atomic lock in do_page_fault_ics(). */
|
|
||||||
void __atomic_fault_unlock(int *lock_ptr);
|
|
||||||
#endif
|
|
||||||
|
|
||||||
/* Return a pointer to the lock for the given address. */
|
|
||||||
int *__atomic_hashed_lock(volatile void *v);
|
|
||||||
|
|
||||||
/* Private helper routines in lib/atomic_asm_32.S */
|
|
||||||
struct __get_user {
|
|
||||||
unsigned long val;
|
|
||||||
int err;
|
|
||||||
};
|
|
||||||
extern struct __get_user __atomic32_cmpxchg(volatile int *p,
|
|
||||||
int *lock, int o, int n);
|
|
||||||
extern struct __get_user __atomic32_xchg(volatile int *p, int *lock, int n);
|
|
||||||
extern struct __get_user __atomic32_xchg_add(volatile int *p, int *lock, int n);
|
|
||||||
extern struct __get_user __atomic32_xchg_add_unless(volatile int *p,
|
|
||||||
int *lock, int o, int n);
|
|
||||||
extern struct __get_user __atomic32_fetch_or(volatile int *p, int *lock, int n);
|
|
||||||
extern struct __get_user __atomic32_fetch_and(volatile int *p, int *lock, int n);
|
|
||||||
extern struct __get_user __atomic32_fetch_andn(volatile int *p, int *lock, int n);
|
|
||||||
extern struct __get_user __atomic32_fetch_xor(volatile int *p, int *lock, int n);
|
|
||||||
extern long long __atomic64_cmpxchg(volatile long long *p, int *lock,
|
|
||||||
long long o, long long n);
|
|
||||||
extern long long __atomic64_xchg(volatile long long *p, int *lock, long long n);
|
|
||||||
extern long long __atomic64_xchg_add(volatile long long *p, int *lock,
|
|
||||||
long long n);
|
|
||||||
extern long long __atomic64_xchg_add_unless(volatile long long *p,
|
|
||||||
int *lock, long long o, long long n);
|
|
||||||
extern long long __atomic64_fetch_and(volatile long long *p, int *lock, long long n);
|
|
||||||
extern long long __atomic64_fetch_or(volatile long long *p, int *lock, long long n);
|
|
||||||
extern long long __atomic64_fetch_xor(volatile long long *p, int *lock, long long n);
|
|
||||||
|
|
||||||
/* Return failure from the atomic wrappers. */
|
|
||||||
struct __get_user __atomic_bad_address(int __user *addr);
|
|
||||||
|
|
||||||
#endif /* !__ASSEMBLY__ */
|
|
||||||
|
|
||||||
#endif /* _ASM_TILE_ATOMIC_32_H */
|
|
|
@@ -1,200 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright 2011 Tilera Corporation. All Rights Reserved.
|
|
||||||
*
|
|
||||||
* This program is free software; you can redistribute it and/or
|
|
||||||
* modify it under the terms of the GNU General Public License
|
|
||||||
* as published by the Free Software Foundation, version 2.
|
|
||||||
*
|
|
||||||
* This program is distributed in the hope that it will be useful, but
|
|
||||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
|
||||||
* NON INFRINGEMENT. See the GNU General Public License for
|
|
||||||
* more details.
|
|
||||||
*
|
|
||||||
* Do not include directly; use <linux/atomic.h>.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#ifndef _ASM_TILE_ATOMIC_64_H
|
|
||||||
#define _ASM_TILE_ATOMIC_64_H
|
|
||||||
|
|
||||||
#ifndef __ASSEMBLY__
|
|
||||||
|
|
||||||
#include <asm/barrier.h>
|
|
||||||
#include <arch/spr_def.h>
|
|
||||||
|
|
||||||
/* First, the 32-bit atomic ops that are "real" on our 64-bit platform. */
|
|
||||||
|
|
||||||
#define atomic_set(v, i) WRITE_ONCE((v)->counter, (i))
|
|
||||||
|
|
||||||
/*
|
|
||||||
* The smp_mb() operations throughout are to support the fact that
|
|
||||||
* Linux requires memory barriers before and after the operation,
|
|
||||||
* on any routine which updates memory and returns a value.
|
|
||||||
*/
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Note a subtlety of the locking here. We are required to provide a
|
|
||||||
* full memory barrier before and after the operation. However, we
|
|
||||||
* only provide an explicit mb before the operation. After the
|
|
||||||
* operation, we use barrier() to get a full mb for free, because:
|
|
||||||
*
|
|
||||||
* (1) The barrier directive to the compiler prohibits any instructions
|
|
||||||
* being statically hoisted before the barrier;
|
|
||||||
* (2) the microarchitecture will not issue any further instructions
|
|
||||||
* until the fetchadd result is available for the "+ i" add instruction;
|
|
||||||
* (3) the smp_mb before the fetchadd ensures that no other memory
|
|
||||||
* operations are in flight at this point.
|
|
||||||
*/
|
|
||||||
static inline int atomic_add_return(int i, atomic_t *v)
|
|
||||||
{
|
|
||||||
int val;
|
|
||||||
smp_mb(); /* barrier for proper semantics */
|
|
||||||
val = __insn_fetchadd4((void *)&v->counter, i) + i;
|
|
||||||
barrier(); /* equivalent to smp_mb(); see block comment above */
|
|
||||||
return val;
|
|
||||||
}
|
|
||||||
|
|
||||||
#define ATOMIC_OPS(op) \
|
|
||||||
static inline int atomic_fetch_##op(int i, atomic_t *v) \
|
|
||||||
{ \
|
|
||||||
int val; \
|
|
||||||
smp_mb(); \
|
|
||||||
val = __insn_fetch##op##4((void *)&v->counter, i); \
|
|
||||||
smp_mb(); \
|
|
||||||
return val; \
|
|
||||||
} \
|
|
||||||
static inline void atomic_##op(int i, atomic_t *v) \
|
|
||||||
{ \
|
|
||||||
__insn_fetch##op##4((void *)&v->counter, i); \
|
|
||||||
}
|
|
||||||
|
|
||||||
ATOMIC_OPS(add)
|
|
||||||
ATOMIC_OPS(and)
|
|
||||||
ATOMIC_OPS(or)
|
|
||||||
|
|
||||||
#undef ATOMIC_OPS
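To make the token pasting concrete, this is what ATOMIC_OPS(or) expands to once the preprocessor is done (written out by hand purely for illustration):

static inline int atomic_fetch_or(int i, atomic_t *v)
{
	int val;
	smp_mb();
	val = __insn_fetchor4((void *)&v->counter, i);
	smp_mb();
	return val;
}

static inline void atomic_or(int i, atomic_t *v)
{
	__insn_fetchor4((void *)&v->counter, i);
}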
|
|
||||||
|
|
||||||
static inline int atomic_fetch_xor(int i, atomic_t *v)
|
|
||||||
{
|
|
||||||
int guess, oldval = v->counter;
|
|
||||||
smp_mb();
|
|
||||||
do {
|
|
||||||
guess = oldval;
|
|
||||||
__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
|
|
||||||
oldval = __insn_cmpexch4(&v->counter, guess ^ i);
|
|
||||||
} while (guess != oldval);
|
|
||||||
smp_mb();
|
|
||||||
return oldval;
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void atomic_xor(int i, atomic_t *v)
|
|
||||||
{
|
|
||||||
int guess, oldval = v->counter;
|
|
||||||
do {
|
|
||||||
guess = oldval;
|
|
||||||
__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
|
|
||||||
oldval = __insn_cmpexch4(&v->counter, guess ^ i);
|
|
||||||
} while (guess != oldval);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
|
|
||||||
{
|
|
||||||
int guess, oldval = v->counter;
|
|
||||||
do {
|
|
||||||
if (oldval == u)
|
|
||||||
break;
|
|
||||||
guess = oldval;
|
|
||||||
oldval = cmpxchg(&v->counter, guess, guess + a);
|
|
||||||
} while (guess != oldval);
|
|
||||||
return oldval;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Now the true 64-bit operations. */
|
|
||||||
|
|
||||||
#define ATOMIC64_INIT(i) { (i) }
|
|
||||||
|
|
||||||
#define atomic64_read(v) READ_ONCE((v)->counter)
|
|
||||||
#define atomic64_set(v, i) WRITE_ONCE((v)->counter, (i))
|
|
||||||
|
|
||||||
static inline long atomic64_add_return(long i, atomic64_t *v)
|
|
||||||
{
|
|
||||||
int val;
|
|
||||||
smp_mb(); /* barrier for proper semantics */
|
|
||||||
val = __insn_fetchadd((void *)&v->counter, i) + i;
|
|
||||||
barrier(); /* equivalent to smp_mb; see atomic_add_return() */
|
|
||||||
return val;
|
|
||||||
}
|
|
||||||
|
|
||||||
#define ATOMIC64_OPS(op) \
|
|
||||||
static inline long atomic64_fetch_##op(long i, atomic64_t *v) \
|
|
||||||
{ \
|
|
||||||
long val; \
|
|
||||||
smp_mb(); \
|
|
||||||
val = __insn_fetch##op((void *)&v->counter, i); \
|
|
||||||
smp_mb(); \
|
|
||||||
return val; \
|
|
||||||
} \
|
|
||||||
static inline void atomic64_##op(long i, atomic64_t *v) \
|
|
||||||
{ \
|
|
||||||
__insn_fetch##op((void *)&v->counter, i); \
|
|
||||||
}
|
|
||||||
|
|
||||||
ATOMIC64_OPS(add)
|
|
||||||
ATOMIC64_OPS(and)
|
|
||||||
ATOMIC64_OPS(or)
|
|
||||||
|
|
||||||
#undef ATOMIC64_OPS
|
|
||||||
|
|
||||||
static inline long atomic64_fetch_xor(long i, atomic64_t *v)
|
|
||||||
{
|
|
||||||
long guess, oldval = v->counter;
|
|
||||||
smp_mb();
|
|
||||||
do {
|
|
||||||
guess = oldval;
|
|
||||||
__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
|
|
||||||
oldval = __insn_cmpexch(&v->counter, guess ^ i);
|
|
||||||
} while (guess != oldval);
|
|
||||||
smp_mb();
|
|
||||||
return oldval;
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void atomic64_xor(long i, atomic64_t *v)
|
|
||||||
{
|
|
||||||
long guess, oldval = v->counter;
|
|
||||||
do {
|
|
||||||
guess = oldval;
|
|
||||||
__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
|
|
||||||
oldval = __insn_cmpexch(&v->counter, guess ^ i);
|
|
||||||
} while (guess != oldval);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
|
|
||||||
{
|
|
||||||
long guess, oldval = v->counter;
|
|
||||||
do {
|
|
||||||
if (oldval == u)
|
|
||||||
break;
|
|
||||||
guess = oldval;
|
|
||||||
oldval = cmpxchg(&v->counter, guess, guess + a);
|
|
||||||
} while (guess != oldval);
|
|
||||||
return oldval != u;
|
|
||||||
}
|
|
||||||
|
|
||||||
#define atomic64_sub_return(i, v) atomic64_add_return(-(i), (v))
|
|
||||||
#define atomic64_fetch_sub(i, v) atomic64_fetch_add(-(i), (v))
|
|
||||||
#define atomic64_sub(i, v) atomic64_add(-(i), (v))
|
|
||||||
#define atomic64_inc_return(v) atomic64_add_return(1, (v))
|
|
||||||
#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
|
|
||||||
#define atomic64_inc(v) atomic64_add(1, (v))
|
|
||||||
#define atomic64_dec(v) atomic64_sub(1, (v))
|
|
||||||
|
|
||||||
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
|
|
||||||
#define atomic64_dec_and_test(v) (atomic64_dec_return(v) == 0)
|
|
||||||
#define atomic64_sub_and_test(i, v) (atomic64_sub_return((i), (v)) == 0)
|
|
||||||
#define atomic64_add_negative(i, v) (atomic64_add_return((i), (v)) < 0)
|
|
||||||
|
|
||||||
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
|
|
||||||
|
|
||||||
#endif /* !__ASSEMBLY__ */
|
|
||||||
|
|
||||||
#endif /* _ASM_TILE_ATOMIC_64_H */
|
|
|
@@ -1,162 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
|
||||||
*
|
|
||||||
* This program is free software; you can redistribute it and/or
|
|
||||||
* modify it under the terms of the GNU General Public License
|
|
||||||
* as published by the Free Software Foundation, version 2.
|
|
||||||
*
|
|
||||||
* This program is distributed in the hope that it will be useful, but
|
|
||||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
|
||||||
* NON INFRINGEMENT. See the GNU General Public License for
|
|
||||||
* more details.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#ifndef _ASM_TILE_BACKTRACE_H
|
|
||||||
#define _ASM_TILE_BACKTRACE_H
|
|
||||||
|
|
||||||
#include <linux/types.h>
|
|
||||||
|
|
||||||
/* Reads 'size' bytes from 'address' and writes the data to 'result'.
|
|
||||||
* Returns true if successful, else false (e.g. memory not readable).
|
|
||||||
*/
|
|
||||||
typedef bool (*BacktraceMemoryReader)(void *result,
|
|
||||||
unsigned long address,
|
|
||||||
unsigned int size,
|
|
||||||
void *extra);
|
|
||||||
|
|
||||||
typedef struct {
|
|
||||||
/* Current PC. */
|
|
||||||
unsigned long pc;
|
|
||||||
|
|
||||||
/* Current stack pointer value. */
|
|
||||||
unsigned long sp;
|
|
||||||
|
|
||||||
/* Current frame pointer value (i.e. caller's stack pointer) */
|
|
||||||
unsigned long fp;
|
|
||||||
|
|
||||||
/* Internal use only: caller's PC for first frame. */
|
|
||||||
unsigned long initial_frame_caller_pc;
|
|
||||||
|
|
||||||
/* Internal use only: callback to read memory. */
|
|
||||||
BacktraceMemoryReader read_memory_func;
|
|
||||||
|
|
||||||
/* Internal use only: arbitrary argument to read_memory_func. */
|
|
||||||
void *read_memory_func_extra;
|
|
||||||
|
|
||||||
} BacktraceIterator;
|
|
||||||
|
|
||||||
|
|
||||||
typedef enum {
|
|
||||||
|
|
||||||
/* We have no idea what the caller's pc is. */
|
|
||||||
PC_LOC_UNKNOWN,
|
|
||||||
|
|
||||||
/* The caller's pc is currently in lr. */
|
|
||||||
PC_LOC_IN_LR,
|
|
||||||
|
|
||||||
/* The caller's pc can be found by dereferencing the caller's sp. */
|
|
||||||
PC_LOC_ON_STACK
|
|
||||||
|
|
||||||
} CallerPCLocation;
|
|
||||||
|
|
||||||
|
|
||||||
typedef enum {
|
|
||||||
|
|
||||||
/* We have no idea what the caller's sp is. */
|
|
||||||
SP_LOC_UNKNOWN,
|
|
||||||
|
|
||||||
/* The caller's sp is currently in r52. */
|
|
||||||
SP_LOC_IN_R52,
|
|
||||||
|
|
||||||
/* The caller's sp can be found by adding a certain constant
|
|
||||||
* to the current value of sp.
|
|
||||||
*/
|
|
||||||
SP_LOC_OFFSET
|
|
||||||
|
|
||||||
} CallerSPLocation;
|
|
||||||
|
|
||||||
|
|
||||||
/* Bit values ORed into CALLER_* values for info ops. */
|
|
||||||
enum {
|
|
||||||
/* Setting the low bit on any of these values means the info op
|
|
||||||
* applies only to one bundle ago.
|
|
||||||
*/
|
|
||||||
ONE_BUNDLE_AGO_FLAG = 1,
|
|
||||||
|
|
||||||
/* Setting this bit on a CALLER_SP_* value means the PC is in LR.
|
|
||||||
* If not set, PC is on the stack.
|
|
||||||
*/
|
|
||||||
PC_IN_LR_FLAG = 2,
|
|
||||||
|
|
||||||
/* This many of the low bits of a CALLER_SP_* value are for the
|
|
||||||
* flag bits above.
|
|
||||||
*/
|
|
||||||
NUM_INFO_OP_FLAGS = 2,
|
|
||||||
|
|
||||||
/* We cannot have one in the memory pipe so this is the maximum. */
|
|
||||||
MAX_INFO_OPS_PER_BUNDLE = 2
|
|
||||||
};
|
|
||||||
|
|
||||||
|
|
||||||
/* Internal constants used to define 'info' operands. */
|
|
||||||
enum {
|
|
||||||
/* 0 and 1 are reserved, as are all negative numbers. */
|
|
||||||
|
|
||||||
CALLER_UNKNOWN_BASE = 2,
|
|
||||||
|
|
||||||
CALLER_SP_IN_R52_BASE = 4,
|
|
||||||
|
|
||||||
CALLER_SP_OFFSET_BASE = 8,
|
|
||||||
};
|
|
||||||
|
|
||||||
|
|
||||||
/* Current backtracer state describing where it thinks the caller is. */
|
|
||||||
typedef struct {
|
|
||||||
/*
|
|
||||||
* Public fields
|
|
||||||
*/
|
|
||||||
|
|
||||||
/* How do we find the caller's PC? */
|
|
||||||
CallerPCLocation pc_location : 8;
|
|
||||||
|
|
||||||
/* How do we find the caller's SP? */
|
|
||||||
CallerSPLocation sp_location : 8;
|
|
||||||
|
|
||||||
/* If sp_location == SP_LOC_OFFSET, then caller_sp == sp +
|
|
||||||
* loc->sp_offset. Else this field is undefined.
|
|
||||||
*/
|
|
||||||
uint16_t sp_offset;
|
|
||||||
|
|
||||||
/* Is the most recently visited bundle a terminating bundle? */
|
|
||||||
bool at_terminating_bundle;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Private fields
|
|
||||||
*/
|
|
||||||
|
|
||||||
/* Will the forward scanner see someone clobbering sp
|
|
||||||
* (i.e. changing it with something other than addi sp, sp, N?)
|
|
||||||
*/
|
|
||||||
bool sp_clobber_follows;
|
|
||||||
|
|
||||||
/* Operand to next "visible" info op (no more than one bundle past
|
|
||||||
* the next terminating bundle), or -32768 if none.
|
|
||||||
*/
|
|
||||||
int16_t next_info_operand;
|
|
||||||
|
|
||||||
/* Is the info op in next_info_operand in the very next bundle? */
|
|
||||||
bool is_next_info_operand_adjacent;
|
|
||||||
|
|
||||||
} CallerLocation;
|
|
||||||
|
|
||||||
extern void backtrace_init(BacktraceIterator *state,
|
|
||||||
BacktraceMemoryReader read_memory_func,
|
|
||||||
void *read_memory_func_extra,
|
|
||||||
unsigned long pc, unsigned long lr,
|
|
||||||
unsigned long sp, unsigned long r52);
|
|
||||||
|
|
||||||
|
|
||||||
extern bool backtrace_next(BacktraceIterator *state);
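A sketch of how this iterator is driven: initialize it with the starting register state, then step until backtrace_next() reports no more frames. The memory-reader callback and the pr_info() reporting below are placeholders for illustration, not code from the removed port:

#include <linux/printk.h>
#include <linux/string.h>

/* Hypothetical reader: trusts the address and copies it directly. */
static bool demo_read_memory(void *result, unsigned long address,
			     unsigned int size, void *extra)
{
	memcpy(result, (void *)address, size);
	return true;
}

static void demo_dump_stack(unsigned long pc, unsigned long lr,
			    unsigned long sp, unsigned long r52)
{
	BacktraceIterator it;

	backtrace_init(&it, demo_read_memory, NULL, pc, lr, sp, r52);
	do {
		pr_info(" frame: pc %#lx sp %#lx\n", it.pc, it.sp);
	} while (backtrace_next(&it));
}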
|
|
||||||
|
|
||||||
#endif /* _ASM_TILE_BACKTRACE_H */
|
|
|
@@ -1,100 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
|
||||||
*
|
|
||||||
* This program is free software; you can redistribute it and/or
|
|
||||||
* modify it under the terms of the GNU General Public License
|
|
||||||
* as published by the Free Software Foundation, version 2.
|
|
||||||
*
|
|
||||||
* This program is distributed in the hope that it will be useful, but
|
|
||||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
|
||||||
* NON INFRINGEMENT. See the GNU General Public License for
|
|
||||||
* more details.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#ifndef _ASM_TILE_BARRIER_H
|
|
||||||
#define _ASM_TILE_BARRIER_H
|
|
||||||
|
|
||||||
#ifndef __ASSEMBLY__
|
|
||||||
|
|
||||||
#include <linux/types.h>
|
|
||||||
#include <arch/chip.h>
|
|
||||||
#include <arch/spr_def.h>
|
|
||||||
#include <asm/timex.h>
|
|
||||||
|
|
||||||
#define __sync() __insn_mf()
|
|
||||||
|
|
||||||
#include <hv/syscall_public.h>
|
|
||||||
/*
|
|
||||||
* Issue an uncacheable load to each memory controller, then
|
|
||||||
* wait until those loads have completed.
|
|
||||||
*/
|
|
||||||
static inline void __mb_incoherent(void)
|
|
||||||
{
|
|
||||||
long clobber_r10;
|
|
||||||
asm volatile("swint2"
|
|
||||||
: "=R10" (clobber_r10)
|
|
||||||
: "R10" (HV_SYS_fence_incoherent)
|
|
||||||
: "r0", "r1", "r2", "r3", "r4",
|
|
||||||
"r5", "r6", "r7", "r8", "r9",
|
|
||||||
"r11", "r12", "r13", "r14",
|
|
||||||
"r15", "r16", "r17", "r18", "r19",
|
|
||||||
"r20", "r21", "r22", "r23", "r24",
|
|
||||||
"r25", "r26", "r27", "r28", "r29");
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Fence to guarantee visibility of stores to incoherent memory. */
|
|
||||||
static inline void
|
|
||||||
mb_incoherent(void)
|
|
||||||
{
|
|
||||||
__insn_mf();
|
|
||||||
|
|
||||||
{
|
|
||||||
#if CHIP_HAS_TILE_WRITE_PENDING()
|
|
||||||
const unsigned long WRITE_TIMEOUT_CYCLES = 400;
|
|
||||||
unsigned long start = get_cycles_low();
|
|
||||||
do {
|
|
||||||
if (__insn_mfspr(SPR_TILE_WRITE_PENDING) == 0)
|
|
||||||
return;
|
|
||||||
} while ((get_cycles_low() - start) < WRITE_TIMEOUT_CYCLES);
|
|
||||||
#endif /* CHIP_HAS_TILE_WRITE_PENDING() */
|
|
||||||
(void) __mb_incoherent();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#define fast_wmb() __sync()
|
|
||||||
#define fast_rmb() __sync()
|
|
||||||
#define fast_mb() __sync()
|
|
||||||
#define fast_iob() mb_incoherent()
|
|
||||||
|
|
||||||
#define wmb() fast_wmb()
|
|
||||||
#define rmb() fast_rmb()
|
|
||||||
#define mb() fast_mb()
|
|
||||||
#define iob() fast_iob()
|
|
||||||
|
|
||||||
#ifndef __tilegx__ /* 32 bit */
|
|
||||||
/*
|
|
||||||
* We need to barrier before modifying the word, since the _atomic_xxx()
|
|
||||||
* routines just tns the lock and then read/modify/write of the word.
|
|
||||||
* But after the word is updated, the routine issues an "mf" before returning,
|
|
||||||
* and since it's a function call, we don't even need a compiler barrier.
|
|
||||||
*/
|
|
||||||
#define __smp_mb__before_atomic() __smp_mb()
|
|
||||||
#define __smp_mb__after_atomic() do { } while (0)
|
|
||||||
#define smp_mb__after_atomic() __smp_mb__after_atomic()
|
|
||||||
#else /* 64 bit */
|
|
||||||
#define __smp_mb__before_atomic() __smp_mb()
|
|
||||||
#define __smp_mb__after_atomic() __smp_mb()
|
|
||||||
#endif
|
|
||||||
|
|
||||||
/*
|
|
||||||
* The TILE architecture does not do speculative reads; this ensures
|
|
||||||
* that a control dependency also orders against loads and already provides
|
|
||||||
* a LOAD->{LOAD,STORE} order and can forgo the additional RMB.
|
|
||||||
*/
|
|
||||||
#define smp_acquire__after_ctrl_dep() barrier()
|
|
||||||
|
|
||||||
#include <asm-generic/barrier.h>
|
|
||||||
|
|
||||||
#endif /* !__ASSEMBLY__ */
|
|
||||||
#endif /* _ASM_TILE_BARRIER_H */
|
|
|
@@ -1,94 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright 1992, Linus Torvalds.
|
|
||||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
|
||||||
*
|
|
||||||
* This program is free software; you can redistribute it and/or
|
|
||||||
* modify it under the terms of the GNU General Public License
|
|
||||||
* as published by the Free Software Foundation, version 2.
|
|
||||||
*
|
|
||||||
* This program is distributed in the hope that it will be useful, but
|
|
||||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
|
||||||
* NON INFRINGEMENT. See the GNU General Public License for
|
|
||||||
* more details.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#ifndef _ASM_TILE_BITOPS_H
|
|
||||||
#define _ASM_TILE_BITOPS_H
|
|
||||||
|
|
||||||
#include <linux/types.h>
|
|
||||||
#include <asm/barrier.h>
|
|
||||||
|
|
||||||
#ifndef _LINUX_BITOPS_H
|
|
||||||
#error only <linux/bitops.h> can be included directly
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#ifdef __tilegx__
|
|
||||||
#include <asm/bitops_64.h>
|
|
||||||
#else
|
|
||||||
#include <asm/bitops_32.h>
|
|
||||||
#endif
|
|
||||||
|
|
||||||
/**
|
|
||||||
* ffz - find first zero bit in word
|
|
||||||
* @word: The word to search
|
|
||||||
*
|
|
||||||
* Undefined if no zero exists, so code should check against ~0UL first.
|
|
||||||
*/
|
|
||||||
static inline unsigned long ffz(unsigned long word)
|
|
||||||
{
|
|
||||||
return __builtin_ctzl(~word);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline int fls64(__u64 w)
|
|
||||||
{
|
|
||||||
return (sizeof(__u64) * 8) - __builtin_clzll(w);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* fls - find last set bit in word
|
|
||||||
* @x: the word to search
|
|
||||||
*
|
|
||||||
* This is defined in a similar way as the libc and compiler builtin
|
|
||||||
* ffs, but returns the position of the most significant set bit.
|
|
||||||
*
|
|
||||||
* fls(value) returns 0 if value is 0 or the position of the last
|
|
||||||
* set bit if value is nonzero. The last (most significant) bit is
|
|
||||||
* at position 32.
|
|
||||||
*/
|
|
||||||
static inline int fls(int x)
|
|
||||||
{
|
|
||||||
return fls64((unsigned int) x);
|
|
||||||
}
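A few concrete values for the two helpers above, as a sanity check on the conventions (fls() is 1-based, ffz() is 0-based); a throwaway sketch, not part of the header:

static void bitops_sanity_check(void)
{
	WARN_ON(fls(1) != 1);			/* lowest bit is position 1 */
	WARN_ON(fls(0x80000000) != 32);		/* highest bit is position 32 */
	WARN_ON(ffz(0x0UL) != 0);		/* bit 0 is already clear */
	WARN_ON(ffz(0x3UL) != 2);		/* first clear bit above 0b11 */
}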
|
|
||||||
|
|
||||||
static inline unsigned int __arch_hweight32(unsigned int w)
|
|
||||||
{
|
|
||||||
return __builtin_popcount(w);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline unsigned int __arch_hweight16(unsigned int w)
|
|
||||||
{
|
|
||||||
return __builtin_popcount(w & 0xffff);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline unsigned int __arch_hweight8(unsigned int w)
|
|
||||||
{
|
|
||||||
return __builtin_popcount(w & 0xff);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline unsigned long __arch_hweight64(__u64 w)
|
|
||||||
{
|
|
||||||
return __builtin_popcountll(w);
|
|
||||||
}
|
|
||||||
|
|
||||||
#include <asm-generic/bitops/builtin-__ffs.h>
|
|
||||||
#include <asm-generic/bitops/builtin-__fls.h>
|
|
||||||
#include <asm-generic/bitops/builtin-ffs.h>
|
|
||||||
#include <asm-generic/bitops/const_hweight.h>
|
|
||||||
#include <asm-generic/bitops/lock.h>
|
|
||||||
#include <asm-generic/bitops/find.h>
|
|
||||||
#include <asm-generic/bitops/sched.h>
|
|
||||||
#include <asm-generic/bitops/non-atomic.h>
|
|
||||||
#include <asm-generic/bitops/le.h>
|
|
||||||
|
|
||||||
#endif /* _ASM_TILE_BITOPS_H */
|
|
|
@@ -1,126 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
|
||||||
*
|
|
||||||
* This program is free software; you can redistribute it and/or
|
|
||||||
* modify it under the terms of the GNU General Public License
|
|
||||||
* as published by the Free Software Foundation, version 2.
|
|
||||||
*
|
|
||||||
* This program is distributed in the hope that it will be useful, but
|
|
||||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
|
||||||
* NON INFRINGEMENT. See the GNU General Public License for
|
|
||||||
* more details.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#ifndef _ASM_TILE_BITOPS_32_H
|
|
||||||
#define _ASM_TILE_BITOPS_32_H
|
|
||||||
|
|
||||||
#include <linux/compiler.h>
|
|
||||||
#include <asm/barrier.h>
|
|
||||||
|
|
||||||
/* Tile-specific routines to support <asm/bitops.h>. */
|
|
||||||
unsigned long _atomic_fetch_or(volatile unsigned long *p, unsigned long mask);
|
|
||||||
unsigned long _atomic_fetch_andn(volatile unsigned long *p, unsigned long mask);
|
|
||||||
unsigned long _atomic_fetch_xor(volatile unsigned long *p, unsigned long mask);
|
|
||||||
|
|
||||||
/**
|
|
||||||
* set_bit - Atomically set a bit in memory
|
|
||||||
* @nr: the bit to set
|
|
||||||
* @addr: the address to start counting from
|
|
||||||
*
|
|
||||||
* This function is atomic and may not be reordered.
|
|
||||||
* See __set_bit() if you do not require the atomic guarantees.
|
|
||||||
* Note that @nr may be almost arbitrarily large; this function is not
|
|
||||||
* restricted to acting on a single-word quantity.
|
|
||||||
*/
|
|
||||||
static inline void set_bit(unsigned nr, volatile unsigned long *addr)
|
|
||||||
{
|
|
||||||
_atomic_fetch_or(addr + BIT_WORD(nr), BIT_MASK(nr));
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* clear_bit - Clears a bit in memory
|
|
||||||
* @nr: Bit to clear
|
|
||||||
* @addr: Address to start counting from
|
|
||||||
*
|
|
||||||
* clear_bit() is atomic and may not be reordered.
|
|
||||||
* See __clear_bit() if you do not require the atomic guarantees.
|
|
||||||
* Note that @nr may be almost arbitrarily large; this function is not
|
|
||||||
* restricted to acting on a single-word quantity.
|
|
||||||
*
|
|
||||||
* clear_bit() may not contain a memory barrier, so if it is used for
|
|
||||||
* locking purposes, you should call smp_mb__before_atomic() and/or
|
|
||||||
* smp_mb__after_atomic() to ensure changes are visible on other cpus.
|
|
||||||
*/
|
|
||||||
static inline void clear_bit(unsigned nr, volatile unsigned long *addr)
|
|
||||||
{
|
|
||||||
_atomic_fetch_andn(addr + BIT_WORD(nr), BIT_MASK(nr));
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* change_bit - Toggle a bit in memory
|
|
||||||
* @nr: Bit to change
|
|
||||||
* @addr: Address to start counting from
|
|
||||||
*
|
|
||||||
* change_bit() is atomic and may not be reordered.
|
|
||||||
* See __change_bit() if you do not require the atomic guarantees.
|
|
||||||
* Note that @nr may be almost arbitrarily large; this function is not
|
|
||||||
* restricted to acting on a single-word quantity.
|
|
||||||
*/
|
|
||||||
static inline void change_bit(unsigned nr, volatile unsigned long *addr)
|
|
||||||
{
|
|
||||||
_atomic_fetch_xor(addr + BIT_WORD(nr), BIT_MASK(nr));
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* test_and_set_bit - Set a bit and return its old value
|
|
||||||
* @nr: Bit to set
|
|
||||||
* @addr: Address to count from
|
|
||||||
*
|
|
||||||
* This operation is atomic and cannot be reordered.
|
|
||||||
* It also implies a memory barrier.
|
|
||||||
*/
|
|
||||||
static inline int test_and_set_bit(unsigned nr, volatile unsigned long *addr)
|
|
||||||
{
|
|
||||||
unsigned long mask = BIT_MASK(nr);
|
|
||||||
addr += BIT_WORD(nr);
|
|
||||||
smp_mb(); /* barrier for proper semantics */
|
|
||||||
return (_atomic_fetch_or(addr, mask) & mask) != 0;
|
|
||||||
}
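These routines are typically used to turn a single bit into a one-shot flag or a tiny lock; a sketch with an invented flag word and helper:

#define RESET_PENDING_BIT	0

static unsigned long device_flags;

static void request_reset(void)
{
	/* Only the caller that flips the bit from 0 to 1 queues the work. */
	if (!test_and_set_bit(RESET_PENDING_BIT, &device_flags))
		schedule_reset_work();	/* hypothetical helper */
}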
|
|
||||||
|
|
||||||
/**
|
|
||||||
* test_and_clear_bit - Clear a bit and return its old value
|
|
||||||
* @nr: Bit to clear
|
|
||||||
* @addr: Address to count from
|
|
||||||
*
|
|
||||||
* This operation is atomic and cannot be reordered.
|
|
||||||
* It also implies a memory barrier.
|
|
||||||
*/
|
|
||||||
static inline int test_and_clear_bit(unsigned nr, volatile unsigned long *addr)
|
|
||||||
{
|
|
||||||
unsigned long mask = BIT_MASK(nr);
|
|
||||||
addr += BIT_WORD(nr);
|
|
||||||
smp_mb(); /* barrier for proper semantics */
|
|
||||||
return (_atomic_fetch_andn(addr, mask) & mask) != 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* test_and_change_bit - Change a bit and return its old value
|
|
||||||
* @nr: Bit to change
|
|
||||||
* @addr: Address to count from
|
|
||||||
*
|
|
||||||
* This operation is atomic and cannot be reordered.
|
|
||||||
* It also implies a memory barrier.
|
|
||||||
*/
|
|
||||||
static inline int test_and_change_bit(unsigned nr,
|
|
||||||
volatile unsigned long *addr)
|
|
||||||
{
|
|
||||||
unsigned long mask = BIT_MASK(nr);
|
|
||||||
addr += BIT_WORD(nr);
|
|
||||||
smp_mb(); /* barrier for proper semantics */
|
|
||||||
return (_atomic_fetch_xor(addr, mask) & mask) != 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
#include <asm-generic/bitops/ext2-atomic.h>
|
|
||||||
|
|
||||||
#endif /* _ASM_TILE_BITOPS_32_H */
|
|
|
@@ -1,95 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright 2011 Tilera Corporation. All Rights Reserved.
|
|
||||||
*
|
|
||||||
* This program is free software; you can redistribute it and/or
|
|
||||||
* modify it under the terms of the GNU General Public License
|
|
||||||
* as published by the Free Software Foundation, version 2.
|
|
||||||
*
|
|
||||||
* This program is distributed in the hope that it will be useful, but
|
|
||||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
|
||||||
* NON INFRINGEMENT. See the GNU General Public License for
|
|
||||||
* more details.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#ifndef _ASM_TILE_BITOPS_64_H
|
|
||||||
#define _ASM_TILE_BITOPS_64_H
|
|
||||||
|
|
||||||
#include <linux/compiler.h>
|
|
||||||
#include <asm/cmpxchg.h>
|
|
||||||
|
|
||||||
/* See <asm/bitops.h> for API comments. */
|
|
||||||
|
|
||||||
static inline void set_bit(unsigned nr, volatile unsigned long *addr)
|
|
||||||
{
|
|
||||||
unsigned long mask = (1UL << (nr % BITS_PER_LONG));
|
|
||||||
__insn_fetchor((void *)(addr + nr / BITS_PER_LONG), mask);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void clear_bit(unsigned nr, volatile unsigned long *addr)
|
|
||||||
{
|
|
||||||
unsigned long mask = (1UL << (nr % BITS_PER_LONG));
|
|
||||||
__insn_fetchand((void *)(addr + nr / BITS_PER_LONG), ~mask);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void change_bit(unsigned nr, volatile unsigned long *addr)
|
|
||||||
{
|
|
||||||
unsigned long mask = (1UL << (nr % BITS_PER_LONG));
|
|
||||||
unsigned long guess, oldval;
|
|
||||||
addr += nr / BITS_PER_LONG;
|
|
||||||
oldval = *addr;
|
|
||||||
do {
|
|
||||||
guess = oldval;
|
|
||||||
oldval = cmpxchg(addr, guess, guess ^ mask);
|
|
||||||
} while (guess != oldval);
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
/*
|
|
||||||
* The test_and_xxx_bit() routines require a memory fence before we
|
|
||||||
* start the operation, and after the operation completes. We use
|
|
||||||
* smp_mb() before, and rely on the "!= 0" comparison, plus a compiler
|
|
||||||
* barrier(), to block until the atomic op is complete.
|
|
||||||
*/
|
|
||||||
|
|
||||||
static inline int test_and_set_bit(unsigned nr, volatile unsigned long *addr)
|
|
||||||
{
|
|
||||||
int val;
|
|
||||||
unsigned long mask = (1UL << (nr % BITS_PER_LONG));
|
|
||||||
smp_mb(); /* barrier for proper semantics */
|
|
||||||
val = (__insn_fetchor((void *)(addr + nr / BITS_PER_LONG), mask)
|
|
||||||
& mask) != 0;
|
|
||||||
barrier();
|
|
||||||
return val;
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
static inline int test_and_clear_bit(unsigned nr, volatile unsigned long *addr)
|
|
||||||
{
|
|
||||||
int val;
|
|
||||||
unsigned long mask = (1UL << (nr % BITS_PER_LONG));
|
|
||||||
smp_mb(); /* barrier for proper semantics */
|
|
||||||
val = (__insn_fetchand((void *)(addr + nr / BITS_PER_LONG), ~mask)
|
|
||||||
& mask) != 0;
|
|
||||||
barrier();
|
|
||||||
return val;
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
static inline int test_and_change_bit(unsigned nr,
|
|
||||||
volatile unsigned long *addr)
|
|
||||||
{
|
|
||||||
unsigned long mask = (1UL << (nr % BITS_PER_LONG));
|
|
||||||
unsigned long guess, oldval;
|
|
||||||
addr += nr / BITS_PER_LONG;
|
|
||||||
oldval = *addr;
|
|
||||||
do {
|
|
||||||
guess = oldval;
|
|
||||||
oldval = cmpxchg(addr, guess, guess ^ mask);
|
|
||||||
} while (guess != oldval);
|
|
||||||
return (oldval & mask) != 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
#include <asm-generic/bitops/ext2-atomic-setbit.h>
|
|
||||||
|
|
||||||
#endif /* _ASM_TILE_BITOPS_64_H */
|
|
|
@ -1,64 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
|
||||||
*
|
|
||||||
* This program is free software; you can redistribute it and/or
|
|
||||||
* modify it under the terms of the GNU General Public License
|
|
||||||
* as published by the Free Software Foundation, version 2.
|
|
||||||
*
|
|
||||||
* This program is distributed in the hope that it will be useful, but
|
|
||||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
|
||||||
* NON INFRINGEMENT. See the GNU General Public License for
|
|
||||||
* more details.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#ifndef _ASM_TILE_CACHE_H
|
|
||||||
#define _ASM_TILE_CACHE_H
|
|
||||||
|
|
||||||
#include <arch/chip.h>
|
|
||||||
|
|
||||||
/* bytes per L1 data cache line */
|
|
||||||
#define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
|
|
||||||
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
|
|
||||||
|
|
||||||
/* bytes per L2 cache line */
|
|
||||||
#define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
|
|
||||||
#define L2_CACHE_BYTES (1 << L2_CACHE_SHIFT)
|
|
||||||
#define L2_CACHE_ALIGN(x) (((x)+(L2_CACHE_BYTES-1)) & -L2_CACHE_BYTES)
|
|
||||||
|
|
||||||
/*
|
|
||||||
* TILEPro I/O is not always coherent (networking typically uses coherent
|
|
||||||
* I/O, but PCI traffic does not) and setting ARCH_DMA_MINALIGN to the
|
|
||||||
* L2 cacheline size helps ensure that kernel heap allocations are aligned.
|
|
||||||
* TILE-Gx I/O is always coherent when used on hash-for-home pages.
|
|
||||||
*
|
|
||||||
* However, it's possible at runtime to request not to use hash-for-home
|
|
||||||
* for the kernel heap, in which case the kernel will use flush-and-inval
|
|
||||||
* to manage coherence. As a result, we use L2_CACHE_BYTES for the
|
|
||||||
* DMA minimum alignment to avoid false sharing in the kernel heap.
|
|
||||||
*/
|
|
||||||
#define ARCH_DMA_MINALIGN L2_CACHE_BYTES
|
|
||||||
|
|
||||||
/* use the cache line size for the L2, which is where it counts */
|
|
||||||
#define SMP_CACHE_BYTES_SHIFT L2_CACHE_SHIFT
|
|
||||||
#define SMP_CACHE_BYTES L2_CACHE_BYTES
|
|
||||||
#define INTERNODE_CACHE_SHIFT L2_CACHE_SHIFT
|
|
||||||
#define INTERNODE_CACHE_BYTES L2_CACHE_BYTES
|
|
||||||
|
|
||||||
/* Group together read-mostly things to avoid cache false sharing */
|
|
||||||
#define __read_mostly __attribute__((__section__(".data..read_mostly")))
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Originally we used small TLB pages for kernel data and grouped some
|
|
||||||
* things together as ro-after-init, enforcing the property at the end
|
|
||||||
* of initialization by making those pages read-only and non-coherent.
|
|
||||||
* This allowed better cache utilization since cache inclusion did not
|
|
||||||
* need to be maintained. However, to do this requires an extra TLB
|
|
||||||
* entry, which on balance is more of a performance hit than the
|
|
||||||
* non-coherence is a performance gain, so we now just make "read
|
|
||||||
* mostly" and "ro-after-init" be synonyms. We keep the attribute
|
|
||||||
* separate in case we change our minds at a future date.
|
|
||||||
*/
|
|
||||||
#define __ro_after_init __read_mostly
|
|
||||||
|
|
||||||
#endif /* _ASM_TILE_CACHE_H */
|
|
|
@ -1,160 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
|
||||||
*
|
|
||||||
* This program is free software; you can redistribute it and/or
|
|
||||||
* modify it under the terms of the GNU General Public License
|
|
||||||
* as published by the Free Software Foundation, version 2.
|
|
||||||
*
|
|
||||||
* This program is distributed in the hope that it will be useful, but
|
|
||||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
|
||||||
* NON INFRINGEMENT. See the GNU General Public License for
|
|
||||||
* more details.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#ifndef _ASM_TILE_CACHEFLUSH_H
|
|
||||||
#define _ASM_TILE_CACHEFLUSH_H
|
|
||||||
|
|
||||||
#include <arch/chip.h>
|
|
||||||
|
|
||||||
/* Keep includes the same across arches. */
|
|
||||||
#include <linux/mm.h>
|
|
||||||
#include <linux/cache.h>
|
|
||||||
#include <arch/icache.h>
|
|
||||||
|
|
||||||
/* Caches are physically-indexed and so don't need special treatment */
|
|
||||||
#define flush_cache_all() do { } while (0)
|
|
||||||
#define flush_cache_mm(mm) do { } while (0)
|
|
||||||
#define flush_cache_dup_mm(mm) do { } while (0)
|
|
||||||
#define flush_cache_range(vma, start, end) do { } while (0)
|
|
||||||
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
|
|
||||||
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
|
|
||||||
#define flush_dcache_page(page) do { } while (0)
|
|
||||||
#define flush_dcache_mmap_lock(mapping) do { } while (0)
|
|
||||||
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
|
|
||||||
#define flush_cache_vmap(start, end) do { } while (0)
|
|
||||||
#define flush_cache_vunmap(start, end) do { } while (0)
|
|
||||||
#define flush_icache_page(vma, pg) do { } while (0)
|
|
||||||
#define flush_icache_user_range(vma, pg, adr, len) do { } while (0)
|
|
||||||
|
|
||||||
/* Flush the icache just on this cpu */
|
|
||||||
extern void __flush_icache_range(unsigned long start, unsigned long end);
|
|
||||||
|
|
||||||
/* Flush the entire icache on this cpu. */
|
|
||||||
#define __flush_icache() __flush_icache_range(0, CHIP_L1I_CACHE_SIZE())
|
|
||||||
|
|
||||||
#ifdef CONFIG_SMP
|
|
||||||
/*
|
|
||||||
* When the kernel writes to its own text we need to do an SMP
|
|
||||||
* broadcast to make the L1I coherent everywhere. This includes
|
|
||||||
* module load and single step.
|
|
||||||
*/
|
|
||||||
extern void flush_icache_range(unsigned long start, unsigned long end);
|
|
||||||
#else
|
|
||||||
#define flush_icache_range __flush_icache_range
|
|
||||||
#endif
|
|
||||||
|
|
||||||
/*
|
|
||||||
* An update to an executable user page requires icache flushing.
|
|
||||||
* We could carefully update only tiles that are running this process,
|
|
||||||
* and rely on the fact that we flush the icache on every context
|
|
||||||
* switch to avoid doing extra work here. But for now, I'll be
|
|
||||||
* conservative and just do a global icache flush.
|
|
||||||
*/
|
|
||||||
static inline void copy_to_user_page(struct vm_area_struct *vma,
|
|
||||||
struct page *page, unsigned long vaddr,
|
|
||||||
void *dst, void *src, int len)
|
|
||||||
{
|
|
||||||
memcpy(dst, src, len);
|
|
||||||
if (vma->vm_flags & VM_EXEC) {
|
|
||||||
flush_icache_range((unsigned long) dst,
|
|
||||||
(unsigned long) dst + len);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
|
|
||||||
memcpy((dst), (src), (len))
|
|
||||||
|
|
||||||
/* Flush a VA range; pads to L2 cacheline boundaries. */
|
|
||||||
static inline void __flush_buffer(void *buffer, size_t size)
|
|
||||||
{
|
|
||||||
char *next = (char *)((long)buffer & -L2_CACHE_BYTES);
|
|
||||||
char *finish = (char *)L2_CACHE_ALIGN((long)buffer + size);
|
|
||||||
while (next < finish) {
|
|
||||||
__insn_flush(next);
|
|
||||||
next += CHIP_FLUSH_STRIDE();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Flush & invalidate a VA range; pads to L2 cacheline boundaries. */
|
|
||||||
static inline void __finv_buffer(void *buffer, size_t size)
|
|
||||||
{
|
|
||||||
char *next = (char *)((long)buffer & -L2_CACHE_BYTES);
|
|
||||||
char *finish = (char *)L2_CACHE_ALIGN((long)buffer + size);
|
|
||||||
while (next < finish) {
|
|
||||||
__insn_finv(next);
|
|
||||||
next += CHIP_FINV_STRIDE();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Flush a locally-homecached VA range and wait for the evicted
|
|
||||||
* cachelines to hit memory.
|
|
||||||
*/
|
|
||||||
static inline void flush_buffer_local(void *buffer, size_t size)
|
|
||||||
{
|
|
||||||
__flush_buffer(buffer, size);
|
|
||||||
mb_incoherent();
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Flush and invalidate a locally-homecached VA range and wait for the
|
|
||||||
* evicted cachelines to hit memory.
|
|
||||||
*/
|
|
||||||
static inline void finv_buffer_local(void *buffer, size_t size)
|
|
||||||
{
|
|
||||||
__finv_buffer(buffer, size);
|
|
||||||
mb_incoherent();
|
|
||||||
}
|
|
||||||
|
|
||||||
#ifdef __tilepro__
|
|
||||||
/* Invalidate a VA range; pads to L2 cacheline boundaries. */
|
|
||||||
static inline void __inv_buffer(void *buffer, size_t size)
|
|
||||||
{
|
|
||||||
char *next = (char *)((long)buffer & -L2_CACHE_BYTES);
|
|
||||||
char *finish = (char *)L2_CACHE_ALIGN((long)buffer + size);
|
|
||||||
while (next < finish) {
|
|
||||||
__insn_inv(next);
|
|
||||||
next += CHIP_INV_STRIDE();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Invalidate a VA range and wait for it to be complete. */
|
|
||||||
static inline void inv_buffer(void *buffer, size_t size)
|
|
||||||
{
|
|
||||||
__inv_buffer(buffer, size);
|
|
||||||
mb();
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Flush and invalidate a VA range that is homed remotely, waiting
|
|
||||||
* until the memory controller holds the flushed values. If "hfh" is
|
|
||||||
* true, we will do a more expensive flush involving additional loads
|
|
||||||
* to make sure we have touched all the possible home cpus of a buffer
|
|
||||||
* that is homed with "hash for home".
|
|
||||||
*/
|
|
||||||
void finv_buffer_remote(void *buffer, size_t size, int hfh);
|
|
||||||
|
|
||||||
/*
|
|
||||||
* On SMP systems, when the scheduler does migration-cost autodetection,
|
|
||||||
* it needs a way to flush as much of the CPU's caches as possible:
|
|
||||||
*
|
|
||||||
* TODO: fill this in!
|
|
||||||
*/
|
|
||||||
static inline void sched_cacheflush(void)
|
|
||||||
{
|
|
||||||
}
|
|
||||||
|
|
||||||
#endif /* _ASM_TILE_CACHEFLUSH_H */
|
|
|
@ -1,42 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
|
||||||
*
|
|
||||||
* This program is free software; you can redistribute it and/or
|
|
||||||
* modify it under the terms of the GNU General Public License
|
|
||||||
* as published by the Free Software Foundation, version 2.
|
|
||||||
*
|
|
||||||
* This program is distributed in the hope that it will be useful, but
|
|
||||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
|
||||||
* NON INFRINGEMENT. See the GNU General Public License for
|
|
||||||
* more details.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#ifndef _ASM_TILE_CHECKSUM_H
|
|
||||||
#define _ASM_TILE_CHECKSUM_H
|
|
||||||
|
|
||||||
#include <asm-generic/checksum.h>
|
|
||||||
|
|
||||||
/* Allow us to provide a more optimized do_csum(). */
|
|
||||||
__wsum do_csum(const unsigned char *buff, int len);
|
|
||||||
#define do_csum do_csum
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Return the sum of all the 16-bit subwords in a long.
|
|
||||||
* This sums two subwords on a 32-bit machine, and four on 64 bits.
|
|
||||||
* The implementation does two vector adds to capture any overflow.
|
|
||||||
*/
|
|
||||||
static inline unsigned int csum_long(unsigned long x)
|
|
||||||
{
|
|
||||||
unsigned long ret;
|
|
||||||
#ifdef __tilegx__
|
|
||||||
ret = __insn_v2sadu(x, 0);
|
|
||||||
ret = __insn_v2sadu(ret, 0);
|
|
||||||
#else
|
|
||||||
ret = __insn_sadh_u(x, 0);
|
|
||||||
ret = __insn_sadh_u(ret, 0);
|
|
||||||
#endif
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
#endif /* _ASM_TILE_CHECKSUM_H */
|
|
|
@ -1,132 +0,0 @@
|
||||||
/*
|
|
||||||
* cmpxchg.h -- forked from asm/atomic.h with this copyright:
|
|
||||||
*
|
|
||||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
|
||||||
*
|
|
||||||
* This program is free software; you can redistribute it and/or
|
|
||||||
* modify it under the terms of the GNU General Public License
|
|
||||||
* as published by the Free Software Foundation, version 2.
|
|
||||||
*
|
|
||||||
* This program is distributed in the hope that it will be useful, but
|
|
||||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
|
||||||
* NON INFRINGEMENT. See the GNU General Public License for
|
|
||||||
* more details.
|
|
||||||
*
|
|
||||||
*/
|
|
||||||
|
|
||||||
#ifndef _ASM_TILE_CMPXCHG_H
|
|
||||||
#define _ASM_TILE_CMPXCHG_H
|
|
||||||
|
|
||||||
#ifndef __ASSEMBLY__
|
|
||||||
|
|
||||||
#include <asm/barrier.h>
|
|
||||||
|
|
||||||
/* Nonexistent functions intended to cause compile errors. */
|
|
||||||
extern void __xchg_called_with_bad_pointer(void)
|
|
||||||
__compiletime_error("Bad argument size for xchg");
|
|
||||||
extern void __cmpxchg_called_with_bad_pointer(void)
|
|
||||||
__compiletime_error("Bad argument size for cmpxchg");
|
|
||||||
|
|
||||||
#ifndef __tilegx__
|
|
||||||
|
|
||||||
/* Note the _atomic_xxx() routines include a final mb(). */
|
|
||||||
int _atomic_xchg(int *ptr, int n);
|
|
||||||
int _atomic_xchg_add(int *v, int i);
|
|
||||||
int _atomic_xchg_add_unless(int *v, int a, int u);
|
|
||||||
int _atomic_cmpxchg(int *ptr, int o, int n);
|
|
||||||
long long _atomic64_xchg(long long *v, long long n);
|
|
||||||
long long _atomic64_xchg_add(long long *v, long long i);
|
|
||||||
long long _atomic64_xchg_add_unless(long long *v, long long a, long long u);
|
|
||||||
long long _atomic64_cmpxchg(long long *v, long long o, long long n);
|
|
||||||
|
|
||||||
#define xchg(ptr, n) \
|
|
||||||
({ \
|
|
||||||
if (sizeof(*(ptr)) != 4) \
|
|
||||||
__xchg_called_with_bad_pointer(); \
|
|
||||||
smp_mb(); \
|
|
||||||
(typeof(*(ptr)))_atomic_xchg((int *)(ptr), (int)(n)); \
|
|
||||||
})
|
|
||||||
|
|
||||||
#define cmpxchg(ptr, o, n) \
|
|
||||||
({ \
|
|
||||||
if (sizeof(*(ptr)) != 4) \
|
|
||||||
__cmpxchg_called_with_bad_pointer(); \
|
|
||||||
smp_mb(); \
|
|
||||||
(typeof(*(ptr)))_atomic_cmpxchg((int *)ptr, (int)o, \
|
|
||||||
(int)n); \
|
|
||||||
})
|
|
||||||
|
|
||||||
#define xchg64(ptr, n) \
|
|
||||||
({ \
|
|
||||||
if (sizeof(*(ptr)) != 8) \
|
|
||||||
__xchg_called_with_bad_pointer(); \
|
|
||||||
smp_mb(); \
|
|
||||||
(typeof(*(ptr)))_atomic64_xchg((long long *)(ptr), \
|
|
||||||
(long long)(n)); \
|
|
||||||
})
|
|
||||||
|
|
||||||
#define cmpxchg64(ptr, o, n) \
|
|
||||||
({ \
|
|
||||||
if (sizeof(*(ptr)) != 8) \
|
|
||||||
__cmpxchg_called_with_bad_pointer(); \
|
|
||||||
smp_mb(); \
|
|
||||||
(typeof(*(ptr)))_atomic64_cmpxchg((long long *)ptr, \
|
|
||||||
(long long)o, (long long)n); \
|
|
||||||
})
|
|
||||||
|
|
||||||
#else
|
|
||||||
|
|
||||||
#define xchg(ptr, n) \
|
|
||||||
({ \
|
|
||||||
typeof(*(ptr)) __x; \
|
|
||||||
smp_mb(); \
|
|
||||||
switch (sizeof(*(ptr))) { \
|
|
||||||
case 4: \
|
|
||||||
__x = (typeof(__x))(unsigned long) \
|
|
||||||
__insn_exch4((ptr), \
|
|
||||||
(u32)(unsigned long)(n)); \
|
|
||||||
break; \
|
|
||||||
case 8: \
|
|
||||||
__x = (typeof(__x)) \
|
|
||||||
__insn_exch((ptr), (unsigned long)(n)); \
|
|
||||||
break; \
|
|
||||||
default: \
|
|
||||||
__xchg_called_with_bad_pointer(); \
|
|
||||||
break; \
|
|
||||||
} \
|
|
||||||
smp_mb(); \
|
|
||||||
__x; \
|
|
||||||
})
|
|
||||||
|
|
||||||
#define cmpxchg(ptr, o, n) \
|
|
||||||
({ \
|
|
||||||
typeof(*(ptr)) __x; \
|
|
||||||
__insn_mtspr(SPR_CMPEXCH_VALUE, (unsigned long)(o)); \
|
|
||||||
smp_mb(); \
|
|
||||||
switch (sizeof(*(ptr))) { \
|
|
||||||
case 4: \
|
|
||||||
__x = (typeof(__x))(unsigned long) \
|
|
||||||
__insn_cmpexch4((ptr), \
|
|
||||||
(u32)(unsigned long)(n)); \
|
|
||||||
break; \
|
|
||||||
case 8: \
|
|
||||||
__x = (typeof(__x))__insn_cmpexch((ptr), \
|
|
||||||
(long long)(n)); \
|
|
||||||
break; \
|
|
||||||
default: \
|
|
||||||
__cmpxchg_called_with_bad_pointer(); \
|
|
||||||
break; \
|
|
||||||
} \
|
|
||||||
smp_mb(); \
|
|
||||||
__x; \
|
|
||||||
})
|
|
||||||
|
|
||||||
#define xchg64 xchg
|
|
||||||
#define cmpxchg64 cmpxchg
|
|
||||||
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#endif /* __ASSEMBLY__ */
|
|
||||||
|
|
||||||
#endif /* _ASM_TILE_CMPXCHG_H */
|
|
|
@ -1,233 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
|
||||||
*
|
|
||||||
* This program is free software; you can redistribute it and/or
|
|
||||||
* modify it under the terms of the GNU General Public License
|
|
||||||
* as published by the Free Software Foundation, version 2.
|
|
||||||
*
|
|
||||||
* This program is distributed in the hope that it will be useful, but
|
|
||||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
|
||||||
* NON INFRINGEMENT. See the GNU General Public License for
|
|
||||||
* more details.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#ifndef _ASM_TILE_COMPAT_H
|
|
||||||
#define _ASM_TILE_COMPAT_H
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Architecture specific compatibility types
|
|
||||||
*/
|
|
||||||
#include <linux/types.h>
|
|
||||||
#include <linux/sched.h>
|
|
||||||
|
|
||||||
#define COMPAT_USER_HZ 100
|
|
||||||
|
|
||||||
/* "long" and pointer-based types are different. */
|
|
||||||
typedef s32 compat_long_t;
|
|
||||||
typedef u32 compat_ulong_t;
|
|
||||||
typedef u32 compat_size_t;
|
|
||||||
typedef s32 compat_ssize_t;
|
|
||||||
typedef s32 compat_off_t;
|
|
||||||
typedef s32 compat_time_t;
|
|
||||||
typedef s32 compat_clock_t;
|
|
||||||
typedef u32 compat_ino_t;
|
|
||||||
typedef u32 compat_caddr_t;
|
|
||||||
typedef u32 compat_uptr_t;
|
|
||||||
|
|
||||||
/* Many types are "int" or otherwise the same. */
|
|
||||||
typedef __kernel_pid_t compat_pid_t;
|
|
||||||
typedef __kernel_uid_t __compat_uid_t;
|
|
||||||
typedef __kernel_gid_t __compat_gid_t;
|
|
||||||
typedef __kernel_uid32_t __compat_uid32_t;
|
|
||||||
typedef __kernel_uid32_t __compat_gid32_t;
|
|
||||||
typedef __kernel_mode_t compat_mode_t;
|
|
||||||
typedef __kernel_dev_t compat_dev_t;
|
|
||||||
typedef __kernel_loff_t compat_loff_t;
|
|
||||||
typedef __kernel_ipc_pid_t compat_ipc_pid_t;
|
|
||||||
typedef __kernel_daddr_t compat_daddr_t;
|
|
||||||
typedef __kernel_fsid_t compat_fsid_t;
|
|
||||||
typedef __kernel_timer_t compat_timer_t;
|
|
||||||
typedef __kernel_key_t compat_key_t;
|
|
||||||
typedef int compat_int_t;
|
|
||||||
typedef s64 compat_s64;
|
|
||||||
typedef uint compat_uint_t;
|
|
||||||
typedef u64 compat_u64;
|
|
||||||
|
|
||||||
/* We use the same register dump format in 32-bit images. */
|
|
||||||
typedef unsigned long compat_elf_greg_t;
|
|
||||||
#define COMPAT_ELF_NGREG (sizeof(struct pt_regs) / sizeof(compat_elf_greg_t))
|
|
||||||
typedef compat_elf_greg_t compat_elf_gregset_t[COMPAT_ELF_NGREG];
|
|
||||||
|
|
||||||
struct compat_timespec {
|
|
||||||
compat_time_t tv_sec;
|
|
||||||
s32 tv_nsec;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct compat_timeval {
|
|
||||||
compat_time_t tv_sec;
|
|
||||||
s32 tv_usec;
|
|
||||||
};
|
|
||||||
|
|
||||||
#define compat_stat stat
|
|
||||||
#define compat_statfs statfs
|
|
||||||
|
|
||||||
struct compat_sysctl {
|
|
||||||
unsigned int name;
|
|
||||||
int nlen;
|
|
||||||
unsigned int oldval;
|
|
||||||
unsigned int oldlenp;
|
|
||||||
unsigned int newval;
|
|
||||||
unsigned int newlen;
|
|
||||||
unsigned int __unused[4];
|
|
||||||
};
|
|
||||||
|
|
||||||
|
|
||||||
struct compat_flock {
|
|
||||||
short l_type;
|
|
||||||
short l_whence;
|
|
||||||
compat_off_t l_start;
|
|
||||||
compat_off_t l_len;
|
|
||||||
compat_pid_t l_pid;
|
|
||||||
};
|
|
||||||
|
|
||||||
#define F_GETLK64 12 /* using 'struct flock64' */
|
|
||||||
#define F_SETLK64 13
|
|
||||||
#define F_SETLKW64 14
|
|
||||||
|
|
||||||
struct compat_flock64 {
|
|
||||||
short l_type;
|
|
||||||
short l_whence;
|
|
||||||
compat_loff_t l_start;
|
|
||||||
compat_loff_t l_len;
|
|
||||||
compat_pid_t l_pid;
|
|
||||||
};
|
|
||||||
|
|
||||||
#define COMPAT_RLIM_INFINITY 0xffffffff
|
|
||||||
|
|
||||||
#define _COMPAT_NSIG 64
|
|
||||||
#define _COMPAT_NSIG_BPW 32
|
|
||||||
|
|
||||||
typedef u32 compat_sigset_word;
|
|
||||||
|
|
||||||
#define COMPAT_OFF_T_MAX 0x7fffffff
|
|
||||||
|
|
||||||
struct compat_ipc64_perm {
|
|
||||||
compat_key_t key;
|
|
||||||
__compat_uid32_t uid;
|
|
||||||
__compat_gid32_t gid;
|
|
||||||
__compat_uid32_t cuid;
|
|
||||||
__compat_gid32_t cgid;
|
|
||||||
unsigned short mode;
|
|
||||||
unsigned short __pad1;
|
|
||||||
unsigned short seq;
|
|
||||||
unsigned short __pad2;
|
|
||||||
compat_ulong_t unused1;
|
|
||||||
compat_ulong_t unused2;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct compat_semid64_ds {
|
|
||||||
struct compat_ipc64_perm sem_perm;
|
|
||||||
compat_time_t sem_otime;
|
|
||||||
compat_ulong_t __unused1;
|
|
||||||
compat_time_t sem_ctime;
|
|
||||||
compat_ulong_t __unused2;
|
|
||||||
compat_ulong_t sem_nsems;
|
|
||||||
compat_ulong_t __unused3;
|
|
||||||
compat_ulong_t __unused4;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct compat_msqid64_ds {
|
|
||||||
struct compat_ipc64_perm msg_perm;
|
|
||||||
compat_time_t msg_stime;
|
|
||||||
compat_ulong_t __unused1;
|
|
||||||
compat_time_t msg_rtime;
|
|
||||||
compat_ulong_t __unused2;
|
|
||||||
compat_time_t msg_ctime;
|
|
||||||
compat_ulong_t __unused3;
|
|
||||||
compat_ulong_t msg_cbytes;
|
|
||||||
compat_ulong_t msg_qnum;
|
|
||||||
compat_ulong_t msg_qbytes;
|
|
||||||
compat_pid_t msg_lspid;
|
|
||||||
compat_pid_t msg_lrpid;
|
|
||||||
compat_ulong_t __unused4;
|
|
||||||
compat_ulong_t __unused5;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct compat_shmid64_ds {
|
|
||||||
struct compat_ipc64_perm shm_perm;
|
|
||||||
compat_size_t shm_segsz;
|
|
||||||
compat_time_t shm_atime;
|
|
||||||
compat_ulong_t __unused1;
|
|
||||||
compat_time_t shm_dtime;
|
|
||||||
compat_ulong_t __unused2;
|
|
||||||
compat_time_t shm_ctime;
|
|
||||||
compat_ulong_t __unused3;
|
|
||||||
compat_pid_t shm_cpid;
|
|
||||||
compat_pid_t shm_lpid;
|
|
||||||
compat_ulong_t shm_nattch;
|
|
||||||
compat_ulong_t __unused4;
|
|
||||||
compat_ulong_t __unused5;
|
|
||||||
};
|
|
||||||
|
|
||||||
/*
|
|
||||||
* A pointer passed in from user mode. This should not
|
|
||||||
* be used for syscall parameters, just declare them
|
|
||||||
* as pointers because the syscall entry code will have
|
|
||||||
* appropriately converted them already.
|
|
||||||
*/
|
|
||||||
|
|
||||||
static inline void __user *compat_ptr(compat_uptr_t uptr)
|
|
||||||
{
|
|
||||||
return (void __user *)(long)(s32)uptr;
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline compat_uptr_t ptr_to_compat(void __user *uptr)
|
|
||||||
{
|
|
||||||
return (u32)(unsigned long)uptr;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Sign-extend when storing a kernel pointer to a user's ptregs. */
|
|
||||||
static inline unsigned long ptr_to_compat_reg(void __user *uptr)
|
|
||||||
{
|
|
||||||
return (long)(int)(long __force)uptr;
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void __user *arch_compat_alloc_user_space(long len)
|
|
||||||
{
|
|
||||||
struct pt_regs *regs = task_pt_regs(current);
|
|
||||||
return (void __user *)regs->sp - len;
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline int is_compat_task(void)
|
|
||||||
{
|
|
||||||
return current_thread_info()->status & TS_COMPAT;
|
|
||||||
}
|
|
||||||
|
|
||||||
extern int compat_setup_rt_frame(struct ksignal *ksig, sigset_t *set,
|
|
||||||
struct pt_regs *regs);
|
|
||||||
|
|
||||||
/* Compat syscalls. */
|
|
||||||
struct compat_siginfo;
|
|
||||||
struct compat_sigaltstack;
|
|
||||||
long compat_sys_rt_sigreturn(void);
|
|
||||||
long compat_sys_truncate64(char __user *filename, u32 dummy, u32 low, u32 high);
|
|
||||||
long compat_sys_ftruncate64(unsigned int fd, u32 dummy, u32 low, u32 high);
|
|
||||||
long compat_sys_pread64(unsigned int fd, char __user *ubuf, size_t count,
|
|
||||||
u32 dummy, u32 low, u32 high);
|
|
||||||
long compat_sys_pwrite64(unsigned int fd, char __user *ubuf, size_t count,
|
|
||||||
u32 dummy, u32 low, u32 high);
|
|
||||||
long compat_sys_sync_file_range2(int fd, unsigned int flags,
|
|
||||||
u32 offset_lo, u32 offset_hi,
|
|
||||||
u32 nbytes_lo, u32 nbytes_hi);
|
|
||||||
long compat_sys_fallocate(int fd, int mode,
|
|
||||||
u32 offset_lo, u32 offset_hi,
|
|
||||||
u32 len_lo, u32 len_hi);
|
|
||||||
long compat_sys_llseek(unsigned int fd, unsigned int offset_high,
|
|
||||||
unsigned int offset_low, loff_t __user * result,
|
|
||||||
unsigned int origin);
|
|
||||||
|
|
||||||
/* Assembly trampoline to avoid clobbering r0. */
|
|
||||||
long _compat_sys_rt_sigreturn(void);
|
|
||||||
|
|
||||||
#endif /* _ASM_TILE_COMPAT_H */
|
|
|
@ -1,31 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
|
||||||
*
|
|
||||||
* This program is free software; you can redistribute it and/or
|
|
||||||
* modify it under the terms of the GNU General Public License
|
|
||||||
* as published by the Free Software Foundation, version 2.
|
|
||||||
*
|
|
||||||
* This program is distributed in the hope that it will be useful, but
|
|
||||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
|
||||||
* NON INFRINGEMENT. See the GNU General Public License for
|
|
||||||
* more details.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#ifndef _ASM_TILE_CURRENT_H
|
|
||||||
#define _ASM_TILE_CURRENT_H
|
|
||||||
|
|
||||||
#include <linux/thread_info.h>
|
|
||||||
|
|
||||||
struct task_struct;
|
|
||||||
|
|
||||||
static inline struct task_struct *get_current(void)
|
|
||||||
{
|
|
||||||
return current_thread_info()->task;
|
|
||||||
}
|
|
||||||
#define current get_current()
|
|
||||||
|
|
||||||
/* Return a usable "task_struct" pointer even if the real one is corrupt. */
|
|
||||||
struct task_struct *validate_current(void);
|
|
||||||
|
|
||||||
#endif /* _ASM_TILE_CURRENT_H */
|
|
|
@ -1,34 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
|
||||||
*
|
|
||||||
* This program is free software; you can redistribute it and/or
|
|
||||||
* modify it under the terms of the GNU General Public License
|
|
||||||
* as published by the Free Software Foundation, version 2.
|
|
||||||
*
|
|
||||||
* This program is distributed in the hope that it will be useful, but
|
|
||||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
|
||||||
* NON INFRINGEMENT. See the GNU General Public License for
|
|
||||||
* more details.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#ifndef _ASM_TILE_DELAY_H
|
|
||||||
#define _ASM_TILE_DELAY_H
|
|
||||||
|
|
||||||
/* Undefined functions to get compile-time errors. */
|
|
||||||
extern void __bad_udelay(void);
|
|
||||||
extern void __bad_ndelay(void);
|
|
||||||
|
|
||||||
extern void __udelay(unsigned long usecs);
|
|
||||||
extern void __ndelay(unsigned long nsecs);
|
|
||||||
extern void __delay(unsigned long loops);
|
|
||||||
|
|
||||||
#define udelay(n) (__builtin_constant_p(n) ? \
|
|
||||||
((n) > 20000 ? __bad_udelay() : __ndelay((n) * 1000)) : \
|
|
||||||
__udelay(n))
|
|
||||||
|
|
||||||
#define ndelay(n) (__builtin_constant_p(n) ? \
|
|
||||||
((n) > 20000 ? __bad_ndelay() : __ndelay(n)) : \
|
|
||||||
__ndelay(n))
|
|
||||||
|
|
||||||
#endif /* _ASM_TILE_DELAY_H */
|
|
|
@ -1,33 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
|
||||||
*
|
|
||||||
* This program is free software; you can redistribute it and/or
|
|
||||||
* modify it under the terms of the GNU General Public License
|
|
||||||
* as published by the Free Software Foundation, version 2.
|
|
||||||
*
|
|
||||||
* This program is distributed in the hope that it will be useful, but
|
|
||||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
|
||||||
* NON INFRINGEMENT. See the GNU General Public License for
|
|
||||||
* more details.
|
|
||||||
* Arch specific extensions to struct device
|
|
||||||
*/
|
|
||||||
|
|
||||||
#ifndef _ASM_TILE_DEVICE_H
|
|
||||||
#define _ASM_TILE_DEVICE_H
|
|
||||||
|
|
||||||
struct dev_archdata {
|
|
||||||
/* Offset of the DMA address from the PA. */
|
|
||||||
dma_addr_t dma_offset;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Highest DMA address that can be generated by devices that
|
|
||||||
* have limited DMA capability, i.e. non 64-bit capable.
|
|
||||||
*/
|
|
||||||
dma_addr_t max_direct_dma_addr;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct pdev_archdata {
|
|
||||||
};
|
|
||||||
|
|
||||||
#endif /* _ASM_TILE_DEVICE_H */
|
|
|
@ -1,17 +0,0 @@
|
||||||
/* SPDX-License-Identifier: GPL-2.0 */
|
|
||||||
#ifndef _ASM_TILE_DIV64_H
|
|
||||||
#define _ASM_TILE_DIV64_H
|
|
||||||
|
|
||||||
#include <linux/types.h>
|
|
||||||
|
|
||||||
#ifdef __tilegx__
|
|
||||||
static inline u64 mul_u32_u32(u32 a, u32 b)
|
|
||||||
{
|
|
||||||
return __insn_mul_lu_lu(a, b);
|
|
||||||
}
|
|
||||||
#define mul_u32_u32 mul_u32_u32
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#include <asm-generic/div64.h>
|
|
||||||
|
|
||||||
#endif /* _ASM_TILE_DIV64_H */
|
|
|
@ -1,50 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
|
||||||
*
|
|
||||||
* This program is free software; you can redistribute it and/or
|
|
||||||
* modify it under the terms of the GNU General Public License
|
|
||||||
* as published by the Free Software Foundation, version 2.
|
|
||||||
*
|
|
||||||
* This program is distributed in the hope that it will be useful, but
|
|
||||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
|
||||||
* NON INFRINGEMENT. See the GNU General Public License for
|
|
||||||
* more details.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#ifndef _ASM_TILE_DMA_MAPPING_H
|
|
||||||
#define _ASM_TILE_DMA_MAPPING_H
|
|
||||||
|
|
||||||
#include <linux/mm.h>
|
|
||||||
#include <linux/scatterlist.h>
|
|
||||||
#include <linux/cache.h>
|
|
||||||
#include <linux/io.h>
|
|
||||||
|
|
||||||
#ifdef __tilegx__
|
|
||||||
#define ARCH_HAS_DMA_GET_REQUIRED_MASK
|
|
||||||
#endif
|
|
||||||
|
|
||||||
extern const struct dma_map_ops *tile_dma_map_ops;
|
|
||||||
extern const struct dma_map_ops *gx_pci_dma_map_ops;
|
|
||||||
extern const struct dma_map_ops *gx_legacy_pci_dma_map_ops;
|
|
||||||
extern const struct dma_map_ops *gx_hybrid_pci_dma_map_ops;
|
|
||||||
|
|
||||||
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
|
|
||||||
{
|
|
||||||
return tile_dma_map_ops;
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline dma_addr_t get_dma_offset(struct device *dev)
|
|
||||||
{
|
|
||||||
return dev->archdata.dma_offset;
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void set_dma_offset(struct device *dev, dma_addr_t off)
|
|
||||||
{
|
|
||||||
dev->archdata.dma_offset = off;
|
|
||||||
}
|
|
||||||
|
|
||||||
#define HAVE_ARCH_DMA_SET_MASK 1
|
|
||||||
int dma_set_mask(struct device *dev, u64 mask);
|
|
||||||
|
|
||||||
#endif /* _ASM_TILE_DMA_MAPPING_H */
|
|
|
@ -1,25 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
|
||||||
*
|
|
||||||
* This program is free software; you can redistribute it and/or
|
|
||||||
* modify it under the terms of the GNU General Public License
|
|
||||||
* as published by the Free Software Foundation, version 2.
|
|
||||||
*
|
|
||||||
* This program is distributed in the hope that it will be useful, but
|
|
||||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
|
||||||
* NON INFRINGEMENT. See the GNU General Public License for
|
|
||||||
* more details.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#ifndef _ASM_TILE_DMA_H
|
|
||||||
#define _ASM_TILE_DMA_H
|
|
||||||
|
|
||||||
#include <asm-generic/dma.h>
|
|
||||||
|
|
||||||
/* Needed by drivers/pci/quirks.c */
|
|
||||||
#ifdef CONFIG_PCI
|
|
||||||
extern int isa_dma_bridge_buggy;
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#endif /* _ASM_TILE_DMA_H */
|
|
|
@ -1,182 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
|
||||||
*
|
|
||||||
* This program is free software; you can redistribute it and/or
|
|
||||||
* modify it under the terms of the GNU General Public License
|
|
||||||
* as published by the Free Software Foundation, version 2.
|
|
||||||
*
|
|
||||||
* This program is distributed in the hope that it will be useful, but
|
|
||||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
|
||||||
* NON INFRINGEMENT. See the GNU General Public License for
|
|
||||||
* more details.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#ifndef _ASM_TILE_ELF_H
|
|
||||||
#define _ASM_TILE_ELF_H
|
|
||||||
|
|
||||||
/*
|
|
||||||
* ELF register definitions.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#include <arch/chip.h>
|
|
||||||
|
|
||||||
#include <linux/ptrace.h>
|
|
||||||
#include <linux/elf-em.h>
|
|
||||||
#include <asm/byteorder.h>
|
|
||||||
#include <asm/page.h>
|
|
||||||
|
|
||||||
typedef unsigned long elf_greg_t;
|
|
||||||
|
|
||||||
#define ELF_NGREG (sizeof(struct pt_regs) / sizeof(elf_greg_t))
|
|
||||||
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
|
|
||||||
|
|
||||||
/* Provide a nominal data structure. */
|
|
||||||
#define ELF_NFPREG 0
|
|
||||||
typedef double elf_fpreg_t;
|
|
||||||
typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
|
|
||||||
|
|
||||||
#ifdef __tilegx__
|
|
||||||
#define ELF_CLASS ELFCLASS64
|
|
||||||
#else
|
|
||||||
#define ELF_CLASS ELFCLASS32
|
|
||||||
#endif
|
|
||||||
#ifdef __BIG_ENDIAN__
|
|
||||||
#define ELF_DATA ELFDATA2MSB
|
|
||||||
#else
|
|
||||||
#define ELF_DATA ELFDATA2LSB
|
|
||||||
#endif
|
|
||||||
|
|
||||||
/*
|
|
||||||
* There seems to be a bug in how compat_binfmt_elf.c works: it
|
|
||||||
* #undefs ELF_ARCH, but it is then used in binfmt_elf.c for fill_note_info().
|
|
||||||
* Hack around this by providing an enum value of ELF_ARCH.
|
|
||||||
*/
|
|
||||||
enum { ELF_ARCH = CHIP_ELF_TYPE() };
|
|
||||||
#define ELF_ARCH ELF_ARCH
|
|
||||||
|
|
||||||
/*
|
|
||||||
* This is used to ensure we don't load something for the wrong architecture.
|
|
||||||
*/
|
|
||||||
#define elf_check_arch(x) \
|
|
||||||
((x)->e_ident[EI_CLASS] == ELF_CLASS && \
|
|
||||||
(x)->e_ident[EI_DATA] == ELF_DATA && \
|
|
||||||
(x)->e_machine == CHIP_ELF_TYPE())
|
|
||||||
|
|
||||||
/* The module loader only handles a few relocation types. */
|
|
||||||
#ifndef __tilegx__
|
|
||||||
#define R_TILE_32 1
|
|
||||||
#define R_TILE_JOFFLONG_X1 15
|
|
||||||
#define R_TILE_IMM16_X0_LO 25
|
|
||||||
#define R_TILE_IMM16_X1_LO 26
|
|
||||||
#define R_TILE_IMM16_X0_HA 29
|
|
||||||
#define R_TILE_IMM16_X1_HA 30
|
|
||||||
#else
|
|
||||||
#define R_TILEGX_64 1
|
|
||||||
#define R_TILEGX_JUMPOFF_X1 21
|
|
||||||
#define R_TILEGX_IMM16_X0_HW0 36
|
|
||||||
#define R_TILEGX_IMM16_X1_HW0 37
|
|
||||||
#define R_TILEGX_IMM16_X0_HW1 38
|
|
||||||
#define R_TILEGX_IMM16_X1_HW1 39
|
|
||||||
#define R_TILEGX_IMM16_X0_HW2_LAST 48
|
|
||||||
#define R_TILEGX_IMM16_X1_HW2_LAST 49
|
|
||||||
#endif
|
|
||||||
|
|
||||||
/* Use standard page size for core dumps. */
|
|
||||||
#define ELF_EXEC_PAGESIZE PAGE_SIZE
|
|
||||||
|
|
||||||
/*
|
|
||||||
* This is the location that an ET_DYN program is loaded if exec'ed. Typical
|
|
||||||
* use of this is to invoke "./ld.so someprog" to test out a new version of
|
|
||||||
* the loader. We need to make sure that it is out of the way of the program
|
|
||||||
* that it will "exec", and that there is sufficient room for the brk.
|
|
||||||
*/
|
|
||||||
#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
|
|
||||||
|
|
||||||
#define ELF_CORE_COPY_REGS(_dest, _regs) \
|
|
||||||
memcpy((char *) &_dest, (char *) _regs, \
|
|
||||||
sizeof(struct pt_regs));
|
|
||||||
|
|
||||||
/* No additional FP registers to copy. */
|
|
||||||
#define ELF_CORE_COPY_FPREGS(t, fpu) 0
|
|
||||||
|
|
||||||
/*
|
|
||||||
* This yields a mask that user programs can use to figure out what
|
|
||||||
* instruction set this CPU supports. This could be done in user space,
|
|
||||||
* but it's not easy, and we've already done it here.
|
|
||||||
*/
|
|
||||||
#define ELF_HWCAP (0)
|
|
||||||
|
|
||||||
/*
|
|
||||||
* This yields a string that ld.so will use to load implementation
|
|
||||||
* specific libraries for optimization. This is more specific in
|
|
||||||
* intent than poking at uname or /proc/cpuinfo.
|
|
||||||
*/
|
|
||||||
#define ELF_PLATFORM (NULL)
|
|
||||||
|
|
||||||
extern void elf_plat_init(struct pt_regs *regs, unsigned long load_addr);
|
|
||||||
|
|
||||||
#define ELF_PLAT_INIT(_r, load_addr) elf_plat_init(_r, load_addr)
|
|
||||||
|
|
||||||
extern int dump_task_regs(struct task_struct *, elf_gregset_t *);
|
|
||||||
#define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs)
|
|
||||||
|
|
||||||
/* Tilera Linux has no personalities currently, so no need to do anything. */
|
|
||||||
#define SET_PERSONALITY(ex) do { } while (0)
|
|
||||||
|
|
||||||
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
|
|
||||||
/* Support auto-mapping of the user interrupt vectors. */
|
|
||||||
struct linux_binprm;
|
|
||||||
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
|
|
||||||
int executable_stack);
|
|
||||||
/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
|
|
||||||
#define ARCH_DLINFO \
|
|
||||||
do { \
|
|
||||||
NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE); \
|
|
||||||
} while (0)
|
|
||||||
|
|
||||||
struct mm_struct;
|
|
||||||
extern unsigned long arch_randomize_brk(struct mm_struct *mm);
|
|
||||||
#define arch_randomize_brk arch_randomize_brk
|
|
||||||
|
|
||||||
#ifdef CONFIG_COMPAT
|
|
||||||
|
|
||||||
#define COMPAT_ELF_PLATFORM "tilegx-m32"
|
|
||||||
|
|
||||||
/*
|
|
||||||
* "Compat" binaries have the same machine type, but 32-bit class,
|
|
||||||
* since they're not a separate machine type, but just a 32-bit
|
|
||||||
* variant of the standard 64-bit architecture.
|
|
||||||
*/
|
|
||||||
#define compat_elf_check_arch(x) \
|
|
||||||
((x)->e_ident[EI_CLASS] == ELFCLASS32 && \
|
|
||||||
(x)->e_machine == CHIP_ELF_TYPE())
|
|
||||||
|
|
||||||
#define compat_start_thread(regs, ip, usp) do { \
|
|
||||||
regs->pc = ptr_to_compat_reg((void *)(ip)); \
|
|
||||||
regs->sp = ptr_to_compat_reg((void *)(usp)); \
|
|
||||||
single_step_execve(); \
|
|
||||||
} while (0)
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Use SET_PERSONALITY to indicate compatibility via TS_COMPAT.
|
|
||||||
*/
|
|
||||||
#undef SET_PERSONALITY
|
|
||||||
#define SET_PERSONALITY(ex) \
|
|
||||||
do { \
|
|
||||||
set_personality(PER_LINUX | (current->personality & (~PER_MASK))); \
|
|
||||||
current_thread_info()->status &= ~TS_COMPAT; \
|
|
||||||
} while (0)
|
|
||||||
#define COMPAT_SET_PERSONALITY(ex) \
|
|
||||||
do { \
|
|
||||||
set_personality(PER_LINUX | (current->personality & (~PER_MASK))); \
|
|
||||||
current_thread_info()->status |= TS_COMPAT; \
|
|
||||||
} while (0)
|
|
||||||
|
|
||||||
#define COMPAT_ELF_ET_DYN_BASE (0xffffffff / 3 * 2)
|
|
||||||
|
|
||||||
#endif /* CONFIG_COMPAT */
|
|
||||||
|
|
||||||
#define CORE_DUMP_USE_REGSET
|
|
||||||
|
|
||||||
#endif /* _ASM_TILE_ELF_H */
|
|
|
@ -1,87 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright (C) 1998 Ingo Molnar
|
|
||||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
|
||||||
*
|
|
||||||
* This program is free software; you can redistribute it and/or
|
|
||||||
* modify it under the terms of the GNU General Public License
|
|
||||||
* as published by the Free Software Foundation, version 2.
|
|
||||||
*
|
|
||||||
* This program is distributed in the hope that it will be useful, but
|
|
||||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
|
||||||
* NON INFRINGEMENT. See the GNU General Public License for
|
|
||||||
* more details.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#ifndef _ASM_TILE_FIXMAP_H
|
|
||||||
#define _ASM_TILE_FIXMAP_H
|
|
||||||
|
|
||||||
#include <asm/page.h>
|
|
||||||
|
|
||||||
#ifndef __ASSEMBLY__
|
|
||||||
#include <linux/kernel.h>
|
|
||||||
#ifdef CONFIG_HIGHMEM
|
|
||||||
#include <linux/threads.h>
|
|
||||||
#include <asm/kmap_types.h>
|
|
||||||
#endif
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Here we define all the compile-time 'special' virtual
|
|
||||||
* addresses. The point is to have a constant address at
|
|
||||||
* compile time, but to set the physical address only
|
|
||||||
* in the boot process. We allocate these special addresses
|
|
||||||
* from the end of supervisor virtual memory backwards.
|
|
||||||
* Also this lets us do fail-safe vmalloc(), we
|
|
||||||
* can guarantee that these special addresses and
|
|
||||||
* vmalloc()-ed addresses never overlap.
|
|
||||||
*
|
|
||||||
* these 'compile-time allocated' memory buffers are
|
|
||||||
* fixed-size 4k pages. (or larger if used with an increment
|
|
||||||
* higher than 1) use fixmap_set(idx,phys) to associate
|
|
||||||
* physical memory with fixmap indices.
|
|
||||||
*
|
|
||||||
* TLB entries of such buffers will not be flushed across
|
|
||||||
* task switches.
|
|
||||||
*/
|
|
||||||
enum fixed_addresses {
|
|
||||||
#ifdef __tilegx__
|
|
||||||
/*
|
|
||||||
* TILEPro has unmapped memory above so the hole isn't needed,
|
|
||||||
* and in any case the hole pushes us over a single 16MB pmd.
|
|
||||||
*/
|
|
||||||
FIX_HOLE,
|
|
||||||
#endif
|
|
||||||
#ifdef CONFIG_HIGHMEM
|
|
||||||
FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
|
|
||||||
FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
|
|
||||||
#endif
|
|
||||||
#ifdef __tilegx__ /* see homecache.c */
|
|
||||||
FIX_HOMECACHE_BEGIN,
|
|
||||||
FIX_HOMECACHE_END = FIX_HOMECACHE_BEGIN+(NR_CPUS)-1,
|
|
||||||
#endif
|
|
||||||
__end_of_permanent_fixed_addresses,
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Temporary boot-time mappings, used before ioremap() is functional.
|
|
||||||
* Not currently needed by the Tile architecture.
|
|
||||||
*/
|
|
||||||
#define NR_FIX_BTMAPS 0
|
|
||||||
#if NR_FIX_BTMAPS
|
|
||||||
FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
|
|
||||||
FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS - 1,
|
|
||||||
__end_of_fixed_addresses
|
|
||||||
#else
|
|
||||||
__end_of_fixed_addresses = __end_of_permanent_fixed_addresses
|
|
||||||
#endif
|
|
||||||
};
|
|
||||||
|
|
||||||
#define __FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
|
|
||||||
#define __FIXADDR_BOOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
|
|
||||||
#define FIXADDR_START (FIXADDR_TOP + PAGE_SIZE - __FIXADDR_SIZE)
|
|
||||||
#define FIXADDR_BOOT_START (FIXADDR_TOP + PAGE_SIZE - __FIXADDR_BOOT_SIZE)
|
|
||||||
|
|
||||||
#include <asm-generic/fixmap.h>
|
|
||||||
|
|
||||||
#endif /* !__ASSEMBLY__ */
|
|
||||||
|
|
||||||
#endif /* _ASM_TILE_FIXMAP_H */
|
|
|
@ -1,42 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
|
||||||
*
|
|
||||||
* This program is free software; you can redistribute it and/or
|
|
||||||
* modify it under the terms of the GNU General Public License
|
|
||||||
* as published by the Free Software Foundation, version 2.
|
|
||||||
*
|
|
||||||
* This program is distributed in the hope that it will be useful, but
|
|
||||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
|
||||||
* NON INFRINGEMENT. See the GNU General Public License for
|
|
||||||
* more details.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#ifndef _ASM_TILE_FTRACE_H
|
|
||||||
#define _ASM_TILE_FTRACE_H
|
|
||||||
|
|
||||||
#ifdef CONFIG_FUNCTION_TRACER
|
|
||||||
|
|
||||||
#define MCOUNT_ADDR ((unsigned long)(__mcount))
|
|
||||||
#define MCOUNT_INSN_SIZE 8 /* sizeof mcount call */
|
|
||||||
|
|
||||||
#ifndef __ASSEMBLY__
|
|
||||||
extern void __mcount(void);
|
|
||||||
|
|
||||||
#define ARCH_SUPPORTS_FTRACE_OPS 1
|
|
||||||
|
|
||||||
#ifdef CONFIG_DYNAMIC_FTRACE
|
|
||||||
static inline unsigned long ftrace_call_adjust(unsigned long addr)
|
|
||||||
{
|
|
||||||
return addr;
|
|
||||||
}
|
|
||||||
|
|
||||||
struct dyn_arch_ftrace {
|
|
||||||
};
|
|
||||||
#endif /* CONFIG_DYNAMIC_FTRACE */
|
|
||||||
|
|
||||||
#endif /* __ASSEMBLY__ */
|
|
||||||
|
|
||||||
#endif /* CONFIG_FUNCTION_TRACER */
|
|
||||||
|
|
||||||
#endif /* _ASM_TILE_FTRACE_H */
|
|
|
@ -1,166 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
|
||||||
*
|
|
||||||
* This program is free software; you can redistribute it and/or
|
|
||||||
* modify it under the terms of the GNU General Public License
|
|
||||||
* as published by the Free Software Foundation, version 2.
|
|
||||||
*
|
|
||||||
* This program is distributed in the hope that it will be useful, but
|
|
||||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
|
||||||
* NON INFRINGEMENT. See the GNU General Public License for
|
|
||||||
* more details.
|
|
||||||
*
|
|
||||||
* These routines make two important assumptions:
|
|
||||||
*
|
|
||||||
* 1. atomic_t is really an int and can be freely cast back and forth
|
|
||||||
* (validated in __init_atomic_per_cpu).
|
|
||||||
*
|
|
||||||
* 2. userspace uses sys_cmpxchg() for all atomic operations, thus using
|
|
||||||
* the same locking convention that all the kernel atomic routines use.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#ifndef _ASM_TILE_FUTEX_H
|
|
||||||
#define _ASM_TILE_FUTEX_H
|
|
||||||
|
|
||||||
#ifndef __ASSEMBLY__
|
|
||||||
|
|
||||||
#include <linux/futex.h>
|
|
||||||
#include <linux/uaccess.h>
|
|
||||||
#include <linux/errno.h>
|
|
||||||
#include <asm/atomic.h>
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Support macros for futex operations. Do not use these macros directly.
|
|
||||||
* They assume "ret", "val", "oparg", and "uaddr" in the lexical context.
|
|
||||||
* __futex_cmpxchg() additionally assumes "oldval".
|
|
||||||
*/
|
|
||||||
|
|
||||||
#ifdef __tilegx__
|
|
||||||
|
|
||||||
#define __futex_asm(OP) \
|
|
||||||
asm("1: {" #OP " %1, %3, %4; movei %0, 0 }\n" \
|
|
||||||
".pushsection .fixup,\"ax\"\n" \
|
|
||||||
"0: { movei %0, %5; j 9f }\n" \
|
|
||||||
".section __ex_table,\"a\"\n" \
|
|
||||||
".align 8\n" \
|
|
||||||
".quad 1b, 0b\n" \
|
|
||||||
".popsection\n" \
|
|
||||||
"9:" \
|
|
||||||
: "=r" (ret), "=r" (val), "+m" (*(uaddr)) \
|
|
||||||
: "r" (uaddr), "r" (oparg), "i" (-EFAULT))
|
|
||||||
|
|
||||||
#define __futex_set() __futex_asm(exch4)
|
|
||||||
#define __futex_add() __futex_asm(fetchadd4)
|
|
||||||
#define __futex_or() __futex_asm(fetchor4)
|
|
||||||
#define __futex_andn() ({ oparg = ~oparg; __futex_asm(fetchand4); })
|
|
||||||
#define __futex_cmpxchg() \
|
|
||||||
({ __insn_mtspr(SPR_CMPEXCH_VALUE, oldval); __futex_asm(cmpexch4); })
|
|
||||||
|
|
||||||
#define __futex_xor() \
|
|
||||||
({ \
|
|
||||||
u32 oldval, n = oparg; \
|
|
||||||
if ((ret = __get_user(oldval, uaddr)) == 0) { \
|
|
||||||
do { \
|
|
||||||
oparg = oldval ^ n; \
|
|
||||||
__futex_cmpxchg(); \
|
|
||||||
} while (ret == 0 && oldval != val); \
|
|
||||||
} \
|
|
||||||
})
|
|
||||||
|
|
||||||
/* No need to prefetch, since the atomic ops go to the home cache anyway. */
|
|
||||||
#define __futex_prolog()
|
|
||||||
|
|
||||||
#else
|
|
||||||
|
|
||||||
#define __futex_call(FN) \
|
|
||||||
{ \
|
|
||||||
struct __get_user gu = FN((u32 __force *)uaddr, lock, oparg); \
|
|
||||||
val = gu.val; \
|
|
||||||
ret = gu.err; \
|
|
||||||
}
|
|
||||||
|
|
||||||
#define __futex_set() __futex_call(__atomic32_xchg)
|
|
||||||
#define __futex_add() __futex_call(__atomic32_xchg_add)
|
|
||||||
#define __futex_or() __futex_call(__atomic32_fetch_or)
|
|
||||||
#define __futex_andn() __futex_call(__atomic32_fetch_andn)
|
|
||||||
#define __futex_xor() __futex_call(__atomic32_fetch_xor)
|
|
||||||
|
|
||||||
#define __futex_cmpxchg() \
|
|
||||||
{ \
|
|
||||||
struct __get_user gu = __atomic32_cmpxchg((u32 __force *)uaddr, \
|
|
||||||
lock, oldval, oparg); \
|
|
||||||
val = gu.val; \
|
|
||||||
ret = gu.err; \
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Find the lock pointer for the atomic calls to use, and issue a
|
|
||||||
* prefetch to the user address to bring it into cache. Similar to
|
|
||||||
* __atomic_setup(), but we can't do a read into the L1 since it might
|
|
||||||
* fault; instead we do a prefetch into the L2.
|
|
||||||
*/
|
|
||||||
#define __futex_prolog() \
|
|
||||||
int *lock; \
|
|
||||||
__insn_prefetch(uaddr); \
|
|
||||||
lock = __atomic_hashed_lock((int __force *)uaddr)
|
|
||||||
#endif
|
|
||||||
|
|
||||||
static inline int arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval,
|
|
||||||
u32 __user *uaddr)
|
|
||||||
{
|
|
||||||
int uninitialized_var(val), ret;
|
|
||||||
|
|
||||||
__futex_prolog();
|
|
||||||
|
|
||||||
/* The 32-bit futex code makes this assumption, so validate it here. */
|
|
||||||
BUILD_BUG_ON(sizeof(atomic_t) != sizeof(int));
|
|
||||||
|
|
||||||
pagefault_disable();
|
|
||||||
switch (op) {
|
|
||||||
case FUTEX_OP_SET:
|
|
||||||
__futex_set();
|
|
||||||
break;
|
|
||||||
case FUTEX_OP_ADD:
|
|
||||||
__futex_add();
|
|
||||||
break;
|
|
||||||
case FUTEX_OP_OR:
|
|
||||||
__futex_or();
|
|
||||||
break;
|
|
||||||
case FUTEX_OP_ANDN:
|
|
||||||
__futex_andn();
|
|
||||||
break;
|
|
||||||
case FUTEX_OP_XOR:
|
|
||||||
__futex_xor();
|
|
||||||
break;
|
|
||||||
default:
|
|
||||||
ret = -ENOSYS;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
pagefault_enable();
|
|
||||||
|
|
||||||
if (!ret)
|
|
||||||
*oval = val;
|
|
||||||
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
|
|
||||||
u32 oldval, u32 oparg)
|
|
||||||
{
|
|
||||||
int ret, val;
|
|
||||||
|
|
||||||
__futex_prolog();
|
|
||||||
|
|
||||||
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
|
|
||||||
return -EFAULT;
|
|
||||||
|
|
||||||
__futex_cmpxchg();
|
|
||||||
|
|
||||||
*uval = val;
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
#endif /* !__ASSEMBLY__ */
|
|
||||||
|
|
||||||
#endif /* _ASM_TILE_FUTEX_H */
|
|
|
@ -1,45 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
|
||||||
*
|
|
||||||
* This program is free software; you can redistribute it and/or
|
|
||||||
* modify it under the terms of the GNU General Public License
|
|
||||||
* as published by the Free Software Foundation, version 2.
|
|
||||||
*
|
|
||||||
* This program is distributed in the hope that it will be useful, but
|
|
||||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
|
||||||
* NON INFRINGEMENT. See the GNU General Public License for
|
|
||||||
* more details.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#ifndef _ASM_TILE_HARDIRQ_H
|
|
||||||
#define _ASM_TILE_HARDIRQ_H
|
|
||||||
|
|
||||||
#include <linux/threads.h>
|
|
||||||
#include <linux/cache.h>
|
|
||||||
|
|
||||||
#include <asm/irq.h>
|
|
||||||
|
|
||||||
typedef struct {
|
|
||||||
unsigned int __softirq_pending;
|
|
||||||
long idle_timestamp;
|
|
||||||
|
|
||||||
/* Hard interrupt statistics. */
|
|
||||||
unsigned int irq_timer_count;
|
|
||||||
unsigned int irq_syscall_count;
|
|
||||||
unsigned int irq_resched_count;
|
|
||||||
unsigned int irq_hv_flush_count;
|
|
||||||
unsigned int irq_call_count;
|
|
||||||
unsigned int irq_hv_msg_count;
|
|
||||||
unsigned int irq_dev_intr_count;
|
|
||||||
|
|
||||||
} ____cacheline_aligned irq_cpustat_t;
|
|
||||||
|
|
||||||
DECLARE_PER_CPU(irq_cpustat_t, irq_stat);
|
|
||||||
|
|
||||||
#define __ARCH_IRQ_STAT
|
|
||||||
#define __IRQ_STAT(cpu, member) (per_cpu(irq_stat, cpu).member)
|
|
||||||
|
|
||||||
#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
|
|
||||||
|
|
||||||
#endif /* _ASM_TILE_HARDIRQ_H */
@@ -1,30 +0,0 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 *
 * Provide methods for access control of per-cpu resources like
 * UDN, IDN, or IPI.
 */
#ifndef _ASM_TILE_HARDWALL_H
#define _ASM_TILE_HARDWALL_H

#include <uapi/asm/hardwall.h>

/* /proc hooks for hardwall. */
struct proc_dir_entry;
#ifdef CONFIG_HARDWALL
void proc_tile_hardwall_init(struct proc_dir_entry *root);
int proc_pid_hardwall(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task);
#else
static inline void proc_tile_hardwall_init(struct proc_dir_entry *root) {}
#endif
#endif /* _ASM_TILE_HARDWALL_H */
@@ -1,71 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright (C) 1999 Gerhard Wichert, Siemens AG
|
|
||||||
* Gerhard.Wichert@pdb.siemens.de
|
|
||||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
|
||||||
*
|
|
||||||
* This program is free software; you can redistribute it and/or
|
|
||||||
* modify it under the terms of the GNU General Public License
|
|
||||||
* as published by the Free Software Foundation, version 2.
|
|
||||||
*
|
|
||||||
* This program is distributed in the hope that it will be useful, but
|
|
||||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
|
||||||
* NON INFRINGEMENT. See the GNU General Public License for
|
|
||||||
* more details.
|
|
||||||
*
|
|
||||||
* Used in CONFIG_HIGHMEM systems for memory pages which
|
|
||||||
* are not addressable by direct kernel virtual addresses.
|
|
||||||
*
|
|
||||||
*/
|
|
||||||
|
|
||||||
#ifndef _ASM_TILE_HIGHMEM_H
|
|
||||||
#define _ASM_TILE_HIGHMEM_H
|
|
||||||
|
|
||||||
#include <linux/interrupt.h>
|
|
||||||
#include <linux/threads.h>
|
|
||||||
#include <asm/tlbflush.h>
|
|
||||||
#include <asm/homecache.h>
|
|
||||||
|
|
||||||
/* declarations for highmem.c */
|
|
||||||
extern unsigned long highstart_pfn, highend_pfn;
|
|
||||||
|
|
||||||
extern pte_t *pkmap_page_table;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Ordering is:
|
|
||||||
*
|
|
||||||
* FIXADDR_TOP
|
|
||||||
* fixed_addresses
|
|
||||||
* FIXADDR_START
|
|
||||||
* temp fixed addresses
|
|
||||||
* FIXADDR_BOOT_START
|
|
||||||
* Persistent kmap area
|
|
||||||
* PKMAP_BASE
|
|
||||||
* VMALLOC_END
|
|
||||||
* Vmalloc area
|
|
||||||
* VMALLOC_START
|
|
||||||
* high_memory
|
|
||||||
*/
|
|
||||||
#define LAST_PKMAP_MASK (LAST_PKMAP-1)
|
|
||||||
#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
|
|
||||||
#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
|
|
||||||
|
|
||||||
void *kmap_high(struct page *page);
|
|
||||||
void kunmap_high(struct page *page);
|
|
||||||
void *kmap(struct page *page);
|
|
||||||
void kunmap(struct page *page);
|
|
||||||
void *kmap_fix_kpte(struct page *page, int finished);
|
|
||||||
|
|
||||||
/* This macro is used only in map_new_virtual() to map "page". */
|
|
||||||
#define kmap_prot page_to_kpgprot(page)
|
|
||||||
|
|
||||||
void *kmap_atomic(struct page *page);
|
|
||||||
void __kunmap_atomic(void *kvaddr);
|
|
||||||
void *kmap_atomic_pfn(unsigned long pfn);
|
|
||||||
void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
|
|
||||||
void *kmap_atomic_prot(struct page *page, pgprot_t prot);
|
|
||||||
void kmap_atomic_fix_kpte(struct page *page, int finished);
|
|
||||||
|
|
||||||
#define flush_cache_kmaps() do { } while (0)
|
|
||||||
|
|
||||||
#endif /* _ASM_TILE_HIGHMEM_H */
@@ -1,123 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
|
||||||
*
|
|
||||||
* This program is free software; you can redistribute it and/or
|
|
||||||
* modify it under the terms of the GNU General Public License
|
|
||||||
* as published by the Free Software Foundation, version 2.
|
|
||||||
*
|
|
||||||
* This program is distributed in the hope that it will be useful, but
|
|
||||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
|
||||||
* NON INFRINGEMENT. See the GNU General Public License for
|
|
||||||
* more details.
|
|
||||||
*
|
|
||||||
* Handle issues around the Tile "home cache" model of coherence.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#ifndef _ASM_TILE_HOMECACHE_H
|
|
||||||
#define _ASM_TILE_HOMECACHE_H
|
|
||||||
|
|
||||||
#include <asm/page.h>
|
|
||||||
#include <linux/cpumask.h>
|
|
||||||
|
|
||||||
struct page;
|
|
||||||
struct task_struct;
|
|
||||||
struct vm_area_struct;
|
|
||||||
struct zone;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Coherence point for the page is its memory controller.
|
|
||||||
* It is not present in any cache (L1 or L2).
|
|
||||||
*/
|
|
||||||
#define PAGE_HOME_UNCACHED -1
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Is this page immutable (unwritable) and thus able to be cached more
|
|
||||||
* widely than would otherwise be possible? This means we have "nc" set.
|
|
||||||
*/
|
|
||||||
#define PAGE_HOME_IMMUTABLE -2
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Each cpu considers its own cache to be the home for the page,
|
|
||||||
* which makes it incoherent.
|
|
||||||
*/
|
|
||||||
#define PAGE_HOME_INCOHERENT -3
|
|
||||||
|
|
||||||
/* Home for the page is distributed via hash-for-home. */
|
|
||||||
#define PAGE_HOME_HASH -4
|
|
||||||
|
|
||||||
/* Support wrapper to use instead of explicit hv_flush_remote(). */
|
|
||||||
extern void flush_remote(unsigned long cache_pfn, unsigned long cache_length,
|
|
||||||
const struct cpumask *cache_cpumask,
|
|
||||||
HV_VirtAddr tlb_va, unsigned long tlb_length,
|
|
||||||
unsigned long tlb_pgsize,
|
|
||||||
const struct cpumask *tlb_cpumask,
|
|
||||||
HV_Remote_ASID *asids, int asidcount);
|
|
||||||
|
|
||||||
/* Set homing-related bits in a PTE (can also pass a pgprot_t). */
|
|
||||||
extern pte_t pte_set_home(pte_t pte, int home);
|
|
||||||
|
|
||||||
/* Do a cache eviction on the specified cpus. */
|
|
||||||
extern void homecache_evict(const struct cpumask *mask);
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Change a kernel page's homecache. It must not be mapped in user space.
|
|
||||||
* If !CONFIG_HOMECACHE, only usable on LOWMEM, and can only be called when
|
|
||||||
* no other cpu can reference the page, and causes a full-chip cache/TLB flush.
|
|
||||||
*/
|
|
||||||
extern void homecache_change_page_home(struct page *, int order, int home);
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Flush a page out of whatever cache(s) it is in.
|
|
||||||
* This is more than just finv, since it properly handles waiting
|
|
||||||
* for the data to reach memory, but it can be quite
|
|
||||||
* heavyweight, particularly on incoherent or immutable memory.
|
|
||||||
*/
|
|
||||||
extern void homecache_finv_page(struct page *);
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Flush a page out of the specified home cache.
|
|
||||||
* Note that the specified home need not be the actual home of the page,
|
|
||||||
* as for example might be the case when coordinating with I/O devices.
|
|
||||||
*/
|
|
||||||
extern void homecache_finv_map_page(struct page *, int home);
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Allocate a page with the given GFP flags, home, and optionally
|
|
||||||
* node. These routines are actually just wrappers around the normal
|
|
||||||
* alloc_pages() / alloc_pages_node() functions, which set and clear
|
|
||||||
* a per-cpu variable to communicate with homecache_new_kernel_page().
|
|
||||||
* If !CONFIG_HOMECACHE, uses homecache_change_page_home().
|
|
||||||
*/
|
|
||||||
extern struct page *homecache_alloc_pages(gfp_t gfp_mask,
|
|
||||||
unsigned int order, int home);
|
|
||||||
extern struct page *homecache_alloc_pages_node(int nid, gfp_t gfp_mask,
|
|
||||||
unsigned int order, int home);
|
|
||||||
#define homecache_alloc_page(gfp_mask, home) \
|
|
||||||
homecache_alloc_pages(gfp_mask, 0, home)
|
|
||||||
|
|
||||||
/*
|
|
||||||
* These routines are just pass-throughs to free_pages() when
|
|
||||||
* we support full homecaching. If !CONFIG_HOMECACHE, then these
|
|
||||||
* routines use homecache_change_page_home() to reset the home
|
|
||||||
* back to the default before returning the page to the allocator.
|
|
||||||
*/
|
|
||||||
void __homecache_free_pages(struct page *, unsigned int order);
|
|
||||||
void homecache_free_pages(unsigned long addr, unsigned int order);
|
|
||||||
#define __homecache_free_page(page) __homecache_free_pages((page), 0)
|
|
||||||
#define homecache_free_page(page) homecache_free_pages((page), 0)
|
|
||||||
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Report the page home for LOWMEM pages by examining their kernel PTE,
|
|
||||||
* or for highmem pages as the default home.
|
|
||||||
*/
|
|
||||||
extern int page_home(struct page *);
|
|
||||||
|
|
||||||
#define homecache_migrate_kthread() do {} while (0)
|
|
||||||
|
|
||||||
#define homecache_kpte_lock() 0
|
|
||||||
#define homecache_kpte_unlock(flags) do {} while (0)
|
|
||||||
|
|
||||||
|
|
||||||
#endif /* _ASM_TILE_HOMECACHE_H */
@@ -1,122 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
|
||||||
*
|
|
||||||
* This program is free software; you can redistribute it and/or
|
|
||||||
* modify it under the terms of the GNU General Public License
|
|
||||||
* as published by the Free Software Foundation, version 2.
|
|
||||||
*
|
|
||||||
* This program is distributed in the hope that it will be useful, but
|
|
||||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
|
||||||
* NON INFRINGEMENT. See the GNU General Public License for
|
|
||||||
* more details.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#ifndef _ASM_TILE_HUGETLB_H
|
|
||||||
#define _ASM_TILE_HUGETLB_H
|
|
||||||
|
|
||||||
#include <asm/page.h>
|
|
||||||
#include <asm-generic/hugetlb.h>
|
|
||||||
|
|
||||||
|
|
||||||
static inline int is_hugepage_only_range(struct mm_struct *mm,
|
|
||||||
unsigned long addr,
|
|
||||||
unsigned long len) {
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* If the arch doesn't supply something else, assume that hugepage
|
|
||||||
* size aligned regions are ok without further preparation.
|
|
||||||
*/
|
|
||||||
static inline int prepare_hugepage_range(struct file *file,
|
|
||||||
unsigned long addr, unsigned long len)
|
|
||||||
{
|
|
||||||
struct hstate *h = hstate_file(file);
|
|
||||||
if (len & ~huge_page_mask(h))
|
|
||||||
return -EINVAL;
|
|
||||||
if (addr & ~huge_page_mask(h))
|
|
||||||
return -EINVAL;
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
|
|
||||||
unsigned long addr, unsigned long end,
|
|
||||||
unsigned long floor,
|
|
||||||
unsigned long ceiling)
|
|
||||||
{
|
|
||||||
free_pgd_range(tlb, addr, end, floor, ceiling);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
|
|
||||||
pte_t *ptep, pte_t pte)
|
|
||||||
{
|
|
||||||
set_pte(ptep, pte);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
|
|
||||||
unsigned long addr, pte_t *ptep)
|
|
||||||
{
|
|
||||||
return ptep_get_and_clear(mm, addr, ptep);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
|
|
||||||
unsigned long addr, pte_t *ptep)
|
|
||||||
{
|
|
||||||
ptep_clear_flush(vma, addr, ptep);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline int huge_pte_none(pte_t pte)
|
|
||||||
{
|
|
||||||
return pte_none(pte);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline pte_t huge_pte_wrprotect(pte_t pte)
|
|
||||||
{
|
|
||||||
return pte_wrprotect(pte);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
|
|
||||||
unsigned long addr, pte_t *ptep)
|
|
||||||
{
|
|
||||||
ptep_set_wrprotect(mm, addr, ptep);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
|
|
||||||
unsigned long addr, pte_t *ptep,
|
|
||||||
pte_t pte, int dirty)
|
|
||||||
{
|
|
||||||
return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline pte_t huge_ptep_get(pte_t *ptep)
|
|
||||||
{
|
|
||||||
return *ptep;
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void arch_clear_hugepage_flags(struct page *page)
|
|
||||||
{
|
|
||||||
}
|
|
||||||
|
|
||||||
#ifdef CONFIG_HUGETLB_SUPER_PAGES
|
|
||||||
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
|
|
||||||
struct page *page, int writable)
|
|
||||||
{
|
|
||||||
size_t pagesize = huge_page_size(hstate_vma(vma));
|
|
||||||
if (pagesize != PUD_SIZE && pagesize != PMD_SIZE)
|
|
||||||
entry = pte_mksuper(entry);
|
|
||||||
return entry;
|
|
||||||
}
|
|
||||||
#define arch_make_huge_pte arch_make_huge_pte
|
|
||||||
|
|
||||||
/* Sizes to scale up page size for PTEs with HV_PTE_SUPER bit. */
|
|
||||||
enum {
|
|
||||||
HUGE_SHIFT_PGDIR = 0,
|
|
||||||
HUGE_SHIFT_PMD = 1,
|
|
||||||
HUGE_SHIFT_PAGE = 2,
|
|
||||||
HUGE_SHIFT_ENTRIES
|
|
||||||
};
|
|
||||||
extern int huge_shift[HUGE_SHIFT_ENTRIES];
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#endif /* _ASM_TILE_HUGETLB_H */
@@ -1,60 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
|
||||||
*
|
|
||||||
* This program is free software; you can redistribute it and/or
|
|
||||||
* modify it under the terms of the GNU General Public License
|
|
||||||
* as published by the Free Software Foundation, version 2.
|
|
||||||
*
|
|
||||||
* This program is distributed in the hope that it will be useful, but
|
|
||||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
|
||||||
* NON INFRINGEMENT. See the GNU General Public License for
|
|
||||||
* more details.
|
|
||||||
*
|
|
||||||
* This header defines a wrapper interface for managing hypervisor
|
|
||||||
* device calls that will result in an interrupt at some later time.
|
|
||||||
* In particular, this provides wrappers for hv_preada() and
|
|
||||||
* hv_pwritea().
|
|
||||||
*/
|
|
||||||
|
|
||||||
#ifndef _ASM_TILE_HV_DRIVER_H
|
|
||||||
#define _ASM_TILE_HV_DRIVER_H
|
|
||||||
|
|
||||||
#include <hv/hypervisor.h>
|
|
||||||
|
|
||||||
struct hv_driver_cb;
|
|
||||||
|
|
||||||
/* A callback to be invoked when an operation completes. */
|
|
||||||
typedef void hv_driver_callback_t(struct hv_driver_cb *cb, __hv32 result);
|
|
||||||
|
|
||||||
/*
|
|
||||||
* A structure to hold information about an outstanding call.
|
|
||||||
* The driver must allocate a separate structure for each call.
|
|
||||||
*/
|
|
||||||
struct hv_driver_cb {
|
|
||||||
hv_driver_callback_t *callback; /* Function to call on interrupt. */
|
|
||||||
void *dev; /* Driver-specific state variable. */
|
|
||||||
};
|
|
||||||
|
|
||||||
/* Wrapper for invoking hv_dev_preada(). */
|
|
||||||
static inline int
|
|
||||||
tile_hv_dev_preada(int devhdl, __hv32 flags, __hv32 sgl_len,
|
|
||||||
HV_SGL sgl[/* sgl_len */], __hv64 offset,
|
|
||||||
struct hv_driver_cb *callback)
|
|
||||||
{
|
|
||||||
return hv_dev_preada(devhdl, flags, sgl_len, sgl,
|
|
||||||
offset, (HV_IntArg)callback);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Wrapper for invoking hv_dev_pwritea(). */
|
|
||||||
static inline int
|
|
||||||
tile_hv_dev_pwritea(int devhdl, __hv32 flags, __hv32 sgl_len,
|
|
||||||
HV_SGL sgl[/* sgl_len */], __hv64 offset,
|
|
||||||
struct hv_driver_cb *callback)
|
|
||||||
{
|
|
||||||
return hv_dev_pwritea(devhdl, flags, sgl_len, sgl,
|
|
||||||
offset, (HV_IntArg)callback);
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
#endif /* _ASM_TILE_HV_DRIVER_H */
@@ -1,25 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
|
||||||
*
|
|
||||||
* This program is free software; you can redistribute it and/or
|
|
||||||
* modify it under the terms of the GNU General Public License
|
|
||||||
* as published by the Free Software Foundation, version 2.
|
|
||||||
*
|
|
||||||
* This program is distributed in the hope that it will be useful, but
|
|
||||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
|
||||||
* NON INFRINGEMENT. See the GNU General Public License for
|
|
||||||
* more details.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#ifndef _ASM_TILE_IDE_H
|
|
||||||
#define _ASM_TILE_IDE_H
|
|
||||||
|
|
||||||
/* For IDE on PCI */
|
|
||||||
#define MAX_HWIFS 10
|
|
||||||
|
|
||||||
#define ide_default_io_ctl(base) (0)
|
|
||||||
|
|
||||||
#include <asm-generic/ide_iops.h>
|
|
||||||
|
|
||||||
#endif /* _ASM_TILE_IDE_H */
@@ -1,59 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright 2015 Tilera Corporation. All Rights Reserved.
|
|
||||||
*
|
|
||||||
* This program is free software; you can redistribute it and/or
|
|
||||||
* modify it under the terms of the GNU General Public License
|
|
||||||
* as published by the Free Software Foundation, version 2.
|
|
||||||
*
|
|
||||||
* This program is distributed in the hope that it will be useful, but
|
|
||||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
|
||||||
* NON INFRINGEMENT. See the GNU General Public License for
|
|
||||||
* more details.
|
|
||||||
*/
|
|
||||||
#ifndef __ASM_TILE_INSN_H
|
|
||||||
#define __ASM_TILE_INSN_H
|
|
||||||
|
|
||||||
#include <arch/opcode.h>
|
|
||||||
|
|
||||||
static inline tilegx_bundle_bits NOP(void)
|
|
||||||
{
|
|
||||||
return create_UnaryOpcodeExtension_X0(FNOP_UNARY_OPCODE_X0) |
|
|
||||||
create_RRROpcodeExtension_X0(UNARY_RRR_0_OPCODE_X0) |
|
|
||||||
create_Opcode_X0(RRR_0_OPCODE_X0) |
|
|
||||||
create_UnaryOpcodeExtension_X1(NOP_UNARY_OPCODE_X1) |
|
|
||||||
create_RRROpcodeExtension_X1(UNARY_RRR_0_OPCODE_X1) |
|
|
||||||
create_Opcode_X1(RRR_0_OPCODE_X1);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline tilegx_bundle_bits tilegx_gen_branch(unsigned long pc,
|
|
||||||
unsigned long addr,
|
|
||||||
bool link)
|
|
||||||
{
|
|
||||||
tilegx_bundle_bits opcode_x0, opcode_x1;
|
|
||||||
long pcrel_by_instr = (addr - pc) >> TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES;
|
|
||||||
|
|
||||||
if (link) {
|
|
||||||
/* opcode: jal addr */
|
|
||||||
opcode_x1 =
|
|
||||||
create_Opcode_X1(JUMP_OPCODE_X1) |
|
|
||||||
create_JumpOpcodeExtension_X1(JAL_JUMP_OPCODE_X1) |
|
|
||||||
create_JumpOff_X1(pcrel_by_instr);
|
|
||||||
} else {
|
|
||||||
/* opcode: j addr */
|
|
||||||
opcode_x1 =
|
|
||||||
create_Opcode_X1(JUMP_OPCODE_X1) |
|
|
||||||
create_JumpOpcodeExtension_X1(J_JUMP_OPCODE_X1) |
|
|
||||||
create_JumpOff_X1(pcrel_by_instr);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* opcode: fnop */
|
|
||||||
opcode_x0 =
|
|
||||||
create_UnaryOpcodeExtension_X0(FNOP_UNARY_OPCODE_X0) |
|
|
||||||
create_RRROpcodeExtension_X0(UNARY_RRR_0_OPCODE_X0) |
|
|
||||||
create_Opcode_X0(RRR_0_OPCODE_X0);
|
|
||||||
|
|
||||||
return opcode_x1 | opcode_x0;
|
|
||||||
}
|
|
||||||
|
|
||||||
#endif /* __ASM_TILE_INSN_H */
@@ -1,509 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
|
||||||
*
|
|
||||||
* This program is free software; you can redistribute it and/or
|
|
||||||
* modify it under the terms of the GNU General Public License
|
|
||||||
* as published by the Free Software Foundation, version 2.
|
|
||||||
*
|
|
||||||
* This program is distributed in the hope that it will be useful, but
|
|
||||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
|
||||||
* NON INFRINGEMENT. See the GNU General Public License for
|
|
||||||
* more details.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#ifndef _ASM_TILE_IO_H
|
|
||||||
#define _ASM_TILE_IO_H
|
|
||||||
|
|
||||||
#include <linux/kernel.h>
|
|
||||||
#include <linux/bug.h>
|
|
||||||
#include <asm/page.h>
|
|
||||||
|
|
||||||
/* Maximum PCI I/O space address supported. */
|
|
||||||
#define IO_SPACE_LIMIT 0xffffffff
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
|
|
||||||
* access.
|
|
||||||
*/
|
|
||||||
#define xlate_dev_mem_ptr(p) __va(p)
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Convert a virtual cached pointer to an uncached pointer.
|
|
||||||
*/
|
|
||||||
#define xlate_dev_kmem_ptr(p) p
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Change "struct page" to physical address.
|
|
||||||
*/
|
|
||||||
#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Some places try to pass in an loff_t for PHYSADDR (?!), so we cast it to
|
|
||||||
* long before casting it to a pointer to avoid compiler warnings.
|
|
||||||
*/
|
|
||||||
#if CHIP_HAS_MMIO()
|
|
||||||
extern void __iomem *ioremap(resource_size_t offset, unsigned long size);
|
|
||||||
extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
|
|
||||||
pgprot_t pgprot);
|
|
||||||
extern void iounmap(volatile void __iomem *addr);
|
|
||||||
#else
|
|
||||||
#define ioremap(physaddr, size) ((void __iomem *)(unsigned long)(physaddr))
|
|
||||||
#define iounmap(addr) ((void)0)
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#define ioremap_nocache(physaddr, size) ioremap(physaddr, size)
|
|
||||||
#define ioremap_wc(physaddr, size) ioremap(physaddr, size)
|
|
||||||
#define ioremap_wt(physaddr, size) ioremap(physaddr, size)
|
|
||||||
#define ioremap_uc(physaddr, size) ioremap(physaddr, size)
|
|
||||||
#define ioremap_fullcache(physaddr, size) ioremap(physaddr, size)
|
|
||||||
|
|
||||||
#define mmiowb()
|
|
||||||
|
|
||||||
/* Conversion between virtual and physical mappings. */
|
|
||||||
#define mm_ptov(addr) ((void *)phys_to_virt(addr))
|
|
||||||
#define mm_vtop(addr) ((unsigned long)virt_to_phys(addr))
|
|
||||||
|
|
||||||
#if CHIP_HAS_MMIO()
|
|
||||||
|
|
||||||
/*
|
|
||||||
* We use inline assembly to guarantee that the compiler does not
|
|
||||||
* split an access into multiple byte-sized accesses as it might
|
|
||||||
* sometimes do if a register data structure is marked "packed".
|
|
||||||
* Obviously on tile we can't tolerate such an access being
|
|
||||||
* actually unaligned, but we want to avoid the case where the
|
|
||||||
* compiler conservatively would generate multiple accesses even
|
|
||||||
* for an aligned read or write.
|
|
||||||
*/
|
|
||||||
|
|
||||||
static inline u8 __raw_readb(const volatile void __iomem *addr)
|
|
||||||
{
|
|
||||||
return *(const volatile u8 __force *)addr;
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline u16 __raw_readw(const volatile void __iomem *addr)
|
|
||||||
{
|
|
||||||
u16 ret;
|
|
||||||
asm volatile("ld2u %0, %1" : "=r" (ret) : "r" (addr));
|
|
||||||
barrier();
|
|
||||||
return le16_to_cpu(ret);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline u32 __raw_readl(const volatile void __iomem *addr)
|
|
||||||
{
|
|
||||||
u32 ret;
|
|
||||||
/* Sign-extend to conform to u32 ABI sign-extension convention. */
|
|
||||||
asm volatile("ld4s %0, %1" : "=r" (ret) : "r" (addr));
|
|
||||||
barrier();
|
|
||||||
return le32_to_cpu(ret);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline u64 __raw_readq(const volatile void __iomem *addr)
|
|
||||||
{
|
|
||||||
u64 ret;
|
|
||||||
asm volatile("ld %0, %1" : "=r" (ret) : "r" (addr));
|
|
||||||
barrier();
|
|
||||||
return le64_to_cpu(ret);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
|
|
||||||
{
|
|
||||||
*(volatile u8 __force *)addr = val;
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void __raw_writew(u16 val, volatile void __iomem *addr)
|
|
||||||
{
|
|
||||||
asm volatile("st2 %0, %1" :: "r" (addr), "r" (cpu_to_le16(val)));
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void __raw_writel(u32 val, volatile void __iomem *addr)
|
|
||||||
{
|
|
||||||
asm volatile("st4 %0, %1" :: "r" (addr), "r" (cpu_to_le32(val)));
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void __raw_writeq(u64 val, volatile void __iomem *addr)
|
|
||||||
{
|
|
||||||
asm volatile("st %0, %1" :: "r" (addr), "r" (cpu_to_le64(val)));
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* The on-chip I/O hardware on tilegx is configured with VA=PA for the
|
|
||||||
* kernel's PA range. The low-level APIs and field names use "va" and
|
|
||||||
* "void *" nomenclature, to be consistent with the general notion
|
|
||||||
* that the addresses in question are virtualizable, but in the kernel
|
|
||||||
* context we are actually manipulating PA values. (In other contexts,
|
|
||||||
* e.g. access from user space, we do in fact use real virtual addresses
|
|
||||||
* in the va fields.) To allow readers of the code to understand what's
|
|
||||||
* happening, we direct their attention to this comment by using the
|
|
||||||
* following two functions that just duplicate __va() and __pa().
|
|
||||||
*/
|
|
||||||
typedef unsigned long tile_io_addr_t;
|
|
||||||
static inline tile_io_addr_t va_to_tile_io_addr(void *va)
|
|
||||||
{
|
|
||||||
BUILD_BUG_ON(sizeof(phys_addr_t) != sizeof(tile_io_addr_t));
|
|
||||||
return __pa(va);
|
|
||||||
}
|
|
||||||
static inline void *tile_io_addr_to_va(tile_io_addr_t tile_io_addr)
|
|
||||||
{
|
|
||||||
return __va(tile_io_addr);
|
|
||||||
}
|
|
||||||
|
|
||||||
#else /* CHIP_HAS_MMIO() */
|
|
||||||
|
|
||||||
#ifdef CONFIG_PCI
|
|
||||||
|
|
||||||
extern u8 _tile_readb(unsigned long addr);
|
|
||||||
extern u16 _tile_readw(unsigned long addr);
|
|
||||||
extern u32 _tile_readl(unsigned long addr);
|
|
||||||
extern u64 _tile_readq(unsigned long addr);
|
|
||||||
extern void _tile_writeb(u8 val, unsigned long addr);
|
|
||||||
extern void _tile_writew(u16 val, unsigned long addr);
|
|
||||||
extern void _tile_writel(u32 val, unsigned long addr);
|
|
||||||
extern void _tile_writeq(u64 val, unsigned long addr);
|
|
||||||
|
|
||||||
#define __raw_readb(addr) _tile_readb((unsigned long)(addr))
|
|
||||||
#define __raw_readw(addr) _tile_readw((unsigned long)(addr))
|
|
||||||
#define __raw_readl(addr) _tile_readl((unsigned long)(addr))
|
|
||||||
#define __raw_readq(addr) _tile_readq((unsigned long)(addr))
|
|
||||||
#define __raw_writeb(val, addr) _tile_writeb(val, (unsigned long)(addr))
|
|
||||||
#define __raw_writew(val, addr) _tile_writew(val, (unsigned long)(addr))
|
|
||||||
#define __raw_writel(val, addr) _tile_writel(val, (unsigned long)(addr))
|
|
||||||
#define __raw_writeq(val, addr) _tile_writeq(val, (unsigned long)(addr))
|
|
||||||
|
|
||||||
#else /* CONFIG_PCI */
|
|
||||||
|
|
||||||
/*
|
|
||||||
* The TilePro architecture does not support IOMEM unless PCI is enabled.
|
|
||||||
* Unfortunately we can't yet simply not declare these methods,
|
|
||||||
* since some generic code that compiles into the kernel, but
|
|
||||||
* we never run, uses them unconditionally.
|
|
||||||
*/
|
|
||||||
|
|
||||||
static inline int iomem_panic(void)
|
|
||||||
{
|
|
||||||
panic("readb/writeb and friends do not exist on tile without PCI");
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline u8 readb(unsigned long addr)
|
|
||||||
{
|
|
||||||
return iomem_panic();
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline u16 readw(unsigned long addr)
|
|
||||||
{
|
|
||||||
return iomem_panic();
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline u32 readl(unsigned long addr)
|
|
||||||
{
|
|
||||||
return iomem_panic();
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline u64 readq(unsigned long addr)
|
|
||||||
{
|
|
||||||
return iomem_panic();
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void writeb(u8 val, unsigned long addr)
|
|
||||||
{
|
|
||||||
iomem_panic();
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void writew(u16 val, unsigned long addr)
|
|
||||||
{
|
|
||||||
iomem_panic();
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void writel(u32 val, unsigned long addr)
|
|
||||||
{
|
|
||||||
iomem_panic();
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void writeq(u64 val, unsigned long addr)
|
|
||||||
{
|
|
||||||
iomem_panic();
|
|
||||||
}
|
|
||||||
|
|
||||||
#endif /* CONFIG_PCI */
|
|
||||||
|
|
||||||
#endif /* CHIP_HAS_MMIO() */
|
|
||||||
|
|
||||||
#define readb __raw_readb
|
|
||||||
#define readw __raw_readw
|
|
||||||
#define readl __raw_readl
|
|
||||||
#define readq __raw_readq
|
|
||||||
#define writeb __raw_writeb
|
|
||||||
#define writew __raw_writew
|
|
||||||
#define writel __raw_writel
|
|
||||||
#define writeq __raw_writeq
|
|
||||||
|
|
||||||
#define readb_relaxed readb
|
|
||||||
#define readw_relaxed readw
|
|
||||||
#define readl_relaxed readl
|
|
||||||
#define readq_relaxed readq
|
|
||||||
#define writeb_relaxed writeb
|
|
||||||
#define writew_relaxed writew
|
|
||||||
#define writel_relaxed writel
|
|
||||||
#define writeq_relaxed writeq
|
|
||||||
|
|
||||||
#define ioread8 readb
|
|
||||||
#define ioread16 readw
|
|
||||||
#define ioread32 readl
|
|
||||||
#define ioread64 readq
|
|
||||||
#define iowrite8 writeb
|
|
||||||
#define iowrite16 writew
|
|
||||||
#define iowrite32 writel
|
|
||||||
#define iowrite64 writeq
|
|
||||||
|
|
||||||
#if CHIP_HAS_MMIO() || defined(CONFIG_PCI)
|
|
||||||
|
|
||||||
static inline void memset_io(volatile void *dst, int val, size_t len)
|
|
||||||
{
|
|
||||||
size_t x;
|
|
||||||
BUG_ON((unsigned long)dst & 0x3);
|
|
||||||
val = (val & 0xff) * 0x01010101;
|
|
||||||
for (x = 0; x < len; x += 4)
|
|
||||||
writel(val, dst + x);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void memcpy_fromio(void *dst, const volatile void __iomem *src,
|
|
||||||
size_t len)
|
|
||||||
{
|
|
||||||
size_t x;
|
|
||||||
BUG_ON((unsigned long)src & 0x3);
|
|
||||||
for (x = 0; x < len; x += 4)
|
|
||||||
*(u32 *)(dst + x) = readl(src + x);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
|
|
||||||
size_t len)
|
|
||||||
{
|
|
||||||
size_t x;
|
|
||||||
BUG_ON((unsigned long)dst & 0x3);
|
|
||||||
for (x = 0; x < len; x += 4)
|
|
||||||
writel(*(u32 *)(src + x), dst + x);
|
|
||||||
}
|
|
||||||
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#if CHIP_HAS_MMIO() && defined(CONFIG_TILE_PCI_IO)
|
|
||||||
|
|
||||||
static inline u8 inb(unsigned long addr)
|
|
||||||
{
|
|
||||||
return readb((volatile void __iomem *) addr);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline u16 inw(unsigned long addr)
|
|
||||||
{
|
|
||||||
return readw((volatile void __iomem *) addr);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline u32 inl(unsigned long addr)
|
|
||||||
{
|
|
||||||
return readl((volatile void __iomem *) addr);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void outb(u8 b, unsigned long addr)
|
|
||||||
{
|
|
||||||
writeb(b, (volatile void __iomem *) addr);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void outw(u16 b, unsigned long addr)
|
|
||||||
{
|
|
||||||
writew(b, (volatile void __iomem *) addr);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void outl(u32 b, unsigned long addr)
|
|
||||||
{
|
|
||||||
writel(b, (volatile void __iomem *) addr);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void insb(unsigned long addr, void *buffer, int count)
|
|
||||||
{
|
|
||||||
if (count) {
|
|
||||||
u8 *buf = buffer;
|
|
||||||
do {
|
|
||||||
u8 x = inb(addr);
|
|
||||||
*buf++ = x;
|
|
||||||
} while (--count);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void insw(unsigned long addr, void *buffer, int count)
|
|
||||||
{
|
|
||||||
if (count) {
|
|
||||||
u16 *buf = buffer;
|
|
||||||
do {
|
|
||||||
u16 x = inw(addr);
|
|
||||||
*buf++ = x;
|
|
||||||
} while (--count);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void insl(unsigned long addr, void *buffer, int count)
|
|
||||||
{
|
|
||||||
if (count) {
|
|
||||||
u32 *buf = buffer;
|
|
||||||
do {
|
|
||||||
u32 x = inl(addr);
|
|
||||||
*buf++ = x;
|
|
||||||
} while (--count);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void outsb(unsigned long addr, const void *buffer, int count)
|
|
||||||
{
|
|
||||||
if (count) {
|
|
||||||
const u8 *buf = buffer;
|
|
||||||
do {
|
|
||||||
outb(*buf++, addr);
|
|
||||||
} while (--count);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void outsw(unsigned long addr, const void *buffer, int count)
|
|
||||||
{
|
|
||||||
if (count) {
|
|
||||||
const u16 *buf = buffer;
|
|
||||||
do {
|
|
||||||
outw(*buf++, addr);
|
|
||||||
} while (--count);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void outsl(unsigned long addr, const void *buffer, int count)
|
|
||||||
{
|
|
||||||
if (count) {
|
|
||||||
const u32 *buf = buffer;
|
|
||||||
do {
|
|
||||||
outl(*buf++, addr);
|
|
||||||
} while (--count);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
extern void __iomem *ioport_map(unsigned long port, unsigned int len);
|
|
||||||
extern void ioport_unmap(void __iomem *addr);
|
|
||||||
|
|
||||||
#else
|
|
||||||
|
|
||||||
/*
|
|
||||||
* The TilePro architecture does not support IOPORT, even with PCI.
|
|
||||||
* Unfortunately we can't yet simply not declare these methods,
|
|
||||||
* since some generic code that compiles into the kernel, but
|
|
||||||
* we never run, uses them unconditionally.
|
|
||||||
*/
|
|
||||||
|
|
||||||
static inline long ioport_panic(void)
|
|
||||||
{
|
|
||||||
#ifdef __tilegx__
|
|
||||||
panic("PCI IO space support is disabled. Configure the kernel with CONFIG_TILE_PCI_IO to enable it");
|
|
||||||
#else
|
|
||||||
panic("inb/outb and friends do not exist on tile");
|
|
||||||
#endif
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void __iomem *ioport_map(unsigned long port, unsigned int len)
|
|
||||||
{
|
|
||||||
pr_info("ioport_map: mapping IO resources is unsupported on tile\n");
|
|
||||||
return NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void ioport_unmap(void __iomem *addr)
|
|
||||||
{
|
|
||||||
ioport_panic();
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline u8 inb(unsigned long addr)
|
|
||||||
{
|
|
||||||
return ioport_panic();
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline u16 inw(unsigned long addr)
|
|
||||||
{
|
|
||||||
return ioport_panic();
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline u32 inl(unsigned long addr)
|
|
||||||
{
|
|
||||||
return ioport_panic();
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void outb(u8 b, unsigned long addr)
|
|
||||||
{
|
|
||||||
ioport_panic();
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void outw(u16 b, unsigned long addr)
|
|
||||||
{
|
|
||||||
ioport_panic();
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void outl(u32 b, unsigned long addr)
|
|
||||||
{
|
|
||||||
ioport_panic();
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void insb(unsigned long addr, void *buffer, int count)
|
|
||||||
{
|
|
||||||
ioport_panic();
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void insw(unsigned long addr, void *buffer, int count)
|
|
||||||
{
|
|
||||||
ioport_panic();
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void insl(unsigned long addr, void *buffer, int count)
|
|
||||||
{
|
|
||||||
ioport_panic();
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void outsb(unsigned long addr, const void *buffer, int count)
|
|
||||||
{
|
|
||||||
ioport_panic();
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void outsw(unsigned long addr, const void *buffer, int count)
|
|
||||||
{
|
|
||||||
ioport_panic();
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void outsl(unsigned long addr, const void *buffer, int count)
|
|
||||||
{
|
|
||||||
ioport_panic();
|
|
||||||
}
|
|
||||||
|
|
||||||
#endif /* CHIP_HAS_MMIO() && defined(CONFIG_TILE_PCI_IO) */
|
|
||||||
|
|
||||||
#define inb_p(addr) inb(addr)
|
|
||||||
#define inw_p(addr) inw(addr)
|
|
||||||
#define inl_p(addr) inl(addr)
|
|
||||||
#define outb_p(x, addr) outb((x), (addr))
|
|
||||||
#define outw_p(x, addr) outw((x), (addr))
|
|
||||||
#define outl_p(x, addr) outl((x), (addr))
|
|
||||||
|
|
||||||
#define ioread16be(addr) be16_to_cpu(ioread16(addr))
|
|
||||||
#define ioread32be(addr) be32_to_cpu(ioread32(addr))
|
|
||||||
#define iowrite16be(v, addr) iowrite16(be16_to_cpu(v), (addr))
|
|
||||||
#define iowrite32be(v, addr) iowrite32(be32_to_cpu(v), (addr))
|
|
||||||
|
|
||||||
#define ioread8_rep(p, dst, count) \
|
|
||||||
insb((unsigned long) (p), (dst), (count))
|
|
||||||
#define ioread16_rep(p, dst, count) \
|
|
||||||
insw((unsigned long) (p), (dst), (count))
|
|
||||||
#define ioread32_rep(p, dst, count) \
|
|
||||||
insl((unsigned long) (p), (dst), (count))
|
|
||||||
|
|
||||||
#define iowrite8_rep(p, src, count) \
|
|
||||||
outsb((unsigned long) (p), (src), (count))
|
|
||||||
#define iowrite16_rep(p, src, count) \
|
|
||||||
outsw((unsigned long) (p), (src), (count))
|
|
||||||
#define iowrite32_rep(p, src, count) \
|
|
||||||
outsl((unsigned long) (p), (src), (count))
|
|
||||||
|
|
||||||
#define virt_to_bus virt_to_phys
|
|
||||||
#define bus_to_virt phys_to_virt
|
|
||||||
|
|
||||||
#endif /* _ASM_TILE_IO_H */
@@ -1,87 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
|
||||||
*
|
|
||||||
* This program is free software; you can redistribute it and/or
|
|
||||||
* modify it under the terms of the GNU General Public License
|
|
||||||
* as published by the Free Software Foundation, version 2.
|
|
||||||
*
|
|
||||||
* This program is distributed in the hope that it will be useful, but
|
|
||||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
|
||||||
* NON INFRINGEMENT. See the GNU General Public License for
|
|
||||||
* more details.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#ifndef _ASM_TILE_IRQ_H
|
|
||||||
#define _ASM_TILE_IRQ_H
|
|
||||||
|
|
||||||
#include <linux/hardirq.h>
|
|
||||||
|
|
||||||
/* The hypervisor interface provides 32 IRQs. */
|
|
||||||
#define NR_IRQS 32
|
|
||||||
|
|
||||||
/* IRQ numbers used for linux IPIs. */
|
|
||||||
#define IRQ_RESCHEDULE 0
|
|
||||||
/* Interrupts for dynamic allocation start at 1. Let the core allocate irq0 */
|
|
||||||
#define NR_IRQS_LEGACY 1
|
|
||||||
|
|
||||||
#define irq_canonicalize(irq) (irq)
|
|
||||||
|
|
||||||
void ack_bad_irq(unsigned int irq);
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Different ways of handling interrupts. Tile interrupts are always
|
|
||||||
* per-cpu; there is no global interrupt controller to implement
|
|
||||||
* enable/disable. Most onboard devices can send their interrupts to
|
|
||||||
* many tiles at the same time, and Tile-specific drivers know how to
|
|
||||||
* deal with this.
|
|
||||||
*
|
|
||||||
* However, generic devices (usually PCIE based, sometimes GPIO)
|
|
||||||
* expect that interrupts will fire on a single core at a time and
|
|
||||||
* that the irq can be enabled or disabled from any core at any time.
|
|
||||||
* We implement this by directing such interrupts to a single core.
|
|
||||||
*
|
|
||||||
* One added wrinkle is that PCI interrupts can be either
|
|
||||||
* hardware-cleared (legacy interrupts) or software cleared (MSI).
|
|
||||||
* Other generic device systems (GPIO) are always software-cleared.
|
|
||||||
*
|
|
||||||
* The enums below are used by drivers for onboard devices, including
|
|
||||||
* the internals of PCI root complex and GPIO. They allow the driver
|
|
||||||
* to tell the generic irq code what kind of interrupt is mapped to a
|
|
||||||
* particular IRQ number.
|
|
||||||
*/
|
|
||||||
enum {
|
|
||||||
/* per-cpu interrupt; use enable/disable_percpu_irq() to mask */
|
|
||||||
TILE_IRQ_PERCPU,
|
|
||||||
/* global interrupt, hardware responsible for clearing. */
|
|
||||||
TILE_IRQ_HW_CLEAR,
|
|
||||||
/* global interrupt, software responsible for clearing. */
|
|
||||||
TILE_IRQ_SW_CLEAR,
|
|
||||||
};
|
|
||||||
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Paravirtualized drivers should call this when they dynamically
|
|
||||||
* allocate a new IRQ or discover an IRQ that was pre-allocated by the
|
|
||||||
* hypervisor for use with their particular device. This gives the
|
|
||||||
* IRQ subsystem an opportunity to do interrupt-type-specific
|
|
||||||
* initialization.
|
|
||||||
*
|
|
||||||
* ISSUE: We should modify this API so that registering anything
|
|
||||||
* except percpu interrupts also requires providing callback methods
|
|
||||||
* for enabling and disabling the interrupt. This would allow the
|
|
||||||
* generic IRQ code to proxy enable/disable_irq() calls back into the
|
|
||||||
* PCI subsystem, which in turn could enable or disable the interrupt
|
|
||||||
* at the PCI shim.
|
|
||||||
*/
|
|
||||||
void tile_irq_activate(unsigned int irq, int tile_irq_type);
|
|
||||||
|
|
||||||
void setup_irq_regs(void);
|
|
||||||
|
|
||||||
#ifdef __tilegx__
|
|
||||||
void arch_trigger_cpumask_backtrace(const struct cpumask *mask,
|
|
||||||
bool exclude_self);
|
|
||||||
#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#endif /* _ASM_TILE_IRQ_H */
@@ -1,15 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_IRQ_WORK_H
#define __ASM_IRQ_WORK_H

static inline bool arch_irq_work_has_interrupt(void)
{
#ifdef CONFIG_SMP
	extern bool self_interrupt_ok;
	return self_interrupt_ok;
#else
	return false;
#endif
}

#endif /* __ASM_IRQ_WORK_H */
@@ -1,311 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
|
||||||
*
|
|
||||||
* This program is free software; you can redistribute it and/or
|
|
||||||
* modify it under the terms of the GNU General Public License
|
|
||||||
* as published by the Free Software Foundation, version 2.
|
|
||||||
*
|
|
||||||
* This program is distributed in the hope that it will be useful, but
|
|
||||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
|
||||||
* NON INFRINGEMENT. See the GNU General Public License for
|
|
||||||
* more details.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#ifndef _ASM_TILE_IRQFLAGS_H
|
|
||||||
#define _ASM_TILE_IRQFLAGS_H
|
|
||||||
|
|
||||||
#include <arch/interrupts.h>
|
|
||||||
#include <arch/chip.h>
|
|
||||||
|
|
||||||
/*
|
|
||||||
* The set of interrupts we want to allow when interrupts are nominally
|
|
||||||
* disabled. The remainder are effectively "NMI" interrupts from
|
|
||||||
* the point of view of the generic Linux code. Note that synchronous
|
|
||||||
* interrupts (aka "non-queued") are not blocked by the mask in any case.
|
|
||||||
*/
|
|
||||||
#define LINUX_MASKABLE_INTERRUPTS \
|
|
||||||
(~((_AC(1,ULL) << INT_PERF_COUNT) | (_AC(1,ULL) << INT_AUX_PERF_COUNT)))
|
|
||||||
|
|
||||||
#if CHIP_HAS_SPLIT_INTR_MASK()
|
|
||||||
/* The same macro, but for the two 32-bit SPRs separately. */
|
|
||||||
#define LINUX_MASKABLE_INTERRUPTS_LO (-1)
|
|
||||||
#define LINUX_MASKABLE_INTERRUPTS_HI \
|
|
||||||
(~((1 << (INT_PERF_COUNT - 32)) | (1 << (INT_AUX_PERF_COUNT - 32))))
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#ifndef __ASSEMBLY__
|
|
||||||
|
|
||||||
/* NOTE: we can't include <linux/percpu.h> due to #include dependencies. */
|
|
||||||
#include <asm/percpu.h>
|
|
||||||
#include <arch/spr_def.h>
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Set and clear kernel interrupt masks.
|
|
||||||
*
|
|
||||||
* NOTE: __insn_mtspr() is a compiler builtin marked as a memory
|
|
||||||
* clobber. We rely on it being equivalent to a compiler barrier in
|
|
||||||
* this code since arch_local_irq_save() and friends must act as
|
|
||||||
* compiler barriers. This compiler semantic is baked into enough
|
|
||||||
* places that the compiler will maintain it going forward.
|
|
||||||
*/
|
|
||||||
#if CHIP_HAS_SPLIT_INTR_MASK()
|
|
||||||
#if INT_PERF_COUNT < 32 || INT_AUX_PERF_COUNT < 32 || INT_MEM_ERROR >= 32
|
|
||||||
# error Fix assumptions about which word various interrupts are in
|
|
||||||
#endif
|
|
||||||
#define interrupt_mask_set(n) do { \
|
|
||||||
int __n = (n); \
|
|
||||||
int __mask = 1 << (__n & 0x1f); \
|
|
||||||
if (__n < 32) \
|
|
||||||
__insn_mtspr(SPR_INTERRUPT_MASK_SET_K_0, __mask); \
|
|
||||||
else \
|
|
||||||
__insn_mtspr(SPR_INTERRUPT_MASK_SET_K_1, __mask); \
|
|
||||||
} while (0)
|
|
||||||
#define interrupt_mask_reset(n) do { \
|
|
||||||
int __n = (n); \
|
|
||||||
int __mask = 1 << (__n & 0x1f); \
|
|
||||||
if (__n < 32) \
|
|
||||||
__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_0, __mask); \
|
|
||||||
else \
|
|
||||||
__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_1, __mask); \
|
|
||||||
} while (0)
|
|
||||||
#define interrupt_mask_check(n) ({ \
|
|
||||||
int __n = (n); \
|
|
||||||
(((__n < 32) ? \
|
|
||||||
__insn_mfspr(SPR_INTERRUPT_MASK_K_0) : \
|
|
||||||
__insn_mfspr(SPR_INTERRUPT_MASK_K_1)) \
|
|
||||||
>> (__n & 0x1f)) & 1; \
|
|
||||||
})
|
|
||||||
#define interrupt_mask_set_mask(mask) do { \
|
|
||||||
unsigned long long __m = (mask); \
|
|
||||||
__insn_mtspr(SPR_INTERRUPT_MASK_SET_K_0, (unsigned long)(__m)); \
|
|
||||||
__insn_mtspr(SPR_INTERRUPT_MASK_SET_K_1, (unsigned long)(__m>>32)); \
|
|
||||||
} while (0)
|
|
||||||
#define interrupt_mask_reset_mask(mask) do { \
|
|
||||||
unsigned long long __m = (mask); \
|
|
||||||
__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_0, (unsigned long)(__m)); \
|
|
||||||
__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_1, (unsigned long)(__m>>32)); \
|
|
||||||
} while (0)
|
|
||||||
#define interrupt_mask_save_mask() \
|
|
||||||
(__insn_mfspr(SPR_INTERRUPT_MASK_SET_K_0) | \
|
|
||||||
(((unsigned long long)__insn_mfspr(SPR_INTERRUPT_MASK_SET_K_1))<<32))
|
|
||||||
#define interrupt_mask_restore_mask(mask) do { \
|
|
||||||
unsigned long long __m = (mask); \
|
|
||||||
__insn_mtspr(SPR_INTERRUPT_MASK_K_0, (unsigned long)(__m)); \
|
|
||||||
__insn_mtspr(SPR_INTERRUPT_MASK_K_1, (unsigned long)(__m>>32)); \
|
|
||||||
} while (0)
|
|
||||||
#else
|
|
||||||
#define interrupt_mask_set(n) \
|
|
||||||
__insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (1UL << (n)))
|
|
||||||
#define interrupt_mask_reset(n) \
|
|
||||||
__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K, (1UL << (n)))
|
|
||||||
#define interrupt_mask_check(n) \
|
|
||||||
((__insn_mfspr(SPR_INTERRUPT_MASK_K) >> (n)) & 1)
|
|
||||||
#define interrupt_mask_set_mask(mask) \
|
|
||||||
__insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (mask))
|
|
||||||
#define interrupt_mask_reset_mask(mask) \
|
|
||||||
__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K, (mask))
|
|
||||||
#define interrupt_mask_save_mask() \
|
|
||||||
__insn_mfspr(SPR_INTERRUPT_MASK_K)
|
|
||||||
#define interrupt_mask_restore_mask(mask) \
|
|
||||||
__insn_mtspr(SPR_INTERRUPT_MASK_K, (mask))
|
|
||||||
#endif
|
|
||||||
|
|
||||||
/*
|
|
||||||
* The set of interrupts we want active if irqs are enabled.
|
|
||||||
* Note that in particular, the tile timer interrupt comes and goes
|
|
||||||
* from this set, since we have no other way to turn off the timer.
|
|
||||||
* Likewise, INTCTRL_K is removed and re-added during device
|
|
||||||
* interrupts, as is the hardwall UDN_FIREWALL interrupt.
|
|
||||||
* We use a low bit (MEM_ERROR) as our sentinel value and make sure it
|
|
||||||
* is always claimed as an "active interrupt" so we can query that bit
|
|
||||||
* to know our current state.
|
|
||||||
*/
|
|
||||||
DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
|
|
||||||
#define INITIAL_INTERRUPTS_ENABLED (1ULL << INT_MEM_ERROR)
|
|
||||||
|
|
||||||
#ifdef CONFIG_DEBUG_PREEMPT
|
|
||||||
/* Due to inclusion issues, we can't rely on <linux/smp.h> here. */
|
|
||||||
extern unsigned int debug_smp_processor_id(void);
|
|
||||||
# define smp_processor_id() debug_smp_processor_id()
|
|
||||||
#endif
|
|
||||||
|
|
||||||
/* Disable interrupts. */
|
|
||||||
#define arch_local_irq_disable() \
|
|
||||||
interrupt_mask_set_mask(LINUX_MASKABLE_INTERRUPTS)
|
|
||||||
|
|
||||||
/* Disable all interrupts, including NMIs. */
|
|
||||||
#define arch_local_irq_disable_all() \
|
|
||||||
interrupt_mask_set_mask(-1ULL)
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Read the set of maskable interrupts.
|
|
||||||
* We avoid the preemption warning here via raw_cpu_ptr since even
|
|
||||||
* if irqs are already enabled, it's harmless to read the wrong cpu's
|
|
||||||
* enabled mask.
|
|
||||||
*/
|
|
||||||
#define arch_local_irqs_enabled() \
|
|
||||||
(*raw_cpu_ptr(&interrupts_enabled_mask))
|
|
||||||
|
|
||||||
/* Re-enable all maskable interrupts. */
|
|
||||||
#define arch_local_irq_enable() \
|
|
||||||
interrupt_mask_reset_mask(arch_local_irqs_enabled())
|
|
||||||
|
|
||||||
/* Disable or enable interrupts based on flag argument. */
|
|
||||||
#define arch_local_irq_restore(disabled) do { \
|
|
||||||
if (disabled) \
|
|
||||||
arch_local_irq_disable(); \
|
|
||||||
else \
|
|
||||||
arch_local_irq_enable(); \
|
|
||||||
} while (0)
|
|
||||||
|
|
||||||
/* Return true if "flags" argument means interrupts are disabled. */
|
|
||||||
#define arch_irqs_disabled_flags(flags) ((flags) != 0)
|
|
||||||
|
|
||||||
/* Return true if interrupts are currently disabled. */
|
|
||||||
#define arch_irqs_disabled() interrupt_mask_check(INT_MEM_ERROR)
|
|
||||||
|
|
||||||
/* Save whether interrupts are currently disabled. */
|
|
||||||
#define arch_local_save_flags() arch_irqs_disabled()
|
|
||||||
|
|
||||||
/* Save whether interrupts are currently disabled, then disable them. */
|
|
||||||
#define arch_local_irq_save() ({ \
|
|
||||||
unsigned long __flags = arch_local_save_flags(); \
|
|
||||||
arch_local_irq_disable(); \
|
|
||||||
__flags; })
|
|
||||||
|
|
||||||
/* Prevent the given interrupt from being enabled next time we enable irqs. */
|
|
||||||
#define arch_local_irq_mask(interrupt) \
|
|
||||||
this_cpu_and(interrupts_enabled_mask, ~(1ULL << (interrupt)))
|
|
||||||
|
|
||||||
/* Prevent the given interrupt from being enabled immediately. */
|
|
||||||
#define arch_local_irq_mask_now(interrupt) do { \
|
|
||||||
arch_local_irq_mask(interrupt); \
|
|
||||||
interrupt_mask_set(interrupt); \
|
|
||||||
} while (0)
|
|
||||||
|
|
||||||
/* Allow the given interrupt to be enabled next time we enable irqs. */
|
|
||||||
#define arch_local_irq_unmask(interrupt) \
|
|
||||||
this_cpu_or(interrupts_enabled_mask, (1ULL << (interrupt)))
|
|
||||||
|
|
||||||
/* Allow the given interrupt to be enabled immediately, if !irqs_disabled. */
|
|
||||||
#define arch_local_irq_unmask_now(interrupt) do { \
|
|
||||||
arch_local_irq_unmask(interrupt); \
|
|
||||||
if (!irqs_disabled()) \
|
|
||||||
interrupt_mask_reset(interrupt); \
|
|
||||||
} while (0)
|
|
||||||
|
|
||||||
#else /* __ASSEMBLY__ */
|
|
||||||
|
|
||||||
/* We provide a somewhat more restricted set for assembly. */
|
|
||||||
|
|
||||||
#ifdef __tilegx__
|
|
||||||
|
|
||||||
#if INT_MEM_ERROR != 0
|
|
||||||
# error Fix IRQS_DISABLED() macro
|
|
||||||
#endif
|
|
||||||
|
|
||||||
/* Return 0 or 1 to indicate whether interrupts are currently disabled. */
|
|
||||||
#define IRQS_DISABLED(tmp) \
|
|
||||||
mfspr tmp, SPR_INTERRUPT_MASK_K; \
|
|
||||||
andi tmp, tmp, 1
|
|
||||||
|
|
||||||
/* Load up a pointer to &interrupts_enabled_mask. */
|
|
||||||
#define GET_INTERRUPTS_ENABLED_MASK_PTR(reg) \
|
|
||||||
moveli reg, hw2_last(interrupts_enabled_mask); \
|
|
||||||
shl16insli reg, reg, hw1(interrupts_enabled_mask); \
|
|
||||||
shl16insli reg, reg, hw0(interrupts_enabled_mask); \
|
|
||||||
add reg, reg, tp
|
|
||||||
|
|
||||||
/* Disable interrupts. */
|
|
||||||
#define IRQ_DISABLE(tmp0, tmp1) \
|
|
||||||
moveli tmp0, hw2_last(LINUX_MASKABLE_INTERRUPTS); \
|
|
||||||
shl16insli tmp0, tmp0, hw1(LINUX_MASKABLE_INTERRUPTS); \
|
|
||||||
shl16insli tmp0, tmp0, hw0(LINUX_MASKABLE_INTERRUPTS); \
|
|
||||||
mtspr SPR_INTERRUPT_MASK_SET_K, tmp0
|
|
||||||
|
|
||||||
/* Disable ALL synchronous interrupts (used by NMI entry). */
|
|
||||||
#define IRQ_DISABLE_ALL(tmp) \
|
|
||||||
movei tmp, -1; \
|
|
||||||
mtspr SPR_INTERRUPT_MASK_SET_K, tmp
|
|
||||||
|
|
||||||
/* Enable interrupts. */
|
|
||||||
#define IRQ_ENABLE_LOAD(tmp0, tmp1) \
|
|
||||||
GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0); \
|
|
||||||
ld tmp0, tmp0
|
|
||||||
#define IRQ_ENABLE_APPLY(tmp0, tmp1) \
|
|
||||||
mtspr SPR_INTERRUPT_MASK_RESET_K, tmp0
|
|
||||||
|
|
||||||
#else /* !__tilegx__ */
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Return 0 or 1 to indicate whether interrupts are currently disabled.
|
|
||||||
* Note that it's important that we use a bit from the "low" mask word,
|
|
||||||
* since when we are enabling, that is the word we write first, so if we
|
|
||||||
* are interrupted after only writing half of the mask, the interrupt
|
|
||||||
* handler will correctly observe that we have interrupts enabled, and
|
|
||||||
* will enable interrupts itself on return from the interrupt handler
|
|
||||||
* (making the original code's write of the "high" mask word idempotent).
|
|
||||||
*/
|
|
||||||
#define IRQS_DISABLED(tmp) \
|
|
||||||
mfspr tmp, SPR_INTERRUPT_MASK_K_0; \
|
|
||||||
shri tmp, tmp, INT_MEM_ERROR; \
|
|
||||||
andi tmp, tmp, 1
|
|
||||||
|
|
||||||
/* Load up a pointer to &interrupts_enabled_mask. */
|
|
||||||
#define GET_INTERRUPTS_ENABLED_MASK_PTR(reg) \
|
|
||||||
moveli reg, lo16(interrupts_enabled_mask); \
|
|
||||||
auli reg, reg, ha16(interrupts_enabled_mask); \
|
|
||||||
add reg, reg, tp
|
|
||||||
|
|
||||||
/* Disable interrupts. */
|
|
||||||
#define IRQ_DISABLE(tmp0, tmp1) \
|
|
||||||
{ \
|
|
||||||
movei tmp0, LINUX_MASKABLE_INTERRUPTS_LO; \
|
|
||||||
moveli tmp1, lo16(LINUX_MASKABLE_INTERRUPTS_HI) \
|
|
||||||
}; \
|
|
||||||
{ \
|
|
||||||
mtspr SPR_INTERRUPT_MASK_SET_K_0, tmp0; \
|
|
||||||
auli tmp1, tmp1, ha16(LINUX_MASKABLE_INTERRUPTS_HI) \
|
|
||||||
}; \
|
|
||||||
mtspr SPR_INTERRUPT_MASK_SET_K_1, tmp1
|
|
||||||
|
|
||||||
/* Disable ALL synchronous interrupts (used by NMI entry). */
|
|
||||||
#define IRQ_DISABLE_ALL(tmp) \
|
|
||||||
movei tmp, -1; \
|
|
||||||
mtspr SPR_INTERRUPT_MASK_SET_K_0, tmp; \
|
|
||||||
mtspr SPR_INTERRUPT_MASK_SET_K_1, tmp
|
|
||||||
|
|
||||||
/* Enable interrupts. */
|
|
||||||
#define IRQ_ENABLE_LOAD(tmp0, tmp1) \
|
|
||||||
GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0); \
|
|
||||||
{ \
|
|
||||||
lw tmp0, tmp0; \
|
|
||||||
addi tmp1, tmp0, 4 \
|
|
||||||
}; \
|
|
||||||
lw tmp1, tmp1
|
|
||||||
#define IRQ_ENABLE_APPLY(tmp0, tmp1) \
|
|
||||||
mtspr SPR_INTERRUPT_MASK_RESET_K_0, tmp0; \
|
|
||||||
mtspr SPR_INTERRUPT_MASK_RESET_K_1, tmp1
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#define IRQ_ENABLE(tmp0, tmp1) \
|
|
||||||
IRQ_ENABLE_LOAD(tmp0, tmp1); \
|
|
||||||
IRQ_ENABLE_APPLY(tmp0, tmp1)
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Do the CPU's IRQ-state tracing from assembly code. We call a
|
|
||||||
* C function, but almost everywhere we do, we don't mind clobbering
|
|
||||||
* all the caller-saved registers.
|
|
||||||
*/
|
|
||||||
#ifdef CONFIG_TRACE_IRQFLAGS
|
|
||||||
# define TRACE_IRQS_ON jal trace_hardirqs_on
|
|
||||||
# define TRACE_IRQS_OFF jal trace_hardirqs_off
|
|
||||||
#else
|
|
||||||
# define TRACE_IRQS_ON
|
|
||||||
# define TRACE_IRQS_OFF
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#endif /* __ASSEMBLY__ */
|
|
||||||
|
|
||||||
#endif /* _ASM_TILE_IRQFLAGS_H */
@@ -1,58 +0,0 @@
/*
 * Copyright 2015 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_JUMP_LABEL_H
#define _ASM_TILE_JUMP_LABEL_H

#include <arch/opcode.h>

#define JUMP_LABEL_NOP_SIZE TILE_BUNDLE_SIZE_IN_BYTES

static __always_inline bool arch_static_branch(struct static_key *key,
                                               bool branch)
{
        asm_volatile_goto("1:\n\t"
                "nop" "\n\t"
                ".pushsection __jump_table, \"aw\"\n\t"
                ".quad 1b, %l[l_yes], %0 + %1 \n\t"
                ".popsection\n\t"
                : : "i" (key), "i" (branch) : : l_yes);
        return false;
l_yes:
        return true;
}

static __always_inline bool arch_static_branch_jump(struct static_key *key,
                                                    bool branch)
{
        asm_volatile_goto("1:\n\t"
                "j %l[l_yes]" "\n\t"
                ".pushsection __jump_table, \"aw\"\n\t"
                ".quad 1b, %l[l_yes], %0 + %1 \n\t"
                ".popsection\n\t"
                : : "i" (key), "i" (branch) : : l_yes);
        return false;
l_yes:
        return true;
}

typedef u64 jump_label_t;

struct jump_entry {
        jump_label_t code;
        jump_label_t target;
        jump_label_t key;
};

#endif /* _ASM_TILE_JUMP_LABEL_H */
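For context (illustrative only, not part of this commit): the two arch hooks above back the generic static-key mechanism, which records each branch site in __jump_table and later patches the NOP bundle to a jump when the key is enabled. A minimal sketch of how generic kernel code consumes that mechanism, using the standard jump_label API (the key and function names below are made up for the example):

#include <linux/jump_label.h>
#include <linux/printk.h>

static DEFINE_STATIC_KEY_FALSE(sample_feature_key);

static void hot_path(void)
{
        /* Compiles to the single NOP bundle defined above until the key
         * is enabled, at which point the site is patched to a jump.
         */
        if (static_branch_unlikely(&sample_feature_key))
                pr_info("feature path taken\n");
}

static void enable_feature(void)
{
        static_branch_enable(&sample_feature_key);
}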
@ -1,28 +0,0 @@
/*
 * Copyright 2012 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_KDEBUG_H
#define _ASM_TILE_KDEBUG_H

#include <linux/notifier.h>

enum die_val {
        DIE_OOPS = 1,
        DIE_BREAK,
        DIE_SSTEPBP,
        DIE_PAGE_FAULT,
        DIE_COMPILED_BPT
};

#endif /* _ASM_TILE_KDEBUG_H */
@ -1,65 +0,0 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 *
 * based on kexec.h from other architectures in linux-2.6.18
 */

#ifndef _ASM_TILE_KEXEC_H
#define _ASM_TILE_KEXEC_H

#include <asm/page.h>

#ifndef __tilegx__
/* Maximum physical address we can use pages from. */
#define KEXEC_SOURCE_MEMORY_LIMIT TASK_SIZE
/* Maximum address we can reach in physical address mode. */
#define KEXEC_DESTINATION_MEMORY_LIMIT TASK_SIZE
/* Maximum address we can use for the control code buffer. */
#define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE
#else
/* We need to limit the memory below PGDIR_SIZE since
 * we only setup page table for [0, PGDIR_SIZE) before final kexec.
 */
/* Maximum physical address we can use pages from. */
#define KEXEC_SOURCE_MEMORY_LIMIT PGDIR_SIZE
/* Maximum address we can reach in physical address mode. */
#define KEXEC_DESTINATION_MEMORY_LIMIT PGDIR_SIZE
/* Maximum address we can use for the control code buffer. */
#define KEXEC_CONTROL_MEMORY_LIMIT PGDIR_SIZE
#endif

#define KEXEC_CONTROL_PAGE_SIZE PAGE_SIZE

/*
 * We don't bother to provide a unique identifier, since we can only
 * reboot with a single type of kernel image anyway.
 */
#define KEXEC_ARCH KEXEC_ARCH_DEFAULT

/* Use the tile override for the page allocator. */
struct page *kimage_alloc_pages_arch(gfp_t gfp_mask, unsigned int order);
#define kimage_alloc_pages_arch kimage_alloc_pages_arch

#define MAX_NOTE_BYTES 1024

/* Defined in arch/tile/kernel/relocate_kernel.S */
extern const unsigned char relocate_new_kernel[];
extern const unsigned long relocate_new_kernel_size;
extern void relocate_new_kernel_end(void);

/* Provide a dummy definition to avoid build failures. */
static inline void crash_setup_regs(struct pt_regs *n, struct pt_regs *o)
{
}

#endif /* _ASM_TILE_KEXEC_H */
@ -1,71 +0,0 @@
/*
 * Copyright 2013 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 *
 * TILE-Gx KGDB support.
 */

#ifndef __TILE_KGDB_H__
#define __TILE_KGDB_H__

#include <linux/kdebug.h>
#include <arch/opcode.h>

#define GDB_SIZEOF_REG sizeof(unsigned long)

/*
 * TILE-Gx gdb is expecting the following register layout:
 * 56 GPRs(R0 - R52, TP, SP, LR), 8 special GPRs(networks and ZERO),
 * plus the PC and the faultnum.
 *
 * Even though the kernel does not use the 8 special GPRs, they need to be
 * present in the registers sent for correct processing in the host-side gdb.
 *
 */
#define DBG_MAX_REG_NUM (56+8+2)
#define NUMREGBYTES (DBG_MAX_REG_NUM * GDB_SIZEOF_REG)

/*
 * BUFMAX defines the maximum number of characters in inbound/outbound
 * buffers; at least NUMREGBYTES*2 are needed for register packets, and a
 * longer buffer is needed to list all threads.
 */
#define BUFMAX 2048

#define BREAK_INSTR_SIZE TILEGX_BUNDLE_SIZE_IN_BYTES

/*
 * Require cache flush for set/clear a software breakpoint or write memory.
 */
#define CACHE_FLUSH_IS_SAFE 1

/*
 * The compiled-in breakpoint instruction can be used to "break" into
 * the debugger via magic system request key (sysrq-G).
 */
static tile_bundle_bits compiled_bpt = TILEGX_BPT_BUNDLE | DIE_COMPILED_BPT;

enum tilegx_regnum {
        TILEGX_PC_REGNUM = TREG_LAST_GPR + 9,
        TILEGX_FAULTNUM_REGNUM,
};

/*
 * Generate a breakpoint exception to "break" into the debugger.
 */
static inline void arch_kgdb_breakpoint(void)
{
        asm volatile (".quad %0\n\t"
                      ::""(compiled_bpt));
}

#endif /* __TILE_KGDB_H__ */
@ -1,28 +0,0 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_KMAP_TYPES_H
#define _ASM_TILE_KMAP_TYPES_H

/*
 * In 32-bit TILE Linux we have to balance the desire to have a lot of
 * nested atomic mappings with the fact that large page sizes and many
 * processors chew up address space quickly. In a typical
 * 64-processor, 64KB-page layout build, making KM_TYPE_NR one larger
 * adds 4MB of required address-space. For now we leave KM_TYPE_NR
 * set to depth 8.
 */
#define KM_TYPE_NR 8

#endif /* _ASM_TILE_KMAP_TYPES_H */
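For context (illustrative only, not part of this commit): the comment above is just arithmetic on the fixmap address-space cost. A tiny user-space sketch of that calculation, assuming the 64-CPU, 64KB-page layout the comment describes:

#include <stdio.h>

int main(void)
{
        unsigned long cpus = 64, page = 64 * 1024, depth = 8;

        /* One extra KM_TYPE_NR slot costs cpus * page bytes of VA. */
        printf("per-depth cost: %lu MB, total at depth 8: %lu MB\n",
               cpus * page >> 20, cpus * page * depth >> 20);
        return 0;
}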
@ -1,83 +0,0 @@
/*
 * arch/tile/include/asm/kprobes.h
 *
 * Copyright 2012 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_KPROBES_H
#define _ASM_TILE_KPROBES_H

#include <asm-generic/kprobes.h>

#ifdef CONFIG_KPROBES

#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/percpu.h>
#include <arch/opcode.h>

#define __ARCH_WANT_KPROBES_INSN_SLOT
#define MAX_INSN_SIZE 2

#define kretprobe_blacklist_size 0

typedef tile_bundle_bits kprobe_opcode_t;

#define flush_insn_slot(p) \
        flush_icache_range((unsigned long)p->addr, \
                           (unsigned long)p->addr + \
                           (MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))

struct kprobe;

/* Architecture specific copy of original instruction. */
struct arch_specific_insn {
        kprobe_opcode_t *insn;
};

struct prev_kprobe {
        struct kprobe *kp;
        unsigned long status;
        unsigned long saved_pc;
};

#define MAX_JPROBES_STACK_SIZE 128
#define MAX_JPROBES_STACK_ADDR \
        (((unsigned long)current_thread_info()) + THREAD_SIZE - 32 \
         - sizeof(struct pt_regs))

#define MIN_JPROBES_STACK_SIZE(ADDR) \
        ((((ADDR) + MAX_JPROBES_STACK_SIZE) > MAX_JPROBES_STACK_ADDR) \
         ? MAX_JPROBES_STACK_ADDR - (ADDR) \
         : MAX_JPROBES_STACK_SIZE)

/* per-cpu kprobe control block. */
struct kprobe_ctlblk {
        unsigned long kprobe_status;
        unsigned long kprobe_saved_pc;
        unsigned long jprobe_saved_sp;
        struct prev_kprobe prev_kprobe;
        struct pt_regs jprobe_saved_regs;
        char jprobes_stack[MAX_JPROBES_STACK_SIZE];
};

extern tile_bundle_bits breakpoint2_insn;
extern tile_bundle_bits breakpoint_insn;

void arch_remove_kprobe(struct kprobe *);

extern int kprobe_exceptions_notify(struct notifier_block *self,
                                    unsigned long val, void *data);

#endif /* CONFIG_KPROBES */
#endif /* _ASM_TILE_KPROBES_H */
@ -1,51 +0,0 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_LINKAGE_H
#define _ASM_TILE_LINKAGE_H

#include <feedback.h>

#define __ALIGN .align 8

/*
 * The STD_ENTRY and STD_ENDPROC macros put the function in a
 * self-named .text.foo section, and if linker feedback collection
 * is enabled, add a suitable call to the feedback collection code.
 * STD_ENTRY_SECTION lets you specify a non-standard section name.
 */

#define STD_ENTRY(name) \
        .pushsection .text.##name, "ax"; \
        ENTRY(name); \
        FEEDBACK_ENTER(name)

#define STD_ENTRY_SECTION(name, section) \
        .pushsection section, "ax"; \
        ENTRY(name); \
        FEEDBACK_ENTER_EXPLICIT(name, section, .Lend_##name - name)

#define STD_ENDPROC(name) \
        ENDPROC(name); \
        .Lend_##name:; \
        .popsection

/* Create a file-static function entry set up for feedback gathering. */
#define STD_ENTRY_LOCAL(name) \
        .pushsection .text.##name, "ax"; \
        ALIGN; \
        name:; \
        FEEDBACK_ENTER(name)

#endif /* _ASM_TILE_LINKAGE_H */
@ -1,32 +0,0 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_MMU_H
#define _ASM_TILE_MMU_H

/* Capture any arch- and mm-specific information. */
struct mm_context {
        /*
         * Written under the mmap_sem semaphore; read atomically
         * without the semaphore, but always set conservatively.
         */
        unsigned long priority_cached;
        unsigned long vdso_base;
};

typedef struct mm_context mm_context_t;

void leave_mm(int cpu);

#endif /* _ASM_TILE_MMU_H */
@ -1,137 +0,0 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_MMU_CONTEXT_H
#define _ASM_TILE_MMU_CONTEXT_H

#include <linux/smp.h>
#include <linux/mm_types.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/homecache.h>
#include <asm-generic/mm_hooks.h>

static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
        return 0;
}

/*
 * Note that arch/tile/kernel/head_NN.S and arch/tile/mm/migrate_NN.S
 * also call hv_install_context().
 */
static inline void __install_page_table(pgd_t *pgdir, int asid, pgprot_t prot)
{
        /* FIXME: DIRECTIO should not always be set. FIXME. */
        int rc = hv_install_context(__pa(pgdir), prot, asid,
                                    HV_CTX_DIRECTIO | CTX_PAGE_FLAG);
        if (rc < 0)
                panic("hv_install_context failed: %d", rc);
}

static inline void install_page_table(pgd_t *pgdir, int asid)
{
        pte_t *ptep = virt_to_kpte((unsigned long)pgdir);
        __install_page_table(pgdir, asid, *ptep);
}

/*
 * "Lazy" TLB mode is entered when we are switching to a kernel task,
 * which borrows the mm of the previous task. The goal of this
 * optimization is to avoid having to install a new page table. On
 * early x86 machines (where the concept originated) you couldn't do
 * anything short of a full page table install for invalidation, so
 * handling a remote TLB invalidate required doing a page table
 * re-install. Someone clearly decided that it was silly to keep
 * doing this while in "lazy" TLB mode, so the optimization involves
 * installing the swapper page table instead the first time one
 * occurs, and clearing the cpu out of cpu_vm_mask, so the cpu running
 * the kernel task doesn't need to take any more interrupts. At that
 * point it's then necessary to explicitly reinstall it when context
 * switching back to the original mm.
 *
 * On Tile, we have to do a page-table install whenever DMA is enabled,
 * so in that case lazy mode doesn't help anyway. And more generally,
 * we have efficient per-page TLB shootdown, and don't expect to spend
 * that much time in kernel tasks in general, so just leaving the
 * kernel task borrowing the old page table, but handling TLB
 * shootdowns, is a reasonable thing to do. And importantly, this
 * lets us use the hypervisor's internal APIs for TLB shootdown, which
 * means we don't have to worry about having TLB shootdowns blocked
 * when Linux is disabling interrupts; see the page migration code for
 * an example of where it's important for TLB shootdowns to complete
 * even when interrupts are disabled at the Linux level.
 */
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *t)
{
#if CHIP_HAS_TILE_DMA()
        /*
         * We have to do an "identity" page table switch in order to
         * clear any pending DMA interrupts.
         */
        if (current->thread.tile_dma_state.enabled)
                install_page_table(mm->pgd, __this_cpu_read(current_asid));
#endif
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
{
        if (likely(prev != next)) {

                int cpu = smp_processor_id();

                /* Pick new ASID. */
                int asid = __this_cpu_read(current_asid) + 1;
                if (asid > max_asid) {
                        asid = min_asid;
                        local_flush_tlb();
                }
                __this_cpu_write(current_asid, asid);

                /* Clear cpu from the old mm, and set it in the new one. */
                cpumask_clear_cpu(cpu, mm_cpumask(prev));
                cpumask_set_cpu(cpu, mm_cpumask(next));

                /* Re-load page tables */
                install_page_table(next->pgd, asid);

                /* See how we should set the red/black cache info */
                check_mm_caching(prev, next);

                /*
                 * Since we're changing to a new mm, we have to flush
                 * the icache in case some physical page now being mapped
                 * has subsequently been repurposed and has new code.
                 */
                __flush_icache();

        }
}

static inline void activate_mm(struct mm_struct *prev_mm,
                               struct mm_struct *next_mm)
{
        switch_mm(prev_mm, next_mm, NULL);
}

#define destroy_context(mm) do { } while (0)
#define deactivate_mm(tsk, mm) do { } while (0)

#endif /* _ASM_TILE_MMU_CONTEXT_H */
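For context (illustrative only, not part of this commit): switch_mm() above hands out ASIDs round-robin and flushes the local TLB only when the counter wraps, so stale translations tagged with a reused ASID can never survive. A user-space sketch of just that allocation policy, with assumed MIN_ASID/MAX_ASID values and a counter standing in for local_flush_tlb():

#include <stdio.h>

#define MIN_ASID 1
#define MAX_ASID 255

static unsigned int current_asid = MIN_ASID;
static unsigned long tlb_flushes;

static unsigned int next_asid(void)
{
        unsigned int asid = current_asid + 1;

        if (asid > MAX_ASID) {          /* wrapped: older ASIDs may be stale */
                asid = MIN_ASID;
                tlb_flushes++;          /* stands in for local_flush_tlb() */
        }
        current_asid = asid;
        return asid;
}

int main(void)
{
        for (int i = 0; i < 600; i++)
                next_asid();
        printf("asid=%u flushes=%lu\n", current_asid, tlb_flushes);
        return 0;
}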
@ -1,70 +0,0 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_MMZONE_H
#define _ASM_TILE_MMZONE_H

extern struct pglist_data node_data[];
#define NODE_DATA(nid) (&node_data[nid])

extern void get_memcfg_numa(void);

#ifdef CONFIG_DISCONTIGMEM

#include <asm/page.h>

/*
 * Generally, memory ranges are always doled out by the hypervisor in
 * fixed-size, power-of-two increments. That would make computing the node
 * very easy. We could just take a couple high bits of the PA, which
 * denote the memory shim, and we'd be done. However, when we're doing
 * memory striping, this may not be true; PAs with different high bit
 * values might be in the same node. Thus, we keep a lookup table to
 * translate the high bits of the PFN to the node number.
 */
extern int highbits_to_node[];

static inline int pfn_to_nid(unsigned long pfn)
{
        return highbits_to_node[__pfn_to_highbits(pfn)];
}

#define kern_addr_valid(kaddr) virt_addr_valid((void *)kaddr)

static inline int pfn_valid(unsigned long pfn)
{
        int nid = pfn_to_nid(pfn);

        if (nid >= 0)
                return (pfn < node_end_pfn(nid));
        return 0;
}

/* Information on the NUMA nodes that we compute early */
extern unsigned long node_start_pfn[];
extern unsigned long node_end_pfn[];
extern unsigned long node_memmap_pfn[];
extern unsigned long node_percpu_pfn[];
extern unsigned long node_free_pfn[];
#ifdef CONFIG_HIGHMEM
extern unsigned long node_lowmem_end_pfn[];
#endif
#ifdef CONFIG_PCI
extern unsigned long pci_reserve_start_pfn;
extern unsigned long pci_reserve_end_pfn;
#endif

#endif /* CONFIG_DISCONTIGMEM */

#endif /* _ASM_TILE_MMZONE_H */
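For context (illustrative only, not part of this commit): pfn_to_nid() above just indexes a table by the top bits of the PFN that identify the memory shim. A small user-space sketch of that arithmetic, with assumed example values (36-bit PA width, two memory shims, 64KB pages); the real values come from the removed <arch/chip.h>:

#include <stdio.h>

#define PAGE_SHIFT             16                       /* 64KB pages */
#define NR_PA_HIGHBIT_SHIFT    (36 - 1)                  /* MAX_PA_WIDTH - log2(#shims) */
#define PFN_HIGHBIT_SHIFT      (NR_PA_HIGHBIT_SHIFT - PAGE_SHIFT)

static int highbits_to_node[2] = { 0, 1 };               /* one node per shim */

static int pfn_to_nid(unsigned long pfn)
{
        return highbits_to_node[pfn >> PFN_HIGHBIT_SHIFT];
}

int main(void)
{
        unsigned long pfn = 0x90000;    /* PA 0x900000000 >> PAGE_SHIFT */
        printf("pfn %#lx -> node %d\n", pfn, pfn_to_nid(pfn));
        return 0;
}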
@ -1,40 +0,0 @@
/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_MODULE_H
#define _ASM_TILE_MODULE_H

#include <arch/chip.h>

#include <asm-generic/module.h>

/* We can't use modules built with different page sizes. */
#if defined(CONFIG_PAGE_SIZE_16KB)
# define MODULE_PGSZ " 16KB"
#elif defined(CONFIG_PAGE_SIZE_64KB)
# define MODULE_PGSZ " 64KB"
#else
# define MODULE_PGSZ ""
#endif

/* We don't really support no-SMP so tag if someone tries. */
#ifdef CONFIG_SMP
#define MODULE_NOSMP ""
#else
#define MODULE_NOSMP " nosmp"
#endif

#define MODULE_ARCH_VERMAGIC CHIP_ARCH_NAME MODULE_PGSZ MODULE_NOSMP

#endif /* _ASM_TILE_MODULE_H */
@ -1,345 +0,0 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_PAGE_H
#define _ASM_TILE_PAGE_H

#include <linux/const.h>
#include <hv/hypervisor.h>
#include <arch/chip.h>

/* PAGE_SHIFT and HPAGE_SHIFT determine the page sizes. */
#if defined(CONFIG_PAGE_SIZE_4KB) /* tilepro only */
#define PAGE_SHIFT 12
#define CTX_PAGE_FLAG HV_CTX_PG_SM_4K
#elif defined(CONFIG_PAGE_SIZE_16KB)
#define PAGE_SHIFT 14
#define CTX_PAGE_FLAG HV_CTX_PG_SM_16K
#elif defined(CONFIG_PAGE_SIZE_64KB)
#define PAGE_SHIFT 16
#define CTX_PAGE_FLAG HV_CTX_PG_SM_64K
#else
#error Page size not specified in Kconfig
#endif
#define HPAGE_SHIFT HV_LOG2_DEFAULT_PAGE_SIZE_LARGE

#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT)

#define PAGE_MASK (~(PAGE_SIZE - 1))
#define HPAGE_MASK (~(HPAGE_SIZE - 1))

/*
 * If the Kconfig doesn't specify, set a maximum zone order that
 * is enough so that we can create huge pages from small pages given
 * the respective sizes of the two page types. See <linux/mmzone.h>.
 */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define CONFIG_FORCE_MAX_ZONEORDER (HPAGE_SHIFT - PAGE_SHIFT + 1)
#endif

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/string.h>

struct page;

static inline void clear_page(void *page)
{
        memset(page, 0, PAGE_SIZE);
}

static inline void copy_page(void *to, void *from)
{
        memcpy(to, from, PAGE_SIZE);
}

static inline void clear_user_page(void *page, unsigned long vaddr,
                                   struct page *pg)
{
        clear_page(page);
}

static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
                                  struct page *topage)
{
        copy_page(to, from);
}

/*
 * Hypervisor page tables are made of the same basic structure.
 */

typedef HV_PTE pte_t;
typedef HV_PTE pgd_t;
typedef HV_PTE pgprot_t;

/*
 * User L2 page tables are managed as one L2 page table per page,
 * because we use the page allocator for them. This keeps the allocation
 * simple, but it's also inefficient, since L2 page tables are much smaller
 * than pages (currently 2KB vs 64KB). So we should revisit this.
 */
typedef struct page *pgtable_t;

/* Must be a macro since it is used to create constants. */
#define __pgprot(val) hv_pte(val)

/* Rarely-used initializers, typically with a "zero" value. */
#define __pte(x) hv_pte(x)
#define __pgd(x) hv_pte(x)

static inline u64 pgprot_val(pgprot_t pgprot)
{
        return hv_pte_val(pgprot);
}

static inline u64 pte_val(pte_t pte)
{
        return hv_pte_val(pte);
}

static inline u64 pgd_val(pgd_t pgd)
{
        return hv_pte_val(pgd);
}

#ifdef __tilegx__

typedef HV_PTE pmd_t;

#define __pmd(x) hv_pte(x)

static inline u64 pmd_val(pmd_t pmd)
{
        return hv_pte_val(pmd);
}

#endif

static inline __attribute_const__ int get_order(unsigned long size)
{
        return BITS_PER_LONG - __builtin_clzl((size - 1) >> PAGE_SHIFT);
}

#endif /* !__ASSEMBLY__ */

#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)

#define HUGE_MAX_HSTATE 6

#ifdef CONFIG_HUGETLB_PAGE
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#endif

/* Allow overriding how much VA or PA the kernel will use. */
#define MAX_PA_WIDTH CHIP_PA_WIDTH()
#define MAX_VA_WIDTH CHIP_VA_WIDTH()

/* Each memory controller has PAs distinct in their high bits. */
#define NR_PA_HIGHBIT_SHIFT (MAX_PA_WIDTH - CHIP_LOG_NUM_MSHIMS())
#define NR_PA_HIGHBIT_VALUES (1 << CHIP_LOG_NUM_MSHIMS())
#define __pa_to_highbits(pa) ((phys_addr_t)(pa) >> NR_PA_HIGHBIT_SHIFT)
#define __pfn_to_highbits(pfn) ((pfn) >> (NR_PA_HIGHBIT_SHIFT - PAGE_SHIFT))

#ifdef __tilegx__

/*
 * We reserve the lower half of memory for user-space programs, and the
 * upper half for system code. We re-map all of physical memory in the
 * upper half, which takes a quarter of our VA space. Then we have
 * the vmalloc regions. The supervisor code lives at the highest address,
 * with the hypervisor above that.
 *
 * Loadable kernel modules are placed immediately after the static
 * supervisor code, with each being allocated a 256MB region of
 * address space, so we don't have to worry about the range of "jal"
 * and other branch instructions.
 *
 * For now we keep life simple and just allocate one pmd (4GB) for vmalloc.
 * Similarly, for now we don't play any struct page mapping games.
 */

#if MAX_PA_WIDTH + 2 > MAX_VA_WIDTH
# error Too much PA to map with the VA available!
#endif

#define PAGE_OFFSET (-(_AC(1, UL) << (MAX_VA_WIDTH - 1)))
#define KERNEL_HIGH_VADDR _AC(0xfffffff800000000, UL) /* high 32GB */
#define FIXADDR_BASE (KERNEL_HIGH_VADDR - 0x300000000) /* 4 GB */
#define FIXADDR_TOP (KERNEL_HIGH_VADDR - 0x200000000) /* 4 GB */
#define _VMALLOC_START FIXADDR_TOP
#define MEM_SV_START (KERNEL_HIGH_VADDR - 0x100000000) /* 256 MB */
#define MEM_MODULE_START (MEM_SV_START + (256*1024*1024)) /* 256 MB */
#define MEM_MODULE_END (MEM_MODULE_START + (256*1024*1024))

#else /* !__tilegx__ */

/*
 * A PAGE_OFFSET of 0xC0000000 means that the kernel has
 * a virtual address space of one gigabyte, which limits the
 * amount of physical memory you can use to about 768MB.
 * If you want more physical memory than this then see the CONFIG_HIGHMEM
 * option in the kernel configuration.
 *
 * The top 16MB chunk in the table below is unavailable to Linux. Since
 * the kernel interrupt vectors must live at either 0xfe000000 or 0xfd000000
 * (depending on whether the kernel is at PL2 or PL1), we map all of the
 * bottom of RAM at this address with a huge page table entry to minimize
 * its ITLB footprint (as well as at PAGE_OFFSET). The last architected
 * requirement is that user interrupt vectors live at 0xfc000000, so we
 * make that range of memory available to user processes. The remaining
 * regions are sized as shown; the first four addresses use the PL 1
 * values, and after that, we show "typical" values, since the actual
 * addresses depend on kernel #defines.
 *
 * MEM_HV_START                    0xfe000000
 * MEM_SV_START (kernel code)      0xfd000000
 * MEM_USER_INTRPT (user vector)   0xfc000000
 * FIX_KMAP_xxx                    0xfa000000 (via NR_CPUS * KM_TYPE_NR)
 * PKMAP_BASE                      0xf9000000 (via LAST_PKMAP)
 * VMALLOC_START                   0xf7000000 (via VMALLOC_RESERVE)
 * mapped LOWMEM                   0xc0000000
 */

#define MEM_USER_INTRPT _AC(0xfc000000, UL)
#define MEM_SV_START _AC(0xfd000000, UL)
#define MEM_HV_START _AC(0xfe000000, UL)

#define INTRPT_SIZE 0x4000

/* Tolerate page size larger than the architecture interrupt region size. */
#if PAGE_SIZE > INTRPT_SIZE
#undef INTRPT_SIZE
#define INTRPT_SIZE PAGE_SIZE
#endif

#define KERNEL_HIGH_VADDR MEM_USER_INTRPT
#define FIXADDR_TOP (KERNEL_HIGH_VADDR - PAGE_SIZE)

#define PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)

/* On 32-bit architectures we mix kernel modules in with other vmaps. */
#define MEM_MODULE_START VMALLOC_START
#define MEM_MODULE_END VMALLOC_END

#endif /* __tilegx__ */

#if !defined(__ASSEMBLY__) && !defined(VDSO_BUILD)

#ifdef CONFIG_HIGHMEM

/* Map kernel virtual addresses to page frames, in HPAGE_SIZE chunks. */
extern unsigned long pbase_map[];
extern void *vbase_map[];

static inline unsigned long kaddr_to_pfn(const volatile void *_kaddr)
{
        unsigned long kaddr = (unsigned long)_kaddr;
        return pbase_map[kaddr >> HPAGE_SHIFT] +
                ((kaddr & (HPAGE_SIZE - 1)) >> PAGE_SHIFT);
}

static inline void *pfn_to_kaddr(unsigned long pfn)
{
        return vbase_map[__pfn_to_highbits(pfn)] + (pfn << PAGE_SHIFT);
}

static inline phys_addr_t virt_to_phys(const volatile void *kaddr)
{
        unsigned long pfn = kaddr_to_pfn(kaddr);
        return ((phys_addr_t)pfn << PAGE_SHIFT) +
                ((unsigned long)kaddr & (PAGE_SIZE-1));
}

static inline void *phys_to_virt(phys_addr_t paddr)
{
        return pfn_to_kaddr(paddr >> PAGE_SHIFT) + (paddr & (PAGE_SIZE-1));
}

/* With HIGHMEM, we pack PAGE_OFFSET through high_memory with all valid VAs. */
static inline int virt_addr_valid(const volatile void *kaddr)
{
        extern void *high_memory; /* copied from <linux/mm.h> */
        return ((unsigned long)kaddr >= PAGE_OFFSET && kaddr < high_memory);
}

#else /* !CONFIG_HIGHMEM */

static inline unsigned long kaddr_to_pfn(const volatile void *kaddr)
{
        return ((unsigned long)kaddr - PAGE_OFFSET) >> PAGE_SHIFT;
}

static inline void *pfn_to_kaddr(unsigned long pfn)
{
        return (void *)((pfn << PAGE_SHIFT) + PAGE_OFFSET);
}

static inline phys_addr_t virt_to_phys(const volatile void *kaddr)
{
        return (phys_addr_t)((unsigned long)kaddr - PAGE_OFFSET);
}

static inline void *phys_to_virt(phys_addr_t paddr)
{
        return (void *)((unsigned long)paddr + PAGE_OFFSET);
}

/* Check that the given address is within some mapped range of PAs. */
#define virt_addr_valid(kaddr) pfn_valid(kaddr_to_pfn(kaddr))

#endif /* !CONFIG_HIGHMEM */

/* Not all callers are consistent in how they call these functions. */
#define __pa(kaddr) virt_to_phys((void *)(unsigned long)(kaddr))
#define __va(paddr) phys_to_virt((phys_addr_t)(paddr))

extern int devmem_is_allowed(unsigned long pagenr);

#ifdef CONFIG_FLATMEM
static inline int pfn_valid(unsigned long pfn)
{
        return pfn < max_mapnr;
}
#endif

/* Provide as macros since these require some other headers included. */
#define page_to_pa(page) ((phys_addr_t)(page_to_pfn(page)) << PAGE_SHIFT)
#define virt_to_page(kaddr) pfn_to_page(kaddr_to_pfn((void *)(kaddr)))
#define page_to_virt(page) pfn_to_kaddr(page_to_pfn(page))

/*
 * The kernel text is mapped at MEM_SV_START as read-only. To allow
 * modifying kernel text, it is also mapped at PAGE_OFFSET as read-write.
 * This macro converts a kernel address to its writable kernel text mapping,
 * which is used to modify the text code on a running kernel by kgdb,
 * ftrace, kprobe, jump label, etc.
 */
#define ktext_writable_addr(kaddr) \
        ((unsigned long)(kaddr) - MEM_SV_START + PAGE_OFFSET)

struct mm_struct;
extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
extern pte_t *virt_to_kpte(unsigned long kaddr);

#endif /* !__ASSEMBLY__ */

#define VM_DATA_DEFAULT_FLAGS \
        (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#include <asm-generic/memory_model.h>

#endif /* _ASM_TILE_PAGE_H */
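For context (illustrative only, not part of this commit): get_order() above computes the allocation order from the size with a count-leading-zeros trick. A small user-space sketch of the same arithmetic, with an assumed 64KB PAGE_SIZE and 64-bit longs; sizes of one page or less are avoided because (size - 1) >> PAGE_SHIFT would be 0 and __builtin_clzl(0) is undefined:

#include <stdio.h>

#define PAGE_SHIFT    16
#define BITS_PER_LONG 64

static int get_order(unsigned long size)
{
        return BITS_PER_LONG - __builtin_clzl((size - 1) >> PAGE_SHIFT);
}

int main(void)
{
        /* 65KB -> order 1, 1MB -> order 4, 16MB -> order 8 */
        printf("%d %d %d\n",
               get_order(65 * 1024), get_order(1UL << 20), get_order(16UL << 20));
        return 0;
}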
@ -1,229 +0,0 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_PCI_H
#define _ASM_TILE_PCI_H

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <asm-generic/pci_iomap.h>

#ifndef __tilegx__

/*
 * Structure of a PCI controller (host bridge)
 */
struct pci_controller {
        int index; /* PCI domain number */
        struct pci_bus *root_bus;

        int last_busno;

        int hv_cfg_fd[2]; /* config{0,1} fds for this PCIe controller */
        int hv_mem_fd; /* fd to Hypervisor for MMIO operations */

        struct pci_ops *ops;

        int irq_base; /* Base IRQ from the Hypervisor */
        int plx_gen1; /* flag for PLX Gen 1 configuration */

        /* Address ranges that are routed to this controller/bridge. */
        struct resource mem_resources[3];
};

/*
 * This flag tells whether the platform is TILEmpower, which needs
 * special configuration for the PLX switch chip.
 */
extern int tile_plx_gen1;

static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) {}

#define TILE_NUM_PCIE 2

/*
 * The hypervisor maps the entirety of CPA-space as bus addresses, so
 * bus addresses are physical addresses. The networking and block
 * device layers use this boolean for bounce buffer decisions.
 */
#define PCI_DMA_BUS_IS_PHYS 1

/* generic pci stuff */
#include <asm-generic/pci.h>

#else

#include <asm/page.h>
#include <gxio/trio.h>

/**
 * We reserve the hugepage-size address range at the top of the 64-bit address
 * space to serve as the PCI window, emulating the BAR0 space of an endpoint
 * device. This window is used by the chip-to-chip applications running on
 * the RC node. The reason for carving out this window is that Mem-Maps that
 * back up this window will not overlap with those that map the real physical
 * memory.
 */
#define PCIE_HOST_BAR0_SIZE HPAGE_SIZE
#define PCIE_HOST_BAR0_START HPAGE_MASK

/**
 * The first PAGE_SIZE of the above "BAR" window is mapped to the
 * gxpci_host_regs structure.
 */
#define PCIE_HOST_REGS_SIZE PAGE_SIZE

/*
 * This is the PCI address where the Mem-Map interrupt regions start.
 * We use the 2nd to the last huge page of the 64-bit address space.
 * The last huge page is used for the rootcomplex "bar", for C2C purpose.
 */
#define MEM_MAP_INTR_REGIONS_BASE (HPAGE_MASK - HPAGE_SIZE)

/*
 * Each Mem-Map interrupt region occupies 4KB.
 */
#define MEM_MAP_INTR_REGION_SIZE (1 << TRIO_MAP_MEM_LIM__ADDR_SHIFT)

/*
 * Allocate the PCI BAR window right below 4GB.
 */
#define TILE_PCI_BAR_WINDOW_TOP (1ULL << 32)

/*
 * Allocate 1GB for the PCI BAR window.
 */
#define TILE_PCI_BAR_WINDOW_SIZE (1 << 30)

/*
 * This is the highest bus address targeting the host memory that
 * can be generated by legacy PCI devices with 32-bit or less
 * DMA capability, dictated by the BAR window size and location.
 */
#define TILE_PCI_MAX_DIRECT_DMA_ADDRESS \
        (TILE_PCI_BAR_WINDOW_TOP - TILE_PCI_BAR_WINDOW_SIZE - 1)

/*
 * We shift the PCI bus range for all the physical memory up by the whole PA
 * range. The corresponding CPA of an incoming PCI request will be the PCI
 * address minus TILE_PCI_MEM_MAP_BASE_OFFSET. This also implies
 * that the 64-bit capable devices will be given DMA addresses as
 * the CPA plus TILE_PCI_MEM_MAP_BASE_OFFSET. To support 32-bit
 * devices, we create a separate map region that handles the low
 * 4GB.
 *
 * This design lets us avoid the "PCI hole" problem where the host bridge
 * won't pass DMA traffic with target addresses that happen to fall within the
 * BAR space. This enables us to use all the physical memory for DMA, instead
 * of wasting the same amount of physical memory as the BAR window size.
 */
#define TILE_PCI_MEM_MAP_BASE_OFFSET (1ULL << CHIP_PA_WIDTH())

/*
 * Start of the PCI memory resource, which starts at the end of the
 * maximum system physical RAM address.
 */
#define TILE_PCI_MEM_START (1ULL << CHIP_PA_WIDTH())

/*
 * Structure of a PCI controller (host bridge) on Gx.
 */
struct pci_controller {

        /* Pointer back to the TRIO that this PCIe port is connected to. */
        gxio_trio_context_t *trio;
        int mac; /* PCIe mac index on the TRIO shim */
        int trio_index; /* Index of TRIO shim that contains the MAC. */

        int pio_mem_index; /* PIO region index for memory access */

#ifdef CONFIG_TILE_PCI_IO
        int pio_io_index; /* PIO region index for I/O space access */
#endif

        /*
         * Mem-Map regions for all the memory controllers so that Linux can
         * map all of its physical memory space to the PCI bus.
         */
        int mem_maps[MAX_NUMNODES];

        int index; /* PCI domain number */
        struct pci_bus *root_bus;

        /* PCI I/O space resource for this controller. */
        struct resource io_space;
        char io_space_name[32];

        /* PCI memory space resource for this controller. */
        struct resource mem_space;
        char mem_space_name[32];

        uint64_t mem_offset; /* cpu->bus memory mapping offset. */

        int first_busno;

        struct pci_ops *ops;

        /* Table that maps the INTx numbers to Linux irq numbers. */
        int irq_intx_table[4];
};

extern struct pci_controller pci_controllers[TILEGX_NUM_TRIO * TILEGX_TRIO_PCIES];
extern gxio_trio_context_t trio_contexts[TILEGX_NUM_TRIO];
extern int num_trio_shims;

extern void pci_iounmap(struct pci_dev *dev, void __iomem *);

/*
 * The PCI address space does not equal the physical memory address
 * space (we have an IOMMU). The IDE and SCSI device layers use this
 * boolean for bounce buffer decisions.
 */
#define PCI_DMA_BUS_IS_PHYS 0

#endif /* __tilegx__ */

int __init tile_pci_init(void);
int __init pcibios_init(void);

void pcibios_fixup_bus(struct pci_bus *bus);

#define pci_domain_nr(bus) (((struct pci_controller *)(bus)->sysdata)->index)

/*
 * This decides whether to display the domain number in /proc.
 */
static inline int pci_proc_domain(struct pci_bus *bus)
{
        return 1;
}

/*
 * pcibios_assign_all_busses() tells whether or not the bus numbers
 * should be reassigned, in case the BIOS didn't do it correctly, or
 * in case we don't have a BIOS and we want to let Linux do it.
 */
static inline int pcibios_assign_all_busses(void)
{
        return 1;
}

#define PCIBIOS_MIN_MEM 0
/* Minimum PCI I/O address, starting at the page boundary. */
#define PCIBIOS_MIN_IO PAGE_SIZE

/* Use any cpu for PCI. */
#define cpumask_of_pcibus(bus) cpu_online_mask

#endif /* _ASM_TILE_PCI_H */
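For context (illustrative only, not part of this commit): the "shift the bus range up by the whole PA range" comment above is a single addition on the DMA address. A user-space sketch of that arithmetic, with an assumed 40-bit PA width; the real value comes from CHIP_PA_WIDTH() in the removed <arch/chip.h>:

#include <stdio.h>
#include <stdint.h>

#define CHIP_PA_WIDTH 40
#define TILE_PCI_MEM_MAP_BASE_OFFSET (1ULL << CHIP_PA_WIDTH)

int main(void)
{
        uint64_t cpa = 0x123450000ULL;          /* a chip physical address */
        uint64_t bus = cpa + TILE_PCI_MEM_MAP_BASE_OFFSET;

        /* An inbound 64-bit DMA to "bus" is translated back by subtracting
         * the same offset, so it can never collide with the BAR window that
         * is kept below 4GB.
         */
        printf("cpa=%#llx bus=%#llx\n",
               (unsigned long long)cpa, (unsigned long long)bus);
        return 0;
}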
@ -1,52 +0,0 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_PERCPU_H
#define _ASM_TILE_PERCPU_H

register unsigned long my_cpu_offset_reg asm("tp");

#ifdef CONFIG_PREEMPT
/*
 * For full preemption, we can't just use the register variable
 * directly, since we need barrier() to hazard against it, causing the
 * compiler to reload anything computed from a previous "tp" value.
 * But we also don't want to use volatile asm, since we'd like the
 * compiler to be able to cache the value across multiple percpu reads.
 * So we use a fake stack read as a hazard against barrier().
 * The 'U' constraint is like 'm' but disallows postincrement.
 */
static inline unsigned long __my_cpu_offset(void)
{
        unsigned long tp;
        register unsigned long *sp asm("sp");
        asm("move %0, tp" : "=r" (tp) : "U" (*sp));
        return tp;
}
#define __my_cpu_offset __my_cpu_offset()
#else
/*
 * We don't need to hazard against barrier() since "tp" doesn't ever
 * change with PREEMPT_NONE, and with PREEMPT_VOLUNTARY it only
 * changes at function call points, at which we are already re-reading
 * the value of "tp" due to "my_cpu_offset_reg" being a global variable.
 */
#define __my_cpu_offset my_cpu_offset_reg
#endif

#define set_my_cpu_offset(tp) (my_cpu_offset_reg = (tp))

#include <asm-generic/percpu.h>

#endif /* _ASM_TILE_PERCPU_H */
@ -1,22 +0,0 @@
/*
 * Copyright 2014 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_PERF_EVENT_H
#define _ASM_TILE_PERF_EVENT_H

#include <linux/percpu.h>
DECLARE_PER_CPU(u64, perf_irqs);

unsigned long handle_syscall_link_address(void);
#endif /* _ASM_TILE_PERF_EVENT_H */
@ -1,164 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
|
||||||
*
|
|
||||||
* This program is free software; you can redistribute it and/or
|
|
||||||
* modify it under the terms of the GNU General Public License
|
|
||||||
* as published by the Free Software Foundation, version 2.
|
|
||||||
*
|
|
||||||
* This program is distributed in the hope that it will be useful, but
|
|
||||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
|
||||||
* NON INFRINGEMENT. See the GNU General Public License for
|
|
||||||
* more details.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#ifndef _ASM_TILE_PGALLOC_H
|
|
||||||
#define _ASM_TILE_PGALLOC_H
|
|
||||||
|
|
||||||
#include <linux/threads.h>
|
|
||||||
#include <linux/mm.h>
|
|
||||||
#include <linux/mmzone.h>
|
|
||||||
#include <asm/fixmap.h>
|
|
||||||
#include <asm/page.h>
|
|
||||||
#include <hv/hypervisor.h>
|
|
||||||
|
|
||||||
/* Bits for the size of the second-level page table. */
|
|
||||||
#define L2_KERNEL_PGTABLE_SHIFT _HV_LOG2_L2_SIZE(HPAGE_SHIFT, PAGE_SHIFT)
|
|
||||||
|
|
||||||
/* How big is a kernel L2 page table? */
|
|
||||||
#define L2_KERNEL_PGTABLE_SIZE (1UL << L2_KERNEL_PGTABLE_SHIFT)
|
|
||||||
|
|
||||||
/* We currently allocate user L2 page tables by page (unlike kernel L2s). */
|
|
||||||
#if L2_KERNEL_PGTABLE_SHIFT < PAGE_SHIFT
|
|
||||||
#define L2_USER_PGTABLE_SHIFT PAGE_SHIFT
|
|
||||||
#else
|
|
||||||
#define L2_USER_PGTABLE_SHIFT L2_KERNEL_PGTABLE_SHIFT
|
|
||||||
#endif
|
|
||||||
|
|
||||||
/* How many pages do we need, as an "order", for a user L2 page table? */
|
|
||||||
#define L2_USER_PGTABLE_ORDER (L2_USER_PGTABLE_SHIFT - PAGE_SHIFT)
|
|
||||||
|
|
||||||
static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
|
|
||||||
{
|
|
||||||
#ifdef CONFIG_64BIT
|
|
||||||
set_pte(pmdp, pmd);
|
|
||||||
#else
|
|
||||||
set_pte(&pmdp->pud.pgd, pmd.pud.pgd);
|
|
||||||
#endif
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void pmd_populate_kernel(struct mm_struct *mm,
|
|
||||||
pmd_t *pmd, pte_t *ptep)
|
|
||||||
{
|
|
||||||
set_pmd(pmd, ptfn_pmd(HV_CPA_TO_PTFN(__pa(ptep)),
|
|
||||||
__pgprot(_PAGE_PRESENT)));
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
|
|
||||||
pgtable_t page)
|
|
||||||
{
|
|
||||||
set_pmd(pmd, ptfn_pmd(HV_CPA_TO_PTFN(PFN_PHYS(page_to_pfn(page))),
|
|
||||||
__pgprot(_PAGE_PRESENT)));
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Allocate and free page tables.
|
|
||||||
*/
|
|
||||||
|
|
||||||
extern pgd_t *pgd_alloc(struct mm_struct *mm);
|
|
||||||
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
|
|
||||||
|
|
||||||
extern pgtable_t pgtable_alloc_one(struct mm_struct *mm, unsigned long address,
|
|
||||||
int order);
|
|
||||||
extern void pgtable_free(struct mm_struct *mm, struct page *pte, int order);
|
|
||||||
|
|
||||||
static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
|
|
||||||
unsigned long address)
|
|
||||||
{
|
|
||||||
return pgtable_alloc_one(mm, address, L2_USER_PGTABLE_ORDER);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void pte_free(struct mm_struct *mm, struct page *pte)
|
|
||||||
{
|
|
||||||
pgtable_free(mm, pte, L2_USER_PGTABLE_ORDER);
|
|
||||||
}
|
|
||||||
|
|
||||||
#define pmd_pgtable(pmd) pmd_page(pmd)
|
|
||||||
|
|
||||||
static inline pte_t *
|
|
||||||
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
|
|
||||||
{
|
|
||||||
return pfn_to_kaddr(page_to_pfn(pte_alloc_one(mm, address)));
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
|
|
||||||
{
|
|
||||||
BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
|
|
||||||
pte_free(mm, virt_to_page(pte));
|
|
||||||
}
|
|
||||||
|
|
||||||
extern void __pgtable_free_tlb(struct mmu_gather *tlb, struct page *pte,
|
|
||||||
unsigned long address, int order);
|
|
||||||
static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
|
|
||||||
unsigned long address)
|
|
||||||
{
|
|
||||||
__pgtable_free_tlb(tlb, pte, address, L2_USER_PGTABLE_ORDER);
|
|
||||||
}
|
|
||||||
|
|
||||||
#define check_pgt_cache() do { } while (0)
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Get the small-page pte_t lowmem entry for a given pfn.
|
|
||||||
* This may or may not be in use, depending on whether the initial
|
|
||||||
* huge-page entry for the page has already been shattered.
|
|
||||||
*/
|
|
||||||
pte_t *get_prealloc_pte(unsigned long pfn);
|
|
||||||
|
|
||||||
/* During init, we can shatter kernel huge pages if needed. */
|
|
||||||
void shatter_pmd(pmd_t *pmd);
|
|
||||||
|
|
||||||
/* After init, a more complex technique is required. */
|
|
||||||
void shatter_huge_page(unsigned long addr);
|
|
||||||
|
|
||||||
#ifdef __tilegx__
|
|
||||||
|
|
||||||
#define pud_populate(mm, pud, pmd) \
|
|
||||||
pmd_populate_kernel((mm), (pmd_t *)(pud), (pte_t *)(pmd))
|
|
||||||
|
|
||||||
/* Bits for the size of the L1 (intermediate) page table. */
|
|
||||||
#define L1_KERNEL_PGTABLE_SHIFT _HV_LOG2_L1_SIZE(HPAGE_SHIFT)
|
|
||||||
|
|
||||||
/* How big is a kernel L2 page table? */
|
|
||||||
#define L1_KERNEL_PGTABLE_SIZE (1UL << L1_KERNEL_PGTABLE_SHIFT)
|
|
||||||
|
|
||||||
/* We currently allocate L1 page tables by page. */
|
|
||||||
#if L1_KERNEL_PGTABLE_SHIFT < PAGE_SHIFT
|
|
||||||
#define L1_USER_PGTABLE_SHIFT PAGE_SHIFT
|
|
||||||
#else
|
|
||||||
#define L1_USER_PGTABLE_SHIFT L1_KERNEL_PGTABLE_SHIFT
|
|
||||||
#endif
|
|
||||||
|
|
||||||
/* How many pages do we need, as an "order", for an L1 page table? */
|
|
||||||
#define L1_USER_PGTABLE_ORDER (L1_USER_PGTABLE_SHIFT - PAGE_SHIFT)
|
|
||||||
|
|
||||||
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
|
|
||||||
{
|
|
||||||
struct page *p = pgtable_alloc_one(mm, address, L1_USER_PGTABLE_ORDER);
|
|
||||||
return (pmd_t *)page_to_virt(p);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmdp)
|
|
||||||
{
|
|
||||||
pgtable_free(mm, virt_to_page(pmdp), L1_USER_PGTABLE_ORDER);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
|
|
||||||
unsigned long address)
|
|
||||||
{
|
|
||||||
__pgtable_free_tlb(tlb, virt_to_page(pmdp), address,
|
|
||||||
L1_USER_PGTABLE_ORDER);
|
|
||||||
}
|
|
||||||
|
|
||||||
#endif /* __tilegx__ */
|
|
||||||
|
|
||||||
#endif /* _ASM_TILE_PGALLOC_H */
@ -1,518 +0,0 @@
/*
|
|
||||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
|
||||||
*
|
|
||||||
* This program is free software; you can redistribute it and/or
|
|
||||||
* modify it under the terms of the GNU General Public License
|
|
||||||
* as published by the Free Software Foundation, version 2.
|
|
||||||
*
|
|
||||||
* This program is distributed in the hope that it will be useful, but
|
|
||||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
|
||||||
* NON INFRINGEMENT. See the GNU General Public License for
|
|
||||||
* more details.
|
|
||||||
*
|
|
||||||
* This file contains the functions and defines necessary to modify and use
|
|
||||||
* the TILE page table tree.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#ifndef _ASM_TILE_PGTABLE_H
|
|
||||||
#define _ASM_TILE_PGTABLE_H
|
|
||||||
|
|
||||||
#include <hv/hypervisor.h>
|
|
||||||
|
|
||||||
#ifndef __ASSEMBLY__
|
|
||||||
|
|
||||||
#include <linux/bitops.h>
|
|
||||||
#include <linux/threads.h>
|
|
||||||
#include <linux/slab.h>
|
|
||||||
#include <linux/list.h>
|
|
||||||
#include <linux/spinlock.h>
|
|
||||||
#include <linux/pfn.h>
|
|
||||||
#include <asm/processor.h>
|
|
||||||
#include <asm/fixmap.h>
|
|
||||||
#include <asm/page.h>
|
|
||||||
|
|
||||||
struct mm_struct;
|
|
||||||
struct vm_area_struct;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* ZERO_PAGE is a global shared page that is always zero: used
|
|
||||||
* for zero-mapped memory areas etc..
|
|
||||||
*/
|
|
||||||
extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
|
|
||||||
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
|
|
||||||
|
|
||||||
extern pgd_t swapper_pg_dir[];
|
|
||||||
extern pgprot_t swapper_pgprot;
|
|
||||||
extern struct kmem_cache *pgd_cache;
|
|
||||||
extern spinlock_t pgd_lock;
|
|
||||||
extern struct list_head pgd_list;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* The very last slots in the pgd_t are for addresses unusable by Linux
|
|
||||||
* (pgd_addr_invalid() returns true). So we use them for the list structure.
|
|
||||||
* The x86 code we are modelled on uses the page->private/index fields
|
|
||||||
* (older 2.6 kernels) or the lru list (newer 2.6 kernels), but since
|
|
||||||
* our pgds are so much smaller than a page, it seems a waste to
|
|
||||||
* spend a whole page on each pgd.
|
|
||||||
*/
|
|
||||||
#define PGD_LIST_OFFSET \
|
|
||||||
((PTRS_PER_PGD * sizeof(pgd_t)) - sizeof(struct list_head))
|
|
||||||
#define pgd_to_list(pgd) \
|
|
||||||
((struct list_head *)((char *)(pgd) + PGD_LIST_OFFSET))
|
|
||||||
#define list_to_pgd(list) \
|
|
||||||
((pgd_t *)((char *)(list) - PGD_LIST_OFFSET))
|
|
||||||
|
|
||||||
extern void pgtable_cache_init(void);
|
|
||||||
extern void paging_init(void);
|
|
||||||
extern void set_page_homes(void);
|
|
||||||
|
|
||||||
#define FIRST_USER_ADDRESS 0UL
|
|
||||||
|
|
||||||
#define _PAGE_PRESENT HV_PTE_PRESENT
|
|
||||||
#define _PAGE_HUGE_PAGE HV_PTE_PAGE
|
|
||||||
#define _PAGE_SUPER_PAGE HV_PTE_SUPER
|
|
||||||
#define _PAGE_READABLE HV_PTE_READABLE
|
|
||||||
#define _PAGE_WRITABLE HV_PTE_WRITABLE
|
|
||||||
#define _PAGE_EXECUTABLE HV_PTE_EXECUTABLE
|
|
||||||
#define _PAGE_ACCESSED HV_PTE_ACCESSED
|
|
||||||
#define _PAGE_DIRTY HV_PTE_DIRTY
|
|
||||||
#define _PAGE_GLOBAL HV_PTE_GLOBAL
|
|
||||||
#define _PAGE_USER HV_PTE_USER
|
|
||||||
|
|
||||||
/*
|
|
||||||
* All the "standard" bits. Cache-control bits are managed elsewhere.
|
|
||||||
* This is used to test for valid level-2 page table pointers by checking
|
|
||||||
* all the bits, and to mask away the cache control bits for mprotect.
|
|
||||||
*/
|
|
||||||
#define _PAGE_ALL (\
|
|
||||||
_PAGE_PRESENT | \
|
|
||||||
_PAGE_HUGE_PAGE | \
|
|
||||||
_PAGE_SUPER_PAGE | \
|
|
||||||
_PAGE_READABLE | \
|
|
||||||
_PAGE_WRITABLE | \
|
|
||||||
_PAGE_EXECUTABLE | \
|
|
||||||
_PAGE_ACCESSED | \
|
|
||||||
_PAGE_DIRTY | \
|
|
||||||
_PAGE_GLOBAL | \
|
|
||||||
_PAGE_USER \
|
|
||||||
)
|
|
||||||
|
|
||||||
#define PAGE_NONE \
|
|
||||||
__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
|
|
||||||
#define PAGE_SHARED \
|
|
||||||
__pgprot(_PAGE_PRESENT | _PAGE_READABLE | _PAGE_WRITABLE | \
|
|
||||||
_PAGE_USER | _PAGE_ACCESSED)
|
|
||||||
|
|
||||||
#define PAGE_SHARED_EXEC \
|
|
||||||
__pgprot(_PAGE_PRESENT | _PAGE_READABLE | _PAGE_WRITABLE | \
|
|
||||||
_PAGE_EXECUTABLE | _PAGE_USER | _PAGE_ACCESSED)
|
|
||||||
#define PAGE_COPY_NOEXEC \
|
|
||||||
__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_READABLE)
|
|
||||||
#define PAGE_COPY_EXEC \
|
|
||||||
__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | \
|
|
||||||
_PAGE_READABLE | _PAGE_EXECUTABLE)
|
|
||||||
#define PAGE_COPY \
|
|
||||||
PAGE_COPY_NOEXEC
|
|
||||||
#define PAGE_READONLY \
|
|
||||||
__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_READABLE)
|
|
||||||
#define PAGE_READONLY_EXEC \
|
|
||||||
__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | \
|
|
||||||
_PAGE_READABLE | _PAGE_EXECUTABLE)
|
|
||||||
|
|
||||||
#define _PAGE_KERNEL_RO \
|
|
||||||
(_PAGE_PRESENT | _PAGE_GLOBAL | _PAGE_READABLE | _PAGE_ACCESSED)
|
|
||||||
#define _PAGE_KERNEL \
|
|
||||||
(_PAGE_KERNEL_RO | _PAGE_WRITABLE | _PAGE_DIRTY)
|
|
||||||
#define _PAGE_KERNEL_EXEC (_PAGE_KERNEL_RO | _PAGE_EXECUTABLE)
|
|
||||||
|
|
||||||
#define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
|
|
||||||
#define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL_RO)
|
|
||||||
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
|
|
||||||
|
|
||||||
#define page_to_kpgprot(p) PAGE_KERNEL
|
|
||||||
|
|
||||||
/*
|
|
||||||
* We could tighten these up, but for now writable or executable
|
|
||||||
* implies readable.
|
|
||||||
*/
|
|
||||||
#define __P000 PAGE_NONE
|
|
||||||
#define __P001 PAGE_READONLY
|
|
||||||
#define __P010 PAGE_COPY /* this is write-only, which we won't support */
|
|
||||||
#define __P011 PAGE_COPY
|
|
||||||
#define __P100 PAGE_READONLY_EXEC
|
|
||||||
#define __P101 PAGE_READONLY_EXEC
|
|
||||||
#define __P110 PAGE_COPY_EXEC
|
|
||||||
#define __P111 PAGE_COPY_EXEC
|
|
||||||
|
|
||||||
#define __S000 PAGE_NONE
|
|
||||||
#define __S001 PAGE_READONLY
|
|
||||||
#define __S010 PAGE_SHARED
|
|
||||||
#define __S011 PAGE_SHARED
|
|
||||||
#define __S100 PAGE_READONLY_EXEC
|
|
||||||
#define __S101 PAGE_READONLY_EXEC
|
|
||||||
#define __S110 PAGE_SHARED_EXEC
|
|
||||||
#define __S111 PAGE_SHARED_EXEC
|
|
||||||
|
|
||||||
/*
|
|
||||||
* All the normal _PAGE_ALL bits are ignored for PMDs, except PAGE_PRESENT
|
|
||||||
* and PAGE_HUGE_PAGE, which must be one and zero, respectively.
|
|
||||||
* We set the ignored bits to zero.
|
|
||||||
*/
|
|
||||||
#define _PAGE_TABLE _PAGE_PRESENT
|
|
||||||
|
|
||||||
/* Inherit the caching flags from the old protection bits. */
|
|
||||||
#define pgprot_modify(oldprot, newprot) \
|
|
||||||
(pgprot_t) { ((oldprot).val & ~_PAGE_ALL) | (newprot).val }
|
|
||||||
|
|
||||||
/* Just setting the PFN to zero suffices. */
|
|
||||||
#define pte_pgprot(x) hv_pte_set_pa((x), 0)
|
|
||||||
|
|
||||||
/*
|
|
||||||
* For PTEs and PDEs, we must clear the Present bit first when
|
|
||||||
* clearing a page table entry, so clear the bottom half first and
|
|
||||||
* enforce ordering with a barrier.
|
|
||||||
*/
|
|
||||||
static inline void __pte_clear(pte_t *ptep)
|
|
||||||
{
|
|
||||||
#ifdef __tilegx__
|
|
||||||
ptep->val = 0;
|
|
||||||
#else
|
|
||||||
u32 *tmp = (u32 *)ptep;
|
|
||||||
tmp[0] = 0;
|
|
||||||
barrier();
|
|
||||||
tmp[1] = 0;
|
|
||||||
#endif
|
|
||||||
}
|
|
||||||
#define pte_clear(mm, addr, ptep) __pte_clear(ptep)
|
|
||||||
|
|
||||||
/*
|
|
||||||
* The following only work if pte_present() is true.
|
|
||||||
* Undefined behaviour if not..
|
|
||||||
*/
|
|
||||||
#define pte_present hv_pte_get_present
|
|
||||||
#define pte_mknotpresent hv_pte_clear_present
|
|
||||||
#define pte_user hv_pte_get_user
|
|
||||||
#define pte_read hv_pte_get_readable
|
|
||||||
#define pte_dirty hv_pte_get_dirty
|
|
||||||
#define pte_young hv_pte_get_accessed
|
|
||||||
#define pte_write hv_pte_get_writable
|
|
||||||
#define pte_exec hv_pte_get_executable
|
|
||||||
#define pte_huge hv_pte_get_page
|
|
||||||
#define pte_super hv_pte_get_super
|
|
||||||
#define pte_rdprotect hv_pte_clear_readable
|
|
||||||
#define pte_exprotect hv_pte_clear_executable
|
|
||||||
#define pte_mkclean hv_pte_clear_dirty
|
|
||||||
#define pte_mkold hv_pte_clear_accessed
|
|
||||||
#define pte_wrprotect hv_pte_clear_writable
|
|
||||||
#define pte_mksmall hv_pte_clear_page
|
|
||||||
#define pte_mkread hv_pte_set_readable
|
|
||||||
#define pte_mkexec hv_pte_set_executable
|
|
||||||
#define pte_mkdirty hv_pte_set_dirty
|
|
||||||
#define pte_mkyoung hv_pte_set_accessed
|
|
||||||
#define pte_mkwrite hv_pte_set_writable
|
|
||||||
#define pte_mkhuge hv_pte_set_page
|
|
||||||
#define pte_mksuper hv_pte_set_super
|
|
||||||
|
|
||||||
#define pte_special(pte) 0
|
|
||||||
#define pte_mkspecial(pte) (pte)
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Use some spare bits in the PTE for user-caching tags.
|
|
||||||
*/
|
|
||||||
#define pte_set_forcecache hv_pte_set_client0
|
|
||||||
#define pte_get_forcecache hv_pte_get_client0
|
|
||||||
#define pte_clear_forcecache hv_pte_clear_client0
|
|
||||||
#define pte_set_anyhome hv_pte_set_client1
|
|
||||||
#define pte_get_anyhome hv_pte_get_client1
|
|
||||||
#define pte_clear_anyhome hv_pte_clear_client1
|
|
||||||
|
|
||||||
/*
|
|
||||||
* A migrating PTE has PAGE_PRESENT clear but all the other bits preserved.
|
|
||||||
*/
|
|
||||||
#define pte_migrating hv_pte_get_migrating
|
|
||||||
#define pte_mkmigrate(x) hv_pte_set_migrating(hv_pte_clear_present(x))
|
|
||||||
#define pte_donemigrate(x) hv_pte_set_present(hv_pte_clear_migrating(x))
|
|
||||||
|
|
||||||
#define pte_ERROR(e) \
|
|
||||||
pr_err("%s:%d: bad pte 0x%016llx\n", __FILE__, __LINE__, pte_val(e))
|
|
||||||
#define pgd_ERROR(e) \
|
|
||||||
pr_err("%s:%d: bad pgd 0x%016llx\n", __FILE__, __LINE__, pgd_val(e))
|
|
||||||
|
|
||||||
/* Return PA and protection info for a given kernel VA. */
|
|
||||||
int va_to_cpa_and_pte(void *va, phys_addr_t *cpa, pte_t *pte);
|
|
||||||
|
|
||||||
/*
|
|
||||||
* __set_pte() ensures we write the 64-bit PTE with 32-bit words in
|
|
||||||
* the right order on 32-bit platforms and also allows us to write
|
|
||||||
* hooks to check valid PTEs, etc., if we want.
|
|
||||||
*/
|
|
||||||
void __set_pte(pte_t *ptep, pte_t pte);
|
|
||||||
|
|
||||||
/*
|
|
||||||
* set_pte() sets the given PTE and also sanity-checks the
|
|
||||||
* requested PTE against the page homecaching. Unspecified parts
|
|
||||||
* of the PTE are filled in when it is written to memory, i.e. all
|
|
||||||
* caching attributes if "!forcecache", or the home cpu if "anyhome".
|
|
||||||
*/
|
|
||||||
extern void set_pte(pte_t *ptep, pte_t pte);
|
|
||||||
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
|
|
||||||
#define set_pte_atomic(pteptr, pteval) set_pte(pteptr, pteval)
|
|
||||||
|
|
||||||
#define pte_page(x) pfn_to_page(pte_pfn(x))
|
|
||||||
|
|
||||||
static inline int pte_none(pte_t pte)
|
|
||||||
{
|
|
||||||
return !pte.val;
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline unsigned long pte_pfn(pte_t pte)
|
|
||||||
{
|
|
||||||
return PFN_DOWN(hv_pte_get_pa(pte));
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Set or get the remote cache cpu in a pgprot with remote caching. */
|
|
||||||
extern pgprot_t set_remote_cache_cpu(pgprot_t prot, int cpu);
|
|
||||||
extern int get_remote_cache_cpu(pgprot_t prot);
|
|
||||||
|
|
||||||
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
|
|
||||||
{
|
|
||||||
return hv_pte_set_pa(prot, PFN_PHYS(pfn));
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Support for priority mappings. */
|
|
||||||
extern void start_mm_caching(struct mm_struct *mm);
|
|
||||||
extern void check_mm_caching(struct mm_struct *prev, struct mm_struct *next);
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Encode and de-code a swap entry (see <linux/swapops.h>).
|
|
||||||
* We put the swap file type+offset in the 32 high bits;
|
|
||||||
* I believe we can just leave the low bits clear.
|
|
||||||
*/
|
|
||||||
#define __swp_type(swp) ((swp).val & 0x1f)
|
|
||||||
#define __swp_offset(swp) ((swp).val >> 5)
|
|
||||||
#define __swp_entry(type, off) ((swp_entry_t) { (type) | ((off) << 5) })
|
|
||||||
#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).val >> 32 })
|
|
||||||
#define __swp_entry_to_pte(swp) ((pte_t) { (((long long) ((swp).val)) << 32) })
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Conversion functions: convert a page and protection to a page entry,
|
|
||||||
* and a page entry and page directory to the page they refer to.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
|
|
||||||
|
|
||||||
/*
|
|
||||||
* If we are doing an mprotect(), just accept the new vma->vm_page_prot
|
|
||||||
* value and combine it with the PFN from the old PTE to get a new PTE.
|
|
||||||
*/
|
|
||||||
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
|
|
||||||
{
|
|
||||||
return pfn_pte(pte_pfn(pte), newprot);
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* The pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD]
|
|
||||||
*
|
|
||||||
* This macro returns the index of the entry in the pgd page which would
|
|
||||||
* control the given virtual address.
|
|
||||||
*/
|
|
||||||
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
|
|
||||||
|
|
||||||
/*
|
|
||||||
* pgd_offset() returns a (pgd_t *)
|
|
||||||
* pgd_index() is used get the offset into the pgd page's array of pgd_t's.
|
|
||||||
*/
|
|
||||||
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
|
|
||||||
|
|
||||||
/*
|
|
||||||
* A shortcut which implies the use of the kernel's pgd, instead
|
|
||||||
* of a process's.
|
|
||||||
*/
|
|
||||||
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
|
|
||||||
|
|
||||||
#define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
|
|
||||||
#define pte_unmap(pte) do { } while (0)
|
|
||||||
|
|
||||||
/* Clear a non-executable kernel PTE and flush it from the TLB. */
|
|
||||||
#define kpte_clear_flush(ptep, vaddr) \
|
|
||||||
do { \
|
|
||||||
pte_clear(&init_mm, (vaddr), (ptep)); \
|
|
||||||
local_flush_tlb_page(FLUSH_NONEXEC, (vaddr), PAGE_SIZE); \
|
|
||||||
} while (0)
|
|
||||||
|
|
||||||
/*
|
|
||||||
* The kernel page tables contain what we need, and we flush when we
|
|
||||||
* change specific page table entries.
|
|
||||||
*/
|
|
||||||
#define update_mmu_cache(vma, address, pte) do { } while (0)
|
|
||||||
|
|
||||||
#ifdef CONFIG_FLATMEM
|
|
||||||
#define kern_addr_valid(addr) (1)
|
|
||||||
#endif /* CONFIG_FLATMEM */
|
|
||||||
|
|
||||||
extern void vmalloc_sync_all(void);
|
|
||||||
|
|
||||||
#endif /* !__ASSEMBLY__ */
|
|
||||||
|
|
||||||
#ifdef __tilegx__
|
|
||||||
#include <asm/pgtable_64.h>
|
|
||||||
#else
|
|
||||||
#include <asm/pgtable_32.h>
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#ifndef __ASSEMBLY__
|
|
||||||
|
|
||||||
static inline int pmd_none(pmd_t pmd)
|
|
||||||
{
|
|
||||||
/*
|
|
||||||
* Only check low word on 32-bit platforms, since it might be
|
|
||||||
* out of sync with upper half.
|
|
||||||
*/
|
|
||||||
return (unsigned long)pmd_val(pmd) == 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline int pmd_present(pmd_t pmd)
|
|
||||||
{
|
|
||||||
return pmd_val(pmd) & _PAGE_PRESENT;
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline int pmd_bad(pmd_t pmd)
|
|
||||||
{
|
|
||||||
return ((pmd_val(pmd) & _PAGE_ALL) != _PAGE_TABLE);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline unsigned long pages_to_mb(unsigned long npg)
|
|
||||||
{
|
|
||||||
return npg >> (20 - PAGE_SHIFT);
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* The pmd can be thought of an array like this: pmd_t[PTRS_PER_PMD]
|
|
||||||
*
|
|
||||||
* This function returns the index of the entry in the pmd which would
|
|
||||||
* control the given virtual address.
|
|
||||||
*/
|
|
||||||
static inline unsigned long pmd_index(unsigned long address)
|
|
||||||
{
|
|
||||||
return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
|
|
||||||
}
|
|
||||||
|
|
||||||
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
|
|
||||||
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
|
|
||||||
unsigned long address,
|
|
||||||
pmd_t *pmdp)
|
|
||||||
{
|
|
||||||
return ptep_test_and_clear_young(vma, address, pmdp_ptep(pmdp));
|
|
||||||
}
|
|
||||||
|
|
||||||
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
|
|
||||||
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
|
|
||||||
unsigned long address, pmd_t *pmdp)
|
|
||||||
{
|
|
||||||
ptep_set_wrprotect(mm, address, pmdp_ptep(pmdp));
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
|
|
||||||
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
|
|
||||||
unsigned long address,
|
|
||||||
pmd_t *pmdp)
|
|
||||||
{
|
|
||||||
return pte_pmd(ptep_get_and_clear(mm, address, pmdp_ptep(pmdp)));
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void __set_pmd(pmd_t *pmdp, pmd_t pmdval)
|
|
||||||
{
|
|
||||||
set_pte(pmdp_ptep(pmdp), pmd_pte(pmdval));
|
|
||||||
}
|
|
||||||
|
|
||||||
#define set_pmd_at(mm, addr, pmdp, pmdval) __set_pmd(pmdp, pmdval)
|
|
||||||
|
|
||||||
/* Create a pmd from a PTFN. */
|
|
||||||
static inline pmd_t ptfn_pmd(unsigned long ptfn, pgprot_t prot)
|
|
||||||
{
|
|
||||||
return pte_pmd(hv_pte_set_ptfn(prot, ptfn));
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Return the page-table frame number (ptfn) that a pmd_t points at. */
|
|
||||||
#define pmd_ptfn(pmd) hv_pte_get_ptfn(pmd_pte(pmd))
|
|
||||||
|
|
||||||
/*
|
|
||||||
* A given kernel pmd_t maps to a specific virtual address (either a
|
|
||||||
* kernel huge page or a kernel pte_t table). Since kernel pte_t
|
|
||||||
* tables can be aligned at sub-page granularity, this function can
|
|
||||||
* return non-page-aligned pointers, despite its name.
|
|
||||||
*/
|
|
||||||
static inline unsigned long pmd_page_vaddr(pmd_t pmd)
|
|
||||||
{
|
|
||||||
phys_addr_t pa =
|
|
||||||
(phys_addr_t)pmd_ptfn(pmd) << HV_LOG2_PAGE_TABLE_ALIGN;
|
|
||||||
return (unsigned long)__va(pa);
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* A pmd_t points to the base of a huge page or to a pte_t array.
|
|
||||||
* If a pte_t array, since we can have multiple per page, we don't
|
|
||||||
* have a one-to-one mapping of pmd_t's to pages. However, this is
|
|
||||||
* OK for pte_lockptr(), since we just end up with potentially one
|
|
||||||
* lock being used for several pte_t arrays.
|
|
||||||
*/
|
|
||||||
#define pmd_page(pmd) pfn_to_page(PFN_DOWN(HV_PTFN_TO_CPA(pmd_ptfn(pmd))))
|
|
||||||
|
|
||||||
static inline void pmd_clear(pmd_t *pmdp)
|
|
||||||
{
|
|
||||||
__pte_clear(pmdp_ptep(pmdp));
|
|
||||||
}
|
|
||||||
|
|
||||||
#define pmd_mknotpresent(pmd) pte_pmd(pte_mknotpresent(pmd_pte(pmd)))
|
|
||||||
#define pmd_young(pmd) pte_young(pmd_pte(pmd))
|
|
||||||
#define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd)))
|
|
||||||
#define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd)))
|
|
||||||
#define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd)))
|
|
||||||
#define pmd_write(pmd) pte_write(pmd_pte(pmd))
|
|
||||||
#define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd)))
|
|
||||||
#define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd)))
|
|
||||||
#define pmd_huge_page(pmd) pte_huge(pmd_pte(pmd))
|
|
||||||
#define pmd_mkhuge(pmd) pte_pmd(pte_mkhuge(pmd_pte(pmd)))
|
|
||||||
|
|
||||||
#define pfn_pmd(pfn, pgprot) pte_pmd(pfn_pte((pfn), (pgprot)))
|
|
||||||
#define pmd_pfn(pmd) pte_pfn(pmd_pte(pmd))
|
|
||||||
#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot))
|
|
||||||
|
|
||||||
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
|
|
||||||
{
|
|
||||||
return pfn_pmd(pmd_pfn(pmd), newprot);
|
|
||||||
}
|
|
||||||
|
|
||||||
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
|
|
||||||
#define pmd_trans_huge pmd_huge_page
|
|
||||||
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
|
|
||||||
|
|
||||||
/*
|
|
||||||
* The pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
|
|
||||||
*
|
|
||||||
* This macro returns the index of the entry in the pte page which would
|
|
||||||
* control the given virtual address.
|
|
||||||
*/
|
|
||||||
static inline unsigned long pte_index(unsigned long address)
|
|
||||||
{
|
|
||||||
return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
|
|
||||||
{
|
|
||||||
return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
|
|
||||||
}
|
|
||||||
|
|
||||||
#include <asm-generic/pgtable.h>
|
|
||||||
|
|
||||||
/* Support /proc/NN/pgtable API. */
|
|
||||||
struct seq_file;
|
|
||||||
int arch_proc_pgtable_show(struct seq_file *m, struct mm_struct *mm,
|
|
||||||
unsigned long vaddr, unsigned long pagesize,
|
|
||||||
pte_t *ptep, void **datap);
|
|
||||||
|
|
||||||
#endif /* !__ASSEMBLY__ */
|
|
||||||
|
|
||||||
#endif /* _ASM_TILE_PGTABLE_H */
@ -1,122 +0,0 @@
/*
|
|
||||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
|
||||||
*
|
|
||||||
* This program is free software; you can redistribute it and/or
|
|
||||||
* modify it under the terms of the GNU General Public License
|
|
||||||
* as published by the Free Software Foundation, version 2.
|
|
||||||
*
|
|
||||||
* This program is distributed in the hope that it will be useful, but
|
|
||||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
|
||||||
* NON INFRINGEMENT. See the GNU General Public License for
|
|
||||||
* more details.
|
|
||||||
*
|
|
||||||
*/
|
|
||||||
|
|
||||||
#ifndef _ASM_TILE_PGTABLE_32_H
|
|
||||||
#define _ASM_TILE_PGTABLE_32_H
|
|
||||||
|
|
||||||
/*
|
|
||||||
* The level-1 index is defined by the huge page size. A PGD is composed
|
|
||||||
* of PTRS_PER_PGD pgd_t's and is the top level of the page table.
|
|
||||||
*/
|
|
||||||
#define PGDIR_SHIFT HPAGE_SHIFT
|
|
||||||
#define PGDIR_SIZE HPAGE_SIZE
|
|
||||||
#define PGDIR_MASK (~(PGDIR_SIZE-1))
|
|
||||||
#define PTRS_PER_PGD _HV_L1_ENTRIES(HPAGE_SHIFT)
|
|
||||||
#define PGD_INDEX(va) _HV_L1_INDEX(va, HPAGE_SHIFT)
|
|
||||||
#define SIZEOF_PGD _HV_L1_SIZE(HPAGE_SHIFT)
|
|
||||||
|
|
||||||
/*
|
|
||||||
* The level-2 index is defined by the difference between the huge
|
|
||||||
* page size and the normal page size. A PTE is composed of
|
|
||||||
* PTRS_PER_PTE pte_t's and is the bottom level of the page table.
|
|
||||||
* Note that the hypervisor docs use PTE for what we call pte_t, so
|
|
||||||
* this nomenclature is somewhat confusing.
|
|
||||||
*/
|
|
||||||
#define PTRS_PER_PTE _HV_L2_ENTRIES(HPAGE_SHIFT, PAGE_SHIFT)
|
|
||||||
#define PTE_INDEX(va) _HV_L2_INDEX(va, HPAGE_SHIFT, PAGE_SHIFT)
|
|
||||||
#define SIZEOF_PTE _HV_L2_SIZE(HPAGE_SHIFT, PAGE_SHIFT)
|
|
||||||
|
|
||||||
#ifndef __ASSEMBLY__
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Right now we initialize only a single pte table. It can be extended
|
|
||||||
* easily, subsequent pte tables have to be allocated in one physical
|
|
||||||
* chunk of RAM.
|
|
||||||
*
|
|
||||||
* HOWEVER, if we are using an allocation scheme with slop after the
|
|
||||||
* end of the page table (e.g. where our L2 page tables are 2KB but
|
|
||||||
* our pages are 64KB and we are allocating via the page allocator)
|
|
||||||
* we can't extend it easily.
|
|
||||||
*/
|
|
||||||
#define LAST_PKMAP PTRS_PER_PTE
|
|
||||||
|
|
||||||
#define PKMAP_BASE ((FIXADDR_BOOT_START - PAGE_SIZE*LAST_PKMAP) & PGDIR_MASK)
|
|
||||||
|
|
||||||
#ifdef CONFIG_HIGHMEM
|
|
||||||
# define _VMALLOC_END (PKMAP_BASE & ~(HPAGE_SIZE-1))
|
|
||||||
#else
|
|
||||||
# define _VMALLOC_END (FIXADDR_START & ~(HPAGE_SIZE-1))
|
|
||||||
#endif
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Align the vmalloc area to an L2 page table, and leave a guard page
|
|
||||||
* at the beginning and end. The vmalloc code also puts in an internal
|
|
||||||
* guard page between each allocation.
|
|
||||||
*/
|
|
||||||
#define VMALLOC_END (_VMALLOC_END - PAGE_SIZE)
|
|
||||||
extern unsigned long VMALLOC_RESERVE /* = CONFIG_VMALLOC_RESERVE */;
|
|
||||||
#define _VMALLOC_START (_VMALLOC_END - VMALLOC_RESERVE)
|
|
||||||
#define VMALLOC_START (_VMALLOC_START + PAGE_SIZE)
|
|
||||||
|
|
||||||
/* This is the maximum possible amount of lowmem. */
|
|
||||||
#define MAXMEM (_VMALLOC_START - PAGE_OFFSET)
|
|
||||||
|
|
||||||
/* We have no pmd or pud since we are strictly a two-level page table */
|
|
||||||
#define __ARCH_USE_5LEVEL_HACK
|
|
||||||
#include <asm-generic/pgtable-nopmd.h>
|
|
||||||
|
|
||||||
static inline int pud_huge_page(pud_t pud) { return 0; }
|
|
||||||
|
|
||||||
/* We don't define any pgds for these addresses. */
|
|
||||||
static inline int pgd_addr_invalid(unsigned long addr)
|
|
||||||
{
|
|
||||||
return addr >= MEM_HV_START;
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Provide versions of these routines that can be used safely when
|
|
||||||
* the hypervisor may be asynchronously modifying dirty/accessed bits.
|
|
||||||
* ptep_get_and_clear() matches the generic one but we provide it to
|
|
||||||
* be parallel with the 64-bit code.
|
|
||||||
*/
|
|
||||||
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
|
|
||||||
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
|
|
||||||
|
|
||||||
extern int ptep_test_and_clear_young(struct vm_area_struct *,
|
|
||||||
unsigned long addr, pte_t *);
|
|
||||||
extern void ptep_set_wrprotect(struct mm_struct *,
|
|
||||||
unsigned long addr, pte_t *);
|
|
||||||
|
|
||||||
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
|
|
||||||
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
|
|
||||||
unsigned long addr, pte_t *ptep)
|
|
||||||
{
|
|
||||||
pte_t pte = *ptep;
|
|
||||||
pte_clear(&init_mm, addr, ptep);
|
|
||||||
return pte;
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* pmds are wrappers around pgds, which are the same as ptes.
|
|
||||||
* It's often convenient to "cast" back and forth and use the pte methods,
|
|
||||||
* which are the methods supplied by the hypervisor.
|
|
||||||
*/
|
|
||||||
#define pmd_pte(pmd) ((pmd).pud.pgd)
|
|
||||||
#define pmdp_ptep(pmdp) (&(pmdp)->pud.pgd)
|
|
||||||
#define pte_pmd(pte) ((pmd_t){ { (pte) } })
|
|
||||||
|
|
||||||
#endif /* __ASSEMBLY__ */
|
|
||||||
|
|
||||||
#endif /* _ASM_TILE_PGTABLE_32_H */
@ -1,172 +0,0 @@
/*
|
|
||||||
* Copyright 2011 Tilera Corporation. All Rights Reserved.
|
|
||||||
*
|
|
||||||
* This program is free software; you can redistribute it and/or
|
|
||||||
* modify it under the terms of the GNU General Public License
|
|
||||||
* as published by the Free Software Foundation, version 2.
|
|
||||||
*
|
|
||||||
* This program is distributed in the hope that it will be useful, but
|
|
||||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
|
||||||
* NON INFRINGEMENT. See the GNU General Public License for
|
|
||||||
* more details.
|
|
||||||
*
|
|
||||||
*/
|
|
||||||
|
|
||||||
#ifndef _ASM_TILE_PGTABLE_64_H
|
|
||||||
#define _ASM_TILE_PGTABLE_64_H
|
|
||||||
|
|
||||||
/* The level-0 page table breaks the address space into 32-bit chunks. */
|
|
||||||
#define PGDIR_SHIFT HV_LOG2_L1_SPAN
|
|
||||||
#define PGDIR_SIZE HV_L1_SPAN
|
|
||||||
#define PGDIR_MASK (~(PGDIR_SIZE-1))
|
|
||||||
#define PTRS_PER_PGD HV_L0_ENTRIES
|
|
||||||
#define PGD_INDEX(va) HV_L0_INDEX(va)
|
|
||||||
#define SIZEOF_PGD HV_L0_SIZE
|
|
||||||
|
|
||||||
/*
|
|
||||||
* The level-1 index is defined by the huge page size. A PMD is composed
|
|
||||||
* of PTRS_PER_PMD pgd_t's and is the middle level of the page table.
|
|
||||||
*/
|
|
||||||
#define PMD_SHIFT HPAGE_SHIFT
|
|
||||||
#define PMD_SIZE HPAGE_SIZE
|
|
||||||
#define PMD_MASK (~(PMD_SIZE-1))
|
|
||||||
#define PTRS_PER_PMD _HV_L1_ENTRIES(HPAGE_SHIFT)
|
|
||||||
#define PMD_INDEX(va) _HV_L1_INDEX(va, HPAGE_SHIFT)
|
|
||||||
#define SIZEOF_PMD _HV_L1_SIZE(HPAGE_SHIFT)
|
|
||||||
|
|
||||||
/*
|
|
||||||
* The level-2 index is defined by the difference between the huge
|
|
||||||
* page size and the normal page size. A PTE is composed of
|
|
||||||
* PTRS_PER_PTE pte_t's and is the bottom level of the page table.
|
|
||||||
* Note that the hypervisor docs use PTE for what we call pte_t, so
|
|
||||||
* this nomenclature is somewhat confusing.
|
|
||||||
*/
|
|
||||||
#define PTRS_PER_PTE _HV_L2_ENTRIES(HPAGE_SHIFT, PAGE_SHIFT)
|
|
||||||
#define PTE_INDEX(va) _HV_L2_INDEX(va, HPAGE_SHIFT, PAGE_SHIFT)
|
|
||||||
#define SIZEOF_PTE _HV_L2_SIZE(HPAGE_SHIFT, PAGE_SHIFT)
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Align the vmalloc area to an L2 page table. Omit guard pages at
|
|
||||||
* the beginning and end for simplicity (particularly in the per-cpu
|
|
||||||
* memory allocation code). The vmalloc code puts in an internal
|
|
||||||
* guard page between each allocation.
|
|
||||||
*/
|
|
||||||
#define _VMALLOC_END MEM_SV_START
|
|
||||||
#define VMALLOC_END _VMALLOC_END
|
|
||||||
#define VMALLOC_START _VMALLOC_START
|
|
||||||
|
|
||||||
#ifndef __ASSEMBLY__
|
|
||||||
|
|
||||||
/* We have no pud since we are a three-level page table. */
|
|
||||||
#define __ARCH_USE_5LEVEL_HACK
|
|
||||||
#include <asm-generic/pgtable-nopud.h>
|
|
||||||
|
|
||||||
/*
|
|
||||||
* pmds are the same as pgds and ptes, so converting is a no-op.
|
|
||||||
*/
|
|
||||||
#define pmd_pte(pmd) (pmd)
|
|
||||||
#define pmdp_ptep(pmdp) (pmdp)
|
|
||||||
#define pte_pmd(pte) (pte)
|
|
||||||
|
|
||||||
#define pud_pte(pud) ((pud).pgd)
|
|
||||||
|
|
||||||
static inline int pud_none(pud_t pud)
|
|
||||||
{
|
|
||||||
return pud_val(pud) == 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline int pud_present(pud_t pud)
|
|
||||||
{
|
|
||||||
return pud_val(pud) & _PAGE_PRESENT;
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline int pud_huge_page(pud_t pud)
|
|
||||||
{
|
|
||||||
return pud_val(pud) & _PAGE_HUGE_PAGE;
|
|
||||||
}
|
|
||||||
|
|
||||||
#define pmd_ERROR(e) \
|
|
||||||
pr_err("%s:%d: bad pmd 0x%016llx\n", __FILE__, __LINE__, pmd_val(e))
|
|
||||||
|
|
||||||
static inline void pud_clear(pud_t *pudp)
|
|
||||||
{
|
|
||||||
__pte_clear(&pudp->pgd);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline int pud_bad(pud_t pud)
|
|
||||||
{
|
|
||||||
return ((pud_val(pud) & _PAGE_ALL) != _PAGE_TABLE);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Return the page-table frame number (ptfn) that a pud_t points at. */
|
|
||||||
#define pud_ptfn(pud) hv_pte_get_ptfn((pud).pgd)
|
|
||||||
|
|
||||||
/* Return the page frame number (pfn) that a pud_t points at. */
|
|
||||||
#define pud_pfn(pud) pte_pfn(pud_pte(pud))
|
|
||||||
|
|
||||||
/*
|
|
||||||
* A given kernel pud_t maps to a kernel pmd_t table at a specific
|
|
||||||
* virtual address. Since kernel pmd_t tables can be aligned at
|
|
||||||
* sub-page granularity, this macro can return non-page-aligned
|
|
||||||
* pointers, despite its name.
|
|
||||||
*/
|
|
||||||
#define pud_page_vaddr(pud) \
|
|
||||||
(__va((phys_addr_t)pud_ptfn(pud) << HV_LOG2_PAGE_TABLE_ALIGN))
|
|
||||||
|
|
||||||
/*
|
|
||||||
* A pud_t points to a pmd_t array. Since we can have multiple per
|
|
||||||
* page, we don't have a one-to-one mapping of pud_t's to pages.
|
|
||||||
*/
|
|
||||||
#define pud_page(pud) pfn_to_page(PFN_DOWN(HV_PTFN_TO_CPA(pud_ptfn(pud))))
|
|
||||||
|
|
||||||
static inline unsigned long pud_index(unsigned long address)
|
|
||||||
{
|
|
||||||
return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
|
|
||||||
}
|
|
||||||
|
|
||||||
#define pmd_offset(pud, address) \
|
|
||||||
((pmd_t *)pud_page_vaddr(*(pud)) + pmd_index(address))
|
|
||||||
|
|
||||||
/* Normalize an address to having the correct high bits set. */
|
|
||||||
#define pgd_addr_normalize pgd_addr_normalize
|
|
||||||
static inline unsigned long pgd_addr_normalize(unsigned long addr)
|
|
||||||
{
|
|
||||||
return ((long)addr << (CHIP_WORD_SIZE() - CHIP_VA_WIDTH())) >>
|
|
||||||
(CHIP_WORD_SIZE() - CHIP_VA_WIDTH());
|
|
||||||
}
|
|
||||||
|
|
||||||
/* We don't define any pgds for these addresses. */
|
|
||||||
static inline int pgd_addr_invalid(unsigned long addr)
|
|
||||||
{
|
|
||||||
return addr >= KERNEL_HIGH_VADDR || addr != pgd_addr_normalize(addr);
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Use atomic instructions to provide atomicity against the hypervisor.
|
|
||||||
*/
|
|
||||||
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
|
|
||||||
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
|
|
||||||
unsigned long addr, pte_t *ptep)
|
|
||||||
{
|
|
||||||
return (__insn_fetchand(&ptep->val, ~HV_PTE_ACCESSED) >>
|
|
||||||
HV_PTE_INDEX_ACCESSED) & 0x1;
|
|
||||||
}
|
|
||||||
|
|
||||||
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
|
|
||||||
static inline void ptep_set_wrprotect(struct mm_struct *mm,
|
|
||||||
unsigned long addr, pte_t *ptep)
|
|
||||||
{
|
|
||||||
__insn_fetchand(&ptep->val, ~HV_PTE_WRITABLE);
|
|
||||||
}
|
|
||||||
|
|
||||||
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
|
|
||||||
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
|
|
||||||
unsigned long addr, pte_t *ptep)
|
|
||||||
{
|
|
||||||
return hv_pte(__insn_exch(&ptep->val, 0UL));
|
|
||||||
}
|
|
||||||
|
|
||||||
#endif /* __ASSEMBLY__ */
|
|
||||||
|
|
||||||
#endif /* _ASM_TILE_PGTABLE_64_H */
@ -1,64 +0,0 @@
/*
|
|
||||||
* Copyright 2014 Tilera Corporation. All Rights Reserved.
|
|
||||||
*
|
|
||||||
* This program is free software; you can redistribute it and/or
|
|
||||||
* modify it under the terms of the GNU General Public License
|
|
||||||
* as published by the Free Software Foundation, version 2.
|
|
||||||
*
|
|
||||||
* This program is distributed in the hope that it will be useful, but
|
|
||||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
|
||||||
* NON INFRINGEMENT. See the GNU General Public License for
|
|
||||||
* more details.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#ifndef _ASM_TILE_PMC_H
|
|
||||||
#define _ASM_TILE_PMC_H
|
|
||||||
|
|
||||||
#include <linux/ptrace.h>
|
|
||||||
|
|
||||||
#define TILE_BASE_COUNTERS 2
|
|
||||||
|
|
||||||
/* Bitfields below are derived from SPR PERF_COUNT_CTL*/
|
|
||||||
#ifndef __tilegx__
|
|
||||||
/* PERF_COUNT_CTL on TILEPro */
|
|
||||||
#define TILE_CTL_EXCL_USER (1 << 7) /* exclude user level */
|
|
||||||
#define TILE_CTL_EXCL_KERNEL (1 << 8) /* exclude kernel level */
|
|
||||||
#define TILE_CTL_EXCL_HV (1 << 9) /* exclude hypervisor level */
|
|
||||||
|
|
||||||
#define TILE_SEL_MASK 0x7f /* 7 bits for event SEL,
|
|
||||||
COUNT_0_SEL */
|
|
||||||
#define TILE_PLM_MASK 0x780 /* 4 bits priv level msks,
|
|
||||||
COUNT_0_MASK*/
|
|
||||||
#define TILE_EVENT_MASK (TILE_SEL_MASK | TILE_PLM_MASK)
|
|
||||||
|
|
||||||
#else /* __tilegx__*/
|
|
||||||
/* PERF_COUNT_CTL on TILEGx*/
|
|
||||||
#define TILE_CTL_EXCL_USER (1 << 10) /* exclude user level */
|
|
||||||
#define TILE_CTL_EXCL_KERNEL (1 << 11) /* exclude kernel level */
|
|
||||||
#define TILE_CTL_EXCL_HV (1 << 12) /* exclude hypervisor level */
|
|
||||||
|
|
||||||
#define TILE_SEL_MASK 0x3f /* 6 bits for event SEL,
|
|
||||||
COUNT_0_SEL*/
|
|
||||||
#define TILE_BOX_MASK 0x1c0 /* 3 bits box msks,
|
|
||||||
COUNT_0_BOX */
|
|
||||||
#define TILE_PLM_MASK 0x3c00 /* 4 bits priv level msks,
|
|
||||||
COUNT_0_MASK */
|
|
||||||
#define TILE_EVENT_MASK (TILE_SEL_MASK | TILE_BOX_MASK | TILE_PLM_MASK)
|
|
||||||
#endif /* __tilegx__*/
|
|
||||||
|
|
||||||
/* Takes register and fault number. Returns error to disable the interrupt. */
|
|
||||||
typedef int (*perf_irq_t)(struct pt_regs *, int);
|
|
||||||
|
|
||||||
int userspace_perf_handler(struct pt_regs *regs, int fault);
|
|
||||||
|
|
||||||
perf_irq_t reserve_pmc_hardware(perf_irq_t new_perf_irq);
|
|
||||||
void release_pmc_hardware(void);
|
|
||||||
|
|
||||||
unsigned long pmc_get_overflow(void);
|
|
||||||
void pmc_ack_overflow(unsigned long status);
|
|
||||||
|
|
||||||
void unmask_pmc_interrupts(void);
|
|
||||||
void mask_pmc_interrupts(void);
|
|
||||||
|
|
||||||
#endif /* _ASM_TILE_PMC_H */
@ -1,368 +0,0 @@
/*
|
|
||||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
|
||||||
*
|
|
||||||
* This program is free software; you can redistribute it and/or
|
|
||||||
* modify it under the terms of the GNU General Public License
|
|
||||||
* as published by the Free Software Foundation, version 2.
|
|
||||||
*
|
|
||||||
* This program is distributed in the hope that it will be useful, but
|
|
||||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
|
||||||
* NON INFRINGEMENT. See the GNU General Public License for
|
|
||||||
* more details.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#ifndef _ASM_TILE_PROCESSOR_H
|
|
||||||
#define _ASM_TILE_PROCESSOR_H
|
|
||||||
|
|
||||||
#include <arch/chip.h>
|
|
||||||
|
|
||||||
#ifndef __ASSEMBLY__
|
|
||||||
|
|
||||||
/*
|
|
||||||
* NOTE: we don't include <linux/ptrace.h> or <linux/percpu.h> as one
|
|
||||||
* normally would, due to #include dependencies.
|
|
||||||
*/
|
|
||||||
#include <linux/types.h>
|
|
||||||
#include <asm/ptrace.h>
|
|
||||||
#include <asm/percpu.h>
|
|
||||||
|
|
||||||
#include <arch/spr_def.h>
|
|
||||||
|
|
||||||
struct task_struct;
|
|
||||||
struct thread_struct;
|
|
||||||
|
|
||||||
typedef struct {
|
|
||||||
unsigned long seg;
|
|
||||||
} mm_segment_t;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Default implementation of macro that returns current
|
|
||||||
* instruction pointer ("program counter").
|
|
||||||
*/
|
|
||||||
void *current_text_addr(void);
|
|
||||||
|
|
||||||
#if CHIP_HAS_TILE_DMA()
|
|
||||||
/* Capture the state of a suspended DMA. */
|
|
||||||
struct tile_dma_state {
|
|
||||||
int enabled;
|
|
||||||
unsigned long src;
|
|
||||||
unsigned long dest;
|
|
||||||
unsigned long strides;
|
|
||||||
unsigned long chunk_size;
|
|
||||||
unsigned long src_chunk;
|
|
||||||
unsigned long dest_chunk;
|
|
||||||
unsigned long byte;
|
|
||||||
unsigned long status;
|
|
||||||
};
|
|
||||||
|
|
||||||
/*
|
|
||||||
* A mask of the DMA status register for selecting only the 'running'
|
|
||||||
* and 'done' bits.
|
|
||||||
*/
|
|
||||||
#define DMA_STATUS_MASK \
|
|
||||||
(SPR_DMA_STATUS__RUNNING_MASK | SPR_DMA_STATUS__DONE_MASK)
|
|
||||||
#endif
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Track asynchronous TLB events (faults and access violations)
|
|
||||||
* that occur while we are in kernel mode from DMA or the SN processor.
|
|
||||||
*/
|
|
||||||
struct async_tlb {
|
|
||||||
short fault_num; /* original fault number; 0 if none */
|
|
||||||
char is_fault; /* was it a fault (vs an access violation) */
|
|
||||||
char is_write; /* for fault: was it caused by a write? */
|
|
||||||
unsigned long address; /* what address faulted? */
|
|
||||||
};
|
|
||||||
|
|
||||||
#ifdef CONFIG_HARDWALL
|
|
||||||
struct hardwall_info;
|
|
||||||
struct hardwall_task {
|
|
||||||
/* Which hardwall is this task tied to? (or NULL if none) */
|
|
||||||
struct hardwall_info *info;
|
|
||||||
/* Chains this task into the list at info->task_head. */
|
|
||||||
struct list_head list;
|
|
||||||
};
|
|
||||||
#ifdef __tilepro__
|
|
||||||
#define HARDWALL_TYPES 1 /* udn */
|
|
||||||
#else
|
|
||||||
#define HARDWALL_TYPES 3 /* udn, idn, and ipi */
|
|
||||||
#endif
|
|
||||||
#endif
|
|
||||||
|
|
||||||
struct thread_struct {
|
|
||||||
/* kernel stack pointer */
|
|
||||||
unsigned long ksp;
|
|
||||||
/* kernel PC */
|
|
||||||
unsigned long pc;
|
|
||||||
/* starting user stack pointer (for page migration) */
|
|
||||||
unsigned long usp0;
|
|
||||||
/* pid of process that created this one */
|
|
||||||
pid_t creator_pid;
|
|
||||||
#if CHIP_HAS_TILE_DMA()
|
|
||||||
/* DMA info for suspended threads (byte == 0 means no DMA state) */
|
|
||||||
struct tile_dma_state tile_dma_state;
|
|
||||||
#endif
|
|
||||||
/* User EX_CONTEXT registers */
|
|
||||||
unsigned long ex_context[2];
|
|
||||||
/* User SYSTEM_SAVE registers */
|
|
||||||
unsigned long system_save[4];
|
|
||||||
/* User interrupt mask */
|
|
||||||
unsigned long long interrupt_mask;
|
|
||||||
/* User interrupt-control 0 state */
|
|
||||||
unsigned long intctrl_0;
|
|
||||||
/* Any other miscellaneous processor state bits */
|
|
||||||
unsigned long proc_status;
|
|
||||||
#if !CHIP_HAS_FIXED_INTVEC_BASE()
|
|
||||||
/* Interrupt base for PL0 interrupts */
|
|
||||||
unsigned long interrupt_vector_base;
|
|
||||||
#endif
|
|
||||||
/* Tile cache retry fifo high-water mark */
|
|
||||||
unsigned long tile_rtf_hwm;
|
|
||||||
#if CHIP_HAS_DSTREAM_PF()
|
|
||||||
/* Data stream prefetch control */
|
|
||||||
unsigned long dstream_pf;
|
|
||||||
#endif
|
|
||||||
#ifdef CONFIG_HARDWALL
|
|
||||||
/* Hardwall information for various resources. */
|
|
||||||
struct hardwall_task hardwall[HARDWALL_TYPES];
|
|
||||||
#endif
|
|
||||||
#if CHIP_HAS_TILE_DMA()
|
|
||||||
/* Async DMA TLB fault information */
|
|
||||||
struct async_tlb dma_async_tlb;
|
|
||||||
#endif
|
|
||||||
};
|
|
||||||
|
|
||||||
#endif /* !__ASSEMBLY__ */
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Start with "sp" this many bytes below the top of the kernel stack.
|
|
||||||
* This allows us to be cache-aware when handling the initial save
|
|
||||||
* of the pt_regs value to the stack.
|
|
||||||
*/
|
|
||||||
#define STACK_TOP_DELTA 64
|
|
||||||
|
|
||||||
/*
|
|
||||||
* When entering the kernel via a fault, start with the top of the
|
|
||||||
* pt_regs structure this many bytes below the top of the page.
|
|
||||||
* This aligns the pt_regs structure optimally for cache-line access.
|
|
||||||
*/
|
|
||||||
#ifdef __tilegx__
|
|
||||||
#define KSTK_PTREGS_GAP 48
|
|
||||||
#else
|
|
||||||
#define KSTK_PTREGS_GAP 56
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#ifndef __ASSEMBLY__
|
|
||||||
|
|
||||||
#ifdef __tilegx__
|
|
||||||
#define TASK_SIZE_MAX (_AC(1, UL) << (MAX_VA_WIDTH - 1))
|
|
||||||
#else
|
|
||||||
#define TASK_SIZE_MAX PAGE_OFFSET
|
|
||||||
#endif
|
|
||||||
|
|
||||||
/* TASK_SIZE and related variables are always checked in "current" context. */
|
|
||||||
#ifdef CONFIG_COMPAT
|
|
||||||
#define COMPAT_TASK_SIZE (1UL << 31)
|
|
||||||
#define TASK_SIZE ((current_thread_info()->status & TS_COMPAT) ?\
|
|
||||||
COMPAT_TASK_SIZE : TASK_SIZE_MAX)
|
|
||||||
#else
|
|
||||||
#define TASK_SIZE TASK_SIZE_MAX
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#define VDSO_BASE ((unsigned long)current->active_mm->context.vdso_base)
|
|
||||||
#define VDSO_SYM(x) (VDSO_BASE + (unsigned long)(x))
|
|
||||||
|
|
||||||
#define STACK_TOP TASK_SIZE
|
|
||||||
|
|
||||||
/* STACK_TOP_MAX is used temporarily in execve and should not check COMPAT. */
|
|
||||||
#define STACK_TOP_MAX TASK_SIZE_MAX
|
|
||||||
|
|
||||||
/*
|
|
||||||
* This decides where the kernel will search for a free chunk of vm
|
|
||||||
* space during mmap's, if it is using bottom-up mapping.
|
|
||||||
*/
|
|
||||||
#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
|
|
||||||
|
|
||||||
#define HAVE_ARCH_PICK_MMAP_LAYOUT
|
|
||||||
|
|
||||||
#define INIT_THREAD { \
|
|
||||||
.ksp = (unsigned long)init_stack + THREAD_SIZE - STACK_TOP_DELTA, \
|
|
||||||
.interrupt_mask = -1ULL \
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Kernel stack top for the task that first boots on this cpu. */
|
|
||||||
DECLARE_PER_CPU(unsigned long, boot_sp);
|
|
||||||
|
|
||||||
/* PC to boot from on this cpu. */
|
|
||||||
DECLARE_PER_CPU(unsigned long, boot_pc);
|
|
||||||
|
|
||||||
/* Do necessary setup to start up a newly executed thread. */
|
|
||||||
static inline void start_thread(struct pt_regs *regs,
|
|
||||||
unsigned long pc, unsigned long usp)
|
|
||||||
{
|
|
||||||
regs->pc = pc;
|
|
||||||
regs->sp = usp;
|
|
||||||
single_step_execve();
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Free all resources held by a thread. */
|
|
||||||
static inline void release_thread(struct task_struct *dead_task)
|
|
||||||
{
|
|
||||||
/* Nothing for now */
|
|
||||||
}
|
|
||||||
|
|
||||||
extern void prepare_exit_to_usermode(struct pt_regs *regs, u32 flags);
|
|
||||||
|
|
||||||
unsigned long get_wchan(struct task_struct *p);
|
|
||||||
|
|
||||||
/* Return initial ksp value for given task. */
|
|
||||||
#define task_ksp0(task) \
|
|
||||||
((unsigned long)(task)->stack + THREAD_SIZE - STACK_TOP_DELTA)
|
|
||||||
|
|
||||||
/* Return some info about the user process TASK. */
|
|
||||||
#define task_pt_regs(task) \
|
|
||||||
((struct pt_regs *)(task_ksp0(task) - KSTK_PTREGS_GAP) - 1)
|
|
||||||
#define current_pt_regs() \
|
|
||||||
((struct pt_regs *)((stack_pointer | (THREAD_SIZE - 1)) - \
|
|
||||||
STACK_TOP_DELTA - (KSTK_PTREGS_GAP - 1)) - 1)
|
|
||||||
#define task_sp(task) (task_pt_regs(task)->sp)
|
|
||||||
#define task_pc(task) (task_pt_regs(task)->pc)
|
|
||||||
/* Aliases for pc and sp (used in fs/proc/array.c) */
|
|
||||||
#define KSTK_EIP(task) task_pc(task)
|
|
||||||
#define KSTK_ESP(task) task_sp(task)
|
|
||||||
|
|
||||||
/* Fine-grained unaligned JIT support */
|
|
||||||
#define GET_UNALIGN_CTL(tsk, adr) get_unalign_ctl((tsk), (adr))
|
|
||||||
#define SET_UNALIGN_CTL(tsk, val) set_unalign_ctl((tsk), (val))
|
|
||||||
|
|
||||||
extern int get_unalign_ctl(struct task_struct *tsk, unsigned long adr);
|
|
||||||
extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val);
|
|
||||||
|
|
||||||
/* Standard format for printing registers and other word-size data. */
|
|
||||||
#ifdef __tilegx__
|
|
||||||
# define REGFMT "0x%016lx"
|
|
||||||
#else
|
|
||||||
# define REGFMT "0x%08lx"
|
|
||||||
#endif
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Do some slow action (e.g. read a slow SPR).
|
|
||||||
* Note that this must also have compiler-barrier semantics since
|
|
||||||
* it may be used in a busy loop reading memory.
|
|
||||||
*/
|
|
||||||
static inline void cpu_relax(void)
|
|
||||||
{
|
|
||||||
__insn_mfspr(SPR_PASS);
|
|
||||||
barrier();
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Info on this processor (see fs/proc/cpuinfo.c) */
|
|
||||||
struct seq_operations;
|
|
||||||
extern const struct seq_operations cpuinfo_op;
|
|
||||||
|
|
||||||
/* Provide information about the chip model. */
|
|
||||||
extern char chip_model[64];
|
|
||||||
|
|
||||||
/* Data on which physical memory controller corresponds to which NUMA node. */
|
|
||||||
extern int node_controller[];
|
|
||||||
|
|
||||||
/* Does the heap allocator return hash-for-home pages by default? */
|
|
||||||
extern int hash_default;
|
|
||||||
|
|
||||||
/* Should kernel stack pages be hash-for-home? */
|
|
||||||
extern int kstack_hash;
|
|
||||||
|
|
||||||
/* Does MAP_ANONYMOUS return hash-for-home pages by default? */
|
|
||||||
#define uheap_hash hash_default
|
|
||||||
|
|
||||||
|
|
||||||
/* Are we using huge pages in the TLB for kernel data? */
|
|
||||||
extern int kdata_huge;
|
|
||||||
|
|
||||||
/* Support standard Linux prefetching. */
|
|
||||||
#define ARCH_HAS_PREFETCH
|
|
||||||
#define prefetch(x) __builtin_prefetch(x)
|
|
||||||
#define PREFETCH_STRIDE CHIP_L2_LINE_SIZE()
|
|
||||||
|
|
||||||
/* Bring a value into the L1D, faulting the TLB if necessary. */
|
|
||||||
#ifdef __tilegx__
|
|
||||||
#define prefetch_L1(x) __insn_prefetch_l1_fault((void *)(x))
|
|
||||||
#else
|
|
||||||
#define prefetch_L1(x) __insn_prefetch_L1((void *)(x))
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#else /* __ASSEMBLY__ */
|
|
||||||
|
|
||||||
/* Do some slow action (e.g. read a slow SPR). */
|
|
||||||
#define CPU_RELAX mfspr zero, SPR_PASS
|
|
||||||
|
|
||||||
#endif /* !__ASSEMBLY__ */
|
|
||||||
|
|
||||||
/* Assembly code assumes that the PL is in the low bits. */
|
|
||||||
#if SPR_EX_CONTEXT_1_1__PL_SHIFT != 0
|
|
||||||
# error Fix assembly assumptions about PL
|
|
||||||
#endif
|
|
||||||
|
|
||||||
/* We sometimes use these macros for EX_CONTEXT_0_1 as well. */
|
|
||||||
#if SPR_EX_CONTEXT_1_1__PL_SHIFT != SPR_EX_CONTEXT_0_1__PL_SHIFT || \
|
|
||||||
SPR_EX_CONTEXT_1_1__PL_RMASK != SPR_EX_CONTEXT_0_1__PL_RMASK || \
|
|
||||||
SPR_EX_CONTEXT_1_1__ICS_SHIFT != SPR_EX_CONTEXT_0_1__ICS_SHIFT || \
|
|
||||||
SPR_EX_CONTEXT_1_1__ICS_RMASK != SPR_EX_CONTEXT_0_1__ICS_RMASK
|
|
||||||
# error Fix assumptions that EX1 macros work for both PL0 and PL1
|
|
||||||
#endif
|
|
||||||
|
|
||||||
/* Allow pulling apart and recombining the PL and ICS bits in EX_CONTEXT. */
|
|
||||||
#define EX1_PL(ex1) \
|
|
||||||
(((ex1) >> SPR_EX_CONTEXT_1_1__PL_SHIFT) & SPR_EX_CONTEXT_1_1__PL_RMASK)
|
|
||||||
#define EX1_ICS(ex1) \
|
|
||||||
(((ex1) >> SPR_EX_CONTEXT_1_1__ICS_SHIFT) & SPR_EX_CONTEXT_1_1__ICS_RMASK)
|
|
||||||
#define PL_ICS_EX1(pl, ics) \
|
|
||||||
(((pl) << SPR_EX_CONTEXT_1_1__PL_SHIFT) | \
|
|
||||||
((ics) << SPR_EX_CONTEXT_1_1__ICS_SHIFT))
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Provide symbolic constants for PLs.
|
|
||||||
*/
|
|
||||||
#define USER_PL 0
|
|
||||||
#if CONFIG_KERNEL_PL == 2
|
|
||||||
#define GUEST_PL 1
|
|
||||||
#endif
|
|
||||||
#define KERNEL_PL CONFIG_KERNEL_PL
|
|
||||||
|
|
||||||
/* SYSTEM_SAVE_K_0 holds the current cpu number ORed with ksp0. */
|
|
||||||
#ifdef __tilegx__
|
|
||||||
#define CPU_SHIFT 48
|
|
||||||
#if CHIP_VA_WIDTH() > CPU_SHIFT
|
|
||||||
# error Too many VA bits!
|
|
||||||
#endif
|
|
||||||
#define MAX_CPU_ID ((1 << (64 - CPU_SHIFT)) - 1)
|
|
||||||
#define raw_smp_processor_id() \
|
|
||||||
((int)(__insn_mfspr(SPR_SYSTEM_SAVE_K_0) >> CPU_SHIFT))
|
|
||||||
#define get_current_ksp0() \
|
|
||||||
((unsigned long)(((long)__insn_mfspr(SPR_SYSTEM_SAVE_K_0) << \
|
|
||||||
(64 - CPU_SHIFT)) >> (64 - CPU_SHIFT)))
|
|
||||||
#define next_current_ksp0(task) ({ \
|
|
||||||
unsigned long __ksp0 = task_ksp0(task) & ((1UL << CPU_SHIFT) - 1); \
|
|
||||||
unsigned long __cpu = (long)raw_smp_processor_id() << CPU_SHIFT; \
|
|
||||||
__ksp0 | __cpu; \
|
|
||||||
})
|
|
||||||
#else
|
|
||||||
#define LOG2_NR_CPU_IDS 6
|
|
||||||
#define MAX_CPU_ID ((1 << LOG2_NR_CPU_IDS) - 1)
|
|
||||||
#define raw_smp_processor_id() \
|
|
||||||
((int)__insn_mfspr(SPR_SYSTEM_SAVE_K_0) & MAX_CPU_ID)
|
|
||||||
#define get_current_ksp0() \
|
|
||||||
(__insn_mfspr(SPR_SYSTEM_SAVE_K_0) & ~MAX_CPU_ID)
|
|
||||||
#define next_current_ksp0(task) ({ \
|
|
||||||
unsigned long __ksp0 = task_ksp0(task); \
|
|
||||||
int __cpu = raw_smp_processor_id(); \
|
|
||||||
BUG_ON(__ksp0 & MAX_CPU_ID); \
|
|
||||||
__ksp0 | __cpu; \
|
|
||||||
})
|
|
||||||
#endif
|
|
||||||
#if CONFIG_NR_CPUS > (MAX_CPU_ID + 1)
|
|
||||||
# error Too many cpus!
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#endif /* _ASM_TILE_PROCESSOR_H */
Some files were not shown because too many files have changed in this diff