Merge commit 'v2.6.37-rc4' into imx-for-2.6.38
Done to resolve merge conflict: Conflicts: arch/arm/mach-mx25/devices-imx25.h Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
This commit is contained in:
commit
0e44e05958
|
@ -79,10 +79,6 @@
|
|||
</sect2>
|
||||
</sect1>
|
||||
</chapter>
|
||||
<chapter id="clk">
|
||||
<title>Clock Framework Extensions</title>
|
||||
!Iinclude/linux/sh_clk.h
|
||||
</chapter>
|
||||
<chapter id="mach">
|
||||
<title>Machine Specific Interfaces</title>
|
||||
<sect1 id="dreamcast">
|
||||
|
|
|
@ -196,7 +196,7 @@ csrow3.
|
|||
The representation of the above is reflected in the directory tree
|
||||
in EDAC's sysfs interface. Starting in directory
|
||||
/sys/devices/system/edac/mc each memory controller will be represented
|
||||
by its own 'mcX' directory, where 'X" is the index of the MC.
|
||||
by its own 'mcX' directory, where 'X' is the index of the MC.
|
||||
|
||||
|
||||
..../edac/mc/
|
||||
|
@ -207,7 +207,7 @@ by its own 'mcX' directory, where 'X" is the index of the MC.
|
|||
....
|
||||
|
||||
Under each 'mcX' directory each 'csrowX' is again represented by a
|
||||
'csrowX', where 'X" is the csrow index:
|
||||
'csrowX', where 'X' is the csrow index:
|
||||
|
||||
|
||||
.../mc/mc0/
|
||||
|
@ -232,7 +232,7 @@ EDAC control and attribute files.
|
|||
|
||||
|
||||
In 'mcX' directories are EDAC control and attribute files for
|
||||
this 'X" instance of the memory controllers:
|
||||
this 'X' instance of the memory controllers:
|
||||
|
||||
|
||||
Counter reset control file:
|
||||
|
@ -343,7 +343,7 @@ Sdram memory scrubbing rate:
|
|||
'csrowX' DIRECTORIES
|
||||
|
||||
In the 'csrowX' directories are EDAC control and attribute files for
|
||||
this 'X" instance of csrow:
|
||||
this 'X' instance of csrow:
|
||||
|
||||
|
||||
Total Uncorrectable Errors count attribute file:
|
||||
|
|
|
@ -4,33 +4,41 @@ please mail me.
|
|||
Geert Uytterhoeven <geert@linux-m68k.org>
|
||||
|
||||
00-INDEX
|
||||
- this file
|
||||
- this file.
|
||||
arkfb.txt
|
||||
- info on the fbdev driver for ARK Logic chips.
|
||||
aty128fb.txt
|
||||
- info on the ATI Rage128 frame buffer driver.
|
||||
cirrusfb.txt
|
||||
- info on the driver for Cirrus Logic chipsets.
|
||||
cmap_xfbdev.txt
|
||||
- an introduction to fbdev's cmap structures.
|
||||
deferred_io.txt
|
||||
- an introduction to deferred IO.
|
||||
efifb.txt
|
||||
- info on the EFI platform driver for Intel based Apple computers.
|
||||
ep93xx-fb.txt
|
||||
- info on the driver for EP93xx LCD controller.
|
||||
fbcon.txt
|
||||
- intro to and usage guide for the framebuffer console (fbcon).
|
||||
framebuffer.txt
|
||||
- introduction to frame buffer devices.
|
||||
imacfb.txt
|
||||
- info on the generic EFI platform driver for Intel based Macs.
|
||||
gxfb.txt
|
||||
- info on the framebuffer driver for AMD Geode GX2 based processors.
|
||||
intel810.txt
|
||||
- documentation for the Intel 810/815 framebuffer driver.
|
||||
intelfb.txt
|
||||
- docs for Intel 830M/845G/852GM/855GM/865G/915G/945G fb driver.
|
||||
internals.txt
|
||||
- quick overview of frame buffer device internals.
|
||||
lxfb.txt
|
||||
- info on the framebuffer driver for AMD Geode LX based processors.
|
||||
matroxfb.txt
|
||||
- info on the Matrox framebuffer driver for Alpha, Intel and PPC.
|
||||
metronomefb.txt
|
||||
- info on the driver for the Metronome display controller.
|
||||
modedb.txt
|
||||
- info on the video mode database.
|
||||
matroxfb.txt
|
||||
- info on the Matrox frame buffer driver.
|
||||
pvr2fb.txt
|
||||
- info on the PowerVR 2 frame buffer driver.
|
||||
pxafb.txt
|
||||
|
@ -39,13 +47,23 @@ s3fb.txt
|
|||
- info on the fbdev driver for S3 Trio/Virge chips.
|
||||
sa1100fb.txt
|
||||
- information about the driver for the SA-1100 LCD controller.
|
||||
sh7760fb.txt
|
||||
- info on the SH7760/SH7763 integrated LCDC Framebuffer driver.
|
||||
sisfb.txt
|
||||
- info on the framebuffer device driver for various SiS chips.
|
||||
sstfb.txt
|
||||
- info on the frame buffer driver for 3dfx' Voodoo Graphics boards.
|
||||
tgafb.txt
|
||||
- info on the TGA (DECChip 21030) frame buffer driver
|
||||
- info on the TGA (DECChip 21030) frame buffer driver.
|
||||
tridentfb.txt
|
||||
info on the framebuffer driver for some Trident chip based cards.
|
||||
uvesafb.txt
|
||||
- info on the userspace VESA (VBE2+ compliant) frame buffer device.
|
||||
vesafb.txt
|
||||
- info on the VESA frame buffer device
|
||||
- info on the VESA frame buffer device.
|
||||
viafb.modes
|
||||
- list of modes for VIA Integration Graphic Chip.
|
||||
viafb.txt
|
||||
- info on the VIA Integration Graphic Chip console framebuffer driver.
|
||||
vt8623fb.txt
|
||||
- info on the fb driver for the graphics core in VIA VT8623 chipsets.
|
||||
|
|
|
@ -2385,6 +2385,11 @@ and is between 256 and 4096 characters. It is defined in the file
|
|||
improve throughput, but will also increase the
|
||||
amount of memory reserved for use by the client.
|
||||
|
||||
swapaccount[=0|1]
|
||||
[KNL] Enable accounting of swap in memory resource
|
||||
controller if no parameter or 1 is given or disable
|
||||
it if 0 is given (See Documentation/cgroups/memory.txt)
|
||||
|
||||
swiotlb= [IA-64] Number of I/O TLB slabs
|
||||
|
||||
switches= [HW,M68k]
|
||||
|
|
|
@ -144,6 +144,7 @@ tcp_adv_win_scale - INTEGER
|
|||
Count buffering overhead as bytes/2^tcp_adv_win_scale
|
||||
(if tcp_adv_win_scale > 0) or bytes-bytes/2^(-tcp_adv_win_scale),
|
||||
if it is <= 0.
|
||||
Possible values are [-31, 31], inclusive.
|
||||
Default: 2
|
||||
|
||||
tcp_allowed_congestion_control - STRING
|
||||
|
|
|
@ -1,32 +0,0 @@
|
|||
Clock framework on SuperH architecture
|
||||
|
||||
The framework on SH extends existing API by the function clk_set_rate_ex,
|
||||
which prototype is as follows:
|
||||
|
||||
clk_set_rate_ex (struct clk *clk, unsigned long rate, int algo_id)
|
||||
|
||||
The algo_id parameter is used to specify algorithm used to recalculate clocks,
|
||||
adjanced to clock, specified as first argument. It is assumed that algo_id==0
|
||||
means no changes to adjanced clock
|
||||
|
||||
Internally, the clk_set_rate_ex forwards request to clk->ops->set_rate method,
|
||||
if it is present in ops structure. The method should set the clock rate and adjust
|
||||
all needed clocks according to the passed algo_id.
|
||||
Exact values for algo_id are machine-dependent. For the sh7722, the following
|
||||
values are defined:
|
||||
|
||||
NO_CHANGE = 0,
|
||||
IUS_N1_N1, /* I:U = N:1, U:Sh = N:1 */
|
||||
IUS_322, /* I:U:Sh = 3:2:2 */
|
||||
IUS_522, /* I:U:Sh = 5:2:2 */
|
||||
IUS_N11, /* I:U:Sh = N:1:1 */
|
||||
SB_N1, /* Sh:B = N:1 */
|
||||
SB3_N1, /* Sh:B3 = N:1 */
|
||||
SB3_32, /* Sh:B3 = 3:2 */
|
||||
SB3_43, /* Sh:B3 = 4:3 */
|
||||
SB3_54, /* Sh:B3 = 5:4 */
|
||||
BP_N1, /* B:P = N:1 */
|
||||
IP_N1 /* I:P = N:1 */
|
||||
|
||||
Each of these constants means relation between clocks that can be set via the FRQCR
|
||||
register
|
|
@ -1359,7 +1359,7 @@ F: include/net/bluetooth/
|
|||
|
||||
BONDING DRIVER
|
||||
M: Jay Vosburgh <fubar@us.ibm.com>
|
||||
L: bonding-devel@lists.sourceforge.net
|
||||
L: netdev@vger.kernel.org
|
||||
W: http://sourceforge.net/projects/bonding/
|
||||
S: Supported
|
||||
F: drivers/net/bonding/
|
||||
|
@ -2444,10 +2444,12 @@ F: drivers/net/wan/sdla.c
|
|||
FRAMEBUFFER LAYER
|
||||
L: linux-fbdev@vger.kernel.org
|
||||
W: http://linux-fbdev.sourceforge.net/
|
||||
Q: http://patchwork.kernel.org/project/linux-fbdev/list/
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/lethal/fbdev-2.6.git
|
||||
S: Orphan
|
||||
F: Documentation/fb/
|
||||
F: drivers/video/fb*
|
||||
F: drivers/video/
|
||||
F: include/video/
|
||||
F: include/linux/fb.h
|
||||
|
||||
FREESCALE DMA DRIVER
|
||||
|
@ -5837,6 +5839,8 @@ M: Chris Metcalf <cmetcalf@tilera.com>
|
|||
W: http://www.tilera.com/scm/
|
||||
S: Supported
|
||||
F: arch/tile/
|
||||
F: drivers/char/hvc_tile.c
|
||||
F: drivers/net/tile/
|
||||
|
||||
TLAN NETWORK DRIVER
|
||||
M: Samuel Chessman <chessman@tux.org>
|
||||
|
|
2
Makefile
2
Makefile
|
@ -1,7 +1,7 @@
|
|||
VERSION = 2
|
||||
PATCHLEVEL = 6
|
||||
SUBLEVEL = 37
|
||||
EXTRAVERSION = -rc3
|
||||
EXTRAVERSION = -rc4
|
||||
NAME = Flesh-Eating Bats with Fangs
|
||||
|
||||
# *DOCUMENTATION*
|
||||
|
|
|
@ -1084,6 +1084,6 @@ memdump: mov r12, r0
|
|||
reloc_end:
|
||||
|
||||
.align
|
||||
.section ".stack", "w"
|
||||
.section ".stack", "aw", %nobits
|
||||
user_stack: .space 4096
|
||||
user_stack_end:
|
||||
|
|
|
@ -57,7 +57,7 @@ SECTIONS
|
|||
.bss : { *(.bss) }
|
||||
_end = .;
|
||||
|
||||
.stack (NOLOAD) : { *(.stack) }
|
||||
.stack : { *(.stack) }
|
||||
|
||||
.stab 0 : { *(.stab) }
|
||||
.stabstr 0 : { *(.stabstr) }
|
||||
|
|
|
@ -238,7 +238,7 @@
|
|||
@ Slightly optimised to avoid incrementing the pointer twice
|
||||
usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
|
||||
.if \rept == 2
|
||||
usraccoff \instr, \reg, \ptr, \inc, 4, \cond, \abort
|
||||
usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort
|
||||
.endif
|
||||
|
||||
add\cond \ptr, #\rept * \inc
|
||||
|
|
|
@ -13,6 +13,10 @@ typedef struct {
|
|||
|
||||
#ifdef CONFIG_CPU_HAS_ASID
|
||||
#define ASID(mm) ((mm)->context.id & 255)
|
||||
|
||||
/* init_mm.context.id_lock should be initialized. */
|
||||
#define INIT_MM_CONTEXT(name) \
|
||||
.context.id_lock = __SPIN_LOCK_UNLOCKED(name.context.id_lock),
|
||||
#else
|
||||
#define ASID(mm) (0)
|
||||
#endif
|
||||
|
|
|
@ -374,6 +374,9 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
|
|||
|
||||
#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd)))
|
||||
|
||||
/* we don't need complex calculations here as the pmd is folded into the pgd */
|
||||
#define pmd_addr_end(addr,end) (end)
|
||||
|
||||
/*
|
||||
* Conversion functions: convert a page and protection to a page entry,
|
||||
* and a page entry and page directory to the page they refer to.
|
||||
|
|
|
@ -174,8 +174,8 @@ ENDPROC(_find_next_bit_be)
|
|||
*/
|
||||
.L_found:
|
||||
#if __LINUX_ARM_ARCH__ >= 5
|
||||
rsb r1, r3, #0
|
||||
and r3, r3, r1
|
||||
rsb r0, r3, #0
|
||||
and r3, r3, r0
|
||||
clz r3, r3
|
||||
rsb r3, r3, #31
|
||||
add r0, r2, r3
|
||||
|
@ -190,5 +190,7 @@ ENDPROC(_find_next_bit_be)
|
|||
addeq r2, r2, #1
|
||||
mov r0, r2
|
||||
#endif
|
||||
cmp r1, r0 @ Clamp to maxbit
|
||||
movlo r0, r1
|
||||
mov pc, lr
|
||||
|
||||
|
|
|
@ -11,6 +11,6 @@
|
|||
#ifndef __ASM_ARCH_VMALLOC_H
|
||||
#define __ASM_ARCH_VMALLOC_H
|
||||
|
||||
#define VMALLOC_END 0xd0000000
|
||||
#define VMALLOC_END 0xd0000000UL
|
||||
|
||||
#endif /* __ASM_ARCH_VMALLOC_H */
|
||||
|
|
|
@ -22,4 +22,4 @@
|
|||
* 0xe0000000 to 0xefffffff. This gives us 256 MB of vm space and handles
|
||||
* larger physical memory designs better.
|
||||
*/
|
||||
#define VMALLOC_END 0xf0000000
|
||||
#define VMALLOC_END 0xf0000000UL
|
||||
|
|
|
@ -17,4 +17,4 @@
|
|||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
*/
|
||||
#define VMALLOC_END 0xd0000000
|
||||
#define VMALLOC_END 0xd0000000UL
|
||||
|
|
|
@ -359,8 +359,8 @@ static struct clk_lookup dm355_clks[] = {
|
|||
CLK(NULL, "uart1", &uart1_clk),
|
||||
CLK(NULL, "uart2", &uart2_clk),
|
||||
CLK("i2c_davinci.1", NULL, &i2c_clk),
|
||||
CLK("davinci-asp.0", NULL, &asp0_clk),
|
||||
CLK("davinci-asp.1", NULL, &asp1_clk),
|
||||
CLK("davinci-mcbsp.0", NULL, &asp0_clk),
|
||||
CLK("davinci-mcbsp.1", NULL, &asp1_clk),
|
||||
CLK("davinci_mmc.0", NULL, &mmcsd0_clk),
|
||||
CLK("davinci_mmc.1", NULL, &mmcsd1_clk),
|
||||
CLK("spi_davinci.0", NULL, &spi0_clk),
|
||||
|
@ -664,7 +664,7 @@ static struct resource dm355_asp1_resources[] = {
|
|||
};
|
||||
|
||||
static struct platform_device dm355_asp1_device = {
|
||||
.name = "davinci-asp",
|
||||
.name = "davinci-mcbsp",
|
||||
.id = 1,
|
||||
.num_resources = ARRAY_SIZE(dm355_asp1_resources),
|
||||
.resource = dm355_asp1_resources,
|
||||
|
|
|
@ -459,7 +459,7 @@ static struct clk_lookup dm365_clks[] = {
|
|||
CLK(NULL, "usb", &usb_clk),
|
||||
CLK("davinci_emac.1", NULL, &emac_clk),
|
||||
CLK("davinci_voicecodec", NULL, &voicecodec_clk),
|
||||
CLK("davinci-asp.0", NULL, &asp0_clk),
|
||||
CLK("davinci-mcbsp", NULL, &asp0_clk),
|
||||
CLK(NULL, "rto", &rto_clk),
|
||||
CLK(NULL, "mjcp", &mjcp_clk),
|
||||
CLK(NULL, NULL, NULL),
|
||||
|
@ -922,8 +922,8 @@ static struct resource dm365_asp_resources[] = {
|
|||
};
|
||||
|
||||
static struct platform_device dm365_asp_device = {
|
||||
.name = "davinci-asp",
|
||||
.id = 0,
|
||||
.name = "davinci-mcbsp",
|
||||
.id = -1,
|
||||
.num_resources = ARRAY_SIZE(dm365_asp_resources),
|
||||
.resource = dm365_asp_resources,
|
||||
};
|
||||
|
|
|
@ -302,7 +302,7 @@ static struct clk_lookup dm644x_clks[] = {
|
|||
CLK("davinci_emac.1", NULL, &emac_clk),
|
||||
CLK("i2c_davinci.1", NULL, &i2c_clk),
|
||||
CLK("palm_bk3710", NULL, &ide_clk),
|
||||
CLK("davinci-asp", NULL, &asp_clk),
|
||||
CLK("davinci-mcbsp", NULL, &asp_clk),
|
||||
CLK("davinci_mmc.0", NULL, &mmcsd_clk),
|
||||
CLK(NULL, "spi", &spi_clk),
|
||||
CLK(NULL, "gpio", &gpio_clk),
|
||||
|
@ -580,7 +580,7 @@ static struct resource dm644x_asp_resources[] = {
|
|||
};
|
||||
|
||||
static struct platform_device dm644x_asp_device = {
|
||||
.name = "davinci-asp",
|
||||
.name = "davinci-mcbsp",
|
||||
.id = -1,
|
||||
.num_resources = ARRAY_SIZE(dm644x_asp_resources),
|
||||
.resource = dm644x_asp_resources,
|
||||
|
|
|
@ -7,4 +7,4 @@
|
|||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
#define VMALLOC_END 0xdf000000
|
||||
#define VMALLOC_END 0xdf000000UL
|
||||
|
|
|
@ -7,4 +7,4 @@
|
|||
*/
|
||||
|
||||
|
||||
#define VMALLOC_END 0xf0000000
|
||||
#define VMALLOC_END 0xf0000000UL
|
||||
|
|
|
@ -5,6 +5,6 @@
|
|||
#ifndef __ARCH_ARM_VMALLOC_H
|
||||
#define __ARCH_ARM_VMALLOC_H
|
||||
|
||||
#define VMALLOC_END 0xd0000000
|
||||
#define VMALLOC_END 0xd0000000UL
|
||||
|
||||
#endif
|
||||
|
|
|
@ -79,7 +79,7 @@ imx25_sdhci_esdhc_imx_data[] __initconst;
|
|||
#define imx25_add_sdhci_esdhc_imx(id, pdata) \
|
||||
imx_add_sdhci_esdhc_imx(&imx25_sdhci_esdhc_imx_data[id], pdata)
|
||||
|
||||
extern const struct imx_spi_imx_data imx25_spi_imx_data[] __initconst;
|
||||
extern const struct imx_spi_imx_data imx25_cspi_data[] __initconst;
|
||||
#define imx25_add_spi_imx(id, pdata) \
|
||||
imx_add_spi_imx(&imx25_spi_imx_data[id], pdata)
|
||||
#define imx25_add_spi_imx0(pdata) imx25_add_spi_imx(0, pdata)
|
||||
|
|
|
@ -247,9 +247,6 @@ static const struct imxuart_platform_data uart_pdata __initconst = {
|
|||
.flags = IMXUART_HAVE_RTSCTS,
|
||||
};
|
||||
|
||||
#if defined(CONFIG_TOUCHSCREEN_ADS7846) \
|
||||
|| defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE)
|
||||
|
||||
#define ADS7846_PENDOWN (GPIO_PORTD | 25)
|
||||
|
||||
static void ads7846_dev_init(void)
|
||||
|
@ -270,9 +267,7 @@ static struct ads7846_platform_data ads7846_config __initdata = {
|
|||
.get_pendown_state = ads7846_get_pendown_state,
|
||||
.keep_vref_on = 1,
|
||||
};
|
||||
#endif
|
||||
|
||||
#if defined(CONFIG_SPI_IMX) || defined(CONFIG_SPI_IMX_MODULE)
|
||||
static struct spi_board_info eukrea_mbimx27_spi_board_info[] __initdata = {
|
||||
[0] = {
|
||||
.modalias = "ads7846",
|
||||
|
@ -291,7 +286,6 @@ static const struct spi_imx_master eukrea_mbimx27_spi0_data __initconst = {
|
|||
.chipselect = eukrea_mbimx27_spi_cs,
|
||||
.num_chipselect = ARRAY_SIZE(eukrea_mbimx27_spi_cs),
|
||||
};
|
||||
#endif
|
||||
|
||||
static struct i2c_board_info eukrea_mbimx27_i2c_devices[] = {
|
||||
{
|
||||
|
|
|
@ -17,4 +17,4 @@
|
|||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
*/
|
||||
#define VMALLOC_END 0xd0000000
|
||||
#define VMALLOC_END 0xd0000000UL
|
||||
|
|
|
@ -16,7 +16,7 @@
|
|||
#ifndef __ASM_ARCH_MSM_VMALLOC_H
|
||||
#define __ASM_ARCH_MSM_VMALLOC_H
|
||||
|
||||
#define VMALLOC_END 0xd0000000
|
||||
#define VMALLOC_END 0xd0000000UL
|
||||
|
||||
#endif
|
||||
|
||||
|
|
|
@ -14,6 +14,7 @@
|
|||
|
||||
#include <mach/common.h>
|
||||
#include <mach/iomux-mx3.h>
|
||||
#include <mach/spi.h>
|
||||
|
||||
#include <asm/mach-types.h>
|
||||
|
||||
|
@ -59,14 +60,12 @@ static struct spi_board_info pcm037_spi_dev[] = {
|
|||
};
|
||||
|
||||
/* Platform Data for MXC CSPI */
|
||||
#if defined(CONFIG_SPI_IMX) || defined(CONFIG_SPI_IMX_MODULE)
|
||||
static int pcm037_spi1_cs[] = {MXC_SPI_CS(1), IOMUX_TO_GPIO(MX31_PIN_KEY_COL7)};
|
||||
|
||||
static const struct spi_imx_master pcm037_spi1_pdata __initconst = {
|
||||
.chipselect = pcm037_spi1_cs,
|
||||
.num_chipselect = ARRAY_SIZE(pcm037_spi1_cs),
|
||||
};
|
||||
#endif
|
||||
|
||||
/* GPIO-keys input device */
|
||||
static struct gpio_keys_button pcm037_gpio_keys[] = {
|
||||
|
|
|
@ -16,4 +16,4 @@
|
|||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
*/
|
||||
#define VMALLOC_END 0xd0000000
|
||||
#define VMALLOC_END 0xd0000000UL
|
||||
|
|
|
@ -17,4 +17,4 @@
|
|||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
*/
|
||||
#define VMALLOC_END 0xd8000000
|
||||
#define VMALLOC_END 0xd8000000UL
|
||||
|
|
|
@ -17,4 +17,4 @@
|
|||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
*/
|
||||
#define VMALLOC_END 0xf8000000
|
||||
#define VMALLOC_END 0xf8000000UL
|
||||
|
|
|
@ -30,6 +30,7 @@
|
|||
#include <linux/irq.h>
|
||||
#include <linux/time.h>
|
||||
#include <linux/gpio.h>
|
||||
#include <linux/console.h>
|
||||
|
||||
#include <asm/mach/time.h>
|
||||
#include <asm/mach/irq.h>
|
||||
|
@ -118,6 +119,10 @@ static void omap2_enter_full_retention(void)
|
|||
if (omap_irq_pending())
|
||||
goto no_sleep;
|
||||
|
||||
/* Block console output in case it is on one of the OMAP UARTs */
|
||||
if (try_acquire_console_sem())
|
||||
goto no_sleep;
|
||||
|
||||
omap_uart_prepare_idle(0);
|
||||
omap_uart_prepare_idle(1);
|
||||
omap_uart_prepare_idle(2);
|
||||
|
@ -131,6 +136,8 @@ static void omap2_enter_full_retention(void)
|
|||
omap_uart_resume_idle(1);
|
||||
omap_uart_resume_idle(0);
|
||||
|
||||
release_console_sem();
|
||||
|
||||
no_sleep:
|
||||
if (omap2_pm_debug) {
|
||||
unsigned long long tmp;
|
||||
|
|
|
@ -28,6 +28,7 @@
|
|||
#include <linux/clk.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/console.h>
|
||||
|
||||
#include <plat/sram.h>
|
||||
#include <plat/clockdomain.h>
|
||||
|
@ -385,6 +386,12 @@ void omap_sram_idle(void)
|
|||
omap3_enable_io_chain();
|
||||
}
|
||||
|
||||
/* Block console output in case it is on one of the OMAP UARTs */
|
||||
if (per_next_state < PWRDM_POWER_ON ||
|
||||
core_next_state < PWRDM_POWER_ON)
|
||||
if (try_acquire_console_sem())
|
||||
goto console_still_active;
|
||||
|
||||
/* PER */
|
||||
if (per_next_state < PWRDM_POWER_ON) {
|
||||
omap_uart_prepare_idle(2);
|
||||
|
@ -463,6 +470,9 @@ void omap_sram_idle(void)
|
|||
omap_uart_resume_idle(3);
|
||||
}
|
||||
|
||||
release_console_sem();
|
||||
|
||||
console_still_active:
|
||||
/* Disable IO-PAD and IO-CHAIN wakeup */
|
||||
if (omap3_has_io_wakeup() &&
|
||||
(per_next_state < PWRDM_POWER_ON ||
|
||||
|
|
|
@ -27,6 +27,7 @@
|
|||
#include <linux/slab.h>
|
||||
#include <linux/serial_8250.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
#include <linux/console.h>
|
||||
|
||||
#ifdef CONFIG_SERIAL_OMAP
|
||||
#include <plat/omap-serial.h>
|
||||
|
@ -406,7 +407,7 @@ void omap_uart_resume_idle(int num)
|
|||
struct omap_uart_state *uart;
|
||||
|
||||
list_for_each_entry(uart, &uart_list, node) {
|
||||
if (num == uart->num) {
|
||||
if (num == uart->num && uart->can_sleep) {
|
||||
omap_uart_enable_clocks(uart);
|
||||
|
||||
/* Check for IO pad wakeup */
|
||||
|
@ -807,6 +808,8 @@ void __init omap_serial_init_port(int port)
|
|||
|
||||
oh->dev_attr = uart;
|
||||
|
||||
acquire_console_sem(); /* in case the earlycon is on the UART */
|
||||
|
||||
/*
|
||||
* Because of early UART probing, UART did not get idled
|
||||
* on init. Now that omap_device is ready, ensure full idle
|
||||
|
@ -831,6 +834,8 @@ void __init omap_serial_init_port(int port)
|
|||
omap_uart_block_sleep(uart);
|
||||
uart->timeout = DEFAULT_TIMEOUT;
|
||||
|
||||
release_console_sem();
|
||||
|
||||
if ((cpu_is_omap34xx() && uart->padconf) ||
|
||||
(uart->wk_en && uart->wk_mask)) {
|
||||
device_init_wakeup(&od->pdev.dev, true);
|
||||
|
|
|
@ -17,4 +17,4 @@
|
|||
* The vmalloc() routines leaves a hole of 4kB between each vmalloced
|
||||
* area for the same reason. ;)
|
||||
*/
|
||||
#define VMALLOC_END 0xd0000000
|
||||
#define VMALLOC_END 0xd0000000UL
|
||||
|
|
|
@ -7,4 +7,4 @@
|
|||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
#define VMALLOC_END 0xdc000000
|
||||
#define VMALLOC_END 0xdc000000UL
|
||||
|
|
|
@ -77,13 +77,13 @@ static int __devinit h1940bt_probe(struct platform_device *pdev)
|
|||
|
||||
/* Configures BT serial port GPIOs */
|
||||
s3c_gpio_cfgpin(S3C2410_GPH(0), S3C2410_GPH0_nCTS0);
|
||||
s3c_gpio_cfgpull(S3C2410_GPH(0), S3C_GPIO_PULL_NONE);
|
||||
s3c_gpio_setpull(S3C2410_GPH(0), S3C_GPIO_PULL_NONE);
|
||||
s3c_gpio_cfgpin(S3C2410_GPH(1), S3C2410_GPIO_OUTPUT);
|
||||
s3c_gpio_cfgpull(S3C2410_GPH(1), S3C_GPIO_PULL_NONE);
|
||||
s3c_gpio_setpull(S3C2410_GPH(1), S3C_GPIO_PULL_NONE);
|
||||
s3c_gpio_cfgpin(S3C2410_GPH(2), S3C2410_GPH2_TXD0);
|
||||
s3c_gpio_cfgpull(S3C2410_GPH(2), S3C_GPIO_PULL_NONE);
|
||||
s3c_gpio_setpull(S3C2410_GPH(2), S3C_GPIO_PULL_NONE);
|
||||
s3c_gpio_cfgpin(S3C2410_GPH(3), S3C2410_GPH3_RXD0);
|
||||
s3c_gpio_cfgpull(S3C2410_GPH(3), S3C_GPIO_PULL_NONE);
|
||||
s3c_gpio_setpull(S3C2410_GPH(3), S3C_GPIO_PULL_NONE);
|
||||
|
||||
|
||||
rfk = rfkill_alloc(DRV_NAME, &pdev->dev, RFKILL_TYPE_BLUETOOTH,
|
||||
|
|
|
@ -168,12 +168,11 @@ static struct irq_chip s3c2416_irq_dma = {
|
|||
|
||||
static void s3c2416_irq_demux_uart3(unsigned int irq, struct irq_desc *desc)
|
||||
{
|
||||
s3c2416_irq_demux(IRQ_S3C2443_UART3, 3);
|
||||
s3c2416_irq_demux(IRQ_S3C2443_RX3, 3);
|
||||
}
|
||||
|
||||
#define INTMSK_UART3 (1UL << (IRQ_S3C2443_UART3 - IRQ_EINT0))
|
||||
#define SUBMSK_UART3 (0xf << (IRQ_S3C2443_RX3 - S3C2410_IRQSUB(0)))
|
||||
|
||||
#define SUBMSK_UART3 (0x7 << (IRQ_S3C2443_RX3 - S3C2410_IRQSUB(0)))
|
||||
|
||||
static void s3c2416_irq_uart3_mask(unsigned int irqno)
|
||||
{
|
||||
|
|
|
@ -166,12 +166,11 @@ static struct irq_chip s3c2443_irq_dma = {
|
|||
|
||||
static void s3c2443_irq_demux_uart3(unsigned int irq, struct irq_desc *desc)
|
||||
{
|
||||
s3c2443_irq_demux(IRQ_S3C2443_UART3, 3);
|
||||
s3c2443_irq_demux(IRQ_S3C2443_RX3, 3);
|
||||
}
|
||||
|
||||
#define INTMSK_UART3 (1UL << (IRQ_S3C2443_UART3 - IRQ_EINT0))
|
||||
#define SUBMSK_UART3 (0xf << (IRQ_S3C2443_RX3 - S3C2410_IRQSUB(0)))
|
||||
|
||||
#define SUBMSK_UART3 (0x7 << (IRQ_S3C2443_RX3 - S3C2410_IRQSUB(0)))
|
||||
|
||||
static void s3c2443_irq_uart3_mask(unsigned int irqno)
|
||||
{
|
||||
|
|
|
@ -45,7 +45,7 @@
|
|||
|
||||
#include <video/platform_lcd.h>
|
||||
|
||||
#define UCON (S3C2410_UCON_DEFAULT | S3C2410_UCON_UCLK)
|
||||
#define UCON S3C2410_UCON_DEFAULT
|
||||
#define ULCON (S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB)
|
||||
#define UFCON (S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE)
|
||||
|
||||
|
|
|
@ -46,7 +46,7 @@
|
|||
|
||||
#include <video/platform_lcd.h>
|
||||
|
||||
#define UCON (S3C2410_UCON_DEFAULT | S3C2410_UCON_UCLK)
|
||||
#define UCON S3C2410_UCON_DEFAULT
|
||||
#define ULCON (S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB)
|
||||
#define UFCON (S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE)
|
||||
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
#include <linux/init.h>
|
||||
#include <linux/serial_core.h>
|
||||
#include <linux/i2c.h>
|
||||
#include <linux/sysdev.h>
|
||||
|
||||
#include <asm/mach/arch.h>
|
||||
#include <asm/mach/map.h>
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
#include <linux/i2c.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/serial_core.h>
|
||||
#include <linux/sysdev.h>
|
||||
|
||||
#include <asm/mach/arch.h>
|
||||
#include <asm/mach/map.h>
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/*
|
||||
* arch/arm/mach-shark/include/mach/vmalloc.h
|
||||
*/
|
||||
#define VMALLOC_END 0xd0000000
|
||||
#define VMALLOC_END 0xd0000000UL
|
||||
|
|
|
@ -567,38 +567,127 @@ static struct platform_device *qhd_devices[] __initdata = {
|
|||
|
||||
/* FSI */
|
||||
#define IRQ_FSI evt2irq(0x1840)
|
||||
static int __fsi_set_rate(struct clk *clk, long rate, int enable)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
static int fsi_set_rate(int is_porta, int rate)
|
||||
if (rate <= 0)
|
||||
return ret;
|
||||
|
||||
if (enable) {
|
||||
ret = clk_set_rate(clk, rate);
|
||||
if (0 == ret)
|
||||
ret = clk_enable(clk);
|
||||
} else {
|
||||
clk_disable(clk);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int __fsi_set_round_rate(struct clk *clk, long rate, int enable)
|
||||
{
|
||||
return __fsi_set_rate(clk, clk_round_rate(clk, rate), enable);
|
||||
}
|
||||
|
||||
static int fsi_ak4642_set_rate(struct device *dev, int rate, int enable)
|
||||
{
|
||||
struct clk *fsia_ick;
|
||||
struct clk *fsiack;
|
||||
int ret = -EIO;
|
||||
|
||||
fsia_ick = clk_get(dev, "icka");
|
||||
if (IS_ERR(fsia_ick))
|
||||
return PTR_ERR(fsia_ick);
|
||||
|
||||
/*
|
||||
* FSIACK is connected to AK4642,
|
||||
* and use external clock pin from it.
|
||||
* it is parent of fsia_ick now.
|
||||
*/
|
||||
fsiack = clk_get_parent(fsia_ick);
|
||||
if (!fsiack)
|
||||
goto fsia_ick_out;
|
||||
|
||||
/*
|
||||
* we get 1/1 divided clock by setting same rate to fsiack and fsia_ick
|
||||
*
|
||||
** FIXME **
|
||||
* Because the freq_table of external clk (fsiack) are all 0,
|
||||
* the return value of clk_round_rate became 0.
|
||||
* So, it use __fsi_set_rate here.
|
||||
*/
|
||||
ret = __fsi_set_rate(fsiack, rate, enable);
|
||||
if (ret < 0)
|
||||
goto fsiack_out;
|
||||
|
||||
ret = __fsi_set_round_rate(fsia_ick, rate, enable);
|
||||
if ((ret < 0) && enable)
|
||||
__fsi_set_round_rate(fsiack, rate, 0); /* disable FSI ACK */
|
||||
|
||||
fsiack_out:
|
||||
clk_put(fsiack);
|
||||
|
||||
fsia_ick_out:
|
||||
clk_put(fsia_ick);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int fsi_hdmi_set_rate(struct device *dev, int rate, int enable)
|
||||
{
|
||||
struct clk *fsib_clk;
|
||||
struct clk *fdiv_clk = &sh7372_fsidivb_clk;
|
||||
long fsib_rate = 0;
|
||||
long fdiv_rate = 0;
|
||||
int ackmd_bpfmd;
|
||||
int ret;
|
||||
|
||||
/* set_rate is not needed if port A */
|
||||
if (is_porta)
|
||||
return 0;
|
||||
|
||||
fsib_clk = clk_get(NULL, "fsib_clk");
|
||||
if (IS_ERR(fsib_clk))
|
||||
return -EINVAL;
|
||||
|
||||
switch (rate) {
|
||||
case 44100:
|
||||
clk_set_rate(fsib_clk, clk_round_rate(fsib_clk, 11283000));
|
||||
ret = SH_FSI_ACKMD_256 | SH_FSI_BPFMD_64;
|
||||
fsib_rate = rate * 256;
|
||||
ackmd_bpfmd = SH_FSI_ACKMD_256 | SH_FSI_BPFMD_64;
|
||||
break;
|
||||
case 48000:
|
||||
clk_set_rate(fsib_clk, clk_round_rate(fsib_clk, 85428000));
|
||||
clk_set_rate(fdiv_clk, clk_round_rate(fdiv_clk, 12204000));
|
||||
ret = SH_FSI_ACKMD_256 | SH_FSI_BPFMD_64;
|
||||
fsib_rate = 85428000; /* around 48kHz x 256 x 7 */
|
||||
fdiv_rate = rate * 256;
|
||||
ackmd_bpfmd = SH_FSI_ACKMD_256 | SH_FSI_BPFMD_64;
|
||||
break;
|
||||
default:
|
||||
pr_err("unsupported rate in FSI2 port B\n");
|
||||
ret = -EINVAL;
|
||||
break;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* FSI B setting */
|
||||
fsib_clk = clk_get(dev, "ickb");
|
||||
if (IS_ERR(fsib_clk))
|
||||
return -EIO;
|
||||
|
||||
ret = __fsi_set_round_rate(fsib_clk, fsib_rate, enable);
|
||||
clk_put(fsib_clk);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
/* FSI DIV setting */
|
||||
ret = __fsi_set_round_rate(fdiv_clk, fdiv_rate, enable);
|
||||
if (ret < 0) {
|
||||
/* disable FSI B */
|
||||
if (enable)
|
||||
__fsi_set_round_rate(fsib_clk, fsib_rate, 0);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return ackmd_bpfmd;
|
||||
}
|
||||
|
||||
static int fsi_set_rate(struct device *dev, int is_porta, int rate, int enable)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (is_porta)
|
||||
ret = fsi_ak4642_set_rate(dev, rate, enable);
|
||||
else
|
||||
ret = fsi_hdmi_set_rate(dev, rate, enable);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -880,6 +969,11 @@ static int __init hdmi_init_pm_clock(void)
|
|||
goto out;
|
||||
}
|
||||
|
||||
ret = clk_enable(&sh7372_pllc2_clk);
|
||||
if (ret < 0) {
|
||||
pr_err("Cannot enable pllc2 clock\n");
|
||||
goto out;
|
||||
}
|
||||
pr_debug("PLLC2 set frequency %lu\n", rate);
|
||||
|
||||
ret = clk_set_parent(hdmi_ick, &sh7372_pllc2_clk);
|
||||
|
@ -896,23 +990,11 @@ out:
|
|||
|
||||
device_initcall(hdmi_init_pm_clock);
|
||||
|
||||
#define FSIACK_DUMMY_RATE 48000
|
||||
static int __init fsi_init_pm_clock(void)
|
||||
{
|
||||
struct clk *fsia_ick;
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* FSIACK is connected to AK4642,
|
||||
* and the rate is depend on playing sound rate.
|
||||
* So, set dummy rate (= 48k) here
|
||||
*/
|
||||
ret = clk_set_rate(&sh7372_fsiack_clk, FSIACK_DUMMY_RATE);
|
||||
if (ret < 0) {
|
||||
pr_err("Cannot set FSIACK dummy rate: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
fsia_ick = clk_get(&fsi_device.dev, "icka");
|
||||
if (IS_ERR(fsia_ick)) {
|
||||
ret = PTR_ERR(fsia_ick);
|
||||
|
@ -921,16 +1003,9 @@ static int __init fsi_init_pm_clock(void)
|
|||
}
|
||||
|
||||
ret = clk_set_parent(fsia_ick, &sh7372_fsiack_clk);
|
||||
if (ret < 0) {
|
||||
pr_err("Cannot set FSI-A parent: %d\n", ret);
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = clk_set_rate(fsia_ick, FSIACK_DUMMY_RATE);
|
||||
if (ret < 0)
|
||||
pr_err("Cannot set FSI-A rate: %d\n", ret);
|
||||
pr_err("Cannot set FSI-A parent: %d\n", ret);
|
||||
|
||||
out:
|
||||
clk_put(fsia_ick);
|
||||
|
||||
return ret;
|
||||
|
|
|
@ -220,8 +220,7 @@ static void pllc2_disable(struct clk *clk)
|
|||
__raw_writel(__raw_readl(PLLC2CR) & ~0x80000000, PLLC2CR);
|
||||
}
|
||||
|
||||
static int pllc2_set_rate(struct clk *clk,
|
||||
unsigned long rate, int algo_id)
|
||||
static int pllc2_set_rate(struct clk *clk, unsigned long rate)
|
||||
{
|
||||
unsigned long value;
|
||||
int idx;
|
||||
|
@ -230,21 +229,13 @@ static int pllc2_set_rate(struct clk *clk,
|
|||
if (idx < 0)
|
||||
return idx;
|
||||
|
||||
if (rate == clk->parent->rate) {
|
||||
pllc2_disable(clk);
|
||||
return 0;
|
||||
}
|
||||
if (rate == clk->parent->rate)
|
||||
return -EINVAL;
|
||||
|
||||
value = __raw_readl(PLLC2CR) & ~(0x3f << 24);
|
||||
|
||||
if (value & 0x80000000)
|
||||
pllc2_disable(clk);
|
||||
|
||||
__raw_writel((value & ~0x80000000) | ((idx + 19) << 24), PLLC2CR);
|
||||
|
||||
if (value & 0x80000000)
|
||||
return pllc2_enable(clk);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -453,32 +444,24 @@ static int fsidiv_enable(struct clk *clk)
|
|||
unsigned long value;
|
||||
|
||||
value = __raw_readl(clk->mapping->base) >> 16;
|
||||
if (value < 2) {
|
||||
fsidiv_disable(clk);
|
||||
return -ENOENT;
|
||||
}
|
||||
if (value < 2)
|
||||
return -EIO;
|
||||
|
||||
__raw_writel((value << 16) | 0x3, clk->mapping->base);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int fsidiv_set_rate(struct clk *clk,
|
||||
unsigned long rate, int algo_id)
|
||||
static int fsidiv_set_rate(struct clk *clk, unsigned long rate)
|
||||
{
|
||||
int idx;
|
||||
|
||||
if (clk->parent->rate == rate) {
|
||||
fsidiv_disable(clk);
|
||||
return 0;
|
||||
}
|
||||
|
||||
idx = (clk->parent->rate / rate) & 0xffff;
|
||||
if (idx < 2)
|
||||
return -ENOENT;
|
||||
return -EINVAL;
|
||||
|
||||
__raw_writel(idx << 16, clk->mapping->base);
|
||||
return fsidiv_enable(clk);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct clk_ops fsidiv_clk_ops = {
|
||||
|
@ -609,8 +592,6 @@ static struct clk_lookup lookups[] = {
|
|||
CLKDEV_CON_ID("vck3_clk", &div6_clks[DIV6_VCK3]),
|
||||
CLKDEV_CON_ID("fmsi_clk", &div6_clks[DIV6_FMSI]),
|
||||
CLKDEV_CON_ID("fmso_clk", &div6_clks[DIV6_FMSO]),
|
||||
CLKDEV_CON_ID("fsia_clk", &div6_reparent_clks[DIV6_FSIA]),
|
||||
CLKDEV_CON_ID("fsib_clk", &div6_reparent_clks[DIV6_FSIB]),
|
||||
CLKDEV_CON_ID("sub_clk", &div6_clks[DIV6_SUB]),
|
||||
CLKDEV_CON_ID("spu_clk", &div6_clks[DIV6_SPU]),
|
||||
CLKDEV_CON_ID("vou_clk", &div6_clks[DIV6_VOU]),
|
||||
|
@ -647,8 +628,8 @@ static struct clk_lookup lookups[] = {
|
|||
CLKDEV_DEV_ID("sh_cmt.10", &mstp_clks[MSTP329]), /* CMT10 */
|
||||
CLKDEV_DEV_ID("sh_fsi2", &mstp_clks[MSTP328]), /* FSI2 */
|
||||
CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[MSTP323]), /* IIC1 */
|
||||
CLKDEV_DEV_ID("r8a66597_hcd.0", &mstp_clks[MSTP323]), /* USB0 */
|
||||
CLKDEV_DEV_ID("r8a66597_udc.0", &mstp_clks[MSTP323]), /* USB0 */
|
||||
CLKDEV_DEV_ID("r8a66597_hcd.0", &mstp_clks[MSTP322]), /* USB0 */
|
||||
CLKDEV_DEV_ID("r8a66597_udc.0", &mstp_clks[MSTP322]), /* USB0 */
|
||||
CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[MSTP314]), /* SDHI0 */
|
||||
CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[MSTP313]), /* SDHI1 */
|
||||
CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[MSTP312]), /* MMC */
|
||||
|
|
|
@ -75,14 +75,14 @@ void __init ux500_init_irq(void)
|
|||
static inline void ux500_cache_wait(void __iomem *reg, unsigned long mask)
|
||||
{
|
||||
/* wait for the operation to complete */
|
||||
while (readl(reg) & mask)
|
||||
while (readl_relaxed(reg) & mask)
|
||||
;
|
||||
}
|
||||
|
||||
static inline void ux500_cache_sync(void)
|
||||
{
|
||||
void __iomem *base = __io_address(UX500_L2CC_BASE);
|
||||
writel(0, base + L2X0_CACHE_SYNC);
|
||||
writel_relaxed(0, base + L2X0_CACHE_SYNC);
|
||||
ux500_cache_wait(base + L2X0_CACHE_SYNC, 1);
|
||||
}
|
||||
|
||||
|
@ -107,7 +107,7 @@ static void ux500_l2x0_inv_all(void)
|
|||
uint32_t l2x0_way_mask = (1<<16) - 1; /* Bitmask of active ways */
|
||||
|
||||
/* invalidate all ways */
|
||||
writel(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
|
||||
writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
|
||||
ux500_cache_wait(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
|
||||
ux500_cache_sync();
|
||||
}
|
||||
|
|
|
@ -18,4 +18,4 @@
|
|||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
*/
|
||||
#define VMALLOC_END 0xd8000000
|
||||
#define VMALLOC_END 0xd8000000UL
|
||||
|
|
|
@ -206,8 +206,8 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
|
|||
*/
|
||||
if (pfn_valid(pfn)) {
|
||||
printk(KERN_WARNING "BUG: Your driver calls ioremap() on system memory. This leads\n"
|
||||
KERN_WARNING "to architecturally unpredictable behaviour on ARMv6+, and ioremap()\n"
|
||||
KERN_WARNING "will fail in the next kernel release. Please fix your driver.\n");
|
||||
"to architecturally unpredictable behaviour on ARMv6+, and ioremap()\n"
|
||||
"will fail in the next kernel release. Please fix your driver.\n");
|
||||
WARN_ON(1);
|
||||
}
|
||||
|
||||
|
|
|
@ -12,15 +12,7 @@
|
|||
|
||||
#include <mach/hardware.h>
|
||||
#include <mach/devices-common.h>
|
||||
#ifdef SDMA_IS_MERGED
|
||||
#include <mach/sdma.h>
|
||||
#else
|
||||
struct sdma_platform_data {
|
||||
int sdma_version;
|
||||
char *cpu_name;
|
||||
int to_version;
|
||||
};
|
||||
#endif
|
||||
|
||||
struct imx_imx_sdma_data {
|
||||
resource_size_t iobase;
|
||||
|
|
|
@ -27,6 +27,7 @@ const struct imx_spi_imx_data imx21_cspi_data[] __initconst = {
|
|||
imx_spi_imx_data_entry(MX21, CSPI, "imx21-cspi", _id, _hwid, SZ_4K)
|
||||
imx21_cspi_data_entry(0, 1),
|
||||
imx21_cspi_data_entry(1, 2),
|
||||
};
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_SOC_IMX25
|
||||
|
|
|
@ -3,6 +3,7 @@
|
|||
*
|
||||
* Copyright (C) 2008 STMicroelectronics
|
||||
* Copyright (C) 2010 Alessandro Rubini
|
||||
* Copyright (C) 2010 Linus Walleij for ST-Ericsson
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2, as
|
||||
|
@ -16,11 +17,13 @@
|
|||
#include <linux/clk.h>
|
||||
#include <linux/jiffies.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/cnt32_to_63.h>
|
||||
#include <linux/timer.h>
|
||||
#include <asm/mach/time.h>
|
||||
|
||||
#include <plat/mtu.h>
|
||||
|
||||
void __iomem *mtu_base; /* ssigned by machine code */
|
||||
void __iomem *mtu_base; /* Assigned by machine code */
|
||||
|
||||
/*
|
||||
* Kernel assumes that sched_clock can be called early
|
||||
|
@ -48,16 +51,82 @@ static struct clocksource nmdk_clksrc = {
|
|||
/*
|
||||
* Override the global weak sched_clock symbol with this
|
||||
* local implementation which uses the clocksource to get some
|
||||
* better resolution when scheduling the kernel. We accept that
|
||||
* this wraps around for now, since it is just a relative time
|
||||
* stamp. (Inspired by OMAP implementation.)
|
||||
* better resolution when scheduling the kernel.
|
||||
*
|
||||
* Because the hardware timer period may be quite short
|
||||
* (32.3 secs on the 133 MHz MTU timer selection on ux500)
|
||||
* and because cnt32_to_63() needs to be called at least once per
|
||||
* half period to work properly, a kernel keepwarm() timer is set up
|
||||
* to ensure this requirement is always met.
|
||||
*
|
||||
* Also the sched_clock timer will wrap around at some point,
|
||||
* here we set it to run continously for a year.
|
||||
*/
|
||||
#define SCHED_CLOCK_MIN_WRAP 3600*24*365
|
||||
static struct timer_list cnt32_to_63_keepwarm_timer;
|
||||
static u32 sched_mult;
|
||||
static u32 sched_shift;
|
||||
|
||||
unsigned long long notrace sched_clock(void)
|
||||
{
|
||||
return clocksource_cyc2ns(nmdk_clksrc.read(
|
||||
&nmdk_clksrc),
|
||||
nmdk_clksrc.mult,
|
||||
nmdk_clksrc.shift);
|
||||
u64 cycles;
|
||||
|
||||
if (unlikely(!mtu_base))
|
||||
return 0;
|
||||
|
||||
cycles = cnt32_to_63(-readl(mtu_base + MTU_VAL(0)));
|
||||
/*
|
||||
* sched_mult is guaranteed to be even so will
|
||||
* shift out bit 63
|
||||
*/
|
||||
return (cycles * sched_mult) >> sched_shift;
|
||||
}
|
||||
|
||||
/* Just kick sched_clock every so often */
|
||||
static void cnt32_to_63_keepwarm(unsigned long data)
|
||||
{
|
||||
mod_timer(&cnt32_to_63_keepwarm_timer, round_jiffies(jiffies + data));
|
||||
(void) sched_clock();
|
||||
}
|
||||
|
||||
/*
|
||||
* Set up a timer to keep sched_clock():s 32_to_63 algorithm warm
|
||||
* once in half a 32bit timer wrap interval.
|
||||
*/
|
||||
static void __init nmdk_sched_clock_init(unsigned long rate)
|
||||
{
|
||||
u32 v;
|
||||
unsigned long delta;
|
||||
u64 days;
|
||||
|
||||
/* Find the apropriate mult and shift factors */
|
||||
clocks_calc_mult_shift(&sched_mult, &sched_shift,
|
||||
rate, NSEC_PER_SEC, SCHED_CLOCK_MIN_WRAP);
|
||||
/* We need to multiply by an even number to get rid of bit 63 */
|
||||
if (sched_mult & 1)
|
||||
sched_mult++;
|
||||
|
||||
/* Let's see what we get, take max counter and scale it */
|
||||
days = (0xFFFFFFFFFFFFFFFFLLU * sched_mult) >> sched_shift;
|
||||
do_div(days, NSEC_PER_SEC);
|
||||
do_div(days, (3600*24));
|
||||
|
||||
pr_info("sched_clock: using %d bits @ %lu Hz wrap in %lu days\n",
|
||||
(64 - sched_shift), rate, (unsigned long) days);
|
||||
|
||||
/*
|
||||
* Program a timer to kick us at half 32bit wraparound
|
||||
* Formula: seconds per wrap = (2^32) / f
|
||||
*/
|
||||
v = 0xFFFFFFFFUL / rate;
|
||||
/* We want half of the wrap time to keep cnt32_to_63 warm */
|
||||
v /= 2;
|
||||
pr_debug("sched_clock: prescaled timer rate: %lu Hz, "
|
||||
"initialize keepwarm timer every %d seconds\n", rate, v);
|
||||
/* Convert seconds to jiffies */
|
||||
delta = msecs_to_jiffies(v*1000);
|
||||
setup_timer(&cnt32_to_63_keepwarm_timer, cnt32_to_63_keepwarm, delta);
|
||||
mod_timer(&cnt32_to_63_keepwarm_timer, round_jiffies(jiffies + delta));
|
||||
}
|
||||
|
||||
/* Clockevent device: use one-shot mode */
|
||||
|
@ -161,13 +230,15 @@ void __init nmdk_timer_init(void)
|
|||
writel(0, mtu_base + MTU_BGLR(0));
|
||||
writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(0));
|
||||
|
||||
/* Now the scheduling clock is ready */
|
||||
/* Now the clock source is ready */
|
||||
nmdk_clksrc.read = nmdk_read_timer;
|
||||
|
||||
if (clocksource_register(&nmdk_clksrc))
|
||||
pr_err("timer: failed to initialize clock source %s\n",
|
||||
nmdk_clksrc.name);
|
||||
|
||||
nmdk_sched_clock_init(rate);
|
||||
|
||||
/* Timer 1 is used for events */
|
||||
|
||||
clockevents_calc_mult_shift(&nmdk_clkevt, rate, MTU_MIN_RANGE);
|
||||
|
|
|
@ -17,6 +17,9 @@
|
|||
/* Require clock free running */
|
||||
#define PXA_FLAG_DISABLE_CLOCK_GATING (1<<0)
|
||||
|
||||
/* Board design supports 8-bit data on SD/SDIO BUS */
|
||||
#define PXA_FLAG_SD_8_BIT_CAPABLE_SLOT (1<<2)
|
||||
|
||||
/*
|
||||
* struct pxa_sdhci_platdata() - Platform device data for PXA SDHCI
|
||||
* @max_speed: the maximum speed supported
|
||||
|
|
|
@ -29,8 +29,8 @@ void s3c24xx_spi_gpiocfg_bus0_gpe11_12_13(struct s3c2410_spi_info *spi,
|
|||
} else {
|
||||
s3c_gpio_cfgpin(S3C2410_GPE(13), S3C2410_GPIO_INPUT);
|
||||
s3c_gpio_cfgpin(S3C2410_GPE(11), S3C2410_GPIO_INPUT);
|
||||
s3c_gpio_cfgpull(S3C2410_GPE(11), S3C_GPIO_PULL_NONE);
|
||||
s3c_gpio_cfgpull(S3C2410_GPE(12), S3C_GPIO_PULL_NONE);
|
||||
s3c_gpio_cfgpull(S3C2410_GPE(13), S3C_GPIO_PULL_NONE);
|
||||
s3c_gpio_setpull(S3C2410_GPE(11), S3C_GPIO_PULL_NONE);
|
||||
s3c_gpio_setpull(S3C2410_GPE(12), S3C_GPIO_PULL_NONE);
|
||||
s3c_gpio_setpull(S3C2410_GPE(13), S3C_GPIO_PULL_NONE);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -31,8 +31,8 @@ void s3c24xx_spi_gpiocfg_bus1_gpd8_9_10(struct s3c2410_spi_info *spi,
|
|||
} else {
|
||||
s3c_gpio_cfgpin(S3C2410_GPD(8), S3C2410_GPIO_INPUT);
|
||||
s3c_gpio_cfgpin(S3C2410_GPD(9), S3C2410_GPIO_INPUT);
|
||||
s3c_gpio_cfgpull(S3C2410_GPD(10), S3C_GPIO_PULL_NONE);
|
||||
s3c_gpio_cfgpull(S3C2410_GPD(9), S3C_GPIO_PULL_NONE);
|
||||
s3c_gpio_cfgpull(S3C2410_GPD(8), S3C_GPIO_PULL_NONE);
|
||||
s3c_gpio_setpull(S3C2410_GPD(10), S3C_GPIO_PULL_NONE);
|
||||
s3c_gpio_setpull(S3C2410_GPD(9), S3C_GPIO_PULL_NONE);
|
||||
s3c_gpio_setpull(S3C2410_GPD(8), S3C_GPIO_PULL_NONE);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -29,8 +29,8 @@ void s3c24xx_spi_gpiocfg_bus1_gpg5_6_7(struct s3c2410_spi_info *spi,
|
|||
} else {
|
||||
s3c_gpio_cfgpin(S3C2410_GPG(7), S3C2410_GPIO_INPUT);
|
||||
s3c_gpio_cfgpin(S3C2410_GPG(5), S3C2410_GPIO_INPUT);
|
||||
s3c_gpio_cfgpull(S3C2410_GPG(5), S3C_GPIO_PULL_NONE);
|
||||
s3c_gpio_cfgpull(S3C2410_GPG(6), S3C_GPIO_PULL_NONE);
|
||||
s3c_gpio_cfgpull(S3C2410_GPG(7), S3C_GPIO_PULL_NONE);
|
||||
s3c_gpio_setpull(S3C2410_GPG(5), S3C_GPIO_PULL_NONE);
|
||||
s3c_gpio_setpull(S3C2410_GPG(6), S3C_GPIO_PULL_NONE);
|
||||
s3c_gpio_setpull(S3C2410_GPG(7), S3C_GPIO_PULL_NONE);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -92,7 +92,7 @@ static void pte_free_rcu_callback(struct rcu_head *head)
|
|||
|
||||
static void pte_free_submit(struct pte_freelist_batch *batch)
|
||||
{
|
||||
call_rcu(&batch->rcu, pte_free_rcu_callback);
|
||||
call_rcu_sched(&batch->rcu, pte_free_rcu_callback);
|
||||
}
|
||||
|
||||
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift)
|
||||
|
|
|
@ -95,7 +95,6 @@ EXPORT_SYMBOL_GPL(s390_handle_mcck);
|
|||
static int notrace s390_revalidate_registers(struct mci *mci)
|
||||
{
|
||||
int kill_task;
|
||||
u64 tmpclock;
|
||||
u64 zero;
|
||||
void *fpt_save_area, *fpt_creg_save_area;
|
||||
|
||||
|
@ -214,11 +213,10 @@ static int notrace s390_revalidate_registers(struct mci *mci)
|
|||
: "0", "cc");
|
||||
#endif
|
||||
/* Revalidate clock comparator register */
|
||||
asm volatile(
|
||||
" stck 0(%1)\n"
|
||||
" sckc 0(%1)"
|
||||
: "=m" (tmpclock) : "a" (&(tmpclock)) : "cc", "memory");
|
||||
|
||||
if (S390_lowcore.clock_comparator == -1)
|
||||
set_clock_comparator(S390_lowcore.mcck_clock);
|
||||
else
|
||||
set_clock_comparator(S390_lowcore.clock_comparator);
|
||||
/* Check if old PSW is valid */
|
||||
if (!mci->wp)
|
||||
/*
|
||||
|
|
|
@ -29,17 +29,21 @@ static void __udelay_disabled(unsigned long long usecs)
|
|||
{
|
||||
unsigned long mask, cr0, cr0_saved;
|
||||
u64 clock_saved;
|
||||
u64 end;
|
||||
|
||||
mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT;
|
||||
end = get_clock() + (usecs << 12);
|
||||
clock_saved = local_tick_disable();
|
||||
set_clock_comparator(get_clock() + (usecs << 12));
|
||||
__ctl_store(cr0_saved, 0, 0);
|
||||
cr0 = (cr0_saved & 0xffff00e0) | 0x00000800;
|
||||
__ctl_load(cr0 , 0, 0);
|
||||
mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT;
|
||||
lockdep_off();
|
||||
do {
|
||||
set_clock_comparator(end);
|
||||
trace_hardirqs_on();
|
||||
__load_psw_mask(mask);
|
||||
local_irq_disable();
|
||||
} while (get_clock() < end);
|
||||
lockdep_on();
|
||||
__ctl_load(cr0_saved, 0, 0);
|
||||
local_tick_enable(clock_saved);
|
||||
|
|
|
@ -199,10 +199,13 @@ extern unsigned long get_wchan(struct task_struct *p);
|
|||
#define ARCH_HAS_PREFETCHW
|
||||
static inline void prefetch(void *x)
|
||||
{
|
||||
__asm__ __volatile__ ("pref @%0\n\t" : : "r" (x) : "memory");
|
||||
__builtin_prefetch(x, 0, 3);
|
||||
}
|
||||
|
||||
#define prefetchw(x) prefetch(x)
|
||||
static inline void prefetchw(void *x)
|
||||
{
|
||||
__builtin_prefetch(x, 1, 3);
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
|
|
|
@ -110,7 +110,7 @@ static int shoc_clk_verify_rate(struct clk *clk, unsigned long rate)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int shoc_clk_set_rate(struct clk *clk, unsigned long rate, int algo_id)
|
||||
static int shoc_clk_set_rate(struct clk *clk, unsigned long rate)
|
||||
{
|
||||
unsigned long frqcr3;
|
||||
unsigned int tmp;
|
||||
|
|
|
@ -88,7 +88,7 @@ asmlinkage int sys_cacheflush(unsigned long addr, unsigned long len, int op)
|
|||
}
|
||||
|
||||
if (op & CACHEFLUSH_I)
|
||||
flush_cache_all();
|
||||
flush_icache_range(addr, addr+len);
|
||||
|
||||
up_read(¤t->mm->mmap_sem);
|
||||
return 0;
|
||||
|
|
|
@ -8,9 +8,9 @@ __kernel_vsyscall:
|
|||
* fill out .eh_frame -- PFM. */
|
||||
.LEND_vsyscall:
|
||||
.size __kernel_vsyscall,.-.LSTART_vsyscall
|
||||
.previous
|
||||
|
||||
.section .eh_frame,"a",@progbits
|
||||
.previous
|
||||
.LCIE:
|
||||
.ualong .LCIE_end - .LCIE_start
|
||||
.LCIE_start:
|
||||
|
|
|
@ -329,6 +329,18 @@ endmenu # Tilera-specific configuration
|
|||
|
||||
menu "Bus options"
|
||||
|
||||
config PCI
|
||||
bool "PCI support"
|
||||
default y
|
||||
select PCI_DOMAINS
|
||||
---help---
|
||||
Enable PCI root complex support, so PCIe endpoint devices can
|
||||
be attached to the Tile chip. Many, but not all, PCI devices
|
||||
are supported under Tilera's root complex driver.
|
||||
|
||||
config PCI_DOMAINS
|
||||
bool
|
||||
|
||||
config NO_IOMEM
|
||||
def_bool !PCI
|
||||
|
||||
|
|
|
@ -137,4 +137,56 @@ static inline void finv_buffer(void *buffer, size_t size)
|
|||
mb_incoherent();
|
||||
}
|
||||
|
||||
/*
|
||||
* Flush & invalidate a VA range that is homed remotely on a single core,
|
||||
* waiting until the memory controller holds the flushed values.
|
||||
*/
|
||||
static inline void finv_buffer_remote(void *buffer, size_t size)
|
||||
{
|
||||
char *p;
|
||||
int i;
|
||||
|
||||
/*
|
||||
* Flush and invalidate the buffer out of the local L1/L2
|
||||
* and request the home cache to flush and invalidate as well.
|
||||
*/
|
||||
__finv_buffer(buffer, size);
|
||||
|
||||
/*
|
||||
* Wait for the home cache to acknowledge that it has processed
|
||||
* all the flush-and-invalidate requests. This does not mean
|
||||
* that the flushed data has reached the memory controller yet,
|
||||
* but it does mean the home cache is processing the flushes.
|
||||
*/
|
||||
__insn_mf();
|
||||
|
||||
/*
|
||||
* Issue a load to the last cache line, which can't complete
|
||||
* until all the previously-issued flushes to the same memory
|
||||
* controller have also completed. If we weren't striping
|
||||
* memory, that one load would be sufficient, but since we may
|
||||
* be, we also need to back up to the last load issued to
|
||||
* another memory controller, which would be the point where
|
||||
* we crossed an 8KB boundary (the granularity of striping
|
||||
* across memory controllers). Keep backing up and doing this
|
||||
* until we are before the beginning of the buffer, or have
|
||||
* hit all the controllers.
|
||||
*/
|
||||
for (i = 0, p = (char *)buffer + size - 1;
|
||||
i < (1 << CHIP_LOG_NUM_MSHIMS()) && p >= (char *)buffer;
|
||||
++i) {
|
||||
const unsigned long STRIPE_WIDTH = 8192;
|
||||
|
||||
/* Force a load instruction to issue. */
|
||||
*(volatile char *)p;
|
||||
|
||||
/* Jump to end of previous stripe. */
|
||||
p -= STRIPE_WIDTH;
|
||||
p = (char *)((unsigned long)p | (STRIPE_WIDTH - 1));
|
||||
}
|
||||
|
||||
/* Wait for the loads (and thus flushes) to have completed. */
|
||||
__insn_mf();
|
||||
}
|
||||
|
||||
#endif /* _ASM_TILE_CACHEFLUSH_H */
|
||||
|
|
|
@ -55,9 +55,6 @@ extern void iounmap(volatile void __iomem *addr);
|
|||
#define ioremap_writethrough(physaddr, size) ioremap(physaddr, size)
|
||||
#define ioremap_fullcache(physaddr, size) ioremap(physaddr, size)
|
||||
|
||||
void __iomem *ioport_map(unsigned long port, unsigned int len);
|
||||
extern inline void ioport_unmap(void __iomem *addr) {}
|
||||
|
||||
#define mmiowb()
|
||||
|
||||
/* Conversion between virtual and physical mappings. */
|
||||
|
@ -189,12 +186,22 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
|
|||
* we never run, uses them unconditionally.
|
||||
*/
|
||||
|
||||
static inline int ioport_panic(void)
|
||||
static inline long ioport_panic(void)
|
||||
{
|
||||
panic("inb/outb and friends do not exist on tile");
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void __iomem *ioport_map(unsigned long port, unsigned int len)
|
||||
{
|
||||
return (void __iomem *) ioport_panic();
|
||||
}
|
||||
|
||||
static inline void ioport_unmap(void __iomem *addr)
|
||||
{
|
||||
ioport_panic();
|
||||
}
|
||||
|
||||
static inline u8 inb(unsigned long addr)
|
||||
{
|
||||
return ioport_panic();
|
||||
|
|
|
@ -1,117 +0,0 @@
|
|||
/*
|
||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation, version 2.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||
* NON INFRINGEMENT. See the GNU General Public License for
|
||||
* more details.
|
||||
*/
|
||||
|
||||
#ifndef _ASM_TILE_PCI_BRIDGE_H
|
||||
#define _ASM_TILE_PCI_BRIDGE_H
|
||||
|
||||
#include <linux/ioport.h>
|
||||
#include <linux/pci.h>
|
||||
|
||||
struct device_node;
|
||||
struct pci_controller;
|
||||
|
||||
/*
|
||||
* pci_io_base returns the memory address at which you can access
|
||||
* the I/O space for PCI bus number `bus' (or NULL on error).
|
||||
*/
|
||||
extern void __iomem *pci_bus_io_base(unsigned int bus);
|
||||
extern unsigned long pci_bus_io_base_phys(unsigned int bus);
|
||||
extern unsigned long pci_bus_mem_base_phys(unsigned int bus);
|
||||
|
||||
/* Allocate a new PCI host bridge structure */
|
||||
extern struct pci_controller *pcibios_alloc_controller(void);
|
||||
|
||||
/* Helper function for setting up resources */
|
||||
extern void pci_init_resource(struct resource *res, unsigned long start,
|
||||
unsigned long end, int flags, char *name);
|
||||
|
||||
/* Get the PCI host controller for a bus */
|
||||
extern struct pci_controller *pci_bus_to_hose(int bus);
|
||||
|
||||
/*
|
||||
* Structure of a PCI controller (host bridge)
|
||||
*/
|
||||
struct pci_controller {
|
||||
int index; /* PCI domain number */
|
||||
struct pci_bus *root_bus;
|
||||
|
||||
int first_busno;
|
||||
int last_busno;
|
||||
|
||||
int hv_cfg_fd[2]; /* config{0,1} fds for this PCIe controller */
|
||||
int hv_mem_fd; /* fd to Hypervisor for MMIO operations */
|
||||
|
||||
struct pci_ops *ops;
|
||||
|
||||
int irq_base; /* Base IRQ from the Hypervisor */
|
||||
int plx_gen1; /* flag for PLX Gen 1 configuration */
|
||||
|
||||
/* Address ranges that are routed to this controller/bridge. */
|
||||
struct resource mem_resources[3];
|
||||
};
|
||||
|
||||
static inline struct pci_controller *pci_bus_to_host(struct pci_bus *bus)
|
||||
{
|
||||
return bus->sysdata;
|
||||
}
|
||||
|
||||
extern void setup_indirect_pci_nomap(struct pci_controller *hose,
|
||||
void __iomem *cfg_addr, void __iomem *cfg_data);
|
||||
extern void setup_indirect_pci(struct pci_controller *hose,
|
||||
u32 cfg_addr, u32 cfg_data);
|
||||
extern void setup_grackle(struct pci_controller *hose);
|
||||
|
||||
extern unsigned char common_swizzle(struct pci_dev *, unsigned char *);
|
||||
|
||||
/*
|
||||
* The following code swizzles for exactly one bridge. The routine
|
||||
* common_swizzle below handles multiple bridges. But there are a
|
||||
* some boards that don't follow the PCI spec's suggestion so we
|
||||
* break this piece out separately.
|
||||
*/
|
||||
static inline unsigned char bridge_swizzle(unsigned char pin,
|
||||
unsigned char idsel)
|
||||
{
|
||||
return (((pin-1) + idsel) % 4) + 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* The following macro is used to lookup irqs in a standard table
|
||||
* format for those PPC systems that do not already have PCI
|
||||
* interrupts properly routed.
|
||||
*/
|
||||
/* FIXME - double check this */
|
||||
#define PCI_IRQ_TABLE_LOOKUP ({ \
|
||||
long _ctl_ = -1; \
|
||||
if (idsel >= min_idsel && idsel <= max_idsel && pin <= irqs_per_slot) \
|
||||
_ctl_ = pci_irq_table[idsel - min_idsel][pin-1]; \
|
||||
_ctl_; \
|
||||
})
|
||||
|
||||
/*
|
||||
* Scan the buses below a given PCI host bridge and assign suitable
|
||||
* resources to all devices found.
|
||||
*/
|
||||
extern int pciauto_bus_scan(struct pci_controller *, int);
|
||||
|
||||
#ifdef CONFIG_PCI
|
||||
extern unsigned long pci_address_to_pio(phys_addr_t address);
|
||||
#else
|
||||
static inline unsigned long pci_address_to_pio(phys_addr_t address)
|
||||
{
|
||||
return (unsigned long)-1;
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* _ASM_TILE_PCI_BRIDGE_H */
|
|
@ -15,7 +15,29 @@
|
|||
#ifndef _ASM_TILE_PCI_H
|
||||
#define _ASM_TILE_PCI_H
|
||||
|
||||
#include <asm/pci-bridge.h>
|
||||
#include <linux/pci.h>
|
||||
|
||||
/*
|
||||
* Structure of a PCI controller (host bridge)
|
||||
*/
|
||||
struct pci_controller {
|
||||
int index; /* PCI domain number */
|
||||
struct pci_bus *root_bus;
|
||||
|
||||
int first_busno;
|
||||
int last_busno;
|
||||
|
||||
int hv_cfg_fd[2]; /* config{0,1} fds for this PCIe controller */
|
||||
int hv_mem_fd; /* fd to Hypervisor for MMIO operations */
|
||||
|
||||
struct pci_ops *ops;
|
||||
|
||||
int irq_base; /* Base IRQ from the Hypervisor */
|
||||
int plx_gen1; /* flag for PLX Gen 1 configuration */
|
||||
|
||||
/* Address ranges that are routed to this controller/bridge. */
|
||||
struct resource mem_resources[3];
|
||||
};
|
||||
|
||||
/*
|
||||
* The hypervisor maps the entirety of CPA-space as bus addresses, so
|
||||
|
@ -24,57 +46,13 @@
|
|||
*/
|
||||
#define PCI_DMA_BUS_IS_PHYS 1
|
||||
|
||||
struct pci_controller *pci_bus_to_hose(int bus);
|
||||
unsigned char __init common_swizzle(struct pci_dev *dev, unsigned char *pinp);
|
||||
int __init tile_pci_init(void);
|
||||
void pci_iounmap(struct pci_dev *dev, void __iomem *addr);
|
||||
|
||||
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
|
||||
static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) {}
|
||||
|
||||
void __devinit pcibios_fixup_bus(struct pci_bus *bus);
|
||||
|
||||
int __devinit _tile_cfg_read(struct pci_controller *hose,
|
||||
int bus,
|
||||
int slot,
|
||||
int function,
|
||||
int offset,
|
||||
int size,
|
||||
u32 *val);
|
||||
int __devinit _tile_cfg_write(struct pci_controller *hose,
|
||||
int bus,
|
||||
int slot,
|
||||
int function,
|
||||
int offset,
|
||||
int size,
|
||||
u32 val);
|
||||
|
||||
/*
|
||||
* These are used to to config reads and writes in the early stages of
|
||||
* setup before the driver infrastructure has been set up enough to be
|
||||
* able to do config reads and writes.
|
||||
*/
|
||||
#define early_cfg_read(where, size, value) \
|
||||
_tile_cfg_read(controller, \
|
||||
current_bus, \
|
||||
pci_slot, \
|
||||
pci_fn, \
|
||||
where, \
|
||||
size, \
|
||||
value)
|
||||
|
||||
#define early_cfg_write(where, size, value) \
|
||||
_tile_cfg_write(controller, \
|
||||
current_bus, \
|
||||
pci_slot, \
|
||||
pci_fn, \
|
||||
where, \
|
||||
size, \
|
||||
value)
|
||||
|
||||
|
||||
|
||||
#define PCICFG_BYTE 1
|
||||
#define PCICFG_WORD 2
|
||||
#define PCICFG_DWORD 4
|
||||
|
||||
#define TILE_NUM_PCIE 2
|
||||
|
||||
#define pci_domain_nr(bus) (((struct pci_controller *)(bus)->sysdata)->index)
|
||||
|
@ -88,33 +66,33 @@ static inline int pci_proc_domain(struct pci_bus *bus)
|
|||
}
|
||||
|
||||
/*
|
||||
* I/O space is currently not supported.
|
||||
* pcibios_assign_all_busses() tells whether or not the bus numbers
|
||||
* should be reassigned, in case the BIOS didn't do it correctly, or
|
||||
* in case we don't have a BIOS and we want to let Linux do it.
|
||||
*/
|
||||
static inline int pcibios_assign_all_busses(void)
|
||||
{
|
||||
return 1;
|
||||
}
|
||||
|
||||
#define TILE_PCIE_LOWER_IO 0x0
|
||||
#define TILE_PCIE_UPPER_IO 0x10000
|
||||
#define TILE_PCIE_PCIE_IO_SIZE 0x0000FFFF
|
||||
|
||||
#define _PAGE_NO_CACHE 0
|
||||
#define _PAGE_GUARDED 0
|
||||
|
||||
|
||||
#define pcibios_assign_all_busses() pci_assign_all_buses
|
||||
extern int pci_assign_all_buses;
|
||||
|
||||
/*
|
||||
* No special bus mastering setup handling.
|
||||
*/
|
||||
static inline void pcibios_set_master(struct pci_dev *dev)
|
||||
{
|
||||
/* No special bus mastering setup handling */
|
||||
}
|
||||
|
||||
#define PCIBIOS_MIN_MEM 0
|
||||
#define PCIBIOS_MIN_IO TILE_PCIE_LOWER_IO
|
||||
#define PCIBIOS_MIN_IO 0
|
||||
|
||||
/*
|
||||
* This flag tells if the platform is TILEmpower that needs
|
||||
* special configuration for the PLX switch chip.
|
||||
*/
|
||||
extern int blade_pci;
|
||||
extern int tile_plx_gen1;
|
||||
|
||||
/* Use any cpu for PCI. */
|
||||
#define cpumask_of_pcibus(bus) cpu_online_mask
|
||||
|
||||
/* implement the pci_ DMA API in terms of the generic device dma_ one */
|
||||
#include <asm-generic/pci-dma-compat.h>
|
||||
|
@ -122,7 +100,4 @@ extern int blade_pci;
|
|||
/* generic pci stuff */
|
||||
#include <asm-generic/pci.h>
|
||||
|
||||
/* Use any cpu for PCI. */
|
||||
#define cpumask_of_pcibus(bus) cpu_online_mask
|
||||
|
||||
#endif /* _ASM_TILE_PCI_H */
|
||||
|
|
|
@ -292,8 +292,18 @@ extern int kstack_hash;
|
|||
/* Are we using huge pages in the TLB for kernel data? */
|
||||
extern int kdata_huge;
|
||||
|
||||
/* Support standard Linux prefetching. */
|
||||
#define ARCH_HAS_PREFETCH
|
||||
#define prefetch(x) __builtin_prefetch(x)
|
||||
#define PREFETCH_STRIDE CHIP_L2_LINE_SIZE()
|
||||
|
||||
/* Bring a value into the L1D, faulting the TLB if necessary. */
|
||||
#ifdef __tilegx__
|
||||
#define prefetch_L1(x) __insn_prefetch_l1_fault((void *)(x))
|
||||
#else
|
||||
#define prefetch_L1(x) __insn_prefetch_L1((void *)(x))
|
||||
#endif
|
||||
|
||||
#else /* __ASSEMBLY__ */
|
||||
|
||||
/* Do some slow action (e.g. read a slow SPR). */
|
||||
|
|
|
@ -0,0 +1,300 @@
|
|||
/*
|
||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation, version 2.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||
* NON INFRINGEMENT. See the GNU General Public License for
|
||||
* more details.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file drivers/xgbe/impl.h
|
||||
* Implementation details for the NetIO library.
|
||||
*/
|
||||
|
||||
#ifndef __DRV_XGBE_IMPL_H__
|
||||
#define __DRV_XGBE_IMPL_H__
|
||||
|
||||
#include <hv/netio_errors.h>
|
||||
#include <hv/netio_intf.h>
|
||||
#include <hv/drv_xgbe_intf.h>
|
||||
|
||||
|
||||
/** How many groups we have (log2). */
|
||||
#define LOG2_NUM_GROUPS (12)
|
||||
/** How many groups we have. */
|
||||
#define NUM_GROUPS (1 << LOG2_NUM_GROUPS)
|
||||
|
||||
/** Number of output requests we'll buffer per tile. */
|
||||
#define EPP_REQS_PER_TILE (32)
|
||||
|
||||
/** Words used in an eDMA command without checksum acceleration. */
|
||||
#define EDMA_WDS_NO_CSUM 8
|
||||
/** Words used in an eDMA command with checksum acceleration. */
|
||||
#define EDMA_WDS_CSUM 10
|
||||
/** Total available words in the eDMA command FIFO. */
|
||||
#define EDMA_WDS_TOTAL 128
|
||||
|
||||
|
||||
/*
|
||||
* FIXME: These definitions are internal and should have underscores!
|
||||
* NOTE: The actual numeric values here are intentional and allow us to
|
||||
* optimize the concept "if small ... else if large ... else ...", by
|
||||
* checking for the low bit being set, and then for non-zero.
|
||||
* These are used as array indices, so they must have the values (0, 1, 2)
|
||||
* in some order.
|
||||
*/
|
||||
#define SIZE_SMALL (1) /**< Small packet queue. */
|
||||
#define SIZE_LARGE (2) /**< Large packet queue. */
|
||||
#define SIZE_JUMBO (0) /**< Jumbo packet queue. */
|
||||
|
||||
/** The number of "SIZE_xxx" values. */
|
||||
#define NETIO_NUM_SIZES 3
|
||||
|
||||
|
||||
/*
 * Default packet counts for IPP drivers.  Chosen so that CIPP1 will not
 * overflow its L2 cache.
 */

/** Default number of small packets. */
#define NETIO_DEFAULT_SMALL_PACKETS 2750
/** Default number of large packets. */
#define NETIO_DEFAULT_LARGE_PACKETS 2500
/** Default number of jumbo packets. */
#define NETIO_DEFAULT_JUMBO_PACKETS 250


/** Log2 of the size of a memory arena. */
#define NETIO_ARENA_SHIFT 24   /* 16 MB */
/** Size of a memory arena, in bytes. */
#define NETIO_ARENA_SIZE (1 << NETIO_ARENA_SHIFT)
|
||||
|
||||
|
||||
/** A queue of packets.
 *
 * Partial state of a queue of packets awaiting processing.  The queue as a
 * whole is written by an interrupt handler and read by non-interrupt code;
 * this structure holds the part the interrupt handler touches.  The rest of
 * the queue state — the read offset — lives in user space rather than
 * hypervisor space and therefore sits in a separate structure.
 *
 * The read offset (__packet_receive_read in the user-side structure) names
 * the next packet to be read.  When the read offset equals the write
 * offset the queue is empty, so the queue must hold one more slot than the
 * required maximum queue size.
 *
 * Example of all three state variables (all pointers advance left to
 * right):
 *
 * @code
 *   I   I   V   V   V   V   I   I   I   I
 *   0   1   2   3   4   5   6   7   8   9  10
 *       ^       ^               ^               ^
 *       |       |               |
 *       |       |               __last_packet_plus_one
 *       |       __buffer_write
 *       __packet_receive_read
 * @endcode
 *
 * This queue has 10 slots and can therefore hold 9 packets
 * (_last_packet_plus_one = 10).  With the read pointer at 2 and the write
 * pointer at 6, slots 2, 3, 4 and 5 contain valid unread packets; the
 * remaining slots are invalid (hold no packet).
 */
typedef struct {
  /** Byte offset at which the next notify packet will be written: zero for
   * the first packet on the queue, sizeof(netio_pkt_t) for the second, and
   * so on. */
  volatile uint32_t __packet_write;

  /** Offset of the slot just past the last valid packet (any pointer
   * incremented to this value wraps back to zero). */
  uint32_t __last_packet_plus_one;
}
__netio_packet_queue_t;
|
||||
|
||||
|
||||
/** A queue of buffers.
 *
 * Partial state of a queue of empty buffers obtained via requests to the
 * IPP.  (The queue elements are packet handles, turned into full
 * netio_pkt_t values when a buffer is retrieved.)  The queue as a whole is
 * written by an interrupt handler and read by non-interrupt code; this
 * structure holds the part the interrupt handler touches.  The remaining
 * queue state — the read offset and the requested write offset — lives in
 * user space, not hypervisor space, and sits in a separate structure.
 *
 * The read offset (__buffer_read in the user-side structure) names the
 * next buffer to be read.  When the read offset equals the write offset
 * the queue is empty, so the queue must hold one more slot than the
 * required maximum queue size.
 *
 * The requested write offset (__buffer_requested_write in the user-side
 * structure) names the slot that will hold the next buffer requested from
 * the IPP, once such a request is sent.  When it equals the write offset,
 * no buffer requests are outstanding; when it is one greater than the read
 * offset, no further requests may be sent.
 *
 * Unlike the packet_queue, the buffer_queue fills incoming buffers at
 * DECREASING addresses.  That makes the "time to wrap the buffer pointer?"
 * check cheaper in the assembly that receives new buffers, and it means
 * the value defining the queue size, __last_buffer, differs from the
 * packet queue's.  Also, the packet_queue offset is pre-scaled by the
 * packet size, whereas unscaled slot indices are used here.  (These
 * differences are historical; the packet_queue may eventually look more
 * like this queue.)
 *
 * @code
 * Example of all four state variables (remember: all pointers advance
 * right to left):
 *
 *   V   V   V   I   I   R   R   V   V   V
 *   0   1   2   3   4   5   6   7   8   9
 *       ^           ^       ^               ^
 *       |           |       |               |
 *       |           |       |               __last_buffer
 *       |           |       __buffer_write
 *       |           __buffer_requested_write
 *       __buffer_read
 * @endcode
 *
 * This queue has 10 slots and can therefore hold 9 buffers (_last_buffer
 * = 9).  With the read pointer at 2 and the write pointer at 6, slots 2,
 * 1, 0, 9, 8 and 7 hold valid unread buffers.  The requested write pointer
 * at 4 means requests are outstanding for buffers that will land in slots
 * 6 and 5 when they arrive.  The remaining slots are invalid (hold no
 * buffer).
 */
typedef struct {
  /** Ordinal number of the next buffer to be written: 0 for the first slot
   * in the queue, 1 for the second slot, and so on. */
  volatile uint32_t __buffer_write;

  /** Ordinal number of the last buffer (any pointer decremented below zero
   * is reloaded with this value). */
  uint32_t __last_buffer;
}
__netio_buffer_queue_t;
|
||||
|
||||
|
||||
/**
|
||||
* An object for providing Ethernet packets to a process.
|
||||
*/
|
||||
typedef struct __netio_queue_impl_t
|
||||
{
|
||||
/** The queue of packets waiting to be received. */
|
||||
__netio_packet_queue_t __packet_receive_queue;
|
||||
/** The intr bit mask that IDs this device. */
|
||||
unsigned int __intr_id;
|
||||
/** Offset to queues of empty buffers, one per size. */
|
||||
uint32_t __buffer_queue[NETIO_NUM_SIZES];
|
||||
/** The address of the first EPP tile, or -1 if no EPP. */
|
||||
/* ISSUE: Actually this is always "0" or "~0". */
|
||||
uint32_t __epp_location;
|
||||
/** The queue ID that this queue represents. */
|
||||
unsigned int __queue_id;
|
||||
/** Number of acknowledgements received. */
|
||||
volatile uint32_t __acks_received;
|
||||
/** Last completion number received for packet_sendv. */
|
||||
volatile uint32_t __last_completion_rcv;
|
||||
/** Number of packets allowed to be outstanding. */
|
||||
uint32_t __max_outstanding;
|
||||
/** First VA available for packets. */
|
||||
void* __va_0;
|
||||
/** First VA in second range available for packets. */
|
||||
void* __va_1;
|
||||
/** Padding to align the "__packets" field to the size of a netio_pkt_t. */
|
||||
uint32_t __padding[3];
|
||||
/** The packets themselves. */
|
||||
netio_pkt_t __packets[0];
|
||||
}
|
||||
netio_queue_impl_t;
|
||||
|
||||
|
||||
/**
|
||||
* An object for managing the user end of a NetIO queue.
|
||||
*/
|
||||
typedef struct __netio_queue_user_impl_t
|
||||
{
|
||||
/** The next incoming packet to be read. */
|
||||
uint32_t __packet_receive_read;
|
||||
/** The next empty buffers to be read, one index per size. */
|
||||
uint8_t __buffer_read[NETIO_NUM_SIZES];
|
||||
/** Where the empty buffer we next request from the IPP will go, one index
|
||||
* per size. */
|
||||
uint8_t __buffer_requested_write[NETIO_NUM_SIZES];
|
||||
/** PCIe interface flag. */
|
||||
uint8_t __pcie;
|
||||
/** Number of packets left to be received before we send a credit update. */
|
||||
uint32_t __receive_credit_remaining;
|
||||
/** Value placed in __receive_credit_remaining when it reaches zero. */
|
||||
uint32_t __receive_credit_interval;
|
||||
/** First fast I/O routine index. */
|
||||
uint32_t __fastio_index;
|
||||
/** Number of acknowledgements expected. */
|
||||
uint32_t __acks_outstanding;
|
||||
/** Last completion number requested. */
|
||||
uint32_t __last_completion_req;
|
||||
/** File descriptor for driver. */
|
||||
int __fd;
|
||||
}
|
||||
netio_queue_user_impl_t;
|
||||
|
||||
|
||||
#define NETIO_GROUP_CHUNK_SIZE   64 /**< Max # groups in one IPP request */
#define NETIO_BUCKET_CHUNK_SIZE  64 /**< Max # buckets in one IPP request */
|
||||
|
||||
|
||||
/** Internal structure used to convey packet send information to the
|
||||
* hypervisor. FIXME: Actually, it's not used for that anymore, but
|
||||
* netio_packet_send() still uses it internally.
|
||||
*/
|
||||
typedef struct
|
||||
{
|
||||
uint16_t flags; /**< Packet flags (__NETIO_SEND_FLG_xxx) */
|
||||
uint16_t transfer_size; /**< Size of packet */
|
||||
uint32_t va; /**< VA of start of packet */
|
||||
__netio_pkt_handle_t handle; /**< Packet handle */
|
||||
uint32_t csum0; /**< First checksum word */
|
||||
uint32_t csum1; /**< Second checksum word */
|
||||
}
|
||||
__netio_send_cmd_t;
|
||||
|
||||
|
||||
/** Flags used in two contexts:
 *  - As the "flags" member of __netio_send_cmd_t above; only for
 *    netio_pkt_send_{prepare,commit}.
 *  - As part of the flags passed to the various send-packet fast I/O
 *    calls.
 */

/** Request an acknowledgement for this packet.  Some code in the normal
 * send_pkt fast I/O handler assumes this equals 1. */
#define __NETIO_SEND_FLG_ACK 0x1

/** Checksum this packet.  (Only used with __netio_send_cmd_t; normal
 * packet sends use a dedicated fast I/O index to denote checksumming, and
 * multi-segment sends test the checksum descriptor.) */
#define __NETIO_SEND_FLG_CSUM 0x2

/** Request a completion for this packet.  Only used with multi-segment
 * sends. */
#define __NETIO_SEND_FLG_COMPLETION 0x4

/** Bit position of the number-of-extra-segments field in the flags word.
 * Only used with multi-segment sends. */
#define __NETIO_SEND_FLG_XSEG_SHIFT 3

/** Bit width of the number-of-extra-segments field in the flags word. */
#define __NETIO_SEND_FLG_XSEG_WIDTH 2
|
||||
|
||||
#endif /* __DRV_XGBE_IMPL_H__ */
|
|
@ -0,0 +1,615 @@
|
|||
/*
|
||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation, version 2.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||
* NON INFRINGEMENT. See the GNU General Public License for
|
||||
* more details.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file drv_xgbe_intf.h
|
||||
* Interface to the hypervisor XGBE driver.
|
||||
*/
|
||||
|
||||
#ifndef __DRV_XGBE_INTF_H__
|
||||
#define __DRV_XGBE_INTF_H__
|
||||
|
||||
/**
|
||||
* An object for forwarding VAs and PAs to the hypervisor.
|
||||
* @ingroup types
|
||||
*
|
||||
* This allows the supervisor to specify a number of areas of memory to
|
||||
* store packet buffers.
|
||||
*/
|
||||
typedef struct
|
||||
{
|
||||
/** The physical address of the memory. */
|
||||
HV_PhysAddr pa;
|
||||
/** Page table entry for the memory. This is only used to derive the
|
||||
* memory's caching mode; the PA bits are ignored. */
|
||||
HV_PTE pte;
|
||||
/** The virtual address of the memory. */
|
||||
HV_VirtAddr va;
|
||||
/** Size (in bytes) of the memory area. */
|
||||
int size;
|
||||
|
||||
}
|
||||
netio_ipp_address_t;
|
||||
|
||||
/** The various pread/pwrite offsets into the hypervisor-level driver.
 * @ingroup types
 */
typedef enum {
  /** Tell the Linux driver the address of the NetIO arena memory.  Only
   * conveys information from netio to the Linux driver; it never reaches
   * the hypervisor.  Write-only; takes a uint32_t VA address. */
  NETIO_FIXED_ADDR               = 0x5000000000000000ULL,

  /** Tell the Linux driver the size of the NetIO arena memory.  Only
   * conveys information from netio to the Linux driver; it never reaches
   * the hypervisor.  Write-only; takes a uint32_t VA size. */
  NETIO_FIXED_SIZE               = 0x5100000000000000ULL,

  /** Register the current tile with the IPP.  Write then read: the write
   * takes a netio_input_config_t, the read returns a pointer to a
   * netio_queue_impl_t. */
  NETIO_IPP_INPUT_REGISTER_OFF   = 0x6000000000000000ULL,

  /** Unregister the current tile from the IPP.  Write-only, takes a dummy
   * argument. */
  NETIO_IPP_INPUT_UNREGISTER_OFF = 0x6100000000000000ULL,

  /** Start packets flowing.  Write-only, takes a dummy argument. */
  NETIO_IPP_INPUT_INIT_OFF       = 0x6200000000000000ULL,

  /** Stop packets flowing.  Write-only, takes a dummy argument. */
  NETIO_IPP_INPUT_UNINIT_OFF     = 0x6300000000000000ULL,

  /** Configure a group (typically grouping is on VLAN).  Write-only: takes
   * an array of netio_group_t's; the low 24 bits of the offset are the
   * base group number times sizeof(netio_group_t). */
  NETIO_IPP_INPUT_GROUP_CFG_OFF  = 0x6400000000000000ULL,

  /** Configure a bucket.  Write-only: takes an array of netio_bucket_t's;
   * the low 24 bits of the offset are the base bucket number times
   * sizeof(netio_bucket_t). */
  NETIO_IPP_INPUT_BUCKET_CFG_OFF = 0x6500000000000000ULL,

  /** Get/set a parameter.  Read or write: the data is the parameter value;
   * the low 32 bits of the offset are a __netio_getset_offset_t. */
  NETIO_IPP_PARAM_OFF            = 0x6600000000000000ULL,

  /** Get the fast I/O index.  Read-only; returns a 4-byte base index. */
  NETIO_IPP_GET_FASTIO_OFF       = 0x6700000000000000ULL,

  /** Configure the hijack IP address.  Packets with this IPv4 destination
   * go to bucket NETIO_NUM_BUCKETS - 1.  Write-only: takes an IP address
   * in some standard form.  FIXME: Define the form! */
  NETIO_IPP_INPUT_HIJACK_CFG_OFF = 0x6800000000000000ULL,

  /**
   * Offsets beyond this point are reserved for the supervisor (though the
   * supervisor driver itself must enforce that).
   */
  NETIO_IPP_USER_MAX_OFF         = 0x6FFFFFFFFFFFFFFFULL,

  /** Register I/O memory.  Write-only, takes a netio_ipp_address_t. */
  NETIO_IPP_IOMEM_REGISTER_OFF   = 0x7000000000000000ULL,

  /** Unregister I/O memory.  Write-only, takes a netio_ipp_address_t. */
  NETIO_IPP_IOMEM_UNREGISTER_OFF = 0x7100000000000000ULL,

  /* Offsets greater than 0x7FFFFFFF cannot be used directly from Linux
   * userspace code due to limitations in the pread/pwrite syscalls. */

  /** Drain LIPP buffers. */
  NETIO_IPP_DRAIN_OFF            = 0xFA00000000000000ULL,

  /** Supply a netio_ipp_address_t to serve as shared memory for the LEPP
   * command queue. */
  NETIO_EPP_SHM_OFF              = 0xFB00000000000000ULL,

  /* 0xFC... is currently unused. */

  /** Stop IPP/EPP tiles.  Write-only, takes a dummy argument. */
  NETIO_IPP_STOP_SHIM_OFF        = 0xFD00000000000000ULL,

  /** Start IPP/EPP tiles.  Write-only, takes a dummy argument. */
  NETIO_IPP_START_SHIM_OFF       = 0xFE00000000000000ULL,

  /** Supply the packet arena.  Write-only, takes an array of
   * netio_ipp_address_t values. */
  NETIO_IPP_ADDRESS_OFF          = 0xFF00000000000000ULL,
} netio_hv_offset_t;
|
||||
|
||||
/** Extract the base (top-byte) portion of an offset. */
#define NETIO_BASE_OFFSET(off)   ((off) & 0xFF00000000000000ULL)
/** Extract the local (low 56-bit) portion of an offset. */
#define NETIO_LOCAL_OFFSET(off)  ((off) & 0x00FFFFFFFFFFFFFFULL)
|
||||
|
||||
|
||||
/**
 * Get/set offset.
 *
 * Overlay describing the low 32 bits (plus class/opcode) of a
 * NETIO_IPP_PARAM_OFF pread/pwrite offset.
 */
typedef union {
  struct {
    uint64_t addr:48;        /**< Class-specific address */
    unsigned int class:8;    /**< Class (e.g., NETIO_PARAM) */
    unsigned int opcode:8;   /**< High 8 bits of NETIO_IPP_PARAM_OFF */
  }
  bits;                      /**< Bitfields */
  uint64_t word;             /**< Aggregated value to use as the offset */
}
__netio_getset_offset_t;
|
||||
|
||||
/**
 * Fast I/O index offsets (must stay contiguous).
 */
typedef enum {
  NETIO_FASTIO_ALLOCATE        = 0, /**< Get empty packet buffer */
  NETIO_FASTIO_FREE_BUFFER     = 1, /**< Give buffer back to IPP */
  NETIO_FASTIO_RETURN_CREDITS  = 2, /**< Give credits to IPP */
  NETIO_FASTIO_SEND_PKT_NOCK   = 3, /**< Send a packet, no checksum */
  NETIO_FASTIO_SEND_PKT_CK     = 4, /**< Send a packet, with checksum */
  NETIO_FASTIO_SEND_PKT_VEC    = 5, /**< Send a vector of packets */
  NETIO_FASTIO_SENDV_PKT       = 6, /**< Sendv one packet */
  NETIO_FASTIO_NUM_INDEX       = 7, /**< Total number of fast I/O indices */
} netio_fastio_index_t;
|
||||
|
||||
/** 3-word return type for a fast I/O call. */
typedef struct {
  int err;        /**< Error code. */
  uint32_t val0;  /**< Value; meaning depends on the specific call. */
  uint32_t val1;  /**< Value; meaning depends on the specific call. */
} netio_fastio_rv3_t;
|
||||
|
||||
/** 0-argument fast I/O call */
|
||||
int __netio_fastio0(uint32_t fastio_index);
|
||||
/** 1-argument fast I/O call */
|
||||
int __netio_fastio1(uint32_t fastio_index, uint32_t arg0);
|
||||
/** 3-argument fast I/O call, 2-word return value */
|
||||
netio_fastio_rv3_t __netio_fastio3_rv3(uint32_t fastio_index, uint32_t arg0,
|
||||
uint32_t arg1, uint32_t arg2);
|
||||
/** 4-argument fast I/O call */
|
||||
int __netio_fastio4(uint32_t fastio_index, uint32_t arg0, uint32_t arg1,
|
||||
uint32_t arg2, uint32_t arg3);
|
||||
/** 6-argument fast I/O call */
|
||||
int __netio_fastio6(uint32_t fastio_index, uint32_t arg0, uint32_t arg1,
|
||||
uint32_t arg2, uint32_t arg3, uint32_t arg4, uint32_t arg5);
|
||||
/** 9-argument fast I/O call */
|
||||
int __netio_fastio9(uint32_t fastio_index, uint32_t arg0, uint32_t arg1,
|
||||
uint32_t arg2, uint32_t arg3, uint32_t arg4, uint32_t arg5,
|
||||
uint32_t arg6, uint32_t arg7, uint32_t arg8);
|
||||
|
||||
/** Allocate an empty packet.
 * @param fastio_index Fast I/O index.
 * @param size Size of the packet to allocate.
 */
#define __netio_fastio_allocate(fastio_index, size) \
  __netio_fastio1((fastio_index) + NETIO_FASTIO_ALLOCATE, size)

/** Free a buffer.
 * @param fastio_index Fast I/O index.
 * @param handle Handle for the packet to free.
 */
#define __netio_fastio_free_buffer(fastio_index, handle) \
  __netio_fastio1((fastio_index) + NETIO_FASTIO_FREE_BUFFER, handle)

/** Increment our receive credits.
 * @param fastio_index Fast I/O index.
 * @param credits Number of credits to add.
 */
#define __netio_fastio_return_credits(fastio_index, credits) \
  __netio_fastio1((fastio_index) + NETIO_FASTIO_RETURN_CREDITS, credits)

/** Send a packet without checksumming.
 * @param fastio_index Fast I/O index.
 * @param ackflag Nonzero if we want an ack.
 * @param size Size of the packet.
 * @param va Virtual address of start of packet.
 * @param handle Packet handle.
 */
#define __netio_fastio_send_pkt_nock(fastio_index, ackflag, size, va, handle) \
  __netio_fastio4((fastio_index) + NETIO_FASTIO_SEND_PKT_NOCK, ackflag, \
                  size, va, handle)

/** Send a packet, calculating a checksum.
 * @param fastio_index Fast I/O index.
 * @param ackflag Nonzero if we want an ack.
 * @param size Size of the packet.
 * @param va Virtual address of start of packet.
 * @param handle Packet handle.
 * @param csum0 Shim checksum header.
 * @param csum1 Checksum seed.
 */
#define __netio_fastio_send_pkt_ck(fastio_index, ackflag, size, va, handle, \
                                   csum0, csum1) \
  __netio_fastio6((fastio_index) + NETIO_FASTIO_SEND_PKT_CK, ackflag, \
                  size, va, handle, csum0, csum1)
|
||||
|
||||
|
||||
/** Format of the "csum0" argument to the __netio_fastio_send routines and
 * LEPP.  Currently byte-for-byte identical to the
 * ShimProtocolOffloadHeader.
 */
typedef union {
  struct {
    unsigned int start_byte:7;       /**< First byte to be checksummed */
    unsigned int count:14;           /**< Number of bytes to checksum. */
    unsigned int destination_byte:7; /**< Byte to write the checksum to. */
    unsigned int reserved:4;         /**< Reserved. */
  } bits;              /**< Decomposed method of access. */
  unsigned int word;   /**< To send out the IDN. */
} __netio_checksum_header_t;
|
||||
|
||||
|
||||
/** Sendv a packet with 1 or 2 segments.
 * @param fastio_index Fast I/O index.
 * @param flags Ack/csum/notify flags in low 3 bits; number of segments
 *   minus 1 in next 2 bits; expected checksum in high 16 bits.
 * @param confno Confirmation number to request, if notify flag set.
 * @param csum0 Checksum descriptor; if zero, no checksum.
 * @param va_F Virtual address of first segment.
 * @param va_L Virtual address of last segment, if 2 segments.
 * @param len_F_L Length of first segment in low 16 bits; length of last
 *   segment, if 2 segments, in high 16 bits.
 */
#define __netio_fastio_sendv_pkt_1_2(fastio_index, flags, confno, csum0, \
                                     va_F, va_L, len_F_L) \
  __netio_fastio6((fastio_index) + NETIO_FASTIO_SENDV_PKT, flags, confno, \
                  csum0, va_F, va_L, len_F_L)

/** Send a packet on the PCIe interface.
 * @param fastio_index Fast I/O index.
 * @param flags Ack/csum/notify flags in low 3 bits.
 * @param confno Confirmation number to request, if notify flag set.
 * @param csum0 Checksum descriptor; hard-wired 0, not needed for PCIe.
 * @param va_F Virtual address of the packet buffer.
 * @param va_L Virtual address of last segment, if 2 segments; hard-wired 0.
 * @param len_F_L Length of the packet buffer in low 16 bits.
 *
 * NOTE(review): expands to PCIE_FASTIO_SENDV_PKT, which is not defined in
 * this header — presumably supplied by a PCIe-specific header; confirm.
 */
#define __netio_fastio_send_pcie_pkt(fastio_index, flags, confno, csum0, \
                                     va_F, va_L, len_F_L) \
  __netio_fastio6((fastio_index) + PCIE_FASTIO_SENDV_PKT, flags, confno, \
                  csum0, va_F, va_L, len_F_L)

/** Sendv a packet with 3 or 4 segments.
 * @param fastio_index Fast I/O index.
 * @param flags Ack/csum/notify flags in low 3 bits; number of segments
 *   minus 1 in next 2 bits; expected checksum in high 16 bits.
 * @param confno Confirmation number to request, if notify flag set.
 * @param csum0 Checksum descriptor; if zero, no checksum.
 * @param va_F Virtual address of first segment.
 * @param va_L Virtual address of last segment (third segment if 3
 *   segments, fourth segment if 4 segments).
 * @param len_F_L Length of first segment in low 16 bits; length of last
 *   segment in high 16 bits.
 * @param va_M0 Virtual address of "middle 0" segment; sent second when
 *   there are three segments, third when there are four.
 * @param va_M1 Virtual address of "middle 1" segment; sent second when
 *   there are four segments.
 * @param len_M0_M1 Length of middle 0 segment in low 16 bits; length of
 *   middle 1 segment, if 4 segments, in high 16 bits.
 */
#define __netio_fastio_sendv_pkt_3_4(fastio_index, flags, confno, csum0, va_F, \
                                     va_L, len_F_L, va_M0, va_M1, len_M0_M1) \
  __netio_fastio9((fastio_index) + NETIO_FASTIO_SENDV_PKT, flags, confno, \
                  csum0, va_F, va_L, len_F_L, va_M0, va_M1, len_M0_M1)

/** Send a vector of packets.
 * @param fastio_index Fast I/O index.
 * @param seqno Number of packets transmitted so far on this interface;
 *   used to decide which packets should be acknowledged.
 * @param nentries Number of entries in the vector.
 * @param va Virtual address of the start of the vector entry array.
 * @return 3-word netio_fastio_rv3_t structure.  The err member is an
 *   error code, or zero if no error.  The val0 member is the updated
 *   value of seqno; it has been incremented by 1 for each packet sent.
 *   That increment may be less than nentries if an error occurred, or if
 *   some of the entries in the vector contain handles equal to
 *   NETIO_PKT_HANDLE_NONE.  The val1 member is the updated value of
 *   nentries; it has been decremented by 1 for each vector entry
 *   processed.  Again, that decrement may be less than nentries (leaving
 *   the returned value positive) if an error occurred.
 */
#define __netio_fastio_send_pkt_vec(fastio_index, seqno, nentries, va) \
  __netio_fastio3_rv3((fastio_index) + NETIO_FASTIO_SEND_PKT_VEC, seqno, \
                      nentries, va)
|
||||
|
||||
|
||||
/** An egress DMA command for LEPP. */
|
||||
typedef struct
|
||||
{
|
||||
/** Is this a TSO transfer?
|
||||
*
|
||||
* NOTE: This field is always 0, to distinguish it from
|
||||
* lepp_tso_cmd_t. It must come first!
|
||||
*/
|
||||
uint8_t tso : 1;
|
||||
|
||||
/** Unused padding bits. */
|
||||
uint8_t _unused : 3;
|
||||
|
||||
/** Should this packet be sent directly from caches instead of DRAM,
|
||||
* using hash-for-home to locate the packet data?
|
||||
*/
|
||||
uint8_t hash_for_home : 1;
|
||||
|
||||
/** Should we compute a checksum? */
|
||||
uint8_t compute_checksum : 1;
|
||||
|
||||
/** Is this the final buffer for this packet?
|
||||
*
|
||||
* A single packet can be split over several input buffers (a "gather"
|
||||
* operation). This flag indicates that this is the last buffer
|
||||
* in a packet.
|
||||
*/
|
||||
uint8_t end_of_packet : 1;
|
||||
|
||||
/** Should LEPP advance 'comp_busy' when this DMA is fully finished? */
|
||||
uint8_t send_completion : 1;
|
||||
|
||||
/** High bits of Client Physical Address of the start of the buffer
|
||||
* to be egressed.
|
||||
*
|
||||
* NOTE: Only 6 bits are actually needed here, as CPAs are
|
||||
* currently 38 bits. So two bits could be scavenged from this.
|
||||
*/
|
||||
uint8_t cpa_hi;
|
||||
|
||||
/** The number of bytes to be egressed. */
|
||||
uint16_t length;
|
||||
|
||||
/** Low 32 bits of Client Physical Address of the start of the buffer
|
||||
* to be egressed.
|
||||
*/
|
||||
uint32_t cpa_lo;
|
||||
|
||||
/** Checksum information (only used if 'compute_checksum'). */
|
||||
__netio_checksum_header_t checksum_data;
|
||||
|
||||
} lepp_cmd_t;
|
||||
|
||||
|
||||
/** A chunk of physical memory for a TSO egress. */
typedef struct {
  /** Low bits of the CPA. */
  uint32_t cpa_lo;
  /** High bits of the CPA. */
  uint16_t cpa_hi : 15;
  /** Send this packet directly from caches instead of DRAM, using
   * hash-for-home to locate the packet data?
   */
  uint16_t hash_for_home : 1;
  /** Length in bytes. */
  uint16_t length;
} lepp_frag_t;
|
||||
|
||||
|
||||
/** An LEPP command that handles TSO. */
|
||||
typedef struct
|
||||
{
|
||||
/** Is this a TSO transfer?
|
||||
*
|
||||
* NOTE: This field is always 1, to distinguish it from
|
||||
* lepp_cmd_t. It must come first!
|
||||
*/
|
||||
uint8_t tso : 1;
|
||||
|
||||
/** Unused padding bits. */
|
||||
uint8_t _unused : 7;
|
||||
|
||||
/** Size of the header[] array in bytes. It must be in the range
|
||||
* [40, 127], which are the smallest header for a TCP packet over
|
||||
* Ethernet and the maximum possible prepend size supported by
|
||||
* hardware, respectively. Note that the array storage must be
|
||||
* padded out to a multiple of four bytes so that the following
|
||||
* LEPP command is aligned properly.
|
||||
*/
|
||||
uint8_t header_size;
|
||||
|
||||
/** Byte offset of the IP header in header[]. */
|
||||
uint8_t ip_offset;
|
||||
|
||||
/** Byte offset of the TCP header in header[]. */
|
||||
uint8_t tcp_offset;
|
||||
|
||||
/** The number of bytes to use for the payload of each packet,
|
||||
* except of course the last one, which may not have enough bytes.
|
||||
* This means that each Ethernet packet except the last will have a
|
||||
* size of header_size + payload_size.
|
||||
*/
|
||||
uint16_t payload_size;
|
||||
|
||||
/** The length of the 'frags' array that follows this struct. */
|
||||
uint16_t num_frags;
|
||||
|
||||
/** The actual frags. */
|
||||
lepp_frag_t frags[0 /* Variable-sized; num_frags entries. */];
|
||||
|
||||
/*
|
||||
* The packet header template logically follows frags[],
|
||||
* but you can't declare that in C.
|
||||
*
|
||||
* uint32_t header[header_size_in_words_rounded_up];
|
||||
*/
|
||||
|
||||
} lepp_tso_cmd_t;
|
||||
|
||||
|
||||
/** An LEPP completion ring entry (an opaque per-command user pointer). */
typedef void *lepp_comp_t;
|
||||
|
||||
|
||||
/** Maximum number of frags for one TSO command.  Adapted from Linux's
 * "MAX_SKB_FRAGS", and presumably over-estimates by one for our page
 * size of exactly 65536; one more is added for a "body" fragment.
 */
#define LEPP_MAX_FRAGS (65536 / HV_PAGE_SIZE_SMALL + 2 + 1)

/** Total number of bytes needed for an lepp_tso_cmd_t. */
#define LEPP_TSO_CMD_SIZE(num_frags, header_size) \
  (sizeof(lepp_tso_cmd_t) + \
   (num_frags) * sizeof(lepp_frag_t) + \
   (((header_size) + 3) & -4))

/** Size of the LEPP "cmd" queue. */
#define LEPP_CMD_QUEUE_BYTES \
 (((CHIP_L2_CACHE_SIZE() - 2 * CHIP_L2_LINE_SIZE()) / \
  (sizeof(lepp_cmd_t) + sizeof(lepp_comp_t))) * sizeof(lepp_cmd_t))

/** Largest possible command that can go in lepp_queue_t::cmds[]. */
#define LEPP_MAX_CMD_SIZE LEPP_TSO_CMD_SIZE(LEPP_MAX_FRAGS, 128)

/** Largest possible value of lepp_queue_t::cmd_{head, tail} (inclusive).
 */
#define LEPP_CMD_LIMIT \
  (LEPP_CMD_QUEUE_BYTES - LEPP_MAX_CMD_SIZE)

/** Maximum number of completions in an LEPP queue. */
#define LEPP_COMP_QUEUE_SIZE \
  ((LEPP_CMD_LIMIT + sizeof(lepp_cmd_t) - 1) / sizeof(lepp_cmd_t))

/** Increment an index modulo the queue size.
 * (__insn_mnz(a, b) yields b when a is non-zero, else 0, so the index
 * wraps to 0 at LEPP_COMP_QUEUE_SIZE - 1 — Tile ISA intrinsic.) */
#define LEPP_QINC(var) \
  (var = __insn_mnz(var - (LEPP_COMP_QUEUE_SIZE - 1), var + 1))
|
||||
|
||||
/** A queue used to convey egress commands from the client to LEPP. */
|
||||
typedef struct
|
||||
{
|
||||
/** Index of first completion not yet processed by user code.
|
||||
* If this is equal to comp_busy, there are no such completions.
|
||||
*
|
||||
* NOTE: This is only read/written by the user.
|
||||
*/
|
||||
unsigned int comp_head;
|
||||
|
||||
/** Index of first completion record not yet completed.
|
||||
* If this is equal to comp_tail, there are no such completions.
|
||||
* This index gets advanced (modulo LEPP_QUEUE_SIZE) whenever
|
||||
* a command with the 'completion' bit set is finished.
|
||||
*
|
||||
* NOTE: This is only written by LEPP, only read by the user.
|
||||
*/
|
||||
volatile unsigned int comp_busy;
|
||||
|
||||
/** Index of the first empty slot in the completion ring.
|
||||
* Entries from this up to but not including comp_head (in ring order)
|
||||
* can be filled in with completion data.
|
||||
*
|
||||
* NOTE: This is only read/written by the user.
|
||||
*/
|
||||
unsigned int comp_tail;
|
||||
|
||||
/** Byte index of first command enqueued for LEPP but not yet processed.
|
||||
*
|
||||
* This is always divisible by sizeof(void*) and always <= LEPP_CMD_LIMIT.
|
||||
*
|
||||
* NOTE: LEPP advances this counter as soon as it no longer needs
|
||||
* the cmds[] storage for this entry, but the transfer is not actually
|
||||
* complete (i.e. the buffer pointed to by the command is no longer
|
||||
* needed) until comp_busy advances.
|
||||
*
|
||||
* If this is equal to cmd_tail, the ring is empty.
|
||||
*
|
||||
* NOTE: This is only written by LEPP, only read by the user.
|
||||
*/
|
||||
volatile unsigned int cmd_head;
|
||||
|
||||
/** Byte index of first empty slot in the command ring. This field can
|
||||
* be incremented up to but not equal to cmd_head (because that would
|
||||
* mean the ring is empty).
|
||||
*
|
||||
* This is always divisible by sizeof(void*) and always <= LEPP_CMD_LIMIT.
|
||||
*
|
||||
* NOTE: This is read/written by the user, only read by LEPP.
|
||||
*/
|
||||
volatile unsigned int cmd_tail;
|
||||
|
||||
/** A ring of variable-sized egress DMA commands.
|
||||
*
|
||||
* NOTE: Only written by the user, only read by LEPP.
|
||||
*/
|
||||
char cmds[LEPP_CMD_QUEUE_BYTES]
|
||||
__attribute__((aligned(CHIP_L2_LINE_SIZE())));
|
||||
|
||||
/** A ring of user completion data.
|
||||
* NOTE: Only read/written by the user.
|
||||
*/
|
||||
lepp_comp_t comps[LEPP_COMP_QUEUE_SIZE]
|
||||
__attribute__((aligned(CHIP_L2_LINE_SIZE())));
|
||||
} lepp_queue_t;
|
||||
|
||||
|
||||
/** An internal helper function for determining the number of entries
|
||||
* available in a ring buffer, given that there is one sentinel.
|
||||
*/
|
||||
static inline unsigned int
|
||||
_lepp_num_free_slots(unsigned int head, unsigned int tail)
|
||||
{
|
||||
/*
|
||||
* One entry is reserved for use as a sentinel, to distinguish
|
||||
* "empty" from "full". So we compute
|
||||
* (head - tail - 1) % LEPP_QUEUE_SIZE, but without using a slow % operation.
|
||||
*/
|
||||
return (head - tail - 1) + ((head <= tail) ? LEPP_COMP_QUEUE_SIZE : 0);
|
||||
}
|
||||
|
||||
|
||||
/** Returns how many new comp entries can be enqueued. */
|
||||
static inline unsigned int
|
||||
lepp_num_free_comp_slots(const lepp_queue_t* q)
|
||||
{
|
||||
return _lepp_num_free_slots(q->comp_head, q->comp_tail);
|
||||
}
|
||||
|
||||
static inline int
|
||||
lepp_qsub(int v1, int v2)
|
||||
{
|
||||
int delta = v1 - v2;
|
||||
return delta + ((delta >> 31) & LEPP_COMP_QUEUE_SIZE);
|
||||
}
|
||||
|
||||
|
||||
/** FIXME: Check this from linux, via a new "pwrite()" call. */
|
||||
#define LIPP_VERSION 1
|
||||
|
||||
|
||||
/** We use exactly two bytes of alignment padding. */
|
||||
#define LIPP_PACKET_PADDING 2
|
||||
|
||||
/** The minimum size of a "small" buffer (including the padding). */
|
||||
#define LIPP_SMALL_PACKET_SIZE 128
|
||||
|
||||
/*
|
||||
* NOTE: The following two values should total to less than around
|
||||
* 13582, to keep the total size used for "lipp_state_t" below 64K.
|
||||
*/
|
||||
|
||||
/** The maximum number of "small" buffers.
|
||||
* This is enough for 53 network cpus with 128 credits. Note that
|
||||
* if these are exhausted, we will fall back to using large buffers.
|
||||
*/
|
||||
#define LIPP_SMALL_BUFFERS 6785
|
||||
|
||||
/** The maximum number of "large" buffers.
|
||||
* This is enough for 53 network cpus with 128 credits.
|
||||
*/
|
||||
#define LIPP_LARGE_BUFFERS 6785
|
||||
|
||||
#endif /* __DRV_XGBE_INTF_H__ */
|
|
@ -0,0 +1,122 @@
|
|||
/*
|
||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation, version 2.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||
* NON INFRINGEMENT. See the GNU General Public License for
|
||||
* more details.
|
||||
*/
|
||||
|
||||
/**
|
||||
* Error codes returned from NetIO routines.
|
||||
*/
|
||||
|
||||
#ifndef __NETIO_ERRORS_H__
|
||||
#define __NETIO_ERRORS_H__
|
||||
|
||||
/**
|
||||
* @addtogroup error
|
||||
*
|
||||
* @brief The error codes returned by NetIO functions.
|
||||
*
|
||||
* NetIO functions return 0 (defined as ::NETIO_NO_ERROR) on success, and
|
||||
* a negative value if an error occurs.
|
||||
*
|
||||
* In cases where a NetIO function failed due to a error reported by
|
||||
* system libraries, the error code will be the negation of the
|
||||
* system errno at the time of failure. The @ref netio_strerror()
|
||||
* function will deliver error strings for both NetIO and system error
|
||||
* codes.
|
||||
*
|
||||
* @{
|
||||
*/
|
||||
|
||||
/** The set of all NetIO errors. */
|
||||
typedef enum
|
||||
{
|
||||
/** Operation successfully completed. */
|
||||
NETIO_NO_ERROR = 0,
|
||||
|
||||
/** A packet was successfully retrieved from an input queue. */
|
||||
NETIO_PKT = 0,
|
||||
|
||||
/** Largest NetIO error number. */
|
||||
NETIO_ERR_MAX = -701,
|
||||
|
||||
/** The tile is not registered with the IPP. */
|
||||
NETIO_NOT_REGISTERED = -701,
|
||||
|
||||
/** No packet was available to retrieve from the input queue. */
|
||||
NETIO_NOPKT = -702,
|
||||
|
||||
/** The requested function is not implemented. */
|
||||
NETIO_NOT_IMPLEMENTED = -703,
|
||||
|
||||
/** On a registration operation, the target queue already has the maximum
|
||||
* number of tiles registered for it, and no more may be added. On a
|
||||
* packet send operation, the output queue is full and nothing more can
|
||||
* be queued until some of the queued packets are actually transmitted. */
|
||||
NETIO_QUEUE_FULL = -704,
|
||||
|
||||
/** The calling process or thread is not bound to exactly one CPU. */
|
||||
NETIO_BAD_AFFINITY = -705,
|
||||
|
||||
/** Cannot allocate memory on requested controllers. */
|
||||
NETIO_CANNOT_HOME = -706,
|
||||
|
||||
/** On a registration operation, the IPP specified is not configured
|
||||
* to support the options requested; for instance, the application
|
||||
* wants a specific type of tagged headers which the configured IPP
|
||||
* doesn't support. Or, the supplied configuration information is
|
||||
* not self-consistent, or is out of range; for instance, specifying
|
||||
* both NETIO_RECV and NETIO_NO_RECV, or asking for more than
|
||||
* NETIO_MAX_SEND_BUFFERS to be preallocated. On a VLAN or bucket
|
||||
* configure operation, the number of items, or the base item, was
|
||||
* out of range.
|
||||
*/
|
||||
NETIO_BAD_CONFIG = -707,
|
||||
|
||||
/** Too many tiles have registered to transmit packets. */
|
||||
NETIO_TOOMANY_XMIT = -708,
|
||||
|
||||
/** Packet transmission was attempted on a queue which was registered
|
||||
with transmit disabled. */
|
||||
NETIO_UNREG_XMIT = -709,
|
||||
|
||||
/** This tile is already registered with the IPP. */
|
||||
NETIO_ALREADY_REGISTERED = -710,
|
||||
|
||||
/** The Ethernet link is down. The application should try again later. */
|
||||
NETIO_LINK_DOWN = -711,
|
||||
|
||||
/** An invalid memory buffer has been specified. This may be an unmapped
|
||||
* virtual address, or one which does not meet alignment requirements.
|
||||
* For netio_input_register(), this error may be returned when multiple
|
||||
* processes specify different memory regions to be used for NetIO
|
||||
* buffers. That can happen if these processes specify explicit memory
|
||||
* regions with the ::NETIO_FIXED_BUFFER_VA flag, or if tmc_cmem_init()
|
||||
* has not been called by a common ancestor of the processes.
|
||||
*/
|
||||
NETIO_FAULT = -712,
|
||||
|
||||
/** Cannot combine user-managed shared memory and cache coherence. */
|
||||
NETIO_BAD_CACHE_CONFIG = -713,
|
||||
|
||||
/** Smallest NetIO error number. */
|
||||
NETIO_ERR_MIN = -713,
|
||||
|
||||
#ifndef __DOXYGEN__
|
||||
/** Used internally to mean that no response is needed; never returned to
|
||||
* an application. */
|
||||
NETIO_NO_RESPONSE = 1
|
||||
#endif
|
||||
} netio_error_t;
|
||||
|
||||
/** @} */
|
||||
|
||||
#endif /* __NETIO_ERRORS_H__ */
|
File diff suppressed because it is too large
Load Diff
|
@ -15,3 +15,4 @@ obj-$(CONFIG_SMP) += smpboot.o smp.o tlb.o
|
|||
obj-$(CONFIG_MODULES) += module.o
|
||||
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
|
||||
obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
|
||||
obj-$(CONFIG_PCI) += pci.o
|
||||
|
|
|
@ -0,0 +1,621 @@
|
|||
/*
|
||||
* Copyright 2010 Tilera Corporation. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation, version 2.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||
* NON INFRINGEMENT. See the GNU General Public License for
|
||||
* more details.
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/capability.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/bootmem.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/uaccess.h>
|
||||
|
||||
#include <asm/processor.h>
|
||||
#include <asm/sections.h>
|
||||
#include <asm/byteorder.h>
|
||||
#include <asm/hv_driver.h>
|
||||
#include <hv/drv_pcie_rc_intf.h>
|
||||
|
||||
|
||||
/*
|
||||
* Initialization flow and process
|
||||
* -------------------------------
|
||||
*
|
||||
* This files containes the routines to search for PCI buses,
|
||||
* enumerate the buses, and configure any attached devices.
|
||||
*
|
||||
* There are two entry points here:
|
||||
* 1) tile_pci_init
|
||||
* This sets up the pci_controller structs, and opens the
|
||||
* FDs to the hypervisor. This is called from setup_arch() early
|
||||
* in the boot process.
|
||||
* 2) pcibios_init
|
||||
* This probes the PCI bus(es) for any attached hardware. It's
|
||||
* called by subsys_initcall. All of the real work is done by the
|
||||
* generic Linux PCI layer.
|
||||
*
|
||||
*/
|
||||
|
||||
/*
|
||||
* This flag tells if the platform is TILEmpower that needs
|
||||
* special configuration for the PLX switch chip.
|
||||
*/
|
||||
int __write_once tile_plx_gen1;
|
||||
|
||||
static struct pci_controller controllers[TILE_NUM_PCIE];
|
||||
static int num_controllers;
|
||||
|
||||
static struct pci_ops tile_cfg_ops;
|
||||
|
||||
|
||||
/*
|
||||
* We don't need to worry about the alignment of resources.
|
||||
*/
|
||||
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
|
||||
resource_size_t size, resource_size_t align)
|
||||
{
|
||||
return res->start;
|
||||
}
|
||||
EXPORT_SYMBOL(pcibios_align_resource);
|
||||
|
||||
/*
|
||||
* Open a FD to the hypervisor PCI device.
|
||||
*
|
||||
* controller_id is the controller number, config type is 0 or 1 for
|
||||
* config0 or config1 operations.
|
||||
*/
|
||||
static int __init tile_pcie_open(int controller_id, int config_type)
|
||||
{
|
||||
char filename[32];
|
||||
int fd;
|
||||
|
||||
sprintf(filename, "pcie/%d/config%d", controller_id, config_type);
|
||||
|
||||
fd = hv_dev_open((HV_VirtAddr)filename, 0);
|
||||
|
||||
return fd;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Get the IRQ numbers from the HV and set up the handlers for them.
|
||||
*/
|
||||
static int __init tile_init_irqs(int controller_id,
|
||||
struct pci_controller *controller)
|
||||
{
|
||||
char filename[32];
|
||||
int fd;
|
||||
int ret;
|
||||
int x;
|
||||
struct pcie_rc_config rc_config;
|
||||
|
||||
sprintf(filename, "pcie/%d/ctl", controller_id);
|
||||
fd = hv_dev_open((HV_VirtAddr)filename, 0);
|
||||
if (fd < 0) {
|
||||
pr_err("PCI: hv_dev_open(%s) failed\n", filename);
|
||||
return -1;
|
||||
}
|
||||
ret = hv_dev_pread(fd, 0, (HV_VirtAddr)(&rc_config),
|
||||
sizeof(rc_config), PCIE_RC_CONFIG_MASK_OFF);
|
||||
hv_dev_close(fd);
|
||||
if (ret != sizeof(rc_config)) {
|
||||
pr_err("PCI: wanted %zd bytes, got %d\n",
|
||||
sizeof(rc_config), ret);
|
||||
return -1;
|
||||
}
|
||||
/* Record irq_base so that we can map INTx to IRQ # later. */
|
||||
controller->irq_base = rc_config.intr;
|
||||
|
||||
for (x = 0; x < 4; x++)
|
||||
tile_irq_activate(rc_config.intr + x,
|
||||
TILE_IRQ_HW_CLEAR);
|
||||
|
||||
if (rc_config.plx_gen1)
|
||||
controller->plx_gen1 = 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* First initialization entry point, called from setup_arch().
|
||||
*
|
||||
* Find valid controllers and fill in pci_controller structs for each
|
||||
* of them.
|
||||
*
|
||||
* Returns the number of controllers discovered.
|
||||
*/
|
||||
int __init tile_pci_init(void)
|
||||
{
|
||||
int i;
|
||||
|
||||
pr_info("PCI: Searching for controllers...\n");
|
||||
|
||||
/* Do any configuration we need before using the PCIe */
|
||||
|
||||
for (i = 0; i < TILE_NUM_PCIE; i++) {
|
||||
int hv_cfg_fd0 = -1;
|
||||
int hv_cfg_fd1 = -1;
|
||||
int hv_mem_fd = -1;
|
||||
char name[32];
|
||||
struct pci_controller *controller;
|
||||
|
||||
/*
|
||||
* Open the fd to the HV. If it fails then this
|
||||
* device doesn't exist.
|
||||
*/
|
||||
hv_cfg_fd0 = tile_pcie_open(i, 0);
|
||||
if (hv_cfg_fd0 < 0)
|
||||
continue;
|
||||
hv_cfg_fd1 = tile_pcie_open(i, 1);
|
||||
if (hv_cfg_fd1 < 0) {
|
||||
pr_err("PCI: Couldn't open config fd to HV "
|
||||
"for controller %d\n", i);
|
||||
goto err_cont;
|
||||
}
|
||||
|
||||
sprintf(name, "pcie/%d/mem", i);
|
||||
hv_mem_fd = hv_dev_open((HV_VirtAddr)name, 0);
|
||||
if (hv_mem_fd < 0) {
|
||||
pr_err("PCI: Could not open mem fd to HV!\n");
|
||||
goto err_cont;
|
||||
}
|
||||
|
||||
pr_info("PCI: Found PCI controller #%d\n", i);
|
||||
|
||||
controller = &controllers[num_controllers];
|
||||
|
||||
if (tile_init_irqs(i, controller)) {
|
||||
pr_err("PCI: Could not initialize "
|
||||
"IRQs, aborting.\n");
|
||||
goto err_cont;
|
||||
}
|
||||
|
||||
controller->index = num_controllers;
|
||||
controller->hv_cfg_fd[0] = hv_cfg_fd0;
|
||||
controller->hv_cfg_fd[1] = hv_cfg_fd1;
|
||||
controller->hv_mem_fd = hv_mem_fd;
|
||||
controller->first_busno = 0;
|
||||
controller->last_busno = 0xff;
|
||||
controller->ops = &tile_cfg_ops;
|
||||
|
||||
num_controllers++;
|
||||
continue;
|
||||
|
||||
err_cont:
|
||||
if (hv_cfg_fd0 >= 0)
|
||||
hv_dev_close(hv_cfg_fd0);
|
||||
if (hv_cfg_fd1 >= 0)
|
||||
hv_dev_close(hv_cfg_fd1);
|
||||
if (hv_mem_fd >= 0)
|
||||
hv_dev_close(hv_mem_fd);
|
||||
continue;
|
||||
}
|
||||
|
||||
/*
|
||||
* Before using the PCIe, see if we need to do any platform-specific
|
||||
* configuration, such as the PLX switch Gen 1 issue on TILEmpower.
|
||||
*/
|
||||
for (i = 0; i < num_controllers; i++) {
|
||||
struct pci_controller *controller = &controllers[i];
|
||||
|
||||
if (controller->plx_gen1)
|
||||
tile_plx_gen1 = 1;
|
||||
}
|
||||
|
||||
return num_controllers;
|
||||
}
|
||||
|
||||
/*
|
||||
* (pin - 1) converts from the PCI standard's [1:4] convention to
|
||||
* a normal [0:3] range.
|
||||
*/
|
||||
static int tile_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
|
||||
{
|
||||
struct pci_controller *controller =
|
||||
(struct pci_controller *)dev->sysdata;
|
||||
return (pin - 1) + controller->irq_base;
|
||||
}
|
||||
|
||||
|
||||
static void __init fixup_read_and_payload_sizes(void)
|
||||
{
|
||||
struct pci_dev *dev = NULL;
|
||||
int smallest_max_payload = 0x1; /* Tile maxes out at 256 bytes. */
|
||||
int max_read_size = 0x2; /* Limit to 512 byte reads. */
|
||||
u16 new_values;
|
||||
|
||||
/* Scan for the smallest maximum payload size. */
|
||||
while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
|
||||
int pcie_caps_offset;
|
||||
u32 devcap;
|
||||
int max_payload;
|
||||
|
||||
pcie_caps_offset = pci_find_capability(dev, PCI_CAP_ID_EXP);
|
||||
if (pcie_caps_offset == 0)
|
||||
continue;
|
||||
|
||||
pci_read_config_dword(dev, pcie_caps_offset + PCI_EXP_DEVCAP,
|
||||
&devcap);
|
||||
max_payload = devcap & PCI_EXP_DEVCAP_PAYLOAD;
|
||||
if (max_payload < smallest_max_payload)
|
||||
smallest_max_payload = max_payload;
|
||||
}
|
||||
|
||||
/* Now, set the max_payload_size for all devices to that value. */
|
||||
new_values = (max_read_size << 12) | (smallest_max_payload << 5);
|
||||
while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
|
||||
int pcie_caps_offset;
|
||||
u16 devctl;
|
||||
|
||||
pcie_caps_offset = pci_find_capability(dev, PCI_CAP_ID_EXP);
|
||||
if (pcie_caps_offset == 0)
|
||||
continue;
|
||||
|
||||
pci_read_config_word(dev, pcie_caps_offset + PCI_EXP_DEVCTL,
|
||||
&devctl);
|
||||
devctl &= ~(PCI_EXP_DEVCTL_PAYLOAD | PCI_EXP_DEVCTL_READRQ);
|
||||
devctl |= new_values;
|
||||
pci_write_config_word(dev, pcie_caps_offset + PCI_EXP_DEVCTL,
|
||||
devctl);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Second PCI initialization entry point, called by subsys_initcall.
|
||||
*
|
||||
* The controllers have been set up by the time we get here, by a call to
|
||||
* tile_pci_init.
|
||||
*/
|
||||
static int __init pcibios_init(void)
|
||||
{
|
||||
int i;
|
||||
|
||||
pr_info("PCI: Probing PCI hardware\n");
|
||||
|
||||
/*
|
||||
* Delay a bit in case devices aren't ready. Some devices are
|
||||
* known to require at least 20ms here, but we use a more
|
||||
* conservative value.
|
||||
*/
|
||||
mdelay(250);
|
||||
|
||||
/* Scan all of the recorded PCI controllers. */
|
||||
for (i = 0; i < num_controllers; i++) {
|
||||
struct pci_controller *controller = &controllers[i];
|
||||
struct pci_bus *bus;
|
||||
|
||||
pr_info("PCI: initializing controller #%d\n", i);
|
||||
|
||||
/*
|
||||
* This comes from the generic Linux PCI driver.
|
||||
*
|
||||
* It reads the PCI tree for this bus into the Linux
|
||||
* data structures.
|
||||
*
|
||||
* This is inlined in linux/pci.h and calls into
|
||||
* pci_scan_bus_parented() in probe.c.
|
||||
*/
|
||||
bus = pci_scan_bus(0, controller->ops, controller);
|
||||
controller->root_bus = bus;
|
||||
controller->last_busno = bus->subordinate;
|
||||
|
||||
}
|
||||
|
||||
/* Do machine dependent PCI interrupt routing */
|
||||
pci_fixup_irqs(pci_common_swizzle, tile_map_irq);
|
||||
|
||||
/*
|
||||
* This comes from the generic Linux PCI driver.
|
||||
*
|
||||
* It allocates all of the resources (I/O memory, etc)
|
||||
* associated with the devices read in above.
|
||||
*/
|
||||
|
||||
pci_assign_unassigned_resources();
|
||||
|
||||
/* Configure the max_read_size and max_payload_size values. */
|
||||
fixup_read_and_payload_sizes();
|
||||
|
||||
/* Record the I/O resources in the PCI controller structure. */
|
||||
for (i = 0; i < num_controllers; i++) {
|
||||
struct pci_bus *root_bus = controllers[i].root_bus;
|
||||
struct pci_bus *next_bus;
|
||||
struct pci_dev *dev;
|
||||
|
||||
list_for_each_entry(dev, &root_bus->devices, bus_list) {
|
||||
/* Find the PCI host controller, ie. the 1st bridge. */
|
||||
if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI &&
|
||||
(PCI_SLOT(dev->devfn) == 0)) {
|
||||
next_bus = dev->subordinate;
|
||||
controllers[i].mem_resources[0] =
|
||||
*next_bus->resource[0];
|
||||
controllers[i].mem_resources[1] =
|
||||
*next_bus->resource[1];
|
||||
controllers[i].mem_resources[2] =
|
||||
*next_bus->resource[2];
|
||||
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
subsys_initcall(pcibios_init);
|
||||
|
||||
/*
|
||||
* No bus fixups needed.
|
||||
*/
|
||||
void __devinit pcibios_fixup_bus(struct pci_bus *bus)
|
||||
{
|
||||
/* Nothing needs to be done. */
|
||||
}
|
||||
|
||||
/*
|
||||
* This can be called from the generic PCI layer, but doesn't need to
|
||||
* do anything.
|
||||
*/
|
||||
char __devinit *pcibios_setup(char *str)
|
||||
{
|
||||
/* Nothing needs to be done. */
|
||||
return str;
|
||||
}
|
||||
|
||||
/*
|
||||
* This is called from the generic Linux layer.
|
||||
*/
|
||||
void __init pcibios_update_irq(struct pci_dev *dev, int irq)
|
||||
{
|
||||
pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
|
||||
}
|
||||
|
||||
/*
|
||||
* Enable memory and/or address decoding, as appropriate, for the
|
||||
* device described by the 'dev' struct.
|
||||
*
|
||||
* This is called from the generic PCI layer, and can be called
|
||||
* for bridges or endpoints.
|
||||
*/
|
||||
int pcibios_enable_device(struct pci_dev *dev, int mask)
|
||||
{
|
||||
u16 cmd, old_cmd;
|
||||
u8 header_type;
|
||||
int i;
|
||||
struct resource *r;
|
||||
|
||||
pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
|
||||
|
||||
pci_read_config_word(dev, PCI_COMMAND, &cmd);
|
||||
old_cmd = cmd;
|
||||
if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
|
||||
/*
|
||||
* For bridges, we enable both memory and I/O decoding
|
||||
* in call cases.
|
||||
*/
|
||||
cmd |= PCI_COMMAND_IO;
|
||||
cmd |= PCI_COMMAND_MEMORY;
|
||||
} else {
|
||||
/*
|
||||
* For endpoints, we enable memory and/or I/O decoding
|
||||
* only if they have a memory resource of that type.
|
||||
*/
|
||||
for (i = 0; i < 6; i++) {
|
||||
r = &dev->resource[i];
|
||||
if (r->flags & IORESOURCE_UNSET) {
|
||||
pr_err("PCI: Device %s not available "
|
||||
"because of resource collisions\n",
|
||||
pci_name(dev));
|
||||
return -EINVAL;
|
||||
}
|
||||
if (r->flags & IORESOURCE_IO)
|
||||
cmd |= PCI_COMMAND_IO;
|
||||
if (r->flags & IORESOURCE_MEM)
|
||||
cmd |= PCI_COMMAND_MEMORY;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* We only write the command if it changed.
|
||||
*/
|
||||
if (cmd != old_cmd)
|
||||
pci_write_config_word(dev, PCI_COMMAND, cmd);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max)
|
||||
{
|
||||
unsigned long start = pci_resource_start(dev, bar);
|
||||
unsigned long len = pci_resource_len(dev, bar);
|
||||
unsigned long flags = pci_resource_flags(dev, bar);
|
||||
|
||||
if (!len)
|
||||
return NULL;
|
||||
if (max && len > max)
|
||||
len = max;
|
||||
|
||||
if (!(flags & IORESOURCE_MEM)) {
|
||||
pr_info("PCI: Trying to map invalid resource %#lx\n", flags);
|
||||
start = 0;
|
||||
}
|
||||
|
||||
return (void __iomem *)start;
|
||||
}
|
||||
EXPORT_SYMBOL(pci_iomap);
|
||||
|
||||
|
||||
/****************************************************************
|
||||
*
|
||||
* Tile PCI config space read/write routines
|
||||
*
|
||||
****************************************************************/
|
||||
|
||||
/*
|
||||
* These are the normal read and write ops
|
||||
* These are expanded with macros from pci_bus_read_config_byte() etc.
|
||||
*
|
||||
* devfn is the combined PCI slot & function.
|
||||
*
|
||||
* offset is in bytes, from the start of config space for the
|
||||
* specified bus & slot.
|
||||
*/
|
||||
|
||||
static int __devinit tile_cfg_read(struct pci_bus *bus,
|
||||
unsigned int devfn,
|
||||
int offset,
|
||||
int size,
|
||||
u32 *val)
|
||||
{
|
||||
struct pci_controller *controller = bus->sysdata;
|
||||
int busnum = bus->number & 0xff;
|
||||
int slot = (devfn >> 3) & 0x1f;
|
||||
int function = devfn & 0x7;
|
||||
u32 addr;
|
||||
int config_mode = 1;
|
||||
|
||||
/*
|
||||
* There is no bridge between the Tile and bus 0, so we
|
||||
* use config0 to talk to bus 0.
|
||||
*
|
||||
* If we're talking to a bus other than zero then we
|
||||
* must have found a bridge.
|
||||
*/
|
||||
if (busnum == 0) {
|
||||
/*
|
||||
* We fake an empty slot for (busnum == 0) && (slot > 0),
|
||||
* since there is only one slot on bus 0.
|
||||
*/
|
||||
if (slot) {
|
||||
*val = 0xFFFFFFFF;
|
||||
return 0;
|
||||
}
|
||||
config_mode = 0;
|
||||
}
|
||||
|
||||
addr = busnum << 20; /* Bus in 27:20 */
|
||||
addr |= slot << 15; /* Slot (device) in 19:15 */
|
||||
addr |= function << 12; /* Function is in 14:12 */
|
||||
addr |= (offset & 0xFFF); /* byte address in 0:11 */
|
||||
|
||||
return hv_dev_pread(controller->hv_cfg_fd[config_mode], 0,
|
||||
(HV_VirtAddr)(val), size, addr);
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* See tile_cfg_read() for relevent comments.
|
||||
* Note that "val" is the value to write, not a pointer to that value.
|
||||
*/
|
||||
static int __devinit tile_cfg_write(struct pci_bus *bus,
|
||||
unsigned int devfn,
|
||||
int offset,
|
||||
int size,
|
||||
u32 val)
|
||||
{
|
||||
struct pci_controller *controller = bus->sysdata;
|
||||
int busnum = bus->number & 0xff;
|
||||
int slot = (devfn >> 3) & 0x1f;
|
||||
int function = devfn & 0x7;
|
||||
u32 addr;
|
||||
int config_mode = 1;
|
||||
HV_VirtAddr valp = (HV_VirtAddr)&val;
|
||||
|
||||
/*
|
||||
* For bus 0 slot 0 we use config 0 accesses.
|
||||
*/
|
||||
if (busnum == 0) {
|
||||
/*
|
||||
* We fake an empty slot for (busnum == 0) && (slot > 0),
|
||||
* since there is only one slot on bus 0.
|
||||
*/
|
||||
if (slot)
|
||||
return 0;
|
||||
config_mode = 0;
|
||||
}
|
||||
|
||||
addr = busnum << 20; /* Bus in 27:20 */
|
||||
addr |= slot << 15; /* Slot (device) in 19:15 */
|
||||
addr |= function << 12; /* Function is in 14:12 */
|
||||
addr |= (offset & 0xFFF); /* byte address in 0:11 */
|
||||
|
||||
#ifdef __BIG_ENDIAN
|
||||
/* Point to the correct part of the 32-bit "val". */
|
||||
valp += 4 - size;
|
||||
#endif
|
||||
|
||||
return hv_dev_pwrite(controller->hv_cfg_fd[config_mode], 0,
|
||||
valp, size, addr);
|
||||
}
|
||||
|
||||
|
||||
static struct pci_ops tile_cfg_ops = {
|
||||
.read = tile_cfg_read,
|
||||
.write = tile_cfg_write,
|
||||
};
|
||||
|
||||
|
||||
/*
|
||||
* In the following, each PCI controller's mem_resources[1]
|
||||
* represents its (non-prefetchable) PCI memory resource.
|
||||
* mem_resources[0] and mem_resources[2] refer to its PCI I/O and
|
||||
* prefetchable PCI memory resources, respectively.
|
||||
* For more details, see pci_setup_bridge() in setup-bus.c.
|
||||
* By comparing the target PCI memory address against the
|
||||
* end address of controller 0, we can determine the controller
|
||||
* that should accept the PCI memory access.
|
||||
*/
|
||||
#define TILE_READ(size, type) \
|
||||
type _tile_read##size(unsigned long addr) \
|
||||
{ \
|
||||
type val; \
|
||||
int idx = 0; \
|
||||
if (addr > controllers[0].mem_resources[1].end && \
|
||||
addr > controllers[0].mem_resources[2].end) \
|
||||
idx = 1; \
|
||||
if (hv_dev_pread(controllers[idx].hv_mem_fd, 0, \
|
||||
(HV_VirtAddr)(&val), sizeof(type), addr)) \
|
||||
pr_err("PCI: read %zd bytes at 0x%lX failed\n", \
|
||||
sizeof(type), addr); \
|
||||
return val; \
|
||||
} \
|
||||
EXPORT_SYMBOL(_tile_read##size)
|
||||
|
||||
TILE_READ(b, u8);
|
||||
TILE_READ(w, u16);
|
||||
TILE_READ(l, u32);
|
||||
TILE_READ(q, u64);
|
||||
|
||||
#define TILE_WRITE(size, type) \
|
||||
void _tile_write##size(type val, unsigned long addr) \
|
||||
{ \
|
||||
int idx = 0; \
|
||||
if (addr > controllers[0].mem_resources[1].end && \
|
||||
addr > controllers[0].mem_resources[2].end) \
|
||||
idx = 1; \
|
||||
if (hv_dev_pwrite(controllers[idx].hv_mem_fd, 0, \
|
||||
(HV_VirtAddr)(&val), sizeof(type), addr)) \
|
||||
pr_err("PCI: write %zd bytes at 0x%lX failed\n", \
|
||||
sizeof(type), addr); \
|
||||
} \
|
||||
EXPORT_SYMBOL(_tile_write##size)
|
||||
|
||||
TILE_WRITE(b, u8);
|
||||
TILE_WRITE(w, u16);
|
||||
TILE_WRITE(l, u32);
|
||||
TILE_WRITE(q, u64);
|
|
@ -840,7 +840,7 @@ static int __init topology_init(void)
|
|||
for_each_online_node(i)
|
||||
register_one_node(i);
|
||||
|
||||
for_each_present_cpu(i)
|
||||
for (i = 0; i < smp_height * smp_width; ++i)
|
||||
register_cpu(&cpu_devices[i], i);
|
||||
|
||||
return 0;
|
||||
|
|
|
@ -18,12 +18,24 @@
|
|||
|
||||
void *memchr(const void *s, int c, size_t n)
|
||||
{
|
||||
const uint32_t *last_word_ptr;
|
||||
const uint32_t *p;
|
||||
const char *last_byte_ptr;
|
||||
uintptr_t s_int;
|
||||
uint32_t goal, before_mask, v, bits;
|
||||
char *ret;
|
||||
|
||||
if (__builtin_expect(n == 0, 0)) {
|
||||
/* Don't dereference any memory if the array is empty. */
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* Get an aligned pointer. */
|
||||
const uintptr_t s_int = (uintptr_t) s;
|
||||
const uint32_t *p = (const uint32_t *)(s_int & -4);
|
||||
s_int = (uintptr_t) s;
|
||||
p = (const uint32_t *)(s_int & -4);
|
||||
|
||||
/* Create four copies of the byte for which we are looking. */
|
||||
const uint32_t goal = 0x01010101 * (uint8_t) c;
|
||||
goal = 0x01010101 * (uint8_t) c;
|
||||
|
||||
/* Read the first word, but munge it so that bytes before the array
|
||||
* will not match goal.
|
||||
|
@ -31,23 +43,14 @@ void *memchr(const void *s, int c, size_t n)
|
|||
* Note that this shift count expression works because we know
|
||||
* shift counts are taken mod 32.
|
||||
*/
|
||||
const uint32_t before_mask = (1 << (s_int << 3)) - 1;
|
||||
uint32_t v = (*p | before_mask) ^ (goal & before_mask);
|
||||
before_mask = (1 << (s_int << 3)) - 1;
|
||||
v = (*p | before_mask) ^ (goal & before_mask);
|
||||
|
||||
/* Compute the address of the last byte. */
|
||||
const char *const last_byte_ptr = (const char *)s + n - 1;
|
||||
last_byte_ptr = (const char *)s + n - 1;
|
||||
|
||||
/* Compute the address of the word containing the last byte. */
|
||||
const uint32_t *const last_word_ptr =
|
||||
(const uint32_t *)((uintptr_t) last_byte_ptr & -4);
|
||||
|
||||
uint32_t bits;
|
||||
char *ret;
|
||||
|
||||
if (__builtin_expect(n == 0, 0)) {
|
||||
/* Don't dereference any memory if the array is empty. */
|
||||
return NULL;
|
||||
}
|
||||
last_word_ptr = (const uint32_t *)((uintptr_t) last_byte_ptr & -4);
|
||||
|
||||
while ((bits = __insn_seqb(v, goal)) == 0) {
|
||||
if (__builtin_expect(p == last_word_ptr, 0)) {
|
||||
|
|
|
@ -167,23 +167,30 @@ void arch_write_lock_slow(arch_rwlock_t *rwlock, u32 val)
|
|||
* when we compare them.
|
||||
*/
|
||||
u32 my_ticket_;
|
||||
|
||||
/* Take out the next ticket; this will also stop would-be readers. */
|
||||
if (val & 1)
|
||||
val = get_rwlock(rwlock);
|
||||
rwlock->lock = __insn_addb(val, 1 << WR_NEXT_SHIFT);
|
||||
|
||||
/* Extract my ticket value from the original word. */
|
||||
my_ticket_ = val >> WR_NEXT_SHIFT;
|
||||
u32 iterations = 0;
|
||||
|
||||
/*
|
||||
* Wait until the "current" field matches our ticket, and
|
||||
* there are no remaining readers.
|
||||
* Wait until there are no readers, then bump up the next
|
||||
* field and capture the ticket value.
|
||||
*/
|
||||
for (;;) {
|
||||
if (!(val & 1)) {
|
||||
if ((val >> RD_COUNT_SHIFT) == 0)
|
||||
break;
|
||||
rwlock->lock = val;
|
||||
}
|
||||
delay_backoff(iterations++);
|
||||
val = __insn_tns((int *)&rwlock->lock);
|
||||
}
|
||||
|
||||
/* Take out the next ticket and extract my ticket value. */
|
||||
rwlock->lock = __insn_addb(val, 1 << WR_NEXT_SHIFT);
|
||||
my_ticket_ = val >> WR_NEXT_SHIFT;
|
||||
|
||||
/* Wait until the "current" field matches our ticket. */
|
||||
for (;;) {
|
||||
u32 curr_ = val >> WR_CURR_SHIFT;
|
||||
u32 readers = val >> RD_COUNT_SHIFT;
|
||||
u32 delta = ((my_ticket_ - curr_) & WR_MASK) + !!readers;
|
||||
u32 delta = ((my_ticket_ - curr_) & WR_MASK);
|
||||
if (likely(delta == 0))
|
||||
break;
|
||||
|
||||
|
|
|
@ -727,6 +727,9 @@ struct winch {
|
|||
|
||||
static void free_winch(struct winch *winch, int free_irq_ok)
|
||||
{
|
||||
if (free_irq_ok)
|
||||
free_irq(WINCH_IRQ, winch);
|
||||
|
||||
list_del(&winch->list);
|
||||
|
||||
if (winch->pid != -1)
|
||||
|
@ -735,8 +738,6 @@ static void free_winch(struct winch *winch, int free_irq_ok)
|
|||
os_close_file(winch->fd);
|
||||
if (winch->stack != 0)
|
||||
free_stack(winch->stack, 0);
|
||||
if (free_irq_ok)
|
||||
free_irq(WINCH_IRQ, winch);
|
||||
kfree(winch);
|
||||
}
|
||||
|
||||
|
|
|
@ -21,7 +21,7 @@ config X86
|
|||
select HAVE_UNSTABLE_SCHED_CLOCK
|
||||
select HAVE_IDE
|
||||
select HAVE_OPROFILE
|
||||
select HAVE_PERF_EVENTS if (!M386 && !M486)
|
||||
select HAVE_PERF_EVENTS
|
||||
select HAVE_IRQ_WORK
|
||||
select HAVE_IOREMAP_PROT
|
||||
select HAVE_KPROBES
|
||||
|
|
|
@ -216,8 +216,8 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr)
|
|||
}
|
||||
|
||||
/* Return an pointer with offset calculated */
|
||||
static inline unsigned long __set_fixmap_offset(enum fixed_addresses idx,
|
||||
phys_addr_t phys, pgprot_t flags)
|
||||
static __always_inline unsigned long
|
||||
__set_fixmap_offset(enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags)
|
||||
{
|
||||
__set_fixmap(idx, phys, flags);
|
||||
return fix_to_virt(idx) + (phys & (PAGE_SIZE - 1));
|
||||
|
|
|
@ -128,7 +128,7 @@
|
|||
#define FAM10H_MMIO_CONF_ENABLE (1<<0)
|
||||
#define FAM10H_MMIO_CONF_BUSRANGE_MASK 0xf
|
||||
#define FAM10H_MMIO_CONF_BUSRANGE_SHIFT 2
|
||||
#define FAM10H_MMIO_CONF_BASE_MASK 0xfffffff
|
||||
#define FAM10H_MMIO_CONF_BASE_MASK 0xfffffffULL
|
||||
#define FAM10H_MMIO_CONF_BASE_SHIFT 20
|
||||
#define MSR_FAM10H_NODE_ID 0xc001100c
|
||||
|
||||
|
|
|
@ -824,27 +824,27 @@ static __always_inline void arch_spin_unlock(struct arch_spinlock *lock)
|
|||
#define __PV_IS_CALLEE_SAVE(func) \
|
||||
((struct paravirt_callee_save) { func })
|
||||
|
||||
static inline unsigned long arch_local_save_flags(void)
|
||||
static inline notrace unsigned long arch_local_save_flags(void)
|
||||
{
|
||||
return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
|
||||
}
|
||||
|
||||
static inline void arch_local_irq_restore(unsigned long f)
|
||||
static inline notrace void arch_local_irq_restore(unsigned long f)
|
||||
{
|
||||
PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
|
||||
}
|
||||
|
||||
static inline void arch_local_irq_disable(void)
|
||||
static inline notrace void arch_local_irq_disable(void)
|
||||
{
|
||||
PVOP_VCALLEE0(pv_irq_ops.irq_disable);
|
||||
}
|
||||
|
||||
static inline void arch_local_irq_enable(void)
|
||||
static inline notrace void arch_local_irq_enable(void)
|
||||
{
|
||||
PVOP_VCALLEE0(pv_irq_ops.irq_enable);
|
||||
}
|
||||
|
||||
static inline unsigned long arch_local_irq_save(void)
|
||||
static inline notrace unsigned long arch_local_irq_save(void)
|
||||
{
|
||||
unsigned long f;
|
||||
|
||||
|
|
|
@ -199,6 +199,8 @@ union uvh_apicid {
|
|||
#define UVH_APICID 0x002D0E00L
|
||||
#define UV_APIC_PNODE_SHIFT 6
|
||||
|
||||
#define UV_APICID_HIBIT_MASK 0xffff0000
|
||||
|
||||
/* Local Bus from cpu's perspective */
|
||||
#define LOCAL_BUS_BASE 0x1c00000
|
||||
#define LOCAL_BUS_SIZE (4 * 1024 * 1024)
|
||||
|
@ -491,8 +493,10 @@ static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value)
|
|||
}
|
||||
}
|
||||
|
||||
extern unsigned int uv_apicid_hibits;
|
||||
static unsigned long uv_hub_ipi_value(int apicid, int vector, int mode)
|
||||
{
|
||||
apicid |= uv_apicid_hibits;
|
||||
return (1UL << UVH_IPI_INT_SEND_SHFT) |
|
||||
((apicid) << UVH_IPI_INT_APIC_ID_SHFT) |
|
||||
(mode << UVH_IPI_INT_DELIVERY_MODE_SHFT) |
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
*
|
||||
* SGI UV MMR definitions
|
||||
*
|
||||
* Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved.
|
||||
* Copyright (C) 2007-2010 Silicon Graphics, Inc. All rights reserved.
|
||||
*/
|
||||
|
||||
#ifndef _ASM_X86_UV_UV_MMRS_H
|
||||
|
@ -753,6 +753,23 @@ union uvh_lb_bau_sb_descriptor_base_u {
|
|||
} s;
|
||||
};
|
||||
|
||||
/* ========================================================================= */
|
||||
/* UVH_LB_TARGET_PHYSICAL_APIC_ID_MASK */
|
||||
/* ========================================================================= */
|
||||
#define UVH_LB_TARGET_PHYSICAL_APIC_ID_MASK 0x320130UL
|
||||
#define UVH_LB_TARGET_PHYSICAL_APIC_ID_MASK_32 0x009f0
|
||||
|
||||
#define UVH_LB_TARGET_PHYSICAL_APIC_ID_MASK_BIT_ENABLES_SHFT 0
|
||||
#define UVH_LB_TARGET_PHYSICAL_APIC_ID_MASK_BIT_ENABLES_MASK 0x00000000ffffffffUL
|
||||
|
||||
union uvh_lb_target_physical_apic_id_mask_u {
|
||||
unsigned long v;
|
||||
struct uvh_lb_target_physical_apic_id_mask_s {
|
||||
unsigned long bit_enables : 32; /* RW */
|
||||
unsigned long rsvd_32_63 : 32; /* */
|
||||
} s;
|
||||
};
|
||||
|
||||
/* ========================================================================= */
|
||||
/* UVH_NODE_ID */
|
||||
/* ========================================================================= */
|
||||
|
|
|
@ -61,9 +61,9 @@ DEFINE_GUEST_HANDLE(void);
|
|||
#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
|
||||
#endif
|
||||
|
||||
#ifndef machine_to_phys_mapping
|
||||
#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
|
||||
#endif
|
||||
#define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START)
|
||||
#define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END)
|
||||
#define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>__MACH2PHYS_SHIFT)
|
||||
|
||||
/* Maximum number of virtual CPUs in multi-processor guests. */
|
||||
#define MAX_VIRT_CPUS 32
|
||||
|
|
|
@ -32,6 +32,11 @@
|
|||
/* And the trap vector is... */
|
||||
#define TRAP_INSTR "int $0x82"
|
||||
|
||||
#define __MACH2PHYS_VIRT_START 0xF5800000
|
||||
#define __MACH2PHYS_VIRT_END 0xF6800000
|
||||
|
||||
#define __MACH2PHYS_SHIFT 2
|
||||
|
||||
/*
|
||||
* Virtual addresses beyond this are not modifiable by guest OSes. The
|
||||
* machine->physical mapping table starts at this address, read-only.
|
||||
|
|
|
@ -39,18 +39,7 @@
|
|||
#define __HYPERVISOR_VIRT_END 0xFFFF880000000000
|
||||
#define __MACH2PHYS_VIRT_START 0xFFFF800000000000
|
||||
#define __MACH2PHYS_VIRT_END 0xFFFF804000000000
|
||||
|
||||
#ifndef HYPERVISOR_VIRT_START
|
||||
#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
|
||||
#define HYPERVISOR_VIRT_END mk_unsigned_long(__HYPERVISOR_VIRT_END)
|
||||
#endif
|
||||
|
||||
#define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START)
|
||||
#define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END)
|
||||
#define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>3)
|
||||
#ifndef machine_to_phys_mapping
|
||||
#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
|
||||
#endif
|
||||
#define __MACH2PHYS_SHIFT 3
|
||||
|
||||
/*
|
||||
* int HYPERVISOR_set_segment_base(unsigned int which, unsigned long base)
|
||||
|
|
|
@ -5,6 +5,7 @@
|
|||
#include <linux/types.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/pfn.h>
|
||||
#include <linux/mm.h>
|
||||
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/page.h>
|
||||
|
@ -35,6 +36,8 @@ typedef struct xpaddr {
|
|||
#define MAX_DOMAIN_PAGES \
|
||||
((unsigned long)((u64)CONFIG_XEN_MAX_DOMAIN_MEMORY * 1024 * 1024 * 1024 / PAGE_SIZE))
|
||||
|
||||
extern unsigned long *machine_to_phys_mapping;
|
||||
extern unsigned int machine_to_phys_order;
|
||||
|
||||
extern unsigned long get_phys_to_machine(unsigned long pfn);
|
||||
extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
|
||||
|
@ -69,10 +72,8 @@ static inline unsigned long mfn_to_pfn(unsigned long mfn)
|
|||
if (xen_feature(XENFEAT_auto_translated_physmap))
|
||||
return mfn;
|
||||
|
||||
#if 0
|
||||
if (unlikely((mfn >> machine_to_phys_order) != 0))
|
||||
return max_mapnr;
|
||||
#endif
|
||||
return ~0;
|
||||
|
||||
pfn = 0;
|
||||
/*
|
||||
|
|
|
@ -17,15 +17,16 @@
|
|||
#include <linux/nmi.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
/* For reliability, we're prepared to waste bits here. */
|
||||
static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;
|
||||
|
||||
u64 hw_nmi_get_sample_period(void)
|
||||
{
|
||||
return (u64)(cpu_khz) * 1000 * 60;
|
||||
}
|
||||
|
||||
#ifdef ARCH_HAS_NMI_WATCHDOG
|
||||
|
||||
/* For reliability, we're prepared to waste bits here. */
|
||||
static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;
|
||||
|
||||
void arch_trigger_all_cpu_backtrace(void)
|
||||
{
|
||||
int i;
|
||||
|
|
|
@ -44,6 +44,8 @@ static u64 gru_start_paddr, gru_end_paddr;
|
|||
static union uvh_apicid uvh_apicid;
|
||||
int uv_min_hub_revision_id;
|
||||
EXPORT_SYMBOL_GPL(uv_min_hub_revision_id);
|
||||
unsigned int uv_apicid_hibits;
|
||||
EXPORT_SYMBOL_GPL(uv_apicid_hibits);
|
||||
static DEFINE_SPINLOCK(uv_nmi_lock);
|
||||
|
||||
static inline bool is_GRU_range(u64 start, u64 end)
|
||||
|
@ -85,6 +87,23 @@ static void __init early_get_apic_pnode_shift(void)
|
|||
uvh_apicid.s.pnode_shift = UV_APIC_PNODE_SHIFT;
|
||||
}
|
||||
|
||||
/*
|
||||
* Add an extra bit as dictated by bios to the destination apicid of
|
||||
* interrupts potentially passing through the UV HUB. This prevents
|
||||
* a deadlock between interrupts and IO port operations.
|
||||
*/
|
||||
static void __init uv_set_apicid_hibit(void)
|
||||
{
|
||||
union uvh_lb_target_physical_apic_id_mask_u apicid_mask;
|
||||
unsigned long *mmr;
|
||||
|
||||
mmr = early_ioremap(UV_LOCAL_MMR_BASE |
|
||||
UVH_LB_TARGET_PHYSICAL_APIC_ID_MASK, sizeof(*mmr));
|
||||
apicid_mask.v = *mmr;
|
||||
early_iounmap(mmr, sizeof(*mmr));
|
||||
uv_apicid_hibits = apicid_mask.s.bit_enables & UV_APICID_HIBIT_MASK;
|
||||
}
|
||||
|
||||
static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
|
||||
{
|
||||
int nodeid;
|
||||
|
@ -102,6 +121,7 @@ static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
|
|||
__get_cpu_var(x2apic_extra_bits) =
|
||||
nodeid << (uvh_apicid.s.pnode_shift - 1);
|
||||
uv_system_type = UV_NON_UNIQUE_APIC;
|
||||
uv_set_apicid_hibit();
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
@ -155,6 +175,7 @@ static int __cpuinit uv_wakeup_secondary(int phys_apicid, unsigned long start_ri
|
|||
int pnode;
|
||||
|
||||
pnode = uv_apicid_to_pnode(phys_apicid);
|
||||
phys_apicid |= uv_apicid_hibits;
|
||||
val = (1UL << UVH_IPI_INT_SEND_SHFT) |
|
||||
(phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
|
||||
((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
|
||||
|
@ -236,7 +257,7 @@ static unsigned int uv_cpu_mask_to_apicid(const struct cpumask *cpumask)
|
|||
int cpu = cpumask_first(cpumask);
|
||||
|
||||
if ((unsigned)cpu < nr_cpu_ids)
|
||||
return per_cpu(x86_cpu_to_apicid, cpu);
|
||||
return per_cpu(x86_cpu_to_apicid, cpu) | uv_apicid_hibits;
|
||||
else
|
||||
return BAD_APICID;
|
||||
}
|
||||
|
@ -255,7 +276,7 @@ uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
|
|||
if (cpumask_test_cpu(cpu, cpu_online_mask))
|
||||
break;
|
||||
}
|
||||
return per_cpu(x86_cpu_to_apicid, cpu);
|
||||
return per_cpu(x86_cpu_to_apicid, cpu) | uv_apicid_hibits;
|
||||
}
|
||||
|
||||
static unsigned int x2apic_get_apic_id(unsigned long x)
|
||||
|
|
|
@ -381,6 +381,20 @@ static void release_pmc_hardware(void) {}
|
|||
|
||||
#endif
|
||||
|
||||
static bool check_hw_exists(void)
|
||||
{
|
||||
u64 val, val_new = 0;
|
||||
int ret = 0;
|
||||
|
||||
val = 0xabcdUL;
|
||||
ret |= checking_wrmsrl(x86_pmu.perfctr, val);
|
||||
ret |= rdmsrl_safe(x86_pmu.perfctr, &val_new);
|
||||
if (ret || val != val_new)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static void reserve_ds_buffers(void);
|
||||
static void release_ds_buffers(void);
|
||||
|
||||
|
@ -1372,6 +1386,12 @@ void __init init_hw_perf_events(void)
|
|||
|
||||
pmu_check_apic();
|
||||
|
||||
/* sanity check that the hardware exists or is emulated */
|
||||
if (!check_hw_exists()) {
|
||||
pr_cont("Broken PMU hardware detected, software events only.\n");
|
||||
return;
|
||||
}
|
||||
|
||||
pr_cont("%s PMU driver.\n", x86_pmu.name);
|
||||
|
||||
if (x86_pmu.quirks)
|
||||
|
|
|
@ -395,7 +395,7 @@ sysenter_past_esp:
|
|||
* A tiny bit of offset fixup is necessary - 4*4 means the 4 words
|
||||
* pushed above; +8 corresponds to copy_thread's esp0 setting.
|
||||
*/
|
||||
pushl_cfi (TI_sysenter_return-THREAD_SIZE_asm+8+4*4)(%esp)
|
||||
pushl_cfi ((TI_sysenter_return)-THREAD_SIZE_asm+8+4*4)(%esp)
|
||||
CFI_REL_OFFSET eip, 0
|
||||
|
||||
pushl_cfi %eax
|
||||
|
|
|
@ -295,6 +295,7 @@ ENDPROC(native_usergs_sysret64)
|
|||
.endm
|
||||
|
||||
/* save partial stack frame */
|
||||
.pushsection .kprobes.text, "ax"
|
||||
ENTRY(save_args)
|
||||
XCPT_FRAME
|
||||
cld
|
||||
|
@ -334,6 +335,7 @@ ENTRY(save_args)
|
|||
ret
|
||||
CFI_ENDPROC
|
||||
END(save_args)
|
||||
.popsection
|
||||
|
||||
ENTRY(save_rest)
|
||||
PARTIAL_FRAME 1 REST_SKIP+8
|
||||
|
|
|
@ -433,6 +433,10 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args)
|
|||
dr6_p = (unsigned long *)ERR_PTR(args->err);
|
||||
dr6 = *dr6_p;
|
||||
|
||||
/* If it's a single step, TRAP bits are random */
|
||||
if (dr6 & DR_STEP)
|
||||
return NOTIFY_DONE;
|
||||
|
||||
/* Do an early return if no trap bits are set in DR6 */
|
||||
if ((dr6 & DR_TRAP_BITS) == 0)
|
||||
return NOTIFY_DONE;
|
||||
|
|
|
@ -25,7 +25,6 @@ struct pci_hostbridge_probe {
|
|||
};
|
||||
|
||||
static u64 __cpuinitdata fam10h_pci_mmconf_base;
|
||||
static int __cpuinitdata fam10h_pci_mmconf_base_status;
|
||||
|
||||
static struct pci_hostbridge_probe pci_probes[] __cpuinitdata = {
|
||||
{ 0, 0x18, PCI_VENDOR_ID_AMD, 0x1200 },
|
||||
|
@ -44,10 +43,12 @@ static int __cpuinit cmp_range(const void *x1, const void *x2)
|
|||
return start1 - start2;
|
||||
}
|
||||
|
||||
/*[47:0] */
|
||||
/* need to avoid (0xfd<<32) and (0xfe<<32), ht used space */
|
||||
#define MMCONF_UNIT (1ULL << FAM10H_MMIO_CONF_BASE_SHIFT)
|
||||
#define MMCONF_MASK (~(MMCONF_UNIT - 1))
|
||||
#define MMCONF_SIZE (MMCONF_UNIT << 8)
|
||||
/* need to avoid (0xfd<<32), (0xfe<<32), and (0xff<<32), ht used space */
|
||||
#define FAM10H_PCI_MMCONF_BASE (0xfcULL<<32)
|
||||
#define BASE_VALID(b) ((b != (0xfdULL << 32)) && (b != (0xfeULL << 32)))
|
||||
#define BASE_VALID(b) ((b) + MMCONF_SIZE <= (0xfdULL<<32) || (b) >= (1ULL<<40))
|
||||
static void __cpuinit get_fam10h_pci_mmconf_base(void)
|
||||
{
|
||||
int i;
|
||||
|
@ -64,12 +65,11 @@ static void __cpuinit get_fam10h_pci_mmconf_base(void)
|
|||
struct range range[8];
|
||||
|
||||
/* only try to get setting from BSP */
|
||||
/* -1 or 1 */
|
||||
if (fam10h_pci_mmconf_base_status)
|
||||
if (fam10h_pci_mmconf_base)
|
||||
return;
|
||||
|
||||
if (!early_pci_allowed())
|
||||
goto fail;
|
||||
return;
|
||||
|
||||
found = 0;
|
||||
for (i = 0; i < ARRAY_SIZE(pci_probes); i++) {
|
||||
|
@ -91,7 +91,7 @@ static void __cpuinit get_fam10h_pci_mmconf_base(void)
|
|||
}
|
||||
|
||||
if (!found)
|
||||
goto fail;
|
||||
return;
|
||||
|
||||
/* SYS_CFG */
|
||||
address = MSR_K8_SYSCFG;
|
||||
|
@ -99,16 +99,16 @@ static void __cpuinit get_fam10h_pci_mmconf_base(void)
|
|||
|
||||
/* TOP_MEM2 is not enabled? */
|
||||
if (!(val & (1<<21))) {
|
||||
tom2 = 0;
|
||||
tom2 = 1ULL << 32;
|
||||
} else {
|
||||
/* TOP_MEM2 */
|
||||
address = MSR_K8_TOP_MEM2;
|
||||
rdmsrl(address, val);
|
||||
tom2 = val & (0xffffULL<<32);
|
||||
tom2 = max(val & 0xffffff800000ULL, 1ULL << 32);
|
||||
}
|
||||
|
||||
if (base <= tom2)
|
||||
base = tom2 + (1ULL<<32);
|
||||
base = (tom2 + 2 * MMCONF_UNIT - 1) & MMCONF_MASK;
|
||||
|
||||
/*
|
||||
* need to check if the range is in the high mmio range that is
|
||||
|
@ -123,11 +123,11 @@ static void __cpuinit get_fam10h_pci_mmconf_base(void)
|
|||
if (!(reg & 3))
|
||||
continue;
|
||||
|
||||
start = (((u64)reg) << 8) & (0xffULL << 32); /* 39:16 on 31:8*/
|
||||
start = (u64)(reg & 0xffffff00) << 8; /* 39:16 on 31:8*/
|
||||
reg = read_pci_config(bus, slot, 1, 0x84 + (i << 3));
|
||||
end = (((u64)reg) << 8) & (0xffULL << 32); /* 39:16 on 31:8*/
|
||||
end = ((u64)(reg & 0xffffff00) << 8) | 0xffff; /* 39:16 on 31:8*/
|
||||
|
||||
if (!end)
|
||||
if (end < tom2)
|
||||
continue;
|
||||
|
||||
range[hi_mmio_num].start = start;
|
||||
|
@ -143,32 +143,27 @@ static void __cpuinit get_fam10h_pci_mmconf_base(void)
|
|||
|
||||
if (range[hi_mmio_num - 1].end < base)
|
||||
goto out;
|
||||
if (range[0].start > base)
|
||||
if (range[0].start > base + MMCONF_SIZE)
|
||||
goto out;
|
||||
|
||||
/* need to find one window */
|
||||
base = range[0].start - (1ULL << 32);
|
||||
base = (range[0].start & MMCONF_MASK) - MMCONF_UNIT;
|
||||
if ((base > tom2) && BASE_VALID(base))
|
||||
goto out;
|
||||
base = range[hi_mmio_num - 1].end + (1ULL << 32);
|
||||
if ((base > tom2) && BASE_VALID(base))
|
||||
base = (range[hi_mmio_num - 1].end + MMCONF_UNIT) & MMCONF_MASK;
|
||||
if (BASE_VALID(base))
|
||||
goto out;
|
||||
/* need to find window between ranges */
|
||||
if (hi_mmio_num > 1)
|
||||
for (i = 0; i < hi_mmio_num - 1; i++) {
|
||||
if (range[i + 1].start > (range[i].end + (1ULL << 32))) {
|
||||
base = range[i].end + (1ULL << 32);
|
||||
if ((base > tom2) && BASE_VALID(base))
|
||||
for (i = 1; i < hi_mmio_num; i++) {
|
||||
base = (range[i - 1].end + MMCONF_UNIT) & MMCONF_MASK;
|
||||
val = range[i].start & MMCONF_MASK;
|
||||
if (val >= base + MMCONF_SIZE && BASE_VALID(base))
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
fail:
|
||||
fam10h_pci_mmconf_base_status = -1;
|
||||
return;
|
||||
|
||||
out:
|
||||
fam10h_pci_mmconf_base = base;
|
||||
fam10h_pci_mmconf_base_status = 1;
|
||||
}
|
||||
|
||||
void __cpuinit fam10h_check_enable_mmcfg(void)
|
||||
|
@ -190,11 +185,10 @@ void __cpuinit fam10h_check_enable_mmcfg(void)
|
|||
|
||||
/* only trust the one handle 256 buses, if acpi=off */
|
||||
if (!acpi_pci_disabled || busnbits >= 8) {
|
||||
u64 base;
|
||||
base = val & (0xffffULL << 32);
|
||||
if (fam10h_pci_mmconf_base_status <= 0) {
|
||||
u64 base = val & MMCONF_MASK;
|
||||
|
||||
if (!fam10h_pci_mmconf_base) {
|
||||
fam10h_pci_mmconf_base = base;
|
||||
fam10h_pci_mmconf_base_status = 1;
|
||||
return;
|
||||
} else if (fam10h_pci_mmconf_base == base)
|
||||
return;
|
||||
|
@ -206,8 +200,10 @@ void __cpuinit fam10h_check_enable_mmcfg(void)
|
|||
* with 256 buses
|
||||
*/
|
||||
get_fam10h_pci_mmconf_base();
|
||||
if (fam10h_pci_mmconf_base_status <= 0)
|
||||
if (!fam10h_pci_mmconf_base) {
|
||||
pci_probe &= ~PCI_CHECK_ENABLE_AMD_MMCONF;
|
||||
return;
|
||||
}
|
||||
|
||||
printk(KERN_INFO "Enable MMCONFIG on AMD Family 10h\n");
|
||||
val &= ~((FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT) |
|
||||
|
|
|
@ -223,7 +223,7 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
|
|||
|
||||
static void __cpuinit calculate_tlb_offset(void)
|
||||
{
|
||||
int cpu, node, nr_node_vecs;
|
||||
int cpu, node, nr_node_vecs, idx = 0;
|
||||
/*
|
||||
* we are changing tlb_vector_offset for each CPU in runtime, but this
|
||||
* will not cause inconsistency, as the write is atomic under X86. we
|
||||
|
@ -239,7 +239,7 @@ static void __cpuinit calculate_tlb_offset(void)
|
|||
nr_node_vecs = NUM_INVALIDATE_TLB_VECTORS/nr_online_nodes;
|
||||
|
||||
for_each_online_node(node) {
|
||||
int node_offset = (node % NUM_INVALIDATE_TLB_VECTORS) *
|
||||
int node_offset = (idx % NUM_INVALIDATE_TLB_VECTORS) *
|
||||
nr_node_vecs;
|
||||
int cpu_offset = 0;
|
||||
for_each_cpu(cpu, cpumask_of_node(node)) {
|
||||
|
@ -248,6 +248,7 @@ static void __cpuinit calculate_tlb_offset(void)
|
|||
cpu_offset++;
|
||||
cpu_offset = cpu_offset % nr_node_vecs;
|
||||
}
|
||||
idx++;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -1455,7 +1455,7 @@ static void __init uv_init_uvhub(int uvhub, int vector)
|
|||
* the below initialization can't be in firmware because the
|
||||
* messaging IRQ will be determined by the OS
|
||||
*/
|
||||
apicid = uvhub_to_first_apicid(uvhub);
|
||||
apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits;
|
||||
uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG,
|
||||
((apicid << 32) | vector));
|
||||
}
|
||||
|
|
|
@ -89,6 +89,7 @@ static void uv_rtc_send_IPI(int cpu)
|
|||
|
||||
apicid = cpu_physical_id(cpu);
|
||||
pnode = uv_apicid_to_pnode(apicid);
|
||||
apicid |= uv_apicid_hibits;
|
||||
val = (1UL << UVH_IPI_INT_SEND_SHFT) |
|
||||
(apicid << UVH_IPI_INT_APIC_ID_SHFT) |
|
||||
(X86_PLATFORM_IPI_VECTOR << UVH_IPI_INT_VECTOR_SHFT);
|
||||
|
@ -107,6 +108,7 @@ static int uv_intr_pending(int pnode)
|
|||
static int uv_setup_intr(int cpu, u64 expires)
|
||||
{
|
||||
u64 val;
|
||||
unsigned long apicid = cpu_physical_id(cpu) | uv_apicid_hibits;
|
||||
int pnode = uv_cpu_to_pnode(cpu);
|
||||
|
||||
uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG,
|
||||
|
@ -117,7 +119,7 @@ static int uv_setup_intr(int cpu, u64 expires)
|
|||
UVH_EVENT_OCCURRED0_RTC1_MASK);
|
||||
|
||||
val = (X86_PLATFORM_IPI_VECTOR << UVH_RTC1_INT_CONFIG_VECTOR_SHFT) |
|
||||
((u64)cpu_physical_id(cpu) << UVH_RTC1_INT_CONFIG_APIC_ID_SHFT);
|
||||
((u64)apicid << UVH_RTC1_INT_CONFIG_APIC_ID_SHFT);
|
||||
|
||||
/* Set configuration */
|
||||
uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG, val);
|
||||
|
|
|
@ -75,6 +75,11 @@ DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
|
|||
enum xen_domain_type xen_domain_type = XEN_NATIVE;
|
||||
EXPORT_SYMBOL_GPL(xen_domain_type);
|
||||
|
||||
unsigned long *machine_to_phys_mapping = (void *)MACH2PHYS_VIRT_START;
|
||||
EXPORT_SYMBOL(machine_to_phys_mapping);
|
||||
unsigned int machine_to_phys_order;
|
||||
EXPORT_SYMBOL(machine_to_phys_order);
|
||||
|
||||
struct start_info *xen_start_info;
|
||||
EXPORT_SYMBOL_GPL(xen_start_info);
|
||||
|
||||
|
@ -1090,6 +1095,8 @@ static void __init xen_setup_stackprotector(void)
|
|||
/* First C function to be called on Xen boot */
|
||||
asmlinkage void __init xen_start_kernel(void)
|
||||
{
|
||||
struct physdev_set_iopl set_iopl;
|
||||
int rc;
|
||||
pgd_t *pgd;
|
||||
|
||||
if (!xen_start_info)
|
||||
|
@ -1097,6 +1104,8 @@ asmlinkage void __init xen_start_kernel(void)
|
|||
|
||||
xen_domain_type = XEN_PV_DOMAIN;
|
||||
|
||||
xen_setup_machphys_mapping();
|
||||
|
||||
/* Install Xen paravirt ops */
|
||||
pv_info = xen_info;
|
||||
pv_init_ops = xen_init_ops;
|
||||
|
@ -1191,8 +1200,6 @@ asmlinkage void __init xen_start_kernel(void)
|
|||
/* Allocate and initialize top and mid mfn levels for p2m structure */
|
||||
xen_build_mfn_list_list();
|
||||
|
||||
init_mm.pgd = pgd;
|
||||
|
||||
/* keep using Xen gdt for now; no urgent need to change it */
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
|
@ -1202,10 +1209,18 @@ asmlinkage void __init xen_start_kernel(void)
|
|||
#else
|
||||
pv_info.kernel_rpl = 0;
|
||||
#endif
|
||||
|
||||
/* set the limit of our address space */
|
||||
xen_reserve_top();
|
||||
|
||||
/* We used to do this in xen_arch_setup, but that is too late on AMD
|
||||
* were early_cpu_init (run before ->arch_setup()) calls early_amd_init
|
||||
* which pokes 0xcf8 port.
|
||||
*/
|
||||
set_iopl.iopl = 1;
|
||||
rc = HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
|
||||
if (rc != 0)
|
||||
xen_raw_printk("physdev_op failed %d\n", rc);
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
/* set up basic CPUID stuff */
|
||||
cpu_detect(&new_cpu_data);
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue