Merge master.kernel.org:/home/rmk/linux-2.6-arm
* master.kernel.org:/home/rmk/linux-2.6-arm:
  ARM: GIC: don't disable software generated interrupts
  ARM: 6472/1: vexpress ct-ca9x4: only set twd_base if local timers are being used
  ARM: arch/arm/kernel/traps.c: Convert sprintf_symbol to %pS
  ARM: arch/arm/kernel/hw_breakpoint.c: Convert WARN_ON to WARN
  ARM: 6462/1: EP93xx: Document DMA M2P API
  ARM: 6470/1: atomic64: use generic implementation for OABI configurations
  ARM: 6469/1: perf-events: squash compiler warning
  ARM: 6468/1: backtrace: fix calculation of thread stack base
  ARM: Fix DMA coherent allocator alignment
  ARM: orion5x/kirkwood/mv78xx0: fix MPP configuration corner cases
  [ARM] TS-78xxx NAND resource type should be IORESOURCE_MEM
  ARM: pxa/saar: fix the building failure caused by typo
  ARM: pxa/cm-x2xx: remove duplicate call to pxa27x_init_irq
  ARM: pxa: fix the missing definition of IRQ_BOARD_END
  ARM: mmp: fix cpuid detection on mmp2
  [ARM] Kirkwood: restrict the scope of the PCIe reset workaround
  [ARM] Kirkwood: fix timer initialization for LaCie boards
  [ARM] Kirkwood: enhance TCLK detection
commit a0a6da1a73
@@ -6,7 +6,7 @@ config ARM
         select HAVE_MEMBLOCK
         select RTC_LIB
         select SYS_SUPPORTS_APM_EMULATION
-        select GENERIC_ATOMIC64 if (!CPU_32v6K)
+        select GENERIC_ATOMIC64 if (!CPU_32v6K || !AEABI)
         select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
         select HAVE_ARCH_KGDB
         select HAVE_KPROBES if (!XIP_KERNEL)
@@ -251,15 +251,16 @@ void __init gic_dist_init(unsigned int gic_nr, void __iomem *base,
                 writel(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
 
         /*
-         * Set priority on all interrupts.
+         * Set priority on all global interrupts.
         */
-        for (i = 0; i < max_irq; i += 4)
+        for (i = 32; i < max_irq; i += 4)
                 writel(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);
 
         /*
-         * Disable all interrupts.
+         * Disable all interrupts. Leave the PPI and SGIs alone
+         * as these enables are banked registers.
         */
-        for (i = 0; i < max_irq; i += 32)
+        for (i = 32; i < max_irq; i += 32)
                 writel(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);
 
         /*
@@ -277,11 +278,30 @@ void __init gic_dist_init(unsigned int gic_nr, void __iomem *base,
 
 void __cpuinit gic_cpu_init(unsigned int gic_nr, void __iomem *base)
 {
+        void __iomem *dist_base;
+        int i;
+
         if (gic_nr >= MAX_GIC_NR)
                 BUG();
 
+        dist_base = gic_data[gic_nr].dist_base;
+        BUG_ON(!dist_base);
+
         gic_data[gic_nr].cpu_base = base;
 
+        /*
+         * Deal with the banked PPI and SGI interrupts - disable all
+         * PPI interrupts, ensure all SGI interrupts are enabled.
+         */
+        writel(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR);
+        writel(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET);
+
+        /*
+         * Set priority on PPI and SGI interrupts
+         */
+        for (i = 0; i < 32; i += 4)
+                writel(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4);
+
         writel(0xf0, base + GIC_CPU_PRIMASK);
         writel(1, base + GIC_CPU_CTRL);
 }
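Note on the loop bounds changed above: GIC interrupt IDs 0-15 are the software generated interrupts (SGIs) and 16-31 the private peripheral interrupts (PPIs), whose enable and priority registers are banked per CPU. That is why gic_dist_init() now starts its loops at ID 32 and gic_cpu_init() programs the first 32 IDs on each CPU. The offset arithmetic in the writel() calls follows from the distributor register layout; below is a minimal stand-alone sketch with illustrative helper names that are not part of the patch.

#include <stdio.h>

/* GIC distributor layout: priority registers carry one byte per interrupt
 * (4 IRQs per 32-bit word); enable set/clear registers carry one bit per
 * interrupt (32 IRQs per word).  For the strides the kernel loops use
 * (multiples of 4 and 32), these match "i * 4 / 4" and "i * 4 / 32". */
static unsigned int gic_pri_offset(unsigned int irq)
{
        return (irq / 4) * 4;   /* byte offset of the word holding this IRQ's priority byte */
}

static unsigned int gic_enable_offset(unsigned int irq)
{
        return (irq / 32) * 4;  /* byte offset of the word holding this IRQ's enable bit */
}

int main(void)
{
        /* First global interrupt is ID 32: its priority byte sits at offset 32,
         * its enable bit in the second enable word (offset 4). */
        printf("IRQ 32: PRI offset %u, ENABLE offset %u\n",
               gic_pri_offset(32), gic_enable_offset(32));
        return 0;
}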
@@ -75,7 +75,7 @@ extern unsigned long it8152_base_address;
   IT8152_PD_IRQ(1)  USB (USBR)
   IT8152_PD_IRQ(0)  Audio controller (ACR)
  */
-#define IT8152_IRQ(x)   (IRQ_BOARD_END + (x))
+#define IT8152_IRQ(x)   (IRQ_BOARD_START + (x))
 
 /* IRQ-sources in 3 groups - local devices, LPC (serial), and external PCI */
 #define IT8152_LD_IRQ_COUNT     9
@@ -748,8 +748,7 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
                 breakpoint_handler(addr, regs);
                 break;
         case ARM_ENTRY_ASYNC_WATCHPOINT:
-                WARN_ON("Asynchronous watchpoint exception taken. "
-                        "Debugging results may be unreliable");
+                WARN(1, "Asynchronous watchpoint exception taken. Debugging results may be unreliable\n");
         case ARM_ENTRY_SYNC_WATCHPOINT:
                 watchpoint_handler(addr, regs);
                 break;
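The hw_breakpoint change above fixes a misuse of WARN_ON(): passing a string literal makes the "condition" a non-NULL pointer, so it is always true and the message text itself is never printed. WARN(1, fmt, ...) both raises the warning and prints the formatted message. A small user-space sketch of the difference, using stand-in macros that only mimic the kernel semantics:

#include <stdio.h>

/* Stand-ins for the kernel macros, illustrative only:
 * WARN_ON(cond) fires when cond is non-zero; WARN(cond, fmt, ...) also
 * prints a formatted message. */
#define WARN_ON(cond) \
        ((cond) ? fprintf(stderr, "WARNING at %s:%d\n", __FILE__, __LINE__) : 0)
#define WARN(cond, fmt, ...) \
        ((cond) ? fprintf(stderr, "WARNING: " fmt, ##__VA_ARGS__) : 0)

int main(void)
{
        /* Bug pattern removed above: the string literal is a non-NULL
         * pointer, so the "condition" is always true and the text is lost
         * (compilers may even warn that the address is always non-zero). */
        WARN_ON("Asynchronous watchpoint exception taken.");

        /* Fixed form: condition and message are separate arguments. */
        WARN(1, "Asynchronous watchpoint exception taken. "
                "Debugging results may be unreliable\n");
        return 0;
}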
@@ -1749,7 +1749,7 @@ static inline int armv7_pmnc_has_overflowed(unsigned long pmnc)
 static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc,
                                         enum armv7_counters counter)
 {
-        int ret;
+        int ret = 0;
 
         if (counter == ARMV7_CYCLE_COUNTER)
                 ret = pmnc & ARMV7_FLAG_C;
@@ -28,7 +28,7 @@ int notrace unwind_frame(struct stackframe *frame)
 
         /* only go to a higher address on the stack */
         low = frame->sp;
-        high = ALIGN(low, THREAD_SIZE) + THREAD_SIZE;
+        high = ALIGN(low, THREAD_SIZE);
 
         /* check current frame pointer is within bounds */
         if (fp < (low + 12) || fp + 4 >= high)
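The backtrace fix above tightens the upper bound of the current thread's stack: kernel stacks are THREAD_SIZE aligned and grow down, so rounding the stack pointer up to the next THREAD_SIZE boundary already gives the top of the stack, and the old extra "+ THREAD_SIZE" let the unwinder wander into the neighbouring allocation. A small worked example, assuming 8 KB stacks and an illustrative stack pointer value:

#include <stdio.h>

#define THREAD_SIZE     0x2000UL                        /* assume 8 KB kernel stacks */
#define ALIGN(x, a)     (((x) + (a) - 1) & ~((a) - 1))  /* round up, as in the kernel */

int main(void)
{
        unsigned long sp = 0xc0582e40UL;                /* hypothetical stack pointer */

        unsigned long high_old = ALIGN(sp, THREAD_SIZE) + THREAD_SIZE; /* 0xc0586000: one full stack too far */
        unsigned long high_new = ALIGN(sp, THREAD_SIZE);               /* 0xc0584000: actual top of this stack */

        printf("old bound %#lx, fixed bound %#lx\n", high_old, high_new);
        return 0;
}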
@@ -53,10 +53,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
 {
 #ifdef CONFIG_KALLSYMS
-        char sym1[KSYM_SYMBOL_LEN], sym2[KSYM_SYMBOL_LEN];
-        sprint_symbol(sym1, where);
-        sprint_symbol(sym2, from);
-        printk("[<%08lx>] (%s) from [<%08lx>] (%s)\n", where, sym1, from, sym2);
+        printk("[<%08lx>] (%pS) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
 #else
         printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
 #endif
@@ -279,7 +279,7 @@ int unwind_frame(struct stackframe *frame)
 
         /* only go to a higher address on the stack */
         low = frame->sp;
-        high = ALIGN(low, THREAD_SIZE) + THREAD_SIZE;
+        high = ALIGN(low, THREAD_SIZE);
 
         pr_debug("%s(pc = %08lx lr = %08lx sp = %08lx)\n", __func__,
                  frame->pc, frame->lr, frame->sp);
@@ -1,5 +1,13 @@
-/*
- * arch/arm/mach-ep93xx/include/mach/dma.h
+/**
+ * DOC: EP93xx DMA M2P memory to peripheral and peripheral to memory engine
+ *
+ * The EP93xx DMA M2P subsystem handles DMA transfers between memory and
+ * peripherals. DMA M2P channels are available for audio, UARTs and IrDA.
+ * See chapter 10 of the EP93xx users guide for full details on the DMA M2P
+ * engine.
+ *
+ * See sound/soc/ep93xx/ep93xx-pcm.c for an example use of the DMA M2P code.
+ *
  */
 
 #ifndef __ASM_ARCH_DMA_H
@@ -8,12 +16,34 @@
 #include <linux/list.h>
 #include <linux/types.h>
 
+/**
+ * struct ep93xx_dma_buffer - Information about a buffer to be transferred
+ * using the DMA M2P engine
+ *
+ * @list: Entry in DMA buffer list
+ * @bus_addr: Physical address of the buffer
+ * @size: Size of the buffer in bytes
+ */
 struct ep93xx_dma_buffer {
         struct list_head        list;
         u32                     bus_addr;
         u16                     size;
 };
 
+/**
+ * struct ep93xx_dma_m2p_client - Information about a DMA M2P client
+ *
+ * @name: Unique name for this client
+ * @flags: Client flags
+ * @cookie: User data to pass to callback functions
+ * @buffer_started: Non NULL function to call when a transfer is started.
+ *                  The arguments are the user data cookie and the DMA
+ *                  buffer which is starting.
+ * @buffer_finished: Non NULL function to call when a transfer is completed.
+ *                   The arguments are the user data cookie, the DMA buffer
+ *                   which has completed, and a boolean flag indicating if
+ *                   the transfer had an error.
+ */
 struct ep93xx_dma_m2p_client {
         char                    *name;
         u8                      flags;
@@ -24,10 +54,11 @@ struct ep93xx_dma_m2p_client {
                                 struct ep93xx_dma_buffer *buf,
                                 int bytes, int error);
 
-        /* Internal to the DMA code. */
+        /* private: Internal use only */
         void *channel;
 };
 
+/* DMA M2P ports */
 #define EP93XX_DMA_M2P_PORT_I2S1        0x00
 #define EP93XX_DMA_M2P_PORT_I2S2        0x01
 #define EP93XX_DMA_M2P_PORT_AAC1        0x02
@@ -39,18 +70,80 @@ struct ep93xx_dma_m2p_client {
 #define EP93XX_DMA_M2P_PORT_UART3       0x08
 #define EP93XX_DMA_M2P_PORT_IRDA        0x09
 #define EP93XX_DMA_M2P_PORT_MASK        0x0f
-#define EP93XX_DMA_M2P_TX               0x00
-#define EP93XX_DMA_M2P_RX               0x10
-#define EP93XX_DMA_M2P_ABORT_ON_ERROR   0x20
-#define EP93XX_DMA_M2P_IGNORE_ERROR     0x40
-#define EP93XX_DMA_M2P_ERROR_MASK       0x60
 
-int ep93xx_dma_m2p_client_register(struct ep93xx_dma_m2p_client *m2p);
+/* DMA M2P client flags */
+#define EP93XX_DMA_M2P_TX               0x00    /* Memory to peripheral */
+#define EP93XX_DMA_M2P_RX               0x10    /* Peripheral to memory */
+
+/*
+ * DMA M2P client error handling flags. See the EP93xx users guide
+ * documentation on the DMA M2P CONTROL register for more details
+ */
+#define EP93XX_DMA_M2P_ABORT_ON_ERROR   0x20    /* Abort on peripheral error */
+#define EP93XX_DMA_M2P_IGNORE_ERROR     0x40    /* Ignore peripheral errors */
+#define EP93XX_DMA_M2P_ERROR_MASK       0x60    /* Mask of error bits */
+
+/**
+ * ep93xx_dma_m2p_client_register - Register a client with the DMA M2P
+ * subsystem
+ *
+ * @m2p: Client information to register
+ * returns 0 on success
+ *
+ * The DMA M2P subsystem allocates a channel and an interrupt line for the DMA
+ * client
+ */
+int ep93xx_dma_m2p_client_register(struct ep93xx_dma_m2p_client *m2p);
+
+/**
+ * ep93xx_dma_m2p_client_unregister - Unregister a client from the DMA M2P
+ * subsystem
+ *
+ * @m2p: Client to unregister
+ *
+ * Any transfers currently in progress will be completed in hardware, but
+ * ignored in software.
+ */
 void ep93xx_dma_m2p_client_unregister(struct ep93xx_dma_m2p_client *m2p);
+
+/**
+ * ep93xx_dma_m2p_submit - Submit a DMA M2P transfer
+ *
+ * @m2p: DMA Client to submit the transfer on
+ * @buf: DMA Buffer to submit
+ *
+ * If the current or next transfer positions are free on the M2P client then
+ * the transfer is started immediately. If not, the transfer is added to the
+ * list of pending transfers. This function must not be called from the
+ * buffer_finished callback for an M2P channel.
+ *
+ */
 void ep93xx_dma_m2p_submit(struct ep93xx_dma_m2p_client *m2p,
                            struct ep93xx_dma_buffer *buf);
+
+/**
+ * ep93xx_dma_m2p_submit_recursive - Put a DMA transfer on the pending list
+ * for an M2P channel
+ *
+ * @m2p: DMA Client to submit the transfer on
+ * @buf: DMA Buffer to submit
+ *
+ * This function must only be called from the buffer_finished callback for an
+ * M2P channel. It is commonly used to add the next transfer in a chained list
+ * of DMA transfers.
+ */
 void ep93xx_dma_m2p_submit_recursive(struct ep93xx_dma_m2p_client *m2p,
                                      struct ep93xx_dma_buffer *buf);
+
+/**
+ * ep93xx_dma_m2p_flush - Flush all pending transfers on a DMA M2P client
+ *
+ * @m2p: DMA client to flush transfers on
+ *
+ * Any transfers currently in progress will be completed in hardware, but
+ * ignored in software.
+ *
+ */
 void ep93xx_dma_m2p_flush(struct ep93xx_dma_m2p_client *m2p);
 
 #endif /* __ASM_ARCH_DMA_H */
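Since the hunk above documents the whole DMA M2P client API, a condensed usage sketch may help tie the pieces together. The prototypes and flag names are the ones declared in this header; the client name, port choice and callback bodies are purely illustrative, and error handling is elided:

#include <linux/list.h>
#include <linux/types.h>
#include <mach/dma.h>

/* Illustrative client: stream audio buffers to the AAC1 port. */

static void example_buffer_started(void *cookie, struct ep93xx_dma_buffer *buf)
{
        /* 'buf' is now owned by the hardware */
}

static void example_buffer_finished(void *cookie, struct ep93xx_dma_buffer *buf,
                                    int bytes, int error)
{
        /* Only from here may ep93xx_dma_m2p_submit_recursive() be used to
         * queue the next buffer of a chained transfer. */
}

static struct ep93xx_dma_m2p_client example_client = {
        .name            = "example-audio",
        .flags           = EP93XX_DMA_M2P_PORT_AAC1 | EP93XX_DMA_M2P_TX |
                           EP93XX_DMA_M2P_IGNORE_ERROR,
        .buffer_started  = example_buffer_started,
        .buffer_finished = example_buffer_finished,
};

static int example_start(struct ep93xx_dma_buffer *buf)
{
        int err = ep93xx_dma_m2p_client_register(&example_client);
        if (err)
                return err;

        /* buf->bus_addr and buf->size describe the physical buffer to move */
        ep93xx_dma_m2p_submit(&example_client, buf);
        return 0;
}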
@@ -854,10 +854,9 @@ int __init kirkwood_find_tclk(void)
 
         kirkwood_pcie_id(&dev, &rev);
 
-        if ((dev == MV88F6281_DEV_ID && (rev == MV88F6281_REV_A0 ||
-                                         rev == MV88F6281_REV_A1)) ||
-            (dev == MV88F6282_DEV_ID))
-                return 200000000;
+        if (dev == MV88F6281_DEV_ID || dev == MV88F6282_DEV_ID)
+                if (((readl(SAMPLE_AT_RESET) >> 21) & 1) == 0)
+                        return 200000000;
 
         return 166666667;
 }
@@ -225,5 +225,5 @@ MACHINE_START(D2NET_V2, "LaCie d2 Network v2")
         .init_machine   = d2net_v2_init,
         .map_io         = kirkwood_map_io,
         .init_irq       = kirkwood_init_irq,
-        .timer          = &lacie_v2_timer,
+        .timer          = &kirkwood_timer,
 MACHINE_END
@@ -111,17 +111,3 @@ void __init lacie_v2_hdd_power_init(int hdd_num)
                         pr_err("Failed to power up HDD%d\n", i + 1);
         }
 }
-
-/*****************************************************************************
- * Timer
- ****************************************************************************/
-
-static void lacie_v2_timer_init(void)
-{
-        kirkwood_tclk = 166666667;
-        orion_time_init(IRQ_KIRKWOOD_BRIDGE, kirkwood_tclk);
-}
-
-struct sys_timer lacie_v2_timer = {
-        .init = lacie_v2_timer_init,
-};
@@ -13,6 +13,4 @@ void lacie_v2_register_flash(void);
 void lacie_v2_register_i2c_devices(void);
 void lacie_v2_hdd_power_init(int hdd_num);
 
-extern struct sys_timer lacie_v2_timer;
-
 #endif
@@ -59,7 +59,7 @@ void __init kirkwood_mpp_conf(unsigned int *mpp_list)
         }
         printk("\n");
 
-        while (*mpp_list) {
+        for ( ; *mpp_list; mpp_list++) {
                 unsigned int num = MPP_NUM(*mpp_list);
                 unsigned int sel = MPP_SEL(*mpp_list);
                 int shift, gpio_mode;
@@ -88,8 +88,6 @@ void __init kirkwood_mpp_conf(unsigned int *mpp_list)
                 if (sel != 0)
                         gpio_mode = 0;
                 orion_gpio_set_valid(num, gpio_mode);
-
-                mpp_list++;
         }
 
         printk(KERN_DEBUG " final MPP regs:");
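The kirkwood MPP fix above (repeated below for mv78xx0 and orion5x) moves the list increment into the for-statement header. One corner case this guards against: if any path through the loop body reaches the next iteration without executing a trailing mpp_list++ (for example via continue on an unsupported pin), the old while loop spins on the same entry forever, whereas the for form always advances. A generic, hedged illustration of the pattern, not the driver code itself:

#include <stdio.h>

/* A zero-terminated list, as in the MPP tables. */
static unsigned int mpp_list[] = { 3, 7, 0 };

int main(void)
{
        /* Fragile form: any 'continue' before the increment re-processes
         * the same entry forever.
         *
         *      while (*p) {
         *              if (skip(*p))
         *                      continue;       // p is never advanced
         *              ...
         *              p++;
         *      }
         */

        /* Robust form used by the fix: the increment is part of the loop
         * statement, so every iteration advances the cursor. */
        for (unsigned int *p = mpp_list; *p; p++)
                printf("configuring MPP entry %u\n", *p);

        return 0;
}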
@@ -262,7 +262,7 @@ MACHINE_START(NETSPACE_V2, "LaCie Network Space v2")
         .init_machine   = netspace_v2_init,
         .map_io         = kirkwood_map_io,
         .init_irq       = kirkwood_init_irq,
-        .timer          = &lacie_v2_timer,
+        .timer          = &kirkwood_timer,
 MACHINE_END
 #endif
 
@@ -272,7 +272,7 @@ MACHINE_START(INETSPACE_V2, "LaCie Internet Space v2")
         .init_machine   = netspace_v2_init,
         .map_io         = kirkwood_map_io,
         .init_irq       = kirkwood_init_irq,
-        .timer          = &lacie_v2_timer,
+        .timer          = &kirkwood_timer,
 MACHINE_END
 #endif
 
@@ -282,6 +282,6 @@ MACHINE_START(NETSPACE_MAX_V2, "LaCie Network Space Max v2")
         .init_machine   = netspace_v2_init,
         .map_io         = kirkwood_map_io,
         .init_irq       = kirkwood_init_irq,
-        .timer          = &lacie_v2_timer,
+        .timer          = &kirkwood_timer,
 MACHINE_END
 #endif
@@ -403,7 +403,7 @@ MACHINE_START(NET2BIG_V2, "LaCie 2Big Network v2")
         .init_machine   = netxbig_v2_init,
         .map_io         = kirkwood_map_io,
         .init_irq       = kirkwood_init_irq,
-        .timer          = &lacie_v2_timer,
+        .timer          = &kirkwood_timer,
 MACHINE_END
 #endif
 
@@ -413,6 +413,6 @@ MACHINE_START(NET5BIG_V2, "LaCie 5Big Network v2")
         .init_machine   = netxbig_v2_init,
         .map_io         = kirkwood_map_io,
         .init_irq       = kirkwood_init_irq,
-        .timer          = &lacie_v2_timer,
+        .timer          = &kirkwood_timer,
 MACHINE_END
 #endif
@@ -27,6 +27,10 @@
 #include "mpp.h"
 #include "tsx1x-common.h"
 
+/* for the PCIe reset workaround */
+#include <plat/pcie.h>
+
+
 #define QNAP_TS41X_JUMPER_JP1           45
 
 static struct i2c_board_info __initdata qnap_ts41x_i2c_rtc = {
@@ -140,8 +144,16 @@ static void __init qnap_ts41x_init(void)
 
 static int __init ts41x_pci_init(void)
 {
-        if (machine_is_ts41x())
+        if (machine_is_ts41x()) {
+                /*
+                 * Without this explicit reset, the PCIe SATA controller
+                 * (Marvell 88sx7042/sata_mv) is known to stop working
+                 * after a few minutes.
+                 */
+                orion_pcie_reset((void __iomem *)PCIE_VIRT_BASE);
+
                 kirkwood_pcie_init(KW_PCIE0);
+        }
 
         return 0;
 }
@@ -46,7 +46,8 @@ static inline int cpu_is_pxa910(void)
 #ifdef CONFIG_CPU_MMP2
 static inline int cpu_is_mmp2(void)
 {
-        return (((cpu_readid_id() >> 8) & 0xff) == 0x58);
+        return (((read_cpuid_id() >> 8) & 0xff) == 0x58);
 }
 #else
 #define cpu_is_mmp2()   (0)
 #endif
@@ -54,7 +54,7 @@ void __init mv78xx0_mpp_conf(unsigned int *mpp_list)
         }
         printk("\n");
 
-        while (*mpp_list) {
+        for ( ; *mpp_list; mpp_list++) {
                 unsigned int num = MPP_NUM(*mpp_list);
                 unsigned int sel = MPP_SEL(*mpp_list);
                 int shift, gpio_mode;
@@ -83,8 +83,6 @@ void __init mv78xx0_mpp_conf(unsigned int *mpp_list)
                 if (sel != 0)
                         gpio_mode = 0;
                 orion_gpio_set_valid(num, gpio_mode);
-
-                mpp_list++;
         }
 
         printk(KERN_DEBUG " final MPP regs:");
@@ -127,7 +127,7 @@ void __init orion5x_mpp_conf(struct orion5x_mpp_mode *mode)
         /* Initialize gpiolib. */
         orion_gpio_init();
 
-        while (mode->mpp >= 0) {
+        for ( ; mode->mpp >= 0; mode++) {
                 u32 *reg;
                 int num_type;
                 int shift;
@@ -160,8 +160,6 @@ void __init orion5x_mpp_conf(struct orion5x_mpp_mode *mode)
                         orion_gpio_set_unused(mode->mpp);
 
                 orion_gpio_set_valid(mode->mpp, !!(mode->type == MPP_GPIO));
-
-                mode++;
         }
 
         writel(mpp_0_7_ctrl, MPP_0_7_CTRL);
@@ -239,7 +239,7 @@ static struct platform_nand_data ts78xx_ts_nand_data = {
 static struct resource ts78xx_ts_nand_resources = {
         .start          = TS_NAND_DATA,
         .end            = TS_NAND_DATA + 4,
-        .flags          = IORESOURCE_IO,
+        .flags          = IORESOURCE_MEM,
 };
 
 static struct platform_device ts78xx_ts_nand_device = {
@@ -476,8 +476,6 @@ static void __init cmx2xx_init(void)
 
 static void __init cmx2xx_init_irq(void)
 {
-        pxa27x_init_irq();
-
         if (cpu_is_pxa25x()) {
                 pxa25x_init_irq();
                 cmx2xx_pci_init_irq(CMX255_GPIO_IT8152_IRQ);
@@ -116,7 +116,7 @@ static struct platform_device smc91x_device = {
         },
 };
 
-#if defined(CONFIG_FB_PXA) || (CONFIG_FB_PXA_MODULE)
+#if defined(CONFIG_FB_PXA) || defined(CONFIG_FB_PXA_MODULE)
 static uint16_t lcd_power_on[] = {
         /* single frame */
         SMART_CMD_NOOP,
@@ -54,7 +54,9 @@ static struct map_desc ct_ca9x4_io_desc[] __initdata = {
 
 static void __init ct_ca9x4_map_io(void)
 {
+#ifdef CONFIG_LOCAL_TIMERS
         twd_base = MMIO_P2V(A9_MPCORE_TWD);
+#endif
         v2m_map_io(ct_ca9x4_io_desc, ARRAY_SIZE(ct_ca9x4_io_desc));
 }
 
@@ -198,7 +198,7 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
          * fragmentation of the DMA space, and also prevents allocations
          * smaller than a section from crossing a section boundary.
          */
-        bit = fls(size - 1) + 1;
+        bit = fls(size - 1);
         if (bit > SECTION_SHIFT)
                 bit = SECTION_SHIFT;
         align = 1 << bit;
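For the DMA coherent allocator change above: fls(size - 1) is the order of the smallest power of two covering size, which is already the natural alignment for the allocation; the old "+ 1" doubled it and wasted virtual space in the DMA remap area. A small worked example with a user-space stand-in for fls():

#include <stdio.h>

/* fls(x): index (1-based) of the most significant set bit, 0 for x == 0.
 * This mirrors the kernel helper for the purpose of the example. */
static int fls_example(unsigned long x)
{
        int bit = 0;

        while (x) {
                x >>= 1;
                bit++;
        }
        return bit;
}

int main(void)
{
        unsigned long size = 0x100000;  /* 1 MB request */

        unsigned long align_old = 1UL << (fls_example(size - 1) + 1);  /* 2 MB */
        unsigned long align_new = 1UL << fls_example(size - 1);        /* 1 MB */

        printf("size %#lx: old align %#lx, fixed align %#lx\n",
               size, align_old, align_new);
        return 0;
}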
@@ -11,12 +11,15 @@
 #ifndef __PLAT_PCIE_H
 #define __PLAT_PCIE_H
 
+struct pci_bus;
+
 u32 orion_pcie_dev_id(void __iomem *base);
 u32 orion_pcie_rev(void __iomem *base);
 int orion_pcie_link_up(void __iomem *base);
 int orion_pcie_x4_mode(void __iomem *base);
 int orion_pcie_get_local_bus_nr(void __iomem *base);
 void orion_pcie_set_local_bus_nr(void __iomem *base, int nr);
+void orion_pcie_reset(void __iomem *base);
 void orion_pcie_setup(void __iomem *base,
                       struct mbus_dram_target_info *dram);
 int orion_pcie_rd_conf(void __iomem *base, struct pci_bus *bus,
@@ -181,11 +181,6 @@ void __init orion_pcie_setup(void __iomem *base,
         u16 cmd;
         u32 mask;
 
-        /*
-         * soft reset PCIe unit
-         */
-        orion_pcie_reset(base);
-
         /*
          * Point PCIe unit MBUS decode windows to DRAM space.
          */