ARM: tegra: switch to dmaengine
The Tegra code-base has contained both a legacy DMA and a dmaengine driver
since v3.6-rcX. This series flips Tegra's defconfig to enable dmaengine
rather than the legacy driver, and removes the legacy driver and all client
code.

The branch is based on v3.6-rc6 in order to pick up a bug-fix to the ASoC
Tegra PCM driver that's required for audio to work correctly when using
dmaengine.

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.10 (GNU/Linux)

iQIcBAABAgAGBQJQV0gxAAoJEMzrak5tbycx9mIP/0uU4fVrAyIgbRkJ7nrPS/K7
vRKEfYJlXqr4zM79i3flpD/QPK6ImWcj0RptrdU3851yjVGkSehp8wbozKoBVDXQ
ZqPEBG039Vshmum/AD6Km3LSl4LBYurNJp/OC7ms5r0jIsU2IxZYaoofLGPXmgwn
LTlsG35Y/Bug6P4bbSNPhR/9CFAe695oQgvkIMnYROwVZTmQwu7Xh1CE2moKMEJN
top1Z3tZ+gtbb84eU1KR9BSNXAhQi7S7d4vWJe3RjnrhuSTVMIxiyNZSFjt8DrLL
7THzpmY/K2qV9k6CAO7bTl9X6m9cw8j+IbN6Ljc1NjbBiMcFe3TQRwFXicmt/Pma
VPjppGIfTUzC9WJI5Tj8GOV6I6B6X5oCSILcXjeJpNE3TEvdLnVXhiclbhiVuB/0
j9x0+w1SMfRr8RtsMvZyZHy1XQ+WJg/rXojGxLEsKJrZmmJ7yRkfqIr/Q9nSrh87
KYHhy8lsOuSPXq1qEVKQLwenc1VPbbDcDow1fBURPmz1CFCvNnR/mWtY2uCu5gk/
XPcqZu5I/T7DlrNGTfYCZbOow67tfHgAxW5MYLPXV+Fqkj1l9EimUGW5fIq7S6bA
2ouTuCS1e79d9kFLjgAzdbfqtdjy93v7G5vlBV7gUIrMg5PtGnQvQK9ab/YzasOt
XtP5p/eeV8NDo3MCw3+b
=4eRL
-----END PGP SIGNATURE-----

Merge tag 'tegra-for-3.7-dmaengine' of git://git.kernel.org/pub/scm/linux/kernel/git/swarren/linux-tegra into next/cleanup

ARM: tegra: switch to dmaengine

The Tegra code-base has contained both a legacy DMA and a dmaengine driver
since v3.6-rcX. This series flips Tegra's defconfig to enable dmaengine
rather than the legacy driver, and removes the legacy driver and all client
code.

* tag 'tegra-for-3.7-dmaengine' of git://git.kernel.org/pub/scm/linux/kernel/git/swarren/linux-tegra:
  ASoC: tegra: remove support of legacy DMA driver based access
  spi: tegra: remove support of legacy DMA driver based access
  ARM: tegra: apbio: remove support of legacy DMA driver based access
  ARM: tegra: dma: remove legacy APB DMA driver
  ARM: tegra: config: enable dmaengine based APB DMA driver

+ sync to 3.6-rc6
commit 32dec75349
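The client-side shape of the conversion is worth seeing before the diffs. Below is a minimal sketch of a dmaengine slave transfer as the converted drivers now issue it, assuming a 3.6-era dmaengine API; the channel is requested anonymously, and FOO_FIFO_ADDR, foo_start_tx and the burst size are hypothetical placeholders, not values from this series.

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>

/* Hypothetical device FIFO address; real clients get this from their
 * platform data or resources. */
#define FOO_FIFO_ADDR	0x70002000

static struct dma_chan *foo_chan;

/* "buf" must already be DMA-mapped by the caller. */
static int foo_start_tx(dma_addr_t buf, size_t len)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= FOO_FIFO_ADDR,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 4,
	};
	struct dma_async_tx_descriptor *desc;
	dma_cap_mask_t mask;

	if (!foo_chan) {
		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);
		/* No filter: accept any slave channel the core offers */
		foo_chan = dma_request_channel(mask, NULL, NULL);
		if (!foo_chan)
			return -ENODEV;
	}

	if (dmaengine_slave_config(foo_chan, &cfg))
		return -EINVAL;

	desc = dmaengine_prep_slave_single(foo_chan, buf, len,
					   DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!desc)
		return -EIO;

	dmaengine_submit(desc);		/* queue the descriptor */
	dma_async_issue_pending(foo_chan);	/* kick the hardware */
	return 0;
}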
@@ -21,6 +21,7 @@ Supported adapters:
   * Intel DH89xxCC (PCH)
   * Intel Panther Point (PCH)
   * Intel Lynx Point (PCH)
+  * Intel Lynx Point-LP (PCH)
    Datasheets: Publicly available at the Intel website
 
 On Intel Patsburg and later chipsets, both the normal host SMBus controller
@@ -3387,7 +3387,7 @@ M:	"Wolfram Sang (embedded platforms)" <w.sang@pengutronix.de>
 L:	linux-i2c@vger.kernel.org
 W:	http://i2c.wiki.kernel.org/
 T:	quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-i2c/
-T:	git git://git.fluff.org/bjdooks/linux.git
+T:	git git://git.pengutronix.de/git/wsa/linux.git
 S:	Maintained
 F:	Documentation/i2c/
 F:	drivers/i2c/
Makefile

@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 6
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
 NAME = Saber-toothed Squirrel
 
 # *DOCUMENTATION*
@@ -356,15 +356,15 @@ choice
 		  is nothing connected to read from the DCC.
 
 	config DEBUG_SEMIHOSTING
-		bool "Kernel low-level debug output via semihosting I"
+		bool "Kernel low-level debug output via semihosting I/O"
 		help
 		  Semihosting enables code running on an ARM target to use
 		  the I/O facilities on a host debugger/emulator through a
-		  simple SVC calls. The host debugger or emulator must have
+		  simple SVC call. The host debugger or emulator must have
 		  semihosting enabled for the special svc call to be trapped
 		  otherwise the kernel will crash.
 
-		  This is known to work with OpenOCD, as wellas
+		  This is known to work with OpenOCD, as well as
 		  ARM's Fast Models, or any other controlling environment
 		  that implements semihosting.
 
@@ -283,10 +283,10 @@ zImage Image xipImage bootpImage uImage: vmlinux
 zinstall uinstall install: vmlinux
 	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@
 
-%.dtb:
+%.dtb: scripts
 	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
 
-dtbs:
+dtbs: scripts
 	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
 
 # We use MRPROPER_FILES and CLEAN_FILES now
@@ -659,10 +659,14 @@ __armv7_mmu_cache_on:
 #ifdef CONFIG_CPU_ENDIAN_BE8
 		orr	r0, r0, #1 << 25	@ big-endian page tables
 #endif
+		mrcne	p15, 0, r6, c2, c0, 2	@ read ttb control reg
 		orrne	r0, r0, #1		@ MMU enabled
 		movne	r1, #0xfffffffd		@ domain 0 = client
+		bic	r6, r6, #1 << 31	@ 32-bit translation system
+		bic	r6, r6, #3 << 0		@ use only ttbr0
 		mcrne	p15, 0, r3, c2, c0, 0	@ load page table pointer
 		mcrne	p15, 0, r1, c3, c0, 0	@ load domain access control
+		mcrne	p15, 0, r6, c2, c0, 2	@ load ttb control
 #endif
 		mcr	p15, 0, r0, c7, c5, 4	@ ISB
 		mcr	p15, 0, r0, c1, c0, 0	@ load control register
@@ -145,6 +145,8 @@ CONFIG_MMC_SDHCI_TEGRA=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_EM3027=y
 CONFIG_RTC_DRV_TEGRA=y
+CONFIG_DMADEVICES=y
+CONFIG_TEGRA20_APB_DMA=y
 CONFIG_STAGING=y
 CONFIG_SENSORS_ISL29018=y
 CONFIG_SENSORS_ISL29028=y
@@ -320,4 +320,12 @@
 	.size \name , . - \name
 	.endm
 
+	.macro	check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
+#ifndef CONFIG_CPU_USE_DOMAINS
+	adds	\tmp, \addr, #\size - 1
+	sbcccs	\tmp, \tmp, \limit
+	bcs	\bad
+#endif
+	.endm
+
 #endif /* __ASM_ASSEMBLER_H__ */
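The macro packs a range-and-overflow check into three instructions: adds sets carry if addr + size - 1 wraps past zero, sbcccs (executed only when no wrap occurred) compares the last byte against the limit, and bcs branches to the fault path in either bad case. A C rendering of the same logic, as a sketch with an illustrative function name; "limit" is current_thread_info()->addr_limit - 1, which the uaccess.h changes below load into r1:

/* Sketch: C equivalent of the adds/sbcccs/bcs sequence in check_uaccess. */
static inline int check_uaccess_ok(unsigned long addr, unsigned long size,
				   unsigned long limit)
{
	unsigned long last = addr + size - 1;

	if (last < addr)	/* adds set carry: the range wrapped past 0 */
		return 0;
	return last <= limit;	/* sbcccs+bcs: last byte must be <= limit */
}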
@@ -187,6 +187,7 @@ static inline unsigned long __phys_to_virt(unsigned long x)
 #define __phys_to_virt(x)	((x) - PHYS_OFFSET + PAGE_OFFSET)
 #endif
 #endif
+#endif /* __ASSEMBLY__ */
 
 #ifndef PHYS_OFFSET
 #ifdef PLAT_PHYS_OFFSET
@@ -196,6 +197,8 @@ static inline unsigned long __phys_to_virt(unsigned long x)
 #endif
 #endif
 
+#ifndef __ASSEMBLY__
+
 /*
  * PFNs are used to describe any physical page; this means
  * PFN 0 == physical address 0.
@@ -199,6 +199,9 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 {
 	pgtable_page_dtor(pte);
 
+#ifdef CONFIG_ARM_LPAE
+	tlb_add_flush(tlb, addr);
+#else
 	/*
 	 * With the classic ARM MMU, a pte page has two corresponding pmd
 	 * entries, each covering 1MB.
@@ -206,6 +209,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 	addr &= PMD_MASK;
 	tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE);
 	tlb_add_flush(tlb, addr + SZ_1M);
+#endif
 
 	tlb_remove_page(tlb, pte);
 }
@@ -101,28 +101,39 @@ extern int __get_user_1(void *);
 extern int __get_user_2(void *);
 extern int __get_user_4(void *);
 
-#define __get_user_x(__r2,__p,__e,__s,__i...)				\
+#define __GUP_CLOBBER_1	"lr", "cc"
+#ifdef CONFIG_CPU_USE_DOMAINS
+#define __GUP_CLOBBER_2	"ip", "lr", "cc"
+#else
+#define __GUP_CLOBBER_2 "lr", "cc"
+#endif
+#define __GUP_CLOBBER_4	"lr", "cc"
+
+#define __get_user_x(__r2,__p,__e,__l,__s)				\
 	   __asm__ __volatile__ (					\
 		__asmeq("%0", "r0") __asmeq("%1", "r2")			\
+		__asmeq("%3", "r1")					\
 		"bl	__get_user_" #__s				\
 		: "=&r" (__e), "=r" (__r2)				\
-		: "0" (__p)						\
-		: __i, "cc")
+		: "0" (__p), "r" (__l)					\
+		: __GUP_CLOBBER_##__s)
 
-#define get_user(x,p)							\
+#define __get_user_check(x,p)						\
 	({								\
+		unsigned long __limit = current_thread_info()->addr_limit - 1; \
 		register const typeof(*(p)) __user *__p asm("r0") = (p);\
 		register unsigned long __r2 asm("r2");			\
+		register unsigned long __l asm("r1") = __limit;		\
 		register int __e asm("r0");				\
 		switch (sizeof(*(__p))) {				\
 		case 1:							\
-			__get_user_x(__r2, __p, __e, 1, "lr");		\
-			break;						\
+			__get_user_x(__r2, __p, __e, __l, 1);		\
+			break;						\
 		case 2:							\
-			__get_user_x(__r2, __p, __e, 2, "r3", "lr");	\
+			__get_user_x(__r2, __p, __e, __l, 2);		\
 			break;						\
 		case 4:							\
-			__get_user_x(__r2, __p, __e, 4, "lr");		\
+			__get_user_x(__r2, __p, __e, __l, 4);		\
 			break;						\
 		default: __e = __get_user_bad(); break;			\
 		}							\
@@ -130,42 +141,57 @@ extern int __get_user_4(void *);
 		__e;							\
 	})
 
+#define get_user(x,p)							\
+	({								\
+		might_fault();						\
+		__get_user_check(x,p);					\
+	 })
+
 extern int __put_user_1(void *, unsigned int);
 extern int __put_user_2(void *, unsigned int);
 extern int __put_user_4(void *, unsigned int);
 extern int __put_user_8(void *, unsigned long long);
 
-#define __put_user_x(__r2,__p,__e,__s)					\
+#define __put_user_x(__r2,__p,__e,__l,__s)				\
 	__asm__ __volatile__ (						\
 		__asmeq("%0", "r0") __asmeq("%2", "r2")			\
+		__asmeq("%3", "r1")					\
 		"bl	__put_user_" #__s				\
 		: "=&r" (__e)						\
-		: "0" (__p), "r" (__r2)					\
+		: "0" (__p), "r" (__r2), "r" (__l)			\
 		: "ip", "lr", "cc")
 
-#define put_user(x,p)							\
+#define __put_user_check(x,p)						\
 	({								\
+		unsigned long __limit = current_thread_info()->addr_limit - 1; \
 		register const typeof(*(p)) __r2 asm("r2") = (x);	\
 		register const typeof(*(p)) __user *__p asm("r0") = (p);\
+		register unsigned long __l asm("r1") = __limit;		\
 		register int __e asm("r0");				\
 		switch (sizeof(*(__p))) {				\
 		case 1:							\
-			__put_user_x(__r2, __p, __e, 1);		\
+			__put_user_x(__r2, __p, __e, __l, 1);		\
 			break;						\
 		case 2:							\
-			__put_user_x(__r2, __p, __e, 2);		\
+			__put_user_x(__r2, __p, __e, __l, 2);		\
 			break;						\
 		case 4:							\
-			__put_user_x(__r2, __p, __e, 4);		\
+			__put_user_x(__r2, __p, __e, __l, 4);		\
 			break;						\
 		case 8:							\
-			__put_user_x(__r2, __p, __e, 8);		\
+			__put_user_x(__r2, __p, __e, __l, 8);		\
 			break;						\
 		default: __e = __put_user_bad(); break;			\
 		}							\
 		__e;							\
 	})
 
+#define put_user(x,p)							\
+	({								\
+		might_fault();						\
+		__put_user_check(x,p);					\
+	 })
+
 #else /* CONFIG_MMU */
 
 /*
@@ -219,6 +245,7 @@ do {									\
 	unsigned long __gu_addr = (unsigned long)(ptr);			\
 	unsigned long __gu_val;						\
 	__chk_user_ptr(ptr);						\
+	might_fault();							\
 	switch (sizeof(*(ptr))) {					\
 	case 1:	__get_user_asm_byte(__gu_val,__gu_addr,err);	break;	\
 	case 2:	__get_user_asm_half(__gu_val,__gu_addr,err);	break;	\
@@ -300,6 +327,7 @@ do {									\
 	unsigned long __pu_addr = (unsigned long)(ptr);			\
 	__typeof__(*(ptr)) __pu_val = (x);				\
 	__chk_user_ptr(ptr);						\
+	might_fault();							\
 	switch (sizeof(*(ptr))) {					\
 	case 1: __put_user_asm_byte(__pu_val,__pu_addr,err);	break;	\
 	case 2: __put_user_asm_half(__pu_val,__pu_addr,err);	break;	\
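The caller-visible contract is unchanged by all of this: get_user() and put_user() still return 0 on success and -EFAULT on failure, only the failure can now also come from the r1 limit check rather than solely from a page fault. A minimal caller sketch (hypothetical function, not from this series):

#include <linux/types.h>
#include <linux/uaccess.h>

/* Sketch: copy one u32 from user space. The return value of get_user()
 * must be checked -- exactly the pattern the traps.c fix below enforces. */
static int demo_read_word(const u32 __user *uptr, u32 *out)
{
	u32 val;

	if (get_user(val, uptr))	/* -EFAULT on a bad pointer */
		return -EFAULT;
	*out = val;
	return 0;
}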
@@ -159,6 +159,12 @@ static int debug_arch_supported(void)
 		arch >= ARM_DEBUG_ARCH_V7_1;
 }
 
+/* Can we determine the watchpoint access type from the fsr? */
+static int debug_exception_updates_fsr(void)
+{
+	return 0;
+}
+
 /* Determine number of WRP registers available. */
 static int get_num_wrp_resources(void)
 {
@@ -604,13 +610,14 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
 		/* Aligned */
 		break;
 	case 1:
-		/* Allow single byte watchpoint. */
-		if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
-			break;
 	case 2:
 		/* Allow halfword watchpoints and breakpoints. */
 		if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
 			break;
+	case 3:
+		/* Allow single byte watchpoint. */
+		if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
+			break;
 	default:
 		ret = -EINVAL;
 		goto out;
@@ -619,18 +626,35 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
 	info->address &= ~alignment_mask;
 	info->ctrl.len <<= offset;
 
-	/*
-	 * Currently we rely on an overflow handler to take
-	 * care of single-stepping the breakpoint when it fires.
-	 * In the case of userspace breakpoints on a core with V7 debug,
-	 * we can use the mismatch feature as a poor-man's hardware
-	 * single-step, but this only works for per-task breakpoints.
-	 */
-	if (!bp->overflow_handler && (arch_check_bp_in_kernelspace(bp) ||
-	    !core_has_mismatch_brps() || !bp->hw.bp_target)) {
-		pr_warning("overflow handler required but none found\n");
-		ret = -EINVAL;
+	if (!bp->overflow_handler) {
+		/*
+		 * Mismatch breakpoints are required for single-stepping
+		 * breakpoints.
+		 */
+		if (!core_has_mismatch_brps())
+			return -EINVAL;
+
+		/* We don't allow mismatch breakpoints in kernel space. */
+		if (arch_check_bp_in_kernelspace(bp))
+			return -EPERM;
+
+		/*
+		 * Per-cpu breakpoints are not supported by our stepping
+		 * mechanism.
+		 */
+		if (!bp->hw.bp_target)
+			return -EINVAL;
+
+		/*
+		 * We only support specific access types if the fsr
+		 * reports them.
+		 */
+		if (!debug_exception_updates_fsr() &&
+		    (info->ctrl.type == ARM_BREAKPOINT_LOAD ||
+		     info->ctrl.type == ARM_BREAKPOINT_STORE))
+			return -EINVAL;
 	}
 
 out:
 	return ret;
 }
@@ -706,10 +730,12 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
 			goto unlock;
 
 		/* Check that the access type matches. */
-		access = (fsr & ARM_FSR_ACCESS_MASK) ? HW_BREAKPOINT_W :
-			 HW_BREAKPOINT_R;
-		if (!(access & hw_breakpoint_type(wp)))
-			goto unlock;
+		if (debug_exception_updates_fsr()) {
+			access = (fsr & ARM_FSR_ACCESS_MASK) ?
+				  HW_BREAKPOINT_W : HW_BREAKPOINT_R;
+			if (!(access & hw_breakpoint_type(wp)))
+				goto unlock;
+		}
 
 		/* We have a winner. */
 		info->trigger = addr;
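The rules the validation hunk enforces line up with the in-kernel breakpoint API. As a sketch (names and usage illustrative, not from this series), registering a one-byte write watchpoint with an explicit overflow handler -- the combination the new code always accepts:

#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <linux/err.h>

static void demo_wp_handler(struct perf_event *bp,
			    struct perf_sample_data *data,
			    struct pt_regs *regs)
{
	pr_info("watched byte written\n");
}

static struct perf_event * __percpu *demo_wp;

static int demo_watch_byte(void *addr)
{
	struct perf_event_attr attr;

	hw_breakpoint_init(&attr);
	attr.bp_addr = (unsigned long)addr;
	attr.bp_len  = HW_BREAKPOINT_LEN_1;	/* the new "case 3" path */
	attr.bp_type = HW_BREAKPOINT_W;

	/* Explicit handler supplied, so the mismatch-brp constraints
	 * in the hunk above do not apply. */
	demo_wp = register_wide_hw_breakpoint(&attr, demo_wp_handler, NULL);
	if (IS_ERR((void __force *)demo_wp))
		return PTR_ERR((void __force *)demo_wp);
	return 0;
}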
@@ -420,20 +420,23 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
 #endif
 		instr = *(u32 *) pc;
 	} else if (thumb_mode(regs)) {
-		get_user(instr, (u16 __user *)pc);
+		if (get_user(instr, (u16 __user *)pc))
+			goto die_sig;
 		if (is_wide_instruction(instr)) {
 			unsigned int instr2;
-			get_user(instr2, (u16 __user *)pc+1);
+			if (get_user(instr2, (u16 __user *)pc+1))
+				goto die_sig;
 			instr <<= 16;
 			instr |= instr2;
 		}
-	} else {
-		get_user(instr, (u32 __user *)pc);
+	} else if (get_user(instr, (u32 __user *)pc)) {
+		goto die_sig;
 	}
 
 	if (call_undef_hook(regs, instr) == 0)
 		return;
 
+die_sig:
 #ifdef CONFIG_DEBUG_USER
 	if (user_debug & UDBG_UNDEFINED) {
 		printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n",
@@ -59,6 +59,7 @@ void __init init_current_timer_delay(unsigned long freq)
 {
 	pr_info("Switching to timer-based delay loop\n");
 	lpj_fine = freq / HZ;
+	loops_per_jiffy = lpj_fine;
 	arm_delay_ops.delay		= __timer_delay;
 	arm_delay_ops.const_udelay	= __timer_const_udelay;
 	arm_delay_ops.udelay		= __timer_udelay;
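The one-line fix keeps the global loops_per_jiffy consistent with the timer-derived value. As a sketch of the arithmetic, with assumed values (a 1 MHz timer and HZ == 100, giving 1000000 / 100 == 10000 ticks per jiffy): the delay ops are redirected to the timer here, but other code still reads loops_per_jiffy directly, so the two must stay in sync.

#include <linux/param.h>	/* HZ */

/* Sketch: the value assigned above, for an assumed timer frequency. */
static unsigned long timer_lpj(unsigned long freq)
{
	return freq / HZ;	/* matches lpj_fine = freq / HZ */
}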
@@ -16,8 +16,9 @@
  * __get_user_X
  *
  * Inputs:	r0 contains the address
+ *		r1 contains the address limit, which must be preserved
  * Outputs:	r0 is the error code
- *		r2, r3 contains the zero-extended value
+ *		r2 contains the zero-extended value
  *		lr corrupted
  *
  * No other registers must be altered. (see <asm/uaccess.h>
@@ -27,33 +28,39 @@
  * Note also that it is intended that __get_user_bad is not global.
  */
 #include <linux/linkage.h>
+#include <asm/assembler.h>
 #include <asm/errno.h>
 #include <asm/domain.h>
 
 ENTRY(__get_user_1)
+	check_uaccess r0, 1, r1, r2, __get_user_bad
 1:	TUSER(ldrb)	r2, [r0]
 	mov	r0, #0
 	mov	pc, lr
 ENDPROC(__get_user_1)
 
 ENTRY(__get_user_2)
-#ifdef CONFIG_THUMB2_KERNEL
-2:	TUSER(ldrb)	r2, [r0]
-3:	TUSER(ldrb)	r3, [r0, #1]
+	check_uaccess r0, 2, r1, r2, __get_user_bad
+#ifdef CONFIG_CPU_USE_DOMAINS
+rb	.req	ip
+2:	ldrbt	r2, [r0], #1
+3:	ldrbt	rb, [r0], #0
 #else
-2:	TUSER(ldrb)	r2, [r0], #1
-3:	TUSER(ldrb)	r3, [r0]
+rb	.req	r0
+2:	ldrb	r2, [r0]
+3:	ldrb	rb, [r0, #1]
 #endif
 #ifndef __ARMEB__
-	orr	r2, r2, r3, lsl #8
+	orr	r2, r2, rb, lsl #8
 #else
-	orr	r2, r3, r2, lsl #8
+	orr	r2, rb, r2, lsl #8
 #endif
 	mov	r0, #0
 	mov	pc, lr
 ENDPROC(__get_user_2)
 
 ENTRY(__get_user_4)
+	check_uaccess r0, 4, r1, r2, __get_user_bad
 4:	TUSER(ldr)	r2, [r0]
 	mov	r0, #0
 	mov	pc, lr
@@ -16,6 +16,7 @@
  * __put_user_X
  *
  * Inputs:	r0 contains the address
+ *		r1 contains the address limit, which must be preserved
  *		r2, r3 contains the value
  * Outputs:	r0 is the error code
  *		lr corrupted
@@ -27,16 +28,19 @@
  * Note also that it is intended that __put_user_bad is not global.
  */
 #include <linux/linkage.h>
+#include <asm/assembler.h>
 #include <asm/errno.h>
 #include <asm/domain.h>
 
 ENTRY(__put_user_1)
+	check_uaccess r0, 1, r1, ip, __put_user_bad
 1:	TUSER(strb)	r2, [r0]
 	mov	r0, #0
 	mov	pc, lr
 ENDPROC(__put_user_1)
 
 ENTRY(__put_user_2)
+	check_uaccess r0, 2, r1, ip, __put_user_bad
 	mov	ip, r2, lsr #8
 #ifdef CONFIG_THUMB2_KERNEL
 #ifndef __ARMEB__
@@ -60,12 +64,14 @@ ENTRY(__put_user_2)
 ENDPROC(__put_user_2)
 
 ENTRY(__put_user_4)
+	check_uaccess r0, 4, r1, ip, __put_user_bad
 4:	TUSER(str)	r2, [r0]
 	mov	r0, #0
 	mov	pc, lr
 ENDPROC(__put_user_4)
 
 ENTRY(__put_user_8)
+	check_uaccess r0, 8, r1, ip, __put_user_bad
 #ifdef CONFIG_THUMB2_KERNEL
 5:	TUSER(str)	r2, [r0]
 6:	TUSER(str)	r3, [r0, #4]
|
|||
clk_register_clkdev(clk[lcdc_ipg], "ipg", "imx-fb.0");
|
||||
clk_register_clkdev(clk[lcdc_ahb], "ahb", "imx-fb.0");
|
||||
clk_register_clkdev(clk[wdt_ipg], NULL, "imx2-wdt.0");
|
||||
clk_register_clkdev(clk[ssi1_ipg_per], "per", "imx-ssi.0");
|
||||
clk_register_clkdev(clk[ssi1_ipg], "ipg", "imx-ssi.0");
|
||||
clk_register_clkdev(clk[ssi2_ipg_per], "per", "imx-ssi.1");
|
||||
clk_register_clkdev(clk[ssi2_ipg], "ipg", "imx-ssi.1");
|
||||
clk_register_clkdev(clk[ssi1_ipg], NULL, "imx-ssi.0");
|
||||
clk_register_clkdev(clk[ssi2_ipg], NULL, "imx-ssi.1");
|
||||
clk_register_clkdev(clk[esdhc1_ipg_per], "per", "sdhci-esdhc-imx25.0");
|
||||
clk_register_clkdev(clk[esdhc1_ipg], "ipg", "sdhci-esdhc-imx25.0");
|
||||
clk_register_clkdev(clk[esdhc1_ahb], "ahb", "sdhci-esdhc-imx25.0");
|
||||
|
|
|
@@ -230,10 +230,8 @@ int __init mx35_clocks_init()
 	clk_register_clkdev(clk[ipu_gate], NULL, "mx3_sdc_fb");
 	clk_register_clkdev(clk[owire_gate], NULL, "mxc_w1");
 	clk_register_clkdev(clk[sdma_gate], NULL, "imx35-sdma");
-	clk_register_clkdev(clk[ipg], "ipg", "imx-ssi.0");
-	clk_register_clkdev(clk[ssi1_div_post], "per", "imx-ssi.0");
-	clk_register_clkdev(clk[ipg], "ipg", "imx-ssi.1");
-	clk_register_clkdev(clk[ssi2_div_post], "per", "imx-ssi.1");
+	clk_register_clkdev(clk[ssi1_gate], NULL, "imx-ssi.0");
+	clk_register_clkdev(clk[ssi2_gate], NULL, "imx-ssi.1");
 	/* i.mx35 has the i.mx21 type uart */
 	clk_register_clkdev(clk[uart1_gate], "per", "imx21-uart.0");
 	clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.0");
@@ -232,10 +232,11 @@ config MACH_OMAP3_PANDORA
 	select OMAP_PACKAGE_CBB
 	select REGULATOR_FIXED_VOLTAGE if REGULATOR
 
-config MACH_OMAP3_TOUCHBOOK
+config MACH_TOUCHBOOK
 	bool "OMAP3 Touch Book"
 	depends on ARCH_OMAP3
 	default y
+	select OMAP_PACKAGE_CBB
 
 config MACH_OMAP_3430SDP
 	bool "OMAP 3430 SDP board"
@@ -235,7 +235,7 @@ obj-$(CONFIG_MACH_OMAP_3630SDP)		+= board-zoom-display.o
 obj-$(CONFIG_MACH_CM_T35)		+= board-cm-t35.o
 obj-$(CONFIG_MACH_CM_T3517)		+= board-cm-t3517.o
 obj-$(CONFIG_MACH_IGEP0020)		+= board-igep0020.o
-obj-$(CONFIG_MACH_OMAP3_TOUCHBOOK)	+= board-omap3touchbook.o
+obj-$(CONFIG_MACH_TOUCHBOOK)		+= board-omap3touchbook.o
 obj-$(CONFIG_MACH_OMAP_4430SDP)		+= board-4430sdp.o
 obj-$(CONFIG_MACH_OMAP4_PANDA)		+= board-omap4panda.o
 
@@ -1036,13 +1036,13 @@ static struct omap_clk am33xx_clks[] = {
 	CLK(NULL,	"mmu_fck",		&mmu_fck,	CK_AM33XX),
 	CLK(NULL,	"smartreflex0_fck",	&smartreflex0_fck,	CK_AM33XX),
 	CLK(NULL,	"smartreflex1_fck",	&smartreflex1_fck,	CK_AM33XX),
-	CLK(NULL,	"gpt1_fck",		&timer1_fck,	CK_AM33XX),
-	CLK(NULL,	"gpt2_fck",		&timer2_fck,	CK_AM33XX),
-	CLK(NULL,	"gpt3_fck",		&timer3_fck,	CK_AM33XX),
-	CLK(NULL,	"gpt4_fck",		&timer4_fck,	CK_AM33XX),
-	CLK(NULL,	"gpt5_fck",		&timer5_fck,	CK_AM33XX),
-	CLK(NULL,	"gpt6_fck",		&timer6_fck,	CK_AM33XX),
-	CLK(NULL,	"gpt7_fck",		&timer7_fck,	CK_AM33XX),
+	CLK(NULL,	"timer1_fck",		&timer1_fck,	CK_AM33XX),
+	CLK(NULL,	"timer2_fck",		&timer2_fck,	CK_AM33XX),
+	CLK(NULL,	"timer3_fck",		&timer3_fck,	CK_AM33XX),
+	CLK(NULL,	"timer4_fck",		&timer4_fck,	CK_AM33XX),
+	CLK(NULL,	"timer5_fck",		&timer5_fck,	CK_AM33XX),
+	CLK(NULL,	"timer6_fck",		&timer6_fck,	CK_AM33XX),
+	CLK(NULL,	"timer7_fck",		&timer7_fck,	CK_AM33XX),
 	CLK(NULL,	"usbotg_fck",		&usbotg_fck,	CK_AM33XX),
 	CLK(NULL,	"ieee5000_fck",		&ieee5000_fck,	CK_AM33XX),
 	CLK(NULL,	"wdt1_fck",		&wdt1_fck,	CK_AM33XX),
@@ -241,6 +241,52 @@ static void omap3_clkdm_deny_idle(struct clockdomain *clkdm)
 		_clkdm_del_autodeps(clkdm);
 }
 
+static int omap3xxx_clkdm_clk_enable(struct clockdomain *clkdm)
+{
+	bool hwsup = false;
+
+	if (!clkdm->clktrctrl_mask)
+		return 0;
+
+	hwsup = omap2_cm_is_clkdm_in_hwsup(clkdm->pwrdm.ptr->prcm_offs,
+				clkdm->clktrctrl_mask);
+
+	if (hwsup) {
+		/* Disable HW transitions when we are changing deps */
+		_disable_hwsup(clkdm);
+		_clkdm_add_autodeps(clkdm);
+		_enable_hwsup(clkdm);
+	} else {
+		if (clkdm->flags & CLKDM_CAN_FORCE_WAKEUP)
+			omap3_clkdm_wakeup(clkdm);
+	}
+
+	return 0;
+}
+
+static int omap3xxx_clkdm_clk_disable(struct clockdomain *clkdm)
+{
+	bool hwsup = false;
+
+	if (!clkdm->clktrctrl_mask)
+		return 0;
+
+	hwsup = omap2_cm_is_clkdm_in_hwsup(clkdm->pwrdm.ptr->prcm_offs,
+				clkdm->clktrctrl_mask);
+
+	if (hwsup) {
+		/* Disable HW transitions when we are changing deps */
+		_disable_hwsup(clkdm);
+		_clkdm_del_autodeps(clkdm);
+		_enable_hwsup(clkdm);
+	} else {
+		if (clkdm->flags & CLKDM_CAN_FORCE_SLEEP)
+			omap3_clkdm_sleep(clkdm);
+	}
+
+	return 0;
+}
+
 struct clkdm_ops omap2_clkdm_operations = {
 	.clkdm_add_wkdep	= omap2_clkdm_add_wkdep,
 	.clkdm_del_wkdep	= omap2_clkdm_del_wkdep,
@@ -267,6 +313,6 @@ struct clkdm_ops omap3_clkdm_operations = {
 	.clkdm_wakeup		= omap3_clkdm_wakeup,
 	.clkdm_allow_idle	= omap3_clkdm_allow_idle,
 	.clkdm_deny_idle	= omap3_clkdm_deny_idle,
-	.clkdm_clk_enable	= omap2_clkdm_clk_enable,
-	.clkdm_clk_disable	= omap2_clkdm_clk_disable,
+	.clkdm_clk_enable	= omap3xxx_clkdm_clk_enable,
+	.clkdm_clk_disable	= omap3xxx_clkdm_clk_disable,
 };
@@ -67,6 +67,7 @@
 #define OMAP3430_EN_IVA2_DPLL_MASK			(0x7 << 0)
 
 /* CM_IDLEST_IVA2 */
+#define OMAP3430_ST_IVA2_SHIFT				0
 #define OMAP3430_ST_IVA2_MASK				(1 << 0)
 
 /* CM_IDLEST_PLL_IVA2 */
@@ -47,7 +47,7 @@
 static void __iomem *wakeupgen_base;
 static void __iomem *sar_base;
 static DEFINE_SPINLOCK(wakeupgen_lock);
-static unsigned int irq_target_cpu[NR_IRQS];
+static unsigned int irq_target_cpu[MAX_IRQS];
 static unsigned int irq_banks = MAX_NR_REG_BANKS;
 static unsigned int max_irqs = MAX_IRQS;
 static unsigned int omap_secure_apis;
@@ -1889,6 +1889,7 @@ static int _enable(struct omap_hwmod *oh)
 			_enable_sysc(oh);
 		}
 	} else {
+		_omap4_disable_module(oh);
 		_disable_clocks(oh);
 		pr_debug("omap_hwmod: %s: _wait_target_ready: %d\n",
 			 oh->name, r);
@@ -100,9 +100,9 @@ static struct omap_hwmod omap3xxx_mpu_hwmod = {
 
 /* IVA2 (IVA2) */
 static struct omap_hwmod_rst_info omap3xxx_iva_resets[] = {
-	{ .name = "logic", .rst_shift = 0 },
-	{ .name = "seq0", .rst_shift = 1 },
-	{ .name = "seq1", .rst_shift = 2 },
+	{ .name = "logic", .rst_shift = 0, .st_shift = 8 },
+	{ .name = "seq0", .rst_shift = 1, .st_shift = 9 },
+	{ .name = "seq1", .rst_shift = 2, .st_shift = 10 },
 };
 
 static struct omap_hwmod omap3xxx_iva_hwmod = {
@@ -112,6 +112,15 @@ static struct omap_hwmod omap3xxx_iva_hwmod = {
 	.rst_lines	= omap3xxx_iva_resets,
 	.rst_lines_cnt	= ARRAY_SIZE(omap3xxx_iva_resets),
 	.main_clk	= "iva2_ck",
+	.prcm = {
+		.omap2 = {
+			.module_offs = OMAP3430_IVA2_MOD,
+			.prcm_reg_id = 1,
+			.module_bit = OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_SHIFT,
+			.idlest_reg_id = 1,
+			.idlest_idle_bit = OMAP3430_ST_IVA2_SHIFT,
+		}
+	},
 };
 
 /* timer class */
@@ -4209,7 +4209,7 @@ static struct omap_hwmod_ocp_if omap44xx_dsp__iva = {
 };
 
 /* dsp -> sl2if */
-static struct omap_hwmod_ocp_if omap44xx_dsp__sl2if = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_dsp__sl2if = {
 	.master		= &omap44xx_dsp_hwmod,
 	.slave		= &omap44xx_sl2if_hwmod,
 	.clk		= "dpll_iva_m5x2_ck",
@@ -4827,7 +4827,7 @@ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__iss = {
 };
 
 /* iva -> sl2if */
-static struct omap_hwmod_ocp_if omap44xx_iva__sl2if = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_iva__sl2if = {
 	.master		= &omap44xx_iva_hwmod,
 	.slave		= &omap44xx_sl2if_hwmod,
 	.clk		= "dpll_iva_m5x2_ck",
@@ -5361,7 +5361,7 @@ static struct omap_hwmod_ocp_if omap44xx_l4_wkup__scrm = {
 };
 
 /* l3_main_2 -> sl2if */
-static struct omap_hwmod_ocp_if omap44xx_l3_main_2__sl2if = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_l3_main_2__sl2if = {
 	.master		= &omap44xx_l3_main_2_hwmod,
 	.slave		= &omap44xx_sl2if_hwmod,
 	.clk		= "l3_div_ck",
@@ -6031,7 +6031,7 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
 	&omap44xx_l4_abe__dmic,
 	&omap44xx_l4_abe__dmic_dma,
 	&omap44xx_dsp__iva,
-	&omap44xx_dsp__sl2if,
+	/* &omap44xx_dsp__sl2if, */
 	&omap44xx_l4_cfg__dsp,
 	&omap44xx_l3_main_2__dss,
 	&omap44xx_l4_per__dss,
@@ -6067,7 +6067,7 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
 	&omap44xx_l4_per__i2c4,
 	&omap44xx_l3_main_2__ipu,
 	&omap44xx_l3_main_2__iss,
-	&omap44xx_iva__sl2if,
+	/* &omap44xx_iva__sl2if, */
 	&omap44xx_l3_main_2__iva,
 	&omap44xx_l4_wkup__kbd,
 	&omap44xx_l4_cfg__mailbox,
@@ -6098,7 +6098,7 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
 	&omap44xx_l4_cfg__cm_core,
 	&omap44xx_l4_wkup__prm,
 	&omap44xx_l4_wkup__scrm,
-	&omap44xx_l3_main_2__sl2if,
+	/* &omap44xx_l3_main_2__sl2if, */
 	&omap44xx_l4_abe__slimbus1,
 	&omap44xx_l4_abe__slimbus1_dma,
 	&omap44xx_l4_per__slimbus2,
@@ -262,6 +262,7 @@ static u32 notrace dmtimer_read_sched_clock(void)
 	return 0;
 }
 
+#ifdef CONFIG_OMAP_32K_TIMER
 /* Setup free-running counter for clocksource */
 static int __init omap2_sync32k_clocksource_init(void)
 {
@@ -301,6 +302,12 @@ static int __init omap2_sync32k_clocksource_init(void)
 
 	return ret;
 }
+#else
+static inline int omap2_sync32k_clocksource_init(void)
+{
+	return -ENODEV;
+}
+#endif
 
 static void __init omap2_gptimer_clocksource_init(int gptimer_id,
 						  const char *fck_source)
@@ -110,13 +110,6 @@ config TEGRA_DEBUG_UART_AUTO_SCRATCH
 
 endchoice
 
-config TEGRA_SYSTEM_DMA
-	bool "Enable system DMA driver for NVIDIA Tegra SoCs"
-	default y
-	help
-	  Adds system DMA functionality for NVIDIA Tegra SoCs, used by
-	  several Tegra device drivers
-
 config TEGRA_EMC_SCALING_ENABLE
 	bool "Enable scaling the memory frequency"
 
@@ -18,7 +18,6 @@ obj-$(CONFIG_ARCH_TEGRA_3x_SOC)		+= tegra30_clocks.o
 obj-$(CONFIG_SMP)			+= platsmp.o headsmp.o
 obj-$(CONFIG_SMP)			+= reset.o
 obj-$(CONFIG_HOTPLUG_CPU)		+= hotplug.o
-obj-$(CONFIG_TEGRA_SYSTEM_DMA)		+= dma.o
 obj-$(CONFIG_CPU_FREQ)			+= cpu-tegra.o
 obj-$(CONFIG_TEGRA_PCI)			+= pcie.o
 obj-$(CONFIG_USB_SUPPORT)		+= usb_phy.o
@@ -28,7 +28,7 @@
 
 #include "apbio.h"
 
-#if defined(CONFIG_TEGRA_SYSTEM_DMA) || defined(CONFIG_TEGRA20_APB_DMA)
+#if defined(CONFIG_TEGRA20_APB_DMA)
 static DEFINE_MUTEX(tegra_apb_dma_lock);
 static u32 *tegra_apb_bb;
 static dma_addr_t tegra_apb_bb_phys;
@@ -37,121 +37,6 @@ static DECLARE_COMPLETION(tegra_apb_wait);
 static u32 tegra_apb_readl_direct(unsigned long offset);
 static void tegra_apb_writel_direct(u32 value, unsigned long offset);
 
-#if defined(CONFIG_TEGRA_SYSTEM_DMA)
-static struct tegra_dma_channel *tegra_apb_dma;
-
-bool tegra_apb_init(void)
-{
-	struct tegra_dma_channel *ch;
-
-	mutex_lock(&tegra_apb_dma_lock);
-
-	/* Check to see if we raced to setup */
-	if (tegra_apb_dma)
-		goto out;
-
-	ch = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT |
-					TEGRA_DMA_SHARED);
-
-	if (!ch)
-		goto out_fail;
-
-	tegra_apb_bb = dma_alloc_coherent(NULL, sizeof(u32),
-		&tegra_apb_bb_phys, GFP_KERNEL);
-	if (!tegra_apb_bb) {
-		pr_err("%s: can not allocate bounce buffer\n", __func__);
-		tegra_dma_free_channel(ch);
-		goto out_fail;
-	}
-
-	tegra_apb_dma = ch;
-out:
-	mutex_unlock(&tegra_apb_dma_lock);
-	return true;
-
-out_fail:
-	mutex_unlock(&tegra_apb_dma_lock);
-	return false;
-}
-
-static void apb_dma_complete(struct tegra_dma_req *req)
-{
-	complete(&tegra_apb_wait);
-}
-
-static u32 tegra_apb_readl_using_dma(unsigned long offset)
-{
-	struct tegra_dma_req req;
-	int ret;
-
-	if (!tegra_apb_dma && !tegra_apb_init())
-		return tegra_apb_readl_direct(offset);
-
-	mutex_lock(&tegra_apb_dma_lock);
-	req.complete = apb_dma_complete;
-	req.to_memory = 1;
-	req.dest_addr = tegra_apb_bb_phys;
-	req.dest_bus_width = 32;
-	req.dest_wrap = 1;
-	req.source_addr = offset;
-	req.source_bus_width = 32;
-	req.source_wrap = 4;
-	req.req_sel = TEGRA_DMA_REQ_SEL_CNTR;
-	req.size = 4;
-
-	INIT_COMPLETION(tegra_apb_wait);
-
-	tegra_dma_enqueue_req(tegra_apb_dma, &req);
-
-	ret = wait_for_completion_timeout(&tegra_apb_wait,
-		msecs_to_jiffies(50));
-
-	if (WARN(ret == 0, "apb read dma timed out")) {
-		tegra_dma_dequeue_req(tegra_apb_dma, &req);
-		*(u32 *)tegra_apb_bb = 0;
-	}
-
-	mutex_unlock(&tegra_apb_dma_lock);
-	return *((u32 *)tegra_apb_bb);
-}
-
-static void tegra_apb_writel_using_dma(u32 value, unsigned long offset)
-{
-	struct tegra_dma_req req;
-	int ret;
-
-	if (!tegra_apb_dma && !tegra_apb_init()) {
-		tegra_apb_writel_direct(value, offset);
-		return;
-	}
-
-	mutex_lock(&tegra_apb_dma_lock);
-	*((u32 *)tegra_apb_bb) = value;
-	req.complete = apb_dma_complete;
-	req.to_memory = 0;
-	req.dest_addr = offset;
-	req.dest_wrap = 4;
-	req.dest_bus_width = 32;
-	req.source_addr = tegra_apb_bb_phys;
-	req.source_bus_width = 32;
-	req.source_wrap = 1;
-	req.req_sel = TEGRA_DMA_REQ_SEL_CNTR;
-	req.size = 4;
-
-	INIT_COMPLETION(tegra_apb_wait);
-
-	tegra_dma_enqueue_req(tegra_apb_dma, &req);
-
-	ret = wait_for_completion_timeout(&tegra_apb_wait,
-		msecs_to_jiffies(50));
-
-	if (WARN(ret == 0, "apb write dma timed out"))
-		tegra_dma_dequeue_req(tegra_apb_dma, &req);
-
-	mutex_unlock(&tegra_apb_dma_lock);
-}
-
-#else
 static struct dma_chan *tegra_apb_dma_chan;
 static struct dma_slave_config dma_sconfig;
 
@@ -279,7 +164,6 @@ static void tegra_apb_writel_using_dma(u32 value, unsigned long offset)
 		pr_err("error in writing offset 0x%08lx using dma\n", offset);
 	mutex_unlock(&tegra_apb_dma_lock);
 }
-#endif
 #else
 #define tegra_apb_readl_using_dma tegra_apb_readl_direct
 #define tegra_apb_writel_using_dma tegra_apb_writel_direct
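The dmaengine-based path that remains in apbio.c (outside this hunk) follows the same bounce-buffer pattern as the deleted code. Roughly, as a sketch with simplified error handling and an illustrative function name; it reuses the tegra_apb_dma_chan, dma_sconfig, tegra_apb_bb/tegra_apb_bb_phys and tegra_apb_wait objects visible in the context lines above:

/* Sketch of the dmaengine-based one-shot APB read. */
static u32 apb_readl_dma_sketch(unsigned long offset)
{
	struct dma_async_tx_descriptor *desc;

	dma_sconfig.src_addr = offset;
	dma_sconfig.direction = DMA_DEV_TO_MEM;
	if (dmaengine_slave_config(tegra_apb_dma_chan, &dma_sconfig))
		return tegra_apb_readl_direct(offset);

	desc = dmaengine_prep_slave_single(tegra_apb_dma_chan,
					   tegra_apb_bb_phys, sizeof(u32),
					   DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!desc)
		return tegra_apb_readl_direct(offset);

	desc->callback = apb_dma_complete;	/* completes tegra_apb_wait */
	desc->callback_param = NULL;

	INIT_COMPLETION(tegra_apb_wait);
	dmaengine_submit(desc);
	dma_async_issue_pending(tegra_apb_dma_chan);

	if (!wait_for_completion_timeout(&tegra_apb_wait,
					 msecs_to_jiffies(50)))
		return 0;	/* timed out; real code also stops the channel */

	return *(u32 *)tegra_apb_bb;
}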
@@ -1,823 +0,0 @@
-/*
- * arch/arm/mach-tegra/dma.c
- *
- * System DMA driver for NVIDIA Tegra SoCs
- *
- * Copyright (c) 2008-2009, NVIDIA Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#include <linux/io.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/err.h>
-#include <linux/irq.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <mach/dma.h>
-#include <mach/irqs.h>
-#include <mach/iomap.h>
-#include <mach/suspend.h>
-
-#include "apbio.h"
-
-#define APB_DMA_GEN			0x000
-#define GEN_ENABLE			(1<<31)
-
-#define APB_DMA_CNTRL			0x010
-
-#define APB_DMA_IRQ_MASK		0x01c
-
-#define APB_DMA_IRQ_MASK_SET		0x020
-
-#define APB_DMA_CHAN_CSR		0x000
-#define CSR_ENB				(1<<31)
-#define CSR_IE_EOC			(1<<30)
-#define CSR_HOLD			(1<<29)
-#define CSR_DIR				(1<<28)
-#define CSR_ONCE			(1<<27)
-#define CSR_FLOW			(1<<21)
-#define CSR_REQ_SEL_SHIFT		16
-#define CSR_WCOUNT_SHIFT		2
-#define CSR_WCOUNT_MASK			0xFFFC
-
-#define APB_DMA_CHAN_STA		0x004
-#define STA_BUSY			(1<<31)
-#define STA_ISE_EOC			(1<<30)
-#define STA_HALT			(1<<29)
-#define STA_PING_PONG			(1<<28)
-#define STA_COUNT_SHIFT			2
-#define STA_COUNT_MASK			0xFFFC
-
-#define APB_DMA_CHAN_AHB_PTR		0x010
-
-#define APB_DMA_CHAN_AHB_SEQ		0x014
-#define AHB_SEQ_INTR_ENB		(1<<31)
-#define AHB_SEQ_BUS_WIDTH_SHIFT		28
-#define AHB_SEQ_BUS_WIDTH_MASK		(0x7<<AHB_SEQ_BUS_WIDTH_SHIFT)
-#define AHB_SEQ_BUS_WIDTH_8		(0<<AHB_SEQ_BUS_WIDTH_SHIFT)
-#define AHB_SEQ_BUS_WIDTH_16		(1<<AHB_SEQ_BUS_WIDTH_SHIFT)
-#define AHB_SEQ_BUS_WIDTH_32		(2<<AHB_SEQ_BUS_WIDTH_SHIFT)
-#define AHB_SEQ_BUS_WIDTH_64		(3<<AHB_SEQ_BUS_WIDTH_SHIFT)
-#define AHB_SEQ_BUS_WIDTH_128		(4<<AHB_SEQ_BUS_WIDTH_SHIFT)
-#define AHB_SEQ_DATA_SWAP		(1<<27)
-#define AHB_SEQ_BURST_MASK		(0x7<<24)
-#define AHB_SEQ_BURST_1			(4<<24)
-#define AHB_SEQ_BURST_4			(5<<24)
-#define AHB_SEQ_BURST_8			(6<<24)
-#define AHB_SEQ_DBL_BUF			(1<<19)
-#define AHB_SEQ_WRAP_SHIFT		16
-#define AHB_SEQ_WRAP_MASK		(0x7<<AHB_SEQ_WRAP_SHIFT)
-
-#define APB_DMA_CHAN_APB_PTR		0x018
-
-#define APB_DMA_CHAN_APB_SEQ		0x01c
-#define APB_SEQ_BUS_WIDTH_SHIFT		28
-#define APB_SEQ_BUS_WIDTH_MASK		(0x7<<APB_SEQ_BUS_WIDTH_SHIFT)
-#define APB_SEQ_BUS_WIDTH_8		(0<<APB_SEQ_BUS_WIDTH_SHIFT)
-#define APB_SEQ_BUS_WIDTH_16		(1<<APB_SEQ_BUS_WIDTH_SHIFT)
-#define APB_SEQ_BUS_WIDTH_32		(2<<APB_SEQ_BUS_WIDTH_SHIFT)
-#define APB_SEQ_BUS_WIDTH_64		(3<<APB_SEQ_BUS_WIDTH_SHIFT)
-#define APB_SEQ_BUS_WIDTH_128		(4<<APB_SEQ_BUS_WIDTH_SHIFT)
-#define APB_SEQ_DATA_SWAP		(1<<27)
-#define APB_SEQ_WRAP_SHIFT		16
-#define APB_SEQ_WRAP_MASK		(0x7<<APB_SEQ_WRAP_SHIFT)
-
-#define TEGRA_SYSTEM_DMA_CH_NR		16
-#define TEGRA_SYSTEM_DMA_AVP_CH_NUM	4
-#define TEGRA_SYSTEM_DMA_CH_MIN		0
-#define TEGRA_SYSTEM_DMA_CH_MAX	\
-	(TEGRA_SYSTEM_DMA_CH_NR - TEGRA_SYSTEM_DMA_AVP_CH_NUM - 1)
-
-#define NV_DMA_MAX_TRASFER_SIZE 0x10000
-
-static const unsigned int ahb_addr_wrap_table[8] = {
-	0, 32, 64, 128, 256, 512, 1024, 2048
-};
-
-static const unsigned int apb_addr_wrap_table[8] = {
-	0, 1, 2, 4, 8, 16, 32, 64
-};
-
-static const unsigned int bus_width_table[5] = {
-	8, 16, 32, 64, 128
-};
-
-#define TEGRA_DMA_NAME_SIZE 16
-struct tegra_dma_channel {
-	struct list_head	list;
-	int			id;
-	spinlock_t		lock;
-	char			name[TEGRA_DMA_NAME_SIZE];
-	void  __iomem		*addr;
-	int			mode;
-	int			irq;
-	int			req_transfer_count;
-};
-
-#define  NV_DMA_MAX_CHANNELS  32
-
-static bool tegra_dma_initialized;
-static DEFINE_MUTEX(tegra_dma_lock);
-static DEFINE_SPINLOCK(enable_lock);
-
-static DECLARE_BITMAP(channel_usage, NV_DMA_MAX_CHANNELS);
-static struct tegra_dma_channel dma_channels[NV_DMA_MAX_CHANNELS];
-
-static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
-	struct tegra_dma_req *req);
-static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
-	struct tegra_dma_req *req);
-static void tegra_dma_stop(struct tegra_dma_channel *ch);
-
-void tegra_dma_flush(struct tegra_dma_channel *ch)
-{
-}
-EXPORT_SYMBOL(tegra_dma_flush);
-
-void tegra_dma_dequeue(struct tegra_dma_channel *ch)
-{
-	struct tegra_dma_req *req;
-
-	if (tegra_dma_is_empty(ch))
-		return;
-
-	req = list_entry(ch->list.next, typeof(*req), node);
-
-	tegra_dma_dequeue_req(ch, req);
-	return;
-}
-
-static void tegra_dma_stop(struct tegra_dma_channel *ch)
-{
-	u32 csr;
-	u32 status;
-
-	csr = readl(ch->addr + APB_DMA_CHAN_CSR);
-	csr &= ~CSR_IE_EOC;
-	writel(csr, ch->addr + APB_DMA_CHAN_CSR);
-
-	csr &= ~CSR_ENB;
-	writel(csr, ch->addr + APB_DMA_CHAN_CSR);
-
-	status = readl(ch->addr + APB_DMA_CHAN_STA);
-	if (status & STA_ISE_EOC)
-		writel(status, ch->addr + APB_DMA_CHAN_STA);
-}
-
-static int tegra_dma_cancel(struct tegra_dma_channel *ch)
-{
-	unsigned long irq_flags;
-
-	spin_lock_irqsave(&ch->lock, irq_flags);
-	while (!list_empty(&ch->list))
-		list_del(ch->list.next);
-
-	tegra_dma_stop(ch);
-
-	spin_unlock_irqrestore(&ch->lock, irq_flags);
-	return 0;
-}
-
-static unsigned int get_channel_status(struct tegra_dma_channel *ch,
-			struct tegra_dma_req *req, bool is_stop_dma)
-{
-	void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
-	unsigned int status;
-
-	if (is_stop_dma) {
-		/*
-		 * STOP the DMA and get the transfer count.
-		 * Getting the transfer count is tricky.
-		 *  - Globally disable DMA on all channels
-		 *  - Read the channel's status register to know the number
-		 *    of pending bytes to be transfered.
-		 *  - Stop the dma channel
-		 *  - Globally re-enable DMA to resume other transfers
-		 */
-		spin_lock(&enable_lock);
-		writel(0, addr + APB_DMA_GEN);
-		udelay(20);
-		status = readl(ch->addr + APB_DMA_CHAN_STA);
-		tegra_dma_stop(ch);
-		writel(GEN_ENABLE, addr + APB_DMA_GEN);
-		spin_unlock(&enable_lock);
-		if (status & STA_ISE_EOC) {
-			pr_err("Got Dma Int here clearing");
-			writel(status, ch->addr + APB_DMA_CHAN_STA);
-		}
-		req->status = TEGRA_DMA_REQ_ERROR_ABORTED;
-	} else {
-		status = readl(ch->addr + APB_DMA_CHAN_STA);
-	}
-	return status;
-}
-
-/* should be called with the channel lock held */
-static unsigned int dma_active_count(struct tegra_dma_channel *ch,
-	struct tegra_dma_req *req, unsigned int status)
-{
-	unsigned int to_transfer;
-	unsigned int req_transfer_count;
-	unsigned int bytes_transferred;
-
-	to_transfer = ((status & STA_COUNT_MASK) >> STA_COUNT_SHIFT) + 1;
-	req_transfer_count = ch->req_transfer_count + 1;
-	bytes_transferred = req_transfer_count;
-	if (status & STA_BUSY)
-		bytes_transferred -= to_transfer;
-	/*
-	 * In continuous transfer mode, DMA only tracks the count of the
-	 * half DMA buffer. So, if the DMA already finished half the DMA
-	 * then add the half buffer to the completed count.
-	 */
-	if (ch->mode & TEGRA_DMA_MODE_CONTINOUS) {
-		if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL)
-			bytes_transferred += req_transfer_count;
-		if (status & STA_ISE_EOC)
-			bytes_transferred += req_transfer_count;
-	}
-	bytes_transferred *= 4;
-	return bytes_transferred;
-}
-
-int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
-	struct tegra_dma_req *_req)
-{
-	unsigned int status;
-	struct tegra_dma_req *req = NULL;
-	int found = 0;
-	unsigned long irq_flags;
-	int stop = 0;
-
-	spin_lock_irqsave(&ch->lock, irq_flags);
-
-	if (list_entry(ch->list.next, struct tegra_dma_req, node) == _req)
-		stop = 1;
-
-	list_for_each_entry(req, &ch->list, node) {
-		if (req == _req) {
-			list_del(&req->node);
-			found = 1;
-			break;
-		}
-	}
-	if (!found) {
-		spin_unlock_irqrestore(&ch->lock, irq_flags);
-		return 0;
-	}
-
-	if (!stop)
-		goto skip_stop_dma;
-
-	status = get_channel_status(ch, req, true);
-	req->bytes_transferred = dma_active_count(ch, req, status);
-
-	if (!list_empty(&ch->list)) {
-		/* if the list is not empty, queue the next request */
-		struct tegra_dma_req *next_req;
-		next_req = list_entry(ch->list.next,
-			typeof(*next_req), node);
-		tegra_dma_update_hw(ch, next_req);
-	}
-
-skip_stop_dma:
-	req->status = -TEGRA_DMA_REQ_ERROR_ABORTED;
-
-	spin_unlock_irqrestore(&ch->lock, irq_flags);
-
-	/* Callback should be called without any lock */
-	req->complete(req);
-	return 0;
-}
-EXPORT_SYMBOL(tegra_dma_dequeue_req);
-
-bool tegra_dma_is_empty(struct tegra_dma_channel *ch)
-{
-	unsigned long irq_flags;
-	bool is_empty;
-
-	spin_lock_irqsave(&ch->lock, irq_flags);
-	if (list_empty(&ch->list))
-		is_empty = true;
-	else
-		is_empty = false;
-	spin_unlock_irqrestore(&ch->lock, irq_flags);
-	return is_empty;
-}
-EXPORT_SYMBOL(tegra_dma_is_empty);
-
-bool tegra_dma_is_req_inflight(struct tegra_dma_channel *ch,
-	struct tegra_dma_req *_req)
-{
-	unsigned long irq_flags;
-	struct tegra_dma_req *req;
-
-	spin_lock_irqsave(&ch->lock, irq_flags);
-	list_for_each_entry(req, &ch->list, node) {
-		if (req == _req) {
-			spin_unlock_irqrestore(&ch->lock, irq_flags);
-			return true;
-		}
-	}
-	spin_unlock_irqrestore(&ch->lock, irq_flags);
-	return false;
-}
-EXPORT_SYMBOL(tegra_dma_is_req_inflight);
-
-int tegra_dma_enqueue_req(struct tegra_dma_channel *ch,
-	struct tegra_dma_req *req)
-{
-	unsigned long irq_flags;
-	struct tegra_dma_req *_req;
-	int start_dma = 0;
-
-	if (req->size > NV_DMA_MAX_TRASFER_SIZE ||
-		req->source_addr & 0x3 || req->dest_addr & 0x3) {
-		pr_err("Invalid DMA request for channel %d\n", ch->id);
-		return -EINVAL;
-	}
-
-	spin_lock_irqsave(&ch->lock, irq_flags);
-
-	list_for_each_entry(_req, &ch->list, node) {
-		if (req == _req) {
-			spin_unlock_irqrestore(&ch->lock, irq_flags);
-			return -EEXIST;
-		}
-	}
-
-	req->bytes_transferred = 0;
-	req->status = 0;
-	req->buffer_status = 0;
-	if (list_empty(&ch->list))
-		start_dma = 1;
-
-	list_add_tail(&req->node, &ch->list);
-
-	if (start_dma)
-		tegra_dma_update_hw(ch, req);
-
-	spin_unlock_irqrestore(&ch->lock, irq_flags);
-
-	return 0;
-}
-EXPORT_SYMBOL(tegra_dma_enqueue_req);
-
-struct tegra_dma_channel *tegra_dma_allocate_channel(int mode)
-{
-	int channel;
-	struct tegra_dma_channel *ch = NULL;
-
-	if (!tegra_dma_initialized)
-		return NULL;
-
-	mutex_lock(&tegra_dma_lock);
-
-	/* first channel is the shared channel */
-	if (mode & TEGRA_DMA_SHARED) {
-		channel = TEGRA_SYSTEM_DMA_CH_MIN;
-	} else {
-		channel = find_first_zero_bit(channel_usage,
-			ARRAY_SIZE(dma_channels));
-		if (channel >= ARRAY_SIZE(dma_channels))
-			goto out;
-	}
-	__set_bit(channel, channel_usage);
-	ch = &dma_channels[channel];
-	ch->mode = mode;
-
-out:
-	mutex_unlock(&tegra_dma_lock);
-	return ch;
-}
-EXPORT_SYMBOL(tegra_dma_allocate_channel);
-
-void tegra_dma_free_channel(struct tegra_dma_channel *ch)
-{
-	if (ch->mode & TEGRA_DMA_SHARED)
-		return;
-	tegra_dma_cancel(ch);
-	mutex_lock(&tegra_dma_lock);
-	__clear_bit(ch->id, channel_usage);
-	mutex_unlock(&tegra_dma_lock);
-}
-EXPORT_SYMBOL(tegra_dma_free_channel);
-
-static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
-	struct tegra_dma_req *req)
-{
-	u32 apb_ptr;
-	u32 ahb_ptr;
-
-	if (req->to_memory) {
-		apb_ptr = req->source_addr;
-		ahb_ptr = req->dest_addr;
-	} else {
-		apb_ptr = req->dest_addr;
-		ahb_ptr = req->source_addr;
-	}
-	writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
-	writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);
-
-	req->status = TEGRA_DMA_REQ_INFLIGHT;
-	return;
-}
-
-static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
-	struct tegra_dma_req *req)
-{
-	int ahb_addr_wrap;
-	int apb_addr_wrap;
-	int ahb_bus_width;
-	int apb_bus_width;
-	int index;
-
-	u32 ahb_seq;
-	u32 apb_seq;
-	u32 ahb_ptr;
-	u32 apb_ptr;
-	u32 csr;
-
-	csr = CSR_IE_EOC | CSR_FLOW;
-	ahb_seq = AHB_SEQ_INTR_ENB | AHB_SEQ_BURST_1;
-	apb_seq = 0;
-
-	csr |= req->req_sel << CSR_REQ_SEL_SHIFT;
-
-	/* One shot mode is always single buffered,
-	 * continuous mode is always double buffered
-	 * */
-	if (ch->mode & TEGRA_DMA_MODE_ONESHOT) {
-		csr |= CSR_ONCE;
-		ch->req_transfer_count = (req->size >> 2) - 1;
-	} else {
-		ahb_seq |= AHB_SEQ_DBL_BUF;
-
-		/* In double buffered mode, we set the size to half the
-		 * requested size and interrupt when half the buffer
-		 * is full */
-		ch->req_transfer_count = (req->size >> 3) - 1;
-	}
-
-	csr |= ch->req_transfer_count << CSR_WCOUNT_SHIFT;
-
-	if (req->to_memory) {
-		apb_ptr = req->source_addr;
-		ahb_ptr = req->dest_addr;
-
-		apb_addr_wrap = req->source_wrap;
-		ahb_addr_wrap = req->dest_wrap;
-		apb_bus_width = req->source_bus_width;
-		ahb_bus_width = req->dest_bus_width;
-
-	} else {
-		csr |= CSR_DIR;
-		apb_ptr = req->dest_addr;
-		ahb_ptr = req->source_addr;
-
-		apb_addr_wrap = req->dest_wrap;
-		ahb_addr_wrap = req->source_wrap;
-		apb_bus_width = req->dest_bus_width;
-		ahb_bus_width = req->source_bus_width;
-	}
-
-	apb_addr_wrap >>= 2;
-	ahb_addr_wrap >>= 2;
-
-	/* set address wrap for APB size */
-	index = 0;
-	do  {
-		if (apb_addr_wrap_table[index] == apb_addr_wrap)
-			break;
-		index++;
-	} while (index < ARRAY_SIZE(apb_addr_wrap_table));
-	BUG_ON(index == ARRAY_SIZE(apb_addr_wrap_table));
-	apb_seq |= index << APB_SEQ_WRAP_SHIFT;
-
-	/* set address wrap for AHB size */
-	index = 0;
-	do  {
-		if (ahb_addr_wrap_table[index] == ahb_addr_wrap)
-			break;
-		index++;
-	} while (index < ARRAY_SIZE(ahb_addr_wrap_table));
-	BUG_ON(index == ARRAY_SIZE(ahb_addr_wrap_table));
-	ahb_seq |= index << AHB_SEQ_WRAP_SHIFT;
-
-	for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
-		if (bus_width_table[index] == ahb_bus_width)
-			break;
-	}
-	BUG_ON(index == ARRAY_SIZE(bus_width_table));
-	ahb_seq |= index << AHB_SEQ_BUS_WIDTH_SHIFT;
-
-	for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
-		if (bus_width_table[index] == apb_bus_width)
-			break;
-	}
-	BUG_ON(index == ARRAY_SIZE(bus_width_table));
-	apb_seq |= index << APB_SEQ_BUS_WIDTH_SHIFT;
-
-	writel(csr, ch->addr + APB_DMA_CHAN_CSR);
-	writel(apb_seq, ch->addr + APB_DMA_CHAN_APB_SEQ);
-	writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
-	writel(ahb_seq, ch->addr + APB_DMA_CHAN_AHB_SEQ);
-	writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);
-
-	csr |= CSR_ENB;
-	writel(csr, ch->addr + APB_DMA_CHAN_CSR);
-
-	req->status = TEGRA_DMA_REQ_INFLIGHT;
-}
-
-static void handle_oneshot_dma(struct tegra_dma_channel *ch)
-{
-	struct tegra_dma_req *req;
-	unsigned long irq_flags;
-
-	spin_lock_irqsave(&ch->lock, irq_flags);
-	if (list_empty(&ch->list)) {
-		spin_unlock_irqrestore(&ch->lock, irq_flags);
-		return;
-	}
-
-	req = list_entry(ch->list.next, typeof(*req), node);
-	if (req) {
-		int bytes_transferred;
-
-		bytes_transferred = ch->req_transfer_count;
-		bytes_transferred += 1;
-		bytes_transferred <<= 2;
-
-		list_del(&req->node);
-		req->bytes_transferred = bytes_transferred;
-		req->status = TEGRA_DMA_REQ_SUCCESS;
-
-		spin_unlock_irqrestore(&ch->lock, irq_flags);
-		/* Callback should be called without any lock */
-		pr_debug("%s: transferred %d bytes\n", __func__,
-			req->bytes_transferred);
-		req->complete(req);
-		spin_lock_irqsave(&ch->lock, irq_flags);
-	}
-
-	if (!list_empty(&ch->list)) {
-		req = list_entry(ch->list.next, typeof(*req), node);
-		/* the complete function we just called may have enqueued
-		   another req, in which case dma has already started */
-		if (req->status != TEGRA_DMA_REQ_INFLIGHT)
-			tegra_dma_update_hw(ch, req);
-	}
-	spin_unlock_irqrestore(&ch->lock, irq_flags);
-}
-
-static void handle_continuous_dma(struct tegra_dma_channel *ch)
-{
-	struct tegra_dma_req *req;
-	unsigned long irq_flags;
-
-	spin_lock_irqsave(&ch->lock, irq_flags);
-	if (list_empty(&ch->list)) {
-		spin_unlock_irqrestore(&ch->lock, irq_flags);
-		return;
-	}
-
-	req = list_entry(ch->list.next, typeof(*req), node);
-	if (req) {
-		if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_EMPTY) {
-			bool is_dma_ping_complete;
-			is_dma_ping_complete = (readl(ch->addr + APB_DMA_CHAN_STA)
-				& STA_PING_PONG) ? true : false;
-			if (req->to_memory)
-				is_dma_ping_complete = !is_dma_ping_complete;
-			/* Out of sync - Release current buffer */
-			if (!is_dma_ping_complete) {
-				int bytes_transferred;
-
-				bytes_transferred = ch->req_transfer_count;
-				bytes_transferred += 1;
-				bytes_transferred <<= 3;
-				req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
-				req->bytes_transferred = bytes_transferred;
-				req->status = TEGRA_DMA_REQ_SUCCESS;
-				tegra_dma_stop(ch);
-
-				if (!list_is_last(&req->node, &ch->list)) {
-					struct tegra_dma_req *next_req;
-
-					next_req = list_entry(req->node.next,
-						typeof(*next_req), node);
-					tegra_dma_update_hw(ch, next_req);
-				}
-
-				list_del(&req->node);
-
-				/* DMA lock is NOT held when callbak is called */
-				spin_unlock_irqrestore(&ch->lock, irq_flags);
-				req->complete(req);
-				return;
-			}
-			/* Load the next request into the hardware, if available
-			 * */
-			if (!list_is_last(&req->node, &ch->list)) {
-				struct tegra_dma_req *next_req;
-
-				next_req = list_entry(req->node.next,
-					typeof(*next_req), node);
-				tegra_dma_update_hw_partial(ch, next_req);
-			}
-			req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL;
-			req->status = TEGRA_DMA_REQ_SUCCESS;
-			/* DMA lock is NOT held when callback is called */
-			spin_unlock_irqrestore(&ch->lock, irq_flags);
-			if (likely(req->threshold))
-				req->threshold(req);
-			return;
-
-		} else if (req->buffer_status ==
-			TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL) {
-			/* Callback when the buffer is completely full (i.e on
-			 * the second  interrupt */
-			int bytes_transferred;
-
-			bytes_transferred = ch->req_transfer_count;
-			bytes_transferred += 1;
-			bytes_transferred <<= 3;
-
-			req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
-			req->bytes_transferred = bytes_transferred;
-			req->status = TEGRA_DMA_REQ_SUCCESS;
-			list_del(&req->node);
-
-			/* DMA lock is NOT held when callbak is called */
-			spin_unlock_irqrestore(&ch->lock, irq_flags);
-			req->complete(req);
-			return;
-
-		} else {
-			BUG();
-		}
-	}
-	spin_unlock_irqrestore(&ch->lock, irq_flags);
-}
-
-static irqreturn_t dma_isr(int irq, void *data)
-{
-	struct tegra_dma_channel *ch = data;
-	unsigned long status;
-
-	status = readl(ch->addr + APB_DMA_CHAN_STA);
-	if (status & STA_ISE_EOC)
-		writel(status, ch->addr + APB_DMA_CHAN_STA);
-	else {
-		pr_warning("Got a spurious ISR for DMA channel %d\n", ch->id);
-		return IRQ_HANDLED;
-	}
-	return IRQ_WAKE_THREAD;
-}
-
-static irqreturn_t dma_thread_fn(int irq, void *data)
-{
-	struct tegra_dma_channel *ch = data;
-
-	if (ch->mode & TEGRA_DMA_MODE_ONESHOT)
-		handle_oneshot_dma(ch);
-	else
-		handle_continuous_dma(ch);
-
-
-	return IRQ_HANDLED;
-}
-
-int __init tegra_dma_init(void)
-{
-	int ret = 0;
-	int i;
-	unsigned int irq;
-	void __iomem *addr;
-	struct clk *c;
-
-	bitmap_fill(channel_usage, NV_DMA_MAX_CHANNELS);
-
-	c = clk_get_sys("tegra-apbdma", NULL);
-	if (IS_ERR(c)) {
-		pr_err("Unable to get clock for APB DMA\n");
-		ret = PTR_ERR(c);
-		goto fail;
-	}
-	ret = clk_prepare_enable(c);
-	if (ret != 0) {
-		pr_err("Unable to enable clock for APB DMA\n");
-		goto fail;
-	}
-
-	addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
-	writel(GEN_ENABLE, addr + APB_DMA_GEN);
-	writel(0, addr + APB_DMA_CNTRL);
-	writel(0xFFFFFFFFul >> (31 - TEGRA_SYSTEM_DMA_CH_MAX),
-	       addr + APB_DMA_IRQ_MASK_SET);
-
-	for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
-		struct tegra_dma_channel *ch = &dma_channels[i];
-
-		ch->id = i;
-		snprintf(ch->name, TEGRA_DMA_NAME_SIZE, "dma_channel_%d", i);
-
-		ch->addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
-			TEGRA_APB_DMA_CH0_SIZE * i);
-
-		spin_lock_init(&ch->lock);
-		INIT_LIST_HEAD(&ch->list);
-
-		irq = INT_APB_DMA_CH0 + i;
-		ret = request_threaded_irq(irq, dma_isr, dma_thread_fn, 0,
-			dma_channels[i].name, ch);
-		if (ret) {
-			pr_err("Failed to register IRQ %d for DMA %d\n",
-				irq, i);
-			goto fail;
-		}
-		ch->irq = irq;
-
-		__clear_bit(i, channel_usage);
-	}
-	/* mark the shared channel allocated */
-	__set_bit(TEGRA_SYSTEM_DMA_CH_MIN, channel_usage);
-
-	tegra_dma_initialized = true;
-
-	return 0;
-fail:
-	writel(0, addr + APB_DMA_GEN);
-	for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
-		struct tegra_dma_channel *ch = &dma_channels[i];
-		if (ch->irq)
-			free_irq(ch->irq, ch);
-	}
-	return ret;
-}
-postcore_initcall(tegra_dma_init);
-
-#ifdef CONFIG_PM
-static u32 apb_dma[5*TEGRA_SYSTEM_DMA_CH_NR + 3];
-
-void tegra_dma_suspend(void)
-{
-	void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
-	u32 *ctx = apb_dma;
-	int i;
-
-	*ctx++ = readl(addr + APB_DMA_GEN);
-	*ctx++ = readl(addr + APB_DMA_CNTRL);
-	*ctx++ = readl(addr + APB_DMA_IRQ_MASK);
-
-	for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) {
-		addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
-				  TEGRA_APB_DMA_CH0_SIZE * i);
-
-		*ctx++ = readl(addr + APB_DMA_CHAN_CSR);
-		*ctx++ = readl(addr + APB_DMA_CHAN_AHB_PTR);
-		*ctx++ = readl(addr + APB_DMA_CHAN_AHB_SEQ);
-		*ctx++ = readl(addr + APB_DMA_CHAN_APB_PTR);
-		*ctx++ = readl(addr + APB_DMA_CHAN_APB_SEQ);
-	}
-}
-
-void tegra_dma_resume(void)
-{
-	void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
-	u32 *ctx = apb_dma;
-	int i;
-
-	writel(*ctx++, addr + APB_DMA_GEN);
-	writel(*ctx++, addr + APB_DMA_CNTRL);
-	writel(*ctx++, addr + APB_DMA_IRQ_MASK);
-
-	for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) {
-		addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
-				  TEGRA_APB_DMA_CH0_SIZE * i);
-
-		writel(*ctx++, addr + APB_DMA_CHAN_CSR);
-		writel(*ctx++, addr + APB_DMA_CHAN_AHB_PTR);
-		writel(*ctx++, addr + APB_DMA_CHAN_AHB_SEQ);
-		writel(*ctx++, addr + APB_DMA_CHAN_APB_PTR);
-		writel(*ctx++, addr + APB_DMA_CHAN_APB_SEQ);
-	}
-}
-
-#endif
@@ -51,101 +51,4 @@
#define TEGRA_DMA_REQ_SEL_OWR			25
#define TEGRA_DMA_REQ_SEL_INVALID		31

struct tegra_dma_req;
struct tegra_dma_channel;

enum tegra_dma_mode {
	TEGRA_DMA_SHARED = 1,
	TEGRA_DMA_MODE_CONTINOUS = 2,
	TEGRA_DMA_MODE_ONESHOT = 4,
};

enum tegra_dma_req_error {
	TEGRA_DMA_REQ_SUCCESS = 0,
	TEGRA_DMA_REQ_ERROR_ABORTED,
	TEGRA_DMA_REQ_INFLIGHT,
};

enum tegra_dma_req_buff_status {
	TEGRA_DMA_REQ_BUF_STATUS_EMPTY = 0,
	TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL,
	TEGRA_DMA_REQ_BUF_STATUS_FULL,
};

struct tegra_dma_req {
	struct list_head node;
	unsigned int modid;
	int instance;

	/* Called when the req is complete and from the DMA ISR context.
	 * When this is called the req structure is no longer queued by
	 * the DMA channel.
	 *
	 * State of the DMA depends on the number of req it has. If there are
	 * no DMA requests queued up, then it will STOP the DMA. If there are
	 * more requests in the DMA, then it will queue the next request.
	 */
	void (*complete)(struct tegra_dma_req *req);

	/* This is called from the DMA ISR context when the DMA is still in
	 * progress and is actively filling the same buffer.
	 *
	 * In case of continuous mode receive, this threshold is 1/2 the buffer
	 * size. In other cases, this will not even be called as there is no
	 * hardware support for it.
	 *
	 * In the case of continuous mode receive, if there is a next req already
	 * queued, DMA programs the HW to use that req when this req is
	 * completed. If there is no "next req" queued, then DMA ISR doesn't do
	 * anything before calling this callback.
	 *
	 * This is mainly used by the cases where the client has queued
	 * only one req and wants to get some sort of DMA threshold
	 * callback to program the next buffer.
	 */
	void (*threshold)(struct tegra_dma_req *req);

	/* 1 to copy to memory.
	 * 0 to copy from the memory to device FIFO */
	int to_memory;

	void *virt_addr;

	unsigned long source_addr;
	unsigned long dest_addr;
	unsigned long dest_wrap;
	unsigned long source_wrap;
	unsigned long source_bus_width;
	unsigned long dest_bus_width;
	unsigned long req_sel;
	unsigned int size;

	/* Updated by the DMA driver on the completion of the request. */
	int bytes_transferred;
	int status;

	/* DMA completion tracking information */
	int buffer_status;

	/* Client specific data */
	void *dev;
};

int tegra_dma_enqueue_req(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req);
int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req);
void tegra_dma_dequeue(struct tegra_dma_channel *ch);
void tegra_dma_flush(struct tegra_dma_channel *ch);

bool tegra_dma_is_req_inflight(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req);
bool tegra_dma_is_empty(struct tegra_dma_channel *ch);

struct tegra_dma_channel *tegra_dma_allocate_channel(int mode);
void tegra_dma_free_channel(struct tegra_dma_channel *ch);

int __init tegra_dma_init(void);

#endif
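For reference, this is roughly how a client drove the legacy one-shot API that the series deletes; the completion plumbing and the omission of req_sel/bus-width setup are illustrative assumptions, not copied from an in-tree user:

/* Hypothetical legacy client: allocate a channel, queue one request,
 * wait for the complete() callback, then release the channel. */
static void xfer_done(struct tegra_dma_req *req)
{
	/* bytes_transferred and status were filled in by the ISR */
	complete((struct completion *)req->dev);
}

static int legacy_oneshot_xfer(unsigned long src, unsigned long dst,
			       unsigned int size)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct tegra_dma_channel *ch;
	struct tegra_dma_req req = {
		.source_addr = src,
		.dest_addr = dst,
		.size = size,
		.to_memory = 1,
		.complete = xfer_done,
		.dev = &done,		/* client-specific data slot */
	};

	ch = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT);
	if (!ch)
		return -EBUSY;

	tegra_dma_enqueue_req(ch, &req);
	wait_for_completion(&done);
	tegra_dma_free_channel(ch);

	return req.status == TEGRA_DMA_REQ_SUCCESS ? 0 : -EIO;
}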
@@ -63,10 +63,11 @@ static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd,
 	pid = task_pid_nr(thread->task) << ASID_BITS;
 	asm volatile(
 	"	mrc	p15, 0, %0, c13, c0, 1\n"
-	"	bfi	%1, %0, #0, %2\n"
-	"	mcr	p15, 0, %1, c13, c0, 1\n"
+	"	and	%0, %0, %2\n"
+	"	orr	%0, %0, %1\n"
+	"	mcr	p15, 0, %0, c13, c0, 1\n"
 	: "=r" (contextidr), "+r" (pid)
-	: "I" (ASID_BITS));
+	: "I" (~ASID_MASK));
 	isb();
 
 	return NOTIFY_OK;
@@ -489,7 +489,7 @@ static bool __in_atomic_pool(void *start, size_t size)
 	void *pool_start = pool->vaddr;
 	void *pool_end = pool->vaddr + pool->size;
 
-	if (start < pool_start || start > pool_end)
+	if (start < pool_start || start >= pool_end)
 		return false;
 
 	if (end <= pool_end)
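The one-character change above makes the start-pointer test treat the pool as the half-open interval [pool_start, pool_end): pool_end is one past the last valid byte, so a block starting exactly there must be rejected. A standalone sketch of the corrected predicate:

#include <stdbool.h>
#include <stdio.h>

/* Corrected check: start must lie in the half-open range
 * [pool_start, pool_end) to be considered inside the pool. */
static bool in_pool(const char *start, const char *pool_start,
		    const char *pool_end)
{
	return !(start < pool_start || start >= pool_end);
}

int main(void)
{
	char pool[64];

	printf("%d\n", in_pool(pool, pool, pool + 64));      /* 1: first byte */
	printf("%d\n", in_pool(pool + 64, pool, pool + 64)); /* 0: one past the end */
	return 0;
}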
@@ -990,8 +990,8 @@ void __init sanity_check_meminfo(void)
 		 * Check whether this memory bank would partially overlap
 		 * the vmalloc area.
 		 */
-		if (__va(bank->start + bank->size) > vmalloc_min ||
-		    __va(bank->start + bank->size) < __va(bank->start)) {
+		if (__va(bank->start + bank->size - 1) >= vmalloc_min ||
+		    __va(bank->start + bank->size - 1) <= __va(bank->start)) {
 			unsigned long newsize = vmalloc_min - __va(bank->start);
 			printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx "
 			       "to -%.8llx (vmalloc region overlap).\n",
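The reason for the "- 1" above: for a bank that ends exactly at the top of the 32-bit address space, bank->start + bank->size wraps to zero and the old comparison misfires, while the address of the bank's last byte can never wrap. A minimal user-space illustration of the arithmetic (the bank values are made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t start = 0xF0000000u;	/* hypothetical bank base */
	uint32_t size  = 0x10000000u;	/* bank ends exactly at 4 GiB */

	/* old expression: wraps to 0, so the "< start" overflow test
	 * fires and the bank is truncated even though it is valid */
	printf("start + size     = 0x%08x\n", start + size);

	/* new expression: the last byte's address does not wrap */
	printf("start + size - 1 = 0x%08x\n", start + size - 1u);
	return 0;
}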
@@ -67,6 +67,7 @@
 
 static unsigned long omap_sram_start;
 static void __iomem *omap_sram_base;
+static unsigned long omap_sram_skip;
 static unsigned long omap_sram_size;
 static void __iomem *omap_sram_ceil;
 
@@ -105,6 +106,7 @@ static int is_sram_locked(void)
  */
 static void __init omap_detect_sram(void)
 {
+	omap_sram_skip = SRAM_BOOTLOADER_SZ;
 	if (cpu_class_is_omap2()) {
 		if (is_sram_locked()) {
 			if (cpu_is_omap34xx()) {
@@ -112,6 +114,7 @@ static void __init omap_detect_sram(void)
 				if ((omap_type() == OMAP2_DEVICE_TYPE_EMU) ||
 				    (omap_type() == OMAP2_DEVICE_TYPE_SEC)) {
 					omap_sram_size = 0x7000; /* 28K */
+					omap_sram_skip += SZ_16K;
 				} else {
 					omap_sram_size = 0x8000; /* 32K */
 				}
@@ -174,8 +177,10 @@ static void __init omap_map_sram(void)
 		return;
 
 #ifdef CONFIG_OMAP4_ERRATA_I688
+	if (cpu_is_omap44xx()) {
 		omap_sram_start += PAGE_SIZE;
 		omap_sram_size -= SZ_16K;
+	}
 #endif
 	if (cpu_is_omap34xx()) {
 		/*
@@ -202,8 +207,8 @@ static void __init omap_map_sram(void)
 	 * Looks like we need to preserve some bootloader code at the
 	 * beginning of SRAM for jumping to flash for reboot to work...
 	 */
-	memset_io(omap_sram_base + SRAM_BOOTLOADER_SZ, 0,
-		  omap_sram_size - SRAM_BOOTLOADER_SZ);
+	memset_io(omap_sram_base + omap_sram_skip, 0,
+		  omap_sram_size - omap_sram_skip);
 }
 
 /*
@@ -217,7 +222,7 @@ void *omap_sram_push_address(unsigned long size)
 {
 	unsigned long available, new_ceil = (unsigned long)omap_sram_ceil;
 
-	available = omap_sram_ceil - (omap_sram_base + SRAM_BOOTLOADER_SZ);
+	available = omap_sram_ceil - (omap_sram_base + omap_sram_skip);
 
 	if (size > available) {
 		pr_err("Not enough space in SRAM\n");
@@ -38,6 +38,7 @@ config BLACKFIN
 	select GENERIC_ATOMIC64
 	select GENERIC_IRQ_PROBE
 	select IRQ_PER_CPU if SMP
+	select USE_GENERIC_SMP_HELPERS if SMP
 	select HAVE_NMI_WATCHDOG if NMI_WATCHDOG
 	select GENERIC_SMP_IDLE_THREAD
 	select ARCH_USES_GETTIMEOFFSET if !GENERIC_CLOCKEVENTS
@@ -20,7 +20,6 @@ endif
 KBUILD_AFLAGS += $(call cc-option,-mno-fdpic)
-KBUILD_CFLAGS_MODULE += -mlong-calls
 LDFLAGS += -m elf32bfin
 KALLSYMS += --symbol-prefix=_
 
 KBUILD_DEFCONFIG := BF537-STAMP_defconfig
@@ -18,6 +18,8 @@
 #define raw_smp_processor_id()	blackfin_core_id()
 
 extern void bfin_relocate_coreb_l1_mem(void);
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 
 #if defined(CONFIG_SMP) && defined(CONFIG_ICACHE_FLUSH_L1)
 asmlinkage void blackfin_icache_flush_range_l1(unsigned long *ptr);
@@ -48,10 +48,13 @@ unsigned long blackfin_iflush_l1_entry[NR_CPUS];
 
 struct blackfin_initial_pda __cpuinitdata initial_pda_coreb;
 
-#define BFIN_IPI_TIMER		0
-#define BFIN_IPI_RESCHEDULE	1
-#define BFIN_IPI_CALL_FUNC	2
-#define BFIN_IPI_CPU_STOP	3
+enum ipi_message_type {
+	BFIN_IPI_TIMER,
+	BFIN_IPI_RESCHEDULE,
+	BFIN_IPI_CALL_FUNC,
+	BFIN_IPI_CALL_FUNC_SINGLE,
+	BFIN_IPI_CPU_STOP,
+};
 
 struct blackfin_flush_data {
 	unsigned long start;
@@ -60,35 +63,20 @@ struct blackfin_flush_data {
 
 void *secondary_stack;
 
-struct smp_call_struct {
-	void (*func)(void *info);
-	void *info;
-	int wait;
-	cpumask_t *waitmask;
-};
-
 static struct blackfin_flush_data smp_flush_data;
 
 static DEFINE_SPINLOCK(stop_lock);
 
-struct ipi_message {
-	unsigned long type;
-	struct smp_call_struct call_struct;
-};
-
-/* A magic number - stress test shows this is safe for common cases */
-#define BFIN_IPI_MSGQ_LEN 5
-
-/* Simple FIFO buffer, overflow leads to panic */
-struct ipi_message_queue {
-	spinlock_t lock;
+struct ipi_data {
 	unsigned long count;
-	unsigned long head; /* head of the queue */
-	struct ipi_message ipi_message[BFIN_IPI_MSGQ_LEN];
+	unsigned long bits;
 };
 
-static DEFINE_PER_CPU(struct ipi_message_queue, ipi_msg_queue);
+static DEFINE_PER_CPU(struct ipi_data, bfin_ipi);
 
 static void ipi_cpu_stop(unsigned int cpu)
 {
@@ -129,28 +117,6 @@ static void ipi_flush_icache(void *info)
 	blackfin_icache_flush_range(fdata->start, fdata->end);
 }
 
-static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
-{
-	int wait;
-	void (*func)(void *info);
-	void *info;
-	func = msg->call_struct.func;
-	info = msg->call_struct.info;
-	wait = msg->call_struct.wait;
-	func(info);
-	if (wait) {
-#ifdef __ARCH_SYNC_CORE_DCACHE
-		/*
-		 * 'wait' usually means synchronization between CPUs.
-		 * Invalidate D cache in case shared data was changed
-		 * by func() to ensure cache coherence.
-		 */
-		resync_core_dcache();
-#endif
-		cpumask_clear_cpu(cpu, msg->call_struct.waitmask);
-	}
-}
-
 /* Use IRQ_SUPPLE_0 to request reschedule.
  * When returning from interrupt to user space,
  * there is chance to reschedule */
@@ -172,152 +138,95 @@ void ipi_timer(void)
 
 static irqreturn_t ipi_handler_int1(int irq, void *dev_instance)
 {
-	struct ipi_message *msg;
-	struct ipi_message_queue *msg_queue;
+	struct ipi_data *bfin_ipi_data;
 	unsigned int cpu = smp_processor_id();
-	unsigned long flags;
+	unsigned long pending;
+	unsigned long msg;
 
 	platform_clear_ipi(cpu, IRQ_SUPPLE_1);
 
-	msg_queue = &__get_cpu_var(ipi_msg_queue);
-
-	spin_lock_irqsave(&msg_queue->lock, flags);
-
-	while (msg_queue->count) {
-		msg = &msg_queue->ipi_message[msg_queue->head];
-		switch (msg->type) {
-		case BFIN_IPI_TIMER:
-			ipi_timer();
-			break;
-		case BFIN_IPI_RESCHEDULE:
-			scheduler_ipi();
-			break;
-		case BFIN_IPI_CALL_FUNC:
-			ipi_call_function(cpu, msg);
-			break;
-		case BFIN_IPI_CPU_STOP:
-			ipi_cpu_stop(cpu);
-			break;
-		default:
-			printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%lx\n",
-			       cpu, msg->type);
-			break;
-		}
-		msg_queue->head++;
-		msg_queue->head %= BFIN_IPI_MSGQ_LEN;
-		msg_queue->count--;
+	bfin_ipi_data = &__get_cpu_var(bfin_ipi);
+
+	while ((pending = xchg(&bfin_ipi_data->bits, 0)) != 0) {
+		msg = 0;
+		do {
+			msg = find_next_bit(&pending, BITS_PER_LONG, msg + 1);
+			switch (msg) {
+			case BFIN_IPI_TIMER:
+				ipi_timer();
+				break;
+			case BFIN_IPI_RESCHEDULE:
+				scheduler_ipi();
+				break;
+			case BFIN_IPI_CALL_FUNC:
+				generic_smp_call_function_interrupt();
+				break;
+			case BFIN_IPI_CALL_FUNC_SINGLE:
+				generic_smp_call_function_single_interrupt();
+				break;
+			case BFIN_IPI_CPU_STOP:
+				ipi_cpu_stop(cpu);
+				break;
+			}
+		} while (msg < BITS_PER_LONG);
+
+		smp_mb();
 	}
-	spin_unlock_irqrestore(&msg_queue->lock, flags);
 	return IRQ_HANDLED;
 }
 
-static void ipi_queue_init(void)
+static void bfin_ipi_init(void)
 {
 	unsigned int cpu;
-	struct ipi_message_queue *msg_queue;
+	struct ipi_data *bfin_ipi_data;
 	for_each_possible_cpu(cpu) {
-		msg_queue = &per_cpu(ipi_msg_queue, cpu);
-		spin_lock_init(&msg_queue->lock);
-		msg_queue->count = 0;
-		msg_queue->head = 0;
+		bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
+		bfin_ipi_data->bits = 0;
+		bfin_ipi_data->count = 0;
 	}
 }
 
-static inline void smp_send_message(cpumask_t callmap, unsigned long type,
-					void (*func) (void *info), void *info, int wait)
+void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg)
 {
 	unsigned int cpu;
-	struct ipi_message_queue *msg_queue;
-	struct ipi_message *msg;
-	unsigned long flags, next_msg;
-	cpumask_t waitmask; /* waitmask is shared by all cpus */
-
-	cpumask_copy(&waitmask, &callmap);
-	for_each_cpu(cpu, &callmap) {
-		msg_queue = &per_cpu(ipi_msg_queue, cpu);
-		spin_lock_irqsave(&msg_queue->lock, flags);
-		if (msg_queue->count < BFIN_IPI_MSGQ_LEN) {
-			next_msg = (msg_queue->head + msg_queue->count)
-					% BFIN_IPI_MSGQ_LEN;
-			msg = &msg_queue->ipi_message[next_msg];
-			msg->type = type;
-			if (type == BFIN_IPI_CALL_FUNC) {
-				msg->call_struct.func = func;
-				msg->call_struct.info = info;
-				msg->call_struct.wait = wait;
-				msg->call_struct.waitmask = &waitmask;
-			}
-			msg_queue->count++;
-		} else
-			panic("IPI message queue overflow\n");
-		spin_unlock_irqrestore(&msg_queue->lock, flags);
+	struct ipi_data *bfin_ipi_data;
+	unsigned long flags;
+
+	local_irq_save(flags);
+
+	for_each_cpu(cpu, cpumask) {
+		bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
+		smp_mb();
+		set_bit(msg, &bfin_ipi_data->bits);
+		bfin_ipi_data->count++;
 		platform_send_ipi_cpu(cpu, IRQ_SUPPLE_1);
 	}
 
-	if (wait) {
-		while (!cpumask_empty(&waitmask))
-			blackfin_dcache_invalidate_range(
-				(unsigned long)(&waitmask),
-				(unsigned long)(&waitmask));
-#ifdef __ARCH_SYNC_CORE_DCACHE
-		/*
-		 * Invalidate D cache in case shared data was changed by
-		 * other processors to ensure cache coherence.
-		 */
-		resync_core_dcache();
-#endif
-	}
+	local_irq_restore(flags);
 }
 
-int smp_call_function(void (*func)(void *info), void *info, int wait)
+void arch_send_call_function_single_ipi(int cpu)
 {
-	cpumask_t callmap;
-
-	preempt_disable();
-	cpumask_copy(&callmap, cpu_online_mask);
-	cpumask_clear_cpu(smp_processor_id(), &callmap);
-	if (!cpumask_empty(&callmap))
-		smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
-
-	preempt_enable();
-
-	return 0;
+	send_ipi(cpumask_of(cpu), BFIN_IPI_CALL_FUNC_SINGLE);
 }
-EXPORT_SYMBOL_GPL(smp_call_function);
 
-int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
-				int wait)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
-	unsigned int cpu = cpuid;
-	cpumask_t callmap;
-
-	if (cpu_is_offline(cpu))
-		return 0;
-	cpumask_clear(&callmap);
-	cpumask_set_cpu(cpu, &callmap);
-
-	smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
-
-	return 0;
+	send_ipi(mask, BFIN_IPI_CALL_FUNC);
 }
-EXPORT_SYMBOL_GPL(smp_call_function_single);
 
 void smp_send_reschedule(int cpu)
 {
-	cpumask_t callmap;
-	/* simply trigger an ipi */
-
-	cpumask_clear(&callmap);
-	cpumask_set_cpu(cpu, &callmap);
-
-	smp_send_message(callmap, BFIN_IPI_RESCHEDULE, NULL, NULL, 0);
+	send_ipi(cpumask_of(cpu), BFIN_IPI_RESCHEDULE);
 
 	return;
 }
 
 void smp_send_msg(const struct cpumask *mask, unsigned long type)
 {
-	smp_send_message(*mask, type, NULL, NULL, 0);
+	send_ipi(mask, type);
 }
 
 void smp_timer_broadcast(const struct cpumask *mask)
@@ -333,7 +242,7 @@ void smp_send_stop(void)
 	cpumask_copy(&callmap, cpu_online_mask);
 	cpumask_clear_cpu(smp_processor_id(), &callmap);
 	if (!cpumask_empty(&callmap))
-		smp_send_message(callmap, BFIN_IPI_CPU_STOP, NULL, NULL, 0);
+		send_ipi(&callmap, BFIN_IPI_CPU_STOP);
 
 	preempt_enable();
 
@@ -436,7 +345,7 @@ void __init smp_prepare_boot_cpu(void)
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
 	platform_prepare_cpus(max_cpus);
-	ipi_queue_init();
+	bfin_ipi_init();
 	platform_request_ipi(IRQ_SUPPLE_0, ipi_handler_int0);
 	platform_request_ipi(IRQ_SUPPLE_1, ipi_handler_int1);
 }
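The rework above replaces the locked per-CPU message queue with a per-CPU bitmask: senders set_bit() the message type and the handler drains everything with a single xchg(), so no spinlock is needed. A user-space sketch of the same produce/consume pattern, substituting GCC atomic builtins for the kernel's set_bit()/xchg() (that substitution is an assumption for illustration):

#include <stdio.h>

enum ipi_message_type { IPI_TIMER, IPI_RESCHEDULE, IPI_CALL_FUNC, IPI_CPU_STOP };

static unsigned long ipi_bits;	/* one pending flag per message type */

static void send_ipi(enum ipi_message_type msg)
{
	/* like the kernel's set_bit(): mark the message pending... */
	__atomic_fetch_or(&ipi_bits, 1UL << msg, __ATOMIC_SEQ_CST);
	/* ...then kick the target CPU (platform_send_ipi_cpu() above) */
}

static void ipi_handler(void)
{
	unsigned long pending;
	int msg;

	/* atomically claim all pending messages, like xchg(&bits, 0) */
	while ((pending = __atomic_exchange_n(&ipi_bits, 0UL,
					      __ATOMIC_SEQ_CST)) != 0)
		for (msg = 0; msg < 4; msg++)
			if (pending & (1UL << msg))
				printf("handling message %d\n", msg);
}

int main(void)
{
	send_ipi(IPI_RESCHEDULE);
	send_ipi(IPI_CALL_FUNC);
	ipi_handler();
	return 0;
}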
@@ -169,7 +169,7 @@ static ssize_t hw_interval_write(struct file *file, char const __user *buf,
 	if (*offset)
 		return -EINVAL;
 	retval = oprofilefs_ulong_from_user(&val, buf, count);
-	if (retval)
+	if (retval <= 0)
 		return retval;
 	if (val < oprofile_min_interval)
 		oprofile_hw_interval = oprofile_min_interval;
@@ -212,7 +212,7 @@ static ssize_t hwsampler_zero_write(struct file *file, char const __user *buf,
 		return -EINVAL;
 
 	retval = oprofilefs_ulong_from_user(&val, buf, count);
-	if (retval)
+	if (retval <= 0)
 		return retval;
 	if (val != 0)
 		return -EINVAL;
@@ -243,7 +243,7 @@ static ssize_t hwsampler_kernel_write(struct file *file, char const __user *buf,
 		return -EINVAL;
 
 	retval = oprofilefs_ulong_from_user(&val, buf, count);
-	if (retval)
+	if (retval <= 0)
 		return retval;
 
 	if (val != 0 && val != 1)
@@ -278,7 +278,7 @@ static ssize_t hwsampler_user_write(struct file *file, char const __user *buf,
 		return -EINVAL;
 
 	retval = oprofilefs_ulong_from_user(&val, buf, count);
-	if (retval)
+	if (retval <= 0)
 		return retval;
 
 	if (val != 0 && val != 1)
@@ -317,7 +317,7 @@ static ssize_t timer_enabled_write(struct file *file, char const __user *buf,
 		return -EINVAL;
 
 	retval = oprofilefs_ulong_from_user(&val, buf, count);
-	if (retval)
+	if (retval <= 0)
 		return retval;
 
 	if (val != 0 && val != 1)
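All five hunks apply the same pattern, which suggests oprofilefs_ulong_from_user() now returns the number of bytes consumed on success rather than zero, so only a zero-or-negative result should be propagated as-is (the return convention is inferred from the change, not stated in this diff). A condensed sketch of the corrected handler shape, with an illustrative name:

/* Hypothetical write handler following the corrected pattern above. */
static ssize_t example_write(struct file *file, char const __user *buf,
			     size_t count, loff_t *offset)
{
	unsigned long val;
	int retval;

	if (*offset)
		return -EINVAL;

	retval = oprofilefs_ulong_from_user(&val, buf, count);
	if (retval <= 0)	/* error, or nothing was consumed */
		return retval;

	if (val != 0 && val != 1)
		return -EINVAL;
	/* ...apply val... */
	return count;
}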
@@ -2008,6 +2008,7 @@ __init int intel_pmu_init(void)
 		break;
 
 	case 28: /* Atom */
+	case 54: /* Cedariew */
 		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
 		       sizeof(hw_cache_event_ids));
 
@@ -686,7 +686,8 @@ void intel_pmu_lbr_init_atom(void)
 	 * to have an operational LBR which can freeze
 	 * on PMU interrupt
 	 */
-	if (boot_cpu_data.x86_mask < 10) {
+	if (boot_cpu_data.x86_model == 28
+	    && boot_cpu_data.x86_mask < 10) {
 		pr_cont("LBR disabled due to erratum");
 		return;
 	}
@@ -225,6 +225,9 @@ static ssize_t microcode_write(struct file *file, const char __user *buf,
 	if (do_microcode_update(buf, len) == 0)
 		ret = (ssize_t)len;
 
+	if (ret > 0)
+		perf_check_microcode();
+
 	mutex_unlock(&microcode_mutex);
 	put_online_cpus();
 
@@ -318,7 +318,7 @@ static void pic_ioport_write(void *opaque, u32 addr, u32 val)
 	if (val & 0x10) {
 		u8 edge_irr = s->irr & ~s->elcr;
 		int i;
-		bool found;
+		bool found = false;
 		struct kvm_vcpu *vcpu;
 
 		s->init4 = val & 1;
@@ -3619,6 +3619,7 @@ static void seg_setup(int seg)
 
 static int alloc_apic_access_page(struct kvm *kvm)
 {
+	struct page *page;
 	struct kvm_userspace_memory_region kvm_userspace_mem;
 	int r = 0;
 
@@ -3633,7 +3634,13 @@ static int alloc_apic_access_page(struct kvm *kvm)
 	if (r)
 		goto out;
 
-	kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00);
+	page = gfn_to_page(kvm, 0xfee00);
+	if (is_error_page(page)) {
+		r = -EFAULT;
+		goto out;
+	}
+
+	kvm->arch.apic_access_page = page;
 out:
 	mutex_unlock(&kvm->slots_lock);
 	return r;
@@ -3641,6 +3648,7 @@ out:
 
 static int alloc_identity_pagetable(struct kvm *kvm)
 {
+	struct page *page;
 	struct kvm_userspace_memory_region kvm_userspace_mem;
 	int r = 0;
 
@@ -3656,8 +3664,13 @@ static int alloc_identity_pagetable(struct kvm *kvm)
 	if (r)
 		goto out;
 
-	kvm->arch.ept_identity_pagetable = gfn_to_page(kvm,
-			kvm->arch.ept_identity_map_addr >> PAGE_SHIFT);
+	page = gfn_to_page(kvm, kvm->arch.ept_identity_map_addr >> PAGE_SHIFT);
+	if (is_error_page(page)) {
+		r = -EFAULT;
+		goto out;
+	}
+
+	kvm->arch.ept_identity_pagetable = page;
 out:
 	mutex_unlock(&kvm->slots_lock);
 	return r;
@@ -6575,7 +6588,7 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
 	/* Exposing INVPCID only when PCID is exposed */
 	best = kvm_find_cpuid_entry(vcpu, 0x7, 0);
 	if (vmx_invpcid_supported() &&
-		best && (best->ecx & bit(X86_FEATURE_INVPCID)) &&
+		best && (best->ebx & bit(X86_FEATURE_INVPCID)) &&
 		guest_cpuid_has_pcid(vcpu)) {
 		exec_control |= SECONDARY_EXEC_ENABLE_INVPCID;
 		vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
@@ -6585,7 +6598,7 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
 		vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
 			     exec_control);
 		if (best)
-			best->ecx &= ~bit(X86_FEATURE_INVPCID);
+			best->ebx &= ~bit(X86_FEATURE_INVPCID);
 	}
 }
 
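The last two hunks fix a register mix-up: INVPCID is enumerated in CPUID.(EAX=07H,ECX=0):EBX bit 10, not in ECX, so the guest-CPUID test and mask must operate on best->ebx. A standalone check of the same bit against the host CPU, using GCC's cpuid.h:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* CPUID leaf 7, subleaf 0: structured extended feature flags */
	__cpuid_count(7, 0, eax, ebx, ecx, edx);

	/* INVPCID is advertised in EBX bit 10 -- the register the fix
	 * switches the guest-CPUID test and mask over to */
	printf("INVPCID supported: %s\n", (ebx & (1u << 10)) ? "yes" : "no");
	return 0;
}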
@@ -5113,17 +5113,20 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
 		!kvm_event_needs_reinjection(vcpu);
 }
 
-static void vapic_enter(struct kvm_vcpu *vcpu)
+static int vapic_enter(struct kvm_vcpu *vcpu)
 {
 	struct kvm_lapic *apic = vcpu->arch.apic;
 	struct page *page;
 
 	if (!apic || !apic->vapic_addr)
-		return;
+		return 0;
 
 	page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
+	if (is_error_page(page))
+		return -EFAULT;
 
 	vcpu->arch.apic->vapic_page = page;
+	return 0;
 }
 
 static void vapic_exit(struct kvm_vcpu *vcpu)
@@ -5430,7 +5433,11 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
 	}
 
 	vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
-	vapic_enter(vcpu);
+	r = vapic_enter(vcpu);
+	if (r) {
+		srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
+		return r;
+	}
 
 	r = 1;
 	while (r > 0) {
@@ -336,7 +336,7 @@ static int crypto_authenc_genicv(struct aead_request *req, u8 *iv,
 		cryptlen += ivsize;
 	}
 
-	if (sg_is_last(assoc)) {
+	if (req->assoclen && sg_is_last(assoc)) {
 		authenc_ahash_fn = crypto_authenc_ahash;
 		sg_init_table(asg, 2);
 		sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset);
@@ -490,7 +490,7 @@ static int crypto_authenc_iverify(struct aead_request *req, u8 *iv,
 		cryptlen += ivsize;
 	}
 
-	if (sg_is_last(assoc)) {
+	if (req->assoclen && sg_is_last(assoc)) {
 		authenc_ahash_fn = crypto_authenc_ahash;
 		sg_init_table(asg, 2);
 		sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset);
@@ -237,6 +237,16 @@ static int __acpi_bus_get_power(struct acpi_device *device, int *state)
 	} else if (result == ACPI_STATE_D3_HOT) {
 		result = ACPI_STATE_D3;
 	}
+
+	/*
+	 * If we were unsure about the device parent's power state up to this
+	 * point, the fact that the device is in D0 implies that the parent has
+	 * to be in D0 too.
+	 */
+	if (device->parent && device->parent->power.state == ACPI_STATE_UNKNOWN
+	    && result == ACPI_STATE_D0)
+		device->parent->power.state = ACPI_STATE_D0;
+
 	*state = result;
 
 out:
@@ -107,6 +107,7 @@ struct acpi_power_resource {
 
 	/* List of devices relying on this power resource */
 	struct acpi_power_resource_device *devices;
+	struct mutex devices_lock;
 };
 
 static struct list_head acpi_power_resource_list;
@@ -225,7 +226,6 @@ static void acpi_power_on_device(struct acpi_power_managed_device *device)
 
 static int __acpi_power_on(struct acpi_power_resource *resource)
 {
-	struct acpi_power_resource_device *device_list = resource->devices;
 	acpi_status status = AE_OK;
 
 	status = acpi_evaluate_object(resource->device->handle, "_ON", NULL, NULL);
@@ -238,19 +238,15 @@ static int __acpi_power_on(struct acpi_power_resource *resource)
 	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Power resource [%s] turned on\n",
 			  resource->name));
 
-	while (device_list) {
-		acpi_power_on_device(device_list->device);
-
-		device_list = device_list->next;
-	}
-
 	return 0;
 }
 
 static int acpi_power_on(acpi_handle handle)
 {
 	int result = 0;
+	bool resume_device = false;
 	struct acpi_power_resource *resource = NULL;
+	struct acpi_power_resource_device *device_list;
 
 	result = acpi_power_get_context(handle, &resource);
 	if (result)
@@ -266,10 +262,25 @@ static int acpi_power_on(acpi_handle handle)
 		result = __acpi_power_on(resource);
 		if (result)
 			resource->ref_count--;
+		else
+			resume_device = true;
 	}
 
 	mutex_unlock(&resource->resource_lock);
 
+	if (!resume_device)
+		return result;
+
+	mutex_lock(&resource->devices_lock);
+
+	device_list = resource->devices;
+	while (device_list) {
+		acpi_power_on_device(device_list->device);
+		device_list = device_list->next;
+	}
+
+	mutex_unlock(&resource->devices_lock);
+
 	return result;
 }
 
@@ -355,7 +366,7 @@ static void __acpi_power_resource_unregister_device(struct device *dev,
 	if (acpi_power_get_context(res_handle, &resource))
 		return;
 
-	mutex_lock(&resource->resource_lock);
+	mutex_lock(&resource->devices_lock);
 	prev = NULL;
 	curr = resource->devices;
 	while (curr) {
@@ -372,7 +383,7 @@ static void __acpi_power_resource_unregister_device(struct device *dev,
 		prev = curr;
 		curr = curr->next;
 	}
-	mutex_unlock(&resource->resource_lock);
+	mutex_unlock(&resource->devices_lock);
 }
 
 /* Unlink dev from all power resources in _PR0 */
@@ -414,10 +425,10 @@ static int __acpi_power_resource_register_device(
 
 	power_resource_device->device = powered_device;
 
-	mutex_lock(&resource->resource_lock);
+	mutex_lock(&resource->devices_lock);
 	power_resource_device->next = resource->devices;
 	resource->devices = power_resource_device;
-	mutex_unlock(&resource->resource_lock);
+	mutex_unlock(&resource->devices_lock);
 
 	return 0;
 }
@@ -462,7 +473,7 @@ int acpi_power_resource_register_device(struct device *dev, acpi_handle handle)
 	return ret;
 
 no_power_resource:
-	printk(KERN_WARNING PREFIX "Invalid Power Resource to register!");
+	printk(KERN_DEBUG PREFIX "Invalid Power Resource to register!");
 	return -ENODEV;
 }
 EXPORT_SYMBOL_GPL(acpi_power_resource_register_device);
@@ -721,6 +732,7 @@ static int acpi_power_add(struct acpi_device *device)
 
 	resource->device = device;
 	mutex_init(&resource->resource_lock);
+	mutex_init(&resource->devices_lock);
 	strcpy(resource->name, device->pnp.bus_id);
 	strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME);
 	strcpy(acpi_device_class(device), ACPI_POWER_CLASS);
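The new devices_lock decouples the dependent-device list from resource_lock: acpi_power_on() above drops resource_lock before walking the list, so _ON evaluation and device resume never nest their locks. A simplified sketch of the resulting shape (the flow is condensed; the real code evaluates _ON inside __acpi_power_on()):

/* Illustrative shape of the two-lock scheme introduced above. */
static void power_on(struct acpi_power_resource *resource)
{
	bool resume_device = false;

	mutex_lock(&resource->resource_lock);	/* refcount + _ON/_OFF */
	if (resource->ref_count++ == 0)
		resume_device = true;		/* first user: was off */
	mutex_unlock(&resource->resource_lock);

	if (!resume_device)
		return;

	mutex_lock(&resource->devices_lock);	/* list only; never held
						 * together with the above */
	/* walk resource->devices and resume each entry, as above */
	mutex_unlock(&resource->devices_lock);
}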
@@ -268,6 +268,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 	/* JMicron 360/1/3/5/6, match class to avoid IDE function */
 	{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
 	  PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci_ign_iferr },
+	/* JMicron 362B and 362C have an AHCI function with IDE class code */
+	{ PCI_VDEVICE(JMICRON, 0x2362), board_ahci_ign_iferr },
+	{ PCI_VDEVICE(JMICRON, 0x236f), board_ahci_ign_iferr },
 
 	/* ATI */
 	{ PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */
@@ -393,6 +396,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 	  .driver_data = board_ahci_yes_fbs },			/* 88se9125 */
 	{ PCI_DEVICE(0x1b4b, 0x917a),
 	  .driver_data = board_ahci_yes_fbs },			/* 88se9172 */
+	{ PCI_DEVICE(0x1b4b, 0x9192),
+	  .driver_data = board_ahci_yes_fbs },			/* 88se9172 on some Gigabyte */
 	{ PCI_DEVICE(0x1b4b, 0x91a3),
 	  .driver_data = board_ahci_yes_fbs },
 
@@ -400,7 +405,10 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 	{ PCI_VDEVICE(PROMISE, 0x3f20), board_ahci },	/* PDC42819 */
 
 	/* Asmedia */
-	{ PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci },	/* ASM1061 */
+	{ PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci },	/* ASM1060 */
+	{ PCI_VDEVICE(ASMEDIA, 0x0602), board_ahci },	/* ASM1060 */
+	{ PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci },	/* ASM1061 */
+	{ PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci },	/* ASM1062 */
 
 	/* Generic, PCI class code for AHCI */
 	{ PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -86,6 +86,7 @@ static struct usb_device_id ath3k_table[] = {
 
 	/* Atheros AR5BBU22 with sflash firmware */
 	{ USB_DEVICE(0x0489, 0xE03C) },
+	{ USB_DEVICE(0x0489, 0xE036) },
 
 	{ }	/* Terminating entry */
 };
@@ -109,6 +110,7 @@ static struct usb_device_id ath3k_blist_tbl[] = {
 
 	/* Atheros AR5BBU22 with sflash firmware */
 	{ USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x0489, 0xE036), .driver_info = BTUSB_ATH3012 },
 
 	{ }	/* Terminating entry */
 };
@@ -52,6 +52,9 @@ static struct usb_device_id btusb_table[] = {
 	/* Generic Bluetooth USB device */
 	{ USB_DEVICE_INFO(0xe0, 0x01, 0x01) },
 
+	/* Apple-specific (Broadcom) devices */
+	{ USB_VENDOR_AND_INTERFACE_INFO(0x05ac, 0xff, 0x01, 0x01) },
+
 	/* Broadcom SoftSailing reporting vendor specific */
 	{ USB_DEVICE(0x0a5c, 0x21e1) },
 
@@ -94,16 +97,14 @@ static struct usb_device_id btusb_table[] = {
 
 	/* Broadcom BCM20702A0 */
 	{ USB_DEVICE(0x0489, 0xe042) },
-	{ USB_DEVICE(0x0a5c, 0x21e3) },
-	{ USB_DEVICE(0x0a5c, 0x21e6) },
-	{ USB_DEVICE(0x0a5c, 0x21e8) },
-	{ USB_DEVICE(0x0a5c, 0x21f3) },
-	{ USB_DEVICE(0x0a5c, 0x21f4) },
 	{ USB_DEVICE(0x413c, 0x8197) },
 
 	/* Foxconn - Hon Hai */
 	{ USB_DEVICE(0x0489, 0xe033) },
 
+	/*Broadcom devices with vendor specific id */
+	{ USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01) },
+
 	{ }	/* Terminating entry */
 };
 
@@ -141,6 +142,7 @@ static struct usb_device_id blacklist_table[] = {
 
 	/* Atheros AR5BBU12 with sflash firmware */
 	{ USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x0489, 0xe036), .driver_info = BTUSB_ATH3012 },
 
 	/* Broadcom BCM2035 */
 	{ USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU },
@@ -120,3 +120,4 @@ u32 gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
 
 	return ret;
 }
+EXPORT_SYMBOL(gen_split_key);
@@ -669,13 +669,18 @@ static int __devinit max77693_muic_probe(struct platform_device *pdev)
 	}
 	info->dev = &pdev->dev;
 	info->max77693 = max77693;
-	info->max77693->regmap_muic = regmap_init_i2c(info->max77693->muic,
-					&max77693_muic_regmap_config);
-	if (IS_ERR(info->max77693->regmap_muic)) {
-		ret = PTR_ERR(info->max77693->regmap_muic);
-		dev_err(max77693->dev,
-			"failed to allocate register map: %d\n", ret);
-		goto err_regmap;
+	if (info->max77693->regmap_muic)
+		dev_dbg(&pdev->dev, "allocate register map\n");
+	else {
+		info->max77693->regmap_muic = devm_regmap_init_i2c(
+						info->max77693->muic,
+						&max77693_muic_regmap_config);
+		if (IS_ERR(info->max77693->regmap_muic)) {
+			ret = PTR_ERR(info->max77693->regmap_muic);
+			dev_err(max77693->dev,
+				"failed to allocate register map: %d\n", ret);
+			goto err_regmap;
+		}
 	}
 	platform_set_drvdata(pdev, info);
 	mutex_init(&info->mutex);
@@ -193,6 +193,9 @@ static const struct file_operations ast_fops = {
 	.mmap = ast_mmap,
 	.poll = drm_poll,
 	.fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = drm_compat_ioctl,
+#endif
 	.read = drm_read,
 };
 
@@ -841,7 +841,7 @@ int ast_cursor_init(struct drm_device *dev)
 
 	ast->cursor_cache = obj;
 	ast->cursor_cache_gpu_addr = gpu_addr;
-	DRM_ERROR("pinned cursor cache at %llx\n", ast->cursor_cache_gpu_addr);
+	DRM_DEBUG_KMS("pinned cursor cache at %llx\n", ast->cursor_cache_gpu_addr);
 	return 0;
 fail:
 	return ret;
@@ -74,6 +74,9 @@ static const struct file_operations cirrus_driver_fops = {
 	.unlocked_ioctl = drm_ioctl,
 	.mmap = cirrus_mmap,
 	.poll = drm_poll,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = drm_compat_ioctl,
+#endif
 	.fasync = drm_fasync,
 };
 static struct drm_driver driver = {
@@ -36,6 +36,6 @@ config DRM_EXYNOS_VIDI
 
 config DRM_EXYNOS_G2D
 	bool "Exynos DRM G2D"
-	depends on DRM_EXYNOS
+	depends on DRM_EXYNOS && !VIDEO_SAMSUNG_S5P_G2D
 	help
 	  Choose this option if you want to use Exynos G2D for DRM.
@@ -163,6 +163,12 @@ static void exynos_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
 	/* TODO */
 }
 
+static int exynos_gem_dmabuf_mmap(struct dma_buf *dma_buf,
+	struct vm_area_struct *vma)
+{
+	return -ENOTTY;
+}
+
 static struct dma_buf_ops exynos_dmabuf_ops = {
 	.map_dma_buf = exynos_gem_map_dma_buf,
 	.unmap_dma_buf = exynos_gem_unmap_dma_buf,
@@ -170,6 +176,7 @@ static struct dma_buf_ops exynos_dmabuf_ops = {
 	.kmap_atomic = exynos_gem_dmabuf_kmap_atomic,
 	.kunmap = exynos_gem_dmabuf_kunmap,
 	.kunmap_atomic = exynos_gem_dmabuf_kunmap_atomic,
+	.mmap = exynos_gem_dmabuf_mmap,
 	.release = exynos_dmabuf_release,
 };
 
@@ -160,7 +160,6 @@ static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
 	if (!file_priv)
 		return -ENOMEM;
 
-	drm_prime_init_file_private(&file->prime);
 	file->driver_priv = file_priv;
 
 	return exynos_drm_subdrv_open(dev, file);
@@ -184,7 +183,6 @@ static void exynos_drm_preclose(struct drm_device *dev,
 			e->base.destroy(&e->base);
 		}
 	}
-	drm_prime_destroy_file_private(&file->prime);
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 
 	exynos_drm_subdrv_close(dev, file);
@@ -241,6 +239,9 @@ static const struct file_operations exynos_drm_driver_fops = {
 	.poll = drm_poll,
 	.read = drm_read,
 	.unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = drm_compat_ioctl,
+#endif
 	.release = drm_release,
 };
 
@@ -831,11 +831,6 @@ static int __devinit fimd_probe(struct platform_device *pdev)
 	}
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		dev_err(dev, "failed to find registers\n");
-		ret = -ENOENT;
-		goto err_clk;
-	}
 
 	ctx->regs = devm_request_and_ioremap(&pdev->dev, res);
 	if (!ctx->regs) {
@@ -129,7 +129,6 @@ struct g2d_runqueue_node {
 struct g2d_data {
 	struct device *dev;
 	struct clk *gate_clk;
-	struct resource *regs_res;
 	void __iomem *regs;
 	int irq;
 	struct workqueue_struct *g2d_workq;
@@ -751,7 +750,7 @@ static int __devinit g2d_probe(struct platform_device *pdev)
 	struct exynos_drm_subdrv *subdrv;
 	int ret;
 
-	g2d = kzalloc(sizeof(*g2d), GFP_KERNEL);
+	g2d = devm_kzalloc(&pdev->dev, sizeof(*g2d), GFP_KERNEL);
 	if (!g2d) {
 		dev_err(dev, "failed to allocate driver data\n");
 		return -ENOMEM;
@@ -759,10 +758,8 @@ static int __devinit g2d_probe(struct platform_device *pdev)
 
 	g2d->runqueue_slab = kmem_cache_create("g2d_runqueue_slab",
 			sizeof(struct g2d_runqueue_node), 0, 0, NULL);
-	if (!g2d->runqueue_slab) {
-		ret = -ENOMEM;
-		goto err_free_mem;
-	}
+	if (!g2d->runqueue_slab)
+		return -ENOMEM;
 
 	g2d->dev = dev;
 
@@ -794,38 +791,26 @@ static int __devinit g2d_probe(struct platform_device *pdev)
 	pm_runtime_enable(dev);
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		dev_err(dev, "failed to get I/O memory\n");
-		ret = -ENOENT;
-		goto err_put_clk;
-	}
-
-	g2d->regs_res = request_mem_region(res->start, resource_size(res),
-					   dev_name(dev));
-	if (!g2d->regs_res) {
-		dev_err(dev, "failed to request I/O memory\n");
-		ret = -ENOENT;
-		goto err_put_clk;
-	}
 
-	g2d->regs = ioremap(res->start, resource_size(res));
+	g2d->regs = devm_request_and_ioremap(&pdev->dev, res);
 	if (!g2d->regs) {
 		dev_err(dev, "failed to remap I/O memory\n");
 		ret = -ENXIO;
-		goto err_release_res;
+		goto err_put_clk;
 	}
 
 	g2d->irq = platform_get_irq(pdev, 0);
 	if (g2d->irq < 0) {
 		dev_err(dev, "failed to get irq\n");
 		ret = g2d->irq;
-		goto err_unmap_base;
+		goto err_put_clk;
 	}
 
-	ret = request_irq(g2d->irq, g2d_irq_handler, 0, "drm_g2d", g2d);
+	ret = devm_request_irq(&pdev->dev, g2d->irq, g2d_irq_handler, 0,
+			       "drm_g2d", g2d);
 	if (ret < 0) {
 		dev_err(dev, "irq request failed\n");
-		goto err_unmap_base;
+		goto err_put_clk;
 	}
 
 	platform_set_drvdata(pdev, g2d);
@@ -838,7 +823,7 @@ static int __devinit g2d_probe(struct platform_device *pdev)
 	ret = exynos_drm_subdrv_register(subdrv);
 	if (ret < 0) {
 		dev_err(dev, "failed to register drm g2d device\n");
-		goto err_free_irq;
+		goto err_put_clk;
 	}
 
 	dev_info(dev, "The exynos g2d(ver %d.%d) successfully probed\n",
@@ -846,13 +831,6 @@ static int __devinit g2d_probe(struct platform_device *pdev)
 
 	return 0;
 
-err_free_irq:
-	free_irq(g2d->irq, g2d);
-err_unmap_base:
-	iounmap(g2d->regs);
-err_release_res:
-	release_resource(g2d->regs_res);
-	kfree(g2d->regs_res);
 err_put_clk:
 	pm_runtime_disable(dev);
 	clk_put(g2d->gate_clk);
@@ -862,8 +840,6 @@ err_destroy_workqueue:
 	destroy_workqueue(g2d->g2d_workq);
 err_destroy_slab:
 	kmem_cache_destroy(g2d->runqueue_slab);
-err_free_mem:
-	kfree(g2d);
 	return ret;
 }
 
@@ -873,24 +849,18 @@ static int __devexit g2d_remove(struct platform_device *pdev)
 
 	cancel_work_sync(&g2d->runqueue_work);
 	exynos_drm_subdrv_unregister(&g2d->subdrv);
-	free_irq(g2d->irq, g2d);
 
 	while (g2d->runqueue_node) {
 		g2d_free_runqueue_node(g2d, g2d->runqueue_node);
 		g2d->runqueue_node = g2d_get_runqueue_node(g2d);
 	}
 
-	iounmap(g2d->regs);
-	release_resource(g2d->regs_res);
-	kfree(g2d->regs_res);
-
 	pm_runtime_disable(&pdev->dev);
 	clk_put(g2d->gate_clk);
 
 	g2d_fini_cmdlist(g2d);
 	destroy_workqueue(g2d->g2d_workq);
 	kmem_cache_destroy(g2d->runqueue_slab);
-	kfree(g2d);
 
 	return 0;
 }
@@ -924,7 +894,7 @@ static int g2d_resume(struct device *dev)
 }
 #endif
 
-SIMPLE_DEV_PM_OPS(g2d_pm_ops, g2d_suspend, g2d_resume);
+static SIMPLE_DEV_PM_OPS(g2d_pm_ops, g2d_suspend, g2d_resume);
 
 struct platform_driver g2d_driver = {
 	.probe = g2d_probe,
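The conversion above is the usual devm_* idiom: anything obtained through devm_kzalloc(), devm_request_and_ioremap() or devm_request_irq() is released automatically when probe fails or the device is unbound, which is why the err_free_irq/err_unmap_base/err_release_res labels all collapse into err_put_clk. A condensed sketch of the resulting probe shape; the driver struct and handler names are illustrative:

/* Hypothetical devm-managed probe: no manual unwinding of MMIO or IRQ. */
static int example_probe(struct platform_device *pdev)
{
	struct example_priv *priv;		/* illustrative type */
	struct resource *res;
	int irq;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->regs = devm_request_and_ioremap(&pdev->dev, res);
	if (!priv->regs)
		return -ENXIO;			/* nothing to undo */

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	return devm_request_irq(&pdev->dev, irq, example_irq_handler, 0,
				dev_name(&pdev->dev), priv);
}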
@@ -122,7 +122,7 @@ fail:
 		__free_page(pages[i]);
 
 	drm_free_large(pages);
-	return ERR_PTR(PTR_ERR(p));
+	return ERR_CAST(p);
 }
 
 static void exynos_gem_put_pages(struct drm_gem_object *obj,
@@ -662,7 +662,7 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
 	 */
 
 	args->pitch = args->width * ((args->bpp + 7) / 8);
-	args->size = PAGE_ALIGN(args->pitch * args->height);
+	args->size = args->pitch * args->height;
 
 	exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
 	if (IS_ERR(exynos_gem_obj))
@@ -345,7 +345,7 @@ static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
-	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
 	if (!ctx) {
 		DRM_LOG_KMS("failed to alloc common hdmi context.\n");
 		return -ENOMEM;
@@ -371,7 +371,6 @@ static int __devexit exynos_drm_hdmi_remove(struct platform_device *pdev)
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
 	exynos_drm_subdrv_unregister(&ctx->subdrv);
-	kfree(ctx);
 
 	return 0;
 }
|
@ -29,7 +29,6 @@ static const uint32_t formats[] = {
|
|||
DRM_FORMAT_XRGB8888,
|
||||
DRM_FORMAT_ARGB8888,
|
||||
DRM_FORMAT_NV12,
|
||||
DRM_FORMAT_NV12M,
|
||||
DRM_FORMAT_NV12MT,
|
||||
};
|
||||
|
||||
|
|
|
@ -633,7 +633,7 @@ static int __devinit vidi_probe(struct platform_device *pdev)
|
|||
|
||||
DRM_DEBUG_KMS("%s\n", __FILE__);
|
||||
|
||||
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
|
||||
ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
|
||||
if (!ctx)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -673,8 +673,6 @@ static int __devexit vidi_remove(struct platform_device *pdev)
|
|||
ctx->raw_edid = NULL;
|
||||
}
|
||||
|
||||
kfree(ctx);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -2172,7 +2172,7 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
|
|||
|
||||
DRM_DEBUG_KMS("HDMI resource init\n");
|
||||
|
||||
memset(res, 0, sizeof *res);
|
||||
memset(res, 0, sizeof(*res));
|
||||
|
||||
/* get clocks, power */
|
||||
res->hdmi = clk_get(dev, "hdmi");
|
||||
|
@ -2204,7 +2204,7 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
|
|||
clk_set_parent(res->sclk_hdmi, res->sclk_pixel);
|
||||
|
||||
res->regul_bulk = kzalloc(ARRAY_SIZE(supply) *
|
||||
sizeof res->regul_bulk[0], GFP_KERNEL);
|
||||
sizeof(res->regul_bulk[0]), GFP_KERNEL);
|
||||
if (!res->regul_bulk) {
|
||||
DRM_ERROR("failed to get memory for regulators\n");
|
||||
goto fail;
|
||||
|
@ -2243,7 +2243,7 @@ static int hdmi_resources_cleanup(struct hdmi_context *hdata)
|
|||
clk_put(res->sclk_hdmi);
|
||||
if (!IS_ERR_OR_NULL(res->hdmi))
|
||||
clk_put(res->hdmi);
|
||||
memset(res, 0, sizeof *res);
|
||||
memset(res, 0, sizeof(*res));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -2312,11 +2312,6 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
|
|||
}
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
if (!res) {
|
||||
DRM_ERROR("failed to find registers\n");
|
||||
ret = -ENOENT;
|
||||
goto err_resource;
|
||||
}
|
||||
|
||||
hdata->regs = devm_request_and_ioremap(&pdev->dev, res);
|
||||
if (!hdata->regs) {
|
||||
|
|
|
@@ -236,11 +236,11 @@ static inline void vp_filter_set(struct mixer_resources *res,
 static void vp_default_filter(struct mixer_resources *res)
 {
 	vp_filter_set(res, VP_POLY8_Y0_LL,
-		filter_y_horiz_tap8, sizeof filter_y_horiz_tap8);
+		filter_y_horiz_tap8, sizeof(filter_y_horiz_tap8));
 	vp_filter_set(res, VP_POLY4_Y0_LL,
-		filter_y_vert_tap4, sizeof filter_y_vert_tap4);
+		filter_y_vert_tap4, sizeof(filter_y_vert_tap4));
 	vp_filter_set(res, VP_POLY4_C0_LL,
-		filter_cr_horiz_tap4, sizeof filter_cr_horiz_tap4);
+		filter_cr_horiz_tap4, sizeof(filter_cr_horiz_tap4));
 }
 
 static void mixer_vsync_set_update(struct mixer_context *ctx, bool enable)
@@ -476,6 +476,7 @@ static const struct psb_offset oaktrail_regmap[2] = {
 		.pos = DSPAPOS,
 		.surf = DSPASURF,
 		.addr = MRST_DSPABASE,
+		.base = MRST_DSPABASE,
 		.status = PIPEASTAT,
 		.linoff = DSPALINOFF,
 		.tileoff = DSPATILEOFF,
@@ -499,6 +500,7 @@ static const struct psb_offset oaktrail_regmap[2] = {
 		.pos = DSPBPOS,
 		.surf = DSPBSURF,
 		.addr = DSPBBASE,
+		.base = DSPBBASE,
 		.status = PIPEBSTAT,
 		.linoff = DSPBLINOFF,
 		.tileoff = DSPBTILEOFF,
@@ -115,6 +115,9 @@ static const struct file_operations i810_buffer_fops = {
 	.unlocked_ioctl = drm_ioctl,
 	.mmap = i810_mmap_buffers,
 	.fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = drm_compat_ioctl,
+#endif
 	.llseek = noop_llseek,
 };
 
@@ -51,6 +51,9 @@ static const struct file_operations i810_driver_fops = {
 	.mmap = drm_mmap,
 	.poll = drm_poll,
 	.fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = drm_compat_ioctl,
+#endif
 	.llseek = noop_llseek,
 };
 
@@ -1587,6 +1587,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	spin_lock_init(&dev_priv->irq_lock);
 	spin_lock_init(&dev_priv->error_lock);
 	spin_lock_init(&dev_priv->rps_lock);
+	spin_lock_init(&dev_priv->dpio_lock);
 
 	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
 		dev_priv->num_pipe = 3;
@@ -2700,9 +2700,6 @@ void intel_irq_init(struct drm_device *dev)
 		dev->driver->irq_handler = i8xx_irq_handler;
 		dev->driver->irq_uninstall = i8xx_irq_uninstall;
 	} else if (INTEL_INFO(dev)->gen == 3) {
-		/* IIR "flip pending" means done if this bit is set */
-		I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
-
 		dev->driver->irq_preinstall = i915_irq_preinstall;
 		dev->driver->irq_postinstall = i915_irq_postinstall;
 		dev->driver->irq_uninstall = i915_irq_uninstall;
@@ -1376,7 +1376,8 @@ static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
 	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
 	     reg, pipe_name(pipe));
 
-	WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_PIPE_B_SELECT),
+	WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
+	     && (val & DP_PIPEB_SELECT),
 	     "IBX PCH dp port still using transcoder B\n");
 }
 
@@ -1388,7 +1389,8 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
 	     "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
 	     reg, pipe_name(pipe));
 
-	WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_PIPE_B_SELECT),
+	WARN(HAS_PCH_IBX(dev_priv->dev) && (val & PORT_ENABLE) == 0
+	     && (val & SDVO_PIPE_B_SELECT),
 	     "IBX PCH hdmi port still using transcoder B\n");
 }
 
@@ -2533,14 +2533,10 @@ intel_dp_init(struct drm_device *dev, int output_reg)
 		break;
 	}
 
-	intel_dp_i2c_init(intel_dp, intel_connector, name);
-
 	/* Cache some DPCD data in the eDP case */
 	if (is_edp(intel_dp)) {
-		bool ret;
 		struct edp_power_seq	cur, vbt;
 		u32 pp_on, pp_off, pp_div;
-		struct edid *edid;
 
 		pp_on = I915_READ(PCH_PP_ON_DELAYS);
 		pp_off = I915_READ(PCH_PP_OFF_DELAYS);
@@ -2591,6 +2587,13 @@ intel_dp_init(struct drm_device *dev, int output_reg)
 
 		DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
 			      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
 	}
 
+	intel_dp_i2c_init(intel_dp, intel_connector, name);
+
+	if (is_edp(intel_dp)) {
+		bool ret;
+		struct edid *edid;
+
 		ironlake_edp_panel_vdd_on(intel_dp);
 		ret = intel_dp_get_dpcd(intel_dp);
@@ -162,19 +162,12 @@ static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv)
 	return val;
 }
 
-u32 intel_panel_get_max_backlight(struct drm_device *dev)
+static u32 _intel_panel_get_max_backlight(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 max;
 
 	max = i915_read_blc_pwm_ctl(dev_priv);
-	if (max == 0) {
-		/* XXX add code here to query mode clock or hardware clock
-		 * and program max PWM appropriately.
-		 */
-		pr_warn_once("fixme: max PWM is zero\n");
-		return 1;
-	}
 
 	if (HAS_PCH_SPLIT(dev)) {
 		max >>= 16;
@@ -188,6 +181,22 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
 		max *= 0xff;
 	}
 
+	return max;
+}
+
+u32 intel_panel_get_max_backlight(struct drm_device *dev)
+{
+	u32 max;
+
+	max = _intel_panel_get_max_backlight(dev);
+	if (max == 0) {
+		/* XXX add code here to query mode clock or hardware clock
+		 * and program max PWM appropriately.
+		 */
+		pr_warn_once("fixme: max PWM is zero\n");
+		return 1;
+	}
+
 	DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max);
 	return max;
 }
@@ -424,7 +433,11 @@ int intel_panel_setup_backlight(struct drm_device *dev)
 
 	memset(&props, 0, sizeof(props));
 	props.type = BACKLIGHT_RAW;
-	props.max_brightness = intel_panel_get_max_backlight(dev);
+	props.max_brightness = _intel_panel_get_max_backlight(dev);
+	if (props.max_brightness == 0) {
+		DRM_ERROR("Failed to get maximum backlight value\n");
+		return -ENODEV;
+	}
 	dev_priv->backlight =
 		backlight_device_register("intel_backlight",
 					  &connector->kdev, dev,
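The point of splitting out the raw helper above is that a zero maximum now means different things to different callers: the runtime path keeps the old clamp-to-1 fallback, while the setup path refuses to register a backlight device at all. A standalone sketch of that wrapper split (the register stand-in is a made-up name):

#include <stdio.h>

static unsigned int pwm_ctl;	/* stand-in for the BLC PWM readout */

/* raw helper: may legitimately report 0 */
static unsigned int _get_max_backlight(void)
{
	return pwm_ctl;
}

/* runtime path keeps the old "clamp to 1" fallback... */
static unsigned int get_max_backlight(void)
{
	unsigned int max = _get_max_backlight();

	return max ? max : 1;
}

/* ...while the setup path now refuses to register a useless device */
static int setup_backlight(void)
{
	return _get_max_backlight() ? 0 : -1;
}

int main(void)
{
	printf("runtime=%u setup=%d\n", get_max_backlight(), setup_backlight());
	return 0;
}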
@@ -3672,6 +3672,9 @@ static void gen3_init_clock_gating(struct drm_device *dev)
 
 	if (IS_PINEVIEW(dev))
 		I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
+
+	/* IIR "flip pending" means done if this bit is set */
+	I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
 }
 
 static void i85x_init_clock_gating(struct drm_device *dev)
@@ -2573,7 +2573,6 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
 		hotplug_mask = intel_sdvo->is_sdvob ?
 			SDVOB_HOTPLUG_INT_STATUS_I915 : SDVOC_HOTPLUG_INT_STATUS_I915;
 	}
-	dev_priv->hotplug_supported_mask |= hotplug_mask;
 
 	drm_encoder_helper_add(&intel_encoder->base, &intel_sdvo_helper_funcs);
 
@@ -2581,14 +2580,6 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
 	if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
 		goto err;
 
-	/* Set up hotplug command - note paranoia about contents of reply.
-	 * We assume that the hardware is in a sane state, and only touch
-	 * the bits we think we understand.
-	 */
-	intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG,
-			     &intel_sdvo->hotplug_active, 2);
-	intel_sdvo->hotplug_active[0] &= ~0x3;
-
 	if (intel_sdvo_output_setup(intel_sdvo,
 				    intel_sdvo->caps.output_flags) != true) {
 		DRM_DEBUG_KMS("SDVO output failed to setup on %s\n",
@@ -2596,6 +2587,12 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
 		goto err;
 	}
 
+	/* Only enable the hotplug irq if we need it, to work around noisy
+	 * hotplug lines.
+	 */
+	if (intel_sdvo->hotplug_active[0])
+		dev_priv->hotplug_supported_mask |= hotplug_mask;
+
 	intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg);
 
 	/* Set the input timing to the screen. Assume always input 0. */
@@ -84,6 +84,9 @@ static const struct file_operations mgag200_driver_fops = {
 	.mmap = mgag200_mmap,
 	.poll = drm_poll,
 	.fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = drm_compat_ioctl,
+#endif
 	.read = drm_read,
 };
 
|
drivers/gpu/drm/nouveau/nouveau_display.c

@@ -598,7 +598,7 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
 	args->size = args->pitch * args->height;
 	args->size = roundup(args->size, PAGE_SIZE);
 
-	ret = nouveau_gem_new(dev, args->size, 0, TTM_PL_FLAG_VRAM, 0, 0, &bo);
+	ret = nouveau_gem_new(dev, args->size, 0, NOUVEAU_GEM_DOMAIN_VRAM, 0, 0, &bo);
 	if (ret)
 		return ret;

drivers/gpu/drm/nouveau/nv50_gpio.c

@@ -115,6 +115,9 @@ nv50_gpio_init(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 
+	/* initialise gpios and routing to vbios defaults */
+	nouveau_gpio_reset(dev);
+
 	/* disable, and ack any pending gpio interrupts */
 	nv_wr32(dev, 0xe050, 0x00000000);
 	nv_wr32(dev, 0xe054, 0xffffffff);

drivers/gpu/drm/nouveau/nvd0_display.c

@@ -1510,10 +1510,10 @@ nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
 	case OUTPUT_DP:
 		if (nv_connector->base.display_info.bpc == 6) {
 			nv_encoder->dp.datarate = mode->clock * 18 / 8;
-			syncs |= 0x00000140;
+			syncs |= 0x00000002 << 6;
 		} else {
 			nv_encoder->dp.datarate = mode->clock * 24 / 8;
-			syncs |= 0x00000180;
+			syncs |= 0x00000005 << 6;
 		}
 
 		if (nv_encoder->dcb->sorconf.link & 1)

drivers/gpu/drm/radeon/atombios_crtc.c

@@ -1479,14 +1479,98 @@ static void radeon_legacy_atom_fixup(struct drm_crtc *crtc)
 	}
 }
 
+/**
+ * radeon_get_pll_use_mask - look up a mask of which pplls are in use
+ *
+ * @crtc: drm crtc
+ *
+ * Returns the mask of which PPLLs (Pixel PLLs) are in use.
+ */
+static u32 radeon_get_pll_use_mask(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_crtc *test_crtc;
+	struct radeon_crtc *radeon_test_crtc;
+	u32 pll_in_use = 0;
+
+	list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
+		if (crtc == test_crtc)
+			continue;
+
+		radeon_test_crtc = to_radeon_crtc(test_crtc);
+		if (radeon_test_crtc->pll_id != ATOM_PPLL_INVALID)
+			pll_in_use |= (1 << radeon_test_crtc->pll_id);
+	}
+	return pll_in_use;
+}
+
+/**
+ * radeon_get_shared_dp_ppll - return the PPLL used by another crtc for DP
+ *
+ * @crtc: drm crtc
+ *
+ * Returns the PPLL (Pixel PLL) used by another crtc/encoder which is
+ * also in DP mode. For DP, a single PPLL can be used for all DP
+ * crtcs/encoders.
+ */
+static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_encoder *test_encoder;
+	struct radeon_crtc *radeon_test_crtc;
+
+	list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
+		if (test_encoder->crtc && (test_encoder->crtc != crtc)) {
+			if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_encoder))) {
+				/* for DP use the same PLL for all */
+				radeon_test_crtc = to_radeon_crtc(test_encoder->crtc);
+				if (radeon_test_crtc->pll_id != ATOM_PPLL_INVALID)
+					return radeon_test_crtc->pll_id;
+			}
+		}
+	}
+	return ATOM_PPLL_INVALID;
+}
+
+/**
+ * radeon_atom_pick_pll - Allocate a PPLL for use by the crtc.
+ *
+ * @crtc: drm crtc
+ *
+ * Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors
+ * a single PPLL can be used for all DP crtcs/encoders. For non-DP
+ * monitors a dedicated PPLL must be used. If a particular board has
+ * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
+ * as there is no need to program the PLL itself. If we are not able to
+ * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
+ * avoid messing up an existing monitor.
+ *
+ * Asic specific PLL information
+ *
+ * DCE 6.1
+ * - PPLL2 is only available to UNIPHYA (both DP and non-DP)
+ * - PPLL0, PPLL1 are available for UNIPHYB/C/D/E/F (both DP and non-DP)
+ *
+ * DCE 6.0
+ * - PPLL0 is available to all UNIPHY (DP only)
+ * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
+ *
+ * DCE 5.0
+ * - DCPLL is available to all UNIPHY (DP only)
+ * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
+ *
+ * DCE 3.0/4.0/4.1
+ * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
+ *
+ */
 static int radeon_atom_pick_pll(struct drm_crtc *crtc)
 {
 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
 	struct radeon_device *rdev = dev->dev_private;
 	struct drm_encoder *test_encoder;
-	struct drm_crtc *test_crtc;
-	uint32_t pll_in_use = 0;
+	u32 pll_in_use;
+	int pll;
 
 	if (ASIC_IS_DCE61(rdev)) {
 		list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
@@ -1498,32 +1582,40 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
 
 				if ((test_radeon_encoder->encoder_id ==
 				     ENCODER_OBJECT_ID_INTERNAL_UNIPHY) &&
-				    (dig->linkb == false)) /* UNIPHY A uses PPLL2 */
+				    (dig->linkb == false))
+					/* UNIPHY A uses PPLL2 */
 					return ATOM_PPLL2;
+				else if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_encoder))) {
+					/* UNIPHY B/C/D/E/F */
+					if (rdev->clock.dp_extclk)
+						/* skip PPLL programming if using ext clock */
+						return ATOM_PPLL_INVALID;
+					else {
+						/* use the same PPLL for all DP monitors */
+						pll = radeon_get_shared_dp_ppll(crtc);
+						if (pll != ATOM_PPLL_INVALID)
+							return pll;
+					}
+				}
+				break;
 			}
 		}
 		/* UNIPHY B/C/D/E/F */
-		list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
-			struct radeon_crtc *radeon_test_crtc;
-
-			if (crtc == test_crtc)
-				continue;
-
-			radeon_test_crtc = to_radeon_crtc(test_crtc);
-			if ((radeon_test_crtc->pll_id == ATOM_PPLL0) ||
-			    (radeon_test_crtc->pll_id == ATOM_PPLL1))
-				pll_in_use |= (1 << radeon_test_crtc->pll_id);
-		}
-		if (!(pll_in_use & 4))
+		pll_in_use = radeon_get_pll_use_mask(crtc);
+		if (!(pll_in_use & (1 << ATOM_PPLL0)))
 			return ATOM_PPLL0;
-		return ATOM_PPLL1;
+		if (!(pll_in_use & (1 << ATOM_PPLL1)))
+			return ATOM_PPLL1;
+		DRM_ERROR("unable to allocate a PPLL\n");
+		return ATOM_PPLL_INVALID;
 	} else if (ASIC_IS_DCE4(rdev)) {
 		list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
 			if (test_encoder->crtc && (test_encoder->crtc == crtc)) {
 				/* in DP mode, the DP ref clock can come from PPLL, DCPLL, or ext clock,
 				 * depending on the asic:
 				 * DCE4: PPLL or ext clock
-				 * DCE5: DCPLL or ext clock
+				 * DCE5: PPLL, DCPLL, or ext clock
+				 * DCE6: PPLL, PPLL0, or ext clock
 				 *
 				 * Setting ATOM_PPLL_INVALID will cause SetPixelClock to skip
 				 * PPLL/DCPLL programming and only program the DP DTO for the
@@ -1531,31 +1623,34 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
 				 */
 				if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_encoder))) {
 					if (rdev->clock.dp_extclk)
+						/* skip PPLL programming if using ext clock */
 						return ATOM_PPLL_INVALID;
+					else if (ASIC_IS_DCE6(rdev))
+						/* use PPLL0 for all DP */
+						return ATOM_PPLL0;
 					else if (ASIC_IS_DCE5(rdev))
+						/* use DCPLL for all DP */
 						return ATOM_DCPLL;
+					else {
+						/* use the same PPLL for all DP monitors */
+						pll = radeon_get_shared_dp_ppll(crtc);
+						if (pll != ATOM_PPLL_INVALID)
+							return pll;
+					}
 				}
+				break;
 			}
 		}
 
-		/* otherwise, pick one of the plls */
-		list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
-			struct radeon_crtc *radeon_test_crtc;
-
-			if (crtc == test_crtc)
-				continue;
-
-			radeon_test_crtc = to_radeon_crtc(test_crtc);
-			if ((radeon_test_crtc->pll_id >= ATOM_PPLL1) &&
-			    (radeon_test_crtc->pll_id <= ATOM_PPLL2))
-				pll_in_use |= (1 << radeon_test_crtc->pll_id);
-		}
-		if (!(pll_in_use & 1))
+		/* all other cases */
+		pll_in_use = radeon_get_pll_use_mask(crtc);
+		if (!(pll_in_use & (1 << ATOM_PPLL2)))
+			return ATOM_PPLL2;
+		if (!(pll_in_use & (1 << ATOM_PPLL1)))
 			return ATOM_PPLL1;
-		return ATOM_PPLL2;
+		DRM_ERROR("unable to allocate a PPLL\n");
+		return ATOM_PPLL_INVALID;
 	} else
 		/* use PPLL1 or PPLL2 */
 		return radeon_crtc->crtc_id;
 
 }
 
@@ -1697,7 +1792,7 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
 		break;
 	}
 done:
-	radeon_crtc->pll_id = -1;
+	radeon_crtc->pll_id = ATOM_PPLL_INVALID;
 }
 
 static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
 
@@ -1746,6 +1841,6 @@ void radeon_atombios_init_crtc(struct drm_device *dev,
 	else
 		radeon_crtc->crtc_offset = 0;
 	}
-	radeon_crtc->pll_id = -1;
+	radeon_crtc->pll_id = ATOM_PPLL_INVALID;
 	drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs);
 }

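Aside: once the in-use PLL ids are folded into a bitmask by radeon_get_pll_use_mask(), the allocation itself is just "hand out the first candidate whose bit is clear, else give up". A freestanding sketch of that idea (simplified, not the radeon code; names are made up):

#include <stdio.h>

#define PLL_INVALID -1

/* Pick the first PLL from 'candidates' whose bit is not set in 'in_use'. */
static int pick_pll(unsigned int in_use, const int *candidates, int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (!(in_use & (1u << candidates[i])))
			return candidates[i];
	return PLL_INVALID; /* nothing free: skip PLL programming entirely */
}

int main(void)
{
	/* other crtcs currently hold PLL 0 and PLL 2 */
	unsigned int in_use = (1u << 0) | (1u << 2);
	static const int prefer[] = { 2, 1 }; /* try PLL 2 first, then PLL 1 */

	printf("allocated PLL %d\n", pick_pll(in_use, prefer, 2)); /* prints 1 */
	return 0;
}
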
drivers/gpu/drm/radeon/radeon_fence.c

@@ -131,7 +131,7 @@ int radeon_fence_emit(struct radeon_device *rdev,
  */
 void radeon_fence_process(struct radeon_device *rdev, int ring)
 {
-	uint64_t seq, last_seq;
+	uint64_t seq, last_seq, last_emitted;
 	unsigned count_loop = 0;
 	bool wake = false;
 
@@ -158,13 +158,15 @@ void radeon_fence_process(struct radeon_device *rdev, int ring)
 	 */
 	last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
 	do {
+		last_emitted = rdev->fence_drv[ring].sync_seq[ring];
 		seq = radeon_fence_read(rdev, ring);
 		seq |= last_seq & 0xffffffff00000000LL;
 		if (seq < last_seq) {
-			seq += 0x100000000LL;
+			seq &= 0xffffffff;
+			seq |= last_emitted & 0xffffffff00000000LL;
 		}
 
-		if (seq == last_seq) {
+		if (seq <= last_seq || seq > last_emitted) {
 			break;
 		}
 		/* If we loop over we don't want to return without

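Aside: the fence fix extends a 32-bit hardware sequence number into the driver's 64-bit space; on apparent wraparound the upper bits are borrowed from the last emitted value, and anything outside the window (last_seq, last_emitted] is rejected as a glitched readback. A self-contained sketch of that arithmetic (illustrative values, not the radeon code):

#include <stdint.h>
#include <stdio.h>

/* Extend a 32-bit hw seqno against 64-bit bookkeeping values. */
static uint64_t extend_seq(uint32_t hw, uint64_t last_seq, uint64_t last_emitted)
{
	uint64_t seq = hw | (last_seq & 0xffffffff00000000ULL);

	if (seq < last_seq) {
		/* lower 32 bits wrapped: take the upper half from last_emitted */
		seq &= 0xffffffffULL;
		seq |= last_emitted & 0xffffffff00000000ULL;
	}
	/* a value outside (last_seq, last_emitted] cannot be real */
	if (seq <= last_seq || seq > last_emitted)
		return last_seq;
	return seq;
}

int main(void)
{
	/* last signalled 0x1fffffff0, last emitted 0x200000004, and the
	 * hardware register now reads the wrapped low word 0x00000002 */
	printf("%#llx\n", (unsigned long long)
	       extend_seq(0x00000002, 0x1fffffff0ULL, 0x200000004ULL));
	/* prints 0x200000002 */
	return 0;
}
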
drivers/gpu/drm/savage/savage_drv.c

@@ -43,6 +43,9 @@ static const struct file_operations savage_driver_fops = {
 	.mmap = drm_mmap,
 	.poll = drm_poll,
 	.fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = drm_compat_ioctl,
+#endif
 	.llseek = noop_llseek,
 };

drivers/gpu/drm/sis/sis_drv.c

@@ -74,6 +74,9 @@ static const struct file_operations sis_driver_fops = {
 	.mmap = drm_mmap,
 	.poll = drm_poll,
 	.fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = drm_compat_ioctl,
+#endif
 	.llseek = noop_llseek,
 };

drivers/gpu/drm/tdfx/tdfx_drv.c

@@ -49,6 +49,9 @@ static const struct file_operations tdfx_driver_fops = {
 	.mmap = drm_mmap,
 	.poll = drm_poll,
 	.fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = drm_compat_ioctl,
+#endif
 	.llseek = noop_llseek,
 };

drivers/gpu/drm/udl/udl_drv.c

@@ -66,6 +66,9 @@ static const struct file_operations udl_driver_fops = {
 	.unlocked_ioctl = drm_ioctl,
 	.release = drm_release,
 	.fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = drm_compat_ioctl,
+#endif
 	.llseek = noop_llseek,
 };

drivers/gpu/drm/via/via_drv.c

@@ -65,6 +65,9 @@ static const struct file_operations via_driver_fops = {
 	.mmap = drm_mmap,
 	.poll = drm_poll,
 	.fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = drm_compat_ioctl,
+#endif
 	.llseek = noop_llseek,
 };

drivers/gpu/drm/vmwgfx/Kconfig

@@ -12,3 +12,11 @@ config DRM_VMWGFX
 	  This is a KMS enabled DRM driver for the VMware SVGA2
 	  virtual hardware.
 	  The compiled module will be called "vmwgfx.ko".
+
+config DRM_VMWGFX_FBCON
+	depends on DRM_VMWGFX
+	bool "Enable framebuffer console under vmwgfx by default"
+	help
+	   Choose this option if you are shipping a new vmwgfx
+	   userspace driver that supports using the kernel driver.
+

drivers/gpu/drm/vmwgfx/vmwgfx_drv.c

@@ -182,8 +182,9 @@ static struct pci_device_id vmw_pci_id_list[] = {
 	{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
 	{0, 0, 0}
 };
+MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);
 
-static int enable_fbdev;
+static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
 
 static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
 static void vmw_master_init(struct vmw_master *);
 
@@ -1154,6 +1155,11 @@ static struct drm_driver driver = {
 	.open = vmw_driver_open,
 	.preclose = vmw_preclose,
 	.postclose = vmw_postclose,
+
+	.dumb_create = vmw_dumb_create,
+	.dumb_map_offset = vmw_dumb_map_offset,
+	.dumb_destroy = vmw_dumb_destroy,
+
 	.fops = &vmwgfx_driver_fops,
 	.name = VMWGFX_DRIVER_NAME,
 	.desc = VMWGFX_DRIVER_DESC,

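Aside: IS_ENABLED() lets the new Kconfig bool become the module parameter's default without an #ifdef; it expands to 1 when the symbol is set and 0 when it is undefined, and it does so at preprocessing time, so it is valid in a static initializer. The sketch below reimplements the macro in plain C to show the mechanics (it mirrors the kernel's include/linux/kconfig.h trick; CONFIG_DEMO_FBCON is a made-up symbol standing in for CONFIG_DRM_VMWGFX_FBCON):

#include <stdio.h>

/* Kconfig defines a selected symbol to 1 and leaves an unselected one
 * undefined.  The placeholder trick maps "defined as 1" -> 1, else 0. */
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(ignored, val, ...) val
#define IS_ENABLED(cfg) _IS_ENABLED(cfg)
#define _IS_ENABLED(val) __is_enabled(__ARG_PLACEHOLDER_##val)
#define __is_enabled(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)

#define CONFIG_DEMO_FBCON 1	/* comment this out and the default becomes 0 */

static int enable_fbdev = IS_ENABLED(CONFIG_DEMO_FBCON);

int main(void)
{
	printf("enable_fbdev defaults to %d\n", enable_fbdev);
	return 0;
}
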
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h

@@ -645,6 +645,16 @@ int vmw_kms_readback(struct vmw_private *dev_priv,
 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *file_priv);
 
+int vmw_dumb_create(struct drm_file *file_priv,
+		    struct drm_device *dev,
+		    struct drm_mode_create_dumb *args);
+
+int vmw_dumb_map_offset(struct drm_file *file_priv,
+			struct drm_device *dev, uint32_t handle,
+			uint64_t *offset);
+int vmw_dumb_destroy(struct drm_file *file_priv,
+		     struct drm_device *dev,
+		     uint32_t handle);
 /**
  * Overlay control - vmwgfx_overlay.c
  */

drivers/gpu/drm/vmwgfx/vmwgfx_resource.c

@@ -1917,3 +1917,76 @@ err_ref:
 	vmw_resource_unreference(&res);
 	return ret;
 }
+
+
+int vmw_dumb_create(struct drm_file *file_priv,
+		    struct drm_device *dev,
+		    struct drm_mode_create_dumb *args)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct vmw_master *vmaster = vmw_master(file_priv->master);
+	struct vmw_user_dma_buffer *vmw_user_bo;
+	struct ttm_buffer_object *tmp;
+	int ret;
+
+	args->pitch = args->width * ((args->bpp + 7) / 8);
+	args->size = args->pitch * args->height;
+
+	vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
+	if (vmw_user_bo == NULL)
+		return -ENOMEM;
+
+	ret = ttm_read_lock(&vmaster->lock, true);
+	if (ret != 0) {
+		kfree(vmw_user_bo);
+		return ret;
+	}
+
+	ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, args->size,
+			      &vmw_vram_sys_placement, true,
+			      &vmw_user_dmabuf_destroy);
+	if (ret != 0)
+		goto out_no_dmabuf;
+
+	tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
+	ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
+				   &vmw_user_bo->base,
+				   false,
+				   ttm_buffer_type,
+				   &vmw_user_dmabuf_release, NULL);
+	if (unlikely(ret != 0))
+		goto out_no_base_object;
+
+	args->handle = vmw_user_bo->base.hash.key;
+
+out_no_base_object:
+	ttm_bo_unref(&tmp);
+out_no_dmabuf:
+	ttm_read_unlock(&vmaster->lock);
+	return ret;
+}
+
+int vmw_dumb_map_offset(struct drm_file *file_priv,
+			struct drm_device *dev, uint32_t handle,
+			uint64_t *offset)
+{
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	struct vmw_dma_buffer *out_buf;
+	int ret;
+
+	ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf);
+	if (ret != 0)
+		return -EINVAL;
+
+	*offset = out_buf->base.addr_space_offset;
+	vmw_dmabuf_unreference(&out_buf);
+	return 0;
+}
+
+int vmw_dumb_destroy(struct drm_file *file_priv,
+		     struct drm_device *dev,
+		     uint32_t handle)
+{
+	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
+					 handle, TTM_REF_USAGE);
+}

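Aside: vmw_dumb_create() derives the buffer geometry the standard dumb-buffer way: bytes per pixel rounded up from bpp, times width for the pitch, times height for the size. A small worked check of that arithmetic (values are made up):

#include <stdint.h>
#include <stdio.h>

struct create_dumb {
	uint32_t width, height, bpp;
	uint32_t pitch;
	uint64_t size;
};

static void fill_dumb_geometry(struct create_dumb *args)
{
	/* round bpp up to whole bytes: 24bpp -> 3 bytes, 30bpp -> 4 bytes */
	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = (uint64_t)args->pitch * args->height;
}

int main(void)
{
	struct create_dumb args = { .width = 1280, .height = 720, .bpp = 32 };

	fill_dumb_geometry(&args);
	printf("pitch=%u size=%llu\n", args.pitch,
	       (unsigned long long)args.size); /* pitch=5120 size=3686400 */
	return 0;
}
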
@ -69,22 +69,6 @@ struct ina2xx_data {
|
|||
u16 regs[INA2XX_MAX_REGISTERS];
|
||||
};
|
||||
|
||||
int ina2xx_read_word(struct i2c_client *client, int reg)
|
||||
{
|
||||
int val = i2c_smbus_read_word_data(client, reg);
|
||||
if (unlikely(val < 0)) {
|
||||
dev_dbg(&client->dev,
|
||||
"Failed to read register: %d\n", reg);
|
||||
return val;
|
||||
}
|
||||
return be16_to_cpu(val);
|
||||
}
|
||||
|
||||
void ina2xx_write_word(struct i2c_client *client, int reg, int data)
|
||||
{
|
||||
i2c_smbus_write_word_data(client, reg, cpu_to_be16(data));
|
||||
}
|
||||
|
||||
static struct ina2xx_data *ina2xx_update_device(struct device *dev)
|
||||
{
|
||||
struct i2c_client *client = to_i2c_client(dev);
|
||||
|
@ -102,7 +86,7 @@ static struct ina2xx_data *ina2xx_update_device(struct device *dev)
|
|||
|
||||
/* Read all registers */
|
||||
for (i = 0; i < data->registers; i++) {
|
||||
int rv = ina2xx_read_word(client, i);
|
||||
int rv = i2c_smbus_read_word_swapped(client, i);
|
||||
if (rv < 0) {
|
||||
ret = ERR_PTR(rv);
|
||||
goto abort;
|
||||
|
@ -279,22 +263,26 @@ static int ina2xx_probe(struct i2c_client *client,
|
|||
switch (data->kind) {
|
||||
case ina219:
|
||||
/* device configuration */
|
||||
ina2xx_write_word(client, INA2XX_CONFIG, INA219_CONFIG_DEFAULT);
|
||||
i2c_smbus_write_word_swapped(client, INA2XX_CONFIG,
|
||||
INA219_CONFIG_DEFAULT);
|
||||
|
||||
/* set current LSB to 1mA, shunt is in uOhms */
|
||||
/* (equation 13 in datasheet) */
|
||||
ina2xx_write_word(client, INA2XX_CALIBRATION, 40960000 / shunt);
|
||||
i2c_smbus_write_word_swapped(client, INA2XX_CALIBRATION,
|
||||
40960000 / shunt);
|
||||
dev_info(&client->dev,
|
||||
"power monitor INA219 (Rshunt = %li uOhm)\n", shunt);
|
||||
data->registers = INA219_REGISTERS;
|
||||
break;
|
||||
case ina226:
|
||||
/* device configuration */
|
||||
ina2xx_write_word(client, INA2XX_CONFIG, INA226_CONFIG_DEFAULT);
|
||||
i2c_smbus_write_word_swapped(client, INA2XX_CONFIG,
|
||||
INA226_CONFIG_DEFAULT);
|
||||
|
||||
/* set current LSB to 1mA, shunt is in uOhms */
|
||||
/* (equation 1 in datasheet)*/
|
||||
ina2xx_write_word(client, INA2XX_CALIBRATION, 5120000 / shunt);
|
||||
i2c_smbus_write_word_swapped(client, INA2XX_CALIBRATION,
|
||||
5120000 / shunt);
|
||||
dev_info(&client->dev,
|
||||
"power monitor INA226 (Rshunt = %li uOhm)\n", shunt);
|
||||
data->registers = INA226_REGISTERS;
|
||||
|
|
|
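Aside: i2c_smbus_read_word_swapped() folds the byte swap into the transfer helper. SMBus word transfers put the low byte on the wire first, while the INA2xx registers are big-endian, so the driver's open-coded be16_to_cpu() wrapper was redundant. The equivalence, sketched with a plain 16-bit swap (standalone C, not the kernel helpers):

#include <stdint.h>
#include <stdio.h>

/* What the bus delivers for a register holding 0x1234: low byte first. */
static uint16_t smbus_read_word_data(void) { return 0x3412; }

static uint16_t swab16(uint16_t v) { return (uint16_t)((v << 8) | (v >> 8)); }

/* read_word_swapped() is read_word_data() plus the byte swap. */
static uint16_t smbus_read_word_swapped(void)
{
	return swab16(smbus_read_word_data());
}

int main(void)
{
	printf("raw=%#x swapped=%#x\n",
	       smbus_read_word_data(), smbus_read_word_swapped());
	/* prints raw=0x3412 swapped=0x1234 */
	return 0;
}
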
drivers/hwmon/twl4030-madc-hwmon.c

@@ -44,12 +44,13 @@ static ssize_t madc_read(struct device *dev,
 			 struct device_attribute *devattr, char *buf)
 {
 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
-	struct twl4030_madc_request req;
+	struct twl4030_madc_request req = {
+		.channels = 1 << attr->index,
+		.method = TWL4030_MADC_SW2,
+		.type = TWL4030_MADC_WAIT,
+	};
 	long val;
 
-	req.channels = (1 << attr->index);
-	req.method = TWL4030_MADC_SW2;
-	req.func_cb = NULL;
 	val = twl4030_madc_conversion(&req);
 	if (val < 0)
 		return val;

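Aside: the designated initializer is more than a style change. C guarantees that members not named in the initializer list are zero-initialized, so the explicit req.func_cb = NULL; becomes unnecessary, and fields that the old code never set (the on-stack struct was otherwise uninitialized) are now reliably zeroed. A minimal demonstration with a hypothetical struct:

#include <stdio.h>

struct request {
	unsigned int channels;
	int method;
	int type;
	void (*func_cb)(void); /* deliberately not named below */
};

int main(void)
{
	struct request req = {
		.channels = 1u << 3,
		.method = 2,
		.type = 1,
	};

	/* unnamed members are zero-initialized (C99 6.7.8p19) */
	printf("func_cb == NULL? %s\n", req.func_cb == NULL ? "yes" : "no");
	return 0;
}
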
drivers/i2c/algos/i2c-algo-pca.c

@@ -476,17 +476,17 @@ static int pca_init(struct i2c_adapter *adap)
 		/* To avoid integer overflow, use clock/100 for calculations */
 		clock = pca_clock(pca_data) / 100;
 
-		if (pca_data->i2c_clock > 10000) {
+		if (pca_data->i2c_clock > 1000000) {
 			mode = I2C_PCA_MODE_TURBO;
 			min_tlow = 14;
 			min_thi = 5;
 			raise_fall_time = 22; /* Raise 11e-8s, Fall 11e-8s */
-		} else if (pca_data->i2c_clock > 4000) {
+		} else if (pca_data->i2c_clock > 400000) {
 			mode = I2C_PCA_MODE_FASTP;
 			min_tlow = 17;
 			min_thi = 9;
 			raise_fall_time = 22; /* Raise 11e-8s, Fall 11e-8s */
-		} else if (pca_data->i2c_clock > 1000) {
+		} else if (pca_data->i2c_clock > 100000) {
 			mode = I2C_PCA_MODE_FAST;
 			min_tlow = 44;
 			min_thi = 20;

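Aside: the thresholds read naturally once you know i2c_clock is given in Hz. The old cut-offs (10000, 4000, 1000) appear to compare a Hz quantity against kHz-scale limits, so ordinary bus speeds would all land in turbo mode; the corrected values line up with the standard I2C speed classes (100 kHz standard, 400 kHz fast, 1 MHz fast-plus). A tiny classifier showing the fixed comparisons (mode names illustrative):

#include <stdio.h>

static const char *pca_mode(unsigned int i2c_clock_hz)
{
	if (i2c_clock_hz > 1000000)		/* beyond Fast-mode Plus */
		return "TURBO";
	else if (i2c_clock_hz > 400000)		/* up to 1 MHz */
		return "FASTP";
	else if (i2c_clock_hz > 100000)		/* up to 400 kHz */
		return "FAST";
	return "STD";				/* 100 kHz standard mode */
}

int main(void)
{
	printf("%s %s %s\n",
	       pca_mode(100000),	/* STD   */
	       pca_mode(400000),	/* FAST  */
	       pca_mode(1000000));	/* FASTP */
	return 0;
}
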
drivers/i2c/busses/Kconfig

@@ -104,6 +104,7 @@ config I2C_I801
 	    DH89xxCC (PCH)
 	    Panther Point (PCH)
 	    Lynx Point (PCH)
+	    Lynx Point-LP (PCH)
 
 	  This driver can also be built as a module.  If so, the module
 	  will be called i2c-i801.
 
@@ -354,9 +355,13 @@ config I2C_DAVINCI
 	  devices such as DaVinci NIC.
 	  For details please see http://www.ti.com/davinci
 
+config I2C_DESIGNWARE_CORE
+	tristate
+
 config I2C_DESIGNWARE_PLATFORM
 	tristate "Synopsys DesignWare Platform"
 	depends on HAVE_CLK
+	select I2C_DESIGNWARE_CORE
 	help
 	  If you say yes to this option, support will be included for the
 	  Synopsys DesignWare I2C adapter. Only master mode is supported.
 
@@ -367,6 +372,7 @@ config I2C_DESIGNWARE_PLATFORM
 config I2C_DESIGNWARE_PCI
 	tristate "Synopsys DesignWare PCI"
 	depends on PCI
+	select I2C_DESIGNWARE_CORE
 	help
 	  If you say yes to this option, support will be included for the
 	  Synopsys DesignWare I2C adapter. Only master mode is supported.

drivers/i2c/busses/Makefile

@@ -33,10 +33,11 @@ obj-$(CONFIG_I2C_AU1550) += i2c-au1550.o
 obj-$(CONFIG_I2C_BLACKFIN_TWI)	+= i2c-bfin-twi.o
 obj-$(CONFIG_I2C_CPM)		+= i2c-cpm.o
 obj-$(CONFIG_I2C_DAVINCI)	+= i2c-davinci.o
+obj-$(CONFIG_I2C_DESIGNWARE_CORE)	+= i2c-designware-core.o
 obj-$(CONFIG_I2C_DESIGNWARE_PLATFORM)	+= i2c-designware-platform.o
-i2c-designware-platform-objs := i2c-designware-platdrv.o i2c-designware-core.o
+i2c-designware-platform-objs := i2c-designware-platdrv.o
 obj-$(CONFIG_I2C_DESIGNWARE_PCI)	+= i2c-designware-pci.o
-i2c-designware-pci-objs := i2c-designware-pcidrv.o i2c-designware-core.o
+i2c-designware-pci-objs := i2c-designware-pcidrv.o
 obj-$(CONFIG_I2C_EG20T)		+= i2c-eg20t.o
 obj-$(CONFIG_I2C_GPIO)		+= i2c-gpio.o
 obj-$(CONFIG_I2C_HIGHLANDER)	+= i2c-highlander.o