Merge branch 'kprobes-test' of git://git.yxit.co.uk/linux into devel-stable
commit f70cac8d9c

@@ -1455,7 +1455,7 @@ Applicable to the H264 encoder.</entry>
 </row>

 <row><entry></entry></row>
-<row>
+<row id="v4l2-mpeg-video-h264-vui-sar-idc">
 <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC</constant> </entry>
 <entry>enum v4l2_mpeg_video_h264_vui_sar_idc</entry>
 </row>
@@ -1561,7 +1561,7 @@ Applicable to the H264 encoder.</entry>
 </row>

 <row><entry></entry></row>
-<row>
+<row id="v4l2-mpeg-video-h264-level">
 <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_LEVEL</constant> </entry>
 <entry>enum v4l2_mpeg_video_h264_level</entry>
 </row>
@@ -1641,7 +1641,7 @@ Possible values are:</entry>
 </row>

 <row><entry></entry></row>
-<row>
+<row id="v4l2-mpeg-video-mpeg4-level">
 <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL</constant> </entry>
 <entry>enum v4l2_mpeg_video_mpeg4_level</entry>
 </row>
@@ -1689,9 +1689,9 @@ Possible values are:</entry>
 </row>

 <row><entry></entry></row>
-<row>
+<row id="v4l2-mpeg-video-h264-profile">
 <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_PROFILE</constant> </entry>
-<entry>enum v4l2_mpeg_h264_profile</entry>
+<entry>enum v4l2_mpeg_video_h264_profile</entry>
 </row>
 <row><entry spanname="descr">The profile information for H264.
 Applicable to the H264 encoder.
@@ -1774,9 +1774,9 @@ Possible values are:</entry>
 </row>

 <row><entry></entry></row>
-<row>
+<row id="v4l2-mpeg-video-mpeg4-profile">
 <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE</constant> </entry>
-<entry>enum v4l2_mpeg_mpeg4_profile</entry>
+<entry>enum v4l2_mpeg_video_mpeg4_profile</entry>
 </row>
 <row><entry spanname="descr">The profile information for MPEG4.
 Applicable to the MPEG4 encoder.
@@ -1820,9 +1820,9 @@ Applicable to the encoder.
 </row>

 <row><entry></entry></row>
-<row>
+<row id="v4l2-mpeg-video-multi-slice-mode">
 <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE</constant> </entry>
-<entry>enum v4l2_mpeg_multi_slice_mode</entry>
+<entry>enum v4l2_mpeg_video_multi_slice_mode</entry>
 </row>
 <row><entry spanname="descr">Determines how the encoder should handle division of frame into slices.
 Applicable to the encoder.
@@ -1868,9 +1868,9 @@ Applicable to the encoder.</entry>
 </row>

 <row><entry></entry></row>
-<row>
+<row id="v4l2-mpeg-video-h264-loop-filter-mode">
 <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE</constant> </entry>
-<entry>enum v4l2_mpeg_h264_loop_filter_mode</entry>
+<entry>enum v4l2_mpeg_video_h264_loop_filter_mode</entry>
 </row>
 <row><entry spanname="descr">Loop filter mode for H264 encoder.
 Possible values are:</entry>
@@ -1913,9 +1913,9 @@ Applicable to the H264 encoder.</entry>
 </row>

 <row><entry></entry></row>
-<row>
+<row id="v4l2-mpeg-video-h264-entropy-mode">
 <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE</constant> </entry>
-<entry>enum v4l2_mpeg_h264_symbol_mode</entry>
+<entry>enum v4l2_mpeg_video_h264_entropy_mode</entry>
 </row>
 <row><entry spanname="descr">Entropy coding mode for H264 - CABAC/CAVALC.
 Applicable to the H264 encoder.
@@ -2140,9 +2140,9 @@ previous frames. Applicable to the H264 encoder.</entry>
 </row>

 <row><entry></entry></row>
-<row>
+<row id="v4l2-mpeg-video-header-mode">
 <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_HEADER_MODE</constant> </entry>
-<entry>enum v4l2_mpeg_header_mode</entry>
+<entry>enum v4l2_mpeg_video_header_mode</entry>
 </row>
 <row><entry spanname="descr">Determines whether the header is returned as the first buffer or is
 it returned together with the first frame. Applicable to encoders.
@@ -2320,9 +2320,9 @@ Valid only when H.264 and macroblock level RC is enabled (<constant>V4L2_CID_MPE
 Applicable to the H264 encoder.</entry>
 </row>
 <row><entry></entry></row>
-<row>
+<row id="v4l2-mpeg-mfc51-video-frame-skip-mode">
 <entry spanname="id"><constant>V4L2_CID_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE</constant> </entry>
-<entry>enum v4l2_mpeg_mfc51_frame_skip_mode</entry>
+<entry>enum v4l2_mpeg_mfc51_video_frame_skip_mode</entry>
 </row>
 <row><entry spanname="descr">
 Indicates in what conditions the encoder should skip frames. If encoding a frame would cause the encoded stream to be larger then
@@ -2361,9 +2361,9 @@ the stream will meet tight bandwidth contraints. Applicable to encoders.
 </entry>
 </row>
 <row><entry></entry></row>
-<row>
+<row id="v4l2-mpeg-mfc51-video-force-frame-type">
 <entry spanname="id"><constant>V4L2_CID_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE</constant> </entry>
-<entry>enum v4l2_mpeg_mfc51_force_frame_type</entry>
+<entry>enum v4l2_mpeg_mfc51_video_force_frame_type</entry>
 </row>
 <row><entry spanname="descr">Force a frame type for the next queued buffer. Applicable to encoders.
 Possible values are:</entry>

@@ -592,3 +592,11 @@ Why: In 3.0, we can now autodetect internal 3G device and already have
 interface that was used by acer-wmi driver. It will replaced by
 information log when acer-wmi initial.
 Who: Lee, Chun-Yi <jlee@novell.com>
+
+----------------------------
+What: The XFS nodelaylog mount option
+When: 3.3
+Why: The delaylog mode that has been the default since 2.6.39 has proven
+	stable, and the old code is in the way of additional improvements in
+	the log code.
+Who: Christoph Hellwig <hch@lst.de>

@@ -62,6 +62,13 @@ can be safely used to identify the chip. You will have to instantiate
 the devices explicitly. Please see Documentation/i2c/instantiating-devices for
 details.

+WARNING: Do not access chip registers using the i2cdump command, and do not use
+any of the i2ctools commands on a command register (0xa5 to 0xac). The chips
+supported by this driver interpret any access to a command register (including
+read commands) as request to execute the command in question. This may result in
+power loss, board resets, and/or Flash corruption. Worst case, your board may
+turn into a brick.
+

 Sysfs entries
 -------------

@@ -319,4 +319,6 @@ Code Seq#(hex) Include File Comments
 <mailto:thomas@winischhofer.net>
 0xF4 00-1F video/mbxfb.h mbxfb
 <mailto:raph@8d.com>
+0xF6 all LTTng Linux Trace Toolkit Next Generation
+<mailto:mathieu.desnoyers@efficios.com>
 0xFD all linux/dm-ioctl.h

Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 1
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc6
 NAME = "Divemaster Edition"

 # *DOCUMENTATION*

@@ -129,4 +129,10 @@ config DEBUG_S3C_UART
 	  The uncompressor code port configuration is now handled
 	  by CONFIG_S3C_LOWLEVEL_UART_PORT.

+config ARM_KPROBES_TEST
+	tristate "Kprobes test module"
+	depends on KPROBES && MODULES
+	help
+	  Perform tests of kprobes API and instruction set simulation.
+
 endmenu

@@ -45,8 +45,13 @@
 #define L2X0_CLEAN_INV_LINE_PA		0x7F0
 #define L2X0_CLEAN_INV_LINE_IDX		0x7F8
 #define L2X0_CLEAN_INV_WAY		0x7FC
-#define L2X0_LOCKDOWN_WAY_D		0x900
-#define L2X0_LOCKDOWN_WAY_I		0x904
+/*
+ * The lockdown registers repeat 8 times for L310, the L210 has only one
+ * D and one I lockdown register at 0x0900 and 0x0904.
+ */
+#define L2X0_LOCKDOWN_WAY_D_BASE	0x900
+#define L2X0_LOCKDOWN_WAY_I_BASE	0x904
+#define L2X0_LOCKDOWN_STRIDE		0x08
 #define L2X0_TEST_OPERATION		0xF00
 #define L2X0_LINE_DATA			0xF10
 #define L2X0_LINE_TAG			0xF30

@@ -43,6 +43,13 @@ obj-$(CONFIG_KPROBES) += kprobes-thumb.o
 else
 obj-$(CONFIG_KPROBES) += kprobes-arm.o
 endif
+obj-$(CONFIG_ARM_KPROBES_TEST) += test-kprobes.o
+test-kprobes-objs := kprobes-test.o
+ifdef CONFIG_THUMB2_KERNEL
+test-kprobes-objs += kprobes-test-thumb.o
+else
+test-kprobes-objs += kprobes-test-arm.o
+endif
 obj-$(CONFIG_ATAGS_PROC) += atags.o
 obj-$(CONFIG_OABI_COMPAT) += sys_oabi-compat.o
 obj-$(CONFIG_ARM_THUMBEE) += thumbee.o

@@ -60,6 +60,7 @@

 #include <linux/kernel.h>
 #include <linux/kprobes.h>
+#include <linux/module.h>

 #include "kprobes.h"

@@ -971,6 +972,9 @@ const union decode_item kprobe_decode_arm_table[] = {

 	DECODE_END
 };
+#ifdef CONFIG_ARM_KPROBES_TEST_MODULE
+EXPORT_SYMBOL_GPL(kprobe_decode_arm_table);
+#endif

 static void __kprobes arm_singlestep(struct kprobe *p, struct pt_regs *regs)
 {

File diff suppressed because it is too large (3 files)

@ -0,0 +1,392 @@
|
|||
/*
|
||||
* arch/arm/kernel/kprobes-test.h
|
||||
*
|
||||
* Copyright (C) 2011 Jon Medhurst <tixy@yxit.co.uk>.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#define VERBOSE 0 /* Set to '1' for more logging of test cases */
|
||||
|
||||
#ifdef CONFIG_THUMB2_KERNEL
|
||||
#define NORMAL_ISA "16"
|
||||
#else
|
||||
#define NORMAL_ISA "32"
|
||||
#endif
|
||||
|
||||
|
||||
/* Flags used in kprobe_test_flags */
|
||||
#define TEST_FLAG_NO_ITBLOCK (1<<0)
|
||||
#define TEST_FLAG_FULL_ITBLOCK (1<<1)
|
||||
#define TEST_FLAG_NARROW_INSTR (1<<2)
|
||||
|
||||
extern int kprobe_test_flags;
|
||||
extern int kprobe_test_cc_position;
|
||||
|
||||
|
||||
#define TEST_MEMORY_SIZE 256
|
||||
|
||||
|
||||
/*
|
||||
* Test case structures.
|
||||
*
|
||||
* The arguments given to test cases can be one of three types.
|
||||
*
|
||||
* ARG_TYPE_REG
|
||||
* Load a register with the given value.
|
||||
*
|
||||
* ARG_TYPE_PTR
|
||||
* Load a register with a pointer into the stack buffer (SP + given value).
|
||||
*
|
||||
* ARG_TYPE_MEM
|
||||
* Store the given value into the stack buffer at [SP+index].
|
||||
*
|
||||
*/
|
||||
|
||||
#define ARG_TYPE_END 0
|
||||
#define ARG_TYPE_REG 1
|
||||
#define ARG_TYPE_PTR 2
|
||||
#define ARG_TYPE_MEM 3
|
||||
|
||||
#define ARG_FLAG_UNSUPPORTED 0x01
|
||||
#define ARG_FLAG_SUPPORTED 0x02
|
||||
#define ARG_FLAG_THUMB 0x10 /* Must be 16 so TEST_ISA can be used */
|
||||
#define ARG_FLAG_ARM 0x20 /* Must be 32 so TEST_ISA can be used */
|
||||
|
||||
struct test_arg {
|
||||
u8 type; /* ARG_TYPE_x */
|
||||
u8 _padding[7];
|
||||
};
|
||||
|
||||
struct test_arg_regptr {
|
||||
u8 type; /* ARG_TYPE_REG or ARG_TYPE_PTR */
|
||||
u8 reg;
|
||||
u8 _padding[2];
|
||||
u32 val;
|
||||
};
|
||||
|
||||
struct test_arg_mem {
|
||||
u8 type; /* ARG_TYPE_MEM */
|
||||
u8 index;
|
||||
u8 _padding[2];
|
||||
u32 val;
|
||||
};
|
||||
|
||||
struct test_arg_end {
|
||||
u8 type; /* ARG_TYPE_END */
|
||||
u8 flags; /* ARG_FLAG_x */
|
||||
u16 code_offset;
|
||||
u16 branch_offset;
|
||||
u16 end_offset;
|
||||
};
|
||||
|
||||
|
||||
/*
|
||||
* Building blocks for test cases.
|
||||
*
|
||||
* Each test case is wrapped between TESTCASE_START and TESTCASE_END.
|
||||
*
|
||||
* To specify arguments for a test case the TEST_ARG_{REG,PTR,MEM} macros are
|
||||
* used followed by a terminating TEST_ARG_END.
|
||||
*
|
||||
* After this, the instruction to be tested is defined with TEST_INSTRUCTION.
|
||||
* Or for branches, TEST_BRANCH_B and TEST_BRANCH_F (branch forwards/backwards).
|
||||
*
|
||||
* Some specific test cases may make use of other custom constructs.
|
||||
*/
|
||||
|
||||
#if VERBOSE
|
||||
#define verbose(fmt, ...) pr_info(fmt, ##__VA_ARGS__)
|
||||
#else
|
||||
#define verbose(fmt, ...)
|
||||
#endif
|
||||
|
||||
#define TEST_GROUP(title) \
|
||||
verbose("\n"); \
|
||||
verbose(title"\n"); \
|
||||
verbose("---------------------------------------------------------\n");
|
||||
|
||||
#define TESTCASE_START(title) \
|
||||
__asm__ __volatile__ ( \
|
||||
"bl __kprobes_test_case_start \n\t" \
|
||||
/* don't use .asciz here as 'title' may be */ \
|
||||
/* multiple strings to be concatenated. */ \
|
||||
".ascii "#title" \n\t" \
|
||||
".byte 0 \n\t" \
|
||||
".align 2 \n\t"
|
||||
|
||||
#define TEST_ARG_REG(reg, val) \
|
||||
".byte "__stringify(ARG_TYPE_REG)" \n\t" \
|
||||
".byte "#reg" \n\t" \
|
||||
".short 0 \n\t" \
|
||||
".word "#val" \n\t"
|
||||
|
||||
#define TEST_ARG_PTR(reg, val) \
|
||||
".byte "__stringify(ARG_TYPE_PTR)" \n\t" \
|
||||
".byte "#reg" \n\t" \
|
||||
".short 0 \n\t" \
|
||||
".word "#val" \n\t"
|
||||
|
||||
#define TEST_ARG_MEM(index, val) \
|
||||
".byte "__stringify(ARG_TYPE_MEM)" \n\t" \
|
||||
".byte "#index" \n\t" \
|
||||
".short 0 \n\t" \
|
||||
".word "#val" \n\t"
|
||||
|
||||
#define TEST_ARG_END(flags) \
|
||||
".byte "__stringify(ARG_TYPE_END)" \n\t" \
|
||||
".byte "TEST_ISA flags" \n\t" \
|
||||
".short 50f-0f \n\t" \
|
||||
".short 2f-0f \n\t" \
|
||||
".short 99f-0f \n\t" \
|
||||
".code "TEST_ISA" \n\t" \
|
||||
"0: \n\t"
|
||||
|
||||
#define TEST_INSTRUCTION(instruction) \
|
||||
"50: nop \n\t" \
|
||||
"1: "instruction" \n\t" \
|
||||
" nop \n\t"
|
||||
|
||||
#define TEST_BRANCH_F(instruction, xtra_dist) \
|
||||
TEST_INSTRUCTION(instruction) \
|
||||
".if "#xtra_dist" \n\t" \
|
||||
" b 99f \n\t" \
|
||||
".space "#xtra_dist" \n\t" \
|
||||
".endif \n\t" \
|
||||
" b 99f \n\t" \
|
||||
"2: nop \n\t"
|
||||
|
||||
#define TEST_BRANCH_B(instruction, xtra_dist) \
|
||||
" b 50f \n\t" \
|
||||
" b 99f \n\t" \
|
||||
"2: nop \n\t" \
|
||||
" b 99f \n\t" \
|
||||
".if "#xtra_dist" \n\t" \
|
||||
".space "#xtra_dist" \n\t" \
|
||||
".endif \n\t" \
|
||||
TEST_INSTRUCTION(instruction)
|
||||
|
||||
#define TESTCASE_END \
|
||||
"2: \n\t" \
|
||||
"99: \n\t" \
|
||||
" bl __kprobes_test_case_end_"TEST_ISA" \n\t" \
|
||||
".code "NORMAL_ISA" \n\t" \
|
||||
: : \
|
||||
: "r0", "r1", "r2", "r3", "ip", "lr", "memory", "cc" \
|
||||
);
|
||||
|
||||
|
||||
/*
|
||||
* Macros to define test cases.
|
||||
*
|
||||
* Those of the form TEST_{R,P,M}* can be used to define test cases
|
||||
* which take combinations of the three basic types of arguments. E.g.
|
||||
*
|
||||
* TEST_R One register argument
|
||||
* TEST_RR Two register arguments
|
||||
* TEST_RPR A register, a pointer, then a register argument
|
||||
*
|
||||
* For testing instructions which may branch, there are macros TEST_BF_*
|
||||
* and TEST_BB_* for branching forwards and backwards.
|
||||
*
|
||||
* TEST_SUPPORTED and TEST_UNSUPPORTED don't cause the code to be executed,
|
||||
* the just verify that a kprobe is or is not allowed on the given instruction.
|
||||
*/
|
||||
|
||||
#define TEST(code) \
|
||||
TESTCASE_START(code) \
|
||||
TEST_ARG_END("") \
|
||||
TEST_INSTRUCTION(code) \
|
||||
TESTCASE_END
|
||||
|
||||
#define TEST_UNSUPPORTED(code) \
|
||||
TESTCASE_START(code) \
|
||||
TEST_ARG_END("|"__stringify(ARG_FLAG_UNSUPPORTED)) \
|
||||
TEST_INSTRUCTION(code) \
|
||||
TESTCASE_END
|
||||
|
||||
#define TEST_SUPPORTED(code) \
|
||||
TESTCASE_START(code) \
|
||||
TEST_ARG_END("|"__stringify(ARG_FLAG_SUPPORTED)) \
|
||||
TEST_INSTRUCTION(code) \
|
||||
TESTCASE_END
|
||||
|
||||
#define TEST_R(code1, reg, val, code2) \
|
||||
TESTCASE_START(code1 #reg code2) \
|
||||
TEST_ARG_REG(reg, val) \
|
||||
TEST_ARG_END("") \
|
||||
TEST_INSTRUCTION(code1 #reg code2) \
|
||||
TESTCASE_END
|
||||
|
||||
#define TEST_RR(code1, reg1, val1, code2, reg2, val2, code3) \
|
||||
TESTCASE_START(code1 #reg1 code2 #reg2 code3) \
|
||||
TEST_ARG_REG(reg1, val1) \
|
||||
TEST_ARG_REG(reg2, val2) \
|
||||
TEST_ARG_END("") \
|
||||
TEST_INSTRUCTION(code1 #reg1 code2 #reg2 code3) \
|
||||
TESTCASE_END
|
||||
|
||||
#define TEST_RRR(code1, reg1, val1, code2, reg2, val2, code3, reg3, val3, code4)\
|
||||
TESTCASE_START(code1 #reg1 code2 #reg2 code3 #reg3 code4) \
|
||||
TEST_ARG_REG(reg1, val1) \
|
||||
TEST_ARG_REG(reg2, val2) \
|
||||
TEST_ARG_REG(reg3, val3) \
|
||||
TEST_ARG_END("") \
|
||||
TEST_INSTRUCTION(code1 #reg1 code2 #reg2 code3 #reg3 code4) \
|
||||
TESTCASE_END
|
||||
|
||||
#define TEST_RRRR(code1, reg1, val1, code2, reg2, val2, code3, reg3, val3, code4, reg4, val4) \
|
||||
TESTCASE_START(code1 #reg1 code2 #reg2 code3 #reg3 code4 #reg4) \
|
||||
TEST_ARG_REG(reg1, val1) \
|
||||
TEST_ARG_REG(reg2, val2) \
|
||||
TEST_ARG_REG(reg3, val3) \
|
||||
TEST_ARG_REG(reg4, val4) \
|
||||
TEST_ARG_END("") \
|
||||
TEST_INSTRUCTION(code1 #reg1 code2 #reg2 code3 #reg3 code4 #reg4) \
|
||||
TESTCASE_END
|
||||
|
||||
#define TEST_P(code1, reg1, val1, code2) \
|
||||
TESTCASE_START(code1 #reg1 code2) \
|
||||
TEST_ARG_PTR(reg1, val1) \
|
||||
TEST_ARG_END("") \
|
||||
TEST_INSTRUCTION(code1 #reg1 code2) \
|
||||
TESTCASE_END
|
||||
|
||||
#define TEST_PR(code1, reg1, val1, code2, reg2, val2, code3) \
|
||||
TESTCASE_START(code1 #reg1 code2 #reg2 code3) \
|
||||
TEST_ARG_PTR(reg1, val1) \
|
||||
TEST_ARG_REG(reg2, val2) \
|
||||
TEST_ARG_END("") \
|
||||
TEST_INSTRUCTION(code1 #reg1 code2 #reg2 code3) \
|
||||
TESTCASE_END
|
||||
|
||||
#define TEST_RP(code1, reg1, val1, code2, reg2, val2, code3) \
|
||||
TESTCASE_START(code1 #reg1 code2 #reg2 code3) \
|
||||
TEST_ARG_REG(reg1, val1) \
|
||||
TEST_ARG_PTR(reg2, val2) \
|
||||
TEST_ARG_END("") \
|
||||
TEST_INSTRUCTION(code1 #reg1 code2 #reg2 code3) \
|
||||
TESTCASE_END
|
||||
|
||||
#define TEST_PRR(code1, reg1, val1, code2, reg2, val2, code3, reg3, val3, code4)\
|
||||
TESTCASE_START(code1 #reg1 code2 #reg2 code3 #reg3 code4) \
|
||||
TEST_ARG_PTR(reg1, val1) \
|
||||
TEST_ARG_REG(reg2, val2) \
|
||||
TEST_ARG_REG(reg3, val3) \
|
||||
TEST_ARG_END("") \
|
||||
TEST_INSTRUCTION(code1 #reg1 code2 #reg2 code3 #reg3 code4) \
|
||||
TESTCASE_END
|
||||
|
||||
#define TEST_RPR(code1, reg1, val1, code2, reg2, val2, code3, reg3, val3, code4)\
|
||||
TESTCASE_START(code1 #reg1 code2 #reg2 code3 #reg3 code4) \
|
||||
TEST_ARG_REG(reg1, val1) \
|
||||
TEST_ARG_PTR(reg2, val2) \
|
||||
TEST_ARG_REG(reg3, val3) \
|
||||
TEST_ARG_END("") \
|
||||
TEST_INSTRUCTION(code1 #reg1 code2 #reg2 code3 #reg3 code4) \
|
||||
TESTCASE_END
|
||||
|
||||
#define TEST_RRP(code1, reg1, val1, code2, reg2, val2, code3, reg3, val3, code4)\
|
||||
TESTCASE_START(code1 #reg1 code2 #reg2 code3 #reg3 code4) \
|
||||
TEST_ARG_REG(reg1, val1) \
|
||||
TEST_ARG_REG(reg2, val2) \
|
||||
TEST_ARG_PTR(reg3, val3) \
|
||||
TEST_ARG_END("") \
|
||||
TEST_INSTRUCTION(code1 #reg1 code2 #reg2 code3 #reg3 code4) \
|
||||
TESTCASE_END
|
||||
|
||||
#define TEST_BF_P(code1, reg1, val1, code2) \
|
||||
TESTCASE_START(code1 #reg1 code2) \
|
||||
TEST_ARG_PTR(reg1, val1) \
|
||||
TEST_ARG_END("") \
|
||||
TEST_BRANCH_F(code1 #reg1 code2, 0) \
|
||||
TESTCASE_END
|
||||
|
||||
#define TEST_BF_X(code, xtra_dist) \
|
||||
TESTCASE_START(code) \
|
||||
TEST_ARG_END("") \
|
||||
TEST_BRANCH_F(code, xtra_dist) \
|
||||
TESTCASE_END
|
||||
|
||||
#define TEST_BB_X(code, xtra_dist) \
|
||||
TESTCASE_START(code) \
|
||||
TEST_ARG_END("") \
|
||||
TEST_BRANCH_B(code, xtra_dist) \
|
||||
TESTCASE_END
|
||||
|
||||
#define TEST_BF_RX(code1, reg, val, code2, xtra_dist) \
|
||||
TESTCASE_START(code1 #reg code2) \
|
||||
TEST_ARG_REG(reg, val) \
|
||||
TEST_ARG_END("") \
|
||||
TEST_BRANCH_F(code1 #reg code2, xtra_dist) \
|
||||
TESTCASE_END
|
||||
|
||||
#define TEST_BB_RX(code1, reg, val, code2, xtra_dist) \
|
||||
TESTCASE_START(code1 #reg code2) \
|
||||
TEST_ARG_REG(reg, val) \
|
||||
TEST_ARG_END("") \
|
||||
TEST_BRANCH_B(code1 #reg code2, xtra_dist) \
|
||||
TESTCASE_END
|
||||
|
||||
#define TEST_BF(code) TEST_BF_X(code, 0)
|
||||
#define TEST_BB(code) TEST_BB_X(code, 0)
|
||||
|
||||
#define TEST_BF_R(code1, reg, val, code2) TEST_BF_RX(code1, reg, val, code2, 0)
|
||||
#define TEST_BB_R(code1, reg, val, code2) TEST_BB_RX(code1, reg, val, code2, 0)
|
||||
|
||||
#define TEST_BF_RR(code1, reg1, val1, code2, reg2, val2, code3) \
|
||||
TESTCASE_START(code1 #reg1 code2 #reg2 code3) \
|
||||
TEST_ARG_REG(reg1, val1) \
|
||||
TEST_ARG_REG(reg2, val2) \
|
||||
TEST_ARG_END("") \
|
||||
TEST_BRANCH_F(code1 #reg1 code2 #reg2 code3, 0) \
|
||||
TESTCASE_END
|
||||
|
||||
#define TEST_X(code, codex) \
|
||||
TESTCASE_START(code) \
|
||||
TEST_ARG_END("") \
|
||||
TEST_INSTRUCTION(code) \
|
||||
" b 99f \n\t" \
|
||||
" "codex" \n\t" \
|
||||
TESTCASE_END
|
||||
|
||||
#define TEST_RX(code1, reg, val, code2, codex) \
|
||||
TESTCASE_START(code1 #reg code2) \
|
||||
TEST_ARG_REG(reg, val) \
|
||||
TEST_ARG_END("") \
|
||||
TEST_INSTRUCTION(code1 __stringify(reg) code2) \
|
||||
" b 99f \n\t" \
|
||||
" "codex" \n\t" \
|
||||
TESTCASE_END
|
||||
|
||||
#define TEST_RRX(code1, reg1, val1, code2, reg2, val2, code3, codex) \
|
||||
TESTCASE_START(code1 #reg1 code2 #reg2 code3) \
|
||||
TEST_ARG_REG(reg1, val1) \
|
||||
TEST_ARG_REG(reg2, val2) \
|
||||
TEST_ARG_END("") \
|
||||
TEST_INSTRUCTION(code1 __stringify(reg1) code2 __stringify(reg2) code3) \
|
||||
" b 99f \n\t" \
|
||||
" "codex" \n\t" \
|
||||
TESTCASE_END
|
||||
|
||||
|
||||
/* Various values used in test cases... */
|
||||
#define N(val) (val ^ 0xffffffff)
|
||||
#define VAL1 0x12345678
|
||||
#define VAL2 N(VAL1)
|
||||
#define VAL3 0xa5f801
|
||||
#define VAL4 N(VAL3)
|
||||
#define VALM 0x456789ab
|
||||
#define VALR 0xdeaddead
|
||||
#define HH1 0x0123fecb
|
||||
#define HH2 0xa9874567
|
||||
|
||||
|
||||
#ifdef CONFIG_THUMB2_KERNEL
|
||||
void kprobe_thumb16_test_cases(void);
|
||||
void kprobe_thumb32_test_cases(void);
|
||||
#else
|
||||
void kprobe_arm_test_cases(void);
|
||||
#endif
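
Illustrative sketch (not part of this commit): given the TEST_* macros defined above, the per-instruction test case files that this header supports (kprobes-test-arm.c / kprobes-test-thumb.c, whose diffs are among those suppressed above as too large) are built from invocations roughly like the following. The group title and the concrete instructions here are invented for the example.

void kprobe_arm_test_cases(void)
{
	TEST_GROUP("Example: data-processing instructions")

	/* Probes "mov r1, r0" after loading r0 with VAL1 */
	TEST_R("mov	r1, r", 0, VAL1, "")

	/* Probes "add r2, r1, r3" with r1 = VAL1 and r3 = VAL2 */
	TEST_RR("add	r2, r", 1, VAL1, ", r", 3, VAL2, "")

	/* Probes a forward branch; execution must reach the 99: end label */
	TEST_BF("b	2f")
}
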
@@ -10,6 +10,7 @@

 #include <linux/kernel.h>
 #include <linux/kprobes.h>
+#include <linux/module.h>

 #include "kprobes.h"

@@ -943,6 +944,9 @@ const union decode_item kprobe_decode_thumb32_table[] = {
 	 */
 	DECODE_END
 };
+#ifdef CONFIG_ARM_KPROBES_TEST_MODULE
+EXPORT_SYMBOL_GPL(kprobe_decode_thumb32_table);
+#endif

 static void __kprobes
 t16_simulate_bxblx(struct kprobe *p, struct pt_regs *regs)
@@ -1423,6 +1427,9 @@ const union decode_item kprobe_decode_thumb16_table[] = {

 	DECODE_END
 };
+#ifdef CONFIG_ARM_KPROBES_TEST_MODULE
+EXPORT_SYMBOL_GPL(kprobe_decode_thumb16_table);
+#endif

 static unsigned long __kprobes thumb_check_cc(unsigned long cpsr)
 {

@@ -413,6 +413,14 @@ struct decode_reject {
 	DECODE_HEADER(DECODE_TYPE_REJECT, _mask, _value, 0)


+#ifdef CONFIG_THUMB2_KERNEL
+extern const union decode_item kprobe_decode_thumb16_table[];
+extern const union decode_item kprobe_decode_thumb32_table[];
+#else
+extern const union decode_item kprobe_decode_arm_table[];
+#endif
+
+
 int kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi,
 		       const union decode_item *table, bool thumb16);

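
Illustrative sketch only (not part of this commit): with the decode tables now declared here and exported from kprobes-arm.c / kprobes-thumb.c, a module such as the suppressed kprobes-test.c could plausibly pick the table matching the kernel's instruction set along these lines. The helper name below is invented for the example.

#include <linux/kprobes.h>

#include "kprobes.h"

/* Hypothetical helper: return the decode table for the running kernel's ISA. */
static const union decode_item *example_decode_table(bool thumb16)
{
#ifdef CONFIG_THUMB2_KERNEL
	return thumb16 ? kprobe_decode_thumb16_table
		       : kprobe_decode_thumb32_table;
#else
	(void)thumb16;	/* ARM kernels have a single table */
	return kprobe_decode_arm_table;
#endif
}
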
|
|
@ -8,7 +8,6 @@
|
|||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#include <mach/hardware.h>
|
||||
#include <asm/hardware/entry-macro-gic.S>
|
||||
|
||||
.macro disable_fiq
|
||||
|
|
|
@ -13,7 +13,6 @@
|
|||
|
||||
#include <linux/io.h>
|
||||
#include <asm/proc-fns.h>
|
||||
#include <mach/hardware.h>
|
||||
|
||||
static inline void arch_idle(void)
|
||||
{
|
||||
|
|
|
@ -8,7 +8,6 @@
|
|||
*/
|
||||
|
||||
#include <asm/mach-types.h>
|
||||
#include <mach/hardware.h>
|
||||
#include <mach/cns3xxx.h>
|
||||
|
||||
#define AMBA_UART_DR(base) (*(volatile unsigned char *)((base) + 0x00))
|
||||
|
|
|
@ -49,7 +49,7 @@ static struct cns3xxx_pcie *sysdata_to_cnspci(void *sysdata)
|
|||
return &cns3xxx_pcie[root->domain];
|
||||
}
|
||||
|
||||
static struct cns3xxx_pcie *pdev_to_cnspci(struct pci_dev *dev)
|
||||
static struct cns3xxx_pcie *pdev_to_cnspci(const struct pci_dev *dev)
|
||||
{
|
||||
return sysdata_to_cnspci(dev->sysdata);
|
||||
}
|
||||
|
|
|
@ -115,6 +115,32 @@ static struct spi_board_info da850evm_spi_info[] = {
|
|||
},
|
||||
};
|
||||
|
||||
#ifdef CONFIG_MTD
|
||||
static void da850_evm_m25p80_notify_add(struct mtd_info *mtd)
|
||||
{
|
||||
char *mac_addr = davinci_soc_info.emac_pdata->mac_addr;
|
||||
size_t retlen;
|
||||
|
||||
if (!strcmp(mtd->name, "MAC-Address")) {
|
||||
mtd->read(mtd, 0, ETH_ALEN, &retlen, mac_addr);
|
||||
if (retlen == ETH_ALEN)
|
||||
pr_info("Read MAC addr from SPI Flash: %pM\n",
|
||||
mac_addr);
|
||||
}
|
||||
}
|
||||
|
||||
static struct mtd_notifier da850evm_spi_notifier = {
|
||||
.add = da850_evm_m25p80_notify_add,
|
||||
};
|
||||
|
||||
static void da850_evm_setup_mac_addr(void)
|
||||
{
|
||||
register_mtd_user(&da850evm_spi_notifier);
|
||||
}
|
||||
#else
|
||||
static void da850_evm_setup_mac_addr(void) { }
|
||||
#endif
|
||||
|
||||
static struct mtd_partition da850_evm_norflash_partition[] = {
|
||||
{
|
||||
.name = "bootloaders + env",
|
||||
|
@ -1244,6 +1270,8 @@ static __init void da850_evm_init(void)
|
|||
if (ret)
|
||||
pr_warning("da850_evm_init: sata registration failed: %d\n",
|
||||
ret);
|
||||
|
||||
da850_evm_setup_mac_addr();
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SERIAL_8250_CONSOLE
|
||||
|
|
|
@ -243,7 +243,7 @@
|
|||
#define PSC_STATE_DISABLE 2
|
||||
#define PSC_STATE_ENABLE 3
|
||||
|
||||
#define MDSTAT_STATE_MASK 0x1f
|
||||
#define MDSTAT_STATE_MASK 0x3f
|
||||
#define MDCTL_FORCE BIT(31)
|
||||
|
||||
#ifndef __ASSEMBLER__
|
||||
|
|
|
@ -217,7 +217,11 @@ ddr2clk_stop_done:
|
|||
ENDPROC(davinci_ddr_psc_config)
|
||||
|
||||
CACHE_FLUSH:
|
||||
.word arm926_flush_kern_cache_all
|
||||
#ifdef CONFIG_CPU_V6
|
||||
.word v6_flush_kern_cache_all
|
||||
#else
|
||||
.word arm926_flush_kern_cache_all
|
||||
#endif
|
||||
|
||||
ENTRY(davinci_cpu_suspend_sz)
|
||||
.word . - davinci_cpu_suspend
|
||||
|
|
|
@ -337,15 +337,15 @@ static unsigned long timer_reload;
|
|||
static void integrator_clocksource_init(u32 khz)
|
||||
{
|
||||
void __iomem *base = (void __iomem *)TIMER2_VA_BASE;
|
||||
u32 ctrl = TIMER_CTRL_ENABLE;
|
||||
u32 ctrl = TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC;
|
||||
|
||||
if (khz >= 1500) {
|
||||
khz /= 16;
|
||||
ctrl = TIMER_CTRL_DIV16;
|
||||
ctrl |= TIMER_CTRL_DIV16;
|
||||
}
|
||||
|
||||
writel(ctrl, base + TIMER_CTRL);
|
||||
writel(0xffff, base + TIMER_LOAD);
|
||||
writel(ctrl, base + TIMER_CTRL);
|
||||
|
||||
clocksource_mmio_init(base + TIMER_VALUE, "timer2",
|
||||
khz * 1000, 200, 16, clocksource_mmio_readl_down);
|
||||
|
|
|
@ -3078,6 +3078,7 @@ static struct clk gpt12_fck = {
|
|||
.name = "gpt12_fck",
|
||||
.ops = &clkops_null,
|
||||
.parent = &secure_32k_fck,
|
||||
.clkdm_name = "wkup_clkdm",
|
||||
.recalc = &followparent_recalc,
|
||||
};
|
||||
|
||||
|
@ -3085,6 +3086,7 @@ static struct clk wdt1_fck = {
|
|||
.name = "wdt1_fck",
|
||||
.ops = &clkops_null,
|
||||
.parent = &secure_32k_fck,
|
||||
.clkdm_name = "wkup_clkdm",
|
||||
.recalc = &followparent_recalc,
|
||||
};
|
||||
|
||||
|
|
|
@ -3376,10 +3376,18 @@ int __init omap4xxx_clk_init(void)
|
|||
} else if (cpu_is_omap446x()) {
|
||||
cpu_mask = RATE_IN_4460;
|
||||
cpu_clkflg = CK_446X;
|
||||
} else {
|
||||
return 0;
|
||||
}
|
||||
|
||||
clk_init(&omap2_clk_functions);
|
||||
omap2_clk_disable_clkdm_control();
|
||||
|
||||
/*
|
||||
* Must stay commented until all OMAP SoC drivers are
|
||||
* converted to runtime PM, or drivers may start crashing
|
||||
*
|
||||
* omap2_clk_disable_clkdm_control();
|
||||
*/
|
||||
|
||||
for (c = omap44xx_clks; c < omap44xx_clks + ARRAY_SIZE(omap44xx_clks);
|
||||
c++)
|
||||
|
|
|
@ -747,6 +747,7 @@ int clkdm_wakeup(struct clockdomain *clkdm)
|
|||
spin_lock_irqsave(&clkdm->lock, flags);
|
||||
clkdm->_flags &= ~_CLKDM_FLAG_HWSUP_ENABLED;
|
||||
ret = arch_clkdm->clkdm_wakeup(clkdm);
|
||||
ret |= pwrdm_state_switch(clkdm->pwrdm.ptr);
|
||||
spin_unlock_irqrestore(&clkdm->lock, flags);
|
||||
return ret;
|
||||
}
|
||||
|
@ -818,6 +819,7 @@ void clkdm_deny_idle(struct clockdomain *clkdm)
|
|||
spin_lock_irqsave(&clkdm->lock, flags);
|
||||
clkdm->_flags &= ~_CLKDM_FLAG_HWSUP_ENABLED;
|
||||
arch_clkdm->clkdm_deny_idle(clkdm);
|
||||
pwrdm_state_switch(clkdm->pwrdm.ptr);
|
||||
spin_unlock_irqrestore(&clkdm->lock, flags);
|
||||
}
|
||||
|
||||
|
|
|
@ -192,6 +192,7 @@ static struct omap_hwmod_addr_space omap2430_usbhsotg_addrs[] = {
|
|||
.pa_end = OMAP243X_HS_BASE + SZ_4K - 1,
|
||||
.flags = ADDR_TYPE_RT
|
||||
},
|
||||
{ }
|
||||
};
|
||||
|
||||
/* l4_core ->usbhsotg interface */
|
||||
|
|
|
@ -130,7 +130,6 @@ int omap_set_pwrdm_state(struct powerdomain *pwrdm, u32 state)
|
|||
} else {
|
||||
hwsup = clkdm_in_hwsup(pwrdm->pwrdm_clkdms[0]);
|
||||
clkdm_wakeup(pwrdm->pwrdm_clkdms[0]);
|
||||
pwrdm_wait_transition(pwrdm);
|
||||
sleep_switch = FORCEWAKEUP_SWITCH;
|
||||
}
|
||||
}
|
||||
|
@ -156,7 +155,6 @@ int omap_set_pwrdm_state(struct powerdomain *pwrdm, u32 state)
|
|||
return ret;
|
||||
}
|
||||
|
||||
pwrdm_wait_transition(pwrdm);
|
||||
pwrdm_state_switch(pwrdm);
|
||||
err:
|
||||
return ret;
|
||||
|
|
|
@ -195,28 +195,35 @@ static int _pwrdm_post_transition_cb(struct powerdomain *pwrdm, void *unused)
|
|||
|
||||
/**
|
||||
* pwrdm_init - set up the powerdomain layer
|
||||
* @pwrdm_list: array of struct powerdomain pointers to register
|
||||
* @pwrdms: array of struct powerdomain pointers to register
|
||||
* @custom_funcs: func pointers for arch specific implementations
|
||||
*
|
||||
* Loop through the array of powerdomains @pwrdm_list, registering all
|
||||
* that are available on the current CPU. If pwrdm_list is supplied
|
||||
* and not null, all of the referenced powerdomains will be
|
||||
* registered. No return value. XXX pwrdm_list is not really a
|
||||
* "list"; it is an array. Rename appropriately.
|
||||
* Loop through the array of powerdomains @pwrdms, registering all
|
||||
* that are available on the current CPU. Also, program all
|
||||
* powerdomain target state as ON; this is to prevent domains from
|
||||
* hitting low power states (if bootloader has target states set to
|
||||
* something other than ON) and potentially even losing context while
|
||||
* PM is not fully initialized. The PM late init code can then program
|
||||
* the desired target state for all the power domains. No return
|
||||
* value.
|
||||
*/
|
||||
void pwrdm_init(struct powerdomain **pwrdm_list, struct pwrdm_ops *custom_funcs)
|
||||
void pwrdm_init(struct powerdomain **pwrdms, struct pwrdm_ops *custom_funcs)
|
||||
{
|
||||
struct powerdomain **p = NULL;
|
||||
struct powerdomain *temp_p;
|
||||
|
||||
if (!custom_funcs)
|
||||
WARN(1, "powerdomain: No custom pwrdm functions registered\n");
|
||||
else
|
||||
arch_pwrdm = custom_funcs;
|
||||
|
||||
if (pwrdm_list) {
|
||||
for (p = pwrdm_list; *p; p++)
|
||||
if (pwrdms) {
|
||||
for (p = pwrdms; *p; p++)
|
||||
_pwrdm_register(*p);
|
||||
}
|
||||
|
||||
list_for_each_entry(temp_p, &pwrdm_list, node)
|
||||
pwrdm_set_next_pwrst(temp_p, PWRDM_POWER_ON);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -481,6 +481,7 @@ static void __init sirfsoc_clk_init(void)
|
|||
|
||||
static struct of_device_id clkc_ids[] = {
|
||||
{ .compatible = "sirf,prima2-clkc" },
|
||||
{},
|
||||
};
|
||||
|
||||
void __init sirfsoc_of_clk_init(void)
|
||||
|
|
|
@ -51,6 +51,7 @@ static __init void sirfsoc_irq_init(void)
|
|||
|
||||
static struct of_device_id intc_ids[] = {
|
||||
{ .compatible = "sirf,prima2-intc" },
|
||||
{},
|
||||
};
|
||||
|
||||
void __init sirfsoc_of_irq_init(void)
|
||||
|
|
|
@ -19,6 +19,7 @@ static DEFINE_MUTEX(rstc_lock);
|
|||
|
||||
static struct of_device_id rstc_ids[] = {
|
||||
{ .compatible = "sirf,prima2-rstc" },
|
||||
{},
|
||||
};
|
||||
|
||||
static int __init sirfsoc_of_rstc_init(void)
|
||||
|
|
|
@ -190,6 +190,7 @@ static void __init sirfsoc_timer_init(void)
|
|||
|
||||
static struct of_device_id timer_ids[] = {
|
||||
{ .compatible = "sirf,prima2-tick" },
|
||||
{},
|
||||
};
|
||||
|
||||
static void __init sirfsoc_of_timer_map(void)
|
||||
|
|
|
@ -17,7 +17,7 @@
|
|||
cmp \tmp, # 0x5600 @ Is it ldrsb?
|
||||
orreq \tmp, \tmp, #1 << 11 @ Set L-bit if yes
|
||||
tst \tmp, #1 << 11 @ L = 0 -> write
|
||||
orreq \psr, \psr, #1 << 11 @ yes.
|
||||
orreq \fsr, \fsr, #1 << 11 @ yes.
|
||||
b do_DataAbort
|
||||
not_thumb:
|
||||
.endm
|
||||
|
|
|
@ -277,6 +277,25 @@ static void l2x0_disable(void)
|
|||
spin_unlock_irqrestore(&l2x0_lock, flags);
|
||||
}
|
||||
|
||||
static void __init l2x0_unlock(__u32 cache_id)
|
||||
{
|
||||
int lockregs;
|
||||
int i;
|
||||
|
||||
if (cache_id == L2X0_CACHE_ID_PART_L310)
|
||||
lockregs = 8;
|
||||
else
|
||||
/* L210 and unknown types */
|
||||
lockregs = 1;
|
||||
|
||||
for (i = 0; i < lockregs; i++) {
|
||||
writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE +
|
||||
i * L2X0_LOCKDOWN_STRIDE);
|
||||
writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE +
|
||||
i * L2X0_LOCKDOWN_STRIDE);
|
||||
}
|
||||
}
|
||||
|
||||
void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
|
||||
{
|
||||
__u32 aux;
|
||||
|
@ -328,6 +347,8 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
|
|||
* accessing the below registers will fault.
|
||||
*/
|
||||
if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
|
||||
/* Make sure that I&D is not locked down when starting */
|
||||
l2x0_unlock(cache_id);
|
||||
|
||||
/* l2x0 controller is disabled */
|
||||
writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);
|
||||
|
|
|
@@ -298,7 +298,7 @@ static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
 #ifdef CONFIG_HAVE_ARCH_PFN_VALID
 int pfn_valid(unsigned long pfn)
 {
-	return memblock_is_memory(pfn << PAGE_SHIFT);
+	return memblock_is_memory(__pfn_to_phys(pfn));
 }
 EXPORT_SYMBOL(pfn_valid);
 #endif
|
||||
|
|
|
@ -615,6 +615,9 @@ static int _od_resume_noirq(struct device *dev)
|
|||
|
||||
return pm_generic_resume_noirq(dev);
|
||||
}
|
||||
#else
|
||||
#define _od_suspend_noirq NULL
|
||||
#define _od_resume_noirq NULL
|
||||
#endif
|
||||
|
||||
static struct dev_pm_domain omap_device_pm_domain = {
|
||||
|
|
|
@ -31,7 +31,6 @@
|
|||
|
||||
#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
|
||||
|
||||
int dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
|
||||
|
||||
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
|
||||
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
|
||||
|
@ -47,6 +46,12 @@ dma_addr_t or1k_map_page(struct device *dev, struct page *page,
|
|||
void or1k_unmap_page(struct device *dev, dma_addr_t dma_handle,
|
||||
size_t size, enum dma_data_direction dir,
|
||||
struct dma_attrs *attrs);
|
||||
int or1k_map_sg(struct device *dev, struct scatterlist *sg,
|
||||
int nents, enum dma_data_direction dir,
|
||||
struct dma_attrs *attrs);
|
||||
void or1k_unmap_sg(struct device *dev, struct scatterlist *sg,
|
||||
int nents, enum dma_data_direction dir,
|
||||
struct dma_attrs *attrs);
|
||||
void or1k_sync_single_for_cpu(struct device *dev,
|
||||
dma_addr_t dma_handle, size_t size,
|
||||
enum dma_data_direction dir);
|
||||
|
@ -98,6 +103,51 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t addr,
|
|||
debug_dma_unmap_page(dev, addr, size, dir, true);
|
||||
}
|
||||
|
||||
static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
|
||||
int nents, enum dma_data_direction dir)
|
||||
{
|
||||
int i, ents;
|
||||
struct scatterlist *s;
|
||||
|
||||
for_each_sg(sg, s, nents, i)
|
||||
kmemcheck_mark_initialized(sg_virt(s), s->length);
|
||||
BUG_ON(!valid_dma_direction(dir));
|
||||
ents = or1k_map_sg(dev, sg, nents, dir, NULL);
|
||||
debug_dma_map_sg(dev, sg, nents, ents, dir);
|
||||
|
||||
return ents;
|
||||
}
|
||||
|
||||
static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
|
||||
int nents, enum dma_data_direction dir)
|
||||
{
|
||||
BUG_ON(!valid_dma_direction(dir));
|
||||
debug_dma_unmap_sg(dev, sg, nents, dir);
|
||||
or1k_unmap_sg(dev, sg, nents, dir, NULL);
|
||||
}
|
||||
|
||||
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
|
||||
size_t offset, size_t size,
|
||||
enum dma_data_direction dir)
|
||||
{
|
||||
dma_addr_t addr;
|
||||
|
||||
kmemcheck_mark_initialized(page_address(page) + offset, size);
|
||||
BUG_ON(!valid_dma_direction(dir));
|
||||
addr = or1k_map_page(dev, page, offset, size, dir, NULL);
|
||||
debug_dma_map_page(dev, page, offset, size, dir, addr, false);
|
||||
|
||||
return addr;
|
||||
}
|
||||
|
||||
static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
|
||||
size_t size, enum dma_data_direction dir)
|
||||
{
|
||||
BUG_ON(!valid_dma_direction(dir));
|
||||
or1k_unmap_page(dev, addr, size, dir, NULL);
|
||||
debug_dma_unmap_page(dev, addr, size, dir, true);
|
||||
}
|
||||
|
||||
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
|
||||
size_t size,
|
||||
enum dma_data_direction dir)
|
||||
|
@ -119,7 +169,12 @@ static inline void dma_sync_single_for_device(struct device *dev,
|
|||
static inline int dma_supported(struct device *dev, u64 dma_mask)
|
||||
{
|
||||
/* Support 32 bit DMA mask exclusively */
|
||||
return dma_mask == 0xffffffffULL;
|
||||
return dma_mask == DMA_BIT_MASK(32);
|
||||
}
|
||||
|
||||
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int dma_set_mask(struct device *dev, u64 dma_mask)
|
||||
|
|
|
@ -23,16 +23,11 @@
|
|||
|
||||
/* This struct is saved by setup_frame in signal.c, to keep the current
|
||||
context while a signal handler is executed. It's restored by sys_sigreturn.
|
||||
|
||||
To keep things simple, we use pt_regs here even though normally you just
|
||||
specify the list of regs to save. Then we can use copy_from_user on the
|
||||
entire regs instead of a bunch of get_user's as well...
|
||||
*/
|
||||
|
||||
struct sigcontext {
|
||||
struct pt_regs regs; /* needs to be first */
|
||||
struct user_regs_struct regs; /* needs to be first */
|
||||
unsigned long oldmask;
|
||||
unsigned long usp; /* usp before stacking this gunk on it */
|
||||
};
|
||||
|
||||
#endif /* __ASM_OPENRISC_SIGCONTEXT_H */
|
||||
|
|
|
@ -154,6 +154,33 @@ void or1k_unmap_page(struct device *dev, dma_addr_t dma_handle,
|
|||
/* Nothing special to do here... */
|
||||
}
|
||||
|
||||
int or1k_map_sg(struct device *dev, struct scatterlist *sg,
|
||||
int nents, enum dma_data_direction dir,
|
||||
struct dma_attrs *attrs)
|
||||
{
|
||||
struct scatterlist *s;
|
||||
int i;
|
||||
|
||||
for_each_sg(sg, s, nents, i) {
|
||||
s->dma_address = or1k_map_page(dev, sg_page(s), s->offset,
|
||||
s->length, dir, NULL);
|
||||
}
|
||||
|
||||
return nents;
|
||||
}
|
||||
|
||||
void or1k_unmap_sg(struct device *dev, struct scatterlist *sg,
|
||||
int nents, enum dma_data_direction dir,
|
||||
struct dma_attrs *attrs)
|
||||
{
|
||||
struct scatterlist *s;
|
||||
int i;
|
||||
|
||||
for_each_sg(sg, s, nents, i) {
|
||||
or1k_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, NULL);
|
||||
}
|
||||
}
|
||||
|
||||
void or1k_sync_single_for_cpu(struct device *dev,
|
||||
dma_addr_t dma_handle, size_t size,
|
||||
enum dma_data_direction dir)
|
||||
|
@ -187,5 +214,4 @@ static int __init dma_init(void)
|
|||
|
||||
return 0;
|
||||
}
|
||||
|
||||
fs_initcall(dma_init);
|
||||
|
|
|
@ -52,31 +52,25 @@ struct rt_sigframe {
|
|||
static int restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
|
||||
{
|
||||
unsigned int err = 0;
|
||||
unsigned long old_usp;
|
||||
|
||||
/* Alwys make any pending restarted system call return -EINTR */
|
||||
current_thread_info()->restart_block.fn = do_no_restart_syscall;
|
||||
|
||||
/* restore the regs from &sc->regs (same as sc, since regs is first)
|
||||
/*
|
||||
* Restore the regs from &sc->regs.
|
||||
* (sc is already checked for VERIFY_READ since the sigframe was
|
||||
* checked in sys_sigreturn previously)
|
||||
*/
|
||||
|
||||
if (__copy_from_user(regs, sc, sizeof(struct pt_regs)))
|
||||
if (__copy_from_user(regs, sc->regs.gpr, 32 * sizeof(unsigned long)))
|
||||
goto badframe;
|
||||
if (__copy_from_user(®s->pc, &sc->regs.pc, sizeof(unsigned long)))
|
||||
goto badframe;
|
||||
if (__copy_from_user(®s->sr, &sc->regs.sr, sizeof(unsigned long)))
|
||||
goto badframe;
|
||||
|
||||
/* make sure the SM-bit is cleared so user-mode cannot fool us */
|
||||
regs->sr &= ~SPR_SR_SM;
|
||||
|
||||
/* restore the old USP as it was before we stacked the sc etc.
|
||||
* (we cannot just pop the sigcontext since we aligned the sp and
|
||||
* stuff after pushing it)
|
||||
*/
|
||||
|
||||
err |= __get_user(old_usp, &sc->usp);
|
||||
|
||||
regs->sp = old_usp;
|
||||
|
||||
/* TODO: the other ports use regs->orig_XX to disable syscall checks
|
||||
* after this completes, but we don't use that mechanism. maybe we can
|
||||
* use it now ?
|
||||
|
@ -137,18 +131,17 @@ static int setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
|
|||
unsigned long mask)
|
||||
{
|
||||
int err = 0;
|
||||
unsigned long usp = regs->sp;
|
||||
|
||||
/* copy the regs. they are first in sc so we can use sc directly */
|
||||
/* copy the regs */
|
||||
|
||||
err |= __copy_to_user(sc, regs, sizeof(struct pt_regs));
|
||||
err |= __copy_to_user(sc->regs.gpr, regs, 32 * sizeof(unsigned long));
|
||||
err |= __copy_to_user(&sc->regs.pc, ®s->pc, sizeof(unsigned long));
|
||||
err |= __copy_to_user(&sc->regs.sr, ®s->sr, sizeof(unsigned long));
|
||||
|
||||
/* then some other stuff */
|
||||
|
||||
err |= __put_user(mask, &sc->oldmask);
|
||||
|
||||
err |= __put_user(usp, &sc->usp);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
|
|
|
@ -387,7 +387,7 @@
|
|||
#size-cells = <1>;
|
||||
compatible = "cfi-flash";
|
||||
reg = <0x0 0x0 0x02000000>;
|
||||
bank-width = <1>;
|
||||
bank-width = <2>;
|
||||
device-width = <1>;
|
||||
partition@0 {
|
||||
label = "ramdisk";
|
||||
|
|
|
@ -171,3 +171,4 @@ CONFIG_CRYPTO_SHA256=y
|
|||
CONFIG_CRYPTO_SHA512=y
|
||||
CONFIG_CRYPTO_AES=y
|
||||
# CONFIG_CRYPTO_ANSI_CPRNG is not set
|
||||
CONFIG_CRYPTO_DEV_FSL_CAAM=y
|
||||
|
|
|
@ -185,3 +185,4 @@ CONFIG_CRYPTO_SHA256=y
|
|||
CONFIG_CRYPTO_SHA512=y
|
||||
CONFIG_CRYPTO_AES=y
|
||||
# CONFIG_CRYPTO_ANSI_CPRNG is not set
|
||||
CONFIG_CRYPTO_DEV_FSL_CAAM=y
|
||||
|
|
|
@ -100,5 +100,8 @@ CONFIG_DEBUG_INFO=y
|
|||
CONFIG_SYSCTL_SYSCALL_CHECK=y
|
||||
CONFIG_VIRQ_DEBUG=y
|
||||
CONFIG_CRYPTO_PCBC=m
|
||||
CONFIG_CRYPTO_SHA256=y
|
||||
CONFIG_CRYPTO_SHA512=y
|
||||
CONFIG_CRYPTO_AES=y
|
||||
# CONFIG_CRYPTO_ANSI_CPRNG is not set
|
||||
CONFIG_CRYPTO_DEV_TALITOS=y
|
||||
CONFIG_CRYPTO_DEV_FSL_CAAM=y
|
||||
|
|
|
@ -139,6 +139,7 @@ CONFIG_SND=y
|
|||
CONFIG_SND_INTEL8X0=y
|
||||
# CONFIG_SND_PPC is not set
|
||||
# CONFIG_SND_USB is not set
|
||||
CONFIG_SND_SOC=y
|
||||
CONFIG_HID_A4TECH=y
|
||||
CONFIG_HID_APPLE=y
|
||||
CONFIG_HID_BELKIN=y
|
||||
|
|
|
@ -140,6 +140,7 @@ CONFIG_SND=y
|
|||
CONFIG_SND_INTEL8X0=y
|
||||
# CONFIG_SND_PPC is not set
|
||||
# CONFIG_SND_USB is not set
|
||||
CONFIG_SND_SOC=y
|
||||
CONFIG_HID_A4TECH=y
|
||||
CONFIG_HID_APPLE=y
|
||||
CONFIG_HID_BELKIN=y
|
||||
|
|
|
@ -440,8 +440,14 @@ static void __init init_sparc64_elf_hwcap(void)
|
|||
cap |= AV_SPARC_VIS;
|
||||
if (tlb_type == cheetah || tlb_type == cheetah_plus)
|
||||
cap |= AV_SPARC_VIS | AV_SPARC_VIS2;
|
||||
if (tlb_type == cheetah_plus)
|
||||
cap |= AV_SPARC_POPC;
|
||||
if (tlb_type == cheetah_plus) {
|
||||
unsigned long impl, ver;
|
||||
|
||||
__asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
|
||||
impl = ((ver >> 32) & 0xffff);
|
||||
if (impl == PANTHER_IMPL)
|
||||
cap |= AV_SPARC_POPC;
|
||||
}
|
||||
if (tlb_type == hypervisor) {
|
||||
if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1)
|
||||
cap |= AV_SPARC_ASI_BLK_INIT;
|
||||
|
|
|
@@ -44,7 +44,7 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
 		: "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
 #elif defined(__x86_64__)
 	__asm__ (
-		"mul %[mul_frac] ; shrd $32, %[hi], %[lo]"
+		"mulq %[mul_frac] ; shrd $32, %[hi], %[lo]"
 		: [lo]"=a"(product),
 		  [hi]"=d"(tmp)
 		: "0"(delta),
|
||||
|
|
|
@ -1900,6 +1900,9 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
|
|||
|
||||
perf_callchain_store(entry, regs->ip);
|
||||
|
||||
if (!current->mm)
|
||||
return;
|
||||
|
||||
if (perf_callchain_user32(regs, entry))
|
||||
return;
|
||||
|
||||
|
|
|
@ -365,8 +365,13 @@ struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_pci_root *root)
|
|||
*/
|
||||
if (bus) {
|
||||
struct pci_bus *child;
|
||||
list_for_each_entry(child, &bus->children, node)
|
||||
pcie_bus_configure_settings(child, child->self->pcie_mpss);
|
||||
list_for_each_entry(child, &bus->children, node) {
|
||||
struct pci_dev *self = child->self;
|
||||
if (!self)
|
||||
continue;
|
||||
|
||||
pcie_bus_configure_settings(child, self->pcie_mpss);
|
||||
}
|
||||
}
|
||||
|
||||
if (!bus)
|
||||
|
|
|
@ -184,6 +184,19 @@ static unsigned long __init xen_set_identity(const struct e820entry *list,
|
|||
PFN_UP(start_pci), PFN_DOWN(last));
|
||||
return identity;
|
||||
}
|
||||
|
||||
static unsigned long __init xen_get_max_pages(void)
|
||||
{
|
||||
unsigned long max_pages = MAX_DOMAIN_PAGES;
|
||||
domid_t domid = DOMID_SELF;
|
||||
int ret;
|
||||
|
||||
ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
|
||||
if (ret > 0)
|
||||
max_pages = ret;
|
||||
return min(max_pages, MAX_DOMAIN_PAGES);
|
||||
}
|
||||
|
||||
/**
|
||||
* machine_specific_memory_setup - Hook for machine specific memory setup.
|
||||
**/
|
||||
|
@ -292,6 +305,12 @@ char * __init xen_memory_setup(void)
|
|||
|
||||
sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
|
||||
|
||||
extra_limit = xen_get_max_pages();
|
||||
if (extra_limit >= max_pfn)
|
||||
extra_pages = extra_limit - max_pfn;
|
||||
else
|
||||
extra_pages = 0;
|
||||
|
||||
extra_pages += xen_return_unused_memory(xen_start_info->nr_pages, &e820);
|
||||
|
||||
/*
|
||||
|
|
|
@ -32,6 +32,7 @@
|
|||
#include <xen/page.h>
|
||||
#include <xen/events.h>
|
||||
|
||||
#include <xen/hvc-console.h>
|
||||
#include "xen-ops.h"
|
||||
#include "mmu.h"
|
||||
|
||||
|
@ -207,6 +208,15 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
|
|||
unsigned cpu;
|
||||
unsigned int i;
|
||||
|
||||
if (skip_ioapic_setup) {
|
||||
char *m = (max_cpus == 0) ?
|
||||
"The nosmp parameter is incompatible with Xen; " \
|
||||
"use Xen dom0_max_vcpus=1 parameter" :
|
||||
"The noapic parameter is incompatible with Xen";
|
||||
|
||||
xen_raw_printk(m);
|
||||
panic(m);
|
||||
}
|
||||
xen_init_lock_cpu(0);
|
||||
|
||||
smp_store_cpu_info(0);
|
||||
|
|
|
@ -113,11 +113,13 @@ xen_iret_start_crit:
|
|||
|
||||
/*
|
||||
* If there's something pending, mask events again so we can
|
||||
* jump back into xen_hypervisor_callback
|
||||
* jump back into xen_hypervisor_callback. Otherwise do not
|
||||
* touch XEN_vcpu_info_mask.
|
||||
*/
|
||||
sete XEN_vcpu_info_mask(%eax)
|
||||
jne 1f
|
||||
movb $1, XEN_vcpu_info_mask(%eax)
|
||||
|
||||
popl %eax
|
||||
1: popl %eax
|
||||
|
||||
/*
|
||||
* From this point on the registers are restored and the stack
|
||||
|
|
|
@ -168,13 +168,11 @@ struct regmap *regmap_init(struct device *dev,
|
|||
map->work_buf = kmalloc(map->format.buf_size, GFP_KERNEL);
|
||||
if (map->work_buf == NULL) {
|
||||
ret = -ENOMEM;
|
||||
goto err_bus;
|
||||
goto err_map;
|
||||
}
|
||||
|
||||
return map;
|
||||
|
||||
err_bus:
|
||||
module_put(map->bus->owner);
|
||||
err_map:
|
||||
kfree(map);
|
||||
err:
|
||||
|
@ -188,7 +186,6 @@ EXPORT_SYMBOL_GPL(regmap_init);
|
|||
void regmap_exit(struct regmap *map)
|
||||
{
|
||||
kfree(map->work_buf);
|
||||
module_put(map->bus->owner);
|
||||
kfree(map);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(regmap_exit);
|
||||
|
|
|
@ -174,8 +174,10 @@ struct d40_base;
|
|||
* @tasklet: Tasklet that gets scheduled from interrupt context to complete a
|
||||
* transfer and call client callback.
|
||||
* @client: Cliented owned descriptor list.
|
||||
* @pending_queue: Submitted jobs, to be issued by issue_pending()
|
||||
* @active: Active descriptor.
|
||||
* @queue: Queued jobs.
|
||||
* @prepare_queue: Prepared jobs.
|
||||
* @dma_cfg: The client configuration of this dma channel.
|
||||
* @configured: whether the dma_cfg configuration is valid
|
||||
* @base: Pointer to the device instance struct.
|
||||
|
@ -203,6 +205,7 @@ struct d40_chan {
|
|||
struct list_head pending_queue;
|
||||
struct list_head active;
|
||||
struct list_head queue;
|
||||
struct list_head prepare_queue;
|
||||
struct stedma40_chan_cfg dma_cfg;
|
||||
bool configured;
|
||||
struct d40_base *base;
|
||||
|
@ -477,7 +480,6 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
|
|||
|
||||
list_for_each_entry_safe(d, _d, &d40c->client, node)
|
||||
if (async_tx_test_ack(&d->txd)) {
|
||||
d40_pool_lli_free(d40c, d);
|
||||
d40_desc_remove(d);
|
||||
desc = d;
|
||||
memset(desc, 0, sizeof(*desc));
|
||||
|
@ -644,8 +646,11 @@ static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
|
|||
return d;
|
||||
}
|
||||
|
||||
/* remove desc from current queue and add it to the pending_queue */
|
||||
static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
|
||||
{
|
||||
d40_desc_remove(desc);
|
||||
desc->is_in_client_list = false;
|
||||
list_add_tail(&desc->node, &d40c->pending_queue);
|
||||
}
|
||||
|
||||
|
@ -803,6 +808,7 @@ done:
|
|||
static void d40_term_all(struct d40_chan *d40c)
|
||||
{
|
||||
struct d40_desc *d40d;
|
||||
struct d40_desc *_d;
|
||||
|
||||
/* Release active descriptors */
|
||||
while ((d40d = d40_first_active_get(d40c))) {
|
||||
|
@ -822,6 +828,21 @@ static void d40_term_all(struct d40_chan *d40c)
|
|||
d40_desc_free(d40c, d40d);
|
||||
}
|
||||
|
||||
/* Release client owned descriptors */
|
||||
if (!list_empty(&d40c->client))
|
||||
list_for_each_entry_safe(d40d, _d, &d40c->client, node) {
|
||||
d40_desc_remove(d40d);
|
||||
d40_desc_free(d40c, d40d);
|
||||
}
|
||||
|
||||
/* Release descriptors in prepare queue */
|
||||
if (!list_empty(&d40c->prepare_queue))
|
||||
list_for_each_entry_safe(d40d, _d,
|
||||
&d40c->prepare_queue, node) {
|
||||
d40_desc_remove(d40d);
|
||||
d40_desc_free(d40c, d40d);
|
||||
}
|
||||
|
||||
d40c->pending_tx = 0;
|
||||
d40c->busy = false;
|
||||
}
|
||||
|
@ -1208,7 +1229,6 @@ static void dma_tasklet(unsigned long data)
|
|||
|
||||
if (!d40d->cyclic) {
|
||||
if (async_tx_test_ack(&d40d->txd)) {
|
||||
d40_pool_lli_free(d40c, d40d);
|
||||
d40_desc_remove(d40d);
|
||||
d40_desc_free(d40c, d40d);
|
||||
} else {
|
||||
|
@ -1595,21 +1615,10 @@ static int d40_free_dma(struct d40_chan *d40c)
|
|||
u32 event;
|
||||
struct d40_phy_res *phy = d40c->phy_chan;
|
||||
bool is_src;
|
||||
struct d40_desc *d;
|
||||
struct d40_desc *_d;
|
||||
|
||||
|
||||
/* Terminate all queued and active transfers */
|
||||
d40_term_all(d40c);
|
||||
|
||||
/* Release client owned descriptors */
|
||||
if (!list_empty(&d40c->client))
|
||||
list_for_each_entry_safe(d, _d, &d40c->client, node) {
|
||||
d40_pool_lli_free(d40c, d);
|
||||
d40_desc_remove(d);
|
||||
d40_desc_free(d40c, d);
|
||||
}
|
||||
|
||||
if (phy == NULL) {
|
||||
chan_err(d40c, "phy == null\n");
|
||||
return -EINVAL;
|
||||
|
@ -1911,6 +1920,12 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
|
|||
goto err;
|
||||
}
|
||||
|
||||
/*
|
||||
* add descriptor to the prepare queue in order to be able
|
||||
* to free them later in terminate_all
|
||||
*/
|
||||
list_add_tail(&desc->node, &chan->prepare_queue);
|
||||
|
||||
spin_unlock_irqrestore(&chan->lock, flags);
|
||||
|
||||
return &desc->txd;
|
||||
|
@ -2400,6 +2415,7 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
|
|||
INIT_LIST_HEAD(&d40c->queue);
|
||||
INIT_LIST_HEAD(&d40c->pending_queue);
|
||||
INIT_LIST_HEAD(&d40c->client);
|
||||
INIT_LIST_HEAD(&d40c->prepare_queue);
|
||||
|
||||
tasklet_init(&d40c->tasklet, dma_tasklet,
|
||||
(unsigned long) d40c);
|
||||
|
|
|
@ -499,6 +499,7 @@ void drm_connector_cleanup(struct drm_connector *connector)
|
|||
mutex_lock(&dev->mode_config.mutex);
|
||||
drm_mode_object_put(dev, &connector->base);
|
||||
list_del(&connector->head);
|
||||
dev->mode_config.num_connector--;
|
||||
mutex_unlock(&dev->mode_config.mutex);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_connector_cleanup);
|
||||
|
@ -529,6 +530,7 @@ void drm_encoder_cleanup(struct drm_encoder *encoder)
|
|||
mutex_lock(&dev->mode_config.mutex);
|
||||
drm_mode_object_put(dev, &encoder->base);
|
||||
list_del(&encoder->head);
|
||||
dev->mode_config.num_encoder--;
|
||||
mutex_unlock(&dev->mode_config.mutex);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_encoder_cleanup);
|
||||
|
|
|
@ -256,7 +256,6 @@ int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed,
|
|||
{
|
||||
printk(KERN_ERR "panic occurred, switching back to text console\n");
|
||||
return drm_fb_helper_force_kernel_mode();
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_fb_helper_panic);
|
||||
|
||||
|
|
|
@ -530,7 +530,8 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
|
|||
nouveau_gpuobj_ref(NULL, &obj);
|
||||
if (ret)
|
||||
return ret;
|
||||
} else {
|
||||
} else
|
||||
if (USE_SEMA(dev)) {
|
||||
/* map fence bo into channel's vm */
|
||||
ret = nouveau_bo_vma_add(dev_priv->fence.bo, chan->vm,
|
||||
&chan->fence.vma);
|
||||
|
|
|
@ -37,8 +37,11 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
|
|||
return -ENOMEM;
|
||||
|
||||
nvbe->ttm_alloced = kmalloc(sizeof(bool) * num_pages, GFP_KERNEL);
|
||||
if (!nvbe->ttm_alloced)
|
||||
if (!nvbe->ttm_alloced) {
|
||||
kfree(nvbe->pages);
|
||||
nvbe->pages = NULL;
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
nvbe->nr_pages = 0;
|
||||
while (num_pages--) {
|
||||
|
@ -126,7 +129,7 @@ nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
|
|||
|
||||
for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
|
||||
nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
|
||||
dma_offset += NV_CTXDMA_PAGE_SIZE;
|
||||
offset_l += NV_CTXDMA_PAGE_SIZE;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -781,11 +781,20 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
|
|||
struct drm_device *dev = crtc->dev;
|
||||
struct drm_nouveau_private *dev_priv = dev->dev_private;
|
||||
struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
|
||||
struct drm_framebuffer *drm_fb = nv_crtc->base.fb;
|
||||
struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
|
||||
struct drm_framebuffer *drm_fb;
|
||||
struct nouveau_framebuffer *fb;
|
||||
int arb_burst, arb_lwm;
|
||||
int ret;
|
||||
|
||||
NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
|
||||
|
||||
/* no fb bound */
|
||||
if (!atomic && !crtc->fb) {
|
||||
NV_DEBUG_KMS(dev, "No FB bound\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/* If atomic, we want to switch to the fb we were passed, so
|
||||
* now we update pointers to do that. (We don't pin; just
|
||||
* assume we're already pinned and update the base address.)
|
||||
|
@ -794,6 +803,8 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
|
|||
drm_fb = passed_fb;
|
||||
fb = nouveau_framebuffer(passed_fb);
|
||||
} else {
|
||||
drm_fb = crtc->fb;
|
||||
fb = nouveau_framebuffer(crtc->fb);
|
||||
/* If not atomic, we can go ahead and pin, and unpin the
|
||||
* old fb we were passed.
|
||||
*/
|
||||
|
|
|
@ -519,12 +519,18 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
|
|||
struct drm_device *dev = nv_crtc->base.dev;
|
||||
struct drm_nouveau_private *dev_priv = dev->dev_private;
|
||||
struct nouveau_channel *evo = nv50_display(dev)->master;
|
||||
struct drm_framebuffer *drm_fb = nv_crtc->base.fb;
|
||||
struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
|
||||
struct drm_framebuffer *drm_fb;
|
||||
struct nouveau_framebuffer *fb;
|
||||
int ret;
|
||||
|
||||
NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
|
||||
|
||||
/* no fb bound */
|
||||
if (!atomic && !crtc->fb) {
|
||||
NV_DEBUG_KMS(dev, "No FB bound\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* If atomic, we want to switch to the fb we were passed, so
|
||||
* now we update pointers to do that. (We don't pin; just
|
||||
* assume we're already pinned and update the base address.)
|
||||
|
@ -533,6 +539,8 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
|
|||
drm_fb = passed_fb;
|
||||
fb = nouveau_framebuffer(passed_fb);
|
||||
} else {
|
||||
drm_fb = crtc->fb;
|
||||
fb = nouveau_framebuffer(crtc->fb);
|
||||
/* If not atomic, we can go ahead and pin, and unpin the
|
||||
* old fb we were passed.
|
||||
*/
|
||||
|
|
|
@@ -41,6 +41,31 @@ static void evergreen_gpu_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
static void evergreen_pcie_gen2_enable(struct radeon_device *rdev);

void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
{
	u16 ctl, v;
	int cap, err;

	cap = pci_pcie_cap(rdev->pdev);
	if (!cap)
		return;

	err = pci_read_config_word(rdev->pdev, cap + PCI_EXP_DEVCTL, &ctl);
	if (err)
		return;

	v = (ctl & PCI_EXP_DEVCTL_READRQ) >> 12;

	/* if bios or OS sets MAX_READ_REQUEST_SIZE to an invalid value, fix it
	 * to avoid hangs or performance issues
	 */
	if ((v == 0) || (v == 6) || (v == 7)) {
		ctl &= ~PCI_EXP_DEVCTL_READRQ;
		ctl |= (2 << 12);
		pci_write_config_word(rdev->pdev, cap + PCI_EXP_DEVCTL, ctl);
	}
}
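The helper added above only sanitizes the read-request field of the PCIe Device Control register. As a reference, here is a minimal userspace sketch of the same bit manipulation in C; it is not the driver code, and the mask/shift constants are spelled out locally rather than taken from pci_regs.h. The field sits in bits 14:12 and encodes a maximum read request of 128 << v bytes; encodings 6 and 7 are reserved, and the hunk also rewrites encoding 0, forcing all three to 2 (512 bytes).

#include <stdio.h>

#define DEVCTL_READRQ_MASK  0x7000u   /* bits 14:12 of the Device Control register */
#define DEVCTL_READRQ_SHIFT 12

static unsigned int fix_readrq(unsigned int ctl)
{
	unsigned int v = (ctl & DEVCTL_READRQ_MASK) >> DEVCTL_READRQ_SHIFT;

	if (v == 0 || v == 6 || v == 7) {
		ctl &= ~DEVCTL_READRQ_MASK;
		ctl |= 2u << DEVCTL_READRQ_SHIFT;   /* 128 << 2 = 512 bytes */
	}
	return ctl;
}

int main(void)
{
	unsigned int samples[] = { 0x0000, 0x2000, 0x7000 };

	for (int i = 0; i < 3; i++) {
		unsigned int fixed = fix_readrq(samples[i]);
		unsigned int bytes = 128u << ((fixed & DEVCTL_READRQ_MASK) >> DEVCTL_READRQ_SHIFT);

		printf("ctl 0x%04x -> 0x%04x (%u-byte max read request)\n",
		       samples[i], fixed, bytes);
	}
	return 0;
}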
|
||||
|
||||
void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc)
|
||||
{
|
||||
/* enable the pflip int */
|
||||
|
@ -1357,6 +1382,7 @@ int evergreen_cp_resume(struct radeon_device *rdev)
|
|||
SOFT_RESET_PA |
|
||||
SOFT_RESET_SH |
|
||||
SOFT_RESET_VGT |
|
||||
SOFT_RESET_SPI |
|
||||
SOFT_RESET_SX));
|
||||
RREG32(GRBM_SOFT_RESET);
|
||||
mdelay(15);
|
||||
|
@ -1862,6 +1888,8 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
|
|||
|
||||
WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
|
||||
|
||||
evergreen_fix_pci_max_read_req_size(rdev);
|
||||
|
||||
cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2;
|
||||
|
||||
cc_gc_shader_pipe_config |=
|
||||
|
|
|
@ -39,6 +39,7 @@ extern int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
|
|||
extern void evergreen_mc_program(struct radeon_device *rdev);
|
||||
extern void evergreen_irq_suspend(struct radeon_device *rdev);
|
||||
extern int evergreen_mc_init(struct radeon_device *rdev);
|
||||
extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
|
||||
|
||||
#define EVERGREEN_PFP_UCODE_SIZE 1120
|
||||
#define EVERGREEN_PM4_UCODE_SIZE 1376
|
||||
|
@ -669,6 +670,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)
|
|||
|
||||
WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
|
||||
|
||||
evergreen_fix_pci_max_read_req_size(rdev);
|
||||
|
||||
mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
|
||||
mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
|
||||
|
||||
|
@ -1159,6 +1162,7 @@ int cayman_cp_resume(struct radeon_device *rdev)
|
|||
SOFT_RESET_PA |
|
||||
SOFT_RESET_SH |
|
||||
SOFT_RESET_VGT |
|
||||
SOFT_RESET_SPI |
|
||||
SOFT_RESET_SX));
|
||||
RREG32(GRBM_SOFT_RESET);
|
||||
mdelay(15);
|
||||
|
|
|
@ -219,6 +219,9 @@ void radeon_get_clock_info(struct drm_device *dev)
|
|||
} else {
|
||||
DRM_INFO("Using generic clock info\n");
|
||||
|
||||
/* may need to be per card */
|
||||
rdev->clock.max_pixel_clock = 35000;
|
||||
|
||||
if (rdev->flags & RADEON_IS_IGP) {
|
||||
p1pll->reference_freq = 1432;
|
||||
p2pll->reference_freq = 1432;
|
||||
|
|
|
@ -3298,6 +3298,14 @@ void radeon_combios_asic_init(struct drm_device *dev)
|
|||
rdev->pdev->subsystem_device == 0x30a4)
|
||||
return;
|
||||
|
||||
/* quirk for rs4xx Compaq Presario V5245EU laptop to make it resume
|
||||
* - it hangs on resume inside the dynclk 1 table.
|
||||
*/
|
||||
if (rdev->family == CHIP_RS480 &&
|
||||
rdev->pdev->subsystem_vendor == 0x103c &&
|
||||
rdev->pdev->subsystem_device == 0x30ae)
|
||||
return;
|
||||
|
||||
/* DYN CLK 1 */
|
||||
table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
|
||||
if (table)
|
||||
|
|
|
@ -1297,12 +1297,33 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
|
|||
if (!radeon_dig_connector->edp_on)
|
||||
atombios_set_edp_panel_power(connector,
|
||||
ATOM_TRANSMITTER_ACTION_POWER_OFF);
|
||||
} else {
|
||||
/* need to setup ddc on the bridge */
|
||||
if (radeon_connector_encoder_is_dp_bridge(connector)) {
|
||||
} else if (radeon_connector_encoder_is_dp_bridge(connector)) {
|
||||
/* DP bridges are always DP */
|
||||
radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT;
|
||||
/* get the DPCD from the bridge */
|
||||
radeon_dp_getdpcd(radeon_connector);
|
||||
|
||||
if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
|
||||
ret = connector_status_connected;
|
||||
else {
|
||||
/* need to setup ddc on the bridge */
|
||||
if (encoder)
|
||||
radeon_atom_ext_encoder_setup_ddc(encoder);
|
||||
if (radeon_ddc_probe(radeon_connector,
|
||||
radeon_connector->requires_extended_probe))
|
||||
ret = connector_status_connected;
|
||||
}
|
||||
|
||||
if ((ret == connector_status_disconnected) &&
|
||||
radeon_connector->dac_load_detect) {
|
||||
struct drm_encoder *encoder = radeon_best_single_encoder(connector);
|
||||
struct drm_encoder_helper_funcs *encoder_funcs;
|
||||
if (encoder) {
|
||||
encoder_funcs = encoder->helper_private;
|
||||
ret = encoder_funcs->detect(encoder, connector);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
|
||||
if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
|
||||
ret = connector_status_connected;
|
||||
|
@ -1318,16 +1339,6 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
|
|||
ret = connector_status_connected;
|
||||
}
|
||||
}
|
||||
|
||||
if ((ret == connector_status_disconnected) &&
|
||||
radeon_connector->dac_load_detect) {
|
||||
struct drm_encoder *encoder = radeon_best_single_encoder(connector);
|
||||
struct drm_encoder_helper_funcs *encoder_funcs;
|
||||
if (encoder) {
|
||||
encoder_funcs = encoder->helper_private;
|
||||
ret = encoder_funcs->detect(encoder, connector);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
radeon_connector_update_scratch_regs(connector, ret);
|
||||
|
|
|
@ -707,16 +707,21 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
|
|||
radeon_router_select_ddc_port(radeon_connector);
|
||||
|
||||
if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
|
||||
(radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
|
||||
(radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) ||
|
||||
radeon_connector_encoder_is_dp_bridge(&radeon_connector->base)) {
|
||||
struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
|
||||
|
||||
if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
|
||||
dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) && dig->dp_i2c_bus)
|
||||
radeon_connector->edid = drm_get_edid(&radeon_connector->base, &dig->dp_i2c_bus->adapter);
|
||||
}
|
||||
if (!radeon_connector->ddc_bus)
|
||||
return -1;
|
||||
if (!radeon_connector->edid) {
|
||||
radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
|
||||
radeon_connector->edid = drm_get_edid(&radeon_connector->base,
|
||||
&dig->dp_i2c_bus->adapter);
|
||||
else if (radeon_connector->ddc_bus && !radeon_connector->edid)
|
||||
radeon_connector->edid = drm_get_edid(&radeon_connector->base,
|
||||
&radeon_connector->ddc_bus->adapter);
|
||||
} else {
|
||||
if (radeon_connector->ddc_bus && !radeon_connector->edid)
|
||||
radeon_connector->edid = drm_get_edid(&radeon_connector->base,
|
||||
&radeon_connector->ddc_bus->adapter);
|
||||
}
|
||||
|
||||
if (!radeon_connector->edid) {
|
||||
|
|
|
@ -277,6 +277,7 @@
|
|||
#define USB_DEVICE_ID_PENPOWER 0x00f4
|
||||
|
||||
#define USB_VENDOR_ID_GREENASIA 0x0e8f
|
||||
#define USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD 0x3013
|
||||
|
||||
#define USB_VENDOR_ID_GRETAGMACBETH 0x0971
|
||||
#define USB_DEVICE_ID_GRETAGMACBETH_HUEY 0x2005
|
||||
|
|
|
@@ -81,6 +81,28 @@ MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state fie
#define NO_TOUCHES -1
#define SINGLE_TOUCH_UP -2

/* Touch surface information. Dimension is in hundredths of a mm, min and max
 * are in units. */
#define MOUSE_DIMENSION_X (float)9056
#define MOUSE_MIN_X -1100
#define MOUSE_MAX_X 1258
#define MOUSE_RES_X ((MOUSE_MAX_X - MOUSE_MIN_X) / (MOUSE_DIMENSION_X / 100))
#define MOUSE_DIMENSION_Y (float)5152
#define MOUSE_MIN_Y -1589
#define MOUSE_MAX_Y 2047
#define MOUSE_RES_Y ((MOUSE_MAX_Y - MOUSE_MIN_Y) / (MOUSE_DIMENSION_Y / 100))

#define TRACKPAD_DIMENSION_X (float)13000
#define TRACKPAD_MIN_X -2909
#define TRACKPAD_MAX_X 3167
#define TRACKPAD_RES_X \
	((TRACKPAD_MAX_X - TRACKPAD_MIN_X) / (TRACKPAD_DIMENSION_X / 100))
#define TRACKPAD_DIMENSION_Y (float)11000
#define TRACKPAD_MIN_Y -2456
#define TRACKPAD_MAX_Y 2565
#define TRACKPAD_RES_Y \
	((TRACKPAD_MAX_Y - TRACKPAD_MIN_Y) / (TRACKPAD_DIMENSION_Y / 100))
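These constants describe the touch surface geometry; the RES macros convert "device units across a surface measured in hundredths of a millimetre" into units per millimetre, which is what gets reported via input_abs_set_res() further down. A small standalone C sketch of that arithmetic — the values are copied from the defines above, the helper name is made up for illustration:

#include <stdio.h>

/* dimension is in hundredths of a mm, min/max are in device units */
static int res_units_per_mm(int min, int max, float dimension)
{
	return (max - min) / (dimension / 100);
}

int main(void)
{
	printf("mouse X:    %d units/mm\n", res_units_per_mm(-1100, 1258, 9056));
	printf("mouse Y:    %d units/mm\n", res_units_per_mm(-1589, 2047, 5152));
	printf("trackpad X: %d units/mm\n", res_units_per_mm(-2909, 3167, 13000));
	printf("trackpad Y: %d units/mm\n", res_units_per_mm(-2456, 2565, 11000));
	return 0;
}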
|
||||
|
||||
/**
|
||||
* struct magicmouse_sc - Tracks Magic Mouse-specific data.
|
||||
* @input: Input device through which we report events.
|
||||
|
@ -406,17 +428,31 @@ static void magicmouse_setup_input(struct input_dev *input, struct hid_device *h
|
|||
* inverse of the reported Y.
|
||||
*/
|
||||
if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
|
||||
input_set_abs_params(input, ABS_MT_POSITION_X, -1100,
|
||||
1358, 4, 0);
|
||||
input_set_abs_params(input, ABS_MT_POSITION_Y, -1589,
|
||||
2047, 4, 0);
|
||||
input_set_abs_params(input, ABS_MT_POSITION_X,
|
||||
MOUSE_MIN_X, MOUSE_MAX_X, 4, 0);
|
||||
input_set_abs_params(input, ABS_MT_POSITION_Y,
|
||||
MOUSE_MIN_Y, MOUSE_MAX_Y, 4, 0);
|
||||
|
||||
input_abs_set_res(input, ABS_MT_POSITION_X,
|
||||
MOUSE_RES_X);
|
||||
input_abs_set_res(input, ABS_MT_POSITION_Y,
|
||||
MOUSE_RES_Y);
|
||||
} else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
|
||||
input_set_abs_params(input, ABS_X, -2909, 3167, 4, 0);
|
||||
input_set_abs_params(input, ABS_Y, -2456, 2565, 4, 0);
|
||||
input_set_abs_params(input, ABS_MT_POSITION_X, -2909,
|
||||
3167, 4, 0);
|
||||
input_set_abs_params(input, ABS_MT_POSITION_Y, -2456,
|
||||
2565, 4, 0);
|
||||
input_set_abs_params(input, ABS_X, TRACKPAD_MIN_X,
|
||||
TRACKPAD_MAX_X, 4, 0);
|
||||
input_set_abs_params(input, ABS_Y, TRACKPAD_MIN_Y,
|
||||
TRACKPAD_MAX_Y, 4, 0);
|
||||
input_set_abs_params(input, ABS_MT_POSITION_X,
|
||||
TRACKPAD_MIN_X, TRACKPAD_MAX_X, 4, 0);
|
||||
input_set_abs_params(input, ABS_MT_POSITION_Y,
|
||||
TRACKPAD_MIN_Y, TRACKPAD_MAX_Y, 4, 0);
|
||||
|
||||
input_abs_set_res(input, ABS_X, TRACKPAD_RES_X);
|
||||
input_abs_set_res(input, ABS_Y, TRACKPAD_RES_Y);
|
||||
input_abs_set_res(input, ABS_MT_POSITION_X,
|
||||
TRACKPAD_RES_X);
|
||||
input_abs_set_res(input, ABS_MT_POSITION_Y,
|
||||
TRACKPAD_RES_Y);
|
||||
}
|
||||
|
||||
input_set_events_per_packet(input, 60);
|
||||
|
@@ -501,9 +537,17 @@ static int magicmouse_probe(struct hid_device *hdev,
	}
	report->size = 6;

	/*
	 * Some devices respond with 'invalid report id' when feature
	 * report switching it into multitouch mode is sent to it.
	 *
	 * This results in -EIO from the _raw low-level transport callback,
	 * but there seems to be no other way of switching the mode.
	 * Thus the super-ugly hacky success check below.
	 */
	ret = hdev->hid_output_raw_report(hdev, feature, sizeof(feature),
				HID_FEATURE_REPORT);
	if (ret != sizeof(feature)) {
	if (ret != -EIO && ret != sizeof(feature)) {
		hid_err(hdev, "unable to request touch data (%d)\n", ret);
		goto err_stop_hw;
	}

@ -353,11 +353,7 @@ static int wacom_probe(struct hid_device *hdev,
|
|||
if (ret) {
|
||||
hid_warn(hdev, "can't create sysfs battery attribute, err: %d\n",
|
||||
ret);
|
||||
/*
|
||||
* battery attribute is not critical for the tablet, but if it
|
||||
* failed then there is no need to create ac attribute
|
||||
*/
|
||||
goto move_on;
|
||||
goto err_battery;
|
||||
}
|
||||
|
||||
wdata->ac.properties = wacom_ac_props;
|
||||
|
@ -371,14 +367,8 @@ static int wacom_probe(struct hid_device *hdev,
|
|||
if (ret) {
|
||||
hid_warn(hdev,
|
||||
"can't create ac battery attribute, err: %d\n", ret);
|
||||
/*
|
||||
* ac attribute is not critical for the tablet, but if it
|
||||
* failed then we don't want to battery attribute to exist
|
||||
*/
|
||||
power_supply_unregister(&wdata->battery);
|
||||
goto err_ac;
|
||||
}
|
||||
|
||||
move_on:
|
||||
#endif
|
||||
hidinput = list_entry(hdev->inputs.next, struct hid_input, list);
|
||||
input = hidinput->input;
|
||||
|
@ -416,6 +406,13 @@ move_on:
|
|||
|
||||
return 0;
|
||||
|
||||
#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
|
||||
err_ac:
|
||||
power_supply_unregister(&wdata->battery);
|
||||
err_battery:
|
||||
device_remove_file(&hdev->dev, &dev_attr_speed);
|
||||
hid_hw_stop(hdev);
|
||||
#endif
|
||||
err_free:
|
||||
kfree(wdata);
|
||||
return ret;
|
||||
|
@ -426,6 +423,7 @@ static void wacom_remove(struct hid_device *hdev)
|
|||
#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
|
||||
struct wacom_data *wdata = hid_get_drvdata(hdev);
|
||||
#endif
|
||||
device_remove_file(&hdev->dev, &dev_attr_speed);
|
||||
hid_hw_stop(hdev);
|
||||
|
||||
#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
|
||||
|
|
|
@ -47,6 +47,7 @@ static const struct hid_blacklist {
|
|||
{ USB_VENDOR_ID_AFATECH, USB_DEVICE_ID_AFATECH_AF9016, HID_QUIRK_FULLSPEED_INTERVAL },
|
||||
|
||||
{ USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH, HID_QUIRK_MULTI_INPUT },
|
||||
{ USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD, HID_QUIRK_MULTI_INPUT },
|
||||
{ USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
|
||||
{ USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII, HID_QUIRK_MULTI_INPUT },
|
||||
{ USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS, HID_QUIRK_MULTI_INPUT },
|
||||
|
|
|
@@ -124,7 +124,7 @@ static inline int MV_TO_LIMIT(int mv, int range)

static inline int ADC_TO_CURR(int adc, int gain)
{
	return adc * 1400000 / gain * 255;
	return adc * 1400000 / (gain * 255);
}
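The only change here is the parentheses, but it changes the result: C's / and * have equal precedence and associate left to right, so the old expression divided by gain and then multiplied by 255. A standalone C sketch with made-up inputs (long long is used so the unparenthesised variant does not overflow in the demo):

#include <stdio.h>

int main(void)
{
	long long adc = 100, gain = 5;

	long long wrong = adc * 1400000 / gain * 255;    /* (adc * 1400000 / gain) * 255 */
	long long right = adc * 1400000 / (gain * 255);  /* adc * 1400000 / (gain * 255) */

	printf("without parentheses: %lld\n", wrong);   /* 7140000000 */
	printf("with parentheses:    %lld\n", right);   /* 109803 */
	return 0;
}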
|
||||
|
||||
/*
|
||||
|
|
|
@@ -141,13 +141,11 @@ static int ucd9000_probe(struct i2c_client *client,
	block_buffer[ret] = '\0';
	dev_info(&client->dev, "Device ID %s\n", block_buffer);

	mid = NULL;
	for (i = 0; i < ARRAY_SIZE(ucd9000_id); i++) {
		mid = &ucd9000_id[i];
	for (mid = ucd9000_id; mid->name[0]; mid++) {
		if (!strncasecmp(mid->name, block_buffer, strlen(mid->name)))
			break;
	}
	if (!mid || !strlen(mid->name)) {
	if (!mid->name[0]) {
		dev_err(&client->dev, "Unsupported device\n");
		return -ENODEV;
	}
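The rewritten loop walks the ID table until it reaches the terminating entry with an empty name instead of indexing up to ARRAY_SIZE(), so mid is always a valid pointer afterwards and the NULL check disappears. A generic C sketch of that sentinel-terminated, prefix-matching lookup — the table contents and helper name here are illustrative, not the driver's:

#include <stdio.h>
#include <string.h>
#include <strings.h>

struct chip_id {
	const char *name;
};

/* Illustrative table; a real driver table lists its supported parts and
 * ends with an empty-name terminator entry, as ucd9000_id does.
 */
static const struct chip_id ids[] = {
	{ "ucd90120" },
	{ "ucd90124" },
	{ "" },			/* terminator */
};

static const struct chip_id *find_id(const char *device_id)
{
	const struct chip_id *mid;

	for (mid = ids; mid->name[0]; mid++)
		if (!strncasecmp(mid->name, device_id, strlen(mid->name)))
			return mid;

	return NULL;		/* fell off the end: unsupported device */
}

int main(void)
{
	printf("UCD90124A: %s\n", find_id("UCD90124A") ? "supported" : "unsupported");
	printf("UCD7242:   %s\n", find_id("UCD7242") ? "supported" : "unsupported");
	return 0;
}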
|
||||
|
|
|
@ -68,13 +68,11 @@ static int ucd9200_probe(struct i2c_client *client,
|
|||
block_buffer[ret] = '\0';
|
||||
dev_info(&client->dev, "Device ID %s\n", block_buffer);
|
||||
|
||||
mid = NULL;
|
||||
for (i = 0; i < ARRAY_SIZE(ucd9200_id); i++) {
|
||||
mid = &ucd9200_id[i];
|
||||
for (mid = ucd9200_id; mid->name[0]; mid++) {
|
||||
if (!strncasecmp(mid->name, block_buffer, strlen(mid->name)))
|
||||
break;
|
||||
}
|
||||
if (!mid || !strlen(mid->name)) {
|
||||
if (!mid->name[0]) {
|
||||
dev_err(&client->dev, "Unsupported device\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
|
|
@ -109,12 +109,15 @@ static int __devinit ce4100_i2c_probe(struct pci_dev *dev,
|
|||
return -EINVAL;
|
||||
}
|
||||
sds = kzalloc(sizeof(*sds), GFP_KERNEL);
|
||||
if (!sds)
|
||||
if (!sds) {
|
||||
ret = -ENOMEM;
|
||||
goto err_mem;
|
||||
}
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(sds->pdev); i++) {
|
||||
sds->pdev[i] = add_i2c_device(dev, i);
|
||||
if (IS_ERR(sds->pdev[i])) {
|
||||
ret = PTR_ERR(sds->pdev[i]);
|
||||
while (--i >= 0)
|
||||
platform_device_unregister(sds->pdev[i]);
|
||||
goto err_dev_add;
|
||||
|
|
|
@@ -270,14 +270,30 @@ static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev)

	/* Rounds down to not include partial word at the end of buf */
	words_to_transfer = buf_remaining / BYTES_PER_FIFO_WORD;
	if (words_to_transfer > tx_fifo_avail)
		words_to_transfer = tx_fifo_avail;

	i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
	/* It's very common to have < 4 bytes, so optimize that case. */
	if (words_to_transfer) {
		if (words_to_transfer > tx_fifo_avail)
			words_to_transfer = tx_fifo_avail;

	buf += words_to_transfer * BYTES_PER_FIFO_WORD;
	buf_remaining -= words_to_transfer * BYTES_PER_FIFO_WORD;
	tx_fifo_avail -= words_to_transfer;
		/*
		 * Update state before writing to FIFO. If this causes us
		 * to finish writing all bytes (AKA buf_remaining goes to 0) we
		 * have a potential for an interrupt (PACKET_XFER_COMPLETE is
		 * not maskable). We need to make sure that the isr sees
		 * buf_remaining as 0 and doesn't call us back re-entrantly.
		 */
		buf_remaining -= words_to_transfer * BYTES_PER_FIFO_WORD;
		tx_fifo_avail -= words_to_transfer;
		i2c_dev->msg_buf_remaining = buf_remaining;
		i2c_dev->msg_buf = buf +
			words_to_transfer * BYTES_PER_FIFO_WORD;
		barrier();

		i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);

		buf += words_to_transfer * BYTES_PER_FIFO_WORD;
	}
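The reworked path pushes whole 32-bit words first (bounded by FIFO space), publishes the updated msg_buf/msg_buf_remaining before touching the FIFO so the non-maskable completion interrupt sees consistent state, and leaves any 1-3 trailing bytes for the partial-word path that follows. The sketch below only illustrates the word/byte split arithmetic, with made-up buffer and FIFO sizes; it does not model the interrupt ordering.

#include <stdio.h>

#define BYTES_PER_FIFO_WORD 4

int main(void)
{
	unsigned int buf_remaining = 11;	/* message bytes still to send (made up) */
	unsigned int tx_fifo_avail = 8;		/* free FIFO slots, in words (made up) */

	/* whole words first, bounded by the space left in the FIFO */
	unsigned int words = buf_remaining / BYTES_PER_FIFO_WORD;
	if (words > tx_fifo_avail)
		words = tx_fifo_avail;

	unsigned int left = buf_remaining - words * BYTES_PER_FIFO_WORD;

	printf("push %u whole word(s); %u byte(s) remain", words, left);
	printf(" (a trailing 1-3 bytes would be packed into one last word)\n");
	return 0;
}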
|
||||
|
||||
/*
|
||||
* If there is a partial word at the end of buf, handle it manually to
|
||||
|
@ -287,14 +303,15 @@ static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev)
|
|||
if (tx_fifo_avail > 0 && buf_remaining > 0) {
|
||||
BUG_ON(buf_remaining > 3);
|
||||
memcpy(&val, buf, buf_remaining);
|
||||
|
||||
/* Again update before writing to FIFO to make sure isr sees. */
|
||||
i2c_dev->msg_buf_remaining = 0;
|
||||
i2c_dev->msg_buf = NULL;
|
||||
barrier();
|
||||
|
||||
i2c_writel(i2c_dev, val, I2C_TX_FIFO);
|
||||
buf_remaining = 0;
|
||||
tx_fifo_avail--;
|
||||
}
|
||||
|
||||
BUG_ON(tx_fifo_avail > 0 && buf_remaining > 0);
|
||||
i2c_dev->msg_buf_remaining = buf_remaining;
|
||||
i2c_dev->msg_buf = buf;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -411,9 +428,10 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
|
|||
tegra_i2c_mask_irq(i2c_dev, I2C_INT_TX_FIFO_DATA_REQ);
|
||||
}
|
||||
|
||||
if ((status & I2C_INT_PACKET_XFER_COMPLETE) &&
|
||||
!i2c_dev->msg_buf_remaining)
|
||||
if (status & I2C_INT_PACKET_XFER_COMPLETE) {
|
||||
BUG_ON(i2c_dev->msg_buf_remaining);
|
||||
complete(&i2c_dev->msg_complete);
|
||||
}
|
||||
|
||||
i2c_writel(i2c_dev, status, I2C_INT_STATUS);
|
||||
if (i2c_dev->is_dvc)
|
||||
|
@ -531,7 +549,7 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
|
|||
|
||||
static u32 tegra_i2c_func(struct i2c_adapter *adap)
|
||||
{
|
||||
return I2C_FUNC_I2C;
|
||||
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
|
||||
}
|
||||
|
||||
static const struct i2c_algorithm tegra_i2c_algo = {
|
||||
|
@ -719,6 +737,17 @@ static int tegra_i2c_resume(struct platform_device *pdev)
|
|||
}
|
||||
#endif
|
||||
|
||||
#if defined(CONFIG_OF)
|
||||
/* Match table for of_platform binding */
|
||||
static const struct of_device_id tegra_i2c_of_match[] __devinitconst = {
|
||||
{ .compatible = "nvidia,tegra20-i2c", },
|
||||
{},
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, tegra_i2c_of_match);
|
||||
#else
|
||||
#define tegra_i2c_of_match NULL
|
||||
#endif
|
||||
|
||||
static struct platform_driver tegra_i2c_driver = {
|
||||
.probe = tegra_i2c_probe,
|
||||
.remove = tegra_i2c_remove,
|
||||
|
@ -729,6 +758,7 @@ static struct platform_driver tegra_i2c_driver = {
|
|||
.driver = {
|
||||
.name = "tegra-i2c",
|
||||
.owner = THIS_MODULE,
|
||||
.of_match_table = tegra_i2c_of_match,
|
||||
},
|
||||
};
|
||||
|
||||
|
|
|
@ -605,7 +605,9 @@ static void build_inv_all(struct iommu_cmd *cmd)
|
|||
* Writes the command to the IOMMUs command buffer and informs the
|
||||
* hardware about the new command.
|
||||
*/
|
||||
static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
|
||||
static int iommu_queue_command_sync(struct amd_iommu *iommu,
|
||||
struct iommu_cmd *cmd,
|
||||
bool sync)
|
||||
{
|
||||
u32 left, tail, head, next_tail;
|
||||
unsigned long flags;
|
||||
|
@ -639,13 +641,18 @@ again:
|
|||
copy_cmd_to_buffer(iommu, cmd, tail);
|
||||
|
||||
/* We need to sync now to make sure all commands are processed */
|
||||
iommu->need_sync = true;
|
||||
iommu->need_sync = sync;
|
||||
|
||||
spin_unlock_irqrestore(&iommu->lock, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
|
||||
{
|
||||
return iommu_queue_command_sync(iommu, cmd, true);
|
||||
}
|
||||
|
||||
/*
|
||||
* This function queues a completion wait command into the command
|
||||
* buffer of an IOMMU
|
||||
|
@ -661,7 +668,7 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
|
|||
|
||||
build_completion_wait(&cmd, (u64)&sem);
|
||||
|
||||
ret = iommu_queue_command(iommu, &cmd);
|
||||
ret = iommu_queue_command_sync(iommu, &cmd, false);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -840,14 +847,9 @@ static void domain_flush_complete(struct protection_domain *domain)
|
|||
static void domain_flush_devices(struct protection_domain *domain)
|
||||
{
|
||||
struct iommu_dev_data *dev_data;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&domain->lock, flags);
|
||||
|
||||
list_for_each_entry(dev_data, &domain->dev_list, list)
|
||||
device_flush_dte(dev_data);
|
||||
|
||||
spin_unlock_irqrestore(&domain->lock, flags);
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
|
|
|
@ -10,9 +10,9 @@ typedef struct dev_info dev_info_t;
|
|||
|
||||
struct linear_private_data
|
||||
{
|
||||
struct rcu_head rcu;
|
||||
sector_t array_sectors;
|
||||
dev_info_t disks[0];
|
||||
struct rcu_head rcu;
|
||||
};
|
||||
|
||||
|
||||
|
|
|
@ -848,7 +848,7 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
|
|||
bio->bi_end_io = super_written;
|
||||
|
||||
atomic_inc(&mddev->pending_writes);
|
||||
submit_bio(REQ_WRITE | REQ_SYNC | REQ_FLUSH | REQ_FUA, bio);
|
||||
submit_bio(WRITE_FLUSH_FUA, bio);
|
||||
}
|
||||
|
||||
void md_super_wait(mddev_t *mddev)
|
||||
|
@@ -1138,8 +1138,11 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
		ret = 0;
	}
	rdev->sectors = rdev->sb_start;
	/* Limit to 4TB as metadata cannot record more than that */
	if (rdev->sectors >= (2ULL << 32))
		rdev->sectors = (2ULL << 32) - 2;

	if (rdev->sectors < sb->size * 2 && sb->level > 1)
	if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
		/* "this cannot possibly happen" ... */
		ret = -EINVAL;
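The clamp uses 2ULL << 32 sectors because, as the comment in the size-change hunk further down spells out, the 0.90 metadata cannot record more than 4TB: 4TB == 2^32 KB, or 2*2^32 sectors of 512 bytes. A quick standalone C check of that arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned long long max_sectors = 2ULL << 32;	/* 2 * 2^32 sectors */
	unsigned long long bytes = max_sectors * 512;	/* 512-byte sectors */

	printf("max sectors: %llu\n", max_sectors);	/* 8589934592 */
	printf("max bytes:   %llu = %llu TiB\n", bytes, bytes >> 40);	/* 4 TiB */
	return 0;
}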
|
||||
|
||||
|
@ -1173,7 +1176,7 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
|
|||
mddev->clevel[0] = 0;
|
||||
mddev->layout = sb->layout;
|
||||
mddev->raid_disks = sb->raid_disks;
|
||||
mddev->dev_sectors = sb->size * 2;
|
||||
mddev->dev_sectors = ((sector_t)sb->size) * 2;
|
||||
mddev->events = ev1;
|
||||
mddev->bitmap_info.offset = 0;
|
||||
mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
|
||||
|
@ -1415,6 +1418,11 @@ super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
|
|||
rdev->sb_start = calc_dev_sboffset(rdev);
|
||||
if (!num_sectors || num_sectors > rdev->sb_start)
|
||||
num_sectors = rdev->sb_start;
|
||||
/* Limit to 4TB as metadata cannot record more than that.
|
||||
* 4TB == 2^32 KB, or 2*2^32 sectors.
|
||||
*/
|
||||
if (num_sectors >= (2ULL << 32))
|
||||
num_sectors = (2ULL << 32) - 2;
|
||||
md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
|
||||
rdev->sb_page);
|
||||
md_super_wait(rdev->mddev);
|
||||
|
@ -1738,6 +1746,11 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
|
|||
sb->level = cpu_to_le32(mddev->level);
|
||||
sb->layout = cpu_to_le32(mddev->layout);
|
||||
|
||||
if (test_bit(WriteMostly, &rdev->flags))
|
||||
sb->devflags |= WriteMostly1;
|
||||
else
|
||||
sb->devflags &= ~WriteMostly1;
|
||||
|
||||
if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
|
||||
sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
|
||||
sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
|
||||
|
@ -2561,7 +2574,10 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
|
|||
int err = -EINVAL;
|
||||
if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
|
||||
md_error(rdev->mddev, rdev);
|
||||
err = 0;
|
||||
if (test_bit(Faulty, &rdev->flags))
|
||||
err = 0;
|
||||
else
|
||||
err = -EBUSY;
|
||||
} else if (cmd_match(buf, "remove")) {
|
||||
if (rdev->raid_disk >= 0)
|
||||
err = -EBUSY;
|
||||
|
@ -2584,7 +2600,7 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
|
|||
err = 0;
|
||||
} else if (cmd_match(buf, "-blocked")) {
|
||||
if (!test_bit(Faulty, &rdev->flags) &&
|
||||
test_bit(BlockedBadBlocks, &rdev->flags)) {
|
||||
rdev->badblocks.unacked_exist) {
|
||||
/* metadata handler doesn't understand badblocks,
|
||||
* so we need to fail the device
|
||||
*/
|
||||
|
@ -5983,6 +5999,8 @@ static int set_disk_faulty(mddev_t *mddev, dev_t dev)
|
|||
return -ENODEV;
|
||||
|
||||
md_error(mddev, rdev);
|
||||
if (!test_bit(Faulty, &rdev->flags))
|
||||
return -EBUSY;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -1099,12 +1099,11 @@ read_again:
|
|||
bio_list_add(&conf->pending_bio_list, mbio);
|
||||
spin_unlock_irqrestore(&conf->device_lock, flags);
|
||||
}
|
||||
r1_bio_write_done(r1_bio);
|
||||
|
||||
/* In case raid1d snuck in to freeze_array */
|
||||
wake_up(&conf->wait_barrier);
|
||||
|
||||
/* Mustn't call r1_bio_write_done before this next test,
|
||||
* as it could result in the bio being freed.
|
||||
*/
|
||||
if (sectors_handled < (bio->bi_size >> 9)) {
|
||||
r1_bio_write_done(r1_bio);
|
||||
/* We need another r1_bio. It has already been counted
|
||||
* in bio->bi_phys_segments
|
||||
*/
|
||||
|
@ -1117,6 +1116,11 @@ read_again:
|
|||
goto retry_write;
|
||||
}
|
||||
|
||||
r1_bio_write_done(r1_bio);
|
||||
|
||||
/* In case raid1d snuck in to freeze_array */
|
||||
wake_up(&conf->wait_barrier);
|
||||
|
||||
if (do_sync || !bitmap || !plugged)
|
||||
md_wakeup_thread(mddev->thread);
|
||||
|
||||
|
|
|
@ -337,6 +337,21 @@ static void close_write(r10bio_t *r10_bio)
|
|||
md_write_end(r10_bio->mddev);
|
||||
}
|
||||
|
||||
static void one_write_done(r10bio_t *r10_bio)
|
||||
{
|
||||
if (atomic_dec_and_test(&r10_bio->remaining)) {
|
||||
if (test_bit(R10BIO_WriteError, &r10_bio->state))
|
||||
reschedule_retry(r10_bio);
|
||||
else {
|
||||
close_write(r10_bio);
|
||||
if (test_bit(R10BIO_MadeGood, &r10_bio->state))
|
||||
reschedule_retry(r10_bio);
|
||||
else
|
||||
raid_end_bio_io(r10_bio);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void raid10_end_write_request(struct bio *bio, int error)
|
||||
{
|
||||
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
|
||||
|
@ -387,17 +402,7 @@ static void raid10_end_write_request(struct bio *bio, int error)
|
|||
* Let's see if all mirrored write operations have finished
|
||||
* already.
|
||||
*/
|
||||
if (atomic_dec_and_test(&r10_bio->remaining)) {
|
||||
if (test_bit(R10BIO_WriteError, &r10_bio->state))
|
||||
reschedule_retry(r10_bio);
|
||||
else {
|
||||
close_write(r10_bio);
|
||||
if (test_bit(R10BIO_MadeGood, &r10_bio->state))
|
||||
reschedule_retry(r10_bio);
|
||||
else
|
||||
raid_end_bio_io(r10_bio);
|
||||
}
|
||||
}
|
||||
one_write_done(r10_bio);
|
||||
if (dec_rdev)
|
||||
rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
|
||||
}
|
||||
|
@ -1127,20 +1132,12 @@ retry_write:
|
|||
spin_unlock_irqrestore(&conf->device_lock, flags);
|
||||
}
|
||||
|
||||
if (atomic_dec_and_test(&r10_bio->remaining)) {
|
||||
/* This matches the end of raid10_end_write_request() */
|
||||
bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
|
||||
r10_bio->sectors,
|
||||
!test_bit(R10BIO_Degraded, &r10_bio->state),
|
||||
0);
|
||||
md_write_end(mddev);
|
||||
raid_end_bio_io(r10_bio);
|
||||
}
|
||||
|
||||
/* In case raid10d snuck in to freeze_array */
|
||||
wake_up(&conf->wait_barrier);
|
||||
/* Don't remove the bias on 'remaining' (one_write_done) until
|
||||
* after checking if we need to go around again.
|
||||
*/
|
||||
|
||||
if (sectors_handled < (bio->bi_size >> 9)) {
|
||||
one_write_done(r10_bio);
|
||||
/* We need another r10_bio. It has already been counted
|
||||
* in bio->bi_phys_segments.
|
||||
*/
|
||||
|
@ -1154,6 +1151,10 @@ retry_write:
|
|||
r10_bio->state = 0;
|
||||
goto retry_write;
|
||||
}
|
||||
one_write_done(r10_bio);
|
||||
|
||||
/* In case raid10d snuck in to freeze_array */
|
||||
wake_up(&conf->wait_barrier);
|
||||
|
||||
if (do_sync || !mddev->bitmap || !plugged)
|
||||
md_wakeup_thread(mddev->thread);
|
||||
|
|
|
@ -3336,7 +3336,7 @@ static void handle_stripe(struct stripe_head *sh)
|
|||
|
||||
finish:
|
||||
/* wait for this device to become unblocked */
|
||||
if (unlikely(s.blocked_rdev))
|
||||
if (conf->mddev->external && unlikely(s.blocked_rdev))
|
||||
md_wait_for_blocked_rdev(s.blocked_rdev, conf->mddev);
|
||||
|
||||
if (s.handle_bad_blocks)
|
||||
|
|
|
@ -224,26 +224,8 @@ static struct dvb_usb_device_properties vp7045_properties;
|
|||
static int vp7045_usb_probe(struct usb_interface *intf,
|
||||
const struct usb_device_id *id)
|
||||
{
|
||||
struct dvb_usb_device *d;
|
||||
int ret = dvb_usb_device_init(intf, &vp7045_properties,
|
||||
THIS_MODULE, &d, adapter_nr);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
d->priv = kmalloc(20, GFP_KERNEL);
|
||||
if (!d->priv) {
|
||||
dvb_usb_device_exit(intf);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void vp7045_usb_disconnect(struct usb_interface *intf)
|
||||
{
|
||||
struct dvb_usb_device *d = usb_get_intfdata(intf);
|
||||
kfree(d->priv);
|
||||
dvb_usb_device_exit(intf);
|
||||
return dvb_usb_device_init(intf, &vp7045_properties,
|
||||
THIS_MODULE, NULL, adapter_nr);
|
||||
}
|
||||
|
||||
static struct usb_device_id vp7045_usb_table [] = {
|
||||
|
@ -258,7 +240,7 @@ MODULE_DEVICE_TABLE(usb, vp7045_usb_table);
|
|||
static struct dvb_usb_device_properties vp7045_properties = {
|
||||
.usb_ctrl = CYPRESS_FX2,
|
||||
.firmware = "dvb-usb-vp7045-01.fw",
|
||||
.size_of_priv = sizeof(u8 *),
|
||||
.size_of_priv = 20,
|
||||
|
||||
.num_adapters = 1,
|
||||
.adapter = {
|
||||
|
@ -305,7 +287,7 @@ static struct dvb_usb_device_properties vp7045_properties = {
|
|||
static struct usb_driver vp7045_usb_driver = {
|
||||
.name = "dvb_usb_vp7045",
|
||||
.probe = vp7045_usb_probe,
|
||||
.disconnect = vp7045_usb_disconnect,
|
||||
.disconnect = dvb_usb_device_exit,
|
||||
.id_table = vp7045_usb_table,
|
||||
};
|
||||
|
||||
|
|
|
@ -618,7 +618,6 @@ static void nvt_dump_rx_buf(struct nvt_dev *nvt)
|
|||
static void nvt_process_rx_ir_data(struct nvt_dev *nvt)
|
||||
{
|
||||
DEFINE_IR_RAW_EVENT(rawir);
|
||||
unsigned int count;
|
||||
u32 carrier;
|
||||
u8 sample;
|
||||
int i;
|
||||
|
@ -631,65 +630,38 @@ static void nvt_process_rx_ir_data(struct nvt_dev *nvt)
|
|||
if (nvt->carrier_detect_enabled)
|
||||
carrier = nvt_rx_carrier_detect(nvt);
|
||||
|
||||
count = nvt->pkts;
|
||||
nvt_dbg_verbose("Processing buffer of len %d", count);
|
||||
nvt_dbg_verbose("Processing buffer of len %d", nvt->pkts);
|
||||
|
||||
init_ir_raw_event(&rawir);
|
||||
|
||||
for (i = 0; i < count; i++) {
|
||||
nvt->pkts--;
|
||||
for (i = 0; i < nvt->pkts; i++) {
|
||||
sample = nvt->buf[i];
|
||||
|
||||
rawir.pulse = ((sample & BUF_PULSE_BIT) != 0);
|
||||
rawir.duration = US_TO_NS((sample & BUF_LEN_MASK)
|
||||
* SAMPLE_PERIOD);
|
||||
|
||||
if ((sample & BUF_LEN_MASK) == BUF_LEN_MASK) {
|
||||
if (nvt->rawir.pulse == rawir.pulse)
|
||||
nvt->rawir.duration += rawir.duration;
|
||||
else {
|
||||
nvt->rawir.duration = rawir.duration;
|
||||
nvt->rawir.pulse = rawir.pulse;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
nvt_dbg("Storing %s with duration %d",
|
||||
rawir.pulse ? "pulse" : "space", rawir.duration);
|
||||
|
||||
rawir.duration += nvt->rawir.duration;
|
||||
|
||||
init_ir_raw_event(&nvt->rawir);
|
||||
nvt->rawir.duration = 0;
|
||||
nvt->rawir.pulse = rawir.pulse;
|
||||
|
||||
if (sample == BUF_PULSE_BIT)
|
||||
rawir.pulse = false;
|
||||
|
||||
if (rawir.duration) {
|
||||
nvt_dbg("Storing %s with duration %d",
|
||||
rawir.pulse ? "pulse" : "space",
|
||||
rawir.duration);
|
||||
|
||||
ir_raw_event_store_with_filter(nvt->rdev, &rawir);
|
||||
}
|
||||
ir_raw_event_store_with_filter(nvt->rdev, &rawir);
|
||||
|
||||
/*
|
||||
* BUF_PULSE_BIT indicates end of IR data, BUF_REPEAT_BYTE
|
||||
* indicates end of IR signal, but new data incoming. In both
|
||||
* cases, it means we're ready to call ir_raw_event_handle
|
||||
*/
|
||||
if ((sample == BUF_PULSE_BIT) && nvt->pkts) {
|
||||
if ((sample == BUF_PULSE_BIT) && (i + 1 < nvt->pkts)) {
|
||||
nvt_dbg("Calling ir_raw_event_handle (signal end)\n");
|
||||
ir_raw_event_handle(nvt->rdev);
|
||||
}
|
||||
}
|
||||
|
||||
nvt->pkts = 0;
|
||||
|
||||
nvt_dbg("Calling ir_raw_event_handle (buffer empty)\n");
|
||||
ir_raw_event_handle(nvt->rdev);
|
||||
|
||||
if (nvt->pkts) {
|
||||
nvt_dbg("Odd, pkts should be 0 now... (its %u)", nvt->pkts);
|
||||
nvt->pkts = 0;
|
||||
}
|
||||
|
||||
nvt_dbg_verbose("%s done", __func__);
|
||||
}
|
||||
|
||||
|
@ -1048,7 +1020,6 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
|
|||
|
||||
spin_lock_init(&nvt->nvt_lock);
|
||||
spin_lock_init(&nvt->tx.lock);
|
||||
init_ir_raw_event(&nvt->rawir);
|
||||
|
||||
ret = -EBUSY;
|
||||
/* now claim resources */
|
||||
|
|
|
@ -67,7 +67,6 @@ static int debug;
|
|||
struct nvt_dev {
|
||||
struct pnp_dev *pdev;
|
||||
struct rc_dev *rdev;
|
||||
struct ir_raw_event rawir;
|
||||
|
||||
spinlock_t nvt_lock;
|
||||
|
||||
|
|
|
@ -2858,7 +2858,6 @@ static void ov7xx0_configure(struct sd *sd)
|
|||
case 0x60:
|
||||
PDEBUG(D_PROBE, "Sensor is a OV7660");
|
||||
sd->sensor = SEN_OV7660;
|
||||
sd->invert_led = 0;
|
||||
break;
|
||||
default:
|
||||
PDEBUG(D_PROBE, "Unknown sensor: 0x76%x", low);
|
||||
|
@ -3337,7 +3336,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
|
|||
case BRIDGE_OV519:
|
||||
cam->cam_mode = ov519_vga_mode;
|
||||
cam->nmodes = ARRAY_SIZE(ov519_vga_mode);
|
||||
sd->invert_led = !sd->invert_led;
|
||||
break;
|
||||
case BRIDGE_OVFX2:
|
||||
cam->cam_mode = ov519_vga_mode;
|
||||
|
@ -5005,24 +5003,24 @@ static const struct sd_desc sd_desc = {
|
|||
/* -- module initialisation -- */
|
||||
static const struct usb_device_id device_table[] = {
|
||||
{USB_DEVICE(0x041e, 0x4003), .driver_info = BRIDGE_W9968CF },
|
||||
{USB_DEVICE(0x041e, 0x4052), .driver_info = BRIDGE_OV519 },
|
||||
{USB_DEVICE(0x041e, 0x405f),
|
||||
{USB_DEVICE(0x041e, 0x4052),
|
||||
.driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
|
||||
{USB_DEVICE(0x041e, 0x405f), .driver_info = BRIDGE_OV519 },
|
||||
{USB_DEVICE(0x041e, 0x4060), .driver_info = BRIDGE_OV519 },
|
||||
{USB_DEVICE(0x041e, 0x4061), .driver_info = BRIDGE_OV519 },
|
||||
{USB_DEVICE(0x041e, 0x4064),
|
||||
.driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
|
||||
{USB_DEVICE(0x041e, 0x4064), .driver_info = BRIDGE_OV519 },
|
||||
{USB_DEVICE(0x041e, 0x4067), .driver_info = BRIDGE_OV519 },
|
||||
{USB_DEVICE(0x041e, 0x4068),
|
||||
{USB_DEVICE(0x041e, 0x4068), .driver_info = BRIDGE_OV519 },
|
||||
{USB_DEVICE(0x045e, 0x028c),
|
||||
.driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
|
||||
{USB_DEVICE(0x045e, 0x028c), .driver_info = BRIDGE_OV519 },
|
||||
{USB_DEVICE(0x054c, 0x0154), .driver_info = BRIDGE_OV519 },
|
||||
{USB_DEVICE(0x054c, 0x0155),
|
||||
.driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
|
||||
{USB_DEVICE(0x054c, 0x0155), .driver_info = BRIDGE_OV519 },
|
||||
{USB_DEVICE(0x05a9, 0x0511), .driver_info = BRIDGE_OV511 },
|
||||
{USB_DEVICE(0x05a9, 0x0518), .driver_info = BRIDGE_OV518 },
|
||||
{USB_DEVICE(0x05a9, 0x0519), .driver_info = BRIDGE_OV519 },
|
||||
{USB_DEVICE(0x05a9, 0x0530), .driver_info = BRIDGE_OV519 },
|
||||
{USB_DEVICE(0x05a9, 0x0519),
|
||||
.driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
|
||||
{USB_DEVICE(0x05a9, 0x0530),
|
||||
.driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
|
||||
{USB_DEVICE(0x05a9, 0x2800), .driver_info = BRIDGE_OVFX2 },
|
||||
{USB_DEVICE(0x05a9, 0x4519), .driver_info = BRIDGE_OV519 },
|
||||
{USB_DEVICE(0x05a9, 0x8519), .driver_info = BRIDGE_OV519 },
|
||||
|
|
|
@ -2386,7 +2386,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
|
|||
reg_w1(gspca_dev, 0x01, 0x22);
|
||||
msleep(100);
|
||||
reg01 = SCL_SEL_OD | S_PDN_INV;
|
||||
reg17 &= MCK_SIZE_MASK;
|
||||
reg17 &= ~MCK_SIZE_MASK;
|
||||
reg17 |= 0x04; /* clock / 4 */
|
||||
break;
|
||||
}
|
||||
|
@ -2532,6 +2532,10 @@ static int sd_start(struct gspca_dev *gspca_dev)
|
|||
if (!mode) { /* if 640x480 */
|
||||
reg17 &= ~MCK_SIZE_MASK;
|
||||
reg17 |= 0x04; /* clock / 4 */
|
||||
} else {
|
||||
reg01 &= ~SYS_SEL_48M; /* clk 24Mz */
|
||||
reg17 &= ~MCK_SIZE_MASK;
|
||||
reg17 |= 0x02; /* clock / 2 */
|
||||
}
|
||||
break;
|
||||
case SENSOR_OV7630:
|
||||
|
|
|
@@ -338,7 +338,7 @@ int pwc_init_controls(struct pwc_device *pdev)
	if (pdev->restore_factory)
		pdev->restore_factory->flags = V4L2_CTRL_FLAG_UPDATE;

	if (!pdev->features & FEATURE_MOTOR_PANTILT)
	if (!(pdev->features & FEATURE_MOTOR_PANTILT))
		return hdl->error;

	/* Motor pan / tilt / reset */
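The fix adds parentheses because ! binds more tightly than &: the old test collapsed pdev->features to 0 or 1 before masking, so it no longer tested the intended bit. A tiny C sketch (the flag value here is illustrative, not the driver's):

#include <stdio.h>

#define FEATURE_MOTOR_PANTILT 0x01	/* illustrative value */

int main(void)
{
	unsigned int features = 0x04;	/* some unrelated feature bit is set */

	printf("!features & FLAG   = %d\n", !features & FEATURE_MOTOR_PANTILT);	  /* 0 */
	printf("!(features & FLAG) = %d\n", !(features & FEATURE_MOTOR_PANTILT)); /* 1 */
	return 0;
}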
|
||||
|
|
|
@ -1332,6 +1332,8 @@ static __devinit bool viacam_serial_is_enabled(void)
|
|||
struct pci_bus *pbus = pci_find_bus(0, 0);
|
||||
u8 cbyte;
|
||||
|
||||
if (!pbus)
|
||||
return false;
|
||||
pci_bus_read_config_byte(pbus, VIACAM_SERIAL_DEVFN,
|
||||
VIACAM_SERIAL_CREG, &cbyte);
|
||||
if ((cbyte & VIACAM_SERIAL_BIT) == 0)
|
||||
|
|
|
@ -133,7 +133,7 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
|
|||
if (mrq->done)
|
||||
mrq->done(mrq);
|
||||
|
||||
mmc_host_clk_gate(host);
|
||||
mmc_host_clk_release(host);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -192,7 +192,7 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
|
|||
mrq->stop->mrq = mrq;
|
||||
}
|
||||
}
|
||||
mmc_host_clk_ungate(host);
|
||||
mmc_host_clk_hold(host);
|
||||
led_trigger_event(host->led, LED_FULL);
|
||||
host->ops->request(host, mrq);
|
||||
}
|
||||
|
@ -728,15 +728,17 @@ static inline void mmc_set_ios(struct mmc_host *host)
|
|||
*/
|
||||
void mmc_set_chip_select(struct mmc_host *host, int mode)
|
||||
{
|
||||
mmc_host_clk_hold(host);
|
||||
host->ios.chip_select = mode;
|
||||
mmc_set_ios(host);
|
||||
mmc_host_clk_release(host);
|
||||
}
|
||||
|
||||
/*
|
||||
* Sets the host clock to the highest possible frequency that
|
||||
* is below "hz".
|
||||
*/
|
||||
void mmc_set_clock(struct mmc_host *host, unsigned int hz)
|
||||
static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
|
||||
{
|
||||
WARN_ON(hz < host->f_min);
|
||||
|
||||
|
@ -747,6 +749,13 @@ void mmc_set_clock(struct mmc_host *host, unsigned int hz)
|
|||
mmc_set_ios(host);
|
||||
}
|
||||
|
||||
void mmc_set_clock(struct mmc_host *host, unsigned int hz)
|
||||
{
|
||||
mmc_host_clk_hold(host);
|
||||
__mmc_set_clock(host, hz);
|
||||
mmc_host_clk_release(host);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_MMC_CLKGATE
|
||||
/*
|
||||
* This gates the clock by setting it to 0 Hz.
|
||||
|
@ -779,7 +788,7 @@ void mmc_ungate_clock(struct mmc_host *host)
|
|||
if (host->clk_old) {
|
||||
BUG_ON(host->ios.clock);
|
||||
/* This call will also set host->clk_gated to false */
|
||||
mmc_set_clock(host, host->clk_old);
|
||||
__mmc_set_clock(host, host->clk_old);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -807,8 +816,10 @@ void mmc_set_ungated(struct mmc_host *host)
|
|||
*/
|
||||
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
|
||||
{
|
||||
mmc_host_clk_hold(host);
|
||||
host->ios.bus_mode = mode;
|
||||
mmc_set_ios(host);
|
||||
mmc_host_clk_release(host);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -816,8 +827,10 @@ void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
|
|||
*/
|
||||
void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
|
||||
{
|
||||
mmc_host_clk_hold(host);
|
||||
host->ios.bus_width = width;
|
||||
mmc_set_ios(host);
|
||||
mmc_host_clk_release(host);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -1015,8 +1028,10 @@ u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
|
|||
|
||||
ocr &= 3 << bit;
|
||||
|
||||
mmc_host_clk_hold(host);
|
||||
host->ios.vdd = bit;
|
||||
mmc_set_ios(host);
|
||||
mmc_host_clk_release(host);
|
||||
} else {
|
||||
pr_warning("%s: host doesn't support card's voltages\n",
|
||||
mmc_hostname(host));
|
||||
|
@ -1063,8 +1078,10 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11
|
|||
*/
|
||||
void mmc_set_timing(struct mmc_host *host, unsigned int timing)
|
||||
{
|
||||
mmc_host_clk_hold(host);
|
||||
host->ios.timing = timing;
|
||||
mmc_set_ios(host);
|
||||
mmc_host_clk_release(host);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -1072,8 +1089,10 @@ void mmc_set_timing(struct mmc_host *host, unsigned int timing)
|
|||
*/
|
||||
void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
|
||||
{
|
||||
mmc_host_clk_hold(host);
|
||||
host->ios.drv_type = drv_type;
|
||||
mmc_set_ios(host);
|
||||
mmc_host_clk_release(host);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -1091,6 +1110,8 @@ static void mmc_power_up(struct mmc_host *host)
|
|||
{
|
||||
int bit;
|
||||
|
||||
mmc_host_clk_hold(host);
|
||||
|
||||
/* If ocr is set, we use it */
|
||||
if (host->ocr)
|
||||
bit = ffs(host->ocr) - 1;
|
||||
|
@ -1126,10 +1147,14 @@ static void mmc_power_up(struct mmc_host *host)
|
|||
* time required to reach a stable voltage.
|
||||
*/
|
||||
mmc_delay(10);
|
||||
|
||||
mmc_host_clk_release(host);
|
||||
}
|
||||
|
||||
static void mmc_power_off(struct mmc_host *host)
|
||||
{
|
||||
mmc_host_clk_hold(host);
|
||||
|
||||
host->ios.clock = 0;
|
||||
host->ios.vdd = 0;
|
||||
|
||||
|
@ -1147,6 +1172,8 @@ static void mmc_power_off(struct mmc_host *host)
|
|||
host->ios.bus_width = MMC_BUS_WIDTH_1;
|
||||
host->ios.timing = MMC_TIMING_LEGACY;
|
||||
mmc_set_ios(host);
|
||||
|
||||
mmc_host_clk_release(host);
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -119,14 +119,14 @@ static void mmc_host_clk_gate_work(struct work_struct *work)
|
|||
}
|
||||
|
||||
/**
|
||||
* mmc_host_clk_ungate - ungate hardware MCI clocks
|
||||
* mmc_host_clk_hold - ungate hardware MCI clocks
|
||||
* @host: host to ungate.
|
||||
*
|
||||
* Makes sure the host ios.clock is restored to a non-zero value
|
||||
* past this call. Increase clock reference count and ungate clock
|
||||
* if we're the first user.
|
||||
*/
|
||||
void mmc_host_clk_ungate(struct mmc_host *host)
|
||||
void mmc_host_clk_hold(struct mmc_host *host)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
|
@ -164,14 +164,14 @@ static bool mmc_host_may_gate_card(struct mmc_card *card)
|
|||
}
|
||||
|
||||
/**
|
||||
* mmc_host_clk_gate - gate off hardware MCI clocks
|
||||
* mmc_host_clk_release - gate off hardware MCI clocks
|
||||
* @host: host to gate.
|
||||
*
|
||||
* Calls the host driver with ios.clock set to zero as often as possible
|
||||
* in order to gate off hardware MCI clocks. Decrease clock reference
|
||||
* count and schedule disabling of clock.
|
||||
*/
|
||||
void mmc_host_clk_gate(struct mmc_host *host)
|
||||
void mmc_host_clk_release(struct mmc_host *host)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
|
@ -179,7 +179,7 @@ void mmc_host_clk_gate(struct mmc_host *host)
|
|||
host->clk_requests--;
|
||||
if (mmc_host_may_gate_card(host->card) &&
|
||||
!host->clk_requests)
|
||||
schedule_work(&host->clk_gate_work);
|
||||
queue_work(system_nrt_wq, &host->clk_gate_work);
|
||||
spin_unlock_irqrestore(&host->clk_lock, flags);
|
||||
}
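The rename from gate/ungate to release/hold matches what the pair actually does: reference counting, with the real gating deferred to a workqueue (now queued on system_nrt_wq). A simplified, single-threaded C sketch of that hold/release pattern — not the driver code, which takes clk_lock and schedules clk_gate_work instead of gating inline:

#include <stdio.h>
#include <stdbool.h>

struct host {
	int clk_requests;
	bool clk_gated;
};

static void clk_hold(struct host *h)		/* cf. mmc_host_clk_hold() */
{
	if (h->clk_requests++ == 0 && h->clk_gated) {
		h->clk_gated = false;
		printf("ungate clock\n");
	}
}

static void clk_release(struct host *h)		/* cf. mmc_host_clk_release() */
{
	if (--h->clk_requests == 0) {
		h->clk_gated = true;		/* the driver queues gating work here */
		printf("gate clock\n");
	}
}

int main(void)
{
	struct host h = { 0, true };

	clk_hold(&h);		/* first user: ungates */
	clk_hold(&h);		/* nested user: no-op */
	clk_release(&h);	/* one user still holds the clock */
	clk_release(&h);	/* last user: gate (deferred in the driver) */
	return 0;
}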
|
||||
|
||||
|
@ -231,7 +231,7 @@ static inline void mmc_host_clk_exit(struct mmc_host *host)
|
|||
if (cancel_work_sync(&host->clk_gate_work))
|
||||
mmc_host_clk_gate_delayed(host);
|
||||
if (host->clk_gated)
|
||||
mmc_host_clk_ungate(host);
|
||||
mmc_host_clk_hold(host);
|
||||
/* There should be only one user now */
|
||||
WARN_ON(host->clk_requests > 1);
|
||||
}
|
||||
|
|
|
@ -16,16 +16,16 @@ int mmc_register_host_class(void);
|
|||
void mmc_unregister_host_class(void);
|
||||
|
||||
#ifdef CONFIG_MMC_CLKGATE
|
||||
void mmc_host_clk_ungate(struct mmc_host *host);
|
||||
void mmc_host_clk_gate(struct mmc_host *host);
|
||||
void mmc_host_clk_hold(struct mmc_host *host);
|
||||
void mmc_host_clk_release(struct mmc_host *host);
|
||||
unsigned int mmc_host_clk_rate(struct mmc_host *host);
|
||||
|
||||
#else
|
||||
static inline void mmc_host_clk_ungate(struct mmc_host *host)
|
||||
static inline void mmc_host_clk_hold(struct mmc_host *host)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void mmc_host_clk_gate(struct mmc_host *host)
|
||||
static inline void mmc_host_clk_release(struct mmc_host *host)
|
||||
{
|
||||
}
|
||||
|
||||
|
|
|
@ -469,56 +469,75 @@ static int sd_select_driver_type(struct mmc_card *card, u8 *status)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status)
|
||||
static void sd_update_bus_speed_mode(struct mmc_card *card)
|
||||
{
|
||||
unsigned int bus_speed = 0, timing = 0;
|
||||
int err;
|
||||
|
||||
/*
|
||||
* If the host doesn't support any of the UHS-I modes, fallback on
|
||||
* default speed.
|
||||
*/
|
||||
if (!(card->host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
|
||||
MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50)))
|
||||
return 0;
|
||||
MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50))) {
|
||||
card->sd_bus_speed = 0;
|
||||
return;
|
||||
}
|
||||
|
||||
if ((card->host->caps & MMC_CAP_UHS_SDR104) &&
|
||||
(card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)) {
|
||||
bus_speed = UHS_SDR104_BUS_SPEED;
|
||||
timing = MMC_TIMING_UHS_SDR104;
|
||||
card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR;
|
||||
card->sd_bus_speed = UHS_SDR104_BUS_SPEED;
|
||||
} else if ((card->host->caps & MMC_CAP_UHS_DDR50) &&
|
||||
(card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50)) {
|
||||
bus_speed = UHS_DDR50_BUS_SPEED;
|
||||
timing = MMC_TIMING_UHS_DDR50;
|
||||
card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR;
|
||||
card->sd_bus_speed = UHS_DDR50_BUS_SPEED;
|
||||
} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
|
||||
MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode &
|
||||
SD_MODE_UHS_SDR50)) {
|
||||
bus_speed = UHS_SDR50_BUS_SPEED;
|
||||
timing = MMC_TIMING_UHS_SDR50;
|
||||
card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR;
|
||||
card->sd_bus_speed = UHS_SDR50_BUS_SPEED;
|
||||
} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
|
||||
MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) &&
|
||||
(card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25)) {
|
||||
bus_speed = UHS_SDR25_BUS_SPEED;
|
||||
timing = MMC_TIMING_UHS_SDR25;
|
||||
card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR;
|
||||
card->sd_bus_speed = UHS_SDR25_BUS_SPEED;
|
||||
} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
|
||||
MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 |
|
||||
MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode &
|
||||
SD_MODE_UHS_SDR12)) {
|
||||
bus_speed = UHS_SDR12_BUS_SPEED;
|
||||
timing = MMC_TIMING_UHS_SDR12;
|
||||
card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR;
|
||||
card->sd_bus_speed = UHS_SDR12_BUS_SPEED;
|
||||
}
|
||||
}
|
||||
|
||||
static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status)
|
||||
{
|
||||
int err;
|
||||
unsigned int timing = 0;
|
||||
|
||||
switch (card->sd_bus_speed) {
|
||||
case UHS_SDR104_BUS_SPEED:
|
||||
timing = MMC_TIMING_UHS_SDR104;
|
||||
card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR;
|
||||
break;
|
||||
case UHS_DDR50_BUS_SPEED:
|
||||
timing = MMC_TIMING_UHS_DDR50;
|
||||
card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR;
|
||||
break;
|
||||
case UHS_SDR50_BUS_SPEED:
|
||||
timing = MMC_TIMING_UHS_SDR50;
|
||||
card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR;
|
||||
break;
|
||||
case UHS_SDR25_BUS_SPEED:
|
||||
timing = MMC_TIMING_UHS_SDR25;
|
||||
card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR;
|
||||
break;
|
||||
case UHS_SDR12_BUS_SPEED:
|
||||
timing = MMC_TIMING_UHS_SDR12;
|
||||
card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR;
|
||||
break;
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
|
||||
card->sd_bus_speed = bus_speed;
|
||||
err = mmc_sd_switch(card, 1, 0, bus_speed, status);
|
||||
err = mmc_sd_switch(card, 1, 0, card->sd_bus_speed, status);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if ((status[16] & 0xF) != bus_speed)
|
||||
if ((status[16] & 0xF) != card->sd_bus_speed)
|
||||
printk(KERN_WARNING "%s: Problem setting bus speed mode!\n",
|
||||
mmc_hostname(card->host));
|
||||
else {
|
||||
|
@ -618,18 +637,24 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card)
|
|||
mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);
|
||||
}
|
||||
|
||||
/*
|
||||
* Select the bus speed mode depending on host
|
||||
* and card capability.
|
||||
*/
|
||||
sd_update_bus_speed_mode(card);
|
||||
|
||||
/* Set the driver strength for the card */
|
||||
err = sd_select_driver_type(card, status);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
/* Set bus speed mode of the card */
|
||||
err = sd_set_bus_speed_mode(card, status);
|
||||
/* Set current limit for the card */
|
||||
err = sd_set_current_limit(card, status);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
/* Set current limit for the card */
|
||||
err = sd_set_current_limit(card, status);
|
||||
/* Set bus speed mode of the card */
|
||||
err = sd_set_bus_speed_mode(card, status);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
|
|
|
@ -16,6 +16,7 @@
|
|||
#include <linux/err.h>
|
||||
#include <linux/clk.h>
|
||||
#include <linux/gpio.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/mmc/host.h>
|
||||
#include <linux/mmc/mmc.h>
|
||||
|
|
|
@ -302,6 +302,8 @@ static int sdhci_s3c_platform_8bit_width(struct sdhci_host *host, int width)
|
|||
ctrl &= ~SDHCI_CTRL_8BITBUS;
|
||||
break;
|
||||
default:
|
||||
ctrl &= ~SDHCI_CTRL_4BITBUS;
|
||||
ctrl &= ~SDHCI_CTRL_8BITBUS;
|
||||
break;
|
||||
}
|
||||
|
||||
|
|
|
@ -120,11 +120,11 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
|
|||
mmc_data->hclk = clk_get_rate(priv->clk);
|
||||
mmc_data->set_pwr = sh_mobile_sdhi_set_pwr;
|
||||
mmc_data->get_cd = sh_mobile_sdhi_get_cd;
|
||||
if (mmc_data->flags & TMIO_MMC_HAS_IDLE_WAIT)
|
||||
mmc_data->write16_hook = sh_mobile_sdhi_write16_hook;
|
||||
mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED;
|
||||
if (p) {
|
||||
mmc_data->flags = p->tmio_flags;
|
||||
if (mmc_data->flags & TMIO_MMC_HAS_IDLE_WAIT)
|
||||
mmc_data->write16_hook = sh_mobile_sdhi_write16_hook;
|
||||
mmc_data->ocr_mask = p->tmio_ocr_mask;
|
||||
mmc_data->capabilities |= p->tmio_caps;
|
||||
|
||||
|
|
|
@ -181,7 +181,7 @@ static inline int ubi_dbg_is_erase_failure(const struct ubi_device *ubi)
|
|||
|
||||
#define ubi_dbg_msg(fmt, ...) do { \
|
||||
if (0) \
|
||||
pr_debug(fmt "\n", ##__VA_ARGS__); \
|
||||
printk(KERN_DEBUG fmt "\n", ##__VA_ARGS__); \
|
||||
} while (0)
|
||||
|
||||
#define dbg_msg(fmt, ...) ubi_dbg_msg(fmt, ##__VA_ARGS__)
|
||||
|
|
|
@ -308,8 +308,11 @@ static void am79c961_timer(unsigned long data)
|
|||
struct net_device *dev = (struct net_device *)data;
|
||||
struct dev_priv *priv = netdev_priv(dev);
|
||||
unsigned int lnkstat, carrier;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&priv->chip_lock, flags);
|
||||
lnkstat = read_ireg(dev->base_addr, ISALED0) & ISALED0_LNKST;
|
||||
spin_unlock_irqrestore(&priv->chip_lock, flags);
|
||||
carrier = netif_carrier_ok(dev);
|
||||
|
||||
if (lnkstat && !carrier) {
|
||||
|
|
|
@ -169,7 +169,9 @@ void pci_configure_slot(struct pci_dev *dev)
|
|||
(dev->class >> 8) == PCI_CLASS_BRIDGE_PCI)))
|
||||
return;
|
||||
|
||||
pcie_bus_configure_settings(dev->bus, dev->bus->self->pcie_mpss);
|
||||
if (dev->bus && dev->bus->self)
|
||||
pcie_bus_configure_settings(dev->bus,
|
||||
dev->bus->self->pcie_mpss);
|
||||
|
||||
memset(&hpp, 0, sizeof(hpp));
|
||||
ret = pci_get_hp_params(dev, &hpp);
|
||||
|
|
Some files were not shown because too many files have changed in this diff.