Linux 4.1-rc5

-----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1
 
 iQEcBAABAgAGBQJVYnloAAoJEHm+PkMAQRiGCgkH/j3r2djOOm4h83FXrShaHORY
 p8TBI3FNj4fzLk2PfzqbmiDw2T2CwygB+pxb2Ac9CE99epw8qPk2SRvPXBpdKR7t
 lolhhwfzApLJMZbhzNLVywUCDUhFoiEWRhmPqIfA3WXFcIW3t5VNXAoIFjV5HFr6
 sYUlaxSI1XiQ5tldVv8D6YSFHms41pisziBIZmzhIUg10P6Vv3D0FbE74fjAJwx0
 +08zj3EO7yQMv7Aeeq8F8AJ3628142rcZf0NWF5ohlKLRK3gt0cl9jO5U4Co2dDt
 29v03LP5EI6jDKkIbuWlqRMq9YxJz7N3wnkzV0EJiqXucoqPLFDqzbxB4gnS1pI=
 =7vbA
 -----END PGP SIGNATURE-----

Merge tag 'v4.1-rc5' into x86/mm, to refresh the tree before applying new changes

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Ingo Molnar 2015-05-27 14:40:10 +02:00
commit d563a6bb3d
495 changed files with 4900 additions and 4458 deletions

View File

@ -17,7 +17,8 @@ Required properties:
- #clock-cells: from common clock binding; shall be set to 1.
- clocks: from common clock binding; list of parent clock
handles, shall be xtal reference clock or xtal and clkin for
si5351c only.
si5351c only. Corresponding clock input names are "xtal" and
"clkin" respectively.
- #address-cells: shall be set to 1.
- #size-cells: shall be set to 0.
@ -71,6 +72,7 @@ i2c-master-node {
/* connect xtal input to 25MHz reference */
clocks = <&ref25>;
clock-names = "xtal";
/* connect xtal input as source of pll0 and pll1 */
silabs,pll-source = <0 0>, <1 0>;

View File

@ -8,8 +8,8 @@ Required properties:
is not Linux-only, but in case of Linux, see the "m25p_ids"
table in drivers/mtd/devices/m25p80.c for the list of supported
chips.
Must also include "nor-jedec" for any SPI NOR flash that can be
identified by the JEDEC READ ID opcode (0x9F).
Must also include "jedec,spi-nor" for any SPI NOR flash that can
be identified by the JEDEC READ ID opcode (0x9F).
- reg : Chip-Select number
- spi-max-frequency : Maximum frequency of the SPI bus the chip can operate at
@ -25,7 +25,7 @@ Example:
flash: m25p80@0 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "spansion,m25p80", "nor-jedec";
compatible = "spansion,m25p80", "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <40000000>;
m25p,fast-read;

View File

@ -3,7 +3,8 @@
Required properties:
- compatible: Should be "cdns,[<chip>-]{emac}"
Use "cdns,at91rm9200-emac" Atmel at91rm9200 SoC.
or the generic form: "cdns,emac".
Use "cdns,zynq-gem" Xilinx Zynq-7xxx SoC.
Or the generic form: "cdns,emac".
- reg: Address and length of the register set for the device
- interrupts: Should contain macb interrupt
- phy-mode: see ethernet.txt file in the same directory.

View File

@ -198,6 +198,9 @@ TTY_IO_ERROR If set, causes all subsequent userspace read/write
TTY_OTHER_CLOSED Device is a pty and the other side has closed.
TTY_OTHER_DONE Device is a pty and the other side has closed and
all pending input processing has been completed.
TTY_NO_WRITE_SPLIT Prevent driver from splitting up writes into
smaller chunks.
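
As a purely illustrative sketch, a driver that wants its writes passed through without chunking could set this bit from its install hook. The driver and function names below are hypothetical and not taken from any in-tree driver:

#include <linux/tty.h>

/* Hypothetical install hook: mark the tty so writes are not split up. */
static int example_tty_install(struct tty_driver *driver,
			       struct tty_struct *tty)
{
	set_bit(TTY_NO_WRITE_SPLIT, &tty->flags);
	return tty_standard_install(driver, tty);
}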

View File

@ -169,6 +169,10 @@ Shadow pages contain the following information:
Contains the value of cr4.smep && !cr0.wp for which the page is valid
(pages for which this is true are different from other pages; see the
treatment of cr0.wp=0 below).
role.smap_andnot_wp:
Contains the value of cr4.smap && !cr0.wp for which the page is valid
(pages for which this is true are different from other pages; see the
treatment of cr0.wp=0 below).
gfn:
Either the guest page table containing the translations shadowed by this
page, or the base page frame for linear translations. See role.direct.
@ -344,10 +348,16 @@ on fault type:
(user write faults generate a #PF)
In the first case there is an additional complication if CR4.SMEP is
enabled: since we've turned the page into a kernel page, the kernel may now
execute it. We handle this by also setting spte.nx. If we get a user
fetch or read fault, we'll change spte.u=1 and spte.nx=gpte.nx back.
In the first case there are two additional complications:
- if CR4.SMEP is enabled: since we've turned the page into a kernel page,
the kernel may now execute it. We handle this by also setting spte.nx.
If we get a user fetch or read fault, we'll change spte.u=1 and
spte.nx=gpte.nx back.
- if CR4.SMAP is disabled: since the page has been turned into a kernel
page, it cannot be reused when CR4.SMAP is enabled. We record
CR4.SMAP && !CR0.WP in the shadow page's role to avoid this case. Note
that we do not care about the case where CR4.SMAP is enabled, since KVM
will directly inject a #PF into the guest due to the failed permission
check.
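
As a rough illustration of how these two role bits combine CR4 and CR0
state, here is a minimal C sketch. The structure and helper names below
are hypothetical and only mirror the description above; they are not the
actual KVM implementation (the real bitfields live in union
kvm_mmu_page_role).

#include <linux/types.h>

/* Illustrative only: hypothetical names mirroring the text above. */
struct example_mmu_page_role {
	unsigned smep_andnot_wp:1;	/* cr4.smep && !cr0.wp */
	unsigned smap_andnot_wp:1;	/* cr4.smap && !cr0.wp */
};

static void example_fill_role_bits(struct example_mmu_page_role *role,
				   bool cr4_smep, bool cr4_smap, bool cr0_wp)
{
	role->smep_andnot_wp = cr4_smep && !cr0_wp;
	role->smap_andnot_wp = cr4_smap && !cr0_wp;
}

With cr0.wp=0 and cr4.smap=1 this yields smap_andnot_wp=1, matching the
case discussed above.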
To prevent an spte that was converted into a kernel page with cr0.wp=0
from being written by the kernel after cr0.wp has changed to 1, we make

View File

@ -974,7 +974,7 @@ S: Maintained
ARM/CORTINA SYSTEMS GEMINI ARM ARCHITECTURE
M: Hans Ulli Kroll <ulli.kroll@googlemail.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
T: git git://git.berlios.de/gemini-board
T: git git://github.com/ulli-kroll/linux.git
S: Maintained
F: arch/arm/mach-gemini/
@ -1193,7 +1193,7 @@ ARM/MAGICIAN MACHINE SUPPORT
M: Philipp Zabel <philipp.zabel@gmail.com>
S: Maintained
ARM/Marvell Armada 370 and Armada XP SOC support
ARM/Marvell Kirkwood and Armada 370, 375, 38x, XP SOC support
M: Jason Cooper <jason@lakedaemon.net>
M: Andrew Lunn <andrew@lunn.ch>
M: Gregory Clement <gregory.clement@free-electrons.com>
@ -1202,12 +1202,17 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-mvebu/
F: drivers/rtc/rtc-armada38x.c
F: arch/arm/boot/dts/armada*
F: arch/arm/boot/dts/kirkwood*
ARM/Marvell Berlin SoC support
M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-berlin/
F: arch/arm/boot/dts/berlin*
ARM/Marvell Dove/MV78xx0/Orion SOC support
M: Jason Cooper <jason@lakedaemon.net>
@ -1220,6 +1225,9 @@ F: arch/arm/mach-dove/
F: arch/arm/mach-mv78xx0/
F: arch/arm/mach-orion5x/
F: arch/arm/plat-orion/
F: arch/arm/boot/dts/dove*
F: arch/arm/boot/dts/orion5x*
ARM/Orion SoC/Technologic Systems TS-78xx platform support
M: Alexander Clouter <alex@digriz.org.uk>
@ -1371,6 +1379,7 @@ N: rockchip
ARM/SAMSUNG EXYNOS ARM ARCHITECTURES
M: Kukjin Kim <kgene@kernel.org>
M: Krzysztof Kozlowski <k.kozlowski@samsung.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
S: Maintained
@ -1935,7 +1944,7 @@ S: Maintained
F: drivers/net/wireless/b43legacy/
BACKLIGHT CLASS/SUBSYSTEM
M: Jingoo Han <jg1.han@samsung.com>
M: Jingoo Han <jingoohan1@gmail.com>
M: Lee Jones <lee.jones@linaro.org>
S: Maintained
F: drivers/video/backlight/
@ -3816,10 +3825,11 @@ M: David Woodhouse <dwmw2@infradead.org>
L: linux-embedded@vger.kernel.org
S: Maintained
EMULEX LPFC FC SCSI DRIVER
M: James Smart <james.smart@emulex.com>
EMULEX/AVAGO LPFC FC/FCOE SCSI DRIVER
M: James Smart <james.smart@avagotech.com>
M: Dick Kennedy <dick.kennedy@avagotech.com>
L: linux-scsi@vger.kernel.org
W: http://sourceforge.net/projects/lpfcxxxx
W: http://www.avagotech.com
S: Supported
F: drivers/scsi/lpfc/
@ -3918,7 +3928,7 @@ F: drivers/extcon/
F: Documentation/extcon/
EXYNOS DP DRIVER
M: Jingoo Han <jg1.han@samsung.com>
M: Jingoo Han <jingoohan1@gmail.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
F: drivers/gpu/drm/exynos/exynos_dp*
@ -4377,11 +4387,10 @@ F: fs/gfs2/
F: include/uapi/linux/gfs2_ondisk.h
GIGASET ISDN DRIVERS
M: Hansjoerg Lipp <hjlipp@web.de>
M: Tilman Schmidt <tilman@imap.cc>
M: Paul Bolle <pebolle@tiscali.nl>
L: gigaset307x-common@lists.sourceforge.net
W: http://gigaset307x.sourceforge.net/
S: Maintained
S: Odd Fixes
F: Documentation/isdn/README.gigaset
F: drivers/isdn/gigaset/
F: include/uapi/linux/gigaset_dev.h
@ -4528,7 +4537,7 @@ M: Jean Delvare <jdelvare@suse.de>
M: Guenter Roeck <linux@roeck-us.net>
L: lm-sensors@lm-sensors.org
W: http://www.lm-sensors.org/
T: quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/
T: quilt http://jdelvare.nerim.net/devel/linux/jdelvare-hwmon/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git
S: Maintained
F: Documentation/hwmon/
@ -5054,7 +5063,7 @@ M: Hal Rosenstock <hal.rosenstock@gmail.com>
L: linux-rdma@vger.kernel.org
W: http://www.openfabrics.org/
Q: http://patchwork.kernel.org/project/linux-rdma/list/
T: git git://github.com/dledford/linux.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma.git
S: Supported
F: Documentation/infiniband/
F: drivers/infiniband/
@ -6959,6 +6968,17 @@ T: git git://git.rocketboards.org/linux-socfpga-next.git
S: Maintained
F: arch/nios2/
NOKIA N900 POWER SUPPLY DRIVERS
M: Pali Rohár <pali.rohar@gmail.com>
S: Maintained
F: include/linux/power/bq2415x_charger.h
F: include/linux/power/bq27x00_battery.h
F: include/linux/power/isp1704_charger.h
F: drivers/power/bq2415x_charger.c
F: drivers/power/bq27x00_battery.c
F: drivers/power/isp1704_charger.c
F: drivers/power/rx51_battery.c
NTB DRIVER
M: Jon Mason <jdmason@kudzu.us>
M: Dave Jiang <dave.jiang@intel.com>
@ -7547,7 +7567,7 @@ S: Maintained
F: drivers/pci/host/*rcar*
PCI DRIVER FOR SAMSUNG EXYNOS
M: Jingoo Han <jg1.han@samsung.com>
M: Jingoo Han <jingoohan1@gmail.com>
L: linux-pci@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
@ -7555,7 +7575,7 @@ S: Maintained
F: drivers/pci/host/pci-exynos.c
PCI DRIVER FOR SYNOPSIS DESIGNWARE
M: Jingoo Han <jg1.han@samsung.com>
M: Jingoo Han <jingoohan1@gmail.com>
L: linux-pci@vger.kernel.org
S: Maintained
F: drivers/pci/host/*designware*
@ -8511,7 +8531,7 @@ S: Supported
F: sound/soc/samsung/
SAMSUNG FRAMEBUFFER DRIVER
M: Jingoo Han <jg1.han@samsung.com>
M: Jingoo Han <jingoohan1@gmail.com>
L: linux-fbdev@vger.kernel.org
S: Maintained
F: drivers/video/fbdev/s3c-fb.c
@ -8810,16 +8830,19 @@ F: drivers/misc/phantom.c
F: include/uapi/linux/phantom.h
SERVER ENGINES 10Gbps iSCSI - BladeEngine 2 DRIVER
M: Jayamohan Kallickal <jayamohan.kallickal@emulex.com>
M: Jayamohan Kallickal <jayamohan.kallickal@avagotech.com>
M: Minh Tran <minh.tran@avagotech.com>
M: John Soni Jose <sony.john-n@avagotech.com>
L: linux-scsi@vger.kernel.org
W: http://www.emulex.com
W: http://www.avagotech.com
S: Supported
F: drivers/scsi/be2iscsi/
SERVER ENGINES 10Gbps NIC - BladeEngine 2 DRIVER
M: Sathya Perla <sathya.perla@emulex.com>
M: Subbu Seetharaman <subbu.seetharaman@emulex.com>
M: Ajit Khaparde <ajit.khaparde@emulex.com>
Emulex 10Gbps NIC BE2, BE3-R, Lancer, Skyhawk-R DRIVER
M: Sathya Perla <sathya.perla@avagotech.com>
M: Ajit Khaparde <ajit.khaparde@avagotech.com>
M: Padmanabh Ratnakar <padmanabh.ratnakar@avagotech.com>
M: Sriharsha Basavapatna <sriharsha.basavapatna@avagotech.com>
L: netdev@vger.kernel.org
W: http://www.emulex.com
S: Supported

View File

@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 1
SUBLEVEL = 0
EXTRAVERSION = -rc3
EXTRAVERSION = -rc5
NAME = Hurr durr I'ma sheep
# *DOCUMENTATION*

View File

@ -2,19 +2,6 @@ menu "Kernel hacking"
source "lib/Kconfig.debug"
config EARLY_PRINTK
bool "Early printk" if EMBEDDED
default y
help
Write kernel log output directly into the VGA buffer or to a serial
port.
This is useful for kernel debugging when your machine crashes very
early before the console code is initialized. For normal operation
it is not recommended because it looks ugly and doesn't cooperate
with klogd/syslogd or the X server. You should normally say N here,
unless you want to debug such a crash.
config 16KSTACKS
bool "Use 16Kb for kernel stacks instead of 8Kb"
help

View File

@ -99,7 +99,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
atomic_ops_unlock(flags); \
}
#define ATOMIC_OP_RETURN(op, c_op) \
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned long flags; \

View File

@ -266,7 +266,7 @@ static inline void __cache_line_loop(unsigned long paddr, unsigned long vaddr,
* Machine specific helpers for Entire D-Cache or Per Line ops
*/
static unsigned int __before_dc_op(const int op)
static inline unsigned int __before_dc_op(const int op)
{
unsigned int reg = reg;
@ -284,7 +284,7 @@ static unsigned int __before_dc_op(const int op)
return reg;
}
static void __after_dc_op(const int op, unsigned int reg)
static inline void __after_dc_op(const int op, unsigned int reg)
{
if (op & OP_FLUSH) /* flush / flush-n-inv both wait */
while (read_aux_reg(ARC_REG_DC_CTRL) & DC_CTRL_FLUSH_STATUS);

View File

@ -69,7 +69,7 @@
mainpll: mainpll {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <2000000000>;
clock-frequency = <1000000000>;
};
/* 25 MHz reference crystal */
refclk: oscillator {

View File

@ -585,7 +585,7 @@
mainpll: mainpll {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <2000000000>;
clock-frequency = <1000000000>;
};
/* 25 MHz reference crystal */

View File

@ -502,7 +502,7 @@
mainpll: mainpll {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <2000000000>;
clock-frequency = <1000000000>;
};
};
};

View File

@ -87,6 +87,7 @@
/* connect xtal input to 25MHz reference */
clocks = <&ref25>;
clock-names = "xtal";
/* connect xtal input as source of pll0 and pll1 */
silabs,pll-source = <0 0>, <1 0>;

View File

@ -711,6 +711,7 @@
num-slots = <1>;
broken-cd;
cap-sdio-irq;
keep-power-in-suspend;
card-detect-delay = <200>;
clock-frequency = <400000000>;
samsung,dw-mshc-ciu-div = <1>;

View File

@ -674,6 +674,7 @@
num-slots = <1>;
broken-cd;
cap-sdio-irq;
keep-power-in-suspend;
card-detect-delay = <200>;
clock-frequency = <400000000>;
samsung,dw-mshc-ciu-div = <1>;

View File

@ -826,7 +826,7 @@
<&tegra_car TEGRA124_CLK_PLL_U>,
<&tegra_car TEGRA124_CLK_USBD>;
clock-names = "reg", "pll_u", "utmi-pads";
resets = <&tegra_car 59>, <&tegra_car 22>;
resets = <&tegra_car 22>, <&tegra_car 22>;
reset-names = "usb", "utmi-pads";
nvidia,hssync-start-delay = <0>;
nvidia,idle-wait-delay = <17>;
@ -838,6 +838,7 @@
nvidia,hssquelch-level = <2>;
nvidia,hsdiscon-level = <5>;
nvidia,xcvr-hsslew = <12>;
nvidia,has-utmi-pad-registers;
status = "disabled";
};
@ -862,7 +863,7 @@
<&tegra_car TEGRA124_CLK_PLL_U>,
<&tegra_car TEGRA124_CLK_USBD>;
clock-names = "reg", "pll_u", "utmi-pads";
resets = <&tegra_car 22>, <&tegra_car 22>;
resets = <&tegra_car 58>, <&tegra_car 22>;
reset-names = "usb", "utmi-pads";
nvidia,hssync-start-delay = <0>;
nvidia,idle-wait-delay = <17>;
@ -874,7 +875,6 @@
nvidia,hssquelch-level = <2>;
nvidia,hsdiscon-level = <5>;
nvidia,xcvr-hsslew = <12>;
nvidia,has-utmi-pad-registers;
status = "disabled";
};
@ -899,7 +899,7 @@
<&tegra_car TEGRA124_CLK_PLL_U>,
<&tegra_car TEGRA124_CLK_USBD>;
clock-names = "reg", "pll_u", "utmi-pads";
resets = <&tegra_car 58>, <&tegra_car 22>;
resets = <&tegra_car 59>, <&tegra_car 22>;
reset-names = "usb", "utmi-pads";
nvidia,hssync-start-delay = <0>;
nvidia,idle-wait-delay = <17>;

View File

@ -191,6 +191,7 @@
compatible = "arm,cortex-a15-pmu";
interrupts = <0 68 4>,
<0 69 4>;
interrupt-affinity = <&cpu0>, <&cpu1>;
};
oscclk6a: oscclk6a {

View File

@ -33,28 +33,28 @@
#address-cells = <1>;
#size-cells = <0>;
cpu@0 {
A9_0: cpu@0 {
device_type = "cpu";
compatible = "arm,cortex-a9";
reg = <0>;
next-level-cache = <&L2>;
};
cpu@1 {
A9_1: cpu@1 {
device_type = "cpu";
compatible = "arm,cortex-a9";
reg = <1>;
next-level-cache = <&L2>;
};
cpu@2 {
A9_2: cpu@2 {
device_type = "cpu";
compatible = "arm,cortex-a9";
reg = <2>;
next-level-cache = <&L2>;
};
cpu@3 {
A9_3: cpu@3 {
device_type = "cpu";
compatible = "arm,cortex-a9";
reg = <3>;
@ -170,6 +170,7 @@
compatible = "arm,pl310-cache";
reg = <0x1e00a000 0x1000>;
interrupts = <0 43 4>;
cache-unified;
cache-level = <2>;
arm,data-latency = <1 1 1>;
arm,tag-latency = <1 1 1>;
@ -181,6 +182,8 @@
<0 61 4>,
<0 62 4>,
<0 63 4>;
interrupt-affinity = <&A9_0>, <&A9_1>, <&A9_2>, <&A9_3>;
};
dcc {

View File

@ -193,7 +193,7 @@
};
gem0: ethernet@e000b000 {
compatible = "cdns,gem";
compatible = "cdns,zynq-gem";
reg = <0xe000b000 0x1000>;
status = "disabled";
interrupts = <0 22 4>;
@ -204,7 +204,7 @@
};
gem1: ethernet@e000c000 {
compatible = "cdns,gem";
compatible = "cdns,zynq-gem";
reg = <0xe000c000 0x1000>;
status = "disabled";
interrupts = <0 45 4>;

View File

@ -159,6 +159,8 @@ extern void exynos_enter_aftr(void);
extern struct cpuidle_exynos_data cpuidle_coupled_exynos_data;
extern void exynos_set_delayed_reset_assertion(bool enable);
extern void s5p_init_cpu(void __iomem *cpuid_addr);
extern unsigned int samsung_rev(void);
extern void __iomem *cpu_boot_reg_base(void);

View File

@ -166,6 +166,33 @@ static void __init exynos_init_io(void)
exynos_map_io();
}
/*
* Set or clear the USE_DELAYED_RESET_ASSERTION option. Used by smp code
* and suspend.
*
* This is necessary only on Exynos4 SoCs. While the system is running,
* USE_DELAYED_RESET_ASSERTION should be set so that the ARM CLK clock-down
* feature can properly detect the global idle state when a secondary CPU
* is powered down.
*
* However, it should not be set when the system is going into suspend.
*/
void exynos_set_delayed_reset_assertion(bool enable)
{
if (of_machine_is_compatible("samsung,exynos4")) {
unsigned int tmp, core_id;
for (core_id = 0; core_id < num_possible_cpus(); core_id++) {
tmp = pmu_raw_readl(EXYNOS_ARM_CORE_OPTION(core_id));
if (enable)
tmp |= S5P_USE_DELAYED_RESET_ASSERTION;
else
tmp &= ~(S5P_USE_DELAYED_RESET_ASSERTION);
pmu_raw_writel(tmp, EXYNOS_ARM_CORE_OPTION(core_id));
}
}
}
/*
* Apparently, these SoCs are not able to wake-up from suspend using
* the PMU. Too bad. Should they suddenly become capable of such a

View File

@ -34,30 +34,6 @@
extern void exynos4_secondary_startup(void);
/*
* Set or clear the USE_DELAYED_RESET_ASSERTION option, set on Exynos4 SoCs
* during hot-(un)plugging CPUx.
*
* The feature can be cleared safely during first boot of secondary CPU.
*
* Exynos4 SoCs require setting USE_DELAYED_RESET_ASSERTION during powering
* down a CPU so the CPU idle clock down feature could properly detect global
* idle state when CPUx is off.
*/
static void exynos_set_delayed_reset_assertion(u32 core_id, bool enable)
{
if (soc_is_exynos4()) {
unsigned int tmp;
tmp = pmu_raw_readl(EXYNOS_ARM_CORE_OPTION(core_id));
if (enable)
tmp |= S5P_USE_DELAYED_RESET_ASSERTION;
else
tmp &= ~(S5P_USE_DELAYED_RESET_ASSERTION);
pmu_raw_writel(tmp, EXYNOS_ARM_CORE_OPTION(core_id));
}
}
#ifdef CONFIG_HOTPLUG_CPU
static inline void cpu_leave_lowpower(u32 core_id)
{
@ -73,8 +49,6 @@ static inline void cpu_leave_lowpower(u32 core_id)
: "=&r" (v)
: "Ir" (CR_C), "Ir" (0x40)
: "cc");
exynos_set_delayed_reset_assertion(core_id, false);
}
static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
@ -87,14 +61,6 @@ static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
/* Turn the CPU off on next WFI instruction. */
exynos_cpu_power_down(core_id);
/*
* Exynos4 SoCs require setting
* USE_DELAYED_RESET_ASSERTION so the CPU idle
* clock down feature could properly detect
* global idle state when CPUx is off.
*/
exynos_set_delayed_reset_assertion(core_id, true);
wfi();
if (pen_release == core_id) {
@ -371,9 +337,6 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
udelay(10);
}
/* No harm if this is called during first boot of secondary CPU */
exynos_set_delayed_reset_assertion(core_id, false);
/*
* now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
@ -420,6 +383,8 @@ static void __init exynos_smp_prepare_cpus(unsigned int max_cpus)
exynos_sysram_init();
exynos_set_delayed_reset_assertion(true);
if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
scu_enable(scu_base_addr());

View File

@ -188,7 +188,7 @@ no_clk:
args.np = np;
args.args_count = 0;
child_domain = of_genpd_get_from_provider(&args);
if (!child_domain)
if (IS_ERR(child_domain))
continue;
if (of_parse_phandle_with_args(np, "power-domains",
@ -196,7 +196,7 @@ no_clk:
continue;
parent_domain = of_genpd_get_from_provider(&args);
if (!parent_domain)
if (IS_ERR(parent_domain))
continue;
if (pm_genpd_add_subdomain(parent_domain, child_domain))

View File

@ -342,6 +342,8 @@ static void exynos_pm_enter_sleep_mode(void)
static void exynos_pm_prepare(void)
{
exynos_set_delayed_reset_assertion(false);
/* Set wake-up mask registers */
exynos_pm_set_wakeup_mask();
@ -482,6 +484,7 @@ early_wakeup:
/* Clear SLEEP mode set in INFORM1 */
pmu_raw_writel(0x0, S5P_INFORM1);
exynos_set_delayed_reset_assertion(true);
}
static void exynos3250_pm_resume(void)
@ -723,8 +726,10 @@ void __init exynos_pm_init(void)
return;
}
if (WARN_ON(!of_find_property(np, "interrupt-controller", NULL)))
if (WARN_ON(!of_find_property(np, "interrupt-controller", NULL))) {
pr_warn("Outdated DT detected, suspend/resume will NOT work\n");
return;
}
pm_data = (const struct exynos_pm_data *) match->data;

View File

@ -12,6 +12,8 @@
#ifndef __GEMINI_COMMON_H__
#define __GEMINI_COMMON_H__
#include <linux/reboot.h>
struct mtd_partition;
extern void gemini_map_io(void);
@ -26,6 +28,6 @@ extern int platform_register_pflash(unsigned int size,
struct mtd_partition *parts,
unsigned int nr_parts);
extern void gemini_restart(char mode, const char *cmd);
extern void gemini_restart(enum reboot_mode mode, const char *cmd);
#endif /* __GEMINI_COMMON_H__ */

View File

@ -14,7 +14,9 @@
#include <mach/hardware.h>
#include <mach/global_reg.h>
void gemini_restart(char mode, const char *cmd)
#include "common.h"
void gemini_restart(enum reboot_mode mode, const char *cmd)
{
__raw_writel(RESET_GLOBAL | RESET_CPU1,
IO_ADDRESS(GEMINI_GLOBAL_BASE) + GLOBAL_RESET);

View File

@ -171,6 +171,12 @@
*/
#define LINKS_PER_OCP_IF 2
/*
* Address offset (in bytes) between the reset control and the reset
* status registers: 4 bytes on OMAP4
*/
#define OMAP4_RST_CTRL_ST_OFFSET 4
/**
* struct omap_hwmod_soc_ops - fn ptrs for some SoC-specific operations
* @enable_module: function to enable a module (via MODULEMODE)
@ -3016,10 +3022,12 @@ static int _omap4_deassert_hardreset(struct omap_hwmod *oh,
if (ohri->st_shift)
pr_err("omap_hwmod: %s: %s: hwmod data error: OMAP4 does not support st_shift\n",
oh->name, ohri->name);
return omap_prm_deassert_hardreset(ohri->rst_shift, 0,
return omap_prm_deassert_hardreset(ohri->rst_shift, ohri->rst_shift,
oh->clkdm->pwrdm.ptr->prcm_partition,
oh->clkdm->pwrdm.ptr->prcm_offs,
oh->prcm.omap4.rstctrl_offs, 0);
oh->prcm.omap4.rstctrl_offs,
oh->prcm.omap4.rstctrl_offs +
OMAP4_RST_CTRL_ST_OFFSET);
}
/**
@ -3047,27 +3055,6 @@ static int _omap4_is_hardreset_asserted(struct omap_hwmod *oh,
oh->prcm.omap4.rstctrl_offs);
}
/**
* _am33xx_assert_hardreset - call AM33XX PRM hardreset fn with hwmod args
* @oh: struct omap_hwmod * to assert hardreset
* @ohri: hardreset line data
*
* Call am33xx_prminst_assert_hardreset() with parameters extracted
* from the hwmod @oh and the hardreset line data @ohri. Only
* intended for use as an soc_ops function pointer. Passes along the
* return value from am33xx_prminst_assert_hardreset(). XXX This
* function is scheduled for removal when the PRM code is moved into
* drivers/.
*/
static int _am33xx_assert_hardreset(struct omap_hwmod *oh,
struct omap_hwmod_rst_info *ohri)
{
return omap_prm_assert_hardreset(ohri->rst_shift, 0,
oh->clkdm->pwrdm.ptr->prcm_offs,
oh->prcm.omap4.rstctrl_offs);
}
/**
* _am33xx_deassert_hardreset - call AM33XX PRM hardreset fn with hwmod args
* @oh: struct omap_hwmod * to deassert hardreset
@ -3083,32 +3070,13 @@ static int _am33xx_assert_hardreset(struct omap_hwmod *oh,
static int _am33xx_deassert_hardreset(struct omap_hwmod *oh,
struct omap_hwmod_rst_info *ohri)
{
return omap_prm_deassert_hardreset(ohri->rst_shift, ohri->st_shift, 0,
return omap_prm_deassert_hardreset(ohri->rst_shift, ohri->st_shift,
oh->clkdm->pwrdm.ptr->prcm_partition,
oh->clkdm->pwrdm.ptr->prcm_offs,
oh->prcm.omap4.rstctrl_offs,
oh->prcm.omap4.rstst_offs);
}
/**
* _am33xx_is_hardreset_asserted - call AM33XX PRM hardreset fn with hwmod args
* @oh: struct omap_hwmod * to test hardreset
* @ohri: hardreset line data
*
* Call am33xx_prminst_is_hardreset_asserted() with parameters
* extracted from the hwmod @oh and the hardreset line data @ohri.
* Only intended for use as an soc_ops function pointer. Passes along
* the return value from am33xx_prminst_is_hardreset_asserted(). XXX
* This function is scheduled for removal when the PRM code is moved
* into drivers/.
*/
static int _am33xx_is_hardreset_asserted(struct omap_hwmod *oh,
struct omap_hwmod_rst_info *ohri)
{
return omap_prm_is_hardreset_asserted(ohri->rst_shift, 0,
oh->clkdm->pwrdm.ptr->prcm_offs,
oh->prcm.omap4.rstctrl_offs);
}
/* Public functions */
u32 omap_hwmod_read(struct omap_hwmod *oh, u16 reg_offs)
@ -3908,21 +3876,13 @@ void __init omap_hwmod_init(void)
soc_ops.init_clkdm = _init_clkdm;
soc_ops.update_context_lost = _omap4_update_context_lost;
soc_ops.get_context_lost = _omap4_get_context_lost;
} else if (soc_is_am43xx()) {
} else if (cpu_is_ti816x() || soc_is_am33xx() || soc_is_am43xx()) {
soc_ops.enable_module = _omap4_enable_module;
soc_ops.disable_module = _omap4_disable_module;
soc_ops.wait_target_ready = _omap4_wait_target_ready;
soc_ops.assert_hardreset = _omap4_assert_hardreset;
soc_ops.deassert_hardreset = _omap4_deassert_hardreset;
soc_ops.is_hardreset_asserted = _omap4_is_hardreset_asserted;
soc_ops.init_clkdm = _init_clkdm;
} else if (cpu_is_ti816x() || soc_is_am33xx()) {
soc_ops.enable_module = _omap4_enable_module;
soc_ops.disable_module = _omap4_disable_module;
soc_ops.wait_target_ready = _omap4_wait_target_ready;
soc_ops.assert_hardreset = _am33xx_assert_hardreset;
soc_ops.deassert_hardreset = _am33xx_deassert_hardreset;
soc_ops.is_hardreset_asserted = _am33xx_is_hardreset_asserted;
soc_ops.is_hardreset_asserted = _omap4_is_hardreset_asserted;
soc_ops.init_clkdm = _init_clkdm;
} else {
WARN(1, "omap_hwmod: unknown SoC type\n");

View File

@ -544,6 +544,44 @@ static struct omap_hwmod am43xx_hdq1w_hwmod = {
},
};
static struct omap_hwmod_class_sysconfig am43xx_vpfe_sysc = {
.rev_offs = 0x0,
.sysc_offs = 0x104,
.sysc_flags = SYSC_HAS_MIDLEMODE | SYSC_HAS_SIDLEMODE,
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
MSTANDBY_FORCE | MSTANDBY_SMART | MSTANDBY_NO),
.sysc_fields = &omap_hwmod_sysc_type2,
};
static struct omap_hwmod_class am43xx_vpfe_hwmod_class = {
.name = "vpfe",
.sysc = &am43xx_vpfe_sysc,
};
static struct omap_hwmod am43xx_vpfe0_hwmod = {
.name = "vpfe0",
.class = &am43xx_vpfe_hwmod_class,
.clkdm_name = "l3s_clkdm",
.prcm = {
.omap4 = {
.modulemode = MODULEMODE_SWCTRL,
.clkctrl_offs = AM43XX_CM_PER_VPFE0_CLKCTRL_OFFSET,
},
},
};
static struct omap_hwmod am43xx_vpfe1_hwmod = {
.name = "vpfe1",
.class = &am43xx_vpfe_hwmod_class,
.clkdm_name = "l3s_clkdm",
.prcm = {
.omap4 = {
.modulemode = MODULEMODE_SWCTRL,
.clkctrl_offs = AM43XX_CM_PER_VPFE1_CLKCTRL_OFFSET,
},
},
};
/* Interfaces */
static struct omap_hwmod_ocp_if am43xx_l3_main__l4_hs = {
.master = &am33xx_l3_main_hwmod,
@ -825,6 +863,34 @@ static struct omap_hwmod_ocp_if am43xx_l4_ls__hdq1w = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
static struct omap_hwmod_ocp_if am43xx_l3__vpfe0 = {
.master = &am43xx_vpfe0_hwmod,
.slave = &am33xx_l3_main_hwmod,
.clk = "l3_gclk",
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
static struct omap_hwmod_ocp_if am43xx_l3__vpfe1 = {
.master = &am43xx_vpfe1_hwmod,
.slave = &am33xx_l3_main_hwmod,
.clk = "l3_gclk",
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
static struct omap_hwmod_ocp_if am43xx_l4_ls__vpfe0 = {
.master = &am33xx_l4_ls_hwmod,
.slave = &am43xx_vpfe0_hwmod,
.clk = "l4ls_gclk",
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
static struct omap_hwmod_ocp_if am43xx_l4_ls__vpfe1 = {
.master = &am33xx_l4_ls_hwmod,
.slave = &am43xx_vpfe1_hwmod,
.clk = "l4ls_gclk",
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
static struct omap_hwmod_ocp_if *am43xx_hwmod_ocp_ifs[] __initdata = {
&am33xx_l4_wkup__synctimer,
&am43xx_l4_ls__timer8,
@ -925,6 +991,10 @@ static struct omap_hwmod_ocp_if *am43xx_hwmod_ocp_ifs[] __initdata = {
&am43xx_l4_ls__dss_dispc,
&am43xx_l4_ls__dss_rfbi,
&am43xx_l4_ls__hdq1w,
&am43xx_l3__vpfe0,
&am43xx_l3__vpfe1,
&am43xx_l4_ls__vpfe0,
&am43xx_l4_ls__vpfe1,
NULL,
};

View File

@ -144,5 +144,6 @@
#define AM43XX_CM_PER_USBPHYOCP2SCP1_CLKCTRL_OFFSET 0x05C0
#define AM43XX_CM_PER_DSS_CLKCTRL_OFFSET 0x0a20
#define AM43XX_CM_PER_HDQ1W_CLKCTRL_OFFSET 0x04a0
#define AM43XX_CM_PER_VPFE0_CLKCTRL_OFFSET 0x0068
#define AM43XX_CM_PER_VPFE1_CLKCTRL_OFFSET 0x0070
#endif

View File

@ -87,12 +87,6 @@ u32 omap4_prminst_rmw_inst_reg_bits(u32 mask, u32 bits, u8 part, s16 inst,
return v;
}
/*
* Address offset (in bytes) between the reset control and the reset
* status registers: 4 bytes on OMAP4
*/
#define OMAP4_RST_CTRL_ST_OFFSET 4
/**
* omap4_prminst_is_hardreset_asserted - read the HW reset line state of
* submodules contained in the hwmod module
@ -141,11 +135,11 @@ int omap4_prminst_assert_hardreset(u8 shift, u8 part, s16 inst,
* omap4_prminst_deassert_hardreset - deassert a submodule hardreset line and
* wait
* @shift: register bit shift corresponding to the reset line to deassert
* @st_shift: status bit offset, not used for OMAP4+
* @st_shift: status bit offset corresponding to the reset line
* @part: PRM partition
* @inst: PRM instance offset
* @rstctrl_offs: reset register offset
* @st_offs: reset status register offset, not used for OMAP4+
* @rstst_offs: reset status register offset
*
* Some IPs like dsp, ipu or iva contain processors that require an HW
* reset line to be asserted / deasserted in order to fully enable the
@ -157,11 +151,11 @@ int omap4_prminst_assert_hardreset(u8 shift, u8 part, s16 inst,
* of reset, or -EBUSY if the submodule did not exit reset promptly.
*/
int omap4_prminst_deassert_hardreset(u8 shift, u8 st_shift, u8 part, s16 inst,
u16 rstctrl_offs, u16 st_offs)
u16 rstctrl_offs, u16 rstst_offs)
{
int c;
u32 mask = 1 << shift;
u16 rstst_offs = rstctrl_offs + OMAP4_RST_CTRL_ST_OFFSET;
u32 st_mask = 1 << st_shift;
/* Check the current status to avoid de-asserting the line twice */
if (omap4_prminst_is_hardreset_asserted(shift, part, inst,
@ -169,13 +163,13 @@ int omap4_prminst_deassert_hardreset(u8 shift, u8 st_shift, u8 part, s16 inst,
return -EEXIST;
/* Clear the reset status by writing 1 to the status bit */
omap4_prminst_rmw_inst_reg_bits(0xffffffff, mask, part, inst,
omap4_prminst_rmw_inst_reg_bits(0xffffffff, st_mask, part, inst,
rstst_offs);
/* de-assert the reset control line */
omap4_prminst_rmw_inst_reg_bits(mask, 0, part, inst, rstctrl_offs);
/* wait the status to be set */
omap_test_timeout(omap4_prminst_is_hardreset_asserted(shift, part, inst,
rstst_offs),
omap_test_timeout(omap4_prminst_is_hardreset_asserted(st_shift, part,
inst, rstst_offs),
MAX_MODULE_HARDRESET_WAIT, c);
return (c == MAX_MODULE_HARDRESET_WAIT) ? -EBUSY : 0;

View File

@ -298,14 +298,11 @@ static int __init omap_dm_timer_init_one(struct omap_dm_timer *timer,
if (IS_ERR(src))
return PTR_ERR(src);
if (clk_get_parent(timer->fclk) != src) {
r = clk_set_parent(timer->fclk, src);
if (r < 0) {
pr_warn("%s: %s cannot set source\n", __func__,
oh->name);
clk_put(src);
return r;
}
r = clk_set_parent(timer->fclk, src);
if (r < 0) {
pr_warn("%s: %s cannot set source\n", __func__, oh->name);
clk_put(src);
return r;
}
clk_put(src);

View File

@ -44,11 +44,9 @@ static void __iomem *rk3288_bootram_base;
static phys_addr_t rk3288_bootram_phy;
static struct regmap *pmu_regmap;
static struct regmap *grf_regmap;
static struct regmap *sgrf_regmap;
static u32 rk3288_pmu_pwr_mode_con;
static u32 rk3288_grf_soc_con0;
static u32 rk3288_sgrf_soc_con0;
static inline u32 rk3288_l2_config(void)
@ -72,25 +70,11 @@ static void rk3288_slp_mode_set(int level)
{
u32 mode_set, mode_set1;
regmap_read(grf_regmap, RK3288_GRF_SOC_CON0, &rk3288_grf_soc_con0);
regmap_read(sgrf_regmap, RK3288_SGRF_SOC_CON0, &rk3288_sgrf_soc_con0);
regmap_read(pmu_regmap, RK3288_PMU_PWRMODE_CON,
&rk3288_pmu_pwr_mode_con);
/*
* We need to set the GRF_FORCE_JTAG bit here for the debug module,
* otherwise it may become inaccessible after resume.
* This creates a potential security issue, as the sdmmc pins may
* accept jtag data for a short time during resume if no card is
* inserted.
* But this is of course also true for the regular boot, before we
* turn off the jtag/sdmmc autodetect.
*/
regmap_write(grf_regmap, RK3288_GRF_SOC_CON0, GRF_FORCE_JTAG |
GRF_FORCE_JTAG_WRITE);
/*
* SGRF_FAST_BOOT_EN - system to boot from FAST_BOOT_ADDR
* PCLK_WDT_GATE - disable WDT during suspend.
@ -151,9 +135,6 @@ static void rk3288_slp_mode_set_resume(void)
regmap_write(sgrf_regmap, RK3288_SGRF_SOC_CON0,
rk3288_sgrf_soc_con0 | SGRF_PCLK_WDT_GATE_WRITE
| SGRF_FAST_BOOT_EN_WRITE);
regmap_write(grf_regmap, RK3288_GRF_SOC_CON0, rk3288_grf_soc_con0 |
GRF_FORCE_JTAG_WRITE);
}
static int rockchip_lpmode_enter(unsigned long arg)
@ -212,13 +193,6 @@ static int rk3288_suspend_init(struct device_node *np)
return PTR_ERR(pmu_regmap);
}
grf_regmap = syscon_regmap_lookup_by_compatible(
"rockchip,rk3288-grf");
if (IS_ERR(grf_regmap)) {
pr_err("%s: could not find grf regmap\n", __func__);
return PTR_ERR(pmu_regmap);
}
sram_np = of_find_compatible_node(NULL, NULL,
"rockchip,rk3288-pmu-sram");
if (!sram_np) {

View File

@ -48,10 +48,6 @@ static inline void rockchip_suspend_init(void)
#define RK3288_PMU_WAKEUP_RST_CLR_CNT 0x44
#define RK3288_PMU_PWRMODE_CON1 0x90
#define RK3288_GRF_SOC_CON0 0x244
#define GRF_FORCE_JTAG BIT(12)
#define GRF_FORCE_JTAG_WRITE BIT(28)
#define RK3288_SGRF_SOC_CON0 (0x0000)
#define RK3288_SGRF_FAST_BOOT_ADDR (0x0120)
#define SGRF_PCLK_WDT_GATE BIT(6)

View File

@ -54,6 +54,7 @@
#define SEEN_DATA (1 << (BPF_MEMWORDS + 3))
#define FLAG_NEED_X_RESET (1 << 0)
#define FLAG_IMM_OVERFLOW (1 << 1)
struct jit_ctx {
const struct bpf_prog *skf;
@ -293,6 +294,15 @@ static u16 imm_offset(u32 k, struct jit_ctx *ctx)
/* PC in ARM mode == address of the instruction + 8 */
imm = offset - (8 + ctx->idx * 4);
if (imm & ~0xfff) {
/*
* The literal pool is too far away, so record this in the flags.
* Unfortunately we can only detect it on the second pass.
*/
ctx->flags |= FLAG_IMM_OVERFLOW;
return 0;
}
return imm;
}
@ -449,10 +459,21 @@ static inline void emit_udiv(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx)
return;
}
#endif
if (rm != ARM_R0)
emit(ARM_MOV_R(ARM_R0, rm), ctx);
/*
* For BPF_ALU | BPF_DIV | BPF_K instructions, rm is ARM_R4
* (r_A) and rn is ARM_R0 (r_scratch), so load rn into ARM_R1
* first to avoid accidentally overwriting ARM_R0 with rm
* before ARM_R0 has been copied into ARM_R1.
*
* For BPF_ALU | BPF_DIV | BPF_X, rm is ARM_R4 (r_A) and rn is
* ARM_R5 (r_X), so there are no particular register overlap
* issues.
*/
if (rn != ARM_R1)
emit(ARM_MOV_R(ARM_R1, rn), ctx);
if (rm != ARM_R0)
emit(ARM_MOV_R(ARM_R0, rm), ctx);
ctx->seen |= SEEN_CALL;
emit_mov_i(ARM_R3, (u32)jit_udiv, ctx);
@ -855,6 +876,14 @@ b_epilogue:
default:
return -1;
}
if (ctx->flags & FLAG_IMM_OVERFLOW)
/*
* this instruction generated an overflow when
* trying to access the literal pool, so
* delegate this filter to the kernel interpreter.
*/
return -1;
}
/* compute offsets only during the first pass */
@ -917,7 +946,14 @@ void bpf_jit_compile(struct bpf_prog *fp)
ctx.idx = 0;
build_prologue(&ctx);
build_body(&ctx);
if (build_body(&ctx) < 0) {
#if __LINUX_ARM_ARCH__ < 7
if (ctx.imm_count)
kfree(ctx.imms);
#endif
bpf_jit_binary_free(header);
goto out;
}
build_epilogue(&ctx);
flush_icache_range((u32)ctx.target, (u32)(ctx.target + ctx.idx));

View File

@ -272,6 +272,7 @@ void xen_arch_pre_suspend(void) { }
void xen_arch_post_suspend(int suspend_cancelled) { }
void xen_timer_resume(void) { }
void xen_arch_resume(void) { }
void xen_arch_suspend(void) { }
/* In the hypervisor.S file. */

View File

@ -21,6 +21,20 @@
clock-output-names = "juno_mb:clk25mhz";
};
v2m_refclk1mhz: refclk1mhz {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <1000000>;
clock-output-names = "juno_mb:refclk1mhz";
};
v2m_refclk32khz: refclk32khz {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <32768>;
clock-output-names = "juno_mb:refclk32khz";
};
motherboard {
compatible = "arm,vexpress,v2p-p1", "simple-bus";
#address-cells = <2>; /* SMB chipselect number and offset */
@ -66,6 +80,15 @@
#size-cells = <1>;
ranges = <0 3 0 0x200000>;
v2m_sysctl: sysctl@020000 {
compatible = "arm,sp810", "arm,primecell";
reg = <0x020000 0x1000>;
clocks = <&v2m_refclk32khz>, <&v2m_refclk1mhz>, <&mb_clk24mhz>;
clock-names = "refclk", "timclk", "apb_pclk";
#clock-cells = <1>;
clock-output-names = "timerclken0", "timerclken1", "timerclken2", "timerclken3";
};
mmci@050000 {
compatible = "arm,pl180", "arm,primecell";
reg = <0x050000 0x1000>;
@ -106,16 +129,16 @@
compatible = "arm,sp804", "arm,primecell";
reg = <0x110000 0x10000>;
interrupts = <9>;
clocks = <&mb_clk24mhz>, <&soc_smc50mhz>;
clock-names = "timclken1", "apb_pclk";
clocks = <&v2m_sysctl 0>, <&v2m_sysctl 1>, <&mb_clk24mhz>;
clock-names = "timclken1", "timclken2", "apb_pclk";
};
v2m_timer23: timer@120000 {
compatible = "arm,sp804", "arm,primecell";
reg = <0x120000 0x10000>;
interrupts = <9>;
clocks = <&mb_clk24mhz>, <&soc_smc50mhz>;
clock-names = "timclken1", "apb_pclk";
clocks = <&v2m_sysctl 2>, <&v2m_sysctl 3>, <&mb_clk24mhz>;
clock-names = "timclken1", "timclken2", "apb_pclk";
};
rtc@170000 {

View File

@ -147,13 +147,21 @@ static int chksum_final(struct shash_desc *desc, u8 *out)
{
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
put_unaligned_le32(ctx->crc, out);
return 0;
}
static int chksumc_final(struct shash_desc *desc, u8 *out)
{
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
put_unaligned_le32(~ctx->crc, out);
return 0;
}
static int __chksum_finup(u32 crc, const u8 *data, unsigned int len, u8 *out)
{
put_unaligned_le32(~crc32_arm64_le_hw(crc, data, len), out);
put_unaligned_le32(crc32_arm64_le_hw(crc, data, len), out);
return 0;
}
@ -199,6 +207,14 @@ static int crc32_cra_init(struct crypto_tfm *tfm)
{
struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
mctx->key = 0;
return 0;
}
static int crc32c_cra_init(struct crypto_tfm *tfm)
{
struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
mctx->key = ~0;
return 0;
}
@ -229,7 +245,7 @@ static struct shash_alg crc32c_alg = {
.setkey = chksum_setkey,
.init = chksum_init,
.update = chksumc_update,
.final = chksum_final,
.final = chksumc_final,
.finup = chksumc_finup,
.digest = chksumc_digest,
.descsize = sizeof(struct chksum_desc_ctx),
@ -241,7 +257,7 @@ static struct shash_alg crc32c_alg = {
.cra_alignmask = 0,
.cra_ctxsize = sizeof(struct chksum_ctx),
.cra_module = THIS_MODULE,
.cra_init = crc32_cra_init,
.cra_init = crc32c_cra_init,
}
};

View File

@ -74,6 +74,9 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
static int sha1_ce_final(struct shash_desc *desc, u8 *out)
{
struct sha1_ce_state *sctx = shash_desc_ctx(desc);
sctx->finalize = 0;
kernel_neon_begin_partial(16);
sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform);
kernel_neon_end();

View File

@ -75,6 +75,9 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
static int sha256_ce_final(struct shash_desc *desc, u8 *out)
{
struct sha256_ce_state *sctx = shash_desc_ctx(desc);
sctx->finalize = 0;
kernel_neon_begin_partial(28);
sha256_base_do_finalize(desc, (sha256_block_fn *)sha2_ce_transform);
kernel_neon_end();

View File

@ -24,7 +24,6 @@
#include <asm/cacheflush.h>
#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/insn.h>
#include <linux/stop_machine.h>
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
@ -34,48 +33,6 @@ struct alt_region {
struct alt_instr *end;
};
/*
* Decode the imm field of a b/bl instruction, and return the byte
* offset as a signed value (so it can be used when computing a new
* branch target).
*/
static s32 get_branch_offset(u32 insn)
{
s32 imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
/* sign-extend the immediate before turning it into a byte offset */
return (imm << 6) >> 4;
}
static u32 get_alt_insn(u8 *insnptr, u8 *altinsnptr)
{
u32 insn;
aarch64_insn_read(altinsnptr, &insn);
/* Stop the world on instructions we don't support... */
BUG_ON(aarch64_insn_is_cbz(insn));
BUG_ON(aarch64_insn_is_cbnz(insn));
BUG_ON(aarch64_insn_is_bcond(insn));
/* ... and there is probably more. */
if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
enum aarch64_insn_branch_type type;
unsigned long target;
if (aarch64_insn_is_b(insn))
type = AARCH64_INSN_BRANCH_NOLINK;
else
type = AARCH64_INSN_BRANCH_LINK;
target = (unsigned long)altinsnptr + get_branch_offset(insn);
insn = aarch64_insn_gen_branch_imm((unsigned long)insnptr,
target, type);
}
return insn;
}
static int __apply_alternatives(void *alt_region)
{
struct alt_instr *alt;
@ -83,9 +40,6 @@ static int __apply_alternatives(void *alt_region)
u8 *origptr, *replptr;
for (alt = region->begin; alt < region->end; alt++) {
u32 insn;
int i;
if (!cpus_have_cap(alt->cpufeature))
continue;
@ -95,12 +49,7 @@ static int __apply_alternatives(void *alt_region)
origptr = (u8 *)&alt->orig_offset + alt->orig_offset;
replptr = (u8 *)&alt->alt_offset + alt->alt_offset;
for (i = 0; i < alt->alt_len; i += sizeof(insn)) {
insn = get_alt_insn(origptr + i, replptr + i);
aarch64_insn_write(origptr + i, insn);
}
memcpy(origptr, replptr, alt->alt_len);
flush_icache_range((uintptr_t)origptr,
(uintptr_t)(origptr + alt->alt_len));
}

View File

@ -1315,15 +1315,15 @@ static int armpmu_device_probe(struct platform_device *pdev)
if (!cpu_pmu)
return -ENODEV;
irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
if (!irqs)
return -ENOMEM;
/* Don't bother with PPIs; they're already affine */
irq = platform_get_irq(pdev, 0);
if (irq >= 0 && irq_is_percpu(irq))
return 0;
irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
if (!irqs)
return -ENOMEM;
for (i = 0; i < pdev->num_resources; ++i) {
struct device_node *dn;
int cpu;

View File

@ -328,10 +328,12 @@ static int ptdump_init(void)
for (j = 0; j < pg_level[i].num; j++)
pg_level[i].mask |= pg_level[i].bits[j].mask;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
address_markers[VMEMMAP_START_NR].start_address =
(unsigned long)virt_to_page(PAGE_OFFSET);
address_markers[VMEMMAP_END_NR].start_address =
(unsigned long)virt_to_page(high_memory);
#endif
pe = debugfs_create_file("kernel_page_tables", 0400, NULL, NULL,
&ptdump_fops);

View File

@ -487,7 +487,7 @@ emit_cond_jmp:
return -EINVAL;
}
imm64 = (u64)insn1.imm << 32 | imm;
imm64 = (u64)insn1.imm << 32 | (u32)imm;
emit_a64_mov_i64(dst, imm64, ctx);
return 1;

View File

@ -277,7 +277,7 @@ LDFLAGS += -m $(ld-emul)
ifdef CONFIG_MIPS
CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -x c /dev/null | \
egrep -vw '__GNUC_(|MINOR_|PATCHLEVEL_)_' | \
sed -e "s/^\#define /-D'/" -e "s/ /'='/" -e "s/$$/'/")
sed -e "s/^\#define /-D'/" -e "s/ /'='/" -e "s/$$/'/" -e 's/\$$/&&/g')
ifdef CONFIG_64BIT
CHECKFLAGS += -m64
endif

View File

@ -304,7 +304,7 @@ do { \
\
current->thread.abi = &mips_abi; \
\
current->thread.fpu.fcr31 = current_cpu_data.fpu_csr31; \
current->thread.fpu.fcr31 = boot_cpu_data.fpu_csr31; \
} while (0)
#endif /* CONFIG_32BIT */
@ -366,7 +366,7 @@ do { \
else \
current->thread.abi = &mips_abi; \
\
current->thread.fpu.fcr31 = current_cpu_data.fpu_csr31; \
current->thread.fpu.fcr31 = boot_cpu_data.fpu_csr31; \
\
p = personality(current->personality); \
if (p != PER_LINUX32 && p != PER_LINUX) \

View File

@ -45,7 +45,7 @@ extern int __cpu_logical_map[NR_CPUS];
#define SMP_DUMP 0x8
#define SMP_ASK_C0COUNT 0x10
extern volatile cpumask_t cpu_callin_map;
extern cpumask_t cpu_callin_map;
/* Mask of CPUs which are currently definitely operating coherently */
extern cpumask_t cpu_coherent_mask;

View File

@ -76,14 +76,6 @@ int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
/* Let's see if this is an O32 ELF */
if (ehdr32->e_ident[EI_CLASS] == ELFCLASS32) {
/* FR = 1 for N32 */
if (ehdr32->e_flags & EF_MIPS_ABI2)
state->overall_fp_mode = FP_FR1;
else
/* Set a good default FPU mode for O32 */
state->overall_fp_mode = cpu_has_mips_r6 ?
FP_FRE : FP_FR0;
if (ehdr32->e_flags & EF_MIPS_FP64) {
/*
* Set MIPS_ABI_FP_OLD_64 for EF_MIPS_FP64. We will override it
@ -104,9 +96,6 @@ int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
(char *)&abiflags,
sizeof(abiflags));
} else {
/* FR=1 is really the only option for 64-bit */
state->overall_fp_mode = FP_FR1;
if (phdr64->p_type != PT_MIPS_ABIFLAGS)
return 0;
if (phdr64->p_filesz < sizeof(abiflags))
@ -137,6 +126,7 @@ int arch_check_elf(void *_ehdr, bool has_interpreter,
struct elf32_hdr *ehdr = _ehdr;
struct mode_req prog_req, interp_req;
int fp_abi, interp_fp_abi, abi0, abi1, max_abi;
bool is_mips64;
if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
return 0;
@ -152,10 +142,22 @@ int arch_check_elf(void *_ehdr, bool has_interpreter,
abi0 = abi1 = fp_abi;
}
/* ABI limits. O32 = FP_64A, N32/N64 = FP_SOFT */
max_abi = ((ehdr->e_ident[EI_CLASS] == ELFCLASS32) &&
(!(ehdr->e_flags & EF_MIPS_ABI2))) ?
MIPS_ABI_FP_64A : MIPS_ABI_FP_SOFT;
is_mips64 = (ehdr->e_ident[EI_CLASS] == ELFCLASS64) ||
(ehdr->e_flags & EF_MIPS_ABI2);
if (is_mips64) {
/* MIPS64 code always uses FR=1, thus the default is easy */
state->overall_fp_mode = FP_FR1;
/* Disallow access to the various FPXX & FP64 ABIs */
max_abi = MIPS_ABI_FP_SOFT;
} else {
/* Default to a mode capable of running code expecting FR=0 */
state->overall_fp_mode = cpu_has_mips_r6 ? FP_FRE : FP_FR0;
/* Allow all ABIs we know about */
max_abi = MIPS_ABI_FP_64A;
}
if ((abi0 > max_abi && abi0 != MIPS_ABI_FP_UNKNOWN) ||
(abi1 > max_abi && abi1 != MIPS_ABI_FP_UNKNOWN))

View File

@ -176,7 +176,7 @@ int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
__get_user(value, data + 64);
fcr31 = child->thread.fpu.fcr31;
mask = current_cpu_data.fpu_msk31;
mask = boot_cpu_data.fpu_msk31;
child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask);
/* FIR may not be written. */

View File

@ -92,7 +92,7 @@ static void __init cps_smp_setup(void)
#ifdef CONFIG_MIPS_MT_FPAFF
/* If we have an FPU, enroll ourselves in the FPU-full mask */
if (cpu_has_fpu)
cpu_set(0, mt_fpu_cpumask);
cpumask_set_cpu(0, &mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */
}

View File

@ -43,7 +43,7 @@
#include <asm/time.h>
#include <asm/setup.h>
volatile cpumask_t cpu_callin_map; /* Bitmask of started secondaries */
cpumask_t cpu_callin_map; /* Bitmask of started secondaries */
int __cpu_number_map[NR_CPUS]; /* Map physical to logical */
EXPORT_SYMBOL(__cpu_number_map);
@ -218,8 +218,10 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
/*
* Trust is futile. We should really have timeouts ...
*/
while (!cpumask_test_cpu(cpu, &cpu_callin_map))
while (!cpumask_test_cpu(cpu, &cpu_callin_map)) {
udelay(100);
schedule();
}
synchronise_count_master(cpu);
return 0;

View File

@ -269,7 +269,6 @@ static void __show_regs(const struct pt_regs *regs)
*/
printk("epc : %0*lx %pS\n", field, regs->cp0_epc,
(void *) regs->cp0_epc);
printk(" %s\n", print_tainted());
printk("ra : %0*lx %pS\n", field, regs->regs[31],
(void *) regs->regs[31]);

View File

@ -2389,7 +2389,6 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
{
unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
enum emulation_result er = EMULATE_DONE;
unsigned long curr_pc;
if (run->mmio.len > sizeof(*gpr)) {
kvm_err("Bad MMIO length: %d", run->mmio.len);
@ -2397,11 +2396,6 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
goto done;
}
/*
* Update PC and hold onto current PC in case there is
* an error and we want to rollback the PC
*/
curr_pc = vcpu->arch.pc;
er = update_pc(vcpu, vcpu->arch.pending_load_cause);
if (er == EMULATE_FAIL)
return er;

View File

@ -889,7 +889,7 @@ static inline void cop1_cfc(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
break;
case FPCREG_RID:
value = current_cpu_data.fpu_id;
value = boot_cpu_data.fpu_id;
break;
default:
@ -921,7 +921,7 @@ static inline void cop1_ctc(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
(void *)xcp->cp0_epc, MIPSInst_RT(ir), value);
/* Preserve read-only bits. */
mask = current_cpu_data.fpu_msk31;
mask = boot_cpu_data.fpu_msk31;
fcr31 = (value & ~mask) | (fcr31 & mask);
break;

View File

@ -495,7 +495,7 @@ static void r4k_tlb_configure(void)
if (cpu_has_rixi) {
/*
* Enable the no read, no exec bits, and enable large virtual
* Enable the no read, no exec bits, and enable large physical
* address.
*/
#ifdef CONFIG_64BIT

View File

@ -130,9 +130,9 @@ struct platform_device ip32_rtc_device = {
.resource = ip32_rtc_resources,
};
static int __init sgio2_rtc_devinit(void)
static __init int sgio2_rtc_devinit(void)
{
return platform_device_register(&ip32_rtc_device);
}
device_initcall(sgio2_cmos_devinit);
device_initcall(sgio2_rtc_devinit);

View File

@ -348,6 +348,10 @@ struct pt_regs; /* forward declaration... */
#define ELF_HWCAP 0
#define STACK_RND_MASK (is_32bit_task() ? \
0x7ff >> (PAGE_SHIFT - 12) : \
0x3ffff >> (PAGE_SHIFT - 12))
struct mm_struct;
extern unsigned long arch_randomize_brk(struct mm_struct *);
#define arch_randomize_brk arch_randomize_brk

View File

@ -181,9 +181,12 @@ int dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *r)
return 1;
}
/*
* Copy architecture-specific thread state
*/
int
copy_thread(unsigned long clone_flags, unsigned long usp,
unsigned long arg, struct task_struct *p)
unsigned long kthread_arg, struct task_struct *p)
{
struct pt_regs *cregs = &(p->thread.regs);
void *stack = task_stack_page(p);
@ -195,11 +198,10 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
extern void * const child_return;
if (unlikely(p->flags & PF_KTHREAD)) {
/* kernel thread */
memset(cregs, 0, sizeof(struct pt_regs));
if (!usp) /* idle thread */
return 0;
/* kernel thread */
/* Must exit via ret_from_kernel_thread in order
* to call schedule_tail()
*/
@ -215,7 +217,7 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
#else
cregs->gr[26] = usp;
#endif
cregs->gr[25] = arg;
cregs->gr[25] = kthread_arg;
} else {
/* user thread */
/* usp must be word aligned. This also prevents users from

View File

@ -77,6 +77,9 @@ static unsigned long mmap_upper_limit(void)
if (stack_base > STACK_SIZE_MAX)
stack_base = STACK_SIZE_MAX;
/* Add space for stack randomization. */
stack_base += (STACK_RND_MASK << PAGE_SHIFT);
return PAGE_ALIGN(STACK_TOP - stack_base);
}

View File

@ -73,7 +73,7 @@ void save_mce_event(struct pt_regs *regs, long handled,
uint64_t nip, uint64_t addr)
{
uint64_t srr1;
int index = __this_cpu_inc_return(mce_nest_count);
int index = __this_cpu_inc_return(mce_nest_count) - 1;
struct machine_check_event *mce = this_cpu_ptr(&mce_event[index]);
/*
@ -184,7 +184,7 @@ void machine_check_queue_event(void)
if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
return;
index = __this_cpu_inc_return(mce_queue_count);
index = __this_cpu_inc_return(mce_queue_count) - 1;
/* If queue is full, just return for now. */
if (index >= MAX_MC_EVT) {
__this_cpu_dec(mce_queue_count);

View File

@ -213,6 +213,7 @@ SECTIONS
*(.opd)
}
. = ALIGN(256);
.got : AT(ADDR(.got) - LOAD_OFFSET) {
__toc_start = .;
#ifndef CONFIG_RELOCATABLE

View File

@ -1952,7 +1952,7 @@ static void post_guest_process(struct kvmppc_vcore *vc)
*/
static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
{
struct kvm_vcpu *vcpu;
struct kvm_vcpu *vcpu, *vnext;
int i;
int srcu_idx;
@ -1982,7 +1982,8 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
*/
if ((threads_per_core > 1) &&
((vc->num_threads > threads_per_subcore) || !on_primary_thread())) {
list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
arch.run_list) {
vcpu->arch.ret = -EBUSY;
kvmppc_remove_runnable(vc, vcpu);
wake_up(&vcpu->arch.cpu_run);

View File

@ -689,27 +689,34 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
pte_t *ptep;
struct page *page;
pte_t *ptep, pte;
unsigned shift;
unsigned long mask, flags;
struct page *page = ERR_PTR(-EINVAL);
local_irq_save(flags);
ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift);
if (!ptep)
goto no_page;
pte = READ_ONCE(*ptep);
/*
* Verify it is a huge page else bail.
* Transparent hugepages are handled by generic code. We can skip them
* here.
*/
local_irq_save(flags);
ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift);
if (!shift || pmd_trans_huge(__pmd(pte_val(pte))))
goto no_page;
/* Verify it is a huge page else bail. */
if (!ptep || !shift || pmd_trans_huge(*(pmd_t *)ptep)) {
local_irq_restore(flags);
return ERR_PTR(-EINVAL);
if (!pte_present(pte)) {
page = NULL;
goto no_page;
}
mask = (1UL << shift) - 1;
page = pte_page(*ptep);
page = pte_page(pte);
if (page)
page += (address & mask) / PAGE_SIZE;
no_page:
local_irq_restore(flags);
return page;
}

View File

@ -839,6 +839,17 @@ pmd_t pmdp_get_and_clear(struct mm_struct *mm,
* hash fault look at them.
*/
memset(pgtable, 0, PTE_FRAG_SIZE);
/*
* Serialize against find_linux_pte_or_hugepte, which does a lock-less
* lookup in the page tables with local interrupts disabled. For huge
* pages it casts pmd_t to pte_t. Since the format of pte_t differs from
* that of pmd_t, we want to prevent the pmd from transitioning from
* pointing to a page table to pointing to a huge page (and back) while
* interrupts are disabled. We clear the pmd so it can be replaced with a
* page table pointer in various code paths, so make sure we wait for any
* parallel find_linux_pte_or_hugepte lookup to finish.
*/
kick_all_cpus_sync();
return old_pmd;
}

View File

@ -16,11 +16,12 @@
#define GHASH_DIGEST_SIZE 16
struct ghash_ctx {
u8 icv[16];
u8 key[16];
u8 key[GHASH_BLOCK_SIZE];
};
struct ghash_desc_ctx {
u8 icv[GHASH_BLOCK_SIZE];
u8 key[GHASH_BLOCK_SIZE];
u8 buffer[GHASH_BLOCK_SIZE];
u32 bytes;
};
@ -28,8 +29,10 @@ struct ghash_desc_ctx {
static int ghash_init(struct shash_desc *desc)
{
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
memset(dctx, 0, sizeof(*dctx));
memcpy(dctx->key, ctx->key, GHASH_BLOCK_SIZE);
return 0;
}
@ -45,7 +48,6 @@ static int ghash_setkey(struct crypto_shash *tfm,
}
memcpy(ctx->key, key, GHASH_BLOCK_SIZE);
memset(ctx->icv, 0, GHASH_BLOCK_SIZE);
return 0;
}
@ -54,7 +56,6 @@ static int ghash_update(struct shash_desc *desc,
const u8 *src, unsigned int srclen)
{
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
unsigned int n;
u8 *buf = dctx->buffer;
int ret;
@ -70,7 +71,7 @@ static int ghash_update(struct shash_desc *desc,
src += n;
if (!dctx->bytes) {
ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf,
ret = crypt_s390_kimd(KIMD_GHASH, dctx, buf,
GHASH_BLOCK_SIZE);
if (ret != GHASH_BLOCK_SIZE)
return -EIO;
@ -79,7 +80,7 @@ static int ghash_update(struct shash_desc *desc,
n = srclen & ~(GHASH_BLOCK_SIZE - 1);
if (n) {
ret = crypt_s390_kimd(KIMD_GHASH, ctx, src, n);
ret = crypt_s390_kimd(KIMD_GHASH, dctx, src, n);
if (ret != n)
return -EIO;
src += n;
@ -94,7 +95,7 @@ static int ghash_update(struct shash_desc *desc,
return 0;
}
static int ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
static int ghash_flush(struct ghash_desc_ctx *dctx)
{
u8 *buf = dctx->buffer;
int ret;
@ -104,24 +105,24 @@ static int ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
memset(pos, 0, dctx->bytes);
ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf, GHASH_BLOCK_SIZE);
ret = crypt_s390_kimd(KIMD_GHASH, dctx, buf, GHASH_BLOCK_SIZE);
if (ret != GHASH_BLOCK_SIZE)
return -EIO;
dctx->bytes = 0;
}
dctx->bytes = 0;
return 0;
}
static int ghash_final(struct shash_desc *desc, u8 *dst)
{
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
int ret;
ret = ghash_flush(ctx, dctx);
ret = ghash_flush(dctx);
if (!ret)
memcpy(dst, ctx->icv, GHASH_BLOCK_SIZE);
memcpy(dst, dctx->icv, GHASH_BLOCK_SIZE);
return ret;
}

View File

@ -125,7 +125,7 @@ static int generate_entropy(u8 *ebuf, size_t nbytes)
/* fill page with urandom bytes */
get_random_bytes(pg, PAGE_SIZE);
/* exor page with stckf values */
for (n = 0; n < sizeof(PAGE_SIZE/sizeof(u64)); n++) {
for (n = 0; n < PAGE_SIZE / sizeof(u64); n++) {
u64 *p = ((u64 *)pg) + n;
*p ^= get_tod_clock_fast();
}

View File

@ -494,7 +494,7 @@ static inline int pmd_large(pmd_t pmd)
return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}
static inline int pmd_pfn(pmd_t pmd)
static inline unsigned long pmd_pfn(pmd_t pmd)
{
unsigned long origin_mask;

View File

@ -443,8 +443,11 @@ static void bpf_jit_epilogue(struct bpf_jit *jit)
/*
* Compile one eBPF instruction into s390x code
*
* NOTE: Use noinline because for gcov (-fprofile-arcs) gcc allocates a lot of
* stack space for the large switch statement.
*/
static int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i)
static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i)
{
struct bpf_insn *insn = &fp->insnsi[i];
int jmp_off, last, insn_count = 1;
@ -588,8 +591,8 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i)
EMIT4(0xb9160000, dst_reg, rc_reg);
break;
}
case BPF_ALU64 | BPF_DIV | BPF_X: /* dst = dst / (u32) src */
case BPF_ALU64 | BPF_MOD | BPF_X: /* dst = dst % (u32) src */
case BPF_ALU64 | BPF_DIV | BPF_X: /* dst = dst / src */
case BPF_ALU64 | BPF_MOD | BPF_X: /* dst = dst % src */
{
int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
@ -602,10 +605,8 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i)
EMIT4_IMM(0xa7090000, REG_W0, 0);
/* lgr %w1,%dst */
EMIT4(0xb9040000, REG_W1, dst_reg);
/* llgfr %dst,%src (u32 cast) */
EMIT4(0xb9160000, dst_reg, src_reg);
/* dlgr %w0,%dst */
EMIT4(0xb9870000, REG_W0, dst_reg);
EMIT4(0xb9870000, REG_W0, src_reg);
/* lgr %dst,%rc */
EMIT4(0xb9040000, dst_reg, rc_reg);
break;
@ -632,8 +633,8 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i)
EMIT4(0xb9160000, dst_reg, rc_reg);
break;
}
case BPF_ALU64 | BPF_DIV | BPF_K: /* dst = dst / (u32) imm */
case BPF_ALU64 | BPF_MOD | BPF_K: /* dst = dst % (u32) imm */
case BPF_ALU64 | BPF_DIV | BPF_K: /* dst = dst / imm */
case BPF_ALU64 | BPF_MOD | BPF_K: /* dst = dst % imm */
{
int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
@ -649,7 +650,7 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i)
EMIT4(0xb9040000, REG_W1, dst_reg);
/* dlg %w0,<d(imm)>(%l) */
EMIT6_DISP_LH(0xe3000000, 0x0087, REG_W0, REG_0, REG_L,
EMIT_CONST_U64((u32) imm));
EMIT_CONST_U64(imm));
/* lgr %dst,%rc */
EMIT4(0xb9040000, dst_reg, rc_reg);
break;
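To illustrate why the u32 casts had to go (an illustration with made-up operand values, not taken from the patch): BPF_ALU64 division and modulo are defined on full 64-bit operands, so truncating the divisor changes the result whenever its upper 32 bits are non-zero.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dst = 10;
	uint64_t src = 0x100000002ULL;	/* low 32 bits are 2 */

	uint64_t truncated = dst / (uint32_t)src;	/* old JIT: 10 / 2 = 5 */
	uint64_t full = dst / src;			/* BPF_ALU64: 10 / 0x100000002 = 0 */

	printf("truncated=%llu full=%llu\n",
	       (unsigned long long)truncated, (unsigned long long)full);
	return 0;
}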

View File

@ -207,6 +207,7 @@ union kvm_mmu_page_role {
unsigned nxe:1;
unsigned cr0_wp:1;
unsigned smep_andnot_wp:1;
unsigned smap_andnot_wp:1;
};
};
@ -400,6 +401,7 @@ struct kvm_vcpu_arch {
struct kvm_mmu_memory_cache mmu_page_header_cache;
struct fpu guest_fpu;
bool eager_fpu;
u64 xcr0;
u64 guest_supported_xcr0;
u32 guest_xstate_size;
@ -743,6 +745,7 @@ struct kvm_x86_ops {
void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
void (*fpu_activate)(struct kvm_vcpu *vcpu);
void (*fpu_deactivate)(struct kvm_vcpu *vcpu);
void (*tlb_flush)(struct kvm_vcpu *vcpu);

View File

@ -1134,7 +1134,7 @@ static __initconst const u64 slm_hw_cache_extra_regs
[ C(LL ) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = SLM_DMND_READ|SLM_LLC_ACCESS,
[ C(RESULT_MISS) ] = SLM_DMND_READ|SLM_LLC_MISS,
[ C(RESULT_MISS) ] = 0,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = SLM_DMND_WRITE|SLM_LLC_ACCESS,
@ -1184,8 +1184,7 @@ static __initconst const u64 slm_hw_cache_event_ids
[ C(OP_READ) ] = {
/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
[ C(RESULT_ACCESS) ] = 0x01b7,
/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
[ C(RESULT_MISS) ] = 0x01b7,
[ C(RESULT_MISS) ] = 0,
},
[ C(OP_WRITE) ] = {
/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
@ -1217,7 +1216,7 @@ static __initconst const u64 slm_hw_cache_event_ids
[ C(ITLB) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
[ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
[ C(RESULT_MISS) ] = 0x40205, /* PAGE_WALKS.I_SIDE_WALKS */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,

View File

@ -722,6 +722,7 @@ static int __init rapl_pmu_init(void)
break;
case 60: /* Haswell */
case 69: /* Haswell-Celeron */
case 61: /* Broadwell */
rapl_cntr_mask = RAPL_IDX_HSW;
rapl_pmu_events_group.attrs = rapl_events_hsw_attr;
break;

View File

@ -16,6 +16,8 @@
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <asm/i387.h> /* For use_eager_fpu. Ugh! */
#include <asm/fpu-internal.h> /* For use_eager_fpu. Ugh! */
#include <asm/user.h>
#include <asm/xsave.h>
#include "cpuid.h"
@ -95,6 +97,8 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
vcpu->arch.eager_fpu = guest_cpuid_has_mpx(vcpu);
/*
* The existing code assumes the virtual address is 48-bit in the canonical
* address checks; exit if it is ever changed.

View File

@ -117,4 +117,12 @@ static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu)
best = kvm_find_cpuid_entry(vcpu, 7, 0);
return best && (best->ebx & bit(X86_FEATURE_RTM));
}
static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
best = kvm_find_cpuid_entry(vcpu, 7, 0);
return best && (best->ebx & bit(X86_FEATURE_MPX));
}
#endif

View File

@ -3736,8 +3736,8 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
}
}
void update_permission_bitmask(struct kvm_vcpu *vcpu,
struct kvm_mmu *mmu, bool ept)
static void update_permission_bitmask(struct kvm_vcpu *vcpu,
struct kvm_mmu *mmu, bool ept)
{
unsigned bit, byte, pfec;
u8 map;
@ -3918,6 +3918,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
{
bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
bool smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
struct kvm_mmu *context = &vcpu->arch.mmu;
MMU_WARN_ON(VALID_PAGE(context->root_hpa));
@ -3936,6 +3937,8 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
context->base_role.cr0_wp = is_write_protection(vcpu);
context->base_role.smep_andnot_wp
= smep && !is_write_protection(vcpu);
context->base_role.smap_andnot_wp
= smap && !is_write_protection(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
@ -4207,12 +4210,18 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
const u8 *new, int bytes)
{
gfn_t gfn = gpa >> PAGE_SHIFT;
union kvm_mmu_page_role mask = { .word = 0 };
struct kvm_mmu_page *sp;
LIST_HEAD(invalid_list);
u64 entry, gentry, *spte;
int npte;
bool remote_flush, local_flush, zap_page;
union kvm_mmu_page_role mask = (union kvm_mmu_page_role) {
.cr0_wp = 1,
.cr4_pae = 1,
.nxe = 1,
.smep_andnot_wp = 1,
.smap_andnot_wp = 1,
};
/*
* If we don't have indirect shadow pages, it means no page is
@ -4238,7 +4247,6 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
++vcpu->kvm->stat.mmu_pte_write;
kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
if (detect_write_misaligned(sp, gpa, bytes) ||
detect_write_flooding(sp)) {

View File

@ -71,8 +71,6 @@ enum {
int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct);
void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly);
void update_permission_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
bool ept);
static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
{
@ -166,6 +164,8 @@ static inline bool permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
int index = (pfec >> 1) +
(smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
WARN_ON(pfec & PFERR_RSVD_MASK);
return (mmu->permissions[index] >> pte_access) & 1;
}

View File

@ -718,6 +718,13 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
mmu_is_nested(vcpu));
if (likely(r != RET_MMIO_PF_INVALID))
return r;
/*
* A page fault with PFEC.RSVD = 1 is caused by a shadow
* page fault and should not be used to walk the guest
* page table.
*/
error_code &= ~PFERR_RSVD_MASK;
};
r = mmu_topup_memory_caches(vcpu);

View File

@ -4381,6 +4381,7 @@ static struct kvm_x86_ops svm_x86_ops = {
.cache_reg = svm_cache_reg,
.get_rflags = svm_get_rflags,
.set_rflags = svm_set_rflags,
.fpu_activate = svm_fpu_activate,
.fpu_deactivate = svm_fpu_deactivate,
.tlb_flush = svm_flush_tlb,

View File

@ -10185,6 +10185,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
.cache_reg = vmx_cache_reg,
.get_rflags = vmx_get_rflags,
.set_rflags = vmx_set_rflags,
.fpu_activate = vmx_fpu_activate,
.fpu_deactivate = vmx_fpu_deactivate,
.tlb_flush = vmx_flush_tlb,

View File

@ -702,8 +702,9 @@ EXPORT_SYMBOL_GPL(kvm_set_xcr);
int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
unsigned long old_cr4 = kvm_read_cr4(vcpu);
unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE |
X86_CR4_PAE | X86_CR4_SMEP;
unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
X86_CR4_SMEP | X86_CR4_SMAP;
if (cr4 & CR4_RESERVED_BITS)
return 1;
@ -744,9 +745,6 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
(!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
kvm_mmu_reset_context(vcpu);
if ((cr4 ^ old_cr4) & X86_CR4_SMAP)
update_permission_bitmask(vcpu, vcpu->arch.walk_mmu, false);
if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
kvm_update_cpuid(vcpu);
@ -6197,6 +6195,8 @@ void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
return;
page = gfn_to_page(vcpu->kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
if (is_error_page(page))
return;
kvm_x86_ops->set_apic_access_page_addr(vcpu, page_to_phys(page));
/*
@ -7060,7 +7060,9 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
fpu_save_init(&vcpu->arch.guest_fpu);
__kernel_fpu_end();
++vcpu->stat.fpu_reload;
kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
if (!vcpu->arch.eager_fpu)
kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
trace_kvm_fpu(0);
}
@ -7076,11 +7078,21 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
unsigned int id)
{
struct kvm_vcpu *vcpu;
if (check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0)
printk_once(KERN_WARNING
"kvm: SMP vm created on host with unstable TSC; "
"guest TSC will not be reliable\n");
return kvm_x86_ops->vcpu_create(kvm, id);
vcpu = kvm_x86_ops->vcpu_create(kvm, id);
/*
* Activate fpu unconditionally in case the guest needs eager FPU. It will be
* deactivated soon if it doesn't.
*/
kvm_x86_ops->fpu_activate(vcpu);
return vcpu;
}
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)

View File

@ -559,6 +559,13 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
if (is_ereg(dst_reg))
EMIT1(0x41);
EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
/* emit 'movzwl eax, ax' */
if (is_ereg(dst_reg))
EMIT3(0x45, 0x0F, 0xB7);
else
EMIT2(0x0F, 0xB7);
EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
break;
case 32:
/* emit 'bswap eax' to swap lower 4 bytes */
@ -577,6 +584,27 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
break;
case BPF_ALU | BPF_END | BPF_FROM_LE:
switch (imm32) {
case 16:
/* emit 'movzwl eax, ax' to zero extend 16-bit
* into 64 bit
*/
if (is_ereg(dst_reg))
EMIT3(0x45, 0x0F, 0xB7);
else
EMIT2(0x0F, 0xB7);
EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
break;
case 32:
/* emit 'mov eax, eax' to clear upper 32-bits */
if (is_ereg(dst_reg))
EMIT1(0x45);
EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
break;
case 64:
/* nop */
break;
}
break;
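For readers following along, here is a rough C model (my own sketch, not authoritative) of the register semantics the added movzwl / 'mov eax, eax' instructions provide: after a 16- or 32-bit endianness conversion, the upper bits of the 64-bit BPF register must be zero.

#include <stdint.h>

/* Sketch of what the emitted code computes on a little-endian host. */
static uint64_t bpf_from_be16(uint64_t reg)
{
	/* byte swap the low 16 bits, then zero-extend (the new movzwl) */
	return (uint16_t)__builtin_bswap16((uint16_t)reg);
}

static uint64_t bpf_from_le16(uint64_t reg)
{
	/* no swap needed, but the upper 48 bits must still be cleared */
	return (uint16_t)reg;
}

static uint64_t bpf_from_le32(uint64_t reg)
{
	/* 'mov eax, eax' clears the upper 32 bits */
	return (uint32_t)reg;
}

int main(void)
{
	return (bpf_from_be16(0x1234) == 0x3412 &&
		bpf_from_le16(0xdeadbeef) == 0xbeef &&
		bpf_from_le32(0xffffffff00000001ULL) == 1) ? 0 : 1;
}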
/* ST: *(u8*)(dst_reg + off) = imm */

View File

@ -51,7 +51,7 @@ VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
$(obj)/vdso64.so.dbg: $(src)/vdso.lds $(vobjs) FORCE
$(call if_changed,vdso)
HOST_EXTRACFLAGS += -I$(srctree)/tools/include -I$(srctree)/include/uapi
HOST_EXTRACFLAGS += -I$(srctree)/tools/include -I$(srctree)/include/uapi -I$(srctree)/arch/x86/include/uapi
hostprogs-y += vdso2c
quiet_cmd_vdso2c = VDSO2C $@

View File

@ -734,6 +734,8 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
}
EXPORT_SYMBOL(blk_init_queue_node);
static void blk_queue_bio(struct request_queue *q, struct bio *bio);
struct request_queue *
blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
spinlock_t *lock)
@ -1578,7 +1580,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
blk_rq_bio_prep(req->q, req, bio);
}
void blk_queue_bio(struct request_queue *q, struct bio *bio)
static void blk_queue_bio(struct request_queue *q, struct bio *bio)
{
const bool sync = !!(bio->bi_rw & REQ_SYNC);
struct blk_plug *plug;
@ -1686,7 +1688,6 @@ out_unlock:
spin_unlock_irq(q->queue_lock);
}
}
EXPORT_SYMBOL_GPL(blk_queue_bio); /* for device mapper only */
/*
* If bio->bi_dev is a partition, remap the location

View File

@ -33,7 +33,7 @@ struct aead_ctx {
/*
* RSGL_MAX_ENTRIES is an artificial limit where user space at maximum
* can cause the kernel to allocate RSGL_MAX_ENTRIES * ALG_MAX_PAGES
* bytes
* pages
*/
#define RSGL_MAX_ENTRIES ALG_MAX_PAGES
struct af_alg_sgl rsgl[RSGL_MAX_ENTRIES];
@ -435,11 +435,10 @@ static int aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored,
if (err < 0)
goto unlock;
usedpages += err;
/* chain the new scatterlist with initial list */
/* chain the new scatterlist with previous one */
if (cnt)
scatterwalk_crypto_chain(ctx->rsgl[0].sg,
ctx->rsgl[cnt].sg, 1,
sg_nents(ctx->rsgl[cnt-1].sg));
af_alg_link_sg(&ctx->rsgl[cnt-1], &ctx->rsgl[cnt]);
/* we do not need more iovecs as we have sufficient memory */
if (outlen <= usedpages)
break;

View File

@ -102,19 +102,12 @@ const struct acpi_predefined_names acpi_gbl_pre_defined_names[] = {
{"_SB_", ACPI_TYPE_DEVICE, NULL},
{"_SI_", ACPI_TYPE_LOCAL_SCOPE, NULL},
{"_TZ_", ACPI_TYPE_DEVICE, NULL},
/*
* March, 2015:
* The _REV object is in the process of being deprecated, because
* other ACPI implementations permanently return 2. Thus, it
* has little or no value. Return 2 for compatibility with
* other ACPI implementations.
*/
{"_REV", ACPI_TYPE_INTEGER, ACPI_CAST_PTR(char, 2)},
{"_REV", ACPI_TYPE_INTEGER, (char *)ACPI_CA_SUPPORT_LEVEL},
{"_OS_", ACPI_TYPE_STRING, ACPI_OS_NAME},
{"_GL_", ACPI_TYPE_MUTEX, ACPI_CAST_PTR(char, 1)},
{"_GL_", ACPI_TYPE_MUTEX, (char *)1},
#if !defined (ACPI_NO_METHOD_EXECUTION) || defined (ACPI_CONSTANT_EVAL_ONLY)
{"_OSI", ACPI_TYPE_METHOD, ACPI_CAST_PTR(char, 1)},
{"_OSI", ACPI_TYPE_METHOD, (char *)1},
#endif
/* Table terminator */

View File

@ -182,7 +182,7 @@ static void __init acpi_request_region (struct acpi_generic_address *gas,
request_mem_region(addr, length, desc);
}
static int __init acpi_reserve_resources(void)
static void __init acpi_reserve_resources(void)
{
acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
"ACPI PM1a_EVT_BLK");
@ -211,10 +211,7 @@ static int __init acpi_reserve_resources(void)
if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");
return 0;
}
device_initcall(acpi_reserve_resources);
void acpi_os_printf(const char *fmt, ...)
{
@ -1845,6 +1842,7 @@ acpi_status __init acpi_os_initialize(void)
acpi_status __init acpi_os_initialize1(void)
{
acpi_reserve_resources();
kacpid_wq = alloc_workqueue("kacpid", 0, 1);
kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);

View File

@ -270,6 +270,7 @@ config ATA_PIIX
config SATA_DWC
tristate "DesignWare Cores SATA support"
depends on 460EX
select DW_DMAC
help
This option enables support for the on-chip SATA controller of the
AppliedMicro processor 460EX.
@ -729,15 +730,6 @@ config PATA_SC1200
If unsure, say N.
config PATA_SCC
tristate "Toshiba's Cell Reference Set IDE support"
depends on PCI && PPC_CELLEB
help
This option enables support for the built-in IDE controller on
Toshiba Cell Reference Board.
If unsure, say N.
config PATA_SCH
tristate "Intel SCH PATA support"
depends on PCI

View File

@ -75,7 +75,6 @@ obj-$(CONFIG_PATA_PDC_OLD) += pata_pdc202xx_old.o
obj-$(CONFIG_PATA_RADISYS) += pata_radisys.o
obj-$(CONFIG_PATA_RDC) += pata_rdc.o
obj-$(CONFIG_PATA_SC1200) += pata_sc1200.o
obj-$(CONFIG_PATA_SCC) += pata_scc.o
obj-$(CONFIG_PATA_SCH) += pata_sch.o
obj-$(CONFIG_PATA_SERVERWORKS) += pata_serverworks.o
obj-$(CONFIG_PATA_SIL680) += pata_sil680.o

View File

@ -66,6 +66,7 @@ enum board_ids {
board_ahci_yes_fbs,
/* board IDs for specific chipsets in alphabetical order */
board_ahci_avn,
board_ahci_mcp65,
board_ahci_mcp77,
board_ahci_mcp89,
@ -84,6 +85,8 @@ enum board_ids {
static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static void ahci_mcp89_apple_enable(struct pci_dev *pdev);
static bool is_mcp89_apple(struct pci_dev *pdev);
static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
@ -107,6 +110,11 @@ static struct ata_port_operations ahci_p5wdh_ops = {
.hardreset = ahci_p5wdh_hardreset,
};
static struct ata_port_operations ahci_avn_ops = {
.inherits = &ahci_ops,
.hardreset = ahci_avn_hardreset,
};
static const struct ata_port_info ahci_port_info[] = {
/* by features */
[board_ahci] = {
@ -151,6 +159,12 @@ static const struct ata_port_info ahci_port_info[] = {
.port_ops = &ahci_ops,
},
/* by chipsets */
[board_ahci_avn] = {
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_avn_ops,
},
[board_ahci_mcp65] = {
AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA | AHCI_HFLAG_NO_PMP |
AHCI_HFLAG_YES_NCQ),
@ -290,14 +304,14 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x1f27), board_ahci }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x1f2e), board_ahci }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x1f2f), board_ahci }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x1f32), board_ahci }, /* Avoton AHCI */
{ PCI_VDEVICE(INTEL, 0x1f33), board_ahci }, /* Avoton AHCI */
{ PCI_VDEVICE(INTEL, 0x1f34), board_ahci }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x1f35), board_ahci }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x1f36), board_ahci }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x1f37), board_ahci }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x1f3e), board_ahci }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x1f3f), board_ahci }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x1f32), board_ahci_avn }, /* Avoton AHCI */
{ PCI_VDEVICE(INTEL, 0x1f33), board_ahci_avn }, /* Avoton AHCI */
{ PCI_VDEVICE(INTEL, 0x1f34), board_ahci_avn }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x1f35), board_ahci_avn }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x1f36), board_ahci_avn }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x1f37), board_ahci_avn }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x1f3e), board_ahci_avn }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x1f3f), board_ahci_avn }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Wellsburg RAID */
{ PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Wellsburg RAID */
{ PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */
@ -670,6 +684,79 @@ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
return rc;
}
/*
* ahci_avn_hardreset - attempt more aggressive recovery of Avoton ports.
*
* It has been observed with some SSDs that the timing of events in the
* link synchronization phase can leave the port in a state that can not
* be recovered by a SATA-hard-reset alone. The failing signature is
* SStatus.DET stuck at 1 ("Device presence detected but Phy
* communication not established"). It was found that unloading and
* reloading the driver when this problem occurs allows the drive
* connection to be recovered (DET advanced to 0x3). The critical
* component of reloading the driver is that the port state machines are
* reset by bouncing "port enable" in the AHCI PCS configuration
* register. So, reproduce that effect by bouncing a port whenever we
* see DET==1 after a reset.
*/
static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
struct ata_port *ap = link->ap;
struct ahci_port_priv *pp = ap->private_data;
struct ahci_host_priv *hpriv = ap->host->private_data;
u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
unsigned long tmo = deadline - jiffies;
struct ata_taskfile tf;
bool online;
int rc, i;
DPRINTK("ENTER\n");
ahci_stop_engine(ap);
for (i = 0; i < 2; i++) {
u16 val;
u32 sstatus;
int port = ap->port_no;
struct ata_host *host = ap->host;
struct pci_dev *pdev = to_pci_dev(host->dev);
/* clear D2H reception area to properly wait for D2H FIS */
ata_tf_init(link->device, &tf);
tf.command = ATA_BUSY;
ata_tf_to_fis(&tf, 0, 0, d2h_fis);
rc = sata_link_hardreset(link, timing, deadline, &online,
ahci_check_ready);
if (sata_scr_read(link, SCR_STATUS, &sstatus) != 0 ||
(sstatus & 0xf) != 1)
break;
ata_link_printk(link, KERN_INFO, "avn bounce port%d\n",
port);
pci_read_config_word(pdev, 0x92, &val);
val &= ~(1 << port);
pci_write_config_word(pdev, 0x92, val);
ata_msleep(ap, 1000);
val |= 1 << port;
pci_write_config_word(pdev, 0x92, val);
deadline += tmo;
}
hpriv->start_engine(ap);
if (online)
*class = ahci_dev_classify(ap);
DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
return rc;
}
#ifdef CONFIG_PM
static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{

View File

@ -37,7 +37,6 @@ struct st_ahci_drv_data {
struct reset_control *pwr;
struct reset_control *sw_rst;
struct reset_control *pwr_rst;
struct ahci_host_priv *hpriv;
};
static void st_ahci_configure_oob(void __iomem *mmio)
@ -55,9 +54,10 @@ static void st_ahci_configure_oob(void __iomem *mmio)
writel(new_val, mmio + ST_AHCI_OOBR);
}
static int st_ahci_deassert_resets(struct device *dev)
static int st_ahci_deassert_resets(struct ahci_host_priv *hpriv,
struct device *dev)
{
struct st_ahci_drv_data *drv_data = dev_get_drvdata(dev);
struct st_ahci_drv_data *drv_data = hpriv->plat_data;
int err;
if (drv_data->pwr) {
@ -90,8 +90,8 @@ static int st_ahci_deassert_resets(struct device *dev)
static void st_ahci_host_stop(struct ata_host *host)
{
struct ahci_host_priv *hpriv = host->private_data;
struct st_ahci_drv_data *drv_data = hpriv->plat_data;
struct device *dev = host->dev;
struct st_ahci_drv_data *drv_data = dev_get_drvdata(dev);
int err;
if (drv_data->pwr) {
@ -103,29 +103,30 @@ static void st_ahci_host_stop(struct ata_host *host)
ahci_platform_disable_resources(hpriv);
}
static int st_ahci_probe_resets(struct platform_device *pdev)
static int st_ahci_probe_resets(struct ahci_host_priv *hpriv,
struct device *dev)
{
struct st_ahci_drv_data *drv_data = platform_get_drvdata(pdev);
struct st_ahci_drv_data *drv_data = hpriv->plat_data;
drv_data->pwr = devm_reset_control_get(&pdev->dev, "pwr-dwn");
drv_data->pwr = devm_reset_control_get(dev, "pwr-dwn");
if (IS_ERR(drv_data->pwr)) {
dev_info(&pdev->dev, "power reset control not defined\n");
dev_info(dev, "power reset control not defined\n");
drv_data->pwr = NULL;
}
drv_data->sw_rst = devm_reset_control_get(&pdev->dev, "sw-rst");
drv_data->sw_rst = devm_reset_control_get(dev, "sw-rst");
if (IS_ERR(drv_data->sw_rst)) {
dev_info(&pdev->dev, "soft reset control not defined\n");
dev_info(dev, "soft reset control not defined\n");
drv_data->sw_rst = NULL;
}
drv_data->pwr_rst = devm_reset_control_get(&pdev->dev, "pwr-rst");
drv_data->pwr_rst = devm_reset_control_get(dev, "pwr-rst");
if (IS_ERR(drv_data->pwr_rst)) {
dev_dbg(&pdev->dev, "power soft reset control not defined\n");
dev_dbg(dev, "power soft reset control not defined\n");
drv_data->pwr_rst = NULL;
}
return st_ahci_deassert_resets(&pdev->dev);
return st_ahci_deassert_resets(hpriv, dev);
}
static struct ata_port_operations st_ahci_port_ops = {
@ -154,15 +155,12 @@ static int st_ahci_probe(struct platform_device *pdev)
if (!drv_data)
return -ENOMEM;
platform_set_drvdata(pdev, drv_data);
hpriv = ahci_platform_get_resources(pdev);
if (IS_ERR(hpriv))
return PTR_ERR(hpriv);
hpriv->plat_data = drv_data;
drv_data->hpriv = hpriv;
err = st_ahci_probe_resets(pdev);
err = st_ahci_probe_resets(hpriv, &pdev->dev);
if (err)
return err;
@ -170,7 +168,7 @@ static int st_ahci_probe(struct platform_device *pdev)
if (err)
return err;
st_ahci_configure_oob(drv_data->hpriv->mmio);
st_ahci_configure_oob(hpriv->mmio);
err = ahci_platform_init_host(pdev, hpriv, &st_ahci_port_info,
&ahci_platform_sht);
@ -185,8 +183,9 @@ static int st_ahci_probe(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
static int st_ahci_suspend(struct device *dev)
{
struct st_ahci_drv_data *drv_data = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = drv_data->hpriv;
struct ata_host *host = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = host->private_data;
struct st_ahci_drv_data *drv_data = hpriv->plat_data;
int err;
err = ahci_platform_suspend_host(dev);
@ -208,21 +207,21 @@ static int st_ahci_suspend(struct device *dev)
static int st_ahci_resume(struct device *dev)
{
struct st_ahci_drv_data *drv_data = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = drv_data->hpriv;
struct ata_host *host = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = host->private_data;
int err;
err = ahci_platform_enable_resources(hpriv);
if (err)
return err;
err = st_ahci_deassert_resets(dev);
err = st_ahci_deassert_resets(hpriv, dev);
if (err) {
ahci_platform_disable_resources(hpriv);
return err;
}
st_ahci_configure_oob(drv_data->hpriv->mmio);
st_ahci_configure_oob(hpriv->mmio);
return ahci_platform_resume_host(dev);
}

View File

@ -1707,8 +1707,7 @@ static void ahci_handle_port_interrupt(struct ata_port *ap,
if (unlikely(resetting))
status &= ~PORT_IRQ_BAD_PMP;
/* if LPM is enabled, PHYRDY doesn't mean anything */
if (ap->link.lpm_policy > ATA_LPM_MAX_POWER) {
if (sata_lpm_ignore_phy_events(&ap->link)) {
status &= ~PORT_IRQ_PHYRDY;
ahci_scr_write(&ap->link, SCR_ERROR, SERR_PHYRDY_CHG);
}

View File

@ -4235,7 +4235,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "Samsung SSD 850 PRO*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
{ "Samsung SSD 8*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
/*
@ -6752,6 +6752,38 @@ u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
return tmp;
}
/**
* sata_lpm_ignore_phy_events - test if PHY event should be ignored
* @link: Link receiving the event
*
* Test whether the received PHY event has to be ignored or not.
*
* LOCKING:
* None:
*
* RETURNS:
* True if the event has to be ignored.
*/
bool sata_lpm_ignore_phy_events(struct ata_link *link)
{
unsigned long lpm_timeout = link->last_lpm_change +
msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY);
/* if LPM is enabled, PHYRDY doesn't mean anything */
if (link->lpm_policy > ATA_LPM_MAX_POWER)
return true;
/* ignore the first PHY event after the LPM policy changed
* as it might be spurious
*/
if ((link->flags & ATA_LFLAG_CHANGED) &&
time_before(jiffies, lpm_timeout))
return true;
return false;
}
EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events);
/*
* Dummy port_ops
*/

View File

@ -3597,6 +3597,9 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
}
}
link->last_lpm_change = jiffies;
link->flags |= ATA_LFLAG_CHANGED;
return 0;
fail:

File diff suppressed because it is too large

View File

@ -2257,7 +2257,8 @@ static int nvme_trans_inquiry(struct nvme_ns *ns, struct sg_io_hdr *hdr,
page_code = GET_INQ_PAGE_CODE(cmd);
alloc_len = GET_INQ_ALLOC_LENGTH(cmd);
inq_response = kmalloc(alloc_len, GFP_KERNEL);
inq_response = kmalloc(max(alloc_len, STANDARD_INQUIRY_LENGTH),
GFP_KERNEL);
if (inq_response == NULL) {
res = -ENOMEM;
goto out_mem;

View File

@ -88,6 +88,7 @@ static const struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x04CA, 0x3007) },
{ USB_DEVICE(0x04CA, 0x3008) },
{ USB_DEVICE(0x04CA, 0x300b) },
{ USB_DEVICE(0x04CA, 0x300f) },
{ USB_DEVICE(0x04CA, 0x3010) },
{ USB_DEVICE(0x0930, 0x0219) },
{ USB_DEVICE(0x0930, 0x0220) },
@ -104,6 +105,7 @@ static const struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x0cf3, 0xe003) },
{ USB_DEVICE(0x0CF3, 0xE004) },
{ USB_DEVICE(0x0CF3, 0xE005) },
{ USB_DEVICE(0x0CF3, 0xE006) },
{ USB_DEVICE(0x13d3, 0x3362) },
{ USB_DEVICE(0x13d3, 0x3375) },
{ USB_DEVICE(0x13d3, 0x3393) },
@ -143,6 +145,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
@ -158,6 +161,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe006), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },

View File

@ -227,7 +227,6 @@ static void bt3c_receive(struct bt3c_info *info)
iobase = info->p_dev->resource[0]->start;
avail = bt3c_read(iobase, 0x7006);
//printk("bt3c_cs: receiving %d bytes\n", avail);
bt3c_address(iobase, 0x7480);
while (size < avail) {
@ -250,7 +249,6 @@ static void bt3c_receive(struct bt3c_info *info)
bt_cb(info->rx_skb)->pkt_type = inb(iobase + DATA_L);
inb(iobase + DATA_H);
//printk("bt3c: PACKET_TYPE=%02x\n", bt_cb(info->rx_skb)->pkt_type);
switch (bt_cb(info->rx_skb)->pkt_type) {
@ -364,7 +362,6 @@ static irqreturn_t bt3c_interrupt(int irq, void *dev_inst)
if (stat & 0x0001)
bt3c_receive(info);
if (stat & 0x0002) {
//BT_ERR("Ack (stat=0x%04x)", stat);
clear_bit(XMIT_SENDING, &(info->tx_state));
bt3c_write_wakeup(info);
}

View File

@ -95,6 +95,78 @@ int btbcm_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
}
EXPORT_SYMBOL_GPL(btbcm_set_bdaddr);
int btbcm_patchram(struct hci_dev *hdev, const char *firmware)
{
const struct hci_command_hdr *cmd;
const struct firmware *fw;
const u8 *fw_ptr;
size_t fw_size;
struct sk_buff *skb;
u16 opcode;
int err;
err = request_firmware(&fw, firmware, &hdev->dev);
if (err < 0) {
BT_INFO("%s: BCM: Patch %s not found", hdev->name, firmware);
return err;
}
/* Start Download */
skb = __hci_cmd_sync(hdev, 0xfc2e, 0, NULL, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
BT_ERR("%s: BCM: Download Minidrv command failed (%d)",
hdev->name, err);
goto done;
}
kfree_skb(skb);
/* 50 msec delay after Download Minidrv completes */
msleep(50);
fw_ptr = fw->data;
fw_size = fw->size;
while (fw_size >= sizeof(*cmd)) {
const u8 *cmd_param;
cmd = (struct hci_command_hdr *)fw_ptr;
fw_ptr += sizeof(*cmd);
fw_size -= sizeof(*cmd);
if (fw_size < cmd->plen) {
BT_ERR("%s: BCM: Patch %s is corrupted", hdev->name,
firmware);
err = -EINVAL;
goto done;
}
cmd_param = fw_ptr;
fw_ptr += cmd->plen;
fw_size -= cmd->plen;
opcode = le16_to_cpu(cmd->opcode);
skb = __hci_cmd_sync(hdev, opcode, cmd->plen, cmd_param,
HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
BT_ERR("%s: BCM: Patch command %04x failed (%d)",
hdev->name, opcode, err);
goto done;
}
kfree_skb(skb);
}
/* 250 msec delay after Launch Ram completes */
msleep(250);
done:
release_firmware(fw);
return err;
}
EXPORT_SYMBOL(btbcm_patchram);
static int btbcm_reset(struct hci_dev *hdev)
{
struct sk_buff *skb;
@ -198,12 +270,8 @@ static const struct {
int btbcm_setup_patchram(struct hci_dev *hdev)
{
const struct hci_command_hdr *cmd;
const struct firmware *fw;
const u8 *fw_ptr;
size_t fw_size;
char fw_name[64];
u16 opcode, subver, rev, pid, vid;
u16 subver, rev, pid, vid;
const char *hw_name = NULL;
struct sk_buff *skb;
struct hci_rp_read_local_version *ver;
@ -273,74 +341,19 @@ int btbcm_setup_patchram(struct hci_dev *hdev)
hw_name ? : "BCM", (subver & 0x7000) >> 13,
(subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff);
err = request_firmware(&fw, fw_name, &hdev->dev);
if (err < 0) {
BT_INFO("%s: BCM: patch %s not found", hdev->name, fw_name);
err = btbcm_patchram(hdev, fw_name);
if (err == -ENOENT)
return 0;
}
/* Start Download */
skb = __hci_cmd_sync(hdev, 0xfc2e, 0, NULL, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
BT_ERR("%s: BCM: Download Minidrv command failed (%d)",
hdev->name, err);
goto reset;
}
kfree_skb(skb);
/* 50 msec delay after Download Minidrv completes */
msleep(50);
fw_ptr = fw->data;
fw_size = fw->size;
while (fw_size >= sizeof(*cmd)) {
const u8 *cmd_param;
cmd = (struct hci_command_hdr *)fw_ptr;
fw_ptr += sizeof(*cmd);
fw_size -= sizeof(*cmd);
if (fw_size < cmd->plen) {
BT_ERR("%s: BCM: patch %s is corrupted", hdev->name,
fw_name);
err = -EINVAL;
goto reset;
}
cmd_param = fw_ptr;
fw_ptr += cmd->plen;
fw_size -= cmd->plen;
opcode = le16_to_cpu(cmd->opcode);
skb = __hci_cmd_sync(hdev, opcode, cmd->plen, cmd_param,
HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
BT_ERR("%s: BCM: patch command %04x failed (%d)",
hdev->name, opcode, err);
goto reset;
}
kfree_skb(skb);
}
/* 250 msec delay after Launch Ram completes */
msleep(250);
reset:
/* Reset */
err = btbcm_reset(hdev);
if (err)
goto done;
return err;
/* Read Local Version Info */
skb = btbcm_read_local_version(hdev);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
goto done;
}
if (IS_ERR(skb))
return PTR_ERR(skb);
ver = (struct hci_rp_read_local_version *)skb->data;
rev = le16_to_cpu(ver->hci_rev);
@ -355,10 +368,7 @@ reset:
set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
done:
release_firmware(fw);
return err;
return 0;
}
EXPORT_SYMBOL_GPL(btbcm_setup_patchram);

View File

@ -25,6 +25,7 @@
int btbcm_check_bdaddr(struct hci_dev *hdev);
int btbcm_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr);
int btbcm_patchram(struct hci_dev *hdev, const char *firmware);
int btbcm_setup_patchram(struct hci_dev *hdev);
int btbcm_setup_apple(struct hci_dev *hdev);
@ -41,6 +42,11 @@ static inline int btbcm_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
return -EOPNOTSUPP;
}
static inline int btbcm_patchram(struct hci_dev *hdev, const char *firmware)
{
return -EOPNOTSUPP;
}
static inline int btbcm_setup_patchram(struct hci_dev *hdev)
{
return 0;

View File

@ -24,6 +24,7 @@
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/firmware.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@ -57,6 +58,7 @@ static struct usb_driver btusb_driver;
#define BTUSB_AMP 0x4000
#define BTUSB_QCA_ROME 0x8000
#define BTUSB_BCM_APPLE 0x10000
#define BTUSB_REALTEK 0x20000
static const struct usb_device_id btusb_table[] = {
/* Generic Bluetooth USB device */
@ -184,6 +186,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
@ -200,6 +203,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe006), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
@ -216,6 +220,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
/* QCA ROME chipset */
{ USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0cf3, 0xe300), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0cf3, 0xe360), .driver_info = BTUSB_QCA_ROME },
@ -288,6 +293,28 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_VENDOR_AND_INTERFACE_INFO(0x8087, 0xe0, 0x01, 0x01),
.driver_info = BTUSB_IGNORE },
/* Realtek Bluetooth devices */
{ USB_VENDOR_AND_INTERFACE_INFO(0x0bda, 0xe0, 0x01, 0x01),
.driver_info = BTUSB_REALTEK },
/* Additional Realtek 8723AE Bluetooth devices */
{ USB_DEVICE(0x0930, 0x021d), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x13d3, 0x3394), .driver_info = BTUSB_REALTEK },
/* Additional Realtek 8723BE Bluetooth devices */
{ USB_DEVICE(0x0489, 0xe085), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x0489, 0xe08b), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x13d3, 0x3410), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x13d3, 0x3416), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x13d3, 0x3459), .driver_info = BTUSB_REALTEK },
/* Additional Realtek 8821AE Bluetooth devices */
{ USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x13d3, 0x3414), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x13d3, 0x3458), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x13d3, 0x3461), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x13d3, 0x3462), .driver_info = BTUSB_REALTEK },
{ } /* Terminating entry */
};
@ -892,7 +919,7 @@ static int btusb_open(struct hci_dev *hdev)
*/
if (data->setup_on_usb) {
err = data->setup_on_usb(hdev);
if (err <0)
if (err < 0)
return err;
}
@ -1345,6 +1372,378 @@ static int btusb_setup_csr(struct hci_dev *hdev)
return ret;
}
#define RTL_FRAG_LEN 252
struct rtl_download_cmd {
__u8 index;
__u8 data[RTL_FRAG_LEN];
} __packed;
struct rtl_download_response {
__u8 status;
__u8 index;
} __packed;
struct rtl_rom_version_evt {
__u8 status;
__u8 version;
} __packed;
struct rtl_epatch_header {
__u8 signature[8];
__le32 fw_version;
__le16 num_patches;
} __packed;
#define RTL_EPATCH_SIGNATURE "Realtech"
#define RTL_ROM_LMP_3499 0x3499
#define RTL_ROM_LMP_8723A 0x1200
#define RTL_ROM_LMP_8723B 0x8723
#define RTL_ROM_LMP_8821A 0x8821
#define RTL_ROM_LMP_8761A 0x8761
static int rtl_read_rom_version(struct hci_dev *hdev, u8 *version)
{
struct rtl_rom_version_evt *rom_version;
struct sk_buff *skb;
int ret;
/* Read RTL ROM version command */
skb = __hci_cmd_sync(hdev, 0xfc6d, 0, NULL, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
BT_ERR("%s: Read ROM version failed (%ld)",
hdev->name, PTR_ERR(skb));
return PTR_ERR(skb);
}
if (skb->len != sizeof(*rom_version)) {
BT_ERR("%s: RTL version event length mismatch", hdev->name);
kfree_skb(skb);
return -EIO;
}
rom_version = (struct rtl_rom_version_evt *)skb->data;
BT_INFO("%s: rom_version status=%x version=%x",
hdev->name, rom_version->status, rom_version->version);
ret = rom_version->status;
if (ret == 0)
*version = rom_version->version;
kfree_skb(skb);
return ret;
}
static int rtl8723b_parse_firmware(struct hci_dev *hdev, u16 lmp_subver,
const struct firmware *fw,
unsigned char **_buf)
{
const u8 extension_sig[] = { 0x51, 0x04, 0xfd, 0x77 };
struct rtl_epatch_header *epatch_info;
unsigned char *buf;
int i, ret, len;
size_t min_size;
u8 opcode, length, data, rom_version = 0;
int project_id = -1;
const unsigned char *fwptr, *chip_id_base;
const unsigned char *patch_length_base, *patch_offset_base;
u32 patch_offset = 0;
u16 patch_length, num_patches;
const u16 project_id_to_lmp_subver[] = {
RTL_ROM_LMP_8723A,
RTL_ROM_LMP_8723B,
RTL_ROM_LMP_8821A,
RTL_ROM_LMP_8761A
};
ret = rtl_read_rom_version(hdev, &rom_version);
if (ret)
return -bt_to_errno(ret);
min_size = sizeof(struct rtl_epatch_header) + sizeof(extension_sig) + 3;
if (fw->size < min_size)
return -EINVAL;
fwptr = fw->data + fw->size - sizeof(extension_sig);
if (memcmp(fwptr, extension_sig, sizeof(extension_sig)) != 0) {
BT_ERR("%s: extension section signature mismatch", hdev->name);
return -EINVAL;
}
/* Loop from the end of the firmware parsing instructions, until
* we find an instruction that identifies the "project ID" for the
* hardware supported by this firmware file.
* Once we have that, we double-check that the project_id is suitable
* for the hardware we are working with.
*/
while (fwptr >= fw->data + (sizeof(struct rtl_epatch_header) + 3)) {
opcode = *--fwptr;
length = *--fwptr;
data = *--fwptr;
BT_DBG("check op=%x len=%x data=%x", opcode, length, data);
if (opcode == 0xff) /* EOF */
break;
if (length == 0) {
BT_ERR("%s: found instruction with length 0",
hdev->name);
return -EINVAL;
}
if (opcode == 0 && length == 1) {
project_id = data;
break;
}
fwptr -= length;
}
if (project_id < 0) {
BT_ERR("%s: failed to find version instruction", hdev->name);
return -EINVAL;
}
if (project_id >= ARRAY_SIZE(project_id_to_lmp_subver)) {
BT_ERR("%s: unknown project id %d", hdev->name, project_id);
return -EINVAL;
}
if (lmp_subver != project_id_to_lmp_subver[project_id]) {
BT_ERR("%s: firmware is for %x but this is a %x", hdev->name,
project_id_to_lmp_subver[project_id], lmp_subver);
return -EINVAL;
}
epatch_info = (struct rtl_epatch_header *)fw->data;
if (memcmp(epatch_info->signature, RTL_EPATCH_SIGNATURE, 8) != 0) {
BT_ERR("%s: bad EPATCH signature", hdev->name);
return -EINVAL;
}
num_patches = le16_to_cpu(epatch_info->num_patches);
BT_DBG("fw_version=%x, num_patches=%d",
le32_to_cpu(epatch_info->fw_version), num_patches);
/* After the rtl_epatch_header there is a funky patch metadata section.
* Assuming 2 patches, the layout is:
* ChipID1 ChipID2 PatchLength1 PatchLength2 PatchOffset1 PatchOffset2
*
* Find the right patch for this chip.
*/
min_size += 8 * num_patches;
if (fw->size < min_size)
return -EINVAL;
chip_id_base = fw->data + sizeof(struct rtl_epatch_header);
patch_length_base = chip_id_base + (sizeof(u16) * num_patches);
patch_offset_base = patch_length_base + (sizeof(u16) * num_patches);
for (i = 0; i < num_patches; i++) {
u16 chip_id = get_unaligned_le16(chip_id_base +
(i * sizeof(u16)));
if (chip_id == rom_version + 1) {
patch_length = get_unaligned_le16(patch_length_base +
(i * sizeof(u16)));
patch_offset = get_unaligned_le32(patch_offset_base +
(i * sizeof(u32)));
break;
}
}
if (!patch_offset) {
BT_ERR("%s: didn't find patch for chip id %d",
hdev->name, rom_version);
return -EINVAL;
}
BT_DBG("length=%x offset=%x index %d", patch_length, patch_offset, i);
min_size = patch_offset + patch_length;
if (fw->size < min_size)
return -EINVAL;
/* Copy the firmware into a new buffer and write the version at
* the end.
*/
len = patch_length;
buf = kmemdup(fw->data + patch_offset, patch_length, GFP_KERNEL);
if (!buf)
return -ENOMEM;
memcpy(buf + patch_length - 4, &epatch_info->fw_version, 4);
*_buf = buf;
return len;
}
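To make the offset arithmetic above easier to follow, here is a small standalone sketch (hypothetical sizes, not taken from a real firmware image) of where the chip-id, patch-length and patch-offset arrays start for a two-patch image; chip ids and lengths are 16-bit little-endian entries, offsets are 32-bit:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const unsigned int num_patches = 2;
	/* packed header: 8-byte signature + le32 fw_version + le16 num_patches */
	const size_t header_len = 8 + 4 + 2;

	size_t chip_id_base = header_len;
	size_t patch_length_base = chip_id_base + sizeof(uint16_t) * num_patches;
	size_t patch_offset_base = patch_length_base + sizeof(uint16_t) * num_patches;

	printf("chip ids @ %zu, lengths @ %zu, offsets @ %zu\n",
	       chip_id_base, patch_length_base, patch_offset_base);
	return 0;
}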
static int rtl_download_firmware(struct hci_dev *hdev,
const unsigned char *data, int fw_len)
{
struct rtl_download_cmd *dl_cmd;
int frag_num = fw_len / RTL_FRAG_LEN + 1;
int frag_len = RTL_FRAG_LEN;
int ret = 0;
int i;
dl_cmd = kmalloc(sizeof(struct rtl_download_cmd), GFP_KERNEL);
if (!dl_cmd)
return -ENOMEM;
for (i = 0; i < frag_num; i++) {
struct rtl_download_response *dl_resp;
struct sk_buff *skb;
BT_DBG("download fw (%d/%d)", i, frag_num);
dl_cmd->index = i;
if (i == (frag_num - 1)) {
dl_cmd->index |= 0x80; /* data end */
frag_len = fw_len % RTL_FRAG_LEN;
}
memcpy(dl_cmd->data, data, frag_len);
/* Send download command */
skb = __hci_cmd_sync(hdev, 0xfc20, frag_len + 1, dl_cmd,
HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
BT_ERR("%s: download fw command failed (%ld)",
hdev->name, PTR_ERR(skb));
ret = -PTR_ERR(skb);
goto out;
}
if (skb->len != sizeof(*dl_resp)) {
BT_ERR("%s: download fw event length mismatch",
hdev->name);
kfree_skb(skb);
ret = -EIO;
goto out;
}
dl_resp = (struct rtl_download_response *)skb->data;
if (dl_resp->status != 0) {
kfree_skb(skb);
ret = bt_to_errno(dl_resp->status);
goto out;
}
kfree_skb(skb);
data += RTL_FRAG_LEN;
}
out:
kfree(dl_cmd);
return ret;
}
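A quick worked example of the fragmentation above (numbers of my own choosing): a 1000-byte patch gives frag_num = 1000 / 252 + 1 = 4, so three full 252-byte fragments are sent, followed by a final fragment of 1000 % 252 = 244 bytes whose index has 0x80 ORed in to mark the end of the data.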
static int btusb_setup_rtl8723a(struct hci_dev *hdev)
{
struct btusb_data *data = dev_get_drvdata(&hdev->dev);
struct usb_device *udev = interface_to_usbdev(data->intf);
const struct firmware *fw;
int ret;
BT_INFO("%s: rtl: loading rtl_bt/rtl8723a_fw.bin", hdev->name);
ret = request_firmware(&fw, "rtl_bt/rtl8723a_fw.bin", &udev->dev);
if (ret < 0) {
BT_ERR("%s: Failed to load rtl_bt/rtl8723a_fw.bin", hdev->name);
return ret;
}
if (fw->size < 8) {
ret = -EINVAL;
goto out;
}
/* Check that the firmware doesn't have the epatch signature
* (which is only for RTL8723B and newer).
*/
if (!memcmp(fw->data, RTL_EPATCH_SIGNATURE, 8)) {
BT_ERR("%s: unexpected EPATCH signature!", hdev->name);
ret = -EINVAL;
goto out;
}
ret = rtl_download_firmware(hdev, fw->data, fw->size);
out:
release_firmware(fw);
return ret;
}
static int btusb_setup_rtl8723b(struct hci_dev *hdev, u16 lmp_subver,
const char *fw_name)
{
struct btusb_data *data = dev_get_drvdata(&hdev->dev);
struct usb_device *udev = interface_to_usbdev(data->intf);
unsigned char *fw_data = NULL;
const struct firmware *fw;
int ret;
BT_INFO("%s: rtl: loading %s", hdev->name, fw_name);
ret = request_firmware(&fw, fw_name, &udev->dev);
if (ret < 0) {
BT_ERR("%s: Failed to load %s", hdev->name, fw_name);
return ret;
}
ret = rtl8723b_parse_firmware(hdev, lmp_subver, fw, &fw_data);
if (ret < 0)
goto out;
ret = rtl_download_firmware(hdev, fw_data, ret);
kfree(fw_data);
if (ret < 0)
goto out;
out:
release_firmware(fw);
return ret;
}
static int btusb_setup_realtek(struct hci_dev *hdev)
{
struct sk_buff *skb;
struct hci_rp_read_local_version *resp;
u16 lmp_subver;
skb = btusb_read_local_version(hdev);
if (IS_ERR(skb))
return -PTR_ERR(skb);
resp = (struct hci_rp_read_local_version *)skb->data;
BT_INFO("%s: rtl: examining hci_ver=%02x hci_rev=%04x lmp_ver=%02x "
"lmp_subver=%04x", hdev->name, resp->hci_ver, resp->hci_rev,
resp->lmp_ver, resp->lmp_subver);
lmp_subver = le16_to_cpu(resp->lmp_subver);
kfree_skb(skb);
/* Match a set of subver values that correspond to stock firmware,
* which is not compatible with standard btusb.
* If matched, upload an alternative firmware that does conform to
* standard btusb. Once that firmware is uploaded, the subver changes
* to a different value.
*/
switch (lmp_subver) {
case RTL_ROM_LMP_8723A:
case RTL_ROM_LMP_3499:
return btusb_setup_rtl8723a(hdev);
case RTL_ROM_LMP_8723B:
return btusb_setup_rtl8723b(hdev, lmp_subver,
"rtl_bt/rtl8723b_fw.bin");
case RTL_ROM_LMP_8821A:
return btusb_setup_rtl8723b(hdev, lmp_subver,
"rtl_bt/rtl8821a_fw.bin");
case RTL_ROM_LMP_8761A:
return btusb_setup_rtl8723b(hdev, lmp_subver,
"rtl_bt/rtl8761a_fw.bin");
default:
BT_INFO("rtl: assuming no firmware upload needed.");
return 0;
}
}
static const struct firmware *btusb_setup_intel_get_fw(struct hci_dev *hdev,
struct intel_version *ver)
{
@ -2577,7 +2976,7 @@ static int btusb_setup_qca(struct hci_dev *hdev)
int i, err;
err = btusb_qca_send_vendor_req(hdev, QCA_GET_TARGET_VERSION, &ver,
sizeof(ver));
sizeof(ver));
if (err < 0)
return err;
@ -2776,6 +3175,9 @@ static int btusb_probe(struct usb_interface *intf,
hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
}
if (id->driver_info & BTUSB_REALTEK)
hdev->setup = btusb_setup_realtek;
if (id->driver_info & BTUSB_AMP) {
/* AMP controllers do not support SCO packets */
data->isoc = NULL;

View File

@ -95,7 +95,6 @@ static void ath_hci_uart_work(struct work_struct *work)
hci_uart_tx_wakeup(hu);
}
/* Initialize protocol */
static int ath_open(struct hci_uart *hu)
{
struct ath_struct *ath;
@ -116,19 +115,6 @@ static int ath_open(struct hci_uart *hu)
return 0;
}
/* Flush protocol data */
static int ath_flush(struct hci_uart *hu)
{
struct ath_struct *ath = hu->priv;
BT_DBG("hu %p", hu);
skb_queue_purge(&ath->txq);
return 0;
}
/* Close protocol */
static int ath_close(struct hci_uart *hu)
{
struct ath_struct *ath = hu->priv;
@ -147,9 +133,73 @@ static int ath_close(struct hci_uart *hu)
return 0;
}
static int ath_flush(struct hci_uart *hu)
{
struct ath_struct *ath = hu->priv;
BT_DBG("hu %p", hu);
skb_queue_purge(&ath->txq);
return 0;
}
static int ath_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
{
struct sk_buff *skb;
u8 buf[10];
int err;
buf[0] = 0x01;
buf[1] = 0x01;
buf[2] = 0x00;
buf[3] = sizeof(bdaddr_t);
memcpy(buf + 4, bdaddr, sizeof(bdaddr_t));
skb = __hci_cmd_sync(hdev, 0xfc0b, sizeof(buf), buf, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
BT_ERR("%s: Change address command failed (%d)",
hdev->name, err);
return err;
}
kfree_skb(skb);
return 0;
}
static int ath_setup(struct hci_uart *hu)
{
BT_DBG("hu %p", hu);
hu->hdev->set_bdaddr = ath_set_bdaddr;
return 0;
}
static const struct h4_recv_pkt ath_recv_pkts[] = {
{ H4_RECV_ACL, .recv = hci_recv_frame },
{ H4_RECV_SCO, .recv = hci_recv_frame },
{ H4_RECV_EVENT, .recv = hci_recv_frame },
};
static int ath_recv(struct hci_uart *hu, const void *data, int count)
{
struct ath_struct *ath = hu->priv;
ath->rx_skb = h4_recv_buf(hu->hdev, ath->rx_skb, data, count,
ath_recv_pkts, ARRAY_SIZE(ath_recv_pkts));
if (IS_ERR(ath->rx_skb)) {
int err = PTR_ERR(ath->rx_skb);
BT_ERR("%s: Frame reassembly failed (%d)", hu->hdev->name, err);
return err;
}
return count;
}
#define HCI_OP_ATH_SLEEP 0xFC04
/* Enqueue frame for transmission */
static int ath_enqueue(struct hci_uart *hu, struct sk_buff *skb)
{
struct ath_struct *ath = hu->priv;
@ -159,8 +209,7 @@ static int ath_enqueue(struct hci_uart *hu, struct sk_buff *skb)
return 0;
}
/*
* Update power management enable flag with parameters of
/* Update power management enable flag with parameters of
* HCI sleep enable vendor specific HCI command.
*/
if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
@ -190,37 +239,16 @@ static struct sk_buff *ath_dequeue(struct hci_uart *hu)
return skb_dequeue(&ath->txq);
}
static const struct h4_recv_pkt ath_recv_pkts[] = {
{ H4_RECV_ACL, .recv = hci_recv_frame },
{ H4_RECV_SCO, .recv = hci_recv_frame },
{ H4_RECV_EVENT, .recv = hci_recv_frame },
};
/* Recv data */
static int ath_recv(struct hci_uart *hu, const void *data, int count)
{
struct ath_struct *ath = hu->priv;
ath->rx_skb = h4_recv_buf(hu->hdev, ath->rx_skb, data, count,
ath_recv_pkts, ARRAY_SIZE(ath_recv_pkts));
if (IS_ERR(ath->rx_skb)) {
int err = PTR_ERR(ath->rx_skb);
BT_ERR("%s: Frame reassembly failed (%d)", hu->hdev->name, err);
return err;
}
return count;
}
static const struct hci_uart_proto athp = {
.id = HCI_UART_ATH3K,
.name = "ATH3K",
.open = ath_open,
.close = ath_close,
.flush = ath_flush,
.setup = ath_setup,
.recv = ath_recv,
.enqueue = ath_enqueue,
.dequeue = ath_dequeue,
.flush = ath_flush,
};
int __init ath_init(void)

Some files were not shown because too many files have changed in this diff