Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/ethernet/cadence/macb.c
	drivers/net/phy/phy.c
	include/linux/skbuff.h
	net/ipv4/tcp.c
	net/switchdev/switchdev.c

Switchdev was a case of RTNH_H_{EXTERNAL --> OFFLOAD} renaming
overlapping with net-next changes of various sorts.

phy.c was a case of two changes, one adding a local variable to
a function whilst the second was removing one.

tcp.c overlapped a deadlock fix with the addition of new tcp_info
statistic values.

macb.c involved the addition of two zyncq device entries.

skbuff.h involved adding back ipv4_daddr to nf_bridge_info whilst
net-next changes put two other existing members of that struct
into a union.

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 36583eb54d
@@ -17,7 +17,8 @@ Required properties:
 - #clock-cells: from common clock binding; shall be set to 1.
 - clocks: from common clock binding; list of parent clock
   handles, shall be xtal reference clock or xtal and clkin for
-  si5351c only.
+  si5351c only. Corresponding clock input names are "xtal" and
+  "clkin" respectively.
 - #address-cells: shall be set to 1.
 - #size-cells: shall be set to 0.

@@ -71,6 +72,7 @@ i2c-master-node {

 		/* connect xtal input to 25MHz reference */
 		clocks = <&ref25>;
+		clock-names = "xtal";

 		/* connect xtal input as source of pll0 and pll1 */
 		silabs,pll-source = <0 0>, <1 0>;

@@ -8,8 +8,8 @@ Required properties:
               is not Linux-only, but in case of Linux, see the "m25p_ids"
               table in drivers/mtd/devices/m25p80.c for the list of supported
               chips.
-              Must also include "nor-jedec" for any SPI NOR flash that can be
-              identified by the JEDEC READ ID opcode (0x9F).
+              Must also include "jedec,spi-nor" for any SPI NOR flash that can
+              be identified by the JEDEC READ ID opcode (0x9F).
 - reg : Chip-Select number
 - spi-max-frequency : Maximum frequency of the SPI bus the chip can operate at

@@ -25,7 +25,7 @@ Example:
 	flash: m25p80@0 {
 		#address-cells = <1>;
 		#size-cells = <1>;
-		compatible = "spansion,m25p80", "nor-jedec";
+		compatible = "spansion,m25p80", "jedec,spi-nor";
 		reg = <0>;
 		spi-max-frequency = <40000000>;
 		m25p,fast-read;

@@ -3,7 +3,8 @@
 Required properties:
 - compatible: Should be "cdns,[<chip>-]{emac}"
   Use "cdns,at91rm9200-emac" Atmel at91rm9200 SoC.
-  or the generic form: "cdns,emac".
+  Use "cdns,zynq-gem" Xilinx Zynq-7xxx SoC.
+  Or the generic form: "cdns,emac".
 - reg: Address and length of the register set for the device
 - interrupts: Should contain macb interrupt
 - phy-mode: see ethernet.txt file in the same directory.

@@ -198,6 +198,9 @@ TTY_IO_ERROR		If set, causes all subsequent userspace read/write

 TTY_OTHER_CLOSED	Device is a pty and the other side has closed.

+TTY_OTHER_DONE		Device is a pty and the other side has closed and
+			all pending input processing has been completed.
+
 TTY_NO_WRITE_SPLIT	Prevent driver from splitting up writes into
 			smaller chunks.

@@ -169,6 +169,10 @@ Shadow pages contain the following information:
     Contains the value of cr4.smep && !cr0.wp for which the page is valid
     (pages for which this is true are different from other pages; see the
     treatment of cr0.wp=0 below).
+  role.smap_andnot_wp:
+    Contains the value of cr4.smap && !cr0.wp for which the page is valid
+    (pages for which this is true are different from other pages; see the
+    treatment of cr0.wp=0 below).
   gfn:
     Either the guest page table containing the translations shadowed by this
     page, or the base page frame for linear translations. See role.direct.

@@ -344,10 +348,16 @@ on fault type:

   (user write faults generate a #PF)

-In the first case there is an additional complication if CR4.SMEP is
-enabled: since we've turned the page into a kernel page, the kernel may now
-execute it. We handle this by also setting spte.nx. If we get a user
-fetch or read fault, we'll change spte.u=1 and spte.nx=gpte.nx back.
+In the first case there are two additional complications:
+- if CR4.SMEP is enabled: since we've turned the page into a kernel page,
+  the kernel may now execute it. We handle this by also setting spte.nx.
+  If we get a user fetch or read fault, we'll change spte.u=1 and
+  spte.nx=gpte.nx back.
+- if CR4.SMAP is disabled: since the page has been changed to a kernel
+  page, it can not be reused when CR4.SMAP is enabled. We set
+  CR4.SMAP && !CR0.WP into shadow page's role to avoid this case. Note,
+  here we do not care the case that CR4.SMAP is enabled since KVM will
+  directly inject #PF to guest due to failed permission check.

 To prevent an spte that was converted into a kernel page with cr0.wp=0
 from being written by the kernel after cr0.wp has changed to 1, we make

 MAINTAINERS | 25

@@ -973,7 +973,7 @@ S:	Maintained
 ARM/CORTINA SYSTEMS GEMINI ARM ARCHITECTURE
 M:	Hans Ulli Kroll <ulli.kroll@googlemail.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-T:	git git://git.berlios.de/gemini-board
+T:	git git://github.com/ulli-kroll/linux.git
 S:	Maintained
 F:	arch/arm/mach-gemini/

@@ -1192,7 +1192,7 @@ ARM/MAGICIAN MACHINE SUPPORT
 M:	Philipp Zabel <philipp.zabel@gmail.com>
 S:	Maintained

-ARM/Marvell Armada 370 and Armada XP SOC support
+ARM/Marvell Kirkwood and Armada 370, 375, 38x, XP SOC support
 M:	Jason Cooper <jason@lakedaemon.net>
 M:	Andrew Lunn <andrew@lunn.ch>
 M:	Gregory Clement <gregory.clement@free-electrons.com>

@@ -1201,12 +1201,17 @@ L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 F:	arch/arm/mach-mvebu/
 F:	drivers/rtc/rtc-armada38x.c
+F:	arch/arm/boot/dts/armada*
+F:	arch/arm/boot/dts/kirkwood*

 ARM/Marvell Berlin SoC support
 M:	Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 F:	arch/arm/mach-berlin/
+F:	arch/arm/boot/dts/berlin*

 ARM/Marvell Dove/MV78xx0/Orion SOC support
 M:	Jason Cooper <jason@lakedaemon.net>

@@ -1219,6 +1224,9 @@ F:	arch/arm/mach-dove/
 F:	arch/arm/mach-mv78xx0/
 F:	arch/arm/mach-orion5x/
 F:	arch/arm/plat-orion/
+F:	arch/arm/boot/dts/dove*
+F:	arch/arm/boot/dts/orion5x*

 ARM/Orion SoC/Technologic Systems TS-78xx platform support
 M:	Alexander Clouter <alex@digriz.org.uk>

@@ -1370,6 +1378,7 @@ N:	rockchip

 ARM/SAMSUNG EXYNOS ARM ARCHITECTURES
 M:	Kukjin Kim <kgene@kernel.org>
+M:	Krzysztof Kozlowski <k.kozlowski@samsung.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:	linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 S:	Maintained

@@ -1934,7 +1943,7 @@ S:	Maintained
 F:	drivers/net/wireless/b43legacy/

 BACKLIGHT CLASS/SUBSYSTEM
-M:	Jingoo Han <jg1.han@samsung.com>
+M:	Jingoo Han <jingoohan1@gmail.com>
 M:	Lee Jones <lee.jones@linaro.org>
 S:	Maintained
 F:	drivers/video/backlight/

@@ -3917,7 +3926,7 @@ F:	drivers/extcon/
 F:	Documentation/extcon/

 EXYNOS DP DRIVER
-M:	Jingoo Han <jg1.han@samsung.com>
+M:	Jingoo Han <jingoohan1@gmail.com>
 L:	dri-devel@lists.freedesktop.org
 S:	Maintained
 F:	drivers/gpu/drm/exynos/exynos_dp*

@@ -4526,7 +4535,7 @@ M:	Jean Delvare <jdelvare@suse.de>
 M:	Guenter Roeck <linux@roeck-us.net>
 L:	lm-sensors@lm-sensors.org
 W:	http://www.lm-sensors.org/
-T:	quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/
+T:	quilt http://jdelvare.nerim.net/devel/linux/jdelvare-hwmon/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git
 S:	Maintained
 F:	Documentation/hwmon/

@@ -7556,7 +7565,7 @@ S:	Maintained
 F:	drivers/pci/host/*rcar*

 PCI DRIVER FOR SAMSUNG EXYNOS
-M:	Jingoo Han <jg1.han@samsung.com>
+M:	Jingoo Han <jingoohan1@gmail.com>
 L:	linux-pci@vger.kernel.org
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:	linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)

@@ -7564,7 +7573,7 @@ S:	Maintained
 F:	drivers/pci/host/pci-exynos.c

 PCI DRIVER FOR SYNOPSIS DESIGNWARE
-M:	Jingoo Han <jg1.han@samsung.com>
+M:	Jingoo Han <jingoohan1@gmail.com>
 L:	linux-pci@vger.kernel.org
 S:	Maintained
 F:	drivers/pci/host/*designware*

@@ -8520,7 +8529,7 @@ S:	Supported
 F:	sound/soc/samsung/

 SAMSUNG FRAMEBUFFER DRIVER
-M:	Jingoo Han <jg1.han@samsung.com>
+M:	Jingoo Han <jingoohan1@gmail.com>
 L:	linux-fbdev@vger.kernel.org
 S:	Maintained
 F:	drivers/video/fbdev/s3c-fb.c

 Makefile | 2

@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 1
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
 NAME = Hurr durr I'ma sheep

 # *DOCUMENTATION*

@ -2,19 +2,6 @@ menu "Kernel hacking"
|
|||
|
||||
source "lib/Kconfig.debug"
|
||||
|
||||
config EARLY_PRINTK
|
||||
bool "Early printk" if EMBEDDED
|
||||
default y
|
||||
help
|
||||
Write kernel log output directly into the VGA buffer or to a serial
|
||||
port.
|
||||
|
||||
This is useful for kernel debugging when your machine crashes very
|
||||
early before the console code is initialized. For normal operation
|
||||
it is not recommended because it looks ugly and doesn't cooperate
|
||||
with klogd/syslogd or the X server. You should normally N here,
|
||||
unless you want to debug such a crash.
|
||||
|
||||
config 16KSTACKS
|
||||
bool "Use 16Kb for kernel stacks instead of 8Kb"
|
||||
help
|
||||
|
|
|
@ -99,7 +99,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
|
|||
atomic_ops_unlock(flags); \
|
||||
}
|
||||
|
||||
#define ATOMIC_OP_RETURN(op, c_op) \
|
||||
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
|
||||
static inline int atomic_##op##_return(int i, atomic_t *v) \
|
||||
{ \
|
||||
unsigned long flags; \
|
||||
|
|
|
@ -266,7 +266,7 @@ static inline void __cache_line_loop(unsigned long paddr, unsigned long vaddr,
|
|||
* Machine specific helpers for Entire D-Cache or Per Line ops
|
||||
*/
|
||||
|
||||
static unsigned int __before_dc_op(const int op)
|
||||
static inline unsigned int __before_dc_op(const int op)
|
||||
{
|
||||
unsigned int reg = reg;
|
||||
|
||||
|
@ -284,7 +284,7 @@ static unsigned int __before_dc_op(const int op)
|
|||
return reg;
|
||||
}
|
||||
|
||||
static void __after_dc_op(const int op, unsigned int reg)
|
||||
static inline void __after_dc_op(const int op, unsigned int reg)
|
||||
{
|
||||
if (op & OP_FLUSH) /* flush / flush-n-inv both wait */
|
||||
while (read_aux_reg(ARC_REG_DC_CTRL) & DC_CTRL_FLUSH_STATUS);
|
||||
|
|
|
@ -69,7 +69,7 @@
|
|||
mainpll: mainpll {
|
||||
compatible = "fixed-clock";
|
||||
#clock-cells = <0>;
|
||||
clock-frequency = <2000000000>;
|
||||
clock-frequency = <1000000000>;
|
||||
};
|
||||
/* 25 MHz reference crystal */
|
||||
refclk: oscillator {
|
||||
|
|
|
@ -585,7 +585,7 @@
|
|||
mainpll: mainpll {
|
||||
compatible = "fixed-clock";
|
||||
#clock-cells = <0>;
|
||||
clock-frequency = <2000000000>;
|
||||
clock-frequency = <1000000000>;
|
||||
};
|
||||
|
||||
/* 25 MHz reference crystal */
|
||||
|
|
|
@ -502,7 +502,7 @@
|
|||
mainpll: mainpll {
|
||||
compatible = "fixed-clock";
|
||||
#clock-cells = <0>;
|
||||
clock-frequency = <2000000000>;
|
||||
clock-frequency = <1000000000>;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
|
|
@ -87,6 +87,7 @@
|
|||
|
||||
/* connect xtal input to 25MHz reference */
|
||||
clocks = <&ref25>;
|
||||
clock-names = "xtal";
|
||||
|
||||
/* connect xtal input as source of pll0 and pll1 */
|
||||
silabs,pll-source = <0 0>, <1 0>;
|
||||
|
|
|
@ -711,6 +711,7 @@
|
|||
num-slots = <1>;
|
||||
broken-cd;
|
||||
cap-sdio-irq;
|
||||
keep-power-in-suspend;
|
||||
card-detect-delay = <200>;
|
||||
clock-frequency = <400000000>;
|
||||
samsung,dw-mshc-ciu-div = <1>;
|
||||
|
|
|
@ -674,6 +674,7 @@
|
|||
num-slots = <1>;
|
||||
broken-cd;
|
||||
cap-sdio-irq;
|
||||
keep-power-in-suspend;
|
||||
card-detect-delay = <200>;
|
||||
clock-frequency = <400000000>;
|
||||
samsung,dw-mshc-ciu-div = <1>;
|
||||
|
|
|
@ -826,7 +826,7 @@
|
|||
<&tegra_car TEGRA124_CLK_PLL_U>,
|
||||
<&tegra_car TEGRA124_CLK_USBD>;
|
||||
clock-names = "reg", "pll_u", "utmi-pads";
|
||||
resets = <&tegra_car 59>, <&tegra_car 22>;
|
||||
resets = <&tegra_car 22>, <&tegra_car 22>;
|
||||
reset-names = "usb", "utmi-pads";
|
||||
nvidia,hssync-start-delay = <0>;
|
||||
nvidia,idle-wait-delay = <17>;
|
||||
|
@ -838,6 +838,7 @@
|
|||
nvidia,hssquelch-level = <2>;
|
||||
nvidia,hsdiscon-level = <5>;
|
||||
nvidia,xcvr-hsslew = <12>;
|
||||
nvidia,has-utmi-pad-registers;
|
||||
status = "disabled";
|
||||
};
|
||||
|
||||
|
@ -862,7 +863,7 @@
|
|||
<&tegra_car TEGRA124_CLK_PLL_U>,
|
||||
<&tegra_car TEGRA124_CLK_USBD>;
|
||||
clock-names = "reg", "pll_u", "utmi-pads";
|
||||
resets = <&tegra_car 22>, <&tegra_car 22>;
|
||||
resets = <&tegra_car 58>, <&tegra_car 22>;
|
||||
reset-names = "usb", "utmi-pads";
|
||||
nvidia,hssync-start-delay = <0>;
|
||||
nvidia,idle-wait-delay = <17>;
|
||||
|
@ -874,7 +875,6 @@
|
|||
nvidia,hssquelch-level = <2>;
|
||||
nvidia,hsdiscon-level = <5>;
|
||||
nvidia,xcvr-hsslew = <12>;
|
||||
nvidia,has-utmi-pad-registers;
|
||||
status = "disabled";
|
||||
};
|
||||
|
||||
|
@ -899,7 +899,7 @@
|
|||
<&tegra_car TEGRA124_CLK_PLL_U>,
|
||||
<&tegra_car TEGRA124_CLK_USBD>;
|
||||
clock-names = "reg", "pll_u", "utmi-pads";
|
||||
resets = <&tegra_car 58>, <&tegra_car 22>;
|
||||
resets = <&tegra_car 59>, <&tegra_car 22>;
|
||||
reset-names = "usb", "utmi-pads";
|
||||
nvidia,hssync-start-delay = <0>;
|
||||
nvidia,idle-wait-delay = <17>;
|
||||
|
|
|
@ -191,6 +191,7 @@
|
|||
compatible = "arm,cortex-a15-pmu";
|
||||
interrupts = <0 68 4>,
|
||||
<0 69 4>;
|
||||
interrupt-affinity = <&cpu0>, <&cpu1>;
|
||||
};
|
||||
|
||||
oscclk6a: oscclk6a {
|
||||
|
|
|
@ -33,28 +33,28 @@
|
|||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
|
||||
cpu@0 {
|
||||
A9_0: cpu@0 {
|
||||
device_type = "cpu";
|
||||
compatible = "arm,cortex-a9";
|
||||
reg = <0>;
|
||||
next-level-cache = <&L2>;
|
||||
};
|
||||
|
||||
cpu@1 {
|
||||
A9_1: cpu@1 {
|
||||
device_type = "cpu";
|
||||
compatible = "arm,cortex-a9";
|
||||
reg = <1>;
|
||||
next-level-cache = <&L2>;
|
||||
};
|
||||
|
||||
cpu@2 {
|
||||
A9_2: cpu@2 {
|
||||
device_type = "cpu";
|
||||
compatible = "arm,cortex-a9";
|
||||
reg = <2>;
|
||||
next-level-cache = <&L2>;
|
||||
};
|
||||
|
||||
cpu@3 {
|
||||
A9_3: cpu@3 {
|
||||
device_type = "cpu";
|
||||
compatible = "arm,cortex-a9";
|
||||
reg = <3>;
|
||||
|
@ -170,6 +170,7 @@
|
|||
compatible = "arm,pl310-cache";
|
||||
reg = <0x1e00a000 0x1000>;
|
||||
interrupts = <0 43 4>;
|
||||
cache-unified;
|
||||
cache-level = <2>;
|
||||
arm,data-latency = <1 1 1>;
|
||||
arm,tag-latency = <1 1 1>;
|
||||
|
@ -181,6 +182,8 @@
|
|||
<0 61 4>,
|
||||
<0 62 4>,
|
||||
<0 63 4>;
|
||||
interrupt-affinity = <&A9_0>, <&A9_1>, <&A9_2>, <&A9_3>;
|
||||
|
||||
};
|
||||
|
||||
dcc {
|
||||
|
|
|
@ -193,7 +193,7 @@
|
|||
};
|
||||
|
||||
gem0: ethernet@e000b000 {
|
||||
compatible = "cdns,gem";
|
||||
compatible = "cdns,zynq-gem";
|
||||
reg = <0xe000b000 0x1000>;
|
||||
status = "disabled";
|
||||
interrupts = <0 22 4>;
|
||||
|
@ -204,7 +204,7 @@
|
|||
};
|
||||
|
||||
gem1: ethernet@e000c000 {
|
||||
compatible = "cdns,gem";
|
||||
compatible = "cdns,zynq-gem";
|
||||
reg = <0xe000c000 0x1000>;
|
||||
status = "disabled";
|
||||
interrupts = <0 45 4>;
|
||||
|
|
|
@ -159,6 +159,8 @@ extern void exynos_enter_aftr(void);
|
|||
|
||||
extern struct cpuidle_exynos_data cpuidle_coupled_exynos_data;
|
||||
|
||||
extern void exynos_set_delayed_reset_assertion(bool enable);
|
||||
|
||||
extern void s5p_init_cpu(void __iomem *cpuid_addr);
|
||||
extern unsigned int samsung_rev(void);
|
||||
extern void __iomem *cpu_boot_reg_base(void);
|
||||
|
|
|
@ -166,6 +166,33 @@ static void __init exynos_init_io(void)
|
|||
exynos_map_io();
|
||||
}
|
||||
|
||||
/*
|
||||
* Set or clear the USE_DELAYED_RESET_ASSERTION option. Used by smp code
|
||||
* and suspend.
|
||||
*
|
||||
* This is necessary only on Exynos4 SoCs. When system is running
|
||||
* USE_DELAYED_RESET_ASSERTION should be set so the ARM CLK clock down
|
||||
* feature could properly detect global idle state when secondary CPU is
|
||||
* powered down.
|
||||
*
|
||||
* However this should not be set when such system is going into suspend.
|
||||
*/
|
||||
void exynos_set_delayed_reset_assertion(bool enable)
|
||||
{
|
||||
if (of_machine_is_compatible("samsung,exynos4")) {
|
||||
unsigned int tmp, core_id;
|
||||
|
||||
for (core_id = 0; core_id < num_possible_cpus(); core_id++) {
|
||||
tmp = pmu_raw_readl(EXYNOS_ARM_CORE_OPTION(core_id));
|
||||
if (enable)
|
||||
tmp |= S5P_USE_DELAYED_RESET_ASSERTION;
|
||||
else
|
||||
tmp &= ~(S5P_USE_DELAYED_RESET_ASSERTION);
|
||||
pmu_raw_writel(tmp, EXYNOS_ARM_CORE_OPTION(core_id));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Apparently, these SoCs are not able to wake-up from suspend using
|
||||
* the PMU. Too bad. Should they suddenly become capable of such a
|
||||
|
|
|
@ -34,30 +34,6 @@
|
|||
|
||||
extern void exynos4_secondary_startup(void);
|
||||
|
||||
/*
|
||||
* Set or clear the USE_DELAYED_RESET_ASSERTION option, set on Exynos4 SoCs
|
||||
* during hot-(un)plugging CPUx.
|
||||
*
|
||||
* The feature can be cleared safely during first boot of secondary CPU.
|
||||
*
|
||||
* Exynos4 SoCs require setting USE_DELAYED_RESET_ASSERTION during powering
|
||||
* down a CPU so the CPU idle clock down feature could properly detect global
|
||||
* idle state when CPUx is off.
|
||||
*/
|
||||
static void exynos_set_delayed_reset_assertion(u32 core_id, bool enable)
|
||||
{
|
||||
if (soc_is_exynos4()) {
|
||||
unsigned int tmp;
|
||||
|
||||
tmp = pmu_raw_readl(EXYNOS_ARM_CORE_OPTION(core_id));
|
||||
if (enable)
|
||||
tmp |= S5P_USE_DELAYED_RESET_ASSERTION;
|
||||
else
|
||||
tmp &= ~(S5P_USE_DELAYED_RESET_ASSERTION);
|
||||
pmu_raw_writel(tmp, EXYNOS_ARM_CORE_OPTION(core_id));
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef CONFIG_HOTPLUG_CPU
|
||||
static inline void cpu_leave_lowpower(u32 core_id)
|
||||
{
|
||||
|
@ -73,8 +49,6 @@ static inline void cpu_leave_lowpower(u32 core_id)
|
|||
: "=&r" (v)
|
||||
: "Ir" (CR_C), "Ir" (0x40)
|
||||
: "cc");
|
||||
|
||||
exynos_set_delayed_reset_assertion(core_id, false);
|
||||
}
|
||||
|
||||
static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
|
||||
|
@ -87,14 +61,6 @@ static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
|
|||
/* Turn the CPU off on next WFI instruction. */
|
||||
exynos_cpu_power_down(core_id);
|
||||
|
||||
/*
|
||||
* Exynos4 SoCs require setting
|
||||
* USE_DELAYED_RESET_ASSERTION so the CPU idle
|
||||
* clock down feature could properly detect
|
||||
* global idle state when CPUx is off.
|
||||
*/
|
||||
exynos_set_delayed_reset_assertion(core_id, true);
|
||||
|
||||
wfi();
|
||||
|
||||
if (pen_release == core_id) {
|
||||
|
@ -371,9 +337,6 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
|
|||
udelay(10);
|
||||
}
|
||||
|
||||
/* No harm if this is called during first boot of secondary CPU */
|
||||
exynos_set_delayed_reset_assertion(core_id, false);
|
||||
|
||||
/*
|
||||
* now the secondary core is starting up let it run its
|
||||
* calibrations, then wait for it to finish
|
||||
|
@ -420,6 +383,8 @@ static void __init exynos_smp_prepare_cpus(unsigned int max_cpus)
|
|||
|
||||
exynos_sysram_init();
|
||||
|
||||
exynos_set_delayed_reset_assertion(true);
|
||||
|
||||
if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
|
||||
scu_enable(scu_base_addr());
|
||||
|
||||
|
|
|
@ -188,7 +188,7 @@ no_clk:
|
|||
args.np = np;
|
||||
args.args_count = 0;
|
||||
child_domain = of_genpd_get_from_provider(&args);
|
||||
if (!child_domain)
|
||||
if (IS_ERR(child_domain))
|
||||
continue;
|
||||
|
||||
if (of_parse_phandle_with_args(np, "power-domains",
|
||||
|
@ -196,7 +196,7 @@ no_clk:
|
|||
continue;
|
||||
|
||||
parent_domain = of_genpd_get_from_provider(&args);
|
||||
if (!parent_domain)
|
||||
if (IS_ERR(parent_domain))
|
||||
continue;
|
||||
|
||||
if (pm_genpd_add_subdomain(parent_domain, child_domain))
|
||||
|
|
|
@ -342,6 +342,8 @@ static void exynos_pm_enter_sleep_mode(void)
|
|||
|
||||
static void exynos_pm_prepare(void)
|
||||
{
|
||||
exynos_set_delayed_reset_assertion(false);
|
||||
|
||||
/* Set wake-up mask registers */
|
||||
exynos_pm_set_wakeup_mask();
|
||||
|
||||
|
@ -482,6 +484,7 @@ early_wakeup:
|
|||
|
||||
/* Clear SLEEP mode set in INFORM1 */
|
||||
pmu_raw_writel(0x0, S5P_INFORM1);
|
||||
exynos_set_delayed_reset_assertion(true);
|
||||
}
|
||||
|
||||
static void exynos3250_pm_resume(void)
|
||||
|
@ -723,8 +726,10 @@ void __init exynos_pm_init(void)
|
|||
return;
|
||||
}
|
||||
|
||||
if (WARN_ON(!of_find_property(np, "interrupt-controller", NULL)))
|
||||
if (WARN_ON(!of_find_property(np, "interrupt-controller", NULL))) {
|
||||
pr_warn("Outdated DT detected, suspend/resume will NOT work\n");
|
||||
return;
|
||||
}
|
||||
|
||||
pm_data = (const struct exynos_pm_data *) match->data;
|
||||
|
||||
|
|
|
@ -12,6 +12,8 @@
|
|||
#ifndef __GEMINI_COMMON_H__
|
||||
#define __GEMINI_COMMON_H__
|
||||
|
||||
#include <linux/reboot.h>
|
||||
|
||||
struct mtd_partition;
|
||||
|
||||
extern void gemini_map_io(void);
|
||||
|
@ -26,6 +28,6 @@ extern int platform_register_pflash(unsigned int size,
|
|||
struct mtd_partition *parts,
|
||||
unsigned int nr_parts);
|
||||
|
||||
extern void gemini_restart(char mode, const char *cmd);
|
||||
extern void gemini_restart(enum reboot_mode mode, const char *cmd);
|
||||
|
||||
#endif /* __GEMINI_COMMON_H__ */
|
||||
|
|
|
@ -14,7 +14,9 @@
|
|||
#include <mach/hardware.h>
|
||||
#include <mach/global_reg.h>
|
||||
|
||||
void gemini_restart(char mode, const char *cmd)
|
||||
#include "common.h"
|
||||
|
||||
void gemini_restart(enum reboot_mode mode, const char *cmd)
|
||||
{
|
||||
__raw_writel(RESET_GLOBAL | RESET_CPU1,
|
||||
IO_ADDRESS(GEMINI_GLOBAL_BASE) + GLOBAL_RESET);
|
||||
|
|
|
@ -171,6 +171,12 @@
|
|||
*/
|
||||
#define LINKS_PER_OCP_IF 2
|
||||
|
||||
/*
|
||||
* Address offset (in bytes) between the reset control and the reset
|
||||
* status registers: 4 bytes on OMAP4
|
||||
*/
|
||||
#define OMAP4_RST_CTRL_ST_OFFSET 4
|
||||
|
||||
/**
|
||||
* struct omap_hwmod_soc_ops - fn ptrs for some SoC-specific operations
|
||||
* @enable_module: function to enable a module (via MODULEMODE)
|
||||
|
@ -3016,10 +3022,12 @@ static int _omap4_deassert_hardreset(struct omap_hwmod *oh,
|
|||
if (ohri->st_shift)
|
||||
pr_err("omap_hwmod: %s: %s: hwmod data error: OMAP4 does not support st_shift\n",
|
||||
oh->name, ohri->name);
|
||||
return omap_prm_deassert_hardreset(ohri->rst_shift, 0,
|
||||
return omap_prm_deassert_hardreset(ohri->rst_shift, ohri->rst_shift,
|
||||
oh->clkdm->pwrdm.ptr->prcm_partition,
|
||||
oh->clkdm->pwrdm.ptr->prcm_offs,
|
||||
oh->prcm.omap4.rstctrl_offs, 0);
|
||||
oh->prcm.omap4.rstctrl_offs,
|
||||
oh->prcm.omap4.rstctrl_offs +
|
||||
OMAP4_RST_CTRL_ST_OFFSET);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -3047,27 +3055,6 @@ static int _omap4_is_hardreset_asserted(struct omap_hwmod *oh,
|
|||
oh->prcm.omap4.rstctrl_offs);
|
||||
}
|
||||
|
||||
/**
|
||||
* _am33xx_assert_hardreset - call AM33XX PRM hardreset fn with hwmod args
|
||||
* @oh: struct omap_hwmod * to assert hardreset
|
||||
* @ohri: hardreset line data
|
||||
*
|
||||
* Call am33xx_prminst_assert_hardreset() with parameters extracted
|
||||
* from the hwmod @oh and the hardreset line data @ohri. Only
|
||||
* intended for use as an soc_ops function pointer. Passes along the
|
||||
* return value from am33xx_prminst_assert_hardreset(). XXX This
|
||||
* function is scheduled for removal when the PRM code is moved into
|
||||
* drivers/.
|
||||
*/
|
||||
static int _am33xx_assert_hardreset(struct omap_hwmod *oh,
|
||||
struct omap_hwmod_rst_info *ohri)
|
||||
|
||||
{
|
||||
return omap_prm_assert_hardreset(ohri->rst_shift, 0,
|
||||
oh->clkdm->pwrdm.ptr->prcm_offs,
|
||||
oh->prcm.omap4.rstctrl_offs);
|
||||
}
|
||||
|
||||
/**
|
||||
* _am33xx_deassert_hardreset - call AM33XX PRM hardreset fn with hwmod args
|
||||
* @oh: struct omap_hwmod * to deassert hardreset
|
||||
|
@ -3083,32 +3070,13 @@ static int _am33xx_assert_hardreset(struct omap_hwmod *oh,
|
|||
static int _am33xx_deassert_hardreset(struct omap_hwmod *oh,
|
||||
struct omap_hwmod_rst_info *ohri)
|
||||
{
|
||||
return omap_prm_deassert_hardreset(ohri->rst_shift, ohri->st_shift, 0,
|
||||
return omap_prm_deassert_hardreset(ohri->rst_shift, ohri->st_shift,
|
||||
oh->clkdm->pwrdm.ptr->prcm_partition,
|
||||
oh->clkdm->pwrdm.ptr->prcm_offs,
|
||||
oh->prcm.omap4.rstctrl_offs,
|
||||
oh->prcm.omap4.rstst_offs);
|
||||
}
|
||||
|
||||
/**
|
||||
* _am33xx_is_hardreset_asserted - call AM33XX PRM hardreset fn with hwmod args
|
||||
* @oh: struct omap_hwmod * to test hardreset
|
||||
* @ohri: hardreset line data
|
||||
*
|
||||
* Call am33xx_prminst_is_hardreset_asserted() with parameters
|
||||
* extracted from the hwmod @oh and the hardreset line data @ohri.
|
||||
* Only intended for use as an soc_ops function pointer. Passes along
|
||||
* the return value from am33xx_prminst_is_hardreset_asserted(). XXX
|
||||
* This function is scheduled for removal when the PRM code is moved
|
||||
* into drivers/.
|
||||
*/
|
||||
static int _am33xx_is_hardreset_asserted(struct omap_hwmod *oh,
|
||||
struct omap_hwmod_rst_info *ohri)
|
||||
{
|
||||
return omap_prm_is_hardreset_asserted(ohri->rst_shift, 0,
|
||||
oh->clkdm->pwrdm.ptr->prcm_offs,
|
||||
oh->prcm.omap4.rstctrl_offs);
|
||||
}
|
||||
|
||||
/* Public functions */
|
||||
|
||||
u32 omap_hwmod_read(struct omap_hwmod *oh, u16 reg_offs)
|
||||
|
@ -3908,21 +3876,13 @@ void __init omap_hwmod_init(void)
|
|||
soc_ops.init_clkdm = _init_clkdm;
|
||||
soc_ops.update_context_lost = _omap4_update_context_lost;
|
||||
soc_ops.get_context_lost = _omap4_get_context_lost;
|
||||
} else if (soc_is_am43xx()) {
|
||||
} else if (cpu_is_ti816x() || soc_is_am33xx() || soc_is_am43xx()) {
|
||||
soc_ops.enable_module = _omap4_enable_module;
|
||||
soc_ops.disable_module = _omap4_disable_module;
|
||||
soc_ops.wait_target_ready = _omap4_wait_target_ready;
|
||||
soc_ops.assert_hardreset = _omap4_assert_hardreset;
|
||||
soc_ops.deassert_hardreset = _omap4_deassert_hardreset;
|
||||
soc_ops.is_hardreset_asserted = _omap4_is_hardreset_asserted;
|
||||
soc_ops.init_clkdm = _init_clkdm;
|
||||
} else if (cpu_is_ti816x() || soc_is_am33xx()) {
|
||||
soc_ops.enable_module = _omap4_enable_module;
|
||||
soc_ops.disable_module = _omap4_disable_module;
|
||||
soc_ops.wait_target_ready = _omap4_wait_target_ready;
|
||||
soc_ops.assert_hardreset = _am33xx_assert_hardreset;
|
||||
soc_ops.deassert_hardreset = _am33xx_deassert_hardreset;
|
||||
soc_ops.is_hardreset_asserted = _am33xx_is_hardreset_asserted;
|
||||
soc_ops.is_hardreset_asserted = _omap4_is_hardreset_asserted;
|
||||
soc_ops.init_clkdm = _init_clkdm;
|
||||
} else {
|
||||
WARN(1, "omap_hwmod: unknown SoC type\n");
|
||||
|
|
|
@ -544,6 +544,44 @@ static struct omap_hwmod am43xx_hdq1w_hwmod = {
|
|||
},
|
||||
};
|
||||
|
||||
static struct omap_hwmod_class_sysconfig am43xx_vpfe_sysc = {
|
||||
.rev_offs = 0x0,
|
||||
.sysc_offs = 0x104,
|
||||
.sysc_flags = SYSC_HAS_MIDLEMODE | SYSC_HAS_SIDLEMODE,
|
||||
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
|
||||
MSTANDBY_FORCE | MSTANDBY_SMART | MSTANDBY_NO),
|
||||
.sysc_fields = &omap_hwmod_sysc_type2,
|
||||
};
|
||||
|
||||
static struct omap_hwmod_class am43xx_vpfe_hwmod_class = {
|
||||
.name = "vpfe",
|
||||
.sysc = &am43xx_vpfe_sysc,
|
||||
};
|
||||
|
||||
static struct omap_hwmod am43xx_vpfe0_hwmod = {
|
||||
.name = "vpfe0",
|
||||
.class = &am43xx_vpfe_hwmod_class,
|
||||
.clkdm_name = "l3s_clkdm",
|
||||
.prcm = {
|
||||
.omap4 = {
|
||||
.modulemode = MODULEMODE_SWCTRL,
|
||||
.clkctrl_offs = AM43XX_CM_PER_VPFE0_CLKCTRL_OFFSET,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
static struct omap_hwmod am43xx_vpfe1_hwmod = {
|
||||
.name = "vpfe1",
|
||||
.class = &am43xx_vpfe_hwmod_class,
|
||||
.clkdm_name = "l3s_clkdm",
|
||||
.prcm = {
|
||||
.omap4 = {
|
||||
.modulemode = MODULEMODE_SWCTRL,
|
||||
.clkctrl_offs = AM43XX_CM_PER_VPFE1_CLKCTRL_OFFSET,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
/* Interfaces */
|
||||
static struct omap_hwmod_ocp_if am43xx_l3_main__l4_hs = {
|
||||
.master = &am33xx_l3_main_hwmod,
|
||||
|
@ -825,6 +863,34 @@ static struct omap_hwmod_ocp_if am43xx_l4_ls__hdq1w = {
|
|||
.user = OCP_USER_MPU | OCP_USER_SDMA,
|
||||
};
|
||||
|
||||
static struct omap_hwmod_ocp_if am43xx_l3__vpfe0 = {
|
||||
.master = &am43xx_vpfe0_hwmod,
|
||||
.slave = &am33xx_l3_main_hwmod,
|
||||
.clk = "l3_gclk",
|
||||
.user = OCP_USER_MPU | OCP_USER_SDMA,
|
||||
};
|
||||
|
||||
static struct omap_hwmod_ocp_if am43xx_l3__vpfe1 = {
|
||||
.master = &am43xx_vpfe1_hwmod,
|
||||
.slave = &am33xx_l3_main_hwmod,
|
||||
.clk = "l3_gclk",
|
||||
.user = OCP_USER_MPU | OCP_USER_SDMA,
|
||||
};
|
||||
|
||||
static struct omap_hwmod_ocp_if am43xx_l4_ls__vpfe0 = {
|
||||
.master = &am33xx_l4_ls_hwmod,
|
||||
.slave = &am43xx_vpfe0_hwmod,
|
||||
.clk = "l4ls_gclk",
|
||||
.user = OCP_USER_MPU | OCP_USER_SDMA,
|
||||
};
|
||||
|
||||
static struct omap_hwmod_ocp_if am43xx_l4_ls__vpfe1 = {
|
||||
.master = &am33xx_l4_ls_hwmod,
|
||||
.slave = &am43xx_vpfe1_hwmod,
|
||||
.clk = "l4ls_gclk",
|
||||
.user = OCP_USER_MPU | OCP_USER_SDMA,
|
||||
};
|
||||
|
||||
static struct omap_hwmod_ocp_if *am43xx_hwmod_ocp_ifs[] __initdata = {
|
||||
&am33xx_l4_wkup__synctimer,
|
||||
&am43xx_l4_ls__timer8,
|
||||
|
@ -925,6 +991,10 @@ static struct omap_hwmod_ocp_if *am43xx_hwmod_ocp_ifs[] __initdata = {
|
|||
&am43xx_l4_ls__dss_dispc,
|
||||
&am43xx_l4_ls__dss_rfbi,
|
||||
&am43xx_l4_ls__hdq1w,
|
||||
&am43xx_l3__vpfe0,
|
||||
&am43xx_l3__vpfe1,
|
||||
&am43xx_l4_ls__vpfe0,
|
||||
&am43xx_l4_ls__vpfe1,
|
||||
NULL,
|
||||
};
|
||||
|
||||
|
|
|
@ -144,5 +144,6 @@
|
|||
#define AM43XX_CM_PER_USBPHYOCP2SCP1_CLKCTRL_OFFSET 0x05C0
|
||||
#define AM43XX_CM_PER_DSS_CLKCTRL_OFFSET 0x0a20
|
||||
#define AM43XX_CM_PER_HDQ1W_CLKCTRL_OFFSET 0x04a0
|
||||
|
||||
#define AM43XX_CM_PER_VPFE0_CLKCTRL_OFFSET 0x0068
|
||||
#define AM43XX_CM_PER_VPFE1_CLKCTRL_OFFSET 0x0070
|
||||
#endif
|
||||
|
|
|
@ -87,12 +87,6 @@ u32 omap4_prminst_rmw_inst_reg_bits(u32 mask, u32 bits, u8 part, s16 inst,
|
|||
return v;
|
||||
}
|
||||
|
||||
/*
|
||||
* Address offset (in bytes) between the reset control and the reset
|
||||
* status registers: 4 bytes on OMAP4
|
||||
*/
|
||||
#define OMAP4_RST_CTRL_ST_OFFSET 4
|
||||
|
||||
/**
|
||||
* omap4_prminst_is_hardreset_asserted - read the HW reset line state of
|
||||
* submodules contained in the hwmod module
|
||||
|
@ -141,11 +135,11 @@ int omap4_prminst_assert_hardreset(u8 shift, u8 part, s16 inst,
|
|||
* omap4_prminst_deassert_hardreset - deassert a submodule hardreset line and
|
||||
* wait
|
||||
* @shift: register bit shift corresponding to the reset line to deassert
|
||||
* @st_shift: status bit offset, not used for OMAP4+
|
||||
* @st_shift: status bit offset corresponding to the reset line
|
||||
* @part: PRM partition
|
||||
* @inst: PRM instance offset
|
||||
* @rstctrl_offs: reset register offset
|
||||
* @st_offs: reset status register offset, not used for OMAP4+
|
||||
* @rstst_offs: reset status register offset
|
||||
*
|
||||
* Some IPs like dsp, ipu or iva contain processors that require an HW
|
||||
* reset line to be asserted / deasserted in order to fully enable the
|
||||
|
@ -157,11 +151,11 @@ int omap4_prminst_assert_hardreset(u8 shift, u8 part, s16 inst,
|
|||
* of reset, or -EBUSY if the submodule did not exit reset promptly.
|
||||
*/
|
||||
int omap4_prminst_deassert_hardreset(u8 shift, u8 st_shift, u8 part, s16 inst,
|
||||
u16 rstctrl_offs, u16 st_offs)
|
||||
u16 rstctrl_offs, u16 rstst_offs)
|
||||
{
|
||||
int c;
|
||||
u32 mask = 1 << shift;
|
||||
u16 rstst_offs = rstctrl_offs + OMAP4_RST_CTRL_ST_OFFSET;
|
||||
u32 st_mask = 1 << st_shift;
|
||||
|
||||
/* Check the current status to avoid de-asserting the line twice */
|
||||
if (omap4_prminst_is_hardreset_asserted(shift, part, inst,
|
||||
|
@ -169,13 +163,13 @@ int omap4_prminst_deassert_hardreset(u8 shift, u8 st_shift, u8 part, s16 inst,
|
|||
return -EEXIST;
|
||||
|
||||
/* Clear the reset status by writing 1 to the status bit */
|
||||
omap4_prminst_rmw_inst_reg_bits(0xffffffff, mask, part, inst,
|
||||
omap4_prminst_rmw_inst_reg_bits(0xffffffff, st_mask, part, inst,
|
||||
rstst_offs);
|
||||
/* de-assert the reset control line */
|
||||
omap4_prminst_rmw_inst_reg_bits(mask, 0, part, inst, rstctrl_offs);
|
||||
/* wait the status to be set */
|
||||
omap_test_timeout(omap4_prminst_is_hardreset_asserted(shift, part, inst,
|
||||
rstst_offs),
|
||||
omap_test_timeout(omap4_prminst_is_hardreset_asserted(st_shift, part,
|
||||
inst, rstst_offs),
|
||||
MAX_MODULE_HARDRESET_WAIT, c);
|
||||
|
||||
return (c == MAX_MODULE_HARDRESET_WAIT) ? -EBUSY : 0;
|
||||
|
|
|
@ -298,15 +298,12 @@ static int __init omap_dm_timer_init_one(struct omap_dm_timer *timer,
|
|||
if (IS_ERR(src))
|
||||
return PTR_ERR(src);
|
||||
|
||||
if (clk_get_parent(timer->fclk) != src) {
|
||||
r = clk_set_parent(timer->fclk, src);
|
||||
if (r < 0) {
|
||||
pr_warn("%s: %s cannot set source\n", __func__,
|
||||
oh->name);
|
||||
pr_warn("%s: %s cannot set source\n", __func__, oh->name);
|
||||
clk_put(src);
|
||||
return r;
|
||||
}
|
||||
}
|
||||
|
||||
clk_put(src);
|
||||
|
||||
|
|
|
@ -44,11 +44,9 @@ static void __iomem *rk3288_bootram_base;
|
|||
static phys_addr_t rk3288_bootram_phy;
|
||||
|
||||
static struct regmap *pmu_regmap;
|
||||
static struct regmap *grf_regmap;
|
||||
static struct regmap *sgrf_regmap;
|
||||
|
||||
static u32 rk3288_pmu_pwr_mode_con;
|
||||
static u32 rk3288_grf_soc_con0;
|
||||
static u32 rk3288_sgrf_soc_con0;
|
||||
|
||||
static inline u32 rk3288_l2_config(void)
|
||||
|
@ -72,25 +70,11 @@ static void rk3288_slp_mode_set(int level)
|
|||
{
|
||||
u32 mode_set, mode_set1;
|
||||
|
||||
regmap_read(grf_regmap, RK3288_GRF_SOC_CON0, &rk3288_grf_soc_con0);
|
||||
|
||||
regmap_read(sgrf_regmap, RK3288_SGRF_SOC_CON0, &rk3288_sgrf_soc_con0);
|
||||
|
||||
regmap_read(pmu_regmap, RK3288_PMU_PWRMODE_CON,
|
||||
&rk3288_pmu_pwr_mode_con);
|
||||
|
||||
/*
|
||||
* We need set this bit GRF_FORCE_JTAG here, for the debug module,
|
||||
* otherwise, it may become inaccessible after resume.
|
||||
* This creates a potential security issue, as the sdmmc pins may
|
||||
* accept jtag data for a short time during resume if no card is
|
||||
* inserted.
|
||||
* But this is of course also true for the regular boot, before we
|
||||
* turn of the jtag/sdmmc autodetect.
|
||||
*/
|
||||
regmap_write(grf_regmap, RK3288_GRF_SOC_CON0, GRF_FORCE_JTAG |
|
||||
GRF_FORCE_JTAG_WRITE);
|
||||
|
||||
/*
|
||||
* SGRF_FAST_BOOT_EN - system to boot from FAST_BOOT_ADDR
|
||||
* PCLK_WDT_GATE - disable WDT during suspend.
|
||||
|
@ -151,9 +135,6 @@ static void rk3288_slp_mode_set_resume(void)
|
|||
regmap_write(sgrf_regmap, RK3288_SGRF_SOC_CON0,
|
||||
rk3288_sgrf_soc_con0 | SGRF_PCLK_WDT_GATE_WRITE
|
||||
| SGRF_FAST_BOOT_EN_WRITE);
|
||||
|
||||
regmap_write(grf_regmap, RK3288_GRF_SOC_CON0, rk3288_grf_soc_con0 |
|
||||
GRF_FORCE_JTAG_WRITE);
|
||||
}
|
||||
|
||||
static int rockchip_lpmode_enter(unsigned long arg)
|
||||
|
@ -212,13 +193,6 @@ static int rk3288_suspend_init(struct device_node *np)
|
|||
return PTR_ERR(pmu_regmap);
|
||||
}
|
||||
|
||||
grf_regmap = syscon_regmap_lookup_by_compatible(
|
||||
"rockchip,rk3288-grf");
|
||||
if (IS_ERR(grf_regmap)) {
|
||||
pr_err("%s: could not find grf regmap\n", __func__);
|
||||
return PTR_ERR(pmu_regmap);
|
||||
}
|
||||
|
||||
sram_np = of_find_compatible_node(NULL, NULL,
|
||||
"rockchip,rk3288-pmu-sram");
|
||||
if (!sram_np) {
|
||||
|
|
|
@ -48,10 +48,6 @@ static inline void rockchip_suspend_init(void)
|
|||
#define RK3288_PMU_WAKEUP_RST_CLR_CNT 0x44
|
||||
#define RK3288_PMU_PWRMODE_CON1 0x90
|
||||
|
||||
#define RK3288_GRF_SOC_CON0 0x244
|
||||
#define GRF_FORCE_JTAG BIT(12)
|
||||
#define GRF_FORCE_JTAG_WRITE BIT(28)
|
||||
|
||||
#define RK3288_SGRF_SOC_CON0 (0x0000)
|
||||
#define RK3288_SGRF_FAST_BOOT_ADDR (0x0120)
|
||||
#define SGRF_PCLK_WDT_GATE BIT(6)
|
||||
|
|
|
@ -272,6 +272,7 @@ void xen_arch_pre_suspend(void) { }
|
|||
void xen_arch_post_suspend(int suspend_cancelled) { }
|
||||
void xen_timer_resume(void) { }
|
||||
void xen_arch_resume(void) { }
|
||||
void xen_arch_suspend(void) { }
|
||||
|
||||
|
||||
/* In the hypervisor.S file. */
|
||||
|
|
|
@ -21,6 +21,20 @@
|
|||
clock-output-names = "juno_mb:clk25mhz";
|
||||
};
|
||||
|
||||
v2m_refclk1mhz: refclk1mhz {
|
||||
compatible = "fixed-clock";
|
||||
#clock-cells = <0>;
|
||||
clock-frequency = <1000000>;
|
||||
clock-output-names = "juno_mb:refclk1mhz";
|
||||
};
|
||||
|
||||
v2m_refclk32khz: refclk32khz {
|
||||
compatible = "fixed-clock";
|
||||
#clock-cells = <0>;
|
||||
clock-frequency = <32768>;
|
||||
clock-output-names = "juno_mb:refclk32khz";
|
||||
};
|
||||
|
||||
motherboard {
|
||||
compatible = "arm,vexpress,v2p-p1", "simple-bus";
|
||||
#address-cells = <2>; /* SMB chipselect number and offset */
|
||||
|
@ -66,6 +80,15 @@
|
|||
#size-cells = <1>;
|
||||
ranges = <0 3 0 0x200000>;
|
||||
|
||||
v2m_sysctl: sysctl@020000 {
|
||||
compatible = "arm,sp810", "arm,primecell";
|
||||
reg = <0x020000 0x1000>;
|
||||
clocks = <&v2m_refclk32khz>, <&v2m_refclk1mhz>, <&mb_clk24mhz>;
|
||||
clock-names = "refclk", "timclk", "apb_pclk";
|
||||
#clock-cells = <1>;
|
||||
clock-output-names = "timerclken0", "timerclken1", "timerclken2", "timerclken3";
|
||||
};
|
||||
|
||||
mmci@050000 {
|
||||
compatible = "arm,pl180", "arm,primecell";
|
||||
reg = <0x050000 0x1000>;
|
||||
|
@ -106,16 +129,16 @@
|
|||
compatible = "arm,sp804", "arm,primecell";
|
||||
reg = <0x110000 0x10000>;
|
||||
interrupts = <9>;
|
||||
clocks = <&mb_clk24mhz>, <&soc_smc50mhz>;
|
||||
clock-names = "timclken1", "apb_pclk";
|
||||
clocks = <&v2m_sysctl 0>, <&v2m_sysctl 1>, <&mb_clk24mhz>;
|
||||
clock-names = "timclken1", "timclken2", "apb_pclk";
|
||||
};
|
||||
|
||||
v2m_timer23: timer@120000 {
|
||||
compatible = "arm,sp804", "arm,primecell";
|
||||
reg = <0x120000 0x10000>;
|
||||
interrupts = <9>;
|
||||
clocks = <&mb_clk24mhz>, <&soc_smc50mhz>;
|
||||
clock-names = "timclken1", "apb_pclk";
|
||||
clocks = <&v2m_sysctl 2>, <&v2m_sysctl 3>, <&mb_clk24mhz>;
|
||||
clock-names = "timclken1", "timclken2", "apb_pclk";
|
||||
};
|
||||
|
||||
rtc@170000 {
|
||||
|
|
|
@ -24,7 +24,6 @@
|
|||
#include <asm/cacheflush.h>
|
||||
#include <asm/alternative.h>
|
||||
#include <asm/cpufeature.h>
|
||||
#include <asm/insn.h>
|
||||
#include <linux/stop_machine.h>
|
||||
|
||||
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
|
||||
|
@ -34,48 +33,6 @@ struct alt_region {
|
|||
struct alt_instr *end;
|
||||
};
|
||||
|
||||
/*
|
||||
* Decode the imm field of a b/bl instruction, and return the byte
|
||||
* offset as a signed value (so it can be used when computing a new
|
||||
* branch target).
|
||||
*/
|
||||
static s32 get_branch_offset(u32 insn)
|
||||
{
|
||||
s32 imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
|
||||
|
||||
/* sign-extend the immediate before turning it into a byte offset */
|
||||
return (imm << 6) >> 4;
|
||||
}
|
||||
|
||||
static u32 get_alt_insn(u8 *insnptr, u8 *altinsnptr)
|
||||
{
|
||||
u32 insn;
|
||||
|
||||
aarch64_insn_read(altinsnptr, &insn);
|
||||
|
||||
/* Stop the world on instructions we don't support... */
|
||||
BUG_ON(aarch64_insn_is_cbz(insn));
|
||||
BUG_ON(aarch64_insn_is_cbnz(insn));
|
||||
BUG_ON(aarch64_insn_is_bcond(insn));
|
||||
/* ... and there is probably more. */
|
||||
|
||||
if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
|
||||
enum aarch64_insn_branch_type type;
|
||||
unsigned long target;
|
||||
|
||||
if (aarch64_insn_is_b(insn))
|
||||
type = AARCH64_INSN_BRANCH_NOLINK;
|
||||
else
|
||||
type = AARCH64_INSN_BRANCH_LINK;
|
||||
|
||||
target = (unsigned long)altinsnptr + get_branch_offset(insn);
|
||||
insn = aarch64_insn_gen_branch_imm((unsigned long)insnptr,
|
||||
target, type);
|
||||
}
|
||||
|
||||
return insn;
|
||||
}
|
||||
|
||||
static int __apply_alternatives(void *alt_region)
|
||||
{
|
||||
struct alt_instr *alt;
|
||||
|
@ -83,9 +40,6 @@ static int __apply_alternatives(void *alt_region)
|
|||
u8 *origptr, *replptr;
|
||||
|
||||
for (alt = region->begin; alt < region->end; alt++) {
|
||||
u32 insn;
|
||||
int i;
|
||||
|
||||
if (!cpus_have_cap(alt->cpufeature))
|
||||
continue;
|
||||
|
||||
|
@ -95,12 +49,7 @@ static int __apply_alternatives(void *alt_region)
|
|||
|
||||
origptr = (u8 *)&alt->orig_offset + alt->orig_offset;
|
||||
replptr = (u8 *)&alt->alt_offset + alt->alt_offset;
|
||||
|
||||
for (i = 0; i < alt->alt_len; i += sizeof(insn)) {
|
||||
insn = get_alt_insn(origptr + i, replptr + i);
|
||||
aarch64_insn_write(origptr + i, insn);
|
||||
}
|
||||
|
||||
memcpy(origptr, replptr, alt->alt_len);
|
||||
flush_icache_range((uintptr_t)origptr,
|
||||
(uintptr_t)(origptr + alt->alt_len));
|
||||
}
|
||||
|
|
|
@ -1315,15 +1315,15 @@ static int armpmu_device_probe(struct platform_device *pdev)
|
|||
if (!cpu_pmu)
|
||||
return -ENODEV;
|
||||
|
||||
irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
|
||||
if (!irqs)
|
||||
return -ENOMEM;
|
||||
|
||||
/* Don't bother with PPIs; they're already affine */
|
||||
irq = platform_get_irq(pdev, 0);
|
||||
if (irq >= 0 && irq_is_percpu(irq))
|
||||
return 0;
|
||||
|
||||
irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
|
||||
if (!irqs)
|
||||
return -ENOMEM;
|
||||
|
||||
for (i = 0; i < pdev->num_resources; ++i) {
|
||||
struct device_node *dn;
|
||||
int cpu;
|
||||
|
|
|
@ -328,10 +328,12 @@ static int ptdump_init(void)
|
|||
for (j = 0; j < pg_level[i].num; j++)
|
||||
pg_level[i].mask |= pg_level[i].bits[j].mask;
|
||||
|
||||
#ifdef CONFIG_SPARSEMEM_VMEMMAP
|
||||
address_markers[VMEMMAP_START_NR].start_address =
|
||||
(unsigned long)virt_to_page(PAGE_OFFSET);
|
||||
address_markers[VMEMMAP_END_NR].start_address =
|
||||
(unsigned long)virt_to_page(high_memory);
|
||||
#endif
|
||||
|
||||
pe = debugfs_create_file("kernel_page_tables", 0400, NULL, NULL,
|
||||
&ptdump_fops);
|
||||
|
|
|
@ -487,7 +487,7 @@ emit_cond_jmp:
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
imm64 = (u64)insn1.imm << 32 | imm;
|
||||
imm64 = (u64)insn1.imm << 32 | (u32)imm;
|
||||
emit_a64_mov_i64(dst, imm64, ctx);
|
||||
|
||||
return 1;
|
||||
|
|
|
@ -277,7 +277,7 @@ LDFLAGS += -m $(ld-emul)
|
|||
ifdef CONFIG_MIPS
|
||||
CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -x c /dev/null | \
|
||||
egrep -vw '__GNUC_(|MINOR_|PATCHLEVEL_)_' | \
|
||||
sed -e "s/^\#define /-D'/" -e "s/ /'='/" -e "s/$$/'/")
|
||||
sed -e "s/^\#define /-D'/" -e "s/ /'='/" -e "s/$$/'/" -e 's/\$$/&&/g')
|
||||
ifdef CONFIG_64BIT
|
||||
CHECKFLAGS += -m64
|
||||
endif
|
||||
|
|
|
@ -304,7 +304,7 @@ do { \
|
|||
\
|
||||
current->thread.abi = &mips_abi; \
|
||||
\
|
||||
current->thread.fpu.fcr31 = current_cpu_data.fpu_csr31; \
|
||||
current->thread.fpu.fcr31 = boot_cpu_data.fpu_csr31; \
|
||||
} while (0)
|
||||
|
||||
#endif /* CONFIG_32BIT */
|
||||
|
@ -366,7 +366,7 @@ do { \
|
|||
else \
|
||||
current->thread.abi = &mips_abi; \
|
||||
\
|
||||
current->thread.fpu.fcr31 = current_cpu_data.fpu_csr31; \
|
||||
current->thread.fpu.fcr31 = boot_cpu_data.fpu_csr31; \
|
||||
\
|
||||
p = personality(current->personality); \
|
||||
if (p != PER_LINUX32 && p != PER_LINUX) \
|
||||
|
|
|
@ -176,7 +176,7 @@ int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
|
|||
|
||||
__get_user(value, data + 64);
|
||||
fcr31 = child->thread.fpu.fcr31;
|
||||
mask = current_cpu_data.fpu_msk31;
|
||||
mask = boot_cpu_data.fpu_msk31;
|
||||
child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask);
|
||||
|
||||
/* FIR may not be written. */
|
||||
|
|
|
@ -92,7 +92,7 @@ static void __init cps_smp_setup(void)
|
|||
#ifdef CONFIG_MIPS_MT_FPAFF
|
||||
/* If we have an FPU, enroll ourselves in the FPU-full mask */
|
||||
if (cpu_has_fpu)
|
||||
cpu_set(0, mt_fpu_cpumask);
|
||||
cpumask_set_cpu(0, &mt_fpu_cpumask);
|
||||
#endif /* CONFIG_MIPS_MT_FPAFF */
|
||||
}
|
||||
|
||||
|
|
|
@ -269,7 +269,6 @@ static void __show_regs(const struct pt_regs *regs)
|
|||
*/
|
||||
printk("epc : %0*lx %pS\n", field, regs->cp0_epc,
|
||||
(void *) regs->cp0_epc);
|
||||
printk(" %s\n", print_tainted());
|
||||
printk("ra : %0*lx %pS\n", field, regs->regs[31],
|
||||
(void *) regs->regs[31]);
|
||||
|
||||
|
|
|
@ -2389,7 +2389,6 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
|
|||
{
|
||||
unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
|
||||
enum emulation_result er = EMULATE_DONE;
|
||||
unsigned long curr_pc;
|
||||
|
||||
if (run->mmio.len > sizeof(*gpr)) {
|
||||
kvm_err("Bad MMIO length: %d", run->mmio.len);
|
||||
|
@ -2397,11 +2396,6 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
|
|||
goto done;
|
||||
}
|
||||
|
||||
/*
|
||||
* Update PC and hold onto current PC in case there is
|
||||
* an error and we want to rollback the PC
|
||||
*/
|
||||
curr_pc = vcpu->arch.pc;
|
||||
er = update_pc(vcpu, vcpu->arch.pending_load_cause);
|
||||
if (er == EMULATE_FAIL)
|
||||
return er;
|
||||
|
|
|
@ -889,7 +889,7 @@ static inline void cop1_cfc(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
|
|||
break;
|
||||
|
||||
case FPCREG_RID:
|
||||
value = current_cpu_data.fpu_id;
|
||||
value = boot_cpu_data.fpu_id;
|
||||
break;
|
||||
|
||||
default:
|
||||
|
@ -921,7 +921,7 @@ static inline void cop1_ctc(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
|
|||
(void *)xcp->cp0_epc, MIPSInst_RT(ir), value);
|
||||
|
||||
/* Preserve read-only bits. */
|
||||
mask = current_cpu_data.fpu_msk31;
|
||||
mask = boot_cpu_data.fpu_msk31;
|
||||
fcr31 = (value & ~mask) | (fcr31 & mask);
|
||||
break;
|
||||
|
||||
|
|
|
@ -495,7 +495,7 @@ static void r4k_tlb_configure(void)
|
|||
|
||||
if (cpu_has_rixi) {
|
||||
/*
|
||||
* Enable the no read, no exec bits, and enable large virtual
|
||||
* Enable the no read, no exec bits, and enable large physical
|
||||
* address.
|
||||
*/
|
||||
#ifdef CONFIG_64BIT
|
||||
|
|
|
@ -130,9 +130,9 @@ struct platform_device ip32_rtc_device = {
|
|||
.resource = ip32_rtc_resources,
|
||||
};
|
||||
|
||||
+static int __init sgio2_rtc_devinit(void)
|
||||
static __init int sgio2_rtc_devinit(void)
|
||||
{
|
||||
return platform_device_register(&ip32_rtc_device);
|
||||
}
|
||||
|
||||
device_initcall(sgio2_cmos_devinit);
|
||||
device_initcall(sgio2_rtc_devinit);
|
||||
|
|
|
@ -348,6 +348,10 @@ struct pt_regs; /* forward declaration... */
|
|||
|
||||
#define ELF_HWCAP 0
|
||||
|
||||
#define STACK_RND_MASK (is_32bit_task() ? \
|
||||
0x7ff >> (PAGE_SHIFT - 12) : \
|
||||
0x3ffff >> (PAGE_SHIFT - 12))
|
||||
|
||||
struct mm_struct;
|
||||
extern unsigned long arch_randomize_brk(struct mm_struct *);
|
||||
#define arch_randomize_brk arch_randomize_brk
|
||||
|
|
|
@ -181,9 +181,12 @@ int dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *r)
|
|||
return 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* Copy architecture-specific thread state
|
||||
*/
|
||||
int
|
||||
copy_thread(unsigned long clone_flags, unsigned long usp,
|
||||
unsigned long arg, struct task_struct *p)
|
||||
unsigned long kthread_arg, struct task_struct *p)
|
||||
{
|
||||
struct pt_regs *cregs = &(p->thread.regs);
|
||||
void *stack = task_stack_page(p);
|
||||
|
@ -195,11 +198,10 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
|
|||
extern void * const child_return;
|
||||
|
||||
if (unlikely(p->flags & PF_KTHREAD)) {
|
||||
/* kernel thread */
|
||||
memset(cregs, 0, sizeof(struct pt_regs));
|
||||
if (!usp) /* idle thread */
|
||||
return 0;
|
||||
|
||||
/* kernel thread */
|
||||
/* Must exit via ret_from_kernel_thread in order
|
||||
* to call schedule_tail()
|
||||
*/
|
||||
|
@ -215,7 +217,7 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
|
|||
#else
|
||||
cregs->gr[26] = usp;
|
||||
#endif
|
||||
cregs->gr[25] = arg;
|
||||
cregs->gr[25] = kthread_arg;
|
||||
} else {
|
||||
/* user thread */
|
||||
/* usp must be word aligned. This also prevents users from
|
||||
|
|
|
@ -77,6 +77,9 @@ static unsigned long mmap_upper_limit(void)
|
|||
if (stack_base > STACK_SIZE_MAX)
|
||||
stack_base = STACK_SIZE_MAX;
|
||||
|
||||
/* Add space for stack randomization. */
|
||||
stack_base += (STACK_RND_MASK << PAGE_SHIFT);
|
||||
|
||||
return PAGE_ALIGN(STACK_TOP - stack_base);
|
||||
}
|
||||
|
||||
|
|
|
@ -73,7 +73,7 @@ void save_mce_event(struct pt_regs *regs, long handled,
|
|||
uint64_t nip, uint64_t addr)
|
||||
{
|
||||
uint64_t srr1;
|
||||
int index = __this_cpu_inc_return(mce_nest_count);
|
||||
int index = __this_cpu_inc_return(mce_nest_count) - 1;
|
||||
struct machine_check_event *mce = this_cpu_ptr(&mce_event[index]);
|
||||
|
||||
/*
|
||||
|
@ -184,7 +184,7 @@ void machine_check_queue_event(void)
|
|||
if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
|
||||
return;
|
||||
|
||||
index = __this_cpu_inc_return(mce_queue_count);
|
||||
index = __this_cpu_inc_return(mce_queue_count) - 1;
|
||||
/* If queue is full, just return for now. */
|
||||
if (index >= MAX_MC_EVT) {
|
||||
__this_cpu_dec(mce_queue_count);
|
||||
|
|
|
@ -213,6 +213,7 @@ SECTIONS
|
|||
*(.opd)
|
||||
}
|
||||
|
||||
. = ALIGN(256);
|
||||
.got : AT(ADDR(.got) - LOAD_OFFSET) {
|
||||
__toc_start = .;
|
||||
#ifndef CONFIG_RELOCATABLE
|
||||
|
|
|
@ -1952,7 +1952,7 @@ static void post_guest_process(struct kvmppc_vcore *vc)
|
|||
*/
|
||||
static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
|
||||
{
|
||||
struct kvm_vcpu *vcpu;
|
||||
struct kvm_vcpu *vcpu, *vnext;
|
||||
int i;
|
||||
int srcu_idx;
|
||||
|
||||
|
@ -1982,7 +1982,8 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
|
|||
*/
|
||||
if ((threads_per_core > 1) &&
|
||||
((vc->num_threads > threads_per_subcore) || !on_primary_thread())) {
|
||||
list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
|
||||
list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
|
||||
arch.run_list) {
|
||||
vcpu->arch.ret = -EBUSY;
|
||||
kvmppc_remove_runnable(vc, vcpu);
|
||||
wake_up(&vcpu->arch.cpu_run);
|
||||
|
|
|
@ -689,27 +689,34 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
|
|||
struct page *
|
||||
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
|
||||
{
|
||||
pte_t *ptep;
|
||||
struct page *page;
|
||||
pte_t *ptep, pte;
|
||||
unsigned shift;
|
||||
unsigned long mask, flags;
|
||||
struct page *page = ERR_PTR(-EINVAL);
|
||||
|
||||
local_irq_save(flags);
|
||||
ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift);
|
||||
if (!ptep)
|
||||
goto no_page;
|
||||
pte = READ_ONCE(*ptep);
|
||||
/*
|
||||
* Verify it is a huge page else bail.
|
||||
* Transparent hugepages are handled by generic code. We can skip them
|
||||
* here.
|
||||
*/
|
||||
local_irq_save(flags);
|
||||
ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift);
|
||||
if (!shift || pmd_trans_huge(__pmd(pte_val(pte))))
|
||||
goto no_page;
|
||||
|
||||
/* Verify it is a huge page else bail. */
|
||||
if (!ptep || !shift || pmd_trans_huge(*(pmd_t *)ptep)) {
|
||||
local_irq_restore(flags);
|
||||
return ERR_PTR(-EINVAL);
|
||||
if (!pte_present(pte)) {
|
||||
page = NULL;
|
||||
goto no_page;
|
||||
}
|
||||
mask = (1UL << shift) - 1;
|
||||
page = pte_page(*ptep);
|
||||
page = pte_page(pte);
|
||||
if (page)
|
||||
page += (address & mask) / PAGE_SIZE;
|
||||
|
||||
no_page:
|
||||
local_irq_restore(flags);
|
||||
return page;
|
||||
}
|
||||
|
|
|
@ -839,6 +839,17 @@ pmd_t pmdp_get_and_clear(struct mm_struct *mm,
|
|||
* hash fault look at them.
|
||||
*/
|
||||
memset(pgtable, 0, PTE_FRAG_SIZE);
|
||||
/*
|
||||
* Serialize against find_linux_pte_or_hugepte which does lock-less
|
||||
* lookup in page tables with local interrupts disabled. For huge pages
|
||||
* it casts pmd_t to pte_t. Since format of pte_t is different from
|
||||
* pmd_t we want to prevent transit from pmd pointing to page table
|
||||
* to pmd pointing to huge page (and back) while interrupts are disabled.
|
||||
* We clear pmd to possibly replace it with page table pointer in
|
||||
* different code paths. So make sure we wait for the parallel
|
||||
* find_linux_pte_or_hugepage to finish.
|
||||
*/
|
||||
kick_all_cpus_sync();
|
||||
return old_pmd;
|
||||
}
|
||||
|
||||
|
|
|
@ -16,11 +16,12 @@
|
|||
#define GHASH_DIGEST_SIZE 16
|
||||
|
||||
struct ghash_ctx {
|
||||
u8 icv[16];
|
||||
u8 key[16];
|
||||
u8 key[GHASH_BLOCK_SIZE];
|
||||
};
|
||||
|
||||
struct ghash_desc_ctx {
|
||||
u8 icv[GHASH_BLOCK_SIZE];
|
||||
u8 key[GHASH_BLOCK_SIZE];
|
||||
u8 buffer[GHASH_BLOCK_SIZE];
|
||||
u32 bytes;
|
||||
};
|
||||
|
@ -28,8 +29,10 @@ struct ghash_desc_ctx {
|
|||
static int ghash_init(struct shash_desc *desc)
|
||||
{
|
||||
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
|
||||
struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
|
||||
|
||||
memset(dctx, 0, sizeof(*dctx));
|
||||
memcpy(dctx->key, ctx->key, GHASH_BLOCK_SIZE);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -45,7 +48,6 @@ static int ghash_setkey(struct crypto_shash *tfm,
|
|||
}
|
||||
|
||||
memcpy(ctx->key, key, GHASH_BLOCK_SIZE);
|
||||
memset(ctx->icv, 0, GHASH_BLOCK_SIZE);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -54,7 +56,6 @@ static int ghash_update(struct shash_desc *desc,
|
|||
const u8 *src, unsigned int srclen)
|
||||
{
|
||||
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
|
||||
struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
|
||||
unsigned int n;
|
||||
u8 *buf = dctx->buffer;
|
||||
int ret;
|
||||
|
@ -70,7 +71,7 @@ static int ghash_update(struct shash_desc *desc,
|
|||
src += n;
|
||||
|
||||
if (!dctx->bytes) {
|
||||
ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf,
|
||||
ret = crypt_s390_kimd(KIMD_GHASH, dctx, buf,
|
||||
GHASH_BLOCK_SIZE);
|
||||
if (ret != GHASH_BLOCK_SIZE)
|
||||
return -EIO;
|
||||
|
@ -79,7 +80,7 @@ static int ghash_update(struct shash_desc *desc,
|
|||
|
||||
n = srclen & ~(GHASH_BLOCK_SIZE - 1);
|
||||
if (n) {
|
||||
ret = crypt_s390_kimd(KIMD_GHASH, ctx, src, n);
|
||||
ret = crypt_s390_kimd(KIMD_GHASH, dctx, src, n);
|
||||
if (ret != n)
|
||||
return -EIO;
|
||||
src += n;
|
||||
|
@ -94,7 +95,7 @@ static int ghash_update(struct shash_desc *desc,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
|
||||
static int ghash_flush(struct ghash_desc_ctx *dctx)
|
||||
{
|
||||
u8 *buf = dctx->buffer;
|
||||
int ret;
|
||||
|
@ -104,24 +105,24 @@ static int ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
|
|||
|
||||
memset(pos, 0, dctx->bytes);
|
||||
|
||||
ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf, GHASH_BLOCK_SIZE);
|
||||
ret = crypt_s390_kimd(KIMD_GHASH, dctx, buf, GHASH_BLOCK_SIZE);
|
||||
if (ret != GHASH_BLOCK_SIZE)
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
dctx->bytes = 0;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ghash_final(struct shash_desc *desc, u8 *dst)
|
||||
{
|
||||
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
|
||||
struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
|
||||
int ret;
|
||||
|
||||
ret = ghash_flush(ctx, dctx);
|
||||
ret = ghash_flush(dctx);
|
||||
if (!ret)
|
||||
memcpy(dst, ctx->icv, GHASH_BLOCK_SIZE);
|
||||
memcpy(dst, dctx->icv, GHASH_BLOCK_SIZE);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
|
|
@@ -125,7 +125,7 @@ static int generate_entropy(u8 *ebuf, size_t nbytes)
/* fill page with urandom bytes */
get_random_bytes(pg, PAGE_SIZE);
/* exor page with stckf values */
for (n = 0; n < sizeof(PAGE_SIZE/sizeof(u64)); n++) {
for (n = 0; n < PAGE_SIZE / sizeof(u64); n++) {
u64 *p = ((u64 *)pg) + n;
*p ^= get_tod_clock_fast();
}

@@ -494,7 +494,7 @@ static inline int pmd_large(pmd_t pmd)
return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}

static inline int pmd_pfn(pmd_t pmd)
static inline unsigned long pmd_pfn(pmd_t pmd)
{
unsigned long origin_mask;

@@ -443,8 +443,11 @@ static void bpf_jit_epilogue(struct bpf_jit *jit)

/*
* Compile one eBPF instruction into s390x code
*
* NOTE: Use noinline because for gcov (-fprofile-arcs) gcc allocates a lot of
* stack space for the large switch statement.
*/
static int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i)
static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i)
{
struct bpf_insn *insn = &fp->insnsi[i];
int jmp_off, last, insn_count = 1;

@@ -588,8 +591,8 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i)
EMIT4(0xb9160000, dst_reg, rc_reg);
break;
}
case BPF_ALU64 | BPF_DIV | BPF_X: /* dst = dst / (u32) src */
case BPF_ALU64 | BPF_MOD | BPF_X: /* dst = dst % (u32) src */
case BPF_ALU64 | BPF_DIV | BPF_X: /* dst = dst / src */
case BPF_ALU64 | BPF_MOD | BPF_X: /* dst = dst % src */
{
int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;

@@ -602,10 +605,8 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i)
EMIT4_IMM(0xa7090000, REG_W0, 0);
/* lgr %w1,%dst */
EMIT4(0xb9040000, REG_W1, dst_reg);
/* llgfr %dst,%src (u32 cast) */
EMIT4(0xb9160000, dst_reg, src_reg);
/* dlgr %w0,%dst */
EMIT4(0xb9870000, REG_W0, dst_reg);
EMIT4(0xb9870000, REG_W0, src_reg);
/* lgr %dst,%rc */
EMIT4(0xb9040000, dst_reg, rc_reg);
break;

@@ -632,8 +633,8 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i)
EMIT4(0xb9160000, dst_reg, rc_reg);
break;
}
case BPF_ALU64 | BPF_DIV | BPF_K: /* dst = dst / (u32) imm */
case BPF_ALU64 | BPF_MOD | BPF_K: /* dst = dst % (u32) imm */
case BPF_ALU64 | BPF_DIV | BPF_K: /* dst = dst / imm */
case BPF_ALU64 | BPF_MOD | BPF_K: /* dst = dst % imm */
{
int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;

@@ -649,7 +650,7 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i)
EMIT4(0xb9040000, REG_W1, dst_reg);
/* dlg %w0,<d(imm)>(%l) */
EMIT6_DISP_LH(0xe3000000, 0x0087, REG_W0, REG_0, REG_L,
EMIT_CONST_U64((u32) imm));
EMIT_CONST_U64(imm));
/* lgr %dst,%rc */
EMIT4(0xb9040000, dst_reg, rc_reg);
break;

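The hunks above widen the divisor/modulus operand for the 64-bit ALU cases; a small stand-alone C illustration of why the truncation mattered (the values are chosen only to make the difference visible, they are not from the source):

#include <stdint.h>
#include <stdio.h>

static uint64_t div_old(uint64_t dst, uint64_t src)
{
	return dst / (uint32_t)src;     /* operand truncated to 32 bits */
}

static uint64_t div_new(uint64_t dst, uint64_t src)
{
	return dst / src;               /* full 64-bit divide */
}

int main(void)
{
	uint64_t dst = 1ULL << 40;
	uint64_t src = (1ULL << 32) + 5;        /* truncates to 5 */

	printf("truncated: %llu\n", (unsigned long long)div_old(dst, src));
	printf("64-bit:    %llu\n", (unsigned long long)div_new(dst, src));
	return 0;
}
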
@ -207,6 +207,7 @@ union kvm_mmu_page_role {
|
|||
unsigned nxe:1;
|
||||
unsigned cr0_wp:1;
|
||||
unsigned smep_andnot_wp:1;
|
||||
unsigned smap_andnot_wp:1;
|
||||
};
|
||||
};
|
||||
|
||||
|
@ -400,6 +401,7 @@ struct kvm_vcpu_arch {
|
|||
struct kvm_mmu_memory_cache mmu_page_header_cache;
|
||||
|
||||
struct fpu guest_fpu;
|
||||
bool eager_fpu;
|
||||
u64 xcr0;
|
||||
u64 guest_supported_xcr0;
|
||||
u32 guest_xstate_size;
|
||||
|
@ -743,6 +745,7 @@ struct kvm_x86_ops {
|
|||
void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
|
||||
unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
|
||||
void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
|
||||
void (*fpu_activate)(struct kvm_vcpu *vcpu);
|
||||
void (*fpu_deactivate)(struct kvm_vcpu *vcpu);
|
||||
|
||||
void (*tlb_flush)(struct kvm_vcpu *vcpu);
|
||||
|
|
|
@ -1134,7 +1134,7 @@ static __initconst const u64 slm_hw_cache_extra_regs
|
|||
[ C(LL ) ] = {
|
||||
[ C(OP_READ) ] = {
|
||||
[ C(RESULT_ACCESS) ] = SLM_DMND_READ|SLM_LLC_ACCESS,
|
||||
[ C(RESULT_MISS) ] = SLM_DMND_READ|SLM_LLC_MISS,
|
||||
[ C(RESULT_MISS) ] = 0,
|
||||
},
|
||||
[ C(OP_WRITE) ] = {
|
||||
[ C(RESULT_ACCESS) ] = SLM_DMND_WRITE|SLM_LLC_ACCESS,
|
||||
|
@ -1184,8 +1184,7 @@ static __initconst const u64 slm_hw_cache_event_ids
|
|||
[ C(OP_READ) ] = {
|
||||
/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
|
||||
[ C(RESULT_ACCESS) ] = 0x01b7,
|
||||
/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
|
||||
[ C(RESULT_MISS) ] = 0x01b7,
|
||||
[ C(RESULT_MISS) ] = 0,
|
||||
},
|
||||
[ C(OP_WRITE) ] = {
|
||||
/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
|
||||
|
@ -1217,7 +1216,7 @@ static __initconst const u64 slm_hw_cache_event_ids
|
|||
[ C(ITLB) ] = {
|
||||
[ C(OP_READ) ] = {
|
||||
[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
|
||||
[ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
|
||||
[ C(RESULT_MISS) ] = 0x40205, /* PAGE_WALKS.I_SIDE_WALKS */
|
||||
},
|
||||
[ C(OP_WRITE) ] = {
|
||||
[ C(RESULT_ACCESS) ] = -1,
|
||||
|
|
|
@ -722,6 +722,7 @@ static int __init rapl_pmu_init(void)
|
|||
break;
|
||||
case 60: /* Haswell */
|
||||
case 69: /* Haswell-Celeron */
|
||||
case 61: /* Broadwell */
|
||||
rapl_cntr_mask = RAPL_IDX_HSW;
|
||||
rapl_pmu_events_group.attrs = rapl_events_hsw_attr;
|
||||
break;
|
||||
|
|
|
@ -16,6 +16,8 @@
|
|||
#include <linux/module.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <asm/i387.h> /* For use_eager_fpu. Ugh! */
|
||||
#include <asm/fpu-internal.h> /* For use_eager_fpu. Ugh! */
|
||||
#include <asm/user.h>
|
||||
#include <asm/xsave.h>
|
||||
#include "cpuid.h"
|
||||
|
@ -95,6 +97,8 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
|
|||
if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
|
||||
best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
|
||||
|
||||
vcpu->arch.eager_fpu = guest_cpuid_has_mpx(vcpu);
|
||||
|
||||
/*
|
||||
* The existing code assumes virtual address is 48-bit in the canonical
|
||||
* address checks; exit if it is ever changed.
|
||||
|
|
|
@ -117,4 +117,12 @@ static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu)
|
|||
best = kvm_find_cpuid_entry(vcpu, 7, 0);
|
||||
return best && (best->ebx & bit(X86_FEATURE_RTM));
|
||||
}
|
||||
|
||||
static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct kvm_cpuid_entry2 *best;
|
||||
|
||||
best = kvm_find_cpuid_entry(vcpu, 7, 0);
|
||||
return best && (best->ebx & bit(X86_FEATURE_MPX));
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -3736,7 +3736,7 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
|
|||
}
|
||||
}
|
||||
|
||||
void update_permission_bitmask(struct kvm_vcpu *vcpu,
|
||||
static void update_permission_bitmask(struct kvm_vcpu *vcpu,
|
||||
struct kvm_mmu *mmu, bool ept)
|
||||
{
|
||||
unsigned bit, byte, pfec;
|
||||
|
@ -3918,6 +3918,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
|
|||
void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
|
||||
bool smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
|
||||
struct kvm_mmu *context = &vcpu->arch.mmu;
|
||||
|
||||
MMU_WARN_ON(VALID_PAGE(context->root_hpa));
|
||||
|
@ -3936,6 +3937,8 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
|
|||
context->base_role.cr0_wp = is_write_protection(vcpu);
|
||||
context->base_role.smep_andnot_wp
|
||||
= smep && !is_write_protection(vcpu);
|
||||
context->base_role.smap_andnot_wp
|
||||
= smap && !is_write_protection(vcpu);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
|
||||
|
||||
|
@ -4207,12 +4210,18 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
|
|||
const u8 *new, int bytes)
|
||||
{
|
||||
gfn_t gfn = gpa >> PAGE_SHIFT;
|
||||
union kvm_mmu_page_role mask = { .word = 0 };
|
||||
struct kvm_mmu_page *sp;
|
||||
LIST_HEAD(invalid_list);
|
||||
u64 entry, gentry, *spte;
|
||||
int npte;
|
||||
bool remote_flush, local_flush, zap_page;
|
||||
union kvm_mmu_page_role mask = (union kvm_mmu_page_role) {
|
||||
.cr0_wp = 1,
|
||||
.cr4_pae = 1,
|
||||
.nxe = 1,
|
||||
.smep_andnot_wp = 1,
|
||||
.smap_andnot_wp = 1,
|
||||
};
|
||||
|
||||
/*
|
||||
* If we don't have indirect shadow pages, it means no page is
|
||||
|
@ -4238,7 +4247,6 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
|
|||
++vcpu->kvm->stat.mmu_pte_write;
|
||||
kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
|
||||
|
||||
mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
|
||||
for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
|
||||
if (detect_write_misaligned(sp, gpa, bytes) ||
|
||||
detect_write_flooding(sp)) {
|
||||
|
|
|
@ -71,8 +71,6 @@ enum {
|
|||
int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct);
|
||||
void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
|
||||
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly);
|
||||
void update_permission_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
|
||||
bool ept);
|
||||
|
||||
static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
|
||||
{
|
||||
|
@ -166,6 +164,8 @@ static inline bool permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
|
|||
int index = (pfec >> 1) +
|
||||
(smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
|
||||
|
||||
WARN_ON(pfec & PFERR_RSVD_MASK);
|
||||
|
||||
return (mmu->permissions[index] >> pte_access) & 1;
|
||||
}
|
||||
|
||||
|
|
|
@ -718,6 +718,13 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
|
|||
mmu_is_nested(vcpu));
|
||||
if (likely(r != RET_MMIO_PF_INVALID))
|
||||
return r;
|
||||
|
||||
/*
|
||||
* page fault with PFEC.RSVD = 1 is caused by shadow
|
||||
* page fault, should not be used to walk guest page
|
||||
* table.
|
||||
*/
|
||||
error_code &= ~PFERR_RSVD_MASK;
|
||||
};
|
||||
|
||||
r = mmu_topup_memory_caches(vcpu);
|
||||
|
|
|
@ -4381,6 +4381,7 @@ static struct kvm_x86_ops svm_x86_ops = {
|
|||
.cache_reg = svm_cache_reg,
|
||||
.get_rflags = svm_get_rflags,
|
||||
.set_rflags = svm_set_rflags,
|
||||
.fpu_activate = svm_fpu_activate,
|
||||
.fpu_deactivate = svm_fpu_deactivate,
|
||||
|
||||
.tlb_flush = svm_flush_tlb,
|
||||
|
|
|
@ -10185,6 +10185,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
|
|||
.cache_reg = vmx_cache_reg,
|
||||
.get_rflags = vmx_get_rflags,
|
||||
.set_rflags = vmx_set_rflags,
|
||||
.fpu_activate = vmx_fpu_activate,
|
||||
.fpu_deactivate = vmx_fpu_deactivate,
|
||||
|
||||
.tlb_flush = vmx_flush_tlb,
|
||||
|
|
|
@ -702,8 +702,9 @@ EXPORT_SYMBOL_GPL(kvm_set_xcr);
|
|||
int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
|
||||
{
|
||||
unsigned long old_cr4 = kvm_read_cr4(vcpu);
|
||||
unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE |
|
||||
X86_CR4_PAE | X86_CR4_SMEP;
|
||||
unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
|
||||
X86_CR4_SMEP | X86_CR4_SMAP;
|
||||
|
||||
if (cr4 & CR4_RESERVED_BITS)
|
||||
return 1;
|
||||
|
||||
|
@ -744,9 +745,6 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
|
|||
(!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
|
||||
kvm_mmu_reset_context(vcpu);
|
||||
|
||||
if ((cr4 ^ old_cr4) & X86_CR4_SMAP)
|
||||
update_permission_bitmask(vcpu, vcpu->arch.walk_mmu, false);
|
||||
|
||||
if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
|
||||
kvm_update_cpuid(vcpu);
|
||||
|
||||
|
@ -6197,6 +6195,8 @@ void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
|
|||
return;
|
||||
|
||||
page = gfn_to_page(vcpu->kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
|
||||
if (is_error_page(page))
|
||||
return;
|
||||
kvm_x86_ops->set_apic_access_page_addr(vcpu, page_to_phys(page));
|
||||
|
||||
/*
|
||||
|
@ -7060,7 +7060,9 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
|
|||
fpu_save_init(&vcpu->arch.guest_fpu);
|
||||
__kernel_fpu_end();
|
||||
++vcpu->stat.fpu_reload;
|
||||
if (!vcpu->arch.eager_fpu)
|
||||
kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
|
||||
|
||||
trace_kvm_fpu(0);
|
||||
}
|
||||
|
||||
|
@ -7076,11 +7078,21 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
|
|||
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
|
||||
unsigned int id)
|
||||
{
|
||||
struct kvm_vcpu *vcpu;
|
||||
|
||||
if (check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0)
|
||||
printk_once(KERN_WARNING
|
||||
"kvm: SMP vm created on host with unstable TSC; "
|
||||
"guest TSC will not be reliable\n");
|
||||
return kvm_x86_ops->vcpu_create(kvm, id);
|
||||
|
||||
vcpu = kvm_x86_ops->vcpu_create(kvm, id);
|
||||
|
||||
/*
|
||||
* Activate fpu unconditionally in case the guest needs eager FPU. It will be
|
||||
* deactivated soon if it doesn't.
|
||||
*/
|
||||
kvm_x86_ops->fpu_activate(vcpu);
|
||||
return vcpu;
|
||||
}
|
||||
|
||||
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
|
||||
|
|
|
@ -51,7 +51,7 @@ VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
|
|||
$(obj)/vdso64.so.dbg: $(src)/vdso.lds $(vobjs) FORCE
|
||||
$(call if_changed,vdso)
|
||||
|
||||
HOST_EXTRACFLAGS += -I$(srctree)/tools/include -I$(srctree)/include/uapi
|
||||
HOST_EXTRACFLAGS += -I$(srctree)/tools/include -I$(srctree)/include/uapi -I$(srctree)/arch/x86/include/uapi
|
||||
hostprogs-y += vdso2c
|
||||
|
||||
quiet_cmd_vdso2c = VDSO2C $@
|
||||
|
|
|
@ -734,6 +734,8 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
|
|||
}
|
||||
EXPORT_SYMBOL(blk_init_queue_node);
|
||||
|
||||
static void blk_queue_bio(struct request_queue *q, struct bio *bio);
|
||||
|
||||
struct request_queue *
|
||||
blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
|
||||
spinlock_t *lock)
|
||||
|
@ -1578,7 +1580,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
|
|||
blk_rq_bio_prep(req->q, req, bio);
|
||||
}
|
||||
|
||||
void blk_queue_bio(struct request_queue *q, struct bio *bio)
|
||||
static void blk_queue_bio(struct request_queue *q, struct bio *bio)
|
||||
{
|
||||
const bool sync = !!(bio->bi_rw & REQ_SYNC);
|
||||
struct blk_plug *plug;
|
||||
|
@ -1686,7 +1688,6 @@ out_unlock:
|
|||
spin_unlock_irq(q->queue_lock);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(blk_queue_bio); /* for device mapper only */
|
||||
|
||||
/*
|
||||
* If bio->bi_dev is a partition, remap the location
|
||||
|
|
|
@ -33,7 +33,7 @@ struct aead_ctx {
|
|||
/*
|
||||
* RSGL_MAX_ENTRIES is an artificial limit where user space at maximum
|
||||
* can cause the kernel to allocate RSGL_MAX_ENTRIES * ALG_MAX_PAGES
|
||||
* bytes
|
||||
* pages
|
||||
*/
|
||||
#define RSGL_MAX_ENTRIES ALG_MAX_PAGES
|
||||
struct af_alg_sgl rsgl[RSGL_MAX_ENTRIES];
|
||||
|
@ -435,11 +435,10 @@ static int aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored,
|
|||
if (err < 0)
|
||||
goto unlock;
|
||||
usedpages += err;
|
||||
/* chain the new scatterlist with initial list */
|
||||
/* chain the new scatterlist with previous one */
|
||||
if (cnt)
|
||||
scatterwalk_crypto_chain(ctx->rsgl[0].sg,
|
||||
ctx->rsgl[cnt].sg, 1,
|
||||
sg_nents(ctx->rsgl[cnt-1].sg));
|
||||
af_alg_link_sg(&ctx->rsgl[cnt-1], &ctx->rsgl[cnt]);
|
||||
|
||||
/* we do not need more iovecs as we have sufficient memory */
|
||||
if (outlen <= usedpages)
|
||||
break;
|
||||
|
|
|
@ -102,19 +102,12 @@ const struct acpi_predefined_names acpi_gbl_pre_defined_names[] = {
|
|||
{"_SB_", ACPI_TYPE_DEVICE, NULL},
|
||||
{"_SI_", ACPI_TYPE_LOCAL_SCOPE, NULL},
|
||||
{"_TZ_", ACPI_TYPE_DEVICE, NULL},
|
||||
/*
|
||||
* March, 2015:
|
||||
* The _REV object is in the process of being deprecated, because
|
||||
* other ACPI implementations permanently return 2. Thus, it
|
||||
* has little or no value. Return 2 for compatibility with
|
||||
* other ACPI implementations.
|
||||
*/
|
||||
{"_REV", ACPI_TYPE_INTEGER, ACPI_CAST_PTR(char, 2)},
|
||||
{"_REV", ACPI_TYPE_INTEGER, (char *)ACPI_CA_SUPPORT_LEVEL},
|
||||
{"_OS_", ACPI_TYPE_STRING, ACPI_OS_NAME},
|
||||
{"_GL_", ACPI_TYPE_MUTEX, ACPI_CAST_PTR(char, 1)},
|
||||
{"_GL_", ACPI_TYPE_MUTEX, (char *)1},
|
||||
|
||||
#if !defined (ACPI_NO_METHOD_EXECUTION) || defined (ACPI_CONSTANT_EVAL_ONLY)
|
||||
{"_OSI", ACPI_TYPE_METHOD, ACPI_CAST_PTR(char, 1)},
|
||||
{"_OSI", ACPI_TYPE_METHOD, (char *)1},
|
||||
#endif
|
||||
|
||||
/* Table terminator */
|
||||
|
|
|
@ -182,7 +182,7 @@ static void __init acpi_request_region (struct acpi_generic_address *gas,
|
|||
request_mem_region(addr, length, desc);
|
||||
}
|
||||
|
||||
static int __init acpi_reserve_resources(void)
|
||||
static void __init acpi_reserve_resources(void)
|
||||
{
|
||||
acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
|
||||
"ACPI PM1a_EVT_BLK");
|
||||
|
@ -211,10 +211,7 @@ static int __init acpi_reserve_resources(void)
|
|||
if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
|
||||
acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
|
||||
acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");
|
||||
|
||||
return 0;
|
||||
}
|
||||
device_initcall(acpi_reserve_resources);
|
||||
|
||||
void acpi_os_printf(const char *fmt, ...)
|
||||
{
|
||||
|
@ -1845,6 +1842,7 @@ acpi_status __init acpi_os_initialize(void)
|
|||
|
||||
acpi_status __init acpi_os_initialize1(void)
|
||||
{
|
||||
acpi_reserve_resources();
|
||||
kacpid_wq = alloc_workqueue("kacpid", 0, 1);
|
||||
kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
|
||||
kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
|
||||
|
|
|
@@ -2257,7 +2257,8 @@ static int nvme_trans_inquiry(struct nvme_ns *ns, struct sg_io_hdr *hdr,
page_code = GET_INQ_PAGE_CODE(cmd);
alloc_len = GET_INQ_ALLOC_LENGTH(cmd);

inq_response = kmalloc(alloc_len, GFP_KERNEL);
inq_response = kmalloc(max(alloc_len, STANDARD_INQUIRY_LENGTH),
GFP_KERNEL);
if (inq_response == NULL) {
res = -ENOMEM;
goto out_mem;

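The hunk above sizes the INQUIRY buffer by the larger of the caller-supplied length and the fixed response size. A hedged user-space sketch of the same guard follows; the macro value and helper name are illustrative, not taken from the driver.

#include <stdlib.h>

#define STANDARD_INQUIRY_LENGTH 36              /* illustrative value */
#define MAX(a, b) ((a) > (b) ? (a) : (b))

/* The response is written into a fixed-size layout, so the buffer must be
 * at least that big even when the caller asked for less. */
static unsigned char *alloc_inq_buffer(size_t alloc_len)
{
	return malloc(MAX(alloc_len, (size_t)STANDARD_INQUIRY_LENGTH));
}
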
@ -88,6 +88,7 @@ static const struct usb_device_id ath3k_table[] = {
|
|||
{ USB_DEVICE(0x04CA, 0x3007) },
|
||||
{ USB_DEVICE(0x04CA, 0x3008) },
|
||||
{ USB_DEVICE(0x04CA, 0x300b) },
|
||||
{ USB_DEVICE(0x04CA, 0x300f) },
|
||||
{ USB_DEVICE(0x04CA, 0x3010) },
|
||||
{ USB_DEVICE(0x0930, 0x0219) },
|
||||
{ USB_DEVICE(0x0930, 0x0220) },
|
||||
|
@ -104,6 +105,7 @@ static const struct usb_device_id ath3k_table[] = {
|
|||
{ USB_DEVICE(0x0cf3, 0xe003) },
|
||||
{ USB_DEVICE(0x0CF3, 0xE004) },
|
||||
{ USB_DEVICE(0x0CF3, 0xE005) },
|
||||
{ USB_DEVICE(0x0CF3, 0xE006) },
|
||||
{ USB_DEVICE(0x13d3, 0x3362) },
|
||||
{ USB_DEVICE(0x13d3, 0x3375) },
|
||||
{ USB_DEVICE(0x13d3, 0x3393) },
|
||||
|
@ -143,6 +145,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
|
|||
{ USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
|
||||
|
@ -158,6 +161,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
|
|||
{ USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x0cf3, 0xe006), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
|
||||
|
|
|
@ -186,6 +186,7 @@ static const struct usb_device_id blacklist_table[] = {
|
|||
{ USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
|
||||
|
@ -202,6 +203,7 @@ static const struct usb_device_id blacklist_table[] = {
|
|||
{ USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x0cf3, 0xe006), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
|
||||
|
@ -218,6 +220,7 @@ static const struct usb_device_id blacklist_table[] = {
|
|||
{ USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
|
||||
|
||||
/* QCA ROME chipset */
|
||||
{ USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME },
|
||||
{ USB_DEVICE(0x0cf3, 0xe300), .driver_info = BTUSB_QCA_ROME },
|
||||
{ USB_DEVICE(0x0cf3, 0xe360), .driver_info = BTUSB_QCA_ROME },
|
||||
|
||||
|
|
|
@ -1128,13 +1128,6 @@ static int si5351_dt_parse(struct i2c_client *client,
|
|||
if (!pdata)
|
||||
return -ENOMEM;
|
||||
|
||||
pdata->clk_xtal = of_clk_get(np, 0);
|
||||
if (!IS_ERR(pdata->clk_xtal))
|
||||
clk_put(pdata->clk_xtal);
|
||||
pdata->clk_clkin = of_clk_get(np, 1);
|
||||
if (!IS_ERR(pdata->clk_clkin))
|
||||
clk_put(pdata->clk_clkin);
|
||||
|
||||
/*
|
||||
* property silabs,pll-source : <num src>, [<..>]
|
||||
* allow to selectively set pll source
|
||||
|
@ -1328,8 +1321,22 @@ static int si5351_i2c_probe(struct i2c_client *client,
|
|||
i2c_set_clientdata(client, drvdata);
|
||||
drvdata->client = client;
|
||||
drvdata->variant = variant;
|
||||
drvdata->pxtal = pdata->clk_xtal;
|
||||
drvdata->pclkin = pdata->clk_clkin;
|
||||
drvdata->pxtal = devm_clk_get(&client->dev, "xtal");
|
||||
drvdata->pclkin = devm_clk_get(&client->dev, "clkin");
|
||||
|
||||
if (PTR_ERR(drvdata->pxtal) == -EPROBE_DEFER ||
|
||||
PTR_ERR(drvdata->pclkin) == -EPROBE_DEFER)
|
||||
return -EPROBE_DEFER;
|
||||
|
||||
/*
|
||||
* Check for valid parent clock: VARIANT_A and VARIANT_B need XTAL,
|
||||
* VARIANT_C can have CLKIN instead.
|
||||
*/
|
||||
if (IS_ERR(drvdata->pxtal) &&
|
||||
(drvdata->variant != SI5351_VARIANT_C || IS_ERR(drvdata->pclkin))) {
|
||||
dev_err(&client->dev, "missing parent clock\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
drvdata->regmap = devm_regmap_init_i2c(client, &si5351_regmap_config);
|
||||
if (IS_ERR(drvdata->regmap)) {
|
||||
|
@ -1393,6 +1400,11 @@ static int si5351_i2c_probe(struct i2c_client *client,
|
|||
}
|
||||
}
|
||||
|
||||
if (!IS_ERR(drvdata->pxtal))
|
||||
clk_prepare_enable(drvdata->pxtal);
|
||||
if (!IS_ERR(drvdata->pclkin))
|
||||
clk_prepare_enable(drvdata->pclkin);
|
||||
|
||||
/* register xtal input clock gate */
|
||||
memset(&init, 0, sizeof(init));
|
||||
init.name = si5351_input_names[0];
|
||||
|
@ -1407,7 +1419,8 @@ static int si5351_i2c_probe(struct i2c_client *client,
|
|||
clk = devm_clk_register(&client->dev, &drvdata->xtal);
|
||||
if (IS_ERR(clk)) {
|
||||
dev_err(&client->dev, "unable to register %s\n", init.name);
|
||||
return PTR_ERR(clk);
|
||||
ret = PTR_ERR(clk);
|
||||
goto err_clk;
|
||||
}
|
||||
|
||||
/* register clkin input clock gate */
|
||||
|
@ -1425,7 +1438,8 @@ static int si5351_i2c_probe(struct i2c_client *client,
|
|||
if (IS_ERR(clk)) {
|
||||
dev_err(&client->dev, "unable to register %s\n",
|
||||
init.name);
|
||||
return PTR_ERR(clk);
|
||||
ret = PTR_ERR(clk);
|
||||
goto err_clk;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1447,7 +1461,8 @@ static int si5351_i2c_probe(struct i2c_client *client,
|
|||
clk = devm_clk_register(&client->dev, &drvdata->pll[0].hw);
|
||||
if (IS_ERR(clk)) {
|
||||
dev_err(&client->dev, "unable to register %s\n", init.name);
|
||||
return -EINVAL;
|
||||
ret = PTR_ERR(clk);
|
||||
goto err_clk;
|
||||
}
|
||||
|
||||
/* register PLLB or VXCO (Si5351B) */
|
||||
|
@ -1471,7 +1486,8 @@ static int si5351_i2c_probe(struct i2c_client *client,
|
|||
clk = devm_clk_register(&client->dev, &drvdata->pll[1].hw);
|
||||
if (IS_ERR(clk)) {
|
||||
dev_err(&client->dev, "unable to register %s\n", init.name);
|
||||
return -EINVAL;
|
||||
ret = PTR_ERR(clk);
|
||||
goto err_clk;
|
||||
}
|
||||
|
||||
/* register clk multisync and clk out divider */
|
||||
|
@ -1492,8 +1508,10 @@ static int si5351_i2c_probe(struct i2c_client *client,
|
|||
num_clocks * sizeof(*drvdata->onecell.clks), GFP_KERNEL);
|
||||
|
||||
if (WARN_ON(!drvdata->msynth || !drvdata->clkout ||
|
||||
!drvdata->onecell.clks))
|
||||
return -ENOMEM;
|
||||
!drvdata->onecell.clks)) {
|
||||
ret = -ENOMEM;
|
||||
goto err_clk;
|
||||
}
|
||||
|
||||
for (n = 0; n < num_clocks; n++) {
|
||||
drvdata->msynth[n].num = n;
|
||||
|
@ -1511,7 +1529,8 @@ static int si5351_i2c_probe(struct i2c_client *client,
|
|||
if (IS_ERR(clk)) {
|
||||
dev_err(&client->dev, "unable to register %s\n",
|
||||
init.name);
|
||||
return -EINVAL;
|
||||
ret = PTR_ERR(clk);
|
||||
goto err_clk;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1538,7 +1557,8 @@ static int si5351_i2c_probe(struct i2c_client *client,
|
|||
if (IS_ERR(clk)) {
|
||||
dev_err(&client->dev, "unable to register %s\n",
|
||||
init.name);
|
||||
return -EINVAL;
|
||||
ret = PTR_ERR(clk);
|
||||
goto err_clk;
|
||||
}
|
||||
drvdata->onecell.clks[n] = clk;
|
||||
|
||||
|
@ -1557,10 +1577,17 @@ static int si5351_i2c_probe(struct i2c_client *client,
|
|||
&drvdata->onecell);
|
||||
if (ret) {
|
||||
dev_err(&client->dev, "unable to add clk provider\n");
|
||||
return ret;
|
||||
goto err_clk;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
err_clk:
|
||||
if (!IS_ERR(drvdata->pxtal))
|
||||
clk_disable_unprepare(drvdata->pxtal);
|
||||
if (!IS_ERR(drvdata->pclkin))
|
||||
clk_disable_unprepare(drvdata->pclkin);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static const struct i2c_device_id si5351_i2c_ids[] = {
|
||||
|
|
|
@ -1475,8 +1475,10 @@ static struct clk_core *__clk_set_parent_before(struct clk_core *clk,
|
|||
*/
|
||||
if (clk->prepare_count) {
|
||||
clk_core_prepare(parent);
|
||||
flags = clk_enable_lock();
|
||||
clk_core_enable(parent);
|
||||
clk_core_enable(clk);
|
||||
clk_enable_unlock(flags);
|
||||
}
|
||||
|
||||
/* update the clk tree topology */
|
||||
|
@ -1491,13 +1493,17 @@ static void __clk_set_parent_after(struct clk_core *core,
|
|||
struct clk_core *parent,
|
||||
struct clk_core *old_parent)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
/*
|
||||
* Finish the migration of prepare state and undo the changes done
|
||||
* for preventing a race with clk_enable().
|
||||
*/
|
||||
if (core->prepare_count) {
|
||||
flags = clk_enable_lock();
|
||||
clk_core_disable(core);
|
||||
clk_core_disable(old_parent);
|
||||
clk_enable_unlock(flags);
|
||||
clk_core_unprepare(old_parent);
|
||||
}
|
||||
}
|
||||
|
@ -1525,8 +1531,10 @@ static int __clk_set_parent(struct clk_core *clk, struct clk_core *parent,
|
|||
clk_enable_unlock(flags);
|
||||
|
||||
if (clk->prepare_count) {
|
||||
flags = clk_enable_lock();
|
||||
clk_core_disable(clk);
|
||||
clk_core_disable(parent);
|
||||
clk_enable_unlock(flags);
|
||||
clk_core_unprepare(parent);
|
||||
}
|
||||
return ret;
|
||||
|
|
|
@ -71,8 +71,8 @@ static const char *gcc_xo_gpll0_bimc[] = {
|
|||
static const struct parent_map gcc_xo_gpll0a_gpll1_gpll2a_map[] = {
|
||||
{ P_XO, 0 },
|
||||
{ P_GPLL0_AUX, 3 },
|
||||
{ P_GPLL2_AUX, 2 },
|
||||
{ P_GPLL1, 1 },
|
||||
{ P_GPLL2_AUX, 2 },
|
||||
};
|
||||
|
||||
static const char *gcc_xo_gpll0a_gpll1_gpll2a[] = {
|
||||
|
@ -1115,7 +1115,7 @@ static struct clk_rcg2 usb_hs_system_clk_src = {
|
|||
static const struct freq_tbl ftbl_gcc_venus0_vcodec0_clk[] = {
|
||||
F(100000000, P_GPLL0, 8, 0, 0),
|
||||
F(160000000, P_GPLL0, 5, 0, 0),
|
||||
F(228570000, P_GPLL0, 5, 0, 0),
|
||||
F(228570000, P_GPLL0, 3.5, 0, 0),
|
||||
{ }
|
||||
};
|
||||
|
||||
|
|
|
@ -10,7 +10,7 @@ obj-$(CONFIG_SOC_EXYNOS5250) += clk-exynos5250.o
|
|||
obj-$(CONFIG_SOC_EXYNOS5260) += clk-exynos5260.o
|
||||
obj-$(CONFIG_SOC_EXYNOS5410) += clk-exynos5410.o
|
||||
obj-$(CONFIG_SOC_EXYNOS5420) += clk-exynos5420.o
|
||||
obj-$(CONFIG_ARCH_EXYNOS5433) += clk-exynos5433.o
|
||||
obj-$(CONFIG_ARCH_EXYNOS) += clk-exynos5433.o
|
||||
obj-$(CONFIG_SOC_EXYNOS5440) += clk-exynos5440.o
|
||||
obj-$(CONFIG_ARCH_EXYNOS) += clk-exynos-audss.o
|
||||
obj-$(CONFIG_ARCH_EXYNOS) += clk-exynos-clkout.o
|
||||
|
|
|
@ -271,6 +271,7 @@ static const struct samsung_clk_reg_dump exynos5420_set_clksrc[] = {
|
|||
{ .offset = SRC_MASK_PERIC0, .value = 0x11111110, },
|
||||
{ .offset = SRC_MASK_PERIC1, .value = 0x11111100, },
|
||||
{ .offset = SRC_MASK_ISP, .value = 0x11111000, },
|
||||
{ .offset = GATE_BUS_TOP, .value = 0xffffffff, },
|
||||
{ .offset = GATE_BUS_DISP1, .value = 0xffffffff, },
|
||||
{ .offset = GATE_IP_PERIC, .value = 0xffffffff, },
|
||||
};
|
||||
|
|
|
@ -748,7 +748,7 @@ static struct samsung_pll_rate_table exynos5443_pll_rates[] = {
|
|||
PLL_35XX_RATE(825000000U, 275, 4, 1),
|
||||
PLL_35XX_RATE(800000000U, 400, 6, 1),
|
||||
PLL_35XX_RATE(733000000U, 733, 12, 1),
|
||||
PLL_35XX_RATE(700000000U, 360, 6, 1),
|
||||
PLL_35XX_RATE(700000000U, 175, 3, 1),
|
||||
PLL_35XX_RATE(667000000U, 222, 4, 1),
|
||||
PLL_35XX_RATE(633000000U, 211, 4, 1),
|
||||
PLL_35XX_RATE(600000000U, 500, 5, 2),
|
||||
|
@ -760,14 +760,14 @@ static struct samsung_pll_rate_table exynos5443_pll_rates[] = {
|
|||
PLL_35XX_RATE(444000000U, 370, 5, 2),
|
||||
PLL_35XX_RATE(420000000U, 350, 5, 2),
|
||||
PLL_35XX_RATE(400000000U, 400, 6, 2),
|
||||
PLL_35XX_RATE(350000000U, 360, 6, 2),
|
||||
PLL_35XX_RATE(350000000U, 350, 6, 2),
|
||||
PLL_35XX_RATE(333000000U, 222, 4, 2),
|
||||
PLL_35XX_RATE(300000000U, 500, 5, 3),
|
||||
PLL_35XX_RATE(266000000U, 532, 6, 3),
|
||||
PLL_35XX_RATE(200000000U, 400, 6, 3),
|
||||
PLL_35XX_RATE(166000000U, 332, 6, 3),
|
||||
PLL_35XX_RATE(160000000U, 320, 6, 3),
|
||||
PLL_35XX_RATE(133000000U, 552, 6, 4),
|
||||
PLL_35XX_RATE(133000000U, 532, 6, 4),
|
||||
PLL_35XX_RATE(100000000U, 400, 6, 4),
|
||||
{ /* sentinel */ }
|
||||
};
|
||||
|
@ -1490,7 +1490,7 @@ static struct samsung_gate_clock mif_gate_clks[] __initdata = {
|
|||
|
||||
/* ENABLE_PCLK_MIF_SECURE_MONOTONIC_CNT */
|
||||
GATE(CLK_PCLK_MONOTONIC_CNT, "pclk_monotonic_cnt", "div_aclk_mif_133",
|
||||
ENABLE_PCLK_MIF_SECURE_RTC, 0, 0, 0),
|
||||
ENABLE_PCLK_MIF_SECURE_MONOTONIC_CNT, 0, 0, 0),
|
||||
|
||||
/* ENABLE_PCLK_MIF_SECURE_RTC */
|
||||
GATE(CLK_PCLK_RTC, "pclk_rtc", "div_aclk_mif_133",
|
||||
|
@ -3665,7 +3665,7 @@ static struct samsung_gate_clock apollo_gate_clks[] __initdata = {
|
|||
ENABLE_SCLK_APOLLO, 3, CLK_IGNORE_UNUSED, 0),
|
||||
GATE(CLK_SCLK_HPM_APOLLO, "sclk_hpm_apollo", "div_sclk_hpm_apollo",
|
||||
ENABLE_SCLK_APOLLO, 1, CLK_IGNORE_UNUSED, 0),
|
||||
GATE(CLK_SCLK_APOLLO, "sclk_apollo", "div_apollo_pll",
|
||||
GATE(CLK_SCLK_APOLLO, "sclk_apollo", "div_apollo2",
|
||||
ENABLE_SCLK_APOLLO, 0, CLK_IGNORE_UNUSED, 0),
|
||||
};
|
||||
|
||||
|
@ -3927,7 +3927,7 @@ CLK_OF_DECLARE(exynos5433_cmu_atlas, "samsung,exynos5433-cmu-atlas",
|
|||
#define ENABLE_PCLK_MSCL 0x0900
|
||||
#define ENABLE_PCLK_MSCL_SECURE_SMMU_M2MSCALER0 0x0904
|
||||
#define ENABLE_PCLK_MSCL_SECURE_SMMU_M2MSCALER1 0x0908
|
||||
#define ENABLE_PCLK_MSCL_SECURE_SMMU_JPEG 0x000c
|
||||
#define ENABLE_PCLK_MSCL_SECURE_SMMU_JPEG 0x090c
|
||||
#define ENABLE_SCLK_MSCL 0x0a00
|
||||
#define ENABLE_IP_MSCL0 0x0b00
|
||||
#define ENABLE_IP_MSCL1 0x0b04
|
||||
|
|
|
@ -119,6 +119,18 @@ static int usb_extcon_probe(struct platform_device *pdev)
|
|||
return PTR_ERR(info->id_gpiod);
|
||||
}
|
||||
|
||||
info->edev = devm_extcon_dev_allocate(dev, usb_extcon_cable);
|
||||
if (IS_ERR(info->edev)) {
|
||||
dev_err(dev, "failed to allocate extcon device\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
ret = devm_extcon_dev_register(dev, info->edev);
|
||||
if (ret < 0) {
|
||||
dev_err(dev, "failed to register extcon device\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = gpiod_set_debounce(info->id_gpiod,
|
||||
USB_GPIO_DEBOUNCE_MS * 1000);
|
||||
if (ret < 0)
|
||||
|
@ -142,18 +154,6 @@ static int usb_extcon_probe(struct platform_device *pdev)
|
|||
return ret;
|
||||
}
|
||||
|
||||
info->edev = devm_extcon_dev_allocate(dev, usb_extcon_cable);
|
||||
if (IS_ERR(info->edev)) {
|
||||
dev_err(dev, "failed to allocate extcon device\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
ret = devm_extcon_dev_register(dev, info->edev);
|
||||
if (ret < 0) {
|
||||
dev_err(dev, "failed to register extcon device\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
platform_set_drvdata(pdev, info);
|
||||
device_init_wakeup(dev, 1);
|
||||
|
||||
|
|
|
@ -499,19 +499,19 @@ static int __init dmi_present(const u8 *buf)
|
|||
buf += 16;
|
||||
|
||||
if (memcmp(buf, "_DMI_", 5) == 0 && dmi_checksum(buf, 15)) {
|
||||
if (smbios_ver)
|
||||
dmi_ver = smbios_ver;
|
||||
else
|
||||
dmi_ver = (buf[14] & 0xF0) << 4 | (buf[14] & 0x0F);
|
||||
dmi_num = get_unaligned_le16(buf + 12);
|
||||
dmi_len = get_unaligned_le16(buf + 6);
|
||||
dmi_base = get_unaligned_le32(buf + 8);
|
||||
|
||||
if (dmi_walk_early(dmi_decode) == 0) {
|
||||
if (smbios_ver) {
|
||||
dmi_ver = smbios_ver;
|
||||
pr_info("SMBIOS %d.%d%s present.\n",
|
||||
dmi_ver >> 8, dmi_ver & 0xFF,
|
||||
(dmi_ver < 0x0300) ? "" : ".x");
|
||||
pr_info("SMBIOS %d.%d present.\n",
|
||||
dmi_ver >> 8, dmi_ver & 0xFF);
|
||||
} else {
|
||||
dmi_ver = (buf[14] & 0xF0) << 4 |
|
||||
(buf[14] & 0x0F);
|
||||
pr_info("Legacy DMI %d.%d present.\n",
|
||||
dmi_ver >> 8, dmi_ver & 0xFF);
|
||||
}
|
||||
|
|
|
@ -91,7 +91,7 @@ static void decon_wait_for_vblank(struct exynos_drm_crtc *crtc)
|
|||
|
||||
static void decon_clear_channel(struct decon_context *ctx)
|
||||
{
|
||||
int win, ch_enabled = 0;
|
||||
unsigned int win, ch_enabled = 0;
|
||||
|
||||
DRM_DEBUG_KMS("%s\n", __FILE__);
|
||||
|
||||
|
@ -710,7 +710,7 @@ static void decon_dpms(struct exynos_drm_crtc *crtc, int mode)
|
|||
}
|
||||
}
|
||||
|
||||
static struct exynos_drm_crtc_ops decon_crtc_ops = {
|
||||
static const struct exynos_drm_crtc_ops decon_crtc_ops = {
|
||||
.dpms = decon_dpms,
|
||||
.mode_fixup = decon_mode_fixup,
|
||||
.commit = decon_commit,
|
||||
|
|
|
@ -32,7 +32,6 @@
|
|||
#include <drm/bridge/ptn3460.h>
|
||||
|
||||
#include "exynos_dp_core.h"
|
||||
#include "exynos_drm_fimd.h"
|
||||
|
||||
#define ctx_from_connector(c) container_of(c, struct exynos_dp_device, \
|
||||
connector)
|
||||
|
@ -196,7 +195,7 @@ static int exynos_dp_read_edid(struct exynos_dp_device *dp)
|
|||
}
|
||||
}
|
||||
|
||||
dev_err(dp->dev, "EDID Read success!\n");
|
||||
dev_dbg(dp->dev, "EDID Read success!\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1066,6 +1065,8 @@ static void exynos_dp_phy_exit(struct exynos_dp_device *dp)
|
|||
|
||||
static void exynos_dp_poweron(struct exynos_dp_device *dp)
|
||||
{
|
||||
struct exynos_drm_crtc *crtc = dp_to_crtc(dp);
|
||||
|
||||
if (dp->dpms_mode == DRM_MODE_DPMS_ON)
|
||||
return;
|
||||
|
||||
|
@ -1076,7 +1077,8 @@ static void exynos_dp_poweron(struct exynos_dp_device *dp)
|
|||
}
|
||||
}
|
||||
|
||||
fimd_dp_clock_enable(dp_to_crtc(dp), true);
|
||||
if (crtc->ops->clock_enable)
|
||||
crtc->ops->clock_enable(dp_to_crtc(dp), true);
|
||||
|
||||
clk_prepare_enable(dp->clock);
|
||||
exynos_dp_phy_init(dp);
|
||||
|
@ -1087,6 +1089,8 @@ static void exynos_dp_poweron(struct exynos_dp_device *dp)
|
|||
|
||||
static void exynos_dp_poweroff(struct exynos_dp_device *dp)
|
||||
{
|
||||
struct exynos_drm_crtc *crtc = dp_to_crtc(dp);
|
||||
|
||||
if (dp->dpms_mode != DRM_MODE_DPMS_ON)
|
||||
return;
|
||||
|
||||
|
@ -1102,7 +1106,8 @@ static void exynos_dp_poweroff(struct exynos_dp_device *dp)
|
|||
exynos_dp_phy_exit(dp);
|
||||
clk_disable_unprepare(dp->clock);
|
||||
|
||||
fimd_dp_clock_enable(dp_to_crtc(dp), false);
|
||||
if (crtc->ops->clock_enable)
|
||||
crtc->ops->clock_enable(dp_to_crtc(dp), false);
|
||||
|
||||
if (dp->panel) {
|
||||
if (drm_panel_unprepare(dp->panel))
|
||||
|
|
|
@ -241,7 +241,7 @@ struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
|
|||
struct drm_plane *plane,
|
||||
int pipe,
|
||||
enum exynos_drm_output_type type,
|
||||
struct exynos_drm_crtc_ops *ops,
|
||||
const struct exynos_drm_crtc_ops *ops,
|
||||
void *ctx)
|
||||
{
|
||||
struct exynos_drm_crtc *exynos_crtc;
|
||||
|
|
|
@ -21,7 +21,7 @@ struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
|
|||
struct drm_plane *plane,
|
||||
int pipe,
|
||||
enum exynos_drm_output_type type,
|
||||
struct exynos_drm_crtc_ops *ops,
|
||||
const struct exynos_drm_crtc_ops *ops,
|
||||
void *context);
|
||||
int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe);
|
||||
void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int pipe);
|
||||
|
|
|
@ -71,13 +71,6 @@ enum exynos_drm_output_type {
|
|||
* @dma_addr: array of bus(accessed by dma) address to the memory region
|
||||
* allocated for a overlay.
|
||||
* @zpos: order of overlay layer(z position).
|
||||
* @index_color: if using color key feature then this value would be used
|
||||
* as index color.
|
||||
* @default_win: a window to be enabled.
|
||||
* @color_key: color key on or off.
|
||||
* @local_path: in case of lcd type, local path mode on or off.
|
||||
* @transparency: transparency on or off.
|
||||
* @activated: activated or not.
|
||||
* @enabled: enabled or not.
|
||||
* @resume: to resume or not.
|
||||
*
|
||||
|
@ -108,13 +101,7 @@ struct exynos_drm_plane {
|
|||
uint32_t pixel_format;
|
||||
dma_addr_t dma_addr[MAX_FB_BUFFER];
|
||||
unsigned int zpos;
|
||||
unsigned int index_color;
|
||||
|
||||
bool default_win:1;
|
||||
bool color_key:1;
|
||||
bool local_path:1;
|
||||
bool transparency:1;
|
||||
bool activated:1;
|
||||
bool enabled:1;
|
||||
bool resume:1;
|
||||
};
|
||||
|
@ -181,6 +168,10 @@ struct exynos_drm_display {
|
|||
* @win_disable: disable hardware specific overlay.
|
||||
* @te_handler: trigger to transfer video image at the tearing effect
|
||||
* synchronization signal if there is a page flip request.
|
||||
* @clock_enable: optional function enabling/disabling display domain clock,
|
||||
* called from exynos-dp driver before powering up (with
|
||||
* 'enable' argument as true) and after powering down (with
|
||||
* 'enable' as false).
|
||||
*/
|
||||
struct exynos_drm_crtc;
|
||||
struct exynos_drm_crtc_ops {
|
||||
|
@ -195,6 +186,7 @@ struct exynos_drm_crtc_ops {
|
|||
void (*win_commit)(struct exynos_drm_crtc *crtc, unsigned int zpos);
|
||||
void (*win_disable)(struct exynos_drm_crtc *crtc, unsigned int zpos);
|
||||
void (*te_handler)(struct exynos_drm_crtc *crtc);
|
||||
void (*clock_enable)(struct exynos_drm_crtc *crtc, bool enable);
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -221,7 +213,7 @@ struct exynos_drm_crtc {
|
|||
unsigned int dpms;
|
||||
wait_queue_head_t pending_flip_queue;
|
||||
struct drm_pending_vblank_event *event;
|
||||
struct exynos_drm_crtc_ops *ops;
|
||||
const struct exynos_drm_crtc_ops *ops;
|
||||
void *ctx;
|
||||
};
|
||||
|
||||
|
|
|
@ -171,43 +171,6 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
|
|||
return &exynos_fb->fb;
|
||||
}
|
||||
|
||||
static u32 exynos_drm_format_num_buffers(struct drm_mode_fb_cmd2 *mode_cmd)
|
||||
{
|
||||
unsigned int cnt = 0;
|
||||
|
||||
if (mode_cmd->pixel_format != DRM_FORMAT_NV12)
|
||||
return drm_format_num_planes(mode_cmd->pixel_format);
|
||||
|
||||
while (cnt != MAX_FB_BUFFER) {
|
||||
if (!mode_cmd->handles[cnt])
|
||||
break;
|
||||
cnt++;
|
||||
}
|
||||
|
||||
/*
|
||||
* check if NV12 or NV12M.
|
||||
*
|
||||
* NV12
|
||||
* handles[0] = base1, offsets[0] = 0
|
||||
* handles[1] = base1, offsets[1] = Y_size
|
||||
*
|
||||
* NV12M
|
||||
* handles[0] = base1, offsets[0] = 0
|
||||
* handles[1] = base2, offsets[1] = 0
|
||||
*/
|
||||
if (cnt == 2) {
|
||||
/*
|
||||
* in case of NV12 format, offsets[1] is not 0 and
|
||||
* handles[0] is same as handles[1].
|
||||
*/
|
||||
if (mode_cmd->offsets[1] &&
|
||||
mode_cmd->handles[0] == mode_cmd->handles[1])
|
||||
cnt = 1;
|
||||
}
|
||||
|
||||
return cnt;
|
||||
}
|
||||
|
||||
static struct drm_framebuffer *
|
||||
exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
|
||||
struct drm_mode_fb_cmd2 *mode_cmd)
|
||||
|
@ -230,7 +193,7 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
|
|||
|
||||
drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
|
||||
exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
|
||||
exynos_fb->buf_cnt = exynos_drm_format_num_buffers(mode_cmd);
|
||||
exynos_fb->buf_cnt = drm_format_num_planes(mode_cmd->pixel_format);
|
||||
|
||||
DRM_DEBUG_KMS("buf_cnt = %d\n", exynos_fb->buf_cnt);
|
||||
|
||||
|
|
|
@ -33,7 +33,6 @@
|
|||
#include "exynos_drm_crtc.h"
|
||||
#include "exynos_drm_plane.h"
|
||||
#include "exynos_drm_iommu.h"
|
||||
#include "exynos_drm_fimd.h"
|
||||
|
||||
/*
|
||||
* FIMD stands for Fully Interactive Mobile Display and
|
||||
|
@ -216,7 +215,7 @@ static void fimd_wait_for_vblank(struct exynos_drm_crtc *crtc)
|
|||
DRM_DEBUG_KMS("vblank wait timed out.\n");
|
||||
}
|
||||
|
||||
static void fimd_enable_video_output(struct fimd_context *ctx, int win,
|
||||
static void fimd_enable_video_output(struct fimd_context *ctx, unsigned int win,
|
||||
bool enable)
|
||||
{
|
||||
u32 val = readl(ctx->regs + WINCON(win));
|
||||
|
@ -229,7 +228,8 @@ static void fimd_enable_video_output(struct fimd_context *ctx, int win,
|
|||
writel(val, ctx->regs + WINCON(win));
|
||||
}
|
||||
|
||||
static void fimd_enable_shadow_channel_path(struct fimd_context *ctx, int win,
|
||||
static void fimd_enable_shadow_channel_path(struct fimd_context *ctx,
|
||||
unsigned int win,
|
||||
bool enable)
|
||||
{
|
||||
u32 val = readl(ctx->regs + SHADOWCON);
|
||||
|
@ -244,7 +244,7 @@ static void fimd_enable_shadow_channel_path(struct fimd_context *ctx, int win,
|
|||
|
||||
static void fimd_clear_channel(struct fimd_context *ctx)
|
||||
{
|
||||
int win, ch_enabled = 0;
|
||||
unsigned int win, ch_enabled = 0;
|
||||
|
||||
DRM_DEBUG_KMS("%s\n", __FILE__);
|
||||
|
||||
|
@ -946,7 +946,24 @@ static void fimd_te_handler(struct exynos_drm_crtc *crtc)
|
|||
drm_handle_vblank(ctx->drm_dev, ctx->pipe);
|
||||
}
|
||||
|
||||
static struct exynos_drm_crtc_ops fimd_crtc_ops = {
|
||||
static void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable)
|
||||
{
|
||||
struct fimd_context *ctx = crtc->ctx;
|
||||
u32 val;
|
||||
|
||||
/*
|
||||
* Only Exynos 5250, 5260, 5410 and 542x requires enabling DP/MIE
|
||||
* clock. On these SoCs the bootloader may enable it but any
|
||||
* power domain off/on will reset it to disable state.
|
||||
*/
|
||||
if (ctx->driver_data != &exynos5_fimd_driver_data)
|
||||
return;
|
||||
|
||||
val = enable ? DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE;
|
||||
writel(DP_MIE_CLK_DP_ENABLE, ctx->regs + DP_MIE_CLKCON);
|
||||
}
|
||||
|
||||
static const struct exynos_drm_crtc_ops fimd_crtc_ops = {
|
||||
.dpms = fimd_dpms,
|
||||
.mode_fixup = fimd_mode_fixup,
|
||||
.commit = fimd_commit,
|
||||
|
@ -956,6 +973,7 @@ static struct exynos_drm_crtc_ops fimd_crtc_ops = {
|
|||
.win_commit = fimd_win_commit,
|
||||
.win_disable = fimd_win_disable,
|
||||
.te_handler = fimd_te_handler,
|
||||
.clock_enable = fimd_dp_clock_enable,
|
||||
};
|
||||
|
||||
static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
|
||||
|
@ -1025,12 +1043,7 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
|
|||
if (ctx->display)
|
||||
exynos_drm_create_enc_conn(drm_dev, ctx->display);
|
||||
|
||||
ret = fimd_iommu_attach_devices(ctx, drm_dev);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
|
||||
return fimd_iommu_attach_devices(ctx, drm_dev);
|
||||
}
|
||||
|
||||
static void fimd_unbind(struct device *dev, struct device *master,
|
||||
|
@ -1192,24 +1205,6 @@ static int fimd_remove(struct platform_device *pdev)
|
|||
return 0;
|
||||
}
|
||||
|
||||
void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable)
|
||||
{
|
||||
struct fimd_context *ctx = crtc->ctx;
|
||||
u32 val;
|
||||
|
||||
/*
|
||||
* Only Exynos 5250, 5260, 5410 and 542x requires enabling DP/MIE
|
||||
* clock. On these SoCs the bootloader may enable it but any
|
||||
* power domain off/on will reset it to disable state.
|
||||
*/
|
||||
if (ctx->driver_data != &exynos5_fimd_driver_data)
|
||||
return;
|
||||
|
||||
val = enable ? DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE;
|
||||
writel(DP_MIE_CLK_DP_ENABLE, ctx->regs + DP_MIE_CLKCON);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(fimd_dp_clock_enable);
|
||||
|
||||
struct platform_driver fimd_driver = {
|
||||
.probe = fimd_probe,
|
||||
.remove = fimd_remove,
|
||||
|
|
|
@ -1,15 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2015 Samsung Electronics Co., Ltd.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the
|
||||
* Free Software Foundation; either version 2 of the License, or (at your
|
||||
* option) any later version.
|
||||
*/
|
||||
|
||||
#ifndef _EXYNOS_DRM_FIMD_H_
|
||||
#define _EXYNOS_DRM_FIMD_H_
|
||||
|
||||
extern void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable);
|
||||
|
||||
#endif /* _EXYNOS_DRM_FIMD_H_ */
|
|
@ -76,7 +76,7 @@ int exynos_check_plane(struct drm_plane *plane, struct drm_framebuffer *fb)
|
|||
return -EFAULT;
|
||||
}
|
||||
|
||||
exynos_plane->dma_addr[i] = buffer->dma_addr;
|
||||
exynos_plane->dma_addr[i] = buffer->dma_addr + fb->offsets[i];
|
||||
|
||||
DRM_DEBUG_KMS("buffer: %d, dma_addr = 0x%lx\n",
|
||||
i, (unsigned long)exynos_plane->dma_addr[i]);
|
||||
|
|
|
@ -217,7 +217,7 @@ static int vidi_ctx_initialize(struct vidi_context *ctx,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static struct exynos_drm_crtc_ops vidi_crtc_ops = {
|
||||
static const struct exynos_drm_crtc_ops vidi_crtc_ops = {
|
||||
.dpms = vidi_dpms,
|
||||
.enable_vblank = vidi_enable_vblank,
|
||||
.disable_vblank = vidi_disable_vblank,
|
||||
|
|
|
@ -44,6 +44,12 @@
|
|||
#define MIXER_WIN_NR 3
|
||||
#define MIXER_DEFAULT_WIN 0
|
||||
|
||||
/* The pixelformats that are natively supported by the mixer. */
|
||||
#define MXR_FORMAT_RGB565 4
|
||||
#define MXR_FORMAT_ARGB1555 5
|
||||
#define MXR_FORMAT_ARGB4444 6
|
||||
#define MXR_FORMAT_ARGB8888 7
|
||||
|
||||
struct mixer_resources {
|
||||
int irq;
|
||||
void __iomem *mixer_regs;
|
||||
|
@ -327,7 +333,8 @@ static void mixer_cfg_rgb_fmt(struct mixer_context *ctx, unsigned int height)
|
|||
mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_RGB_FMT_MASK);
|
||||
}
|
||||
|
||||
static void mixer_cfg_layer(struct mixer_context *ctx, int win, bool enable)
|
||||
static void mixer_cfg_layer(struct mixer_context *ctx, unsigned int win,
|
||||
bool enable)
|
||||
{
|
||||
struct mixer_resources *res = &ctx->mixer_res;
|
||||
u32 val = enable ? ~0 : 0;
|
||||
|
@ -359,8 +366,6 @@ static void mixer_run(struct mixer_context *ctx)
|
|||
struct mixer_resources *res = &ctx->mixer_res;
|
||||
|
||||
mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_REG_RUN);
|
||||
|
||||
mixer_regs_dump(ctx);
|
||||
}
|
||||
|
||||
static void mixer_stop(struct mixer_context *ctx)
|
||||
|
@ -373,16 +378,13 @@ static void mixer_stop(struct mixer_context *ctx)
|
|||
while (!(mixer_reg_read(res, MXR_STATUS) & MXR_STATUS_REG_IDLE) &&
|
||||
--timeout)
|
||||
usleep_range(10000, 12000);
|
||||
|
||||
mixer_regs_dump(ctx);
|
||||
}
|
||||
|
||||
static void vp_video_buffer(struct mixer_context *ctx, int win)
|
||||
static void vp_video_buffer(struct mixer_context *ctx, unsigned int win)
|
||||
{
|
||||
struct mixer_resources *res = &ctx->mixer_res;
|
||||
unsigned long flags;
|
||||
struct exynos_drm_plane *plane;
|
||||
unsigned int buf_num = 1;
|
||||
dma_addr_t luma_addr[2], chroma_addr[2];
|
||||
bool tiled_mode = false;
|
||||
bool crcb_mode = false;
|
||||
|
@ -393,27 +395,18 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
|
|||
switch (plane->pixel_format) {
|
||||
case DRM_FORMAT_NV12:
|
||||
crcb_mode = false;
|
||||
buf_num = 2;
|
||||
break;
|
||||
/* TODO: single buffer format NV12, NV21 */
|
||||
case DRM_FORMAT_NV21:
|
||||
crcb_mode = true;
|
||||
break;
|
||||
default:
|
||||
/* ignore pixel format at disable time */
|
||||
if (!plane->dma_addr[0])
|
||||
break;
|
||||
|
||||
DRM_ERROR("pixel format for vp is wrong [%d].\n",
|
||||
plane->pixel_format);
|
||||
return;
|
||||
}
|
||||
|
||||
if (buf_num == 2) {
|
||||
luma_addr[0] = plane->dma_addr[0];
|
||||
chroma_addr[0] = plane->dma_addr[1];
|
||||
} else {
|
||||
luma_addr[0] = plane->dma_addr[0];
|
||||
chroma_addr[0] = plane->dma_addr[0]
|
||||
+ (plane->pitch * plane->fb_height);
|
||||
}
|
||||
|
||||
if (plane->scan_flag & DRM_MODE_FLAG_INTERLACE) {
|
||||
ctx->interlace = true;
|
||||
|
@ -484,6 +477,7 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
|
|||
mixer_vsync_set_update(ctx, true);
|
||||
spin_unlock_irqrestore(&res->reg_slock, flags);
|
||||
|
||||
mixer_regs_dump(ctx);
|
||||
vp_regs_dump(ctx);
|
||||
}
|
||||
|
||||
|
@ -518,7 +512,7 @@ fail:
|
|||
return -ENOTSUPP;
|
||||
}
|
||||
|
||||
static void mixer_graph_buffer(struct mixer_context *ctx, int win)
|
||||
static void mixer_graph_buffer(struct mixer_context *ctx, unsigned int win)
|
||||
{
|
||||
struct mixer_resources *res = &ctx->mixer_res;
|
||||
unsigned long flags;
|
||||
|
@ -531,20 +525,27 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
|
|||
|
||||
plane = &ctx->planes[win];
|
||||
|
||||
#define RGB565 4
|
||||
#define ARGB1555 5
|
||||
#define ARGB4444 6
|
||||
#define ARGB8888 7
|
||||
switch (plane->pixel_format) {
|
||||
case DRM_FORMAT_XRGB4444:
|
||||
fmt = MXR_FORMAT_ARGB4444;
|
||||
break;
|
||||
|
||||
switch (plane->bpp) {
|
||||
case 16:
|
||||
fmt = ARGB4444;
|
||||
case DRM_FORMAT_XRGB1555:
|
||||
fmt = MXR_FORMAT_ARGB1555;
|
||||
break;
|
||||
case 32:
|
||||
fmt = ARGB8888;
|
||||
|
||||
case DRM_FORMAT_RGB565:
|
||||
fmt = MXR_FORMAT_RGB565;
|
||||
break;
|
||||
|
||||
case DRM_FORMAT_XRGB8888:
|
||||
case DRM_FORMAT_ARGB8888:
|
||||
fmt = MXR_FORMAT_ARGB8888;
|
||||
break;
|
||||
|
||||
default:
|
||||
fmt = ARGB8888;
|
||||
DRM_DEBUG_KMS("pixelformat unsupported by mixer\n");
|
||||
return;
|
||||
}
|
||||
|
||||
/* check if mixer supports requested scaling setup */
|
||||
|
@ -617,6 +618,8 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
|
|||
|
||||
mixer_vsync_set_update(ctx, true);
|
||||
spin_unlock_irqrestore(&res->reg_slock, flags);
|
||||
|
||||
mixer_regs_dump(ctx);
|
||||
}
|
||||
|
||||
static void vp_win_reset(struct mixer_context *ctx)
|
||||
|
@ -1070,6 +1073,7 @@ static void mixer_poweroff(struct mixer_context *ctx)
|
|||
mutex_unlock(&ctx->mixer_mutex);
|
||||
|
||||
mixer_stop(ctx);
|
||||
mixer_regs_dump(ctx);
|
||||
mixer_window_suspend(ctx);
|
||||
|
||||
ctx->int_en = mixer_reg_read(res, MXR_INT_EN);
|
||||
|
@ -1126,7 +1130,7 @@ int mixer_check_mode(struct drm_display_mode *mode)
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
static struct exynos_drm_crtc_ops mixer_crtc_ops = {
|
||||
static const struct exynos_drm_crtc_ops mixer_crtc_ops = {
|
||||
.dpms = mixer_dpms,
|
||||
.enable_vblank = mixer_enable_vblank,
|
||||
.disable_vblank = mixer_disable_vblank,
|
||||
|
@ -1156,7 +1160,7 @@ static struct mixer_drv_data exynos4210_mxr_drv_data = {
|
|||
.has_sclk = 1,
|
||||
};
|
||||
|
||||
static struct platform_device_id mixer_driver_types[] = {
|
||||
static const struct platform_device_id mixer_driver_types[] = {
|
||||
{
|
||||
.name = "s5p-mixer",
|
||||
.driver_data = (unsigned long)&exynos4210_mxr_drv_data,
|
||||
|
|
|
@ -699,6 +699,16 @@ static int i915_drm_resume(struct drm_device *dev)
|
|||
intel_init_pch_refclk(dev);
|
||||
drm_mode_config_reset(dev);
|
||||
|
||||
/*
|
||||
* Interrupts have to be enabled before any batches are run. If not the
|
||||
* GPU will hang. i915_gem_init_hw() will initiate batches to
|
||||
* update/restore the context.
|
||||
*
|
||||
* Modeset enabling in intel_modeset_init_hw() also needs working
|
||||
* interrupts.
|
||||
*/
|
||||
intel_runtime_pm_enable_interrupts(dev_priv);
|
||||
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
if (i915_gem_init_hw(dev)) {
|
||||
DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
|
||||
|
@ -706,9 +716,6 @@ static int i915_drm_resume(struct drm_device *dev)
|
|||
}
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
/* We need working interrupts for modeset enabling ... */
|
||||
intel_runtime_pm_enable_interrupts(dev_priv);
|
||||
|
||||
intel_modeset_init_hw(dev);
|
||||
|
||||
spin_lock_irq(&dev_priv->irq_lock);
|
||||
|
|
Some files were not shown because too many files have changed in this diff.