Merge branch 'timers/urgent' into timers/core, to pick up dependent fix
Signed-off-by: Ingo Molnar <mingo@kernel.org>

commit 57957fb519

@@ -107,6 +107,7 @@ Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@ascom.ch>
 Maciej W. Rozycki <macro@mips.com> <macro@imgtec.com>
 Marcin Nowakowski <marcin.nowakowski@mips.com> <marcin.nowakowski@imgtec.com>
 Mark Brown <broonie@sirena.org.uk>
+Mark Yao <markyao0591@gmail.com> <mark.yao@rock-chips.com>
 Martin Kepplinger <martink@posteo.de> <martin.kepplinger@theobroma-systems.com>
 Martin Kepplinger <martink@posteo.de> <martin.kepplinger@ginzinger.com>
 Matthieu CASTET <castet.matthieu@free.fr>

@@ -341,10 +341,7 @@ GuC
 GuC-specific firmware loader
 ----------------------------

-.. kernel-doc:: drivers/gpu/drm/i915/intel_guc_loader.c
-   :doc: GuC-specific firmware loader
-
-.. kernel-doc:: drivers/gpu/drm/i915/intel_guc_loader.c
+.. kernel-doc:: drivers/gpu/drm/i915/intel_guc_fw.c
    :internal:

 GuC-based command submission

@@ -12,8 +12,9 @@ ffffea0000000000 - ffffeaffffffffff (=40 bits) virtual memory map (1TB)
 ... unused hole ...
 ffffec0000000000 - fffffbffffffffff (=44 bits) kasan shadow memory (16TB)
 ... unused hole ...
-fffffe0000000000 - fffffe7fffffffff (=39 bits) LDT remap for PTI
-fffffe8000000000 - fffffeffffffffff (=39 bits) cpu_entry_area mapping
+                                    vaddr_end for KASLR
+fffffe0000000000 - fffffe7fffffffff (=39 bits) cpu_entry_area mapping
+fffffe8000000000 - fffffeffffffffff (=39 bits) LDT remap for PTI
 ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
 ... unused hole ...
 ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space

@@ -37,13 +38,15 @@ ffd4000000000000 - ffd5ffffffffffff (=49 bits) virtual memory map (512TB)
 ... unused hole ...
 ffdf000000000000 - fffffc0000000000 (=53 bits) kasan shadow memory (8PB)
 ... unused hole ...
-fffffe8000000000 - fffffeffffffffff (=39 bits) cpu_entry_area mapping
+                                    vaddr_end for KASLR
+fffffe0000000000 - fffffe7fffffffff (=39 bits) cpu_entry_area mapping
 ... unused hole ...
 ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
 ... unused hole ...
 ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space
 ... unused hole ...
 ffffffff80000000 - ffffffff9fffffff (=512 MB) kernel text mapping, from phys 0
-ffffffffa0000000 - [fixmap start]   (~1526 MB) module mapping space
+ffffffffa0000000 - fffffffffeffffff (1520 MB) module mapping space
+[fixmap start]   - ffffffffff5fffff kernel-internal fixmap range
 ffffffffff600000 - ffffffffff600fff (=4 kB) legacy vsyscall ABI
 ffffffffffe00000 - ffffffffffffffff (=2 MB) unused hole

@@ -67,9 +70,10 @@ memory window (this size is arbitrary, it can be raised later if needed).
 The mappings are not part of any other kernel PGD and are only available
 during EFI runtime calls.

-The module mapping space size changes based on the CONFIG requirements for the
-following fixmap section.
-
 Note that if CONFIG_RANDOMIZE_MEMORY is enabled, the direct mapping of all
 physical memory, vmalloc/ioremap space and virtual memory map are randomized.
 Their order is preserved but their base will be offset early at boot time.
+
+Be very careful vs. KASLR when changing anything here. The KASLR address
+range must not overlap with anything except the KASAN shadow area, which is
+correct as KASAN disables KASLR.

@@ -5149,15 +5149,15 @@ F: sound/usb/misc/ua101.c
 EFI TEST DRIVER
 L: linux-efi@vger.kernel.org
 M: Ivan Hu <ivan.hu@canonical.com>
-M: Matt Fleming <matt@codeblueprint.co.uk>
+M: Ard Biesheuvel <ard.biesheuvel@linaro.org>
 S: Maintained
 F: drivers/firmware/efi/test/

 EFI VARIABLE FILESYSTEM
 M: Matthew Garrett <matthew.garrett@nebula.com>
 M: Jeremy Kerr <jk@ozlabs.org>
-M: Matt Fleming <matt@codeblueprint.co.uk>
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/mfleming/efi.git
+M: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/efi/efi.git
 L: linux-efi@vger.kernel.org
 S: Maintained
 F: fs/efivarfs/

@@ -5318,7 +5318,6 @@ S: Supported
 F: security/integrity/evm/

 EXTENSIBLE FIRMWARE INTERFACE (EFI)
-M: Matt Fleming <matt@codeblueprint.co.uk>
 M: Ard Biesheuvel <ard.biesheuvel@linaro.org>
 L: linux-efi@vger.kernel.org
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/efi/efi.git

@@ -219,7 +219,7 @@
     compatible = "aspeed,ast2400-vuart";
     reg = <0x1e787000 0x40>;
     reg-shift = <2>;
-    interrupts = <10>;
+    interrupts = <8>;
     clocks = <&clk_uart>;
     no-loopback-test;
     status = "disabled";

@@ -221,6 +221,7 @@
 jc42@18 {
     compatible = "nxp,se97b", "jedec,jc-42.4-temp";
     reg = <0x18>;
+    smbus-timeout-disable;
 };

 dpot: mcp4651-104@28 {

@@ -178,7 +178,7 @@
  */
 battery {
     pinctrl-names = "default";
-    pintctrl-0 = <&battery_pins>;
+    pinctrl-0 = <&battery_pins>;
     compatible = "lego,ev3-battery";
     io-channels = <&adc 4>, <&adc 3>;
     io-channel-names = "voltage", "current";

@@ -392,7 +392,7 @@
 batt_volt_en {
     gpio-hog;
     gpios = <6 GPIO_ACTIVE_HIGH>;
-    output-low;
+    output-high;
 };
 };

@@ -664,6 +664,10 @@
     status = "okay";
 };

+&mixer {
+    status = "okay";
+};
+
 /* eMMC flash */
 &mmc_0 {
     status = "okay";

@@ -215,7 +215,7 @@
     reg = <0x2a>;
     VDDA-supply = <&reg_3p3v>;
     VDDIO-supply = <&reg_3p3v>;
-    clocks = <&sys_mclk 1>;
+    clocks = <&sys_mclk>;
 };
 };
 };

@@ -187,7 +187,7 @@
     reg = <0x0a>;
     VDDA-supply = <&reg_3p3v>;
     VDDIO-supply = <&reg_3p3v>;
-    clocks = <&sys_mclk 1>;
+    clocks = <&sys_mclk>;
 };
 };

@@ -83,6 +83,10 @@
 };
 };

+&cpu0 {
+    cpu0-supply = <&vdd_arm>;
+};
+
 &i2c1 {
     status = "okay";
     clock-frequency = <400000>;

@@ -956,7 +956,7 @@
 iep_mmu: iommu@ff900800 {
     compatible = "rockchip,iommu";
     reg = <0x0 0xff900800 0x0 0x40>;
-    interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH 0>;
+    interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
     interrupt-names = "iep_mmu";
     #iommu-cells = <0>;
     status = "disabled";

@@ -502,8 +502,8 @@
     reg = <0x01c16000 0x1000>;
     interrupts = <58>;
     clocks = <&ccu CLK_AHB_HDMI0>, <&ccu CLK_HDMI>,
-             <&ccu 9>,
-             <&ccu 18>;
+             <&ccu CLK_PLL_VIDEO0_2X>,
+             <&ccu CLK_PLL_VIDEO1_2X>;
     clock-names = "ahb", "mod", "pll-0", "pll-1";
     dmas = <&dma SUN4I_DMA_NORMAL 16>,
            <&dma SUN4I_DMA_NORMAL 16>,

@@ -82,8 +82,8 @@
     reg = <0x01c16000 0x1000>;
     interrupts = <58>;
     clocks = <&ccu CLK_AHB_HDMI>, <&ccu CLK_HDMI>,
-             <&ccu 9>,
-             <&ccu 16>;
+             <&ccu CLK_PLL_VIDEO0_2X>,
+             <&ccu CLK_PLL_VIDEO1_2X>;
     clock-names = "ahb", "mod", "pll-0", "pll-1";
     dmas = <&dma SUN4I_DMA_NORMAL 16>,
            <&dma SUN4I_DMA_NORMAL 16>,

@@ -429,8 +429,8 @@
     interrupts = <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>;
     clocks = <&ccu CLK_AHB1_HDMI>, <&ccu CLK_HDMI>,
              <&ccu CLK_HDMI_DDC>,
-             <&ccu 7>,
-             <&ccu 13>;
+             <&ccu CLK_PLL_VIDEO0_2X>,
+             <&ccu CLK_PLL_VIDEO1_2X>;
     clock-names = "ahb", "mod", "ddc", "pll-0", "pll-1";
     resets = <&ccu RST_AHB1_HDMI>;
     reset-names = "ahb";

@@ -581,8 +581,8 @@
     reg = <0x01c16000 0x1000>;
     interrupts = <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>;
     clocks = <&ccu CLK_AHB_HDMI0>, <&ccu CLK_HDMI>,
-             <&ccu 9>,
-             <&ccu 18>;
+             <&ccu CLK_PLL_VIDEO0_2X>,
+             <&ccu CLK_PLL_VIDEO1_2X>;
     clock-names = "ahb", "mod", "pll-0", "pll-1";
     dmas = <&dma SUN4I_DMA_NORMAL 16>,
            <&dma SUN4I_DMA_NORMAL 16>,

@@ -146,6 +146,7 @@
 status = "okay";

 axp81x: pmic@3a3 {
+    compatible = "x-powers,axp813";
     reg = <0x3a3>;
     interrupt-parent = <&r_intc>;
     interrupts = <0 IRQ_TYPE_LEVEL_LOW>;

@@ -156,7 +156,6 @@
     reg = <0x6e000 0x400>;
     ranges = <0 0x6e000 0x400>;
-    interrupt-parent = <&gic>;
     interrupt-controller;
     #address-cells = <1>;
     #size-cells = <1>;

@@ -793,7 +793,6 @@ void abort(void)
     /* if that doesn't kill us, halt */
     panic("Oops failed to kill thread");
 }
-EXPORT_SYMBOL(abort);

 void __init trap_init(void)
 {

@@ -868,10 +868,10 @@ static const struct dma_slave_map dm365_edma_map[] = {
     { "spi_davinci.0", "rx", EDMA_FILTER_PARAM(0, 17) },
     { "spi_davinci.3", "tx", EDMA_FILTER_PARAM(0, 18) },
     { "spi_davinci.3", "rx", EDMA_FILTER_PARAM(0, 19) },
-    { "dm6441-mmc.0", "rx", EDMA_FILTER_PARAM(0, 26) },
-    { "dm6441-mmc.0", "tx", EDMA_FILTER_PARAM(0, 27) },
-    { "dm6441-mmc.1", "rx", EDMA_FILTER_PARAM(0, 30) },
-    { "dm6441-mmc.1", "tx", EDMA_FILTER_PARAM(0, 31) },
+    { "da830-mmc.0", "rx", EDMA_FILTER_PARAM(0, 26) },
+    { "da830-mmc.0", "tx", EDMA_FILTER_PARAM(0, 27) },
+    { "da830-mmc.1", "rx", EDMA_FILTER_PARAM(0, 30) },
+    { "da830-mmc.1", "tx", EDMA_FILTER_PARAM(0, 31) },
 };

 static struct edma_soc_info dm365_edma_pdata = {

@@ -925,12 +925,14 @@ static struct resource edma_resources[] = {
     /* not using TC*_ERR */
 };

-static struct platform_device dm365_edma_device = {
-    .name = "edma",
-    .id = 0,
-    .dev.platform_data = &dm365_edma_pdata,
-    .num_resources = ARRAY_SIZE(edma_resources),
-    .resource = edma_resources,
+static const struct platform_device_info dm365_edma_device __initconst = {
+    .name = "edma",
+    .id = 0,
+    .dma_mask = DMA_BIT_MASK(32),
+    .res = edma_resources,
+    .num_res = ARRAY_SIZE(edma_resources),
+    .data = &dm365_edma_pdata,
+    .size_data = sizeof(dm365_edma_pdata),
 };

 static struct resource dm365_asp_resources[] = {

@@ -1428,13 +1430,18 @@ int __init dm365_init_video(struct vpfe_config *vpfe_cfg,

 static int __init dm365_init_devices(void)
 {
+    struct platform_device *edma_pdev;
     int ret = 0;

     if (!cpu_is_davinci_dm365())
         return 0;

     davinci_cfg_reg(DM365_INT_EDMA_CC);
-    platform_device_register(&dm365_edma_device);
+    edma_pdev = platform_device_register_full(&dm365_edma_device);
+    if (IS_ERR(edma_pdev)) {
+        pr_warn("%s: Failed to register eDMA\n", __func__);
+        return PTR_ERR(edma_pdev);
+    }

     platform_device_register(&dm365_mdio_device);
     platform_device_register(&dm365_emac_device);

@@ -75,6 +75,7 @@
     pinctrl-0 = <&rgmii_pins>;
     phy-mode = "rgmii";
     phy-handle = <&ext_rgmii_phy>;
+    phy-supply = <&reg_dc1sw>;
     status = "okay";
 };

@@ -77,6 +77,7 @@
     pinctrl-0 = <&rmii_pins>;
     phy-mode = "rmii";
     phy-handle = <&ext_rmii_phy1>;
+    phy-supply = <&reg_dc1sw>;
     status = "okay";
 };

@@ -82,6 +82,7 @@
     pinctrl-0 = <&rgmii_pins>;
     phy-mode = "rgmii";
     phy-handle = <&ext_rgmii_phy>;
+    phy-supply = <&reg_dc1sw>;
     status = "okay";
 };

@@ -95,7 +96,7 @@
 &mmc2 {
     pinctrl-names = "default";
     pinctrl-0 = <&mmc2_pins>;
-    vmmc-supply = <&reg_vcc3v3>;
+    vmmc-supply = <&reg_dcdc1>;
     vqmmc-supply = <&reg_vcc1v8>;
     bus-width = <8>;
     non-removable;

@@ -45,19 +45,10 @@

 #include "sun50i-a64.dtsi"

-/ {
-    reg_vcc3v3: vcc3v3 {
-        compatible = "regulator-fixed";
-        regulator-name = "vcc3v3";
-        regulator-min-microvolt = <3300000>;
-        regulator-max-microvolt = <3300000>;
-    };
-};
-
 &mmc0 {
     pinctrl-names = "default";
     pinctrl-0 = <&mmc0_pins>;
-    vmmc-supply = <&reg_vcc3v3>;
+    vmmc-supply = <&reg_dcdc1>;
     non-removable;
     disable-wp;
     bus-width = <4>;

@@ -71,7 +71,7 @@
     pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin>;
     vmmc-supply = <&reg_vcc3v3>;
     bus-width = <4>;
-    cd-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>;
+    cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>;
     status = "okay";
 };

@@ -255,7 +255,6 @@
 &avb {
     pinctrl-0 = <&avb_pins>;
     pinctrl-names = "default";
-    renesas,no-ether-link;
     phy-handle = <&phy0>;
     status = "okay";

@@ -145,7 +145,6 @@
 &avb {
     pinctrl-0 = <&avb_pins>;
     pinctrl-names = "default";
-    renesas,no-ether-link;
     phy-handle = <&phy0>;
     status = "okay";

@@ -132,6 +132,8 @@
     assigned-clocks = <&cru SCLK_MAC2IO>, <&cru SCLK_MAC2IO_EXT>;
     assigned-clock-parents = <&gmac_clkin>, <&gmac_clkin>;
     clock_in_out = "input";
+    /* shows instability at 1GBit right now */
+    max-speed = <100>;
     phy-supply = <&vcc_io>;
     phy-mode = "rgmii";
     pinctrl-names = "default";

@@ -514,7 +514,7 @@
 tsadc: tsadc@ff250000 {
     compatible = "rockchip,rk3328-tsadc";
     reg = <0x0 0xff250000 0x0 0x100>;
-    interrupts = <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH 0>;
+    interrupts = <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>;
     assigned-clocks = <&cru SCLK_TSADC>;
     assigned-clock-rates = <50000>;
     clocks = <&cru SCLK_TSADC>, <&cru PCLK_TSADC>;

@@ -155,17 +155,6 @@
     regulator-min-microvolt = <5000000>;
     regulator-max-microvolt = <5000000>;
 };
-
-vdd_log: vdd-log {
-    compatible = "pwm-regulator";
-    pwms = <&pwm2 0 25000 0>;
-    regulator-name = "vdd_log";
-    regulator-min-microvolt = <800000>;
-    regulator-max-microvolt = <1400000>;
-    regulator-always-on;
-    regulator-boot-on;
-    status = "okay";
-};
 };

 &cpu_b0 {

@@ -198,8 +198,8 @@
 gpio-controller;
 #gpio-cells = <2>;
 gpio-ranges = <&pinctrl 0 0 0>,
-              <&pinctrl 96 0 0>,
-              <&pinctrl 160 0 0>;
+              <&pinctrl 104 0 0>,
+              <&pinctrl 168 0 0>;
 gpio-ranges-group-names = "gpio_range0",
                           "gpio_range1",
                           "gpio_range2";

@@ -122,7 +122,6 @@ void abort(void)
     /* if that doesn't kill us, halt */
     panic("Oops failed to kill thread");
 }
-EXPORT_SYMBOL(abort);

 void __init trap_init(void)
 {

|
|||
|
||||
void disable_sacf_uaccess(mm_segment_t old_fs)
|
||||
{
|
||||
current->thread.mm_segment = old_fs;
|
||||
if (old_fs == USER_DS && test_facility(27)) {
|
||||
__ctl_load(S390_lowcore.user_asce, 1, 1);
|
||||
clear_cpu_flag(CIF_ASCE_PRIMARY);
|
||||
}
|
||||
current->thread.mm_segment = old_fs;
|
||||
}
|
||||
EXPORT_SYMBOL(disable_sacf_uaccess);
|
||||
|
||||
|
|
|
@@ -181,6 +181,9 @@ out_unlock:
 static int __dma_purge_tlb(struct zpci_dev *zdev, dma_addr_t dma_addr,
                            size_t size, int flags)
 {
+    unsigned long irqflags;
+    int ret;
+
     /*
      * With zdev->tlb_refresh == 0, rpcit is not required to establish new
      * translations when previously invalid translation-table entries are

@@ -196,8 +199,22 @@ static int __dma_purge_tlb(struct zpci_dev *zdev, dma_addr_t dma_addr,
         return 0;
     }

-    return zpci_refresh_trans((u64) zdev->fh << 32, dma_addr,
-                              PAGE_ALIGN(size));
+    ret = zpci_refresh_trans((u64) zdev->fh << 32, dma_addr,
+                             PAGE_ALIGN(size));
+    if (ret == -ENOMEM && !s390_iommu_strict) {
+        /* enable the hypervisor to free some resources */
+        if (zpci_refresh_global(zdev))
+            goto out;
+
+        spin_lock_irqsave(&zdev->iommu_bitmap_lock, irqflags);
+        bitmap_andnot(zdev->iommu_bitmap, zdev->iommu_bitmap,
+                      zdev->lazy_bitmap, zdev->iommu_pages);
+        bitmap_zero(zdev->lazy_bitmap, zdev->iommu_pages);
+        spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, irqflags);
+        ret = 0;
+    }
+out:
+    return ret;
 }

 static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,

@@ -89,6 +89,9 @@ int zpci_refresh_trans(u64 fn, u64 addr, u64 range)
     if (cc)
         zpci_err_insn(cc, status, addr, range);

+    if (cc == 1 && (status == 4 || status == 16))
+        return -ENOMEM;
+
     return (cc) ? -EIO : 0;
 }

@@ -298,7 +298,6 @@ void abort(void)
     /* if that doesn't kill us, halt */
     panic("Oops failed to kill thread");
 }
-EXPORT_SYMBOL(abort);

 void __init trap_init(void)
 {

|
|||
|
||||
#include <asm/cpu_entry_area.h>
|
||||
#include <asm/perf_event.h>
|
||||
#include <asm/tlbflush.h>
|
||||
#include <asm/insn.h>
|
||||
|
||||
#include "../perf_event.h"
|
||||
|
@ -283,20 +284,35 @@ static DEFINE_PER_CPU(void *, insn_buffer);
|
|||
|
||||
static void ds_update_cea(void *cea, void *addr, size_t size, pgprot_t prot)
|
||||
{
|
||||
unsigned long start = (unsigned long)cea;
|
||||
phys_addr_t pa;
|
||||
size_t msz = 0;
|
||||
|
||||
pa = virt_to_phys(addr);
|
||||
|
||||
preempt_disable();
|
||||
for (; msz < size; msz += PAGE_SIZE, pa += PAGE_SIZE, cea += PAGE_SIZE)
|
||||
cea_set_pte(cea, pa, prot);
|
||||
|
||||
/*
|
||||
* This is a cross-CPU update of the cpu_entry_area, we must shoot down
|
||||
* all TLB entries for it.
|
||||
*/
|
||||
flush_tlb_kernel_range(start, start + size);
|
||||
preempt_enable();
|
||||
}
|
||||
|
||||
static void ds_clear_cea(void *cea, size_t size)
|
||||
{
|
||||
unsigned long start = (unsigned long)cea;
|
||||
size_t msz = 0;
|
||||
|
||||
preempt_disable();
|
||||
for (; msz < size; msz += PAGE_SIZE, cea += PAGE_SIZE)
|
||||
cea_set_pte(cea, 0, PAGE_NONE);
|
||||
|
||||
flush_tlb_kernel_range(start, start + size);
|
||||
preempt_enable();
|
||||
}
|
||||
|
||||
static void *dsalloc_pages(size_t size, gfp_t flags, int cpu)
|
||||
|
|
|
@@ -140,7 +140,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
     ".popsection\n" \
     ".pushsection .altinstr_replacement, \"ax\"\n" \
     ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
-    ".popsection"
+    ".popsection\n"

 #define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\
     OLDINSTR_2(oldinstr, 1, 2) \

@@ -151,7 +151,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
     ".pushsection .altinstr_replacement, \"ax\"\n" \
     ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
     ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
-    ".popsection"
+    ".popsection\n"

 /*
  * Alternative instructions for different CPU types or capabilities.

@@ -341,6 +341,6 @@
 #define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */
 #define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */
 #define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */
-#define X86_BUG_CPU_INSECURE X86_BUG(14) /* CPU is insecure and needs kernel page table isolation */
+#define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */

 #endif /* _ASM_X86_CPUFEATURES_H */

@@ -75,7 +75,13 @@ typedef struct { pteval_t pte; } pte_t;
 #define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT)
 #define PGDIR_MASK (~(PGDIR_SIZE - 1))

-/* See Documentation/x86/x86_64/mm.txt for a description of the memory map. */
+/*
+ * See Documentation/x86/x86_64/mm.txt for a description of the memory map.
+ *
+ * Be very careful vs. KASLR when changing anything here. The KASLR address
+ * range must not overlap with anything except the KASAN shadow area, which
+ * is correct as KASAN disables KASLR.
+ */
 #define MAXMEM _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL)

 #ifdef CONFIG_X86_5LEVEL

@@ -88,7 +94,7 @@ typedef struct { pteval_t pte; } pte_t;
 # define VMALLOC_SIZE_TB _AC(32, UL)
 # define __VMALLOC_BASE _AC(0xffffc90000000000, UL)
 # define __VMEMMAP_BASE _AC(0xffffea0000000000, UL)
-# define LDT_PGD_ENTRY _AC(-4, UL)
+# define LDT_PGD_ENTRY _AC(-3, UL)
 # define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT)
 #endif

@@ -104,13 +110,13 @@ typedef struct { pteval_t pte; } pte_t;

 #define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
 /* The module sections ends with the start of the fixmap */
-#define MODULES_END __fix_to_virt(__end_of_fixed_addresses + 1)
+#define MODULES_END _AC(0xffffffffff000000, UL)
 #define MODULES_LEN (MODULES_END - MODULES_VADDR)

 #define ESPFIX_PGD_ENTRY _AC(-2, UL)
 #define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << P4D_SHIFT)

-#define CPU_ENTRY_AREA_PGD _AC(-3, UL)
+#define CPU_ENTRY_AREA_PGD _AC(-4, UL)
 #define CPU_ENTRY_AREA_BASE (CPU_ENTRY_AREA_PGD << P4D_SHIFT)

 #define EFI_VA_START ( -4 * (_AC(1, UL) << 30))

@@ -924,7 +924,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
     setup_force_cpu_cap(X86_FEATURE_ALWAYS);

     if (c->x86_vendor != X86_VENDOR_AMD)
-        setup_force_cpu_bug(X86_BUG_CPU_INSECURE);
+        setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);

     fpu__init_system(c);

@@ -906,9 +906,6 @@ void __init setup_arch(char **cmdline_p)
         set_bit(EFI_BOOT, &efi.flags);
         set_bit(EFI_64BIT, &efi.flags);
     }
-
-    if (efi_enabled(EFI_BOOT))
-        efi_memblock_x86_reserve_range();
 #endif

     x86_init.oem.arch_setup();

@@ -962,6 +959,8 @@ void __init setup_arch(char **cmdline_p)

     parse_early_param();

+    if (efi_enabled(EFI_BOOT))
+        efi_memblock_x86_reserve_range();
 #ifdef CONFIG_MEMORY_HOTPLUG
     /*
      * Memory used by the kernel cannot be hot-removed because Linux

@@ -61,10 +61,10 @@ enum address_markers_idx {
     KASAN_SHADOW_START_NR,
     KASAN_SHADOW_END_NR,
 #endif
-    CPU_ENTRY_AREA_NR,
 #if defined(CONFIG_MODIFY_LDT_SYSCALL) && !defined(CONFIG_X86_5LEVEL)
     LDT_NR,
 #endif
+    CPU_ENTRY_AREA_NR,
 #ifdef CONFIG_X86_ESPFIX64
     ESPFIX_START_NR,
 #endif

@@ -868,7 +868,7 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
     .next_asid = 1,
     .cr4 = ~0UL, /* fail hard if we screw up cr4 shadow initialization */
 };
-EXPORT_SYMBOL_GPL(cpu_tlbstate);
+EXPORT_PER_CPU_SYMBOL(cpu_tlbstate);

 void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache)
 {

@@ -34,25 +34,14 @@
 #define TB_SHIFT 40

 /*
- * Virtual address start and end range for randomization. The end changes base
- * on configuration to have the highest amount of space for randomization.
- * It increases the possible random position for each randomized region.
+ * Virtual address start and end range for randomization.
  *
- * You need to add an if/def entry if you introduce a new memory region
- * compatible with KASLR. Your entry must be in logical order with memory
- * layout. For example, ESPFIX is before EFI because its virtual address is
- * before. You also need to add a BUILD_BUG_ON() in kernel_randomize_memory() to
- * ensure that this order is correct and won't be changed.
+ * The end address could depend on more configuration options to make the
+ * highest amount of space for randomization available, but that's too hard
+ * to keep straight and caused issues already.
  */
 static const unsigned long vaddr_start = __PAGE_OFFSET_BASE;
-
-#if defined(CONFIG_X86_ESPFIX64)
-static const unsigned long vaddr_end = ESPFIX_BASE_ADDR;
-#elif defined(CONFIG_EFI)
-static const unsigned long vaddr_end = EFI_VA_END;
-#else
-static const unsigned long vaddr_end = __START_KERNEL_map;
-#endif
+static const unsigned long vaddr_end = CPU_ENTRY_AREA_BASE;

 /* Default values */
 unsigned long page_offset_base = __PAGE_OFFSET_BASE;

@@ -101,15 +90,12 @@ void __init kernel_randomize_memory(void)
     unsigned long remain_entropy;

     /*
-     * All these BUILD_BUG_ON checks ensures the memory layout is
-     * consistent with the vaddr_start/vaddr_end variables.
+     * These BUILD_BUG_ON checks ensure the memory layout is consistent
+     * with the vaddr_start/vaddr_end variables. These checks are very
+     * limited....
      */
     BUILD_BUG_ON(vaddr_start >= vaddr_end);
-    BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_ESPFIX64) &&
-                 vaddr_end >= EFI_VA_END);
-    BUILD_BUG_ON((IS_ENABLED(CONFIG_X86_ESPFIX64) ||
-                  IS_ENABLED(CONFIG_EFI)) &&
-                 vaddr_end >= __START_KERNEL_map);
+    BUILD_BUG_ON(vaddr_end != CPU_ENTRY_AREA_BASE);
     BUILD_BUG_ON(vaddr_end > __START_KERNEL_map);

     if (!kaslr_memory_enabled())

@@ -56,13 +56,13 @@

 static void __init pti_print_if_insecure(const char *reason)
 {
-    if (boot_cpu_has_bug(X86_BUG_CPU_INSECURE))
+    if (boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
         pr_info("%s\n", reason);
 }

 static void __init pti_print_if_secure(const char *reason)
 {
-    if (!boot_cpu_has_bug(X86_BUG_CPU_INSECURE))
+    if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
         pr_info("%s\n", reason);
 }

@@ -96,7 +96,7 @@ void __init pti_check_boottime_disable(void)
     }

 autosel:
-    if (!boot_cpu_has_bug(X86_BUG_CPU_INSECURE))
+    if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
         return;
 enable:
     setup_force_cpu_cap(X86_FEATURE_PTI);

@@ -592,7 +592,18 @@ static int qrk_capsule_setup_info(struct capsule_info *cap_info, void **pkbuff,
     /*
      * Update the first page pointer to skip over the CSH header.
      */
-    cap_info->pages[0] += csh->headersize;
+    cap_info->phys[0] += csh->headersize;
+
+    /*
+     * cap_info->capsule should point at a virtual mapping of the entire
+     * capsule, starting at the capsule header. Our image has the Quark
+     * security header prepended, so we cannot rely on the default vmap()
+     * mapping created by the generic capsule code.
+     * Given that the Quark firmware does not appear to care about the
+     * virtual mapping, let's just point cap_info->capsule at our copy
+     * of the capsule header.
+     */
+    cap_info->capsule = &cap_info->header;

     return 1;
 }

@@ -664,7 +664,7 @@ void af_alg_free_areq_sgls(struct af_alg_async_req *areq)
     unsigned int i;

     list_for_each_entry_safe(rsgl, tmp, &areq->rsgl_list, list) {
-        ctx->rcvused -= rsgl->sg_num_bytes;
+        atomic_sub(rsgl->sg_num_bytes, &ctx->rcvused);
         af_alg_free_sg(&rsgl->sgl);
         list_del(&rsgl->list);
         if (rsgl != &areq->first_rsgl)

@@ -1163,7 +1163,7 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,

         areq->last_rsgl = rsgl;
         len += err;
-        ctx->rcvused += err;
+        atomic_add(err, &ctx->rcvused);
         rsgl->sg_num_bytes = err;
         iov_iter_advance(&msg->msg_iter, err);
     }

@@ -571,7 +571,7 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk)
     INIT_LIST_HEAD(&ctx->tsgl_list);
     ctx->len = len;
     ctx->used = 0;
-    ctx->rcvused = 0;
+    atomic_set(&ctx->rcvused, 0);
     ctx->more = 0;
     ctx->merge = 0;
     ctx->enc = 0;

@@ -390,7 +390,7 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
     INIT_LIST_HEAD(&ctx->tsgl_list);
     ctx->len = len;
     ctx->used = 0;
-    ctx->rcvused = 0;
+    atomic_set(&ctx->rcvused, 0);
     ctx->more = 0;
     ctx->merge = 0;
     ctx->enc = 0;

@@ -610,6 +610,11 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
                              algt->mask));
     if (IS_ERR(poly))
         return PTR_ERR(poly);
+    poly_hash = __crypto_hash_alg_common(poly);
+
+    err = -EINVAL;
+    if (poly_hash->digestsize != POLY1305_DIGEST_SIZE)
+        goto out_put_poly;

     err = -ENOMEM;
     inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);

@@ -618,7 +623,6 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,

     ctx = aead_instance_ctx(inst);
     ctx->saltlen = CHACHAPOLY_IV_SIZE - ivsize;
-    poly_hash = __crypto_hash_alg_common(poly);
     err = crypto_init_ahash_spawn(&ctx->poly, poly_hash,
                                   aead_crypto_instance(inst));
     if (err)

@@ -254,6 +254,14 @@ static void pcrypt_aead_exit_tfm(struct crypto_aead *tfm)
     crypto_free_aead(ctx->child);
 }

+static void pcrypt_free(struct aead_instance *inst)
+{
+    struct pcrypt_instance_ctx *ctx = aead_instance_ctx(inst);
+
+    crypto_drop_aead(&ctx->spawn);
+    kfree(inst);
+}
+
 static int pcrypt_init_instance(struct crypto_instance *inst,
                                 struct crypto_alg *alg)
 {

@@ -319,6 +327,8 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
     inst->alg.encrypt = pcrypt_aead_encrypt;
     inst->alg.decrypt = pcrypt_aead_decrypt;

+    inst->free = pcrypt_free;
+
     err = aead_register_instance(tmpl, inst);
     if (err)
         goto out_drop_aead;

@@ -349,14 +359,6 @@ static int pcrypt_create(struct crypto_template *tmpl, struct rtattr **tb)
     return -EINVAL;
 }

-static void pcrypt_free(struct crypto_instance *inst)
-{
-    struct pcrypt_instance_ctx *ctx = crypto_instance_ctx(inst);
-
-    crypto_drop_aead(&ctx->spawn);
-    kfree(inst);
-}
-
 static int pcrypt_cpumask_change_notify(struct notifier_block *self,
                                         unsigned long val, void *data)
 {

@@ -469,7 +471,6 @@ static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt)
 static struct crypto_template pcrypt_tmpl = {
     .name = "pcrypt",
     .create = pcrypt_create,
-    .free = pcrypt_free,
     .module = THIS_MODULE,
 };

@@ -178,6 +178,7 @@ static struct bus_type sunxi_rsb_bus = {
     .match = sunxi_rsb_device_match,
     .probe = sunxi_rsb_device_probe,
     .remove = sunxi_rsb_device_remove,
+    .uevent = of_device_uevent_modalias,
 };

 static void sunxi_rsb_dev_release(struct device *dev)

@@ -5,6 +5,7 @@ config CRYPTO_DEV_CHELSIO
     select CRYPTO_SHA256
     select CRYPTO_SHA512
     select CRYPTO_AUTHENC
+    select CRYPTO_GF128MUL
     ---help---
       The Chelsio Crypto Co-processor driver for T6 adapters.

@@ -607,6 +607,7 @@ static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv
         ndesc = ctx->handle_result(priv, ring, sreq->req,
                                    &should_complete, &ret);
         if (ndesc < 0) {
+            kfree(sreq);
             dev_err(priv->dev, "failed to handle result (%d)", ndesc);
             return;
         }

@@ -14,6 +14,7 @@

 #include <crypto/aes.h>
 #include <crypto/skcipher.h>
+#include <crypto/internal/skcipher.h>

 #include "safexcel.h"

@@ -33,6 +34,10 @@ struct safexcel_cipher_ctx {
     unsigned int key_len;
 };

+struct safexcel_cipher_req {
+    bool needs_inv;
+};
+
 static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx,
                                   struct crypto_async_request *async,
                                   struct safexcel_command_desc *cdesc,

@@ -126,9 +131,9 @@ static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
     return 0;
 }

-static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
-                                  struct crypto_async_request *async,
-                                  bool *should_complete, int *ret)
+static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
+                                      struct crypto_async_request *async,
+                                      bool *should_complete, int *ret)
 {
     struct skcipher_request *req = skcipher_request_cast(async);
     struct safexcel_result_desc *rdesc;

@@ -265,7 +270,6 @@ static int safexcel_aes_send(struct crypto_async_request *async,
     spin_unlock_bh(&priv->ring[ring].egress_lock);

     request->req = &req->base;
-    ctx->base.handle_result = safexcel_handle_result;

     *commands = n_cdesc;
     *results = n_rdesc;

@@ -341,8 +345,6 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,

     ring = safexcel_select_ring(priv);
     ctx->base.ring = ring;
-    ctx->base.needs_inv = false;
-    ctx->base.send = safexcel_aes_send;

     spin_lock_bh(&priv->ring[ring].queue_lock);
     enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);

@@ -359,6 +361,26 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
     return ndesc;
 }

+static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
+                                  struct crypto_async_request *async,
+                                  bool *should_complete, int *ret)
+{
+    struct skcipher_request *req = skcipher_request_cast(async);
+    struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
+    int err;
+
+    if (sreq->needs_inv) {
+        sreq->needs_inv = false;
+        err = safexcel_handle_inv_result(priv, ring, async,
+                                         should_complete, ret);
+    } else {
+        err = safexcel_handle_req_result(priv, ring, async,
+                                         should_complete, ret);
+    }
+
+    return err;
+}
+
 static int safexcel_cipher_send_inv(struct crypto_async_request *async,
                                     int ring, struct safexcel_request *request,
                                     int *commands, int *results)

@@ -368,8 +390,6 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *async,
     struct safexcel_crypto_priv *priv = ctx->priv;
     int ret;

-    ctx->base.handle_result = safexcel_handle_inv_result;
-
     ret = safexcel_invalidate_cache(async, &ctx->base, priv,
                                     ctx->base.ctxr_dma, ring, request);
     if (unlikely(ret))

@@ -381,28 +401,46 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *async,
     return 0;
 }

+static int safexcel_send(struct crypto_async_request *async,
+                         int ring, struct safexcel_request *request,
+                         int *commands, int *results)
+{
+    struct skcipher_request *req = skcipher_request_cast(async);
+    struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
+    int ret;
+
+    if (sreq->needs_inv)
+        ret = safexcel_cipher_send_inv(async, ring, request,
+                                       commands, results);
+    else
+        ret = safexcel_aes_send(async, ring, request,
+                                commands, results);
+    return ret;
+}
+
 static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm)
 {
     struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
     struct safexcel_crypto_priv *priv = ctx->priv;
-    struct skcipher_request req;
+    SKCIPHER_REQUEST_ON_STACK(req, __crypto_skcipher_cast(tfm));
+    struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
     struct safexcel_inv_result result = {};
     int ring = ctx->base.ring;

-    memset(&req, 0, sizeof(struct skcipher_request));
+    memset(req, 0, sizeof(struct skcipher_request));

     /* create invalidation request */
     init_completion(&result.completion);
-    skcipher_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-                                  safexcel_inv_complete, &result);
+    skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+                                  safexcel_inv_complete, &result);

-    skcipher_request_set_tfm(&req, __crypto_skcipher_cast(tfm));
-    ctx = crypto_tfm_ctx(req.base.tfm);
+    skcipher_request_set_tfm(req, __crypto_skcipher_cast(tfm));
+    ctx = crypto_tfm_ctx(req->base.tfm);
     ctx->base.exit_inv = true;
-    ctx->base.send = safexcel_cipher_send_inv;
+    sreq->needs_inv = true;

     spin_lock_bh(&priv->ring[ring].queue_lock);
-    crypto_enqueue_request(&priv->ring[ring].queue, &req.base);
+    crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
     spin_unlock_bh(&priv->ring[ring].queue_lock);

     if (!priv->ring[ring].need_dequeue)

@@ -424,19 +462,21 @@ static int safexcel_aes(struct skcipher_request *req,
                         enum safexcel_cipher_direction dir, u32 mode)
 {
     struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+    struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
     struct safexcel_crypto_priv *priv = ctx->priv;
     int ret, ring;

+    sreq->needs_inv = false;
     ctx->direction = dir;
     ctx->mode = mode;

     if (ctx->base.ctxr) {
-        if (ctx->base.needs_inv)
-            ctx->base.send = safexcel_cipher_send_inv;
+        if (ctx->base.needs_inv) {
+            sreq->needs_inv = true;
+            ctx->base.needs_inv = false;
+        }
     } else {
         ctx->base.ring = safexcel_select_ring(priv);
-        ctx->base.send = safexcel_aes_send;
-
         ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
                                          EIP197_GFP_FLAGS(req->base),
                                          &ctx->base.ctxr_dma);

@@ -476,6 +516,11 @@ static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm)
                          alg.skcipher.base);

     ctx->priv = tmpl->priv;
+    ctx->base.send = safexcel_send;
+    ctx->base.handle_result = safexcel_handle_result;
+
+    crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
+                                sizeof(struct safexcel_cipher_req));

     return 0;
 }

@@ -32,9 +32,10 @@ struct safexcel_ahash_req {
     bool last_req;
     bool finish;
     bool hmac;
+    bool needs_inv;

     u8 state_sz; /* expected sate size, only set once */
-    u32 state[SHA256_DIGEST_SIZE / sizeof(u32)];
+    u32 state[SHA256_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));

     u64 len;
     u64 processed;

@@ -119,15 +120,15 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
     }
 }

-static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
-                                  struct crypto_async_request *async,
-                                  bool *should_complete, int *ret)
+static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
+                                      struct crypto_async_request *async,
+                                      bool *should_complete, int *ret)
 {
     struct safexcel_result_desc *rdesc;
     struct ahash_request *areq = ahash_request_cast(async);
     struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
     struct safexcel_ahash_req *sreq = ahash_request_ctx(areq);
-    int cache_len, result_sz = sreq->state_sz;
+    int cache_len;

     *ret = 0;

@@ -148,8 +149,8 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
     spin_unlock_bh(&priv->ring[ring].egress_lock);

     if (sreq->finish)
-        result_sz = crypto_ahash_digestsize(ahash);
-    memcpy(sreq->state, areq->result, result_sz);
+        memcpy(areq->result, sreq->state,
+               crypto_ahash_digestsize(ahash));

     dma_unmap_sg(priv->dev, areq->src,
                  sg_nents_for_len(areq->src, areq->nbytes), DMA_TO_DEVICE);

@@ -165,9 +166,9 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
     return 1;
 }

-static int safexcel_ahash_send(struct crypto_async_request *async, int ring,
-                               struct safexcel_request *request, int *commands,
-                               int *results)
+static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
+                                   struct safexcel_request *request,
+                                   int *commands, int *results)
 {
     struct ahash_request *areq = ahash_request_cast(async);
     struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);

@@ -273,7 +274,7 @@ send_command:
     /* Add the token */
     safexcel_hash_token(first_cdesc, len, req->state_sz);

-    ctx->base.result_dma = dma_map_single(priv->dev, areq->result,
+    ctx->base.result_dma = dma_map_single(priv->dev, req->state,
                                           req->state_sz, DMA_FROM_DEVICE);
     if (dma_mapping_error(priv->dev, ctx->base.result_dma)) {
         ret = -EINVAL;

@@ -292,7 +293,6 @@ send_command:

     req->processed += len;
     request->req = &areq->base;
-    ctx->base.handle_result = safexcel_handle_result;

     *commands = n_cdesc;
     *results = 1;

@@ -374,8 +374,6 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,

     ring = safexcel_select_ring(priv);
     ctx->base.ring = ring;
-    ctx->base.needs_inv = false;
-    ctx->base.send = safexcel_ahash_send;

     spin_lock_bh(&priv->ring[ring].queue_lock);
     enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);

@@ -392,6 +390,26 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
     return 1;
 }

+static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
+                                  struct crypto_async_request *async,
+                                  bool *should_complete, int *ret)
+{
+    struct ahash_request *areq = ahash_request_cast(async);
+    struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+    int err;
+
+    if (req->needs_inv) {
+        req->needs_inv = false;
+        err = safexcel_handle_inv_result(priv, ring, async,
+                                         should_complete, ret);
+    } else {
+        err = safexcel_handle_req_result(priv, ring, async,
+                                         should_complete, ret);
+    }
+
+    return err;
+}
+
 static int safexcel_ahash_send_inv(struct crypto_async_request *async,
                                    int ring, struct safexcel_request *request,
                                    int *commands, int *results)

@@ -400,7 +418,6 @@ static int safexcel_ahash_send_inv(struct crypto_async_request *async,
     struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
     int ret;

-    ctx->base.handle_result = safexcel_handle_inv_result;
     ret = safexcel_invalidate_cache(async, &ctx->base, ctx->priv,
                                     ctx->base.ctxr_dma, ring, request);
     if (unlikely(ret))

@@ -412,28 +429,46 @@ static int safexcel_ahash_send_inv(struct crypto_async_request *async,
     return 0;
 }

+static int safexcel_ahash_send(struct crypto_async_request *async,
+                               int ring, struct safexcel_request *request,
+                               int *commands, int *results)
+{
+    struct ahash_request *areq = ahash_request_cast(async);
+    struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+    int ret;
+
+    if (req->needs_inv)
+        ret = safexcel_ahash_send_inv(async, ring, request,
+                                      commands, results);
+    else
+        ret = safexcel_ahash_send_req(async, ring, request,
+                                      commands, results);
+    return ret;
+}
+
 static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
 {
     struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
     struct safexcel_crypto_priv *priv = ctx->priv;
-    struct ahash_request req;
+    AHASH_REQUEST_ON_STACK(req, __crypto_ahash_cast(tfm));
+    struct safexcel_ahash_req *rctx = ahash_request_ctx(req);
     struct safexcel_inv_result result = {};
     int ring = ctx->base.ring;

-    memset(&req, 0, sizeof(struct ahash_request));
+    memset(req, 0, sizeof(struct ahash_request));

     /* create invalidation request */
     init_completion(&result.completion);
-    ahash_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+    ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                safexcel_inv_complete, &result);

-    ahash_request_set_tfm(&req, __crypto_ahash_cast(tfm));
-    ctx = crypto_tfm_ctx(req.base.tfm);
+    ahash_request_set_tfm(req, __crypto_ahash_cast(tfm));
+    ctx = crypto_tfm_ctx(req->base.tfm);
     ctx->base.exit_inv = true;
-    ctx->base.send = safexcel_ahash_send_inv;
+    rctx->needs_inv = true;

     spin_lock_bh(&priv->ring[ring].queue_lock);
-    crypto_enqueue_request(&priv->ring[ring].queue, &req.base);
+    crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
     spin_unlock_bh(&priv->ring[ring].queue_lock);

     if (!priv->ring[ring].need_dequeue)

@@ -481,14 +516,16 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
     struct safexcel_crypto_priv *priv = ctx->priv;
     int ret, ring;

-    ctx->base.send = safexcel_ahash_send;
+    req->needs_inv = false;

     if (req->processed && ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
         ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq);

     if (ctx->base.ctxr) {
-        if (ctx->base.needs_inv)
-            ctx->base.send = safexcel_ahash_send_inv;
+        if (ctx->base.needs_inv) {
+            ctx->base.needs_inv = false;
+            req->needs_inv = true;
+        }
     } else {
         ctx->base.ring = safexcel_select_ring(priv);
         ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,

@@ -622,6 +659,8 @@ static int safexcel_ahash_cra_init(struct crypto_tfm *tfm)
                          struct safexcel_alg_template, alg.ahash);

     ctx->priv = tmpl->priv;
+    ctx->base.send = safexcel_ahash_send;
+    ctx->base.handle_result = safexcel_handle_result;

     crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                              sizeof(struct safexcel_ahash_req));

@@ -1625,6 +1625,7 @@ static int queue_cache_init(void)
                                CWQ_ENTRY_SIZE, 0, NULL);
     if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
         kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
+        queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
         return -ENOMEM;
     }
     return 0;

@@ -1634,6 +1635,8 @@ static void queue_cache_destroy(void)
 {
     kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
     kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
+    queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
+    queue_cache[HV_NCS_QTYPE_CWQ - 1] = NULL;
 }

 static long spu_queue_register_workfn(void *arg)

@@ -20,10 +20,6 @@

 #define NO_FURTHER_WRITE_ACTION -1

-#ifndef phys_to_page
-#define phys_to_page(x) pfn_to_page((x) >> PAGE_SHIFT)
-#endif
-
 /**
  * efi_free_all_buff_pages - free all previous allocated buffer pages
  * @cap_info: pointer to current instance of capsule_info structure

@@ -35,7 +31,7 @@
 static void efi_free_all_buff_pages(struct capsule_info *cap_info)
 {
     while (cap_info->index > 0)
-        __free_page(phys_to_page(cap_info->pages[--cap_info->index]));
+        __free_page(cap_info->pages[--cap_info->index]);

     cap_info->index = NO_FURTHER_WRITE_ACTION;
 }

@@ -71,6 +67,14 @@ int __efi_capsule_setup_info(struct capsule_info *cap_info)

     cap_info->pages = temp_page;

+    temp_page = krealloc(cap_info->phys,
+                         pages_needed * sizeof(phys_addr_t *),
+                         GFP_KERNEL | __GFP_ZERO);
+    if (!temp_page)
+        return -ENOMEM;
+
+    cap_info->phys = temp_page;
+
     return 0;
 }

@@ -105,9 +109,24 @@
 **/
 static ssize_t efi_capsule_submit_update(struct capsule_info *cap_info)
 {
+    bool do_vunmap = false;
     int ret;

-    ret = efi_capsule_update(&cap_info->header, cap_info->pages);
+    /*
+     * cap_info->capsule may have been assigned already by a quirk
+     * handler, so only overwrite it if it is NULL
+     */
+    if (!cap_info->capsule) {
+        cap_info->capsule = vmap(cap_info->pages, cap_info->index,
+                                 VM_MAP, PAGE_KERNEL);
+        if (!cap_info->capsule)
+            return -ENOMEM;
+        do_vunmap = true;
+    }
+
+    ret = efi_capsule_update(cap_info->capsule, cap_info->phys);
+    if (do_vunmap)
+        vunmap(cap_info->capsule);
     if (ret) {
         pr_err("capsule update failed\n");
         return ret;

@@ -165,10 +184,12 @@ static ssize_t efi_capsule_write(struct file *file, const char __user *buff,
             goto failed;
         }

-        cap_info->pages[cap_info->index++] = page_to_phys(page);
+        cap_info->pages[cap_info->index] = page;
+        cap_info->phys[cap_info->index] = page_to_phys(page);
         cap_info->page_bytes_remain = PAGE_SIZE;
+        cap_info->index++;
     } else {
-        page = phys_to_page(cap_info->pages[cap_info->index - 1]);
+        page = cap_info->pages[cap_info->index - 1];
     }

     kbuff = kmap(page);

@@ -252,6 +273,7 @@ static int efi_capsule_release(struct inode *inode, struct file *file)
     struct capsule_info *cap_info = file->private_data;

     kfree(cap_info->pages);
+    kfree(cap_info->phys);
     kfree(file->private_data);
     file->private_data = NULL;
     return 0;

@@ -281,6 +303,13 @@ static int efi_capsule_open(struct inode *inode, struct file *file)
         return -ENOMEM;
     }

+    cap_info->phys = kzalloc(sizeof(void *), GFP_KERNEL);
+    if (!cap_info->phys) {
+        kfree(cap_info->pages);
+        kfree(cap_info);
+        return -ENOMEM;
+    }
+
     file->private_data = cap_info;

     return 0;

@@ -1360,7 +1360,7 @@ void dpp1_cm_set_output_csc_adjustment(

 void dpp1_cm_set_output_csc_default(
     struct dpp *dpp_base,
-    const struct default_adjustment *default_adjust);
+    enum dc_color_space colorspace);

 void dpp1_cm_set_gamut_remap(
     struct dpp *dpp,

@@ -225,14 +225,13 @@ void dpp1_cm_set_gamut_remap(

 void dpp1_cm_set_output_csc_default(
     struct dpp *dpp_base,
-    const struct default_adjustment *default_adjust)
+    enum dc_color_space colorspace)
 {

     struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
     uint32_t ocsc_mode = 0;

-    if (default_adjust != NULL) {
-        switch (default_adjust->out_color_space) {
+    switch (colorspace) {
     case COLOR_SPACE_SRGB:
     case COLOR_SPACE_2020_RGB_FULLRANGE:
         ocsc_mode = 0;

@@ -253,7 +252,6 @@ void dpp1_cm_set_output_csc_default(
     case COLOR_SPACE_UNKNOWN:
     default:
         break;
     }
-    }

     REG_SET(CM_OCSC_CONTROL, 0, CM_OCSC_MODE, ocsc_mode);

@@ -2097,6 +2097,8 @@ static void program_csc_matrix(struct pipe_ctx *pipe_ctx,
         tbl_entry.color_space = color_space;
         //tbl_entry.regval = matrix;
         pipe_ctx->plane_res.dpp->funcs->opp_set_csc_adjustment(pipe_ctx->plane_res.dpp, &tbl_entry);
+    } else {
+        pipe_ctx->plane_res.dpp->funcs->opp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
     }
 }
 static bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx)

@@ -64,7 +64,7 @@ struct dpp_funcs {

     void (*opp_set_csc_default)(
         struct dpp *dpp,
-        const struct default_adjustment *default_adjust);
+        enum dc_color_space colorspace);

     void (*opp_set_csc_adjustment)(
         struct dpp *dpp,

@@ -168,16 +168,23 @@ static void armada_drm_crtc_update(struct armada_crtc *dcrtc)
 void armada_drm_plane_calc_addrs(u32 *addrs, struct drm_framebuffer *fb,
     int x, int y)
 {
+    const struct drm_format_info *format = fb->format;
+    unsigned int num_planes = format->num_planes;
     u32 addr = drm_fb_obj(fb)->dev_addr;
-    int num_planes = fb->format->num_planes;
     int i;

     if (num_planes > 3)
         num_planes = 3;

-    for (i = 0; i < num_planes; i++)
+    addrs[0] = addr + fb->offsets[0] + y * fb->pitches[0] +
+               x * format->cpp[0];
+
+    y /= format->vsub;
+    x /= format->hsub;
+
+    for (i = 1; i < num_planes; i++)
         addrs[i] = addr + fb->offsets[i] + y * fb->pitches[i] +
-                   x * fb->format->cpp[i];
+                   x * format->cpp[i];
     for (; i < 3; i++)
         addrs[i] = 0;
 }

@@ -744,15 +751,14 @@ void armada_drm_crtc_plane_disable(struct armada_crtc *dcrtc,
     if (plane->fb)
         drm_framebuffer_put(plane->fb);

-    /* Power down the Y/U/V FIFOs */
-    sram_para1 = CFG_PDWN16x66 | CFG_PDWN32x66;
-
     /* Power down most RAMs and FIFOs if this is the primary plane */
     if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
-        sram_para1 |= CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 |
-                      CFG_PDWN32x32 | CFG_PDWN64x66;
+        sram_para1 = CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 |
+                     CFG_PDWN32x32 | CFG_PDWN64x66;
         dma_ctrl0_mask = CFG_GRA_ENA;
     } else {
+        /* Power down the Y/U/V FIFOs */
+        sram_para1 = CFG_PDWN16x66 | CFG_PDWN32x66;
         dma_ctrl0_mask = CFG_DMA_ENA;
     }

@@ -1225,17 +1231,13 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,

     ret = devm_request_irq(dev, irq, armada_drm_irq, 0, "armada_drm_crtc",
                            dcrtc);
-    if (ret < 0) {
-        kfree(dcrtc);
-        return ret;
-    }
+    if (ret < 0)
+        goto err_crtc;

     if (dcrtc->variant->init) {
         ret = dcrtc->variant->init(dcrtc, dev);
-        if (ret) {
-            kfree(dcrtc);
-            return ret;
-        }
+        if (ret)
+            goto err_crtc;
     }

     /* Ensure AXI pipeline is enabled */

@@ -1246,13 +1248,15 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
     dcrtc->crtc.port = port;

     primary = kzalloc(sizeof(*primary), GFP_KERNEL);
-    if (!primary)
-        return -ENOMEM;
+    if (!primary) {
+        ret = -ENOMEM;
+        goto err_crtc;
+    }

     ret = armada_drm_plane_init(primary);
     if (ret) {
         kfree(primary);
-        return ret;
+        goto err_crtc;
     }

     ret = drm_universal_plane_init(drm, &primary->base, 0,

@@ -1263,7 +1267,7 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
                                    DRM_PLANE_TYPE_PRIMARY, NULL);
     if (ret) {
         kfree(primary);
-        return ret;
+        goto err_crtc;
     }

     ret = drm_crtc_init_with_planes(drm, &dcrtc->crtc, &primary->base, NULL,

@@ -1282,6 +1286,9 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,

 err_crtc_init:
     primary->base.funcs->destroy(&primary->base);
+err_crtc:
+    kfree(dcrtc);
+
     return ret;
 }

@@ -42,6 +42,8 @@ struct armada_plane_work {
 };

 struct armada_plane_state {
+    u16 src_x;
+    u16 src_y;
     u32 src_hw;
     u32 dst_hw;
     u32 dst_yx;

@ -99,6 +99,7 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
|
|||
{
|
||||
struct armada_ovl_plane *dplane = drm_to_armada_ovl_plane(plane);
|
||||
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
|
||||
const struct drm_format_info *format;
|
||||
struct drm_rect src = {
|
||||
.x1 = src_x,
|
||||
.y1 = src_y,
|
||||
|
@@ -117,7 +118,7 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
 	};
 	uint32_t val, ctrl0;
 	unsigned idx = 0;
-	bool visible;
+	bool visible, fb_changed;
 	int ret;

 	trace_armada_ovl_plane_update(plane, crtc, fb,
@@ -138,6 +139,18 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
 	if (!visible)
 		ctrl0 &= ~CFG_DMA_ENA;

+	/*
+	 * Shifting a YUV packed format image by one pixel causes the U/V
+	 * planes to swap. Compensate for it by also toggling the UV swap.
+	 */
+	format = fb->format;
+	if (format->num_planes == 1 && src.x1 >> 16 & (format->hsub - 1))
+		ctrl0 ^= CFG_DMA_MOD(CFG_SWAPUV);
+
+	fb_changed = plane->fb != fb ||
+		     dplane->base.state.src_x != src.x1 >> 16 ||
+		     dplane->base.state.src_y != src.y1 >> 16;
+
 	if (!dcrtc->plane) {
 		dcrtc->plane = plane;
 		armada_ovl_update_attr(&dplane->prop, dcrtc);
@@ -145,7 +158,7 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,

 	/* FIXME: overlay on an interlaced display */
 	/* Just updating the position/size? */
-	if (plane->fb == fb && dplane->base.state.ctrl0 == ctrl0) {
+	if (!fb_changed && dplane->base.state.ctrl0 == ctrl0) {
 		val = (drm_rect_height(&src) & 0xffff0000) |
 		      drm_rect_width(&src) >> 16;
 		dplane->base.state.src_hw = val;
@@ -169,9 +182,8 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
 	if (armada_drm_plane_work_wait(&dplane->base, HZ / 25) == 0)
 		armada_drm_plane_work_cancel(dcrtc, &dplane->base);

-	if (plane->fb != fb) {
-		u32 addrs[3], pixel_format;
-		int num_planes, hsub;
+	if (fb_changed) {
+		u32 addrs[3];

 		/*
 		 * Take a reference on the new framebuffer - we want to
@@ -182,23 +194,11 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
 		if (plane->fb)
 			armada_ovl_retire_fb(dplane, plane->fb);

-		src_y = src.y1 >> 16;
-		src_x = src.x1 >> 16;
+		dplane->base.state.src_y = src_y = src.y1 >> 16;
+		dplane->base.state.src_x = src_x = src.x1 >> 16;

 		armada_drm_plane_calc_addrs(addrs, fb, src_x, src_y);

-		pixel_format = fb->format->format;
-		hsub = drm_format_horz_chroma_subsampling(pixel_format);
-		num_planes = fb->format->num_planes;
-
-		/*
-		 * Annoyingly, shifting a YUYV-format image by one pixel
-		 * causes the U/V planes to toggle. Toggle the UV swap.
-		 * (Unfortunately, this causes momentary colour flickering.)
-		 */
-		if (src_x & (hsub - 1) && num_planes == 1)
-			ctrl0 ^= CFG_DMA_MOD(CFG_SWAPUV);
-
 		armada_reg_queue_set(dplane->vbl.regs, idx, addrs[0],
 				     LCD_SPU_DMA_START_ADDR_Y0);
 		armada_reg_queue_set(dplane->vbl.regs, idx, addrs[1],
@@ -2368,6 +2368,9 @@ struct drm_i915_private {
 	 */
 	struct workqueue_struct *wq;

+	/* ordered wq for modesets */
+	struct workqueue_struct *modeset_wq;
+
 	/* Display functions */
 	struct drm_i915_display_funcs display;
@@ -6977,6 +6977,7 @@ enum {
 #define RESET_PCH_HANDSHAKE_ENABLE	(1<<4)

 #define GEN8_CHICKEN_DCPR_1		_MMIO(0x46430)
+#define   SKL_SELECT_ALTERNATE_DC_EXIT	(1<<30)
 #define   MASK_WAKEMEM			(1<<13)

 #define SKL_DFSM			_MMIO(0x51000)

@@ -8522,6 +8523,7 @@ enum skl_power_gate {
 #define   BXT_CDCLK_CD2X_DIV_SEL_2	(2<<22)
 #define   BXT_CDCLK_CD2X_DIV_SEL_4	(3<<22)
 #define   BXT_CDCLK_CD2X_PIPE(pipe)	((pipe)<<20)
+#define   CDCLK_DIVMUX_CD_OVERRIDE	(1<<19)
 #define   BXT_CDCLK_CD2X_PIPE_NONE	BXT_CDCLK_CD2X_PIPE(3)
 #define   BXT_CDCLK_SSA_PRECHARGE_ENABLE	(1<<16)
 #define   CDCLK_FREQ_DECIMAL_MASK	(0x7ff)
@@ -860,16 +860,10 @@ static void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv,

 static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
 {
-	int min_cdclk = skl_calc_cdclk(0, vco);
 	u32 val;

 	WARN_ON(vco != 8100000 && vco != 8640000);

-	/* select the minimum CDCLK before enabling DPLL 0 */
-	val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_cdclk);
-	I915_WRITE(CDCLK_CTL, val);
-	POSTING_READ(CDCLK_CTL);
-
 	/*
 	 * We always enable DPLL0 with the lowest link rate possible, but still
 	 * taking into account the VCO required to operate the eDP panel at the
@@ -923,7 +917,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
 {
 	int cdclk = cdclk_state->cdclk;
 	int vco = cdclk_state->vco;
-	u32 freq_select, pcu_ack;
+	u32 freq_select, pcu_ack, cdclk_ctl;
 	int ret;

 	WARN_ON((cdclk == 24000) != (vco == 0));
@@ -940,7 +934,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
 		return;
 	}

-	/* set CDCLK_CTL */
+	/* Choose frequency for this cdclk */
 	switch (cdclk) {
 	case 450000:
 	case 432000:
@@ -968,10 +962,33 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
 	    dev_priv->cdclk.hw.vco != vco)
 		skl_dpll0_disable(dev_priv);

+	cdclk_ctl = I915_READ(CDCLK_CTL);
+
+	if (dev_priv->cdclk.hw.vco != vco) {
+		/* Wa Display #1183: skl,kbl,cfl */
+		cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK);
+		cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk);
+		I915_WRITE(CDCLK_CTL, cdclk_ctl);
+	}
+
+	/* Wa Display #1183: skl,kbl,cfl */
+	cdclk_ctl |= CDCLK_DIVMUX_CD_OVERRIDE;
+	I915_WRITE(CDCLK_CTL, cdclk_ctl);
+	POSTING_READ(CDCLK_CTL);
+
 	if (dev_priv->cdclk.hw.vco != vco)
 		skl_dpll0_enable(dev_priv, vco);

-	I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(cdclk));
+	/* Wa Display #1183: skl,kbl,cfl */
+	cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK);
+	I915_WRITE(CDCLK_CTL, cdclk_ctl);
+
+	cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk);
+	I915_WRITE(CDCLK_CTL, cdclk_ctl);
+
+	/* Wa Display #1183: skl,kbl,cfl */
+	cdclk_ctl &= ~CDCLK_DIVMUX_CD_OVERRIDE;
+	I915_WRITE(CDCLK_CTL, cdclk_ctl);
 	POSTING_READ(CDCLK_CTL);

 	/* inform PCU of the change */
@@ -12544,11 +12544,15 @@ static int intel_atomic_commit(struct drm_device *dev,
 	INIT_WORK(&state->commit_work, intel_atomic_commit_work);

 	i915_sw_fence_commit(&intel_state->commit_ready);
-	if (nonblock)
+	if (nonblock && intel_state->modeset) {
+		queue_work(dev_priv->modeset_wq, &state->commit_work);
+	} else if (nonblock) {
 		queue_work(system_unbound_wq, &state->commit_work);
-	else
+	} else {
+		if (intel_state->modeset)
+			flush_workqueue(dev_priv->modeset_wq);
 		intel_atomic_commit_tail(state);
+	}

 	return 0;
 }
@@ -14462,6 +14466,8 @@ int intel_modeset_init(struct drm_device *dev)
 	enum pipe pipe;
 	struct intel_crtc *crtc;

+	dev_priv->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
+
 	drm_mode_config_init(dev);

 	dev->mode_config.min_width = 0;
@@ -15270,6 +15276,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
 	intel_cleanup_gt_powersave(dev_priv);

 	intel_teardown_gmbus(dev_priv);
+
+	destroy_workqueue(dev_priv->modeset_wq);
 }

 void intel_connector_attach_encoder(struct intel_connector *connector,
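
alloc_ordered_workqueue() returns a workqueue that executes at most one item at a time, in queueing order, which is what lets the commit above use it as a serialization point for modesets. A minimal kernel-module-style sketch of the pattern, not taken from this commit:

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;

struct my_job {
	struct work_struct work;
	int id;
};

static void my_job_fn(struct work_struct *work)
{
	struct my_job *job = container_of(work, struct my_job, work);

	/* Jobs on an ordered wq run strictly one after another. */
	pr_info("job %d\n", job->id);
	kfree(job);
}

static int __init demo_init(void)
{
	struct my_job *job;

	my_wq = alloc_ordered_workqueue("demo_ordered", 0);
	if (!my_wq)
		return -ENOMEM;

	job = kzalloc(sizeof(*job), GFP_KERNEL);
	if (!job) {
		destroy_workqueue(my_wq);
		return -ENOMEM;
	}
	job->id = 1;
	INIT_WORK(&job->work, my_job_fn);
	queue_work(my_wq, &job->work);
	return 0;
}

static void __exit demo_exit(void)
{
	flush_workqueue(my_wq);		/* wait for queued jobs, as the commit does */
	destroy_workqueue(my_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
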
@@ -590,7 +590,7 @@ static void hsw_psr_disable(struct intel_dp *intel_dp,
 	struct drm_i915_private *dev_priv = to_i915(dev);

 	if (dev_priv->psr.active) {
-		i915_reg_t psr_ctl;
+		i915_reg_t psr_status;
 		u32 psr_status_mask;

 		if (dev_priv->psr.aux_frame_sync)
@@ -599,24 +599,24 @@ static void hsw_psr_disable(struct intel_dp *intel_dp,
 				   0);

 		if (dev_priv->psr.psr2_support) {
-			psr_ctl = EDP_PSR2_CTL;
+			psr_status = EDP_PSR2_STATUS_CTL;
 			psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;

-			I915_WRITE(psr_ctl,
-				   I915_READ(psr_ctl) &
+			I915_WRITE(EDP_PSR2_CTL,
+				   I915_READ(EDP_PSR2_CTL) &
 				   ~(EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE));

 		} else {
-			psr_ctl = EDP_PSR_STATUS_CTL;
+			psr_status = EDP_PSR_STATUS_CTL;
 			psr_status_mask = EDP_PSR_STATUS_STATE_MASK;

-			I915_WRITE(psr_ctl,
-				   I915_READ(psr_ctl) & ~EDP_PSR_ENABLE);
+			I915_WRITE(EDP_PSR_CTL,
+				   I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
 		}

 		/* Wait till PSR is idle */
 		if (intel_wait_for_register(dev_priv,
-					    psr_ctl, psr_status_mask, 0,
+					    psr_status, psr_status_mask, 0,
 					    2000))
 			DRM_ERROR("Timed out waiting for PSR Idle State\n");
@@ -598,6 +598,11 @@ void gen9_enable_dc5(struct drm_i915_private *dev_priv)

 	DRM_DEBUG_KMS("Enabling DC5\n");

+	/* Wa Display #1183: skl,kbl,cfl */
+	if (IS_GEN9_BC(dev_priv))
+		I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
+			   SKL_SELECT_ALTERNATE_DC_EXIT);
+
 	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
 }

@@ -625,6 +630,11 @@ void skl_disable_dc6(struct drm_i915_private *dev_priv)
 {
 	DRM_DEBUG_KMS("Disabling DC6\n");

+	/* Wa Display #1183: skl,kbl,cfl */
+	if (IS_GEN9_BC(dev_priv))
+		I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
+			   SKL_SELECT_ALTERNATE_DC_EXIT);
+
 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 }

@@ -1786,6 +1796,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
 	GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS |	\
 	BIT_ULL(POWER_DOMAIN_MODESET) |		\
 	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
+	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
 	BIT_ULL(POWER_DOMAIN_INIT))

 #define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS (	\
@@ -78,6 +78,8 @@ static void hdmi_cec_received_msg(struct hdmi_core_data *core)

 		/* then read the message */
 		msg.len = cnt & 0xf;
+		if (msg.len > CEC_MAX_MSG_SIZE - 2)
+			msg.len = CEC_MAX_MSG_SIZE - 2;
 		msg.msg[0] = hdmi_read_reg(core->base,
 					   HDMI_CEC_RX_CMD_HEADER);
 		msg.msg[1] = hdmi_read_reg(core->base,
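
The added clamp is the standard defence when a length field comes from hardware: treat the register value as untrusted and bound it before using it as an index or copy count. A small userspace demonstration of the idiom (the register read is a stand-in, not the OMAP code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_MSG 16

/* Stand-in for reading a 4-bit length field out of a status register. */
static unsigned int hw_read_count(void)
{
	return 0x1f & 0xf;	/* only the low nibble is meaningful */
}

int main(void)
{
	uint8_t msg[MAX_MSG];
	unsigned int len = hw_read_count();

	/* Clamp before use: a misbehaving device must not overrun msg[]. */
	if (len > sizeof(msg) - 2)
		len = sizeof(msg) - 2;

	memset(msg, 0, sizeof(msg));
	printf("copying %u payload bytes after 2 header bytes\n", len);
	return 0;
}
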
@@ -104,26 +106,6 @@ static void hdmi_cec_received_msg(struct hdmi_core_data *core)
 	}
 }

-static void hdmi_cec_transmit_fifo_empty(struct hdmi_core_data *core, u32 stat1)
-{
-	if (stat1 & 2) {
-		u32 dbg3 = hdmi_read_reg(core->base, HDMI_CEC_DBG_3);
-
-		cec_transmit_done(core->adap,
-				  CEC_TX_STATUS_NACK |
-				  CEC_TX_STATUS_MAX_RETRIES,
-				  0, (dbg3 >> 4) & 7, 0, 0);
-	} else if (stat1 & 1) {
-		cec_transmit_done(core->adap,
-				  CEC_TX_STATUS_ARB_LOST |
-				  CEC_TX_STATUS_MAX_RETRIES,
-				  0, 0, 0, 0);
-	} else if (stat1 == 0) {
-		cec_transmit_done(core->adap, CEC_TX_STATUS_OK,
-				  0, 0, 0, 0);
-	}
-}
-
 void hdmi4_cec_irq(struct hdmi_core_data *core)
 {
 	u32 stat0 = hdmi_read_reg(core->base, HDMI_CEC_INT_STATUS_0);
@@ -132,27 +114,21 @@ void hdmi4_cec_irq(struct hdmi_core_data *core)
 	hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_0, stat0);
 	hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_1, stat1);

-	if (stat0 & 0x40)
+	if (stat0 & 0x20) {
+		cec_transmit_done(core->adap, CEC_TX_STATUS_OK,
+				  0, 0, 0, 0);
 		REG_FLD_MOD(core->base, HDMI_CEC_DBG_3, 0x1, 7, 7);
-	else if (stat0 & 0x24)
-		hdmi_cec_transmit_fifo_empty(core, stat1);
-	if (stat1 & 2) {
+	} else if (stat1 & 0x02) {
 		u32 dbg3 = hdmi_read_reg(core->base, HDMI_CEC_DBG_3);

 		cec_transmit_done(core->adap,
 				  CEC_TX_STATUS_NACK |
 				  CEC_TX_STATUS_MAX_RETRIES,
 				  0, (dbg3 >> 4) & 7, 0, 0);
-	} else if (stat1 & 1) {
-		cec_transmit_done(core->adap,
-				  CEC_TX_STATUS_ARB_LOST |
-				  CEC_TX_STATUS_MAX_RETRIES,
-				  0, 0, 0, 0);
+		REG_FLD_MOD(core->base, HDMI_CEC_DBG_3, 0x1, 7, 7);
 	}
 	if (stat0 & 0x02)
 		hdmi_cec_received_msg(core);
-	if (stat1 & 0x3)
-		REG_FLD_MOD(core->base, HDMI_CEC_DBG_3, 0x1, 7, 7);
 }

 static bool hdmi_cec_clear_tx_fifo(struct cec_adapter *adap)
@@ -231,18 +207,14 @@ static int hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
 	/*
 	 * Enable CEC interrupts:
 	 * Transmit Buffer Full/Empty Change event
-	 * Transmitter FIFO Empty event
 	 * Receiver FIFO Not Empty event
 	 */
-	hdmi_write_reg(core->base, HDMI_CEC_INT_ENABLE_0, 0x26);
+	hdmi_write_reg(core->base, HDMI_CEC_INT_ENABLE_0, 0x22);
 	/*
 	 * Enable CEC interrupts:
-	 * RX FIFO Overrun Error event
-	 * Short Pulse Detected event
 	 * Frame Retransmit Count Exceeded event
-	 * Start Bit Irregularity event
 	 */
-	hdmi_write_reg(core->base, HDMI_CEC_INT_ENABLE_1, 0x0f);
+	hdmi_write_reg(core->base, HDMI_CEC_INT_ENABLE_1, 0x02);

 	/* cec calibration enable (self clearing) */
 	hdmi_write_reg(core->base, HDMI_CEC_SETUP, 0x03);
@@ -1007,6 +1007,8 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
 	pr_info("Initializing pool allocator\n");

 	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
+	if (!_manager)
+		return -ENOMEM;

 	ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc", 0);
@@ -1543,6 +1543,9 @@ static void rtsx_pci_shutdown(struct pci_dev *pcidev)
 	rtsx_pci_power_off(pcr, HOST_ENTER_S1);

 	pci_disable_device(pcidev);
+	free_irq(pcr->irq, (void *)pcr);
+	if (pcr->msi_en)
+		pci_disable_msi(pcr->pci);
 }

 #else /* CONFIG_PM */
@@ -2803,6 +2803,16 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
 		erp = dasd_3990_erp_handle_match_erp(cqr, erp);
 	}

+
+	/*
+	 * For path verification work we need to stick with the path that was
+	 * originally chosen so that the per path configuration data is
+	 * assigned correctly.
+	 */
+	if (test_bit(DASD_CQR_VERIFY_PATH, &erp->flags) && cqr->lpm) {
+		erp->lpm = cqr->lpm;
+	}
+
 	if (device->features & DASD_FEATURE_ERPLOG) {
 		/* print current erp_chain */
 		dev_err(&device->cdev->dev,
@@ -17,6 +17,8 @@ CFLAGS_REMOVE_sclp_early_core.o += $(CC_FLAGS_MARCH)
 CFLAGS_sclp_early_core.o += -march=z900
 endif

+CFLAGS_sclp_early_core.o += -D__NO_FORTIFY
+
 obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
 	 sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o \
 	 sclp_early.o sclp_early_core.o
@@ -805,7 +805,7 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
 		pvcalls_exit();
 		return ret;
 	}
-	map2 = kzalloc(sizeof(*map2), GFP_KERNEL);
+	map2 = kzalloc(sizeof(*map2), GFP_ATOMIC);
 	if (map2 == NULL) {
 		clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
 			  (void *)&map->passive.flags);
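
The one-word change matters because GFP_KERNEL allocations may sleep, which is forbidden while holding a spinlock or in other atomic contexts; GFP_ATOMIC never sleeps but fails more readily, so the NULL path must be handled. A kernel-style sketch of the rule, not taken from pvcalls:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct item { int v; };

static DEFINE_SPINLOCK(demo_lock);

static int add_item_atomic(int v)
{
	struct item *it;

	spin_lock(&demo_lock);
	/*
	 * Must not sleep under the spinlock, so GFP_ATOMIC; atomic
	 * allocations are allowed to fail under memory pressure, so
	 * the error path is mandatory.
	 */
	it = kzalloc(sizeof(*it), GFP_ATOMIC);
	if (!it) {
		spin_unlock(&demo_lock);
		return -ENOMEM;
	}
	it->v = v;
	spin_unlock(&demo_lock);

	kfree(it);		/* demo only: real code would keep the item */
	return 0;
}
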
@@ -87,6 +87,7 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(

 	spin_lock(&root->inode_lock);
 	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
+
 	if (node) {
 		if (btrfs_inode->delayed_node) {
 			refcount_inc(&node->refs);	/* can be accessed */
@@ -94,9 +95,30 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(
 			spin_unlock(&root->inode_lock);
 			return node;
 		}
-		btrfs_inode->delayed_node = node;
-		/* can be accessed and cached in the inode */
-		refcount_add(2, &node->refs);
+
+		/*
+		 * It's possible that we're racing into the middle of removing
+		 * this node from the radix tree. In this case, the refcount
+		 * was zero and it should never go back to one. Just return
+		 * NULL like it was never in the radix at all; our release
+		 * function is in the process of removing it.
+		 *
+		 * Some implementations of refcount_inc refuse to bump the
+		 * refcount once it has hit zero. If we don't do this dance
+		 * here, refcount_inc() may decide to just WARN_ONCE() instead
+		 * of actually bumping the refcount.
+		 *
+		 * If this node is properly in the radix, we want to bump the
+		 * refcount twice, once for the inode and once for this get
+		 * operation.
+		 */
+		if (refcount_inc_not_zero(&node->refs)) {
+			refcount_inc(&node->refs);
+			btrfs_inode->delayed_node = node;
+		} else {
+			node = NULL;
+		}

 		spin_unlock(&root->inode_lock);
 		return node;
 	}
@@ -254,17 +276,18 @@ static void __btrfs_release_delayed_node(
 	mutex_unlock(&delayed_node->mutex);

 	if (refcount_dec_and_test(&delayed_node->refs)) {
-		bool free = false;
 		struct btrfs_root *root = delayed_node->root;

 		spin_lock(&root->inode_lock);
-		if (refcount_read(&delayed_node->refs) == 0) {
-			radix_tree_delete(&root->delayed_nodes_tree,
-					  delayed_node->inode_id);
-			free = true;
-		}
+		/*
+		 * Once our refcount goes to zero, nobody is allowed to bump it
+		 * back up. We can delete it now.
+		 */
+		ASSERT(refcount_read(&delayed_node->refs) == 0);
+		radix_tree_delete(&root->delayed_nodes_tree,
+				  delayed_node->inode_id);
 		spin_unlock(&root->inode_lock);
-		if (free)
-			kmem_cache_free(delayed_node_cache, delayed_node);
+		kmem_cache_free(delayed_node_cache, delayed_node);
 	}
 }

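
The "dance" described in the comment is the general lookup-versus-teardown pattern: once a refcount has reached zero the object is dead, and a concurrent lookup must observe that instead of resurrecting it. The kernel's refcount_inc_not_zero() is a compare-and-swap loop; the same logic modeled in portable C11:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Increment iff the count is still non-zero; never revive a dead object. */
static bool ref_inc_not_zero(atomic_int *refs)
{
	int old = atomic_load(refs);

	while (old != 0) {
		if (atomic_compare_exchange_weak(refs, &old, old + 1))
			return true;
		/* on failure, old was reloaded by the CAS; retry */
	}
	return false;
}

int main(void)
{
	atomic_int live, dying;

	atomic_init(&live, 2);
	atomic_init(&dying, 0);

	printf("live:  %s\n", ref_inc_not_zero(&live) ? "took ref" : "dead");
	printf("dying: %s\n", ref_inc_not_zero(&dying) ? "took ref" : "dead");
	return 0;
}
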
@@ -237,7 +237,6 @@ static struct btrfs_device *__alloc_device(void)
 		kfree(dev);
 		return ERR_PTR(-ENOMEM);
 	}
-	bio_get(dev->flush_bio);

 	INIT_LIST_HEAD(&dev->dev_list);
 	INIT_LIST_HEAD(&dev->dev_alloc_list);
@@ -570,11 +570,14 @@ out:
 static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
 					      struct userfaultfd_wait_queue *ewq)
 {
+	struct userfaultfd_ctx *release_new_ctx;
+
 	if (WARN_ON_ONCE(current->flags & PF_EXITING))
 		goto out;

 	ewq->ctx = ctx;
 	init_waitqueue_entry(&ewq->wq, current);
+	release_new_ctx = NULL;

 	spin_lock(&ctx->event_wqh.lock);
 	/*
@@ -601,8 +604,7 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
 			new = (struct userfaultfd_ctx *)
 				(unsigned long)
 				ewq->msg.arg.reserved.reserved1;
-
-			userfaultfd_ctx_put(new);
+			release_new_ctx = new;
 		}
 		break;
 	}
@@ -617,6 +619,20 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
 	__set_current_state(TASK_RUNNING);
 	spin_unlock(&ctx->event_wqh.lock);

+	if (release_new_ctx) {
+		struct vm_area_struct *vma;
+		struct mm_struct *mm = release_new_ctx->mm;
+
+		/* the various vma->vm_userfaultfd_ctx still points to it */
+		down_write(&mm->mmap_sem);
+		for (vma = mm->mmap; vma; vma = vma->vm_next)
+			if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx)
+				vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
+		up_write(&mm->mmap_sem);
+
+		userfaultfd_ctx_put(release_new_ctx);
+	}
+
 	/*
 	 * ctx may go away after this if the userfault pseudo fd is
 	 * already released.
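
The fix only records the context pointer while the waitqueue lock is held and performs the heavyweight release, which itself takes mmap_sem, after unlocking. That lock-ordering shape in a runnable pthread sketch (illustrative, not the userfaultfd code):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static int *pending;	/* protected by list_lock */

static void heavy_release(int *p)
{
	/* may block or take other locks - must run with list_lock dropped */
	free(p);
}

static void consume(void)
{
	int *to_release;

	pthread_mutex_lock(&list_lock);
	/* Under the lock: only record what needs releasing. */
	to_release = pending;
	pending = NULL;
	pthread_mutex_unlock(&list_lock);

	/* After the lock is dropped: do the expensive teardown. */
	if (to_release)
		heavy_release(to_release);
}

int main(void)
{
	pending = malloc(sizeof(*pending));
	consume();
	puts("released outside the lock");
	return 0;
}
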
@@ -399,7 +399,7 @@ xfs_map_blocks(
 	       (ip->i_df.if_flags & XFS_IFEXTENTS));
 	ASSERT(offset <= mp->m_super->s_maxbytes);

-	if ((xfs_ufsize_t)offset + count > mp->m_super->s_maxbytes)
+	if (offset > mp->m_super->s_maxbytes - count)
 		count = mp->m_super->s_maxbytes - offset;
 	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
 	offset_fsb = XFS_B_TO_FSBT(mp, offset);
@@ -1312,7 +1312,7 @@ xfs_get_blocks(
 	lockmode = xfs_ilock_data_map_shared(ip);

 	ASSERT(offset <= mp->m_super->s_maxbytes);
-	if ((xfs_ufsize_t)offset + size > mp->m_super->s_maxbytes)
+	if (offset > mp->m_super->s_maxbytes - size)
 		size = mp->m_super->s_maxbytes - offset;
 	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
 	offset_fsb = XFS_B_TO_FSBT(mp, offset);
@@ -1006,7 +1006,7 @@ xfs_file_iomap_begin(
 	}

 	ASSERT(offset <= mp->m_super->s_maxbytes);
-	if ((xfs_fsize_t)offset + length > mp->m_super->s_maxbytes)
+	if (offset > mp->m_super->s_maxbytes - length)
 		length = mp->m_super->s_maxbytes - offset;
 	offset_fsb = XFS_B_TO_FSBT(mp, offset);
 	end_fsb = XFS_B_TO_FSB(mp, offset + length);
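
All three hunks rewrite "offset + len > max" as "offset > max - len": the addition can wrap for a huge offset and falsely pass the check, while the subtraction never overflows as long as len <= max. A self-contained demonstration of the difference:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t max = UINT64_MAX - 10;	/* s_maxbytes stand-in */
	uint64_t off = UINT64_MAX - 4;	/* pathological offset */
	uint64_t len = 8;

	/* Broken: off + len wraps around and compares as small. */
	int broken_in_range = (off + len > max) ? 0 : 1;

	/* Safe: no addition, so no wrap (requires len <= max). */
	int safe_in_range = (off > max - len) ? 0 : 1;

	printf("broken check says in-range: %d\n", broken_in_range); /* 1: wrong */
	printf("safe   check says in-range: %d\n", safe_in_range);   /* 0: right */
	return 0;
}
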
@@ -48,7 +48,7 @@
 STATIC int	xfs_qm_init_quotainos(xfs_mount_t *);
 STATIC int	xfs_qm_init_quotainfo(xfs_mount_t *);

-
+STATIC void	xfs_qm_destroy_quotainos(xfs_quotainfo_t *qi);
 STATIC void	xfs_qm_dqfree_one(struct xfs_dquot *dqp);
 /*
  * We use the batch lookup interface to iterate over the dquots as it
@@ -695,9 +695,17 @@ xfs_qm_init_quotainfo(
 	qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
 	qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
 	qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;
-	register_shrinker(&qinf->qi_shrinker);
+
+	error = register_shrinker(&qinf->qi_shrinker);
+	if (error)
+		goto out_free_inos;
+
 	return 0;

+out_free_inos:
+	mutex_destroy(&qinf->qi_quotaofflock);
+	mutex_destroy(&qinf->qi_tree_lock);
+	xfs_qm_destroy_quotainos(qinf);
 out_free_lru:
 	list_lru_destroy(&qinf->qi_lru);
 out_free_qinf:
@@ -706,7 +714,6 @@ out_free_qinf:
 	return error;
 }

-
 /*
  * Gets called when unmounting a filesystem or when all quotas get
  * turned off.
@@ -723,19 +730,8 @@ xfs_qm_destroy_quotainfo(

 	unregister_shrinker(&qi->qi_shrinker);
 	list_lru_destroy(&qi->qi_lru);

-	if (qi->qi_uquotaip) {
-		IRELE(qi->qi_uquotaip);
-		qi->qi_uquotaip = NULL; /* paranoia */
-	}
-	if (qi->qi_gquotaip) {
-		IRELE(qi->qi_gquotaip);
-		qi->qi_gquotaip = NULL;
-	}
-	if (qi->qi_pquotaip) {
-		IRELE(qi->qi_pquotaip);
-		qi->qi_pquotaip = NULL;
-	}
+	xfs_qm_destroy_quotainos(qi);
 	mutex_destroy(&qi->qi_tree_lock);
 	mutex_destroy(&qi->qi_quotaofflock);
 	kmem_free(qi);
 	mp->m_quotainfo = NULL;
@@ -1599,6 +1595,24 @@ error_rele:
 	return error;
 }

+STATIC void
+xfs_qm_destroy_quotainos(
+	xfs_quotainfo_t	*qi)
+{
+	if (qi->qi_uquotaip) {
+		IRELE(qi->qi_uquotaip);
+		qi->qi_uquotaip = NULL; /* paranoia */
+	}
+	if (qi->qi_gquotaip) {
+		IRELE(qi->qi_gquotaip);
+		qi->qi_gquotaip = NULL;
+	}
+	if (qi->qi_pquotaip) {
+		IRELE(qi->qi_pquotaip);
+		qi->qi_pquotaip = NULL;
+	}
+}
+
 STATIC void
 xfs_qm_dqfree_one(
 	struct xfs_dquot	*dqp)
@@ -18,6 +18,7 @@
 #include <linux/if_alg.h>
 #include <linux/scatterlist.h>
 #include <linux/types.h>
+#include <linux/atomic.h>
 #include <net/sock.h>

 #include <crypto/aead.h>

@@ -150,7 +151,7 @@ struct af_alg_ctx {
 	struct crypto_wait wait;

 	size_t used;
-	size_t rcvused;
+	atomic_t rcvused;

 	bool more;
 	bool merge;

@@ -215,7 +216,7 @@ static inline int af_alg_rcvbuf(struct sock *sk)
 	struct af_alg_ctx *ctx = ask->private;

 	return max_t(int, max_t(int, sk->sk_rcvbuf & PAGE_MASK, PAGE_SIZE) -
-		     ctx->rcvused, 0);
+		     atomic_read(&ctx->rcvused), 0);
 }

 /**
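
Switching rcvused from size_t to atomic_t makes the charge/uncharge/read accounting safe when several contexts touch it concurrently, without adding a lock. The C11 analogue of the kernel atomic_t usage, as a runnable sketch:

#include <stdatomic.h>
#include <stdio.h>

struct ctx {
	atomic_int rcvused;	/* bytes charged to the receive buffer */
};

static int rcvbuf_space(struct ctx *c, int rcvbuf_limit)
{
	int used = atomic_load(&c->rcvused);	/* no torn reads */
	int space = rcvbuf_limit - used;

	return space > 0 ? space : 0;
}

int main(void)
{
	struct ctx c;

	atomic_init(&c.rcvused, 0);
	atomic_fetch_add(&c.rcvused, 4096);	/* charge */
	printf("space: %d\n", rcvbuf_space(&c, 65536));
	atomic_fetch_sub(&c.rcvused, 4096);	/* uncharge */
	printf("space: %d\n", rcvbuf_space(&c, 65536));
	return 0;
}
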
@@ -140,11 +140,13 @@ struct efi_boot_memmap {

 struct capsule_info {
 	efi_capsule_header_t	header;
+	efi_capsule_header_t	*capsule;
 	int			reset_type;
 	long			index;
 	size_t			count;
 	size_t			total_size;
-	phys_addr_t		*pages;
+	struct page		**pages;
+	phys_addr_t		*phys;
 	size_t			page_bytes_remain;
 };

@@ -102,7 +102,7 @@ static int check_free_space(struct bsd_acct_struct *acct)
 {
 	struct kstatfs sbuf;

-	if (time_is_before_jiffies(acct->needcheck))
+	if (time_is_after_jiffies(acct->needcheck))
 		goto out;

 	/* May block */
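
time_is_before_jiffies(t) is true when t lies in the past and time_is_after_jiffies(t) when it lies in the future, both wrap-safe; the old test therefore skipped the free-space check exactly when it was due. The wrap-safe comparison reduces to a signed subtraction, the same trick as the kernel's time_after(), demonstrated here:

#include <stdio.h>

/* Wrap-safe "a is after b" for a free-running 32-bit counter. */
static int counter_after(unsigned int a, unsigned int b)
{
	return (int)(b - a) < 0;
}

int main(void)
{
	unsigned int jiffies = 0xfffffff0u;	/* about to wrap */
	unsigned int deadline = jiffies + 0x20;	/* wraps past zero */

	/* Naive a > b fails across the wrap; the signed diff does not. */
	printf("naive:     %d\n", deadline > jiffies);		     /* 0: wrong */
	printf("wrap-safe: %d\n", counter_after(deadline, jiffies)); /* 1: right */
	return 0;
}
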
@@ -1763,3 +1763,4 @@ __weak void abort(void)
 	/* if that doesn't kill us, halt */
 	panic("Oops failed to kill thread");
 }
+EXPORT_SYMBOL(abort);
@@ -1696,7 +1696,7 @@ void run_local_timers(void)
 	hrtimer_run_queues();
 	/* Raise the softirq only if required. */
 	if (time_before(jiffies, base->clk)) {
-		if (!IS_ENABLED(CONFIG_NO_HZ_COMMON) || !base->nohz_active)
+		if (!IS_ENABLED(CONFIG_NO_HZ_COMMON))
 			return;
 		/* CPU is awake, so check the deferrable base. */
 		base++;
@@ -671,7 +671,23 @@ do { \
 ***************  MIPS/64  **************
 ***************************************/
 #if (defined(__mips) && __mips >= 3) && W_TYPE_SIZE == 64
-#if (__GNUC__ >= 5) || (__GNUC__ >= 4 && __GNUC_MINOR__ >= 4)
+#if defined(__mips_isa_rev) && __mips_isa_rev >= 6
+/*
+ * GCC ends up emitting a __multi3 intrinsic call for MIPS64r6 with the plain C
+ * code below, so we special case MIPS64r6 until the compiler can do better.
+ */
+#define umul_ppmm(w1, w0, u, v)					\
+do {								\
+	__asm__ ("dmulu %0,%1,%2"				\
+		 : "=d" ((UDItype)(w0))				\
+		 : "d" ((UDItype)(u)),				\
+		   "d" ((UDItype)(v)));				\
+	__asm__ ("dmuhu %0,%1,%2"				\
+		 : "=d" ((UDItype)(w1))				\
+		 : "d" ((UDItype)(u)),				\
+		   "d" ((UDItype)(v)));				\
+} while (0)
+#elif (__GNUC__ >= 5) || (__GNUC__ >= 4 && __GNUC_MINOR__ >= 4)
 #define umul_ppmm(w1, w0, u, v) \
 do {								\
 	typedef unsigned int __ll_UTItype __attribute__((mode(TI)));	\
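
umul_ppmm(w1, w0, u, v) forms the full 128-bit product of two 64-bit operands, high half in w1 and low half in w0, which is exactly what the dmulu/dmuhu instruction pair computes. On a compiler with __int128 the same result can be checked in plain C:

#include <stdint.h>
#include <stdio.h>

static void umul_ppmm64(uint64_t *w1, uint64_t *w0, uint64_t u, uint64_t v)
{
	unsigned __int128 p = (unsigned __int128)u * v;

	*w1 = (uint64_t)(p >> 64);	/* what dmuhu returns */
	*w0 = (uint64_t)p;		/* what dmulu returns */
}

int main(void)
{
	uint64_t hi, lo;

	umul_ppmm64(&hi, &lo, 0xffffffffffffffffULL, 2);
	printf("hi=%#llx lo=%#llx\n",
	       (unsigned long long)hi, (unsigned long long)lo);
	/* expect hi=0x1 lo=0xfffffffffffffffe */
	return 0;
}
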
mm/debug.c
@@ -50,7 +50,7 @@ void __dump_page(struct page *page, const char *reason)
 	 */
 	int mapcount = PageSlab(page) ? 0 : page_mapcount(page);

-	pr_emerg("page:%p count:%d mapcount:%d mapping:%p index:%#lx",
+	pr_emerg("page:%px count:%d mapcount:%d mapping:%px index:%#lx",
 		  page, page_ref_count(page), mapcount,
 		  page->mapping, page_to_pgoff(page));
 	if (PageCompound(page))
@@ -69,7 +69,7 @@ void __dump_page(struct page *page, const char *reason)

 #ifdef CONFIG_MEMCG
 	if (page->mem_cgroup)
-		pr_alert("page->mem_cgroup:%p\n", page->mem_cgroup);
+		pr_alert("page->mem_cgroup:%px\n", page->mem_cgroup);
 #endif
 }

@@ -84,10 +84,10 @@ EXPORT_SYMBOL(dump_page);

 void dump_vma(const struct vm_area_struct *vma)
 {
-	pr_emerg("vma %p start %p end %p\n"
-		"next %p prev %p mm %p\n"
-		"prot %lx anon_vma %p vm_ops %p\n"
-		"pgoff %lx file %p private_data %p\n"
+	pr_emerg("vma %px start %px end %px\n"
+		"next %px prev %px mm %px\n"
+		"prot %lx anon_vma %px vm_ops %px\n"
+		"pgoff %lx file %px private_data %px\n"
 		"flags: %#lx(%pGv)\n",
 		vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next,
 		vma->vm_prev, vma->vm_mm,
@@ -100,27 +100,27 @@ EXPORT_SYMBOL(dump_vma);

 void dump_mm(const struct mm_struct *mm)
 {
-	pr_emerg("mm %p mmap %p seqnum %d task_size %lu\n"
+	pr_emerg("mm %px mmap %px seqnum %d task_size %lu\n"
 #ifdef CONFIG_MMU
-		"get_unmapped_area %p\n"
+		"get_unmapped_area %px\n"
 #endif
 		"mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n"
-		"pgd %p mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
+		"pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
 		"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
 		"pinned_vm %lx data_vm %lx exec_vm %lx stack_vm %lx\n"
 		"start_code %lx end_code %lx start_data %lx end_data %lx\n"
 		"start_brk %lx brk %lx start_stack %lx\n"
 		"arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
-		"binfmt %p flags %lx core_state %p\n"
+		"binfmt %px flags %lx core_state %px\n"
 #ifdef CONFIG_AIO
-		"ioctx_table %p\n"
+		"ioctx_table %px\n"
 #endif
 #ifdef CONFIG_MEMCG
-		"owner %p "
+		"owner %px "
 #endif
-		"exe_file %p\n"
+		"exe_file %px\n"
 #ifdef CONFIG_MMU_NOTIFIER
-		"mmu_notifier_mm %p\n"
+		"mmu_notifier_mm %px\n"
 #endif
 #ifdef CONFIG_NUMA_BALANCING
 		"numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
@@ -166,7 +166,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
 		next = pmd_addr_end(addr, end);
 		if (!is_swap_pmd(*pmd) && !pmd_trans_huge(*pmd) && !pmd_devmap(*pmd)
 				&& pmd_none_or_clear_bad(pmd))
-			continue;
+			goto next;

 		/* invoke the mmu notifier if the pmd is populated */
 		if (!mni_start) {
@@ -188,7 +188,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
 				}

 				/* huge pmd was handled */
-				continue;
+				goto next;
 			}
 		}
 		/* fall through, the trans huge pmd just split */
@@ -196,6 +196,8 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
 		this_pages = change_pte_range(vma, pmd, addr, next, newprot,
 				 dirty_accountable, prot_numa);
 		pages += this_pages;
+next:
+		cond_resched();
 	} while (pmd++, addr = next, addr != end);

 	if (mni_start)
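
Converting the continue statements to a shared next: label lets every iteration, including the skipped ones, reach cond_resched(), so a huge range cannot monopolize the CPU on non-preemptible kernels. A userspace analogue of the loop shape (sched_yield() standing in for cond_resched(); schematic, not the mprotect code):

#include <sched.h>
#include <stdio.h>

int main(void)
{
	long done = 0;

	for (long i = 0; i < 1000000; i++) {
		if (i % 2)
			goto next;	/* skip path still reaches the yield */
		done++;			/* "real work" */
next:
		if ((i & 0xffff) == 0)
			sched_yield();	/* periodic yield, like cond_resched() */
	}
	printf("processed %ld entries\n", done);
	return 0;
}
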
@@ -6260,6 +6260,8 @@ void __paginginit zero_resv_unavail(void)
 	pgcnt = 0;
 	for_each_resv_unavail_range(i, &start, &end) {
 		for (pfn = PFN_DOWN(start); pfn < PFN_UP(end); pfn++) {
+			if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages)))
+				continue;
 			mm_zero_struct_page(pfn_to_page(pfn));
 			pgcnt++;
 		}
@@ -211,7 +211,7 @@ void __init memory_present(int nid, unsigned long start, unsigned long end)
 	if (unlikely(!mem_section)) {
 		unsigned long size, align;

-		size = sizeof(struct mem_section) * NR_SECTION_ROOTS;
+		size = sizeof(struct mem_section*) * NR_SECTION_ROOTS;
 		align = 1 << (INTERNODE_CACHE_SHIFT);
 		mem_section = memblock_virt_alloc(size, align);
 	}
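
The one-character fix above is a classic sizeof bug: mem_section here is an array of pointers to section roots, so the allocation must be sized in pointers, not in full structs. The bug class, demonstrated with illustrative names:

#include <stdio.h>

struct mem_section_model { unsigned long fields[8]; };
#define NR_ROOTS 128

int main(void)
{
	/* An array of pointers to roots, allocated dynamically. */
	struct mem_section_model **roots = NULL;

	/* Wrong: size of the pointed-to struct - far too much here. */
	size_t wrong = sizeof(struct mem_section_model) * NR_ROOTS;

	/* Right: size of the elements actually stored - pointers. */
	size_t right = sizeof(struct mem_section_model *) * NR_ROOTS;

	/* Safer idiom: derive the element size from the variable itself. */
	size_t best = sizeof(*roots) * NR_ROOTS;

	printf("wrong=%zu right=%zu best=%zu\n", wrong, right, best);
	return 0;
}
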
@@ -53,6 +53,7 @@
 #include <linux/mount.h>
 #include <linux/migrate.h>
 #include <linux/pagemap.h>
+#include <linux/fs.h>

 #define ZSPAGE_MAGIC	0x58