Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
commit 310655b07a
@@ -56,12 +56,12 @@ situation from a state where some tasks are stalled but the CPU is
 still doing productive work. As such, time spent in this subset of the
 stall state is tracked separately and exported in the "full" averages.
 
-The ratios are tracked as recent trends over ten, sixty, and three
-hundred second windows, which gives insight into short term events as
-well as medium and long term trends. The total absolute stall time is
-tracked and exported as well, to allow detection of latency spikes
-which wouldn't necessarily make a dent in the time averages, or to
-average trends over custom time frames.
+The ratios (in %) are tracked as recent trends over ten, sixty, and
+three hundred second windows, which gives insight into short term events
+as well as medium and long term trends. The total absolute stall time
+(in us) is tracked and exported as well, to allow detection of latency
+spikes which wouldn't necessarily make a dent in the time averages,
+or to average trends over custom time frames.
 
 Cgroup2 interface
 =================
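The averages described in this hunk are what user space reads back from the psi files. A minimal sketch, assuming the standard /proc/pressure/cpu path from the psi interface (not part of this diff), that just prints the avg10/avg60/avg300 percentages and the total stall time in microseconds:

#include <stdio.h>

/* Dump the psi averages discussed above from /proc/pressure/cpu. */
int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/pressure/cpu", "r");

	if (!f)
		return 1;
	/* Each line carries avg10/avg60/avg300 (in %) and total (in us). */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}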
@@ -228,7 +228,7 @@ patternProperties:
           - renesas,r9a06g032-smp
           - rockchip,rk3036-smp
           - rockchip,rk3066-smp
-          - socionext,milbeaut-m10v-smp
+          - socionext,milbeaut-m10v-smp
           - ste,dbx500-smp
 
       cpu-release-addr:
@@ -102,9 +102,11 @@ Byte sequences
                 dictionary which is empty, and that it will always be
                 invalid at this place.
 
-      17      : bitstream version. If the first byte is 17, the next byte
-                gives the bitstream version (version 1 only). If the first byte
-                is not 17, the bitstream version is 0.
+      17      : bitstream version. If the first byte is 17, and compressed
+                stream length is at least 5 bytes (length of shortest possible
+                versioned bitstream), the next byte gives the bitstream version
+                (version 1 only).
+                Otherwise, the bitstream version is 0.
 
       18..21  : copy 0..3 literals
                 state = (byte - 17) = 0..3  [ copy <state> literals ]
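A small sketch of the rule documented above (the helper name is illustrative, not from the kernel LZO code): a compressed stream is treated as a version-1 bitstream only when the first byte is 17 and the stream is at least 5 bytes long; everything else is version 0.

#include <stddef.h>

/* Apply the version-detection rule from the updated documentation. */
static int lzo_bitstream_version(const unsigned char *in, size_t in_len)
{
	if (in_len >= 5 && in[0] == 17)
		return in[1];	/* currently only version 1 is defined */
	return 0;
}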
@@ -1893,14 +1893,15 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-nomadik.git
 ARM/NUVOTON NPCM ARCHITECTURE
 M: Avi Fishman <avifishman70@gmail.com>
 M: Tomer Maimon <tmaimon77@gmail.com>
 M: Tali Perry <tali.perry1@gmail.com>
 R: Patrick Venture <venture@google.com>
 R: Nancy Yuen <yuenn@google.com>
 R: Brendan Higgins <brendanhiggins@google.com>
 R: Benjamin Fair <benjaminfair@google.com>
 L: openbmc@lists.ozlabs.org (moderated for non-subscribers)
 S: Supported
 F: arch/arm/mach-npcm/
 F: arch/arm/boot/dts/nuvoton-npcm*
-F: include/dt-bindings/clock/nuvoton,npcm7xx-clks.h
+F: include/dt-bindings/clock/nuvoton,npcm7xx-clock.h
 F: drivers/*/*npcm*
 F: Documentation/devicetree/bindings/*/*npcm*
 F: Documentation/devicetree/bindings/*/*/*npcm*
@@ -7518,7 +7519,7 @@ F: include/net/mac802154.h
 F: include/net/af_ieee802154.h
 F: include/net/cfg802154.h
 F: include/net/ieee802154_netdev.h
-F: Documentation/networking/ieee802154.txt
+F: Documentation/networking/ieee802154.rst
 
 IFE PROTOCOL
 M: Yotam Gigi <yotam.gi@gmail.com>
Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 1
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
 NAME = Shy Crocodile
 
 # *DOCUMENTATION*
@@ -55,12 +55,11 @@ syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
  */
 static inline void
 syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
-		      unsigned int i, unsigned int n, unsigned long *args)
+		      unsigned long *args)
 {
 	unsigned long *inside_ptregs = &(regs->r0);
-	inside_ptregs -= i;
-
-	BUG_ON((i + n) > 6);
+	unsigned int n = 6;
+	unsigned int i = 0;
 
 	while (n--) {
 		args[i++] = (*inside_ptregs);
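Across the syscall.h hunks that follow, the helper loses its start-index and count parameters and always copies all six arguments. A hypothetical caller under the new convention (dump_syscall_args and the pr_info format are illustrative, not part of this merge) would look like:

#include <linux/sched.h>
#include <linux/printk.h>
#include <asm/syscall.h>

/* Fetch and print all six syscall arguments of a traced task. */
static void dump_syscall_args(struct task_struct *task, struct pt_regs *regs)
{
	unsigned long args[6];

	syscall_get_arguments(task, regs, args);
	pr_info("syscall args: %lx %lx %lx %lx %lx %lx\n",
		args[0], args[1], args[2], args[3], args[4], args[5]);
}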
@ -57,6 +57,24 @@
|
|||
enable-active-high;
|
||||
};
|
||||
|
||||
/* TPS79501 */
|
||||
v1_8d_reg: fixedregulator-v1_8d {
|
||||
compatible = "regulator-fixed";
|
||||
regulator-name = "v1_8d";
|
||||
vin-supply = <&vbat>;
|
||||
regulator-min-microvolt = <1800000>;
|
||||
regulator-max-microvolt = <1800000>;
|
||||
};
|
||||
|
||||
/* TPS79501 */
|
||||
v3_3d_reg: fixedregulator-v3_3d {
|
||||
compatible = "regulator-fixed";
|
||||
regulator-name = "v3_3d";
|
||||
vin-supply = <&vbat>;
|
||||
regulator-min-microvolt = <3300000>;
|
||||
regulator-max-microvolt = <3300000>;
|
||||
};
|
||||
|
||||
matrix_keypad: matrix_keypad0 {
|
||||
compatible = "gpio-matrix-keypad";
|
||||
debounce-delay-ms = <5>;
|
||||
|
@ -499,10 +517,10 @@
|
|||
status = "okay";
|
||||
|
||||
/* Regulators */
|
||||
AVDD-supply = <&vaux2_reg>;
|
||||
IOVDD-supply = <&vaux2_reg>;
|
||||
DRVDD-supply = <&vaux2_reg>;
|
||||
DVDD-supply = <&vbat>;
|
||||
AVDD-supply = <&v3_3d_reg>;
|
||||
IOVDD-supply = <&v3_3d_reg>;
|
||||
DRVDD-supply = <&v3_3d_reg>;
|
||||
DVDD-supply = <&v1_8d_reg>;
|
||||
};
|
||||
};
|
||||
|
||||
|
|
|
@ -73,6 +73,24 @@
|
|||
enable-active-high;
|
||||
};
|
||||
|
||||
/* TPS79518 */
|
||||
v1_8d_reg: fixedregulator-v1_8d {
|
||||
compatible = "regulator-fixed";
|
||||
regulator-name = "v1_8d";
|
||||
vin-supply = <&vbat>;
|
||||
regulator-min-microvolt = <1800000>;
|
||||
regulator-max-microvolt = <1800000>;
|
||||
};
|
||||
|
||||
/* TPS78633 */
|
||||
v3_3d_reg: fixedregulator-v3_3d {
|
||||
compatible = "regulator-fixed";
|
||||
regulator-name = "v3_3d";
|
||||
vin-supply = <&vbat>;
|
||||
regulator-min-microvolt = <3300000>;
|
||||
regulator-max-microvolt = <3300000>;
|
||||
};
|
||||
|
||||
leds {
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <&user_leds_s0>;
|
||||
|
@ -501,10 +519,10 @@
|
|||
status = "okay";
|
||||
|
||||
/* Regulators */
|
||||
AVDD-supply = <&vaux2_reg>;
|
||||
IOVDD-supply = <&vaux2_reg>;
|
||||
DRVDD-supply = <&vaux2_reg>;
|
||||
DVDD-supply = <&vbat>;
|
||||
AVDD-supply = <&v3_3d_reg>;
|
||||
IOVDD-supply = <&v3_3d_reg>;
|
||||
DRVDD-supply = <&v3_3d_reg>;
|
||||
DVDD-supply = <&v1_8d_reg>;
|
||||
};
|
||||
};
|
||||
|
||||
|
|
|
@@ -1762,7 +1762,7 @@
 			reg = <0xcc000 0x4>;
 			reg-names = "rev";
 			/* Domains (P, C): per_pwrdm, l4ls_clkdm */
-			clocks = <&l4ls_clkctrl AM3_D_CAN0_CLKCTRL 0>;
+			clocks = <&l4ls_clkctrl AM3_L4LS_D_CAN0_CLKCTRL 0>;
 			clock-names = "fck";
 			#address-cells = <1>;
 			#size-cells = <1>;

@@ -1785,7 +1785,7 @@
 			reg = <0xd0000 0x4>;
 			reg-names = "rev";
 			/* Domains (P, C): per_pwrdm, l4ls_clkdm */
-			clocks = <&l4ls_clkctrl AM3_D_CAN1_CLKCTRL 0>;
+			clocks = <&l4ls_clkctrl AM3_L4LS_D_CAN1_CLKCTRL 0>;
 			clock-names = "fck";
 			#address-cells = <1>;
 			#size-cells = <1>;
@ -254,6 +254,7 @@
|
|||
};
|
||||
|
||||
vccio_sd: LDO_REG5 {
|
||||
regulator-boot-on;
|
||||
regulator-min-microvolt = <1800000>;
|
||||
regulator-max-microvolt = <3300000>;
|
||||
regulator-name = "vccio_sd";
|
||||
|
@ -430,7 +431,7 @@
|
|||
bus-width = <4>;
|
||||
cap-mmc-highspeed;
|
||||
cap-sd-highspeed;
|
||||
card-detect-delay = <200>;
|
||||
broken-cd;
|
||||
disable-wp; /* wp not hooked up */
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd &sdmmc_bus4>;
|
||||
|
|
|
@ -25,8 +25,6 @@
|
|||
|
||||
gpio_keys: gpio-keys {
|
||||
compatible = "gpio-keys";
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <&pwr_key_l>;
|
||||
|
|
|
@ -70,7 +70,7 @@
|
|||
compatible = "arm,cortex-a12";
|
||||
reg = <0x501>;
|
||||
resets = <&cru SRST_CORE1>;
|
||||
operating-points = <&cpu_opp_table>;
|
||||
operating-points-v2 = <&cpu_opp_table>;
|
||||
#cooling-cells = <2>; /* min followed by max */
|
||||
clock-latency = <40000>;
|
||||
clocks = <&cru ARMCLK>;
|
||||
|
@ -80,7 +80,7 @@
|
|||
compatible = "arm,cortex-a12";
|
||||
reg = <0x502>;
|
||||
resets = <&cru SRST_CORE2>;
|
||||
operating-points = <&cpu_opp_table>;
|
||||
operating-points-v2 = <&cpu_opp_table>;
|
||||
#cooling-cells = <2>; /* min followed by max */
|
||||
clock-latency = <40000>;
|
||||
clocks = <&cru ARMCLK>;
|
||||
|
@ -90,7 +90,7 @@
|
|||
compatible = "arm,cortex-a12";
|
||||
reg = <0x503>;
|
||||
resets = <&cru SRST_CORE3>;
|
||||
operating-points = <&cpu_opp_table>;
|
||||
operating-points-v2 = <&cpu_opp_table>;
|
||||
#cooling-cells = <2>; /* min followed by max */
|
||||
clock-latency = <40000>;
|
||||
clocks = <&cru ARMCLK>;
|
||||
|
@ -1119,8 +1119,6 @@
|
|||
clock-names = "ref", "pclk";
|
||||
power-domains = <&power RK3288_PD_VIO>;
|
||||
rockchip,grf = <&grf>;
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
status = "disabled";
|
||||
|
||||
ports {
|
||||
|
@ -1282,27 +1280,27 @@
|
|||
gpu_opp_table: gpu-opp-table {
|
||||
compatible = "operating-points-v2";
|
||||
|
||||
opp@100000000 {
|
||||
opp-100000000 {
|
||||
opp-hz = /bits/ 64 <100000000>;
|
||||
opp-microvolt = <950000>;
|
||||
};
|
||||
opp@200000000 {
|
||||
opp-200000000 {
|
||||
opp-hz = /bits/ 64 <200000000>;
|
||||
opp-microvolt = <950000>;
|
||||
};
|
||||
opp@300000000 {
|
||||
opp-300000000 {
|
||||
opp-hz = /bits/ 64 <300000000>;
|
||||
opp-microvolt = <1000000>;
|
||||
};
|
||||
opp@400000000 {
|
||||
opp-400000000 {
|
||||
opp-hz = /bits/ 64 <400000000>;
|
||||
opp-microvolt = <1100000>;
|
||||
};
|
||||
opp@500000000 {
|
||||
opp-500000000 {
|
||||
opp-hz = /bits/ 64 <500000000>;
|
||||
opp-microvolt = <1200000>;
|
||||
};
|
||||
opp@600000000 {
|
||||
opp-600000000 {
|
||||
opp-hz = /bits/ 64 <600000000>;
|
||||
opp-microvolt = <1250000>;
|
||||
};
|
||||
|
|
|
@ -518,7 +518,7 @@
|
|||
#define PIN_PC9__GPIO PINMUX_PIN(PIN_PC9, 0, 0)
|
||||
#define PIN_PC9__FIQ PINMUX_PIN(PIN_PC9, 1, 3)
|
||||
#define PIN_PC9__GTSUCOMP PINMUX_PIN(PIN_PC9, 2, 1)
|
||||
#define PIN_PC9__ISC_D0 PINMUX_PIN(PIN_PC9, 2, 1)
|
||||
#define PIN_PC9__ISC_D0 PINMUX_PIN(PIN_PC9, 3, 1)
|
||||
#define PIN_PC9__TIOA4 PINMUX_PIN(PIN_PC9, 4, 2)
|
||||
#define PIN_PC10 74
|
||||
#define PIN_PC10__GPIO PINMUX_PIN(PIN_PC10, 0, 0)
|
||||
|
|
|
@@ -213,13 +213,12 @@
 		gpio-sck = <&gpio0 5 GPIO_ACTIVE_HIGH>;
 		gpio-mosi = <&gpio0 4 GPIO_ACTIVE_HIGH>;
 		/*
-		 * This chipselect is active high. Just setting the flags
-		 * to GPIO_ACTIVE_HIGH is not enough for the SPI DT bindings,
-		 * it will be ignored, only the special "spi-cs-high" flag
-		 * really counts.
+		 * It's not actually active high, but the frameworks assume
+		 * the polarity of the passed-in GPIO is "normal" (active
+		 * high) then actively drives the line low to select the
+		 * chip.
 		 */
 		cs-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>;
-		spi-cs-high;
 		num-chipselects = <1>;
 
 		/*
@ -55,53 +55,22 @@ static inline void syscall_set_return_value(struct task_struct *task,
|
|||
|
||||
static inline void syscall_get_arguments(struct task_struct *task,
|
||||
struct pt_regs *regs,
|
||||
unsigned int i, unsigned int n,
|
||||
unsigned long *args)
|
||||
{
|
||||
if (n == 0)
|
||||
return;
|
||||
args[0] = regs->ARM_ORIG_r0;
|
||||
args++;
|
||||
|
||||
if (i + n > SYSCALL_MAX_ARGS) {
|
||||
unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
|
||||
unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
|
||||
pr_warn("%s called with max args %d, handling only %d\n",
|
||||
__func__, i + n, SYSCALL_MAX_ARGS);
|
||||
memset(args_bad, 0, n_bad * sizeof(args[0]));
|
||||
n = SYSCALL_MAX_ARGS - i;
|
||||
}
|
||||
|
||||
if (i == 0) {
|
||||
args[0] = regs->ARM_ORIG_r0;
|
||||
args++;
|
||||
i++;
|
||||
n--;
|
||||
}
|
||||
|
||||
memcpy(args, ®s->ARM_r0 + i, n * sizeof(args[0]));
|
||||
memcpy(args, ®s->ARM_r0 + 1, 5 * sizeof(args[0]));
|
||||
}
|
||||
|
||||
static inline void syscall_set_arguments(struct task_struct *task,
|
||||
struct pt_regs *regs,
|
||||
unsigned int i, unsigned int n,
|
||||
const unsigned long *args)
|
||||
{
|
||||
if (n == 0)
|
||||
return;
|
||||
regs->ARM_ORIG_r0 = args[0];
|
||||
args++;
|
||||
|
||||
if (i + n > SYSCALL_MAX_ARGS) {
|
||||
pr_warn("%s called with max args %d, handling only %d\n",
|
||||
__func__, i + n, SYSCALL_MAX_ARGS);
|
||||
n = SYSCALL_MAX_ARGS - i;
|
||||
}
|
||||
|
||||
if (i == 0) {
|
||||
regs->ARM_ORIG_r0 = args[0];
|
||||
args++;
|
||||
i++;
|
||||
n--;
|
||||
}
|
||||
|
||||
memcpy(®s->ARM_r0 + i, args, n * sizeof(args[0]));
|
||||
memcpy(®s->ARM_r0 + 1, args, 5 * sizeof(args[0]));
|
||||
}
|
||||
|
||||
static inline int syscall_get_arch(void)
|
||||
|
|
|
@ -591,13 +591,13 @@ static int __init at91_pm_backup_init(void)
|
|||
|
||||
np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-securam");
|
||||
if (!np)
|
||||
goto securam_fail;
|
||||
goto securam_fail_no_ref_dev;
|
||||
|
||||
pdev = of_find_device_by_node(np);
|
||||
of_node_put(np);
|
||||
if (!pdev) {
|
||||
pr_warn("%s: failed to find securam device!\n", __func__);
|
||||
goto securam_fail;
|
||||
goto securam_fail_no_ref_dev;
|
||||
}
|
||||
|
||||
sram_pool = gen_pool_get(&pdev->dev, NULL);
|
||||
|
@ -620,6 +620,8 @@ static int __init at91_pm_backup_init(void)
|
|||
return 0;
|
||||
|
||||
securam_fail:
|
||||
put_device(&pdev->dev);
|
||||
securam_fail_no_ref_dev:
|
||||
iounmap(pm_data.sfrbu);
|
||||
pm_data.sfrbu = NULL;
|
||||
return ret;
|
||||
|
|
|
@@ -300,7 +300,7 @@ static struct resource iop13xx_adma_2_resources[] = {
 	}
 };
 
-static u64 iop13xx_adma_dmamask = DMA_BIT_MASK(64);
+static u64 iop13xx_adma_dmamask = DMA_BIT_MASK(32);
 static struct iop_adma_platform_data iop13xx_adma_0_data = {
 	.hw_id = 0,
 	.pool_size = PAGE_SIZE,
@ -324,7 +324,7 @@ static struct platform_device iop13xx_adma_0_channel = {
|
|||
.resource = iop13xx_adma_0_resources,
|
||||
.dev = {
|
||||
.dma_mask = &iop13xx_adma_dmamask,
|
||||
.coherent_dma_mask = DMA_BIT_MASK(64),
|
||||
.coherent_dma_mask = DMA_BIT_MASK(32),
|
||||
.platform_data = (void *) &iop13xx_adma_0_data,
|
||||
},
|
||||
};
|
||||
|
@ -336,7 +336,7 @@ static struct platform_device iop13xx_adma_1_channel = {
|
|||
.resource = iop13xx_adma_1_resources,
|
||||
.dev = {
|
||||
.dma_mask = &iop13xx_adma_dmamask,
|
||||
.coherent_dma_mask = DMA_BIT_MASK(64),
|
||||
.coherent_dma_mask = DMA_BIT_MASK(32),
|
||||
.platform_data = (void *) &iop13xx_adma_1_data,
|
||||
},
|
||||
};
|
||||
|
@ -348,7 +348,7 @@ static struct platform_device iop13xx_adma_2_channel = {
|
|||
.resource = iop13xx_adma_2_resources,
|
||||
.dev = {
|
||||
.dma_mask = &iop13xx_adma_dmamask,
|
||||
.coherent_dma_mask = DMA_BIT_MASK(64),
|
||||
.coherent_dma_mask = DMA_BIT_MASK(32),
|
||||
.platform_data = (void *) &iop13xx_adma_2_data,
|
||||
},
|
||||
};
|
||||
|
|
|
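The hunks in this area all shrink the advertised DMA masks of statically defined platform devices from 64 to 32 bits, keeping the coherent mask no wider than what the platform actually supports. For comparison, a dynamically probed driver would usually express the same constraint at probe time; a minimal sketch, where example_probe_dma is a hypothetical helper and not from this merge:

#include <linux/dma-mapping.h>

/* Declare 32-bit streaming and coherent DMA capability for a device. */
static int example_probe_dma(struct device *dev)
{
	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}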
@ -152,7 +152,7 @@ static struct resource iop13xx_tpmi_3_resources[] = {
|
|||
}
|
||||
};
|
||||
|
||||
u64 iop13xx_tpmi_mask = DMA_BIT_MASK(64);
|
||||
u64 iop13xx_tpmi_mask = DMA_BIT_MASK(32);
|
||||
static struct platform_device iop13xx_tpmi_0_device = {
|
||||
.name = "iop-tpmi",
|
||||
.id = 0,
|
||||
|
@ -160,7 +160,7 @@ static struct platform_device iop13xx_tpmi_0_device = {
|
|||
.resource = iop13xx_tpmi_0_resources,
|
||||
.dev = {
|
||||
.dma_mask = &iop13xx_tpmi_mask,
|
||||
.coherent_dma_mask = DMA_BIT_MASK(64),
|
||||
.coherent_dma_mask = DMA_BIT_MASK(32),
|
||||
},
|
||||
};
|
||||
|
||||
|
@ -171,7 +171,7 @@ static struct platform_device iop13xx_tpmi_1_device = {
|
|||
.resource = iop13xx_tpmi_1_resources,
|
||||
.dev = {
|
||||
.dma_mask = &iop13xx_tpmi_mask,
|
||||
.coherent_dma_mask = DMA_BIT_MASK(64),
|
||||
.coherent_dma_mask = DMA_BIT_MASK(32),
|
||||
},
|
||||
};
|
||||
|
||||
|
@ -182,7 +182,7 @@ static struct platform_device iop13xx_tpmi_2_device = {
|
|||
.resource = iop13xx_tpmi_2_resources,
|
||||
.dev = {
|
||||
.dma_mask = &iop13xx_tpmi_mask,
|
||||
.coherent_dma_mask = DMA_BIT_MASK(64),
|
||||
.coherent_dma_mask = DMA_BIT_MASK(32),
|
||||
},
|
||||
};
|
||||
|
||||
|
@ -193,7 +193,7 @@ static struct platform_device iop13xx_tpmi_3_device = {
|
|||
.resource = iop13xx_tpmi_3_resources,
|
||||
.dev = {
|
||||
.dma_mask = &iop13xx_tpmi_mask,
|
||||
.coherent_dma_mask = DMA_BIT_MASK(64),
|
||||
.coherent_dma_mask = DMA_BIT_MASK(32),
|
||||
},
|
||||
};
|
||||
|
||||
|
|
|
@ -65,6 +65,7 @@ static void m10v_smp_init(unsigned int max_cpus)
|
|||
writel(KERNEL_UNBOOT_FLAG, m10v_smp_base + cpu * 4);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_HOTPLUG_CPU
|
||||
static void m10v_cpu_die(unsigned int l_cpu)
|
||||
{
|
||||
gic_cpu_if_down(0);
|
||||
|
@ -83,12 +84,15 @@ static int m10v_cpu_kill(unsigned int l_cpu)
|
|||
|
||||
return 1;
|
||||
}
|
||||
#endif
|
||||
|
||||
static struct smp_operations m10v_smp_ops __initdata = {
|
||||
.smp_prepare_cpus = m10v_smp_init,
|
||||
.smp_boot_secondary = m10v_boot_secondary,
|
||||
#ifdef CONFIG_HOTPLUG_CPU
|
||||
.cpu_die = m10v_cpu_die,
|
||||
.cpu_kill = m10v_cpu_kill,
|
||||
#endif
|
||||
};
|
||||
CPU_METHOD_OF_DECLARE(m10v_smp, "socionext,milbeaut-m10v-smp", &m10v_smp_ops);
|
||||
|
||||
|
|
|
@ -182,6 +182,7 @@ static struct resource latch1_resources[] = {
|
|||
|
||||
static struct bgpio_pdata latch1_pdata = {
|
||||
.label = LATCH1_LABEL,
|
||||
.base = -1,
|
||||
.ngpio = LATCH1_NGPIO,
|
||||
};
|
||||
|
||||
|
@ -219,6 +220,7 @@ static struct resource latch2_resources[] = {
|
|||
|
||||
static struct bgpio_pdata latch2_pdata = {
|
||||
.label = LATCH2_LABEL,
|
||||
.base = -1,
|
||||
.ngpio = LATCH2_NGPIO,
|
||||
};
|
||||
|
||||
|
|
|
@ -250,8 +250,10 @@ static int __init omapdss_init_of(void)
|
|||
if (!node)
|
||||
return 0;
|
||||
|
||||
if (!of_device_is_available(node))
|
||||
if (!of_device_is_available(node)) {
|
||||
of_node_put(node);
|
||||
return 0;
|
||||
}
|
||||
|
||||
pdev = of_find_device_by_node(node);
|
||||
|
||||
|
|
|
@ -143,7 +143,7 @@ struct platform_device iop3xx_dma_0_channel = {
|
|||
.resource = iop3xx_dma_0_resources,
|
||||
.dev = {
|
||||
.dma_mask = &iop3xx_adma_dmamask,
|
||||
.coherent_dma_mask = DMA_BIT_MASK(64),
|
||||
.coherent_dma_mask = DMA_BIT_MASK(32),
|
||||
.platform_data = (void *) &iop3xx_dma_0_data,
|
||||
},
|
||||
};
|
||||
|
@ -155,7 +155,7 @@ struct platform_device iop3xx_dma_1_channel = {
|
|||
.resource = iop3xx_dma_1_resources,
|
||||
.dev = {
|
||||
.dma_mask = &iop3xx_adma_dmamask,
|
||||
.coherent_dma_mask = DMA_BIT_MASK(64),
|
||||
.coherent_dma_mask = DMA_BIT_MASK(32),
|
||||
.platform_data = (void *) &iop3xx_dma_1_data,
|
||||
},
|
||||
};
|
||||
|
@ -167,7 +167,7 @@ struct platform_device iop3xx_aau_channel = {
|
|||
.resource = iop3xx_aau_resources,
|
||||
.dev = {
|
||||
.dma_mask = &iop3xx_adma_dmamask,
|
||||
.coherent_dma_mask = DMA_BIT_MASK(64),
|
||||
.coherent_dma_mask = DMA_BIT_MASK(32),
|
||||
.platform_data = (void *) &iop3xx_aau_data,
|
||||
},
|
||||
};
|
||||
|
|
|
@ -622,7 +622,7 @@ static struct platform_device orion_xor0_shared = {
|
|||
.resource = orion_xor0_shared_resources,
|
||||
.dev = {
|
||||
.dma_mask = &orion_xor_dmamask,
|
||||
.coherent_dma_mask = DMA_BIT_MASK(64),
|
||||
.coherent_dma_mask = DMA_BIT_MASK(32),
|
||||
.platform_data = &orion_xor0_pdata,
|
||||
},
|
||||
};
|
||||
|
@ -683,7 +683,7 @@ static struct platform_device orion_xor1_shared = {
|
|||
.resource = orion_xor1_shared_resources,
|
||||
.dev = {
|
||||
.dma_mask = &orion_xor_dmamask,
|
||||
.coherent_dma_mask = DMA_BIT_MASK(64),
|
||||
.coherent_dma_mask = DMA_BIT_MASK(32),
|
||||
.platform_data = &orion_xor1_pdata,
|
||||
},
|
||||
};
|
||||
|
|
|
@ -162,6 +162,7 @@
|
|||
rx-fifo-depth = <16384>;
|
||||
snps,multicast-filter-bins = <256>;
|
||||
iommus = <&smmu 1>;
|
||||
altr,sysmgr-syscon = <&sysmgr 0x44 0>;
|
||||
status = "disabled";
|
||||
};
|
||||
|
||||
|
@ -179,6 +180,7 @@
|
|||
rx-fifo-depth = <16384>;
|
||||
snps,multicast-filter-bins = <256>;
|
||||
iommus = <&smmu 2>;
|
||||
altr,sysmgr-syscon = <&sysmgr 0x48 0>;
|
||||
status = "disabled";
|
||||
};
|
||||
|
||||
|
@ -196,6 +198,7 @@
|
|||
rx-fifo-depth = <16384>;
|
||||
snps,multicast-filter-bins = <256>;
|
||||
iommus = <&smmu 3>;
|
||||
altr,sysmgr-syscon = <&sysmgr 0x4c 0>;
|
||||
status = "disabled";
|
||||
};
|
||||
|
||||
|
|
|
@@ -108,8 +108,8 @@
 	snps,reset-gpio = <&gpio1 RK_PC2 GPIO_ACTIVE_LOW>;
 	snps,reset-active-low;
 	snps,reset-delays-us = <0 10000 50000>;
-	tx_delay = <0x25>;
-	rx_delay = <0x11>;
+	tx_delay = <0x24>;
+	rx_delay = <0x18>;
 	status = "okay";
 };
 
@ -46,8 +46,7 @@
|
|||
|
||||
vcc_host1_5v: vcc_otg_5v: vcc-host1-5v-regulator {
|
||||
compatible = "regulator-fixed";
|
||||
enable-active-high;
|
||||
gpio = <&gpio0 RK_PA2 GPIO_ACTIVE_HIGH>;
|
||||
gpio = <&gpio0 RK_PA2 GPIO_ACTIVE_LOW>;
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <&usb20_host_drv>;
|
||||
regulator-name = "vcc_host1_5v";
|
||||
|
|
|
@ -1445,11 +1445,11 @@
|
|||
|
||||
sdmmc0 {
|
||||
sdmmc0_clk: sdmmc0-clk {
|
||||
rockchip,pins = <1 RK_PA6 1 &pcfg_pull_none_4ma>;
|
||||
rockchip,pins = <1 RK_PA6 1 &pcfg_pull_none_8ma>;
|
||||
};
|
||||
|
||||
sdmmc0_cmd: sdmmc0-cmd {
|
||||
rockchip,pins = <1 RK_PA4 1 &pcfg_pull_up_4ma>;
|
||||
rockchip,pins = <1 RK_PA4 1 &pcfg_pull_up_8ma>;
|
||||
};
|
||||
|
||||
sdmmc0_dectn: sdmmc0-dectn {
|
||||
|
@ -1461,14 +1461,14 @@
|
|||
};
|
||||
|
||||
sdmmc0_bus1: sdmmc0-bus1 {
|
||||
rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_4ma>;
|
||||
rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_8ma>;
|
||||
};
|
||||
|
||||
sdmmc0_bus4: sdmmc0-bus4 {
|
||||
rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_4ma>,
|
||||
<1 RK_PA1 1 &pcfg_pull_up_4ma>,
|
||||
<1 RK_PA2 1 &pcfg_pull_up_4ma>,
|
||||
<1 RK_PA3 1 &pcfg_pull_up_4ma>;
|
||||
rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_8ma>,
|
||||
<1 RK_PA1 1 &pcfg_pull_up_8ma>,
|
||||
<1 RK_PA2 1 &pcfg_pull_up_8ma>,
|
||||
<1 RK_PA3 1 &pcfg_pull_up_8ma>;
|
||||
};
|
||||
|
||||
sdmmc0_gpio: sdmmc0-gpio {
|
||||
|
@ -1642,50 +1642,50 @@
|
|||
rgmiim1_pins: rgmiim1-pins {
|
||||
rockchip,pins =
|
||||
/* mac_txclk */
|
||||
<1 RK_PB4 2 &pcfg_pull_none_12ma>,
|
||||
<1 RK_PB4 2 &pcfg_pull_none_8ma>,
|
||||
/* mac_rxclk */
|
||||
<1 RK_PB5 2 &pcfg_pull_none_2ma>,
|
||||
<1 RK_PB5 2 &pcfg_pull_none_4ma>,
|
||||
/* mac_mdio */
|
||||
<1 RK_PC3 2 &pcfg_pull_none_2ma>,
|
||||
<1 RK_PC3 2 &pcfg_pull_none_4ma>,
|
||||
/* mac_txen */
|
||||
<1 RK_PD1 2 &pcfg_pull_none_12ma>,
|
||||
<1 RK_PD1 2 &pcfg_pull_none_8ma>,
|
||||
/* mac_clk */
|
||||
<1 RK_PC5 2 &pcfg_pull_none_2ma>,
|
||||
<1 RK_PC5 2 &pcfg_pull_none_4ma>,
|
||||
/* mac_rxdv */
|
||||
<1 RK_PC6 2 &pcfg_pull_none_2ma>,
|
||||
<1 RK_PC6 2 &pcfg_pull_none_4ma>,
|
||||
/* mac_mdc */
|
||||
<1 RK_PC7 2 &pcfg_pull_none_2ma>,
|
||||
<1 RK_PC7 2 &pcfg_pull_none_4ma>,
|
||||
/* mac_rxd1 */
|
||||
<1 RK_PB2 2 &pcfg_pull_none_2ma>,
|
||||
<1 RK_PB2 2 &pcfg_pull_none_4ma>,
|
||||
/* mac_rxd0 */
|
||||
<1 RK_PB3 2 &pcfg_pull_none_2ma>,
|
||||
<1 RK_PB3 2 &pcfg_pull_none_4ma>,
|
||||
/* mac_txd1 */
|
||||
<1 RK_PB0 2 &pcfg_pull_none_12ma>,
|
||||
<1 RK_PB0 2 &pcfg_pull_none_8ma>,
|
||||
/* mac_txd0 */
|
||||
<1 RK_PB1 2 &pcfg_pull_none_12ma>,
|
||||
<1 RK_PB1 2 &pcfg_pull_none_8ma>,
|
||||
/* mac_rxd3 */
|
||||
<1 RK_PB6 2 &pcfg_pull_none_2ma>,
|
||||
<1 RK_PB6 2 &pcfg_pull_none_4ma>,
|
||||
/* mac_rxd2 */
|
||||
<1 RK_PB7 2 &pcfg_pull_none_2ma>,
|
||||
<1 RK_PB7 2 &pcfg_pull_none_4ma>,
|
||||
/* mac_txd3 */
|
||||
<1 RK_PC0 2 &pcfg_pull_none_12ma>,
|
||||
<1 RK_PC0 2 &pcfg_pull_none_8ma>,
|
||||
/* mac_txd2 */
|
||||
<1 RK_PC1 2 &pcfg_pull_none_12ma>,
|
||||
<1 RK_PC1 2 &pcfg_pull_none_8ma>,
|
||||
|
||||
/* mac_txclk */
|
||||
<0 RK_PB0 1 &pcfg_pull_none>,
|
||||
<0 RK_PB0 1 &pcfg_pull_none_8ma>,
|
||||
/* mac_txen */
|
||||
<0 RK_PB4 1 &pcfg_pull_none>,
|
||||
<0 RK_PB4 1 &pcfg_pull_none_8ma>,
|
||||
/* mac_clk */
|
||||
<0 RK_PD0 1 &pcfg_pull_none>,
|
||||
<0 RK_PD0 1 &pcfg_pull_none_4ma>,
|
||||
/* mac_txd1 */
|
||||
<0 RK_PC0 1 &pcfg_pull_none>,
|
||||
<0 RK_PC0 1 &pcfg_pull_none_8ma>,
|
||||
/* mac_txd0 */
|
||||
<0 RK_PC1 1 &pcfg_pull_none>,
|
||||
<0 RK_PC1 1 &pcfg_pull_none_8ma>,
|
||||
/* mac_txd3 */
|
||||
<0 RK_PC7 1 &pcfg_pull_none>,
|
||||
<0 RK_PC7 1 &pcfg_pull_none_8ma>,
|
||||
/* mac_txd2 */
|
||||
<0 RK_PC6 1 &pcfg_pull_none>;
|
||||
<0 RK_PC6 1 &pcfg_pull_none_8ma>;
|
||||
};
|
||||
|
||||
rmiim1_pins: rmiim1-pins {
|
||||
|
|
|
@ -158,6 +158,7 @@
|
|||
};
|
||||
|
||||
&hdmi {
|
||||
ddc-i2c-bus = <&i2c3>;
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <&hdmi_cec>;
|
||||
status = "okay";
|
||||
|
|
|
@ -65,52 +65,22 @@ static inline void syscall_set_return_value(struct task_struct *task,
|
|||
|
||||
static inline void syscall_get_arguments(struct task_struct *task,
|
||||
struct pt_regs *regs,
|
||||
unsigned int i, unsigned int n,
|
||||
unsigned long *args)
|
||||
{
|
||||
if (n == 0)
|
||||
return;
|
||||
args[0] = regs->orig_x0;
|
||||
args++;
|
||||
|
||||
if (i + n > SYSCALL_MAX_ARGS) {
|
||||
unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
|
||||
unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
|
||||
pr_warning("%s called with max args %d, handling only %d\n",
|
||||
__func__, i + n, SYSCALL_MAX_ARGS);
|
||||
memset(args_bad, 0, n_bad * sizeof(args[0]));
|
||||
}
|
||||
|
||||
if (i == 0) {
|
||||
args[0] = regs->orig_x0;
|
||||
args++;
|
||||
i++;
|
||||
n--;
|
||||
}
|
||||
|
||||
memcpy(args, ®s->regs[i], n * sizeof(args[0]));
|
||||
memcpy(args, ®s->regs[1], 5 * sizeof(args[0]));
|
||||
}
|
||||
|
||||
static inline void syscall_set_arguments(struct task_struct *task,
|
||||
struct pt_regs *regs,
|
||||
unsigned int i, unsigned int n,
|
||||
const unsigned long *args)
|
||||
{
|
||||
if (n == 0)
|
||||
return;
|
||||
regs->orig_x0 = args[0];
|
||||
args++;
|
||||
|
||||
if (i + n > SYSCALL_MAX_ARGS) {
|
||||
pr_warning("%s called with max args %d, handling only %d\n",
|
||||
__func__, i + n, SYSCALL_MAX_ARGS);
|
||||
n = SYSCALL_MAX_ARGS - i;
|
||||
}
|
||||
|
||||
if (i == 0) {
|
||||
regs->orig_x0 = args[0];
|
||||
args++;
|
||||
i++;
|
||||
n--;
|
||||
}
|
||||
|
||||
memcpy(®s->regs[i], args, n * sizeof(args[0]));
|
||||
memcpy(®s->regs[1], args, 5 * sizeof(args[0]));
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -94,6 +94,9 @@ static bool on_sdei_normal_stack(unsigned long sp, struct stack_info *info)
|
|||
unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr);
|
||||
unsigned long high = low + SDEI_STACK_SIZE;
|
||||
|
||||
if (!low)
|
||||
return false;
|
||||
|
||||
if (sp < low || sp >= high)
|
||||
return false;
|
||||
|
||||
|
@ -111,6 +114,9 @@ static bool on_sdei_critical_stack(unsigned long sp, struct stack_info *info)
|
|||
unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr);
|
||||
unsigned long high = low + SDEI_STACK_SIZE;
|
||||
|
||||
if (!low)
|
||||
return false;
|
||||
|
||||
if (sp < low || sp >= high)
|
||||
return false;
|
||||
|
||||
|
|
|
@ -46,78 +46,27 @@ static inline void syscall_set_return_value(struct task_struct *task,
|
|||
}
|
||||
|
||||
static inline void syscall_get_arguments(struct task_struct *task,
|
||||
struct pt_regs *regs, unsigned int i,
|
||||
unsigned int n, unsigned long *args)
|
||||
struct pt_regs *regs,
|
||||
unsigned long *args)
|
||||
{
|
||||
switch (i) {
|
||||
case 0:
|
||||
if (!n--)
|
||||
break;
|
||||
*args++ = regs->a4;
|
||||
case 1:
|
||||
if (!n--)
|
||||
break;
|
||||
*args++ = regs->b4;
|
||||
case 2:
|
||||
if (!n--)
|
||||
break;
|
||||
*args++ = regs->a6;
|
||||
case 3:
|
||||
if (!n--)
|
||||
break;
|
||||
*args++ = regs->b6;
|
||||
case 4:
|
||||
if (!n--)
|
||||
break;
|
||||
*args++ = regs->a8;
|
||||
case 5:
|
||||
if (!n--)
|
||||
break;
|
||||
*args++ = regs->b8;
|
||||
case 6:
|
||||
if (!n--)
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
*args++ = regs->a4;
|
||||
*args++ = regs->b4;
|
||||
*args++ = regs->a6;
|
||||
*args++ = regs->b6;
|
||||
*args++ = regs->a8;
|
||||
*args = regs->b8;
|
||||
}
|
||||
|
||||
static inline void syscall_set_arguments(struct task_struct *task,
|
||||
struct pt_regs *regs,
|
||||
unsigned int i, unsigned int n,
|
||||
const unsigned long *args)
|
||||
{
|
||||
switch (i) {
|
||||
case 0:
|
||||
if (!n--)
|
||||
break;
|
||||
regs->a4 = *args++;
|
||||
case 1:
|
||||
if (!n--)
|
||||
break;
|
||||
regs->b4 = *args++;
|
||||
case 2:
|
||||
if (!n--)
|
||||
break;
|
||||
regs->a6 = *args++;
|
||||
case 3:
|
||||
if (!n--)
|
||||
break;
|
||||
regs->b6 = *args++;
|
||||
case 4:
|
||||
if (!n--)
|
||||
break;
|
||||
regs->a8 = *args++;
|
||||
case 5:
|
||||
if (!n--)
|
||||
break;
|
||||
regs->a9 = *args++;
|
||||
case 6:
|
||||
if (!n)
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
regs->a4 = *args++;
|
||||
regs->b4 = *args++;
|
||||
regs->a6 = *args++;
|
||||
regs->b6 = *args++;
|
||||
regs->a8 = *args++;
|
||||
regs->a9 = *args;
|
||||
}
|
||||
|
||||
#endif /* __ASM_C6X_SYSCALLS_H */
|
||||
|
|
|
@ -43,30 +43,20 @@ syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
|
|||
|
||||
static inline void
|
||||
syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
|
||||
unsigned int i, unsigned int n, unsigned long *args)
|
||||
unsigned long *args)
|
||||
{
|
||||
BUG_ON(i + n > 6);
|
||||
if (i == 0) {
|
||||
args[0] = regs->orig_a0;
|
||||
args++;
|
||||
i++;
|
||||
n--;
|
||||
}
|
||||
memcpy(args, ®s->a1 + i * sizeof(regs->a1), n * sizeof(args[0]));
|
||||
args[0] = regs->orig_a0;
|
||||
args++;
|
||||
memcpy(args, ®s->a1, 5 * sizeof(args[0]));
|
||||
}
|
||||
|
||||
static inline void
|
||||
syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
|
||||
unsigned int i, unsigned int n, const unsigned long *args)
|
||||
const unsigned long *args)
|
||||
{
|
||||
BUG_ON(i + n > 6);
|
||||
if (i == 0) {
|
||||
regs->orig_a0 = args[0];
|
||||
args++;
|
||||
i++;
|
||||
n--;
|
||||
}
|
||||
memcpy(®s->a1 + i * sizeof(regs->a1), args, n * sizeof(regs->a0));
|
||||
regs->orig_a0 = args[0];
|
||||
args++;
|
||||
memcpy(®s->a1, args, 5 * sizeof(regs->a1));
|
||||
}
|
||||
|
||||
static inline int
|
||||
|
|
|
@ -17,34 +17,14 @@ syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
|
|||
|
||||
static inline void
|
||||
syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
|
||||
unsigned int i, unsigned int n, unsigned long *args)
|
||||
unsigned long *args)
|
||||
{
|
||||
BUG_ON(i + n > 6);
|
||||
|
||||
while (n > 0) {
|
||||
switch (i) {
|
||||
case 0:
|
||||
*args++ = regs->er1;
|
||||
break;
|
||||
case 1:
|
||||
*args++ = regs->er2;
|
||||
break;
|
||||
case 2:
|
||||
*args++ = regs->er3;
|
||||
break;
|
||||
case 3:
|
||||
*args++ = regs->er4;
|
||||
break;
|
||||
case 4:
|
||||
*args++ = regs->er5;
|
||||
break;
|
||||
case 5:
|
||||
*args++ = regs->er6;
|
||||
break;
|
||||
}
|
||||
i++;
|
||||
n--;
|
||||
}
|
||||
*args++ = regs->er1;
|
||||
*args++ = regs->er2;
|
||||
*args++ = regs->er3;
|
||||
*args++ = regs->er4;
|
||||
*args++ = regs->er5;
|
||||
*args = regs->er6;
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -37,10 +37,8 @@ static inline long syscall_get_nr(struct task_struct *task,
|
|||
|
||||
static inline void syscall_get_arguments(struct task_struct *task,
|
||||
struct pt_regs *regs,
|
||||
unsigned int i, unsigned int n,
|
||||
unsigned long *args)
|
||||
{
|
||||
BUG_ON(i + n > 6);
|
||||
memcpy(args, &(®s->r00)[i], n * sizeof(args[0]));
|
||||
memcpy(args, &(®s->r00)[0], 6 * sizeof(args[0]));
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -59,26 +59,19 @@ static inline void syscall_set_return_value(struct task_struct *task,
|
|||
}
|
||||
|
||||
extern void ia64_syscall_get_set_arguments(struct task_struct *task,
|
||||
struct pt_regs *regs, unsigned int i, unsigned int n,
|
||||
unsigned long *args, int rw);
|
||||
struct pt_regs *regs, unsigned long *args, int rw);
|
||||
static inline void syscall_get_arguments(struct task_struct *task,
|
||||
struct pt_regs *regs,
|
||||
unsigned int i, unsigned int n,
|
||||
unsigned long *args)
|
||||
{
|
||||
BUG_ON(i + n > 6);
|
||||
|
||||
ia64_syscall_get_set_arguments(task, regs, i, n, args, 0);
|
||||
ia64_syscall_get_set_arguments(task, regs, args, 0);
|
||||
}
|
||||
|
||||
static inline void syscall_set_arguments(struct task_struct *task,
|
||||
struct pt_regs *regs,
|
||||
unsigned int i, unsigned int n,
|
||||
unsigned long *args)
|
||||
{
|
||||
BUG_ON(i + n > 6);
|
||||
|
||||
ia64_syscall_get_set_arguments(task, regs, i, n, args, 1);
|
||||
ia64_syscall_get_set_arguments(task, regs, args, 1);
|
||||
}
|
||||
|
||||
static inline int syscall_get_arch(void)
|
||||
|
|
|
@ -2179,12 +2179,11 @@ static void syscall_get_set_args_cb(struct unw_frame_info *info, void *data)
|
|||
}
|
||||
|
||||
void ia64_syscall_get_set_arguments(struct task_struct *task,
|
||||
struct pt_regs *regs, unsigned int i, unsigned int n,
|
||||
unsigned long *args, int rw)
|
||||
struct pt_regs *regs, unsigned long *args, int rw)
|
||||
{
|
||||
struct syscall_get_set_args data = {
|
||||
.i = i,
|
||||
.n = n,
|
||||
.i = 0,
|
||||
.n = 6,
|
||||
.args = args,
|
||||
.regs = regs,
|
||||
.rw = rw,
|
||||
|
|
|
@ -82,18 +82,22 @@ static inline void microblaze_set_syscall_arg(struct pt_regs *regs,
|
|||
|
||||
static inline void syscall_get_arguments(struct task_struct *task,
|
||||
struct pt_regs *regs,
|
||||
unsigned int i, unsigned int n,
|
||||
unsigned long *args)
|
||||
{
|
||||
unsigned int i = 0;
|
||||
unsigned int n = 6;
|
||||
|
||||
while (n--)
|
||||
*args++ = microblaze_get_syscall_arg(regs, i++);
|
||||
}
|
||||
|
||||
static inline void syscall_set_arguments(struct task_struct *task,
|
||||
struct pt_regs *regs,
|
||||
unsigned int i, unsigned int n,
|
||||
const unsigned long *args)
|
||||
{
|
||||
unsigned int i = 0;
|
||||
unsigned int n = 6;
|
||||
|
||||
while (n--)
|
||||
microblaze_set_syscall_arg(regs, i++, *args++);
|
||||
}
|
||||
|
|
|
@ -116,9 +116,10 @@ static inline void syscall_set_return_value(struct task_struct *task,
|
|||
|
||||
static inline void syscall_get_arguments(struct task_struct *task,
|
||||
struct pt_regs *regs,
|
||||
unsigned int i, unsigned int n,
|
||||
unsigned long *args)
|
||||
{
|
||||
unsigned int i = 0;
|
||||
unsigned int n = 6;
|
||||
int ret;
|
||||
|
||||
/* O32 ABI syscall() */
|
||||
|
|
|
@@ -1419,7 +1419,7 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
 
 		sd.nr = syscall;
 		sd.arch = syscall_get_arch();
-		syscall_get_arguments(current, regs, 0, 6, args);
+		syscall_get_arguments(current, regs, args);
 		for (i = 0; i < 6; i++)
 			sd.args[i] = args[i];
 		sd.instruction_pointer = KSTK_EIP(current);
@ -108,81 +108,41 @@ void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
|
|||
* syscall_get_arguments - extract system call parameter values
|
||||
* @task: task of interest, must be blocked
|
||||
* @regs: task_pt_regs() of @task
|
||||
* @i: argument index [0,5]
|
||||
* @n: number of arguments; n+i must be [1,6].
|
||||
* @args: array filled with argument values
|
||||
*
|
||||
* Fetches @n arguments to the system call starting with the @i'th argument
|
||||
* (from 0 through 5). Argument @i is stored in @args[0], and so on.
|
||||
* An arch inline version is probably optimal when @i and @n are constants.
|
||||
* Fetches 6 arguments to the system call (from 0 through 5). The first
|
||||
* argument is stored in @args[0], and so on.
|
||||
*
|
||||
* It's only valid to call this when @task is stopped for tracing on
|
||||
* entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
|
||||
* It's invalid to call this with @i + @n > 6; we only support system calls
|
||||
* taking up to 6 arguments.
|
||||
*/
|
||||
#define SYSCALL_MAX_ARGS 6
|
||||
void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
|
||||
unsigned int i, unsigned int n, unsigned long *args)
|
||||
unsigned long *args)
|
||||
{
|
||||
if (n == 0)
|
||||
return;
|
||||
if (i + n > SYSCALL_MAX_ARGS) {
|
||||
unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
|
||||
unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
|
||||
pr_warning("%s called with max args %d, handling only %d\n",
|
||||
__func__, i + n, SYSCALL_MAX_ARGS);
|
||||
memset(args_bad, 0, n_bad * sizeof(args[0]));
|
||||
memset(args_bad, 0, n_bad * sizeof(args[0]));
|
||||
}
|
||||
|
||||
if (i == 0) {
|
||||
args[0] = regs->orig_r0;
|
||||
args++;
|
||||
i++;
|
||||
n--;
|
||||
}
|
||||
|
||||
memcpy(args, ®s->uregs[0] + i, n * sizeof(args[0]));
|
||||
args[0] = regs->orig_r0;
|
||||
args++;
|
||||
memcpy(args, ®s->uregs[0] + 1, 5 * sizeof(args[0]));
|
||||
}
|
||||
|
||||
/**
|
||||
* syscall_set_arguments - change system call parameter value
|
||||
* @task: task of interest, must be in system call entry tracing
|
||||
* @regs: task_pt_regs() of @task
|
||||
* @i: argument index [0,5]
|
||||
* @n: number of arguments; n+i must be [1,6].
|
||||
* @args: array of argument values to store
|
||||
*
|
||||
* Changes @n arguments to the system call starting with the @i'th argument.
|
||||
* Argument @i gets value @args[0], and so on.
|
||||
* An arch inline version is probably optimal when @i and @n are constants.
|
||||
* Changes 6 arguments to the system call. The first argument gets value
|
||||
* @args[0], and so on.
|
||||
*
|
||||
* It's only valid to call this when @task is stopped for tracing on
|
||||
* entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
|
||||
* It's invalid to call this with @i + @n > 6; we only support system calls
|
||||
* taking up to 6 arguments.
|
||||
*/
|
||||
void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
|
||||
unsigned int i, unsigned int n,
|
||||
const unsigned long *args)
|
||||
{
|
||||
if (n == 0)
|
||||
return;
|
||||
regs->orig_r0 = args[0];
|
||||
args++;
|
||||
|
||||
if (i + n > SYSCALL_MAX_ARGS) {
|
||||
pr_warn("%s called with max args %d, handling only %d\n",
|
||||
__func__, i + n, SYSCALL_MAX_ARGS);
|
||||
n = SYSCALL_MAX_ARGS - i;
|
||||
}
|
||||
|
||||
if (i == 0) {
|
||||
regs->orig_r0 = args[0];
|
||||
args++;
|
||||
i++;
|
||||
n--;
|
||||
}
|
||||
|
||||
memcpy(®s->uregs[0] + i, args, n * sizeof(args[0]));
|
||||
memcpy(®s->uregs[0] + 1, args, 5 * sizeof(args[0]));
|
||||
}
|
||||
#endif /* _ASM_NDS32_SYSCALL_H */
|
||||
|
|
|
@ -58,81 +58,25 @@ static inline void syscall_set_return_value(struct task_struct *task,
|
|||
}
|
||||
|
||||
static inline void syscall_get_arguments(struct task_struct *task,
|
||||
struct pt_regs *regs, unsigned int i, unsigned int n,
|
||||
unsigned long *args)
|
||||
struct pt_regs *regs, unsigned long *args)
|
||||
{
|
||||
BUG_ON(i + n > 6);
|
||||
|
||||
switch (i) {
|
||||
case 0:
|
||||
if (!n--)
|
||||
break;
|
||||
*args++ = regs->r4;
|
||||
case 1:
|
||||
if (!n--)
|
||||
break;
|
||||
*args++ = regs->r5;
|
||||
case 2:
|
||||
if (!n--)
|
||||
break;
|
||||
*args++ = regs->r6;
|
||||
case 3:
|
||||
if (!n--)
|
||||
break;
|
||||
*args++ = regs->r7;
|
||||
case 4:
|
||||
if (!n--)
|
||||
break;
|
||||
*args++ = regs->r8;
|
||||
case 5:
|
||||
if (!n--)
|
||||
break;
|
||||
*args++ = regs->r9;
|
||||
case 6:
|
||||
if (!n--)
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
*args++ = regs->r4;
|
||||
*args++ = regs->r5;
|
||||
*args++ = regs->r6;
|
||||
*args++ = regs->r7;
|
||||
*args++ = regs->r8;
|
||||
*args = regs->r9;
|
||||
}
|
||||
|
||||
static inline void syscall_set_arguments(struct task_struct *task,
|
||||
struct pt_regs *regs, unsigned int i, unsigned int n,
|
||||
const unsigned long *args)
|
||||
struct pt_regs *regs, const unsigned long *args)
|
||||
{
|
||||
BUG_ON(i + n > 6);
|
||||
|
||||
switch (i) {
|
||||
case 0:
|
||||
if (!n--)
|
||||
break;
|
||||
regs->r4 = *args++;
|
||||
case 1:
|
||||
if (!n--)
|
||||
break;
|
||||
regs->r5 = *args++;
|
||||
case 2:
|
||||
if (!n--)
|
||||
break;
|
||||
regs->r6 = *args++;
|
||||
case 3:
|
||||
if (!n--)
|
||||
break;
|
||||
regs->r7 = *args++;
|
||||
case 4:
|
||||
if (!n--)
|
||||
break;
|
||||
regs->r8 = *args++;
|
||||
case 5:
|
||||
if (!n--)
|
||||
break;
|
||||
regs->r9 = *args++;
|
||||
case 6:
|
||||
if (!n)
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
regs->r4 = *args++;
|
||||
regs->r5 = *args++;
|
||||
regs->r6 = *args++;
|
||||
regs->r7 = *args++;
|
||||
regs->r8 = *args++;
|
||||
regs->r9 = *args;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
|
|
@ -56,20 +56,16 @@ syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
|
|||
|
||||
static inline void
|
||||
syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
|
||||
unsigned int i, unsigned int n, unsigned long *args)
|
||||
unsigned long *args)
|
||||
{
|
||||
BUG_ON(i + n > 6);
|
||||
|
||||
memcpy(args, ®s->gpr[3 + i], n * sizeof(args[0]));
|
||||
memcpy(args, ®s->gpr[3], 6 * sizeof(args[0]));
|
||||
}
|
||||
|
||||
static inline void
|
||||
syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
|
||||
unsigned int i, unsigned int n, const unsigned long *args)
|
||||
const unsigned long *args)
|
||||
{
|
||||
BUG_ON(i + n > 6);
|
||||
|
||||
memcpy(®s->gpr[3 + i], args, n * sizeof(args[0]));
|
||||
memcpy(®s->gpr[3], args, 6 * sizeof(args[0]));
|
||||
}
|
||||
|
||||
static inline int syscall_get_arch(void)
|
||||
|
|
|
@@ -22,13 +22,14 @@ unsigned long profile_pc(struct pt_regs *);
 
 static inline unsigned long regs_return_value(struct pt_regs *regs)
 {
-	return regs->gr[20];
+	return regs->gr[28];
 }
 
 static inline void instruction_pointer_set(struct pt_regs *regs,
 						unsigned long val)
 {
-	regs->iaoq[0] = val;
+	regs->iaoq[0] = val;
+	regs->iaoq[1] = val + 4;
 }
 
 /* Query offset/name of register from its name/offset */
@ -18,29 +18,15 @@ static inline long syscall_get_nr(struct task_struct *tsk,
|
|||
}
|
||||
|
||||
static inline void syscall_get_arguments(struct task_struct *tsk,
|
||||
struct pt_regs *regs, unsigned int i,
|
||||
unsigned int n, unsigned long *args)
|
||||
struct pt_regs *regs,
|
||||
unsigned long *args)
|
||||
{
|
||||
BUG_ON(i);
|
||||
|
||||
switch (n) {
|
||||
case 6:
|
||||
args[5] = regs->gr[21];
|
||||
case 5:
|
||||
args[4] = regs->gr[22];
|
||||
case 4:
|
||||
args[3] = regs->gr[23];
|
||||
case 3:
|
||||
args[2] = regs->gr[24];
|
||||
case 2:
|
||||
args[1] = regs->gr[25];
|
||||
case 1:
|
||||
args[0] = regs->gr[26];
|
||||
case 0:
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
args[5] = regs->gr[21];
|
||||
args[4] = regs->gr[22];
|
||||
args[3] = regs->gr[23];
|
||||
args[2] = regs->gr[24];
|
||||
args[1] = regs->gr[25];
|
||||
args[0] = regs->gr[26];
|
||||
}
|
||||
|
||||
static inline long syscall_get_return_value(struct task_struct *task,
|
||||
|
|
|
@ -210,12 +210,6 @@ void __cpuidle arch_cpu_idle(void)
|
|||
|
||||
static int __init parisc_idle_init(void)
|
||||
{
|
||||
const char *marker;
|
||||
|
||||
/* check QEMU/SeaBIOS marker in PAGE0 */
|
||||
marker = (char *) &PAGE0->pad0;
|
||||
running_on_qemu = (memcmp(marker, "SeaBIOS", 8) == 0);
|
||||
|
||||
if (!running_on_qemu)
|
||||
cpu_idle_poll_ctrl(1);
|
||||
|
||||
|
|
|
@ -397,6 +397,9 @@ void __init start_parisc(void)
|
|||
int ret, cpunum;
|
||||
struct pdc_coproc_cfg coproc_cfg;
|
||||
|
||||
/* check QEMU/SeaBIOS marker in PAGE0 */
|
||||
running_on_qemu = (memcmp(&PAGE0->pad0, "SeaBIOS", 8) == 0);
|
||||
|
||||
cpunum = smp_processor_id();
|
||||
|
||||
init_cpu_topology();
|
||||
|
|
|
@ -65,22 +65,20 @@ static inline void syscall_set_return_value(struct task_struct *task,
|
|||
|
||||
static inline void syscall_get_arguments(struct task_struct *task,
|
||||
struct pt_regs *regs,
|
||||
unsigned int i, unsigned int n,
|
||||
unsigned long *args)
|
||||
{
|
||||
unsigned long val, mask = -1UL;
|
||||
|
||||
BUG_ON(i + n > 6);
|
||||
unsigned int n = 6;
|
||||
|
||||
#ifdef CONFIG_COMPAT
|
||||
if (test_tsk_thread_flag(task, TIF_32BIT))
|
||||
mask = 0xffffffff;
|
||||
#endif
|
||||
while (n--) {
|
||||
if (n == 0 && i == 0)
|
||||
if (n == 0)
|
||||
val = regs->orig_gpr3;
|
||||
else
|
||||
val = regs->gpr[3 + i + n];
|
||||
val = regs->gpr[3 + n];
|
||||
|
||||
args[n] = val & mask;
|
||||
}
|
||||
|
@ -88,15 +86,12 @@ static inline void syscall_get_arguments(struct task_struct *task,
|
|||
|
||||
static inline void syscall_set_arguments(struct task_struct *task,
|
||||
struct pt_regs *regs,
|
||||
unsigned int i, unsigned int n,
|
||||
const unsigned long *args)
|
||||
{
|
||||
BUG_ON(i + n > 6);
|
||||
memcpy(®s->gpr[3 + i], args, n * sizeof(args[0]));
|
||||
memcpy(®s->gpr[3], args, 6 * sizeof(args[0]));
|
||||
|
||||
/* Also copy the first argument into orig_gpr3 */
|
||||
if (i == 0 && n > 0)
|
||||
regs->orig_gpr3 = args[0];
|
||||
regs->orig_gpr3 = args[0];
|
||||
}
|
||||
|
||||
static inline int syscall_get_arch(void)
|
||||
|
|
|
@ -22,6 +22,7 @@
|
|||
#include <linux/kvm_host.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/kmemleak.h>
|
||||
#include <linux/kvm_para.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/of.h>
|
||||
|
@ -712,6 +713,12 @@ static void kvm_use_magic_page(void)
|
|||
|
||||
static __init void kvm_free_tmp(void)
|
||||
{
|
||||
/*
|
||||
* Inform kmemleak about the hole in the .bss section since the
|
||||
* corresponding pages will be unmapped with DEBUG_PAGEALLOC=y.
|
||||
*/
|
||||
kmemleak_free_part(&kvm_tmp[kvm_tmp_index],
|
||||
ARRAY_SIZE(kvm_tmp) - kvm_tmp_index);
|
||||
free_reserved_area(&kvm_tmp[kvm_tmp_index],
|
||||
&kvm_tmp[ARRAY_SIZE(kvm_tmp)], -1, NULL);
|
||||
}
|
||||
|
|
|
@ -72,32 +72,20 @@ static inline void syscall_set_return_value(struct task_struct *task,
|
|||
|
||||
static inline void syscall_get_arguments(struct task_struct *task,
|
||||
struct pt_regs *regs,
|
||||
unsigned int i, unsigned int n,
|
||||
unsigned long *args)
|
||||
{
|
||||
BUG_ON(i + n > 6);
|
||||
if (i == 0) {
|
||||
args[0] = regs->orig_a0;
|
||||
args++;
|
||||
i++;
|
||||
n--;
|
||||
}
|
||||
memcpy(args, ®s->a1 + i * sizeof(regs->a1), n * sizeof(args[0]));
|
||||
args[0] = regs->orig_a0;
|
||||
args++;
|
||||
memcpy(args, ®s->a1, 5 * sizeof(args[0]));
|
||||
}
|
||||
|
||||
static inline void syscall_set_arguments(struct task_struct *task,
|
||||
struct pt_regs *regs,
|
||||
unsigned int i, unsigned int n,
|
||||
const unsigned long *args)
|
||||
{
|
||||
BUG_ON(i + n > 6);
|
||||
if (i == 0) {
|
||||
regs->orig_a0 = args[0];
|
||||
args++;
|
||||
i++;
|
||||
n--;
|
||||
}
|
||||
memcpy(®s->a1 + i * sizeof(regs->a1), args, n * sizeof(regs->a0));
|
||||
regs->orig_a0 = args[0];
|
||||
args++;
|
||||
memcpy(®s->a1, args, 5 * sizeof(regs->a1));
|
||||
}
|
||||
|
||||
static inline int syscall_get_arch(void)
|
||||
|
|
|
@ -56,40 +56,32 @@ static inline void syscall_set_return_value(struct task_struct *task,
|
|||
|
||||
static inline void syscall_get_arguments(struct task_struct *task,
|
||||
struct pt_regs *regs,
|
||||
unsigned int i, unsigned int n,
|
||||
unsigned long *args)
|
||||
{
|
||||
unsigned long mask = -1UL;
|
||||
unsigned int n = 6;
|
||||
|
||||
/*
|
||||
* No arguments for this syscall, there's nothing to do.
|
||||
*/
|
||||
if (!n)
|
||||
return;
|
||||
|
||||
BUG_ON(i + n > 6);
|
||||
#ifdef CONFIG_COMPAT
|
||||
if (test_tsk_thread_flag(task, TIF_31BIT))
|
||||
mask = 0xffffffff;
|
||||
#endif
|
||||
while (n-- > 0)
|
||||
if (i + n > 0)
|
||||
args[n] = regs->gprs[2 + i + n] & mask;
|
||||
if (i == 0)
|
||||
args[0] = regs->orig_gpr2 & mask;
|
||||
if (n > 0)
|
||||
args[n] = regs->gprs[2 + n] & mask;
|
||||
|
||||
args[0] = regs->orig_gpr2 & mask;
|
||||
}
|
||||
|
||||
static inline void syscall_set_arguments(struct task_struct *task,
|
||||
struct pt_regs *regs,
|
||||
unsigned int i, unsigned int n,
|
||||
const unsigned long *args)
|
||||
{
|
||||
BUG_ON(i + n > 6);
|
||||
unsigned int n = 6;
|
||||
|
||||
while (n-- > 0)
|
||||
if (i + n > 0)
|
||||
regs->gprs[2 + i + n] = args[n];
|
||||
if (i == 0)
|
||||
regs->orig_gpr2 = args[0];
|
||||
if (n > 0)
|
||||
regs->gprs[2 + n] = args[n];
|
||||
regs->orig_gpr2 = args[0];
|
||||
}
|
||||
|
||||
static inline int syscall_get_arch(void)
|
||||
|
|
|
@@ -164,10 +164,10 @@ static struct sh_machine_vector __initmv sh_of_generic_mv = {
 
 struct sh_clk_ops;
 
-void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
+void __init __weak arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
 {
 }
 
-void __init plat_irq_setup(void)
+void __init __weak plat_irq_setup(void)
 {
 }
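Marking the generic stubs __weak lets a board or CPU family file supply the real implementation without triggering a multiple-definition error at link time. A hypothetical strong override (the body is illustrative, not from this merge) would simply redefine the symbol:

#include <linux/init.h>

/* Hypothetical board file: this strong definition replaces the empty
 * __weak stub in the generic board support code above. */
void __init plat_irq_setup(void)
{
	/* register the board's interrupt controller here */
}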
@ -48,51 +48,28 @@ static inline void syscall_set_return_value(struct task_struct *task,
|
|||
|
||||
static inline void syscall_get_arguments(struct task_struct *task,
|
||||
struct pt_regs *regs,
|
||||
unsigned int i, unsigned int n,
|
||||
unsigned long *args)
|
||||
{
|
||||
/*
|
||||
* Do this simply for now. If we need to start supporting
|
||||
* fetching arguments from arbitrary indices, this will need some
|
||||
* extra logic. Presently there are no in-tree users that depend
|
||||
* on this behaviour.
|
||||
*/
|
||||
BUG_ON(i);
|
||||
|
||||
/* Argument pattern is: R4, R5, R6, R7, R0, R1 */
|
||||
switch (n) {
|
||||
case 6: args[5] = regs->regs[1];
|
||||
case 5: args[4] = regs->regs[0];
|
||||
case 4: args[3] = regs->regs[7];
|
||||
case 3: args[2] = regs->regs[6];
|
||||
case 2: args[1] = regs->regs[5];
|
||||
case 1: args[0] = regs->regs[4];
|
||||
case 0:
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
args[5] = regs->regs[1];
|
||||
args[4] = regs->regs[0];
|
||||
args[3] = regs->regs[7];
|
||||
args[2] = regs->regs[6];
|
||||
args[1] = regs->regs[5];
|
||||
args[0] = regs->regs[4];
|
||||
}
|
||||
|
||||
static inline void syscall_set_arguments(struct task_struct *task,
|
||||
struct pt_regs *regs,
|
||||
unsigned int i, unsigned int n,
|
||||
const unsigned long *args)
|
||||
{
|
||||
/* Same note as above applies */
|
||||
BUG_ON(i);
|
||||
|
||||
switch (n) {
|
||||
case 6: regs->regs[1] = args[5];
|
||||
case 5: regs->regs[0] = args[4];
|
||||
case 4: regs->regs[7] = args[3];
|
||||
case 3: regs->regs[6] = args[2];
|
||||
case 2: regs->regs[5] = args[1];
|
||||
case 1: regs->regs[4] = args[0];
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
regs->regs[1] = args[5];
|
||||
regs->regs[0] = args[4];
|
||||
regs->regs[7] = args[3];
|
||||
regs->regs[6] = args[2];
|
||||
regs->regs[5] = args[1];
|
||||
regs->regs[4] = args[0];
|
||||
}
|
||||
|
||||
static inline int syscall_get_arch(void)
|
||||
|
|
|
@ -47,20 +47,16 @@ static inline void syscall_set_return_value(struct task_struct *task,
|
|||
|
||||
static inline void syscall_get_arguments(struct task_struct *task,
|
||||
struct pt_regs *regs,
|
||||
unsigned int i, unsigned int n,
|
||||
unsigned long *args)
|
||||
{
|
||||
BUG_ON(i + n > 6);
|
||||
memcpy(args, ®s->regs[2 + i], n * sizeof(args[0]));
|
||||
memcpy(args, ®s->regs[2], 6 * sizeof(args[0]));
|
||||
}
|
||||
|
||||
static inline void syscall_set_arguments(struct task_struct *task,
|
||||
struct pt_regs *regs,
|
||||
unsigned int i, unsigned int n,
|
||||
const unsigned long *args)
|
||||
{
|
||||
BUG_ON(i + n > 6);
|
||||
memcpy(®s->regs[2 + i], args, n * sizeof(args[0]));
|
||||
memcpy(®s->regs[2], args, 6 * sizeof(args[0]));
|
||||
}
|
||||
|
||||
static inline int syscall_get_arch(void)
|
||||
|
|
|
@ -96,11 +96,11 @@ static inline void syscall_set_return_value(struct task_struct *task,
|
|||
|
||||
static inline void syscall_get_arguments(struct task_struct *task,
|
||||
struct pt_regs *regs,
|
||||
unsigned int i, unsigned int n,
|
||||
unsigned long *args)
|
||||
{
|
||||
int zero_extend = 0;
|
||||
unsigned int j;
|
||||
unsigned int n = 6;
|
||||
|
||||
#ifdef CONFIG_SPARC64
|
||||
if (test_tsk_thread_flag(task, TIF_32BIT))
|
||||
|
@ -108,7 +108,7 @@ static inline void syscall_get_arguments(struct task_struct *task,
|
|||
#endif
|
||||
|
||||
for (j = 0; j < n; j++) {
|
||||
unsigned long val = regs->u_regs[UREG_I0 + i + j];
|
||||
unsigned long val = regs->u_regs[UREG_I0 + j];
|
||||
|
||||
if (zero_extend)
|
||||
args[j] = (u32) val;
|
||||
|
@ -119,13 +119,12 @@ static inline void syscall_get_arguments(struct task_struct *task,
|
|||
|
||||
static inline void syscall_set_arguments(struct task_struct *task,
|
||||
struct pt_regs *regs,
|
||||
unsigned int i, unsigned int n,
|
||||
const unsigned long *args)
|
||||
{
|
||||
unsigned int j;
|
||||
unsigned int i;
|
||||
|
||||
for (j = 0; j < n; j++)
|
||||
regs->u_regs[UREG_I0 + i + j] = args[j];
|
||||
for (i = 0; i < 6; i++)
|
||||
regs->u_regs[UREG_I0 + i] = args[i];
|
||||
}
|
||||
|
||||
static inline int syscall_get_arch(void)
|
||||
|
|
|
@ -53,84 +53,30 @@ static inline void syscall_set_return_value(struct task_struct *task,
|
|||
|
||||
static inline void syscall_get_arguments(struct task_struct *task,
|
||||
struct pt_regs *regs,
|
||||
unsigned int i, unsigned int n,
|
||||
unsigned long *args)
|
||||
{
|
||||
const struct uml_pt_regs *r = ®s->regs;
|
||||
|
||||
switch (i) {
|
||||
case 0:
|
||||
if (!n--)
|
||||
break;
|
||||
*args++ = UPT_SYSCALL_ARG1(r);
|
||||
case 1:
|
||||
if (!n--)
|
||||
break;
|
||||
*args++ = UPT_SYSCALL_ARG2(r);
|
||||
case 2:
|
||||
if (!n--)
|
||||
break;
|
||||
*args++ = UPT_SYSCALL_ARG3(r);
|
||||
case 3:
|
||||
if (!n--)
|
||||
break;
|
||||
*args++ = UPT_SYSCALL_ARG4(r);
|
||||
case 4:
|
||||
if (!n--)
|
||||
break;
|
||||
*args++ = UPT_SYSCALL_ARG5(r);
|
||||
case 5:
|
||||
if (!n--)
|
||||
break;
|
||||
*args++ = UPT_SYSCALL_ARG6(r);
|
||||
case 6:
|
||||
if (!n--)
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
break;
|
||||
}
|
||||
*args++ = UPT_SYSCALL_ARG1(r);
|
||||
*args++ = UPT_SYSCALL_ARG2(r);
|
||||
*args++ = UPT_SYSCALL_ARG3(r);
|
||||
*args++ = UPT_SYSCALL_ARG4(r);
|
||||
*args++ = UPT_SYSCALL_ARG5(r);
|
||||
*args = UPT_SYSCALL_ARG6(r);
|
||||
}
|
||||
|
||||
static inline void syscall_set_arguments(struct task_struct *task,
|
||||
struct pt_regs *regs,
|
||||
unsigned int i, unsigned int n,
|
||||
const unsigned long *args)
|
||||
{
|
||||
struct uml_pt_regs *r = ®s->regs;
|
||||
|
||||
switch (i) {
|
||||
case 0:
|
||||
if (!n--)
|
||||
break;
|
||||
UPT_SYSCALL_ARG1(r) = *args++;
|
||||
case 1:
|
||||
if (!n--)
|
||||
break;
|
||||
UPT_SYSCALL_ARG2(r) = *args++;
|
||||
case 2:
|
||||
if (!n--)
|
||||
break;
|
||||
UPT_SYSCALL_ARG3(r) = *args++;
|
||||
case 3:
|
||||
if (!n--)
|
||||
break;
|
||||
UPT_SYSCALL_ARG4(r) = *args++;
|
||||
case 4:
|
||||
if (!n--)
|
||||
break;
|
||||
UPT_SYSCALL_ARG5(r) = *args++;
|
||||
case 5:
|
||||
if (!n--)
|
||||
break;
|
||||
UPT_SYSCALL_ARG6(r) = *args++;
|
||||
case 6:
|
||||
if (!n--)
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
break;
|
||||
}
|
||||
UPT_SYSCALL_ARG1(r) = *args++;
|
||||
UPT_SYSCALL_ARG2(r) = *args++;
|
||||
UPT_SYSCALL_ARG3(r) = *args++;
|
||||
UPT_SYSCALL_ARG4(r) = *args++;
|
||||
UPT_SYSCALL_ARG5(r) = *args++;
|
||||
UPT_SYSCALL_ARG6(r) = *args;
|
||||
}
|
||||
|
||||
/* See arch/x86/um/asm/syscall.h for syscall_get_arch() definition. */
|
||||
|
|
|
@ -91,11 +91,9 @@ static inline void syscall_set_return_value(struct task_struct *task,
|
|||
|
||||
static inline void syscall_get_arguments(struct task_struct *task,
|
||||
struct pt_regs *regs,
|
||||
unsigned int i, unsigned int n,
|
||||
unsigned long *args)
|
||||
{
|
||||
BUG_ON(i + n > 6);
|
||||
memcpy(args, ®s->bx + i, n * sizeof(args[0]));
|
||||
memcpy(args, ®s->bx, 6 * sizeof(args[0]));
|
||||
}
|
||||
|
||||
static inline void syscall_set_arguments(struct task_struct *task,
|
||||
|
@@ -116,124 +114,50 @@ static inline int syscall_get_arch(void)

static inline void syscall_get_arguments(struct task_struct *task,
struct pt_regs *regs,
unsigned int i, unsigned int n,
unsigned long *args)
{
# ifdef CONFIG_IA32_EMULATION
if (task->thread_info.status & TS_COMPAT)
switch (i) {
case 0:
if (!n--) break;
*args++ = regs->bx;
case 1:
if (!n--) break;
*args++ = regs->cx;
case 2:
if (!n--) break;
*args++ = regs->dx;
case 3:
if (!n--) break;
*args++ = regs->si;
case 4:
if (!n--) break;
*args++ = regs->di;
case 5:
if (!n--) break;
*args++ = regs->bp;
case 6:
if (!n--) break;
default:
BUG();
break;
}
else
if (task->thread_info.status & TS_COMPAT) {
*args++ = regs->bx;
*args++ = regs->cx;
*args++ = regs->dx;
*args++ = regs->si;
*args++ = regs->di;
*args = regs->bp;
} else
# endif
switch (i) {
case 0:
if (!n--) break;
*args++ = regs->di;
case 1:
if (!n--) break;
*args++ = regs->si;
case 2:
if (!n--) break;
*args++ = regs->dx;
case 3:
if (!n--) break;
*args++ = regs->r10;
case 4:
if (!n--) break;
*args++ = regs->r8;
case 5:
if (!n--) break;
*args++ = regs->r9;
case 6:
if (!n--) break;
default:
BUG();
break;
}
{
*args++ = regs->di;
*args++ = regs->si;
*args++ = regs->dx;
*args++ = regs->r10;
*args++ = regs->r8;
*args = regs->r9;
}
}

static inline void syscall_set_arguments(struct task_struct *task,
struct pt_regs *regs,
unsigned int i, unsigned int n,
const unsigned long *args)
{
# ifdef CONFIG_IA32_EMULATION
if (task->thread_info.status & TS_COMPAT)
switch (i) {
case 0:
if (!n--) break;
regs->bx = *args++;
case 1:
if (!n--) break;
regs->cx = *args++;
case 2:
if (!n--) break;
regs->dx = *args++;
case 3:
if (!n--) break;
regs->si = *args++;
case 4:
if (!n--) break;
regs->di = *args++;
case 5:
if (!n--) break;
regs->bp = *args++;
case 6:
if (!n--) break;
default:
BUG();
break;
}
else
if (task->thread_info.status & TS_COMPAT) {
regs->bx = *args++;
regs->cx = *args++;
regs->dx = *args++;
regs->si = *args++;
regs->di = *args++;
regs->bp = *args;
} else
# endif
switch (i) {
case 0:
if (!n--) break;
regs->di = *args++;
case 1:
if (!n--) break;
regs->si = *args++;
case 2:
if (!n--) break;
regs->dx = *args++;
case 3:
if (!n--) break;
regs->r10 = *args++;
case 4:
if (!n--) break;
regs->r8 = *args++;
case 5:
if (!n--) break;
regs->r9 = *args++;
case 6:
if (!n--) break;
default:
BUG();
break;
}
{
regs->di = *args++;
regs->si = *args++;
regs->dx = *args++;
regs->r10 = *args++;
regs->r8 = *args++;
regs->r9 = *args;
}
}

static inline int syscall_get_arch(void)

@@ -206,6 +206,9 @@ xen_single_call(unsigned int call,
__HYPERCALL_DECLS;
__HYPERCALL_5ARG(a1, a2, a3, a4, a5);

if (call >= PAGE_SIZE / sizeof(hypercall_page[0]))
return -EINVAL;

asm volatile(CALL_NOSPEC
: __HYPERCALL_5PARAM
: [thunk_target] "a" (&hypercall_page[call])

@@ -6422,11 +6422,11 @@ e_free:
return ret;
}

static int get_num_contig_pages(int idx, struct page **inpages,
unsigned long npages)
static unsigned long get_num_contig_pages(unsigned long idx,
struct page **inpages, unsigned long npages)
{
unsigned long paddr, next_paddr;
int i = idx + 1, pages = 1;
unsigned long i = idx + 1, pages = 1;

/* find the number of contiguous pages starting from idx */
paddr = __sme_page_pa(inpages[idx]);

@@ -6445,12 +6445,12 @@ static int get_num_contig_pages(int idx, struct page **inpages,

static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
unsigned long vaddr, vaddr_end, next_vaddr, npages, size;
unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;
struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
struct kvm_sev_launch_update_data params;
struct sev_data_launch_update_data *data;
struct page **inpages;
int i, ret, pages;
int ret;

if (!sev_guest(kvm))
return -ENOTTY;

@@ -6799,7 +6799,8 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
struct page **src_p, **dst_p;
struct kvm_sev_dbg debug;
unsigned long n;
int ret, size;
unsigned int size;
int ret;

if (!sev_guest(kvm))
return -ENOTTY;

@@ -6807,6 +6808,11 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug)))
return -EFAULT;

if (!debug.len || debug.src_uaddr + debug.len < debug.src_uaddr)
return -EINVAL;
if (!debug.dst_uaddr)
return -EINVAL;

vaddr = debug.src_uaddr;
size = debug.len;
vaddr_end = vaddr + size;

@@ -6857,8 +6863,8 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
dst_vaddr,
len, &argp->error);

sev_unpin_memory(kvm, src_p, 1);
sev_unpin_memory(kvm, dst_p, 1);
sev_unpin_memory(kvm, src_p, n);
sev_unpin_memory(kvm, dst_p, n);

if (ret)
goto err;

@@ -500,6 +500,17 @@ static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1,
}
}

static inline void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap) {
int msr;

for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
unsigned word = msr / BITS_PER_LONG;

msr_bitmap[word] = ~0;
msr_bitmap[word + (0x800 / sizeof(long))] = ~0;
}
}

/*
* Merge L0's and L1's MSR bitmap, return false to indicate that
* we do not use the hardware.

@@ -541,39 +552,44 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
return false;

msr_bitmap_l1 = (unsigned long *)kmap(page);
if (nested_cpu_has_apic_reg_virt(vmcs12)) {
/*
* L0 need not intercept reads for MSRs between 0x800 and 0x8ff, it
* just lets the processor take the value from the virtual-APIC page;
* take those 256 bits directly from the L1 bitmap.
*/
for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
unsigned word = msr / BITS_PER_LONG;
msr_bitmap_l0[word] = msr_bitmap_l1[word];
msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0;
}
} else {
for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
unsigned word = msr / BITS_PER_LONG;
msr_bitmap_l0[word] = ~0;
msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0;
}
}

nested_vmx_disable_intercept_for_msr(
msr_bitmap_l1, msr_bitmap_l0,
X2APIC_MSR(APIC_TASKPRI),
MSR_TYPE_W);
/*
* To keep the control flow simple, pay eight 8-byte writes (sixteen
* 4-byte writes on 32-bit systems) up front to enable intercepts for
* the x2APIC MSR range and selectively disable them below.
*/
enable_x2apic_msr_intercepts(msr_bitmap_l0);

if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
if (nested_cpu_has_apic_reg_virt(vmcs12)) {
/*
* L0 need not intercept reads for MSRs between 0x800
* and 0x8ff, it just lets the processor take the value
* from the virtual-APIC page; take those 256 bits
* directly from the L1 bitmap.
*/
for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
unsigned word = msr / BITS_PER_LONG;

msr_bitmap_l0[word] = msr_bitmap_l1[word];
}
}

if (nested_cpu_has_vid(vmcs12)) {
nested_vmx_disable_intercept_for_msr(
msr_bitmap_l1, msr_bitmap_l0,
X2APIC_MSR(APIC_EOI),
MSR_TYPE_W);
nested_vmx_disable_intercept_for_msr(
msr_bitmap_l1, msr_bitmap_l0,
X2APIC_MSR(APIC_SELF_IPI),
MSR_TYPE_W);
X2APIC_MSR(APIC_TASKPRI),
MSR_TYPE_R | MSR_TYPE_W);

if (nested_cpu_has_vid(vmcs12)) {
nested_vmx_disable_intercept_for_msr(
msr_bitmap_l1, msr_bitmap_l0,
X2APIC_MSR(APIC_EOI),
MSR_TYPE_W);
nested_vmx_disable_intercept_for_msr(
msr_bitmap_l1, msr_bitmap_l0,
X2APIC_MSR(APIC_SELF_IPI),
MSR_TYPE_W);
}
}

if (spec_ctrl)

@@ -187,15 +187,18 @@ struct thread_struct {

/* Clearing a0 terminates the backtrace. */
#define start_thread(regs, new_pc, new_sp) \
memset(regs, 0, sizeof(*regs)); \
regs->pc = new_pc; \
regs->ps = USER_PS_VALUE; \
regs->areg[1] = new_sp; \
regs->areg[0] = 0; \
regs->wmask = 1; \
regs->depc = 0; \
regs->windowbase = 0; \
regs->windowstart = 1;
do { \
memset((regs), 0, sizeof(*(regs))); \
(regs)->pc = (new_pc); \
(regs)->ps = USER_PS_VALUE; \
(regs)->areg[1] = (new_sp); \
(regs)->areg[0] = 0; \
(regs)->wmask = 1; \
(regs)->depc = 0; \
(regs)->windowbase = 0; \
(regs)->windowstart = 1; \
(regs)->syscall = NO_SYSCALL; \
} while (0)

/* Forward declaration */
struct task_struct;

@@ -59,45 +59,24 @@ static inline void syscall_set_return_value(struct task_struct *task,

static inline void syscall_get_arguments(struct task_struct *task,
struct pt_regs *regs,
unsigned int i, unsigned int n,
unsigned long *args)
{
static const unsigned int reg[] = XTENSA_SYSCALL_ARGUMENT_REGS;
unsigned int j;
unsigned int i;

if (n == 0)
return;

WARN_ON_ONCE(i + n > SYSCALL_MAX_ARGS);

for (j = 0; j < n; ++j) {
if (i + j < SYSCALL_MAX_ARGS)
args[j] = regs->areg[reg[i + j]];
else
args[j] = 0;
}
for (i = 0; i < 6; ++i)
args[i] = regs->areg[reg[i]];
}

static inline void syscall_set_arguments(struct task_struct *task,
struct pt_regs *regs,
unsigned int i, unsigned int n,
const unsigned long *args)
{
static const unsigned int reg[] = XTENSA_SYSCALL_ARGUMENT_REGS;
unsigned int j;
unsigned int i;

if (n == 0)
return;

if (WARN_ON_ONCE(i + n > SYSCALL_MAX_ARGS)) {
if (i < SYSCALL_MAX_ARGS)
n = SYSCALL_MAX_ARGS - i;
else
return;
}

for (j = 0; j < n; ++j)
regs->areg[reg[i + j]] = args[j];
for (i = 0; i < 6; ++i)
regs->areg[reg[i]] = args[i];
}

asmlinkage long xtensa_rt_sigreturn(struct pt_regs*);

@@ -1860,6 +1860,8 @@ ENTRY(system_call)
l32i a7, a2, PT_SYSCALL

1:
s32i a7, a1, 4

/* syscall = sys_call_table[syscall_nr] */

movi a4, sys_call_table

@@ -1893,8 +1895,12 @@ ENTRY(system_call)
retw

1:
l32i a4, a1, 4
l32i a3, a2, PT_SYSCALL
s32i a4, a2, PT_SYSCALL
mov a6, a2
call4 do_syscall_trace_leave
s32i a3, a2, PT_SYSCALL
retw

ENDPROC(system_call)

@@ -253,10 +253,14 @@ static int return_address_cb(struct stackframe *frame, void *data)
return 1;
}

/*
* level == 0 is for the return address from the caller of this function,
* not from this function itself.
*/
unsigned long return_address(unsigned level)
{
struct return_addr_data r = {
.skip = level + 1,
.skip = level,
};
walk_stackframe(stack_pointer(NULL), return_address_cb, &r);
return r.addr;

@@ -33,7 +33,7 @@ static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages)

pte = memblock_alloc_low(n_pages * sizeof(pte_t), PAGE_SIZE);
if (!pte)
panic("%s: Failed to allocate %zu bytes align=%lx\n",
panic("%s: Failed to allocate %lu bytes align=%lx\n",
__func__, n_pages * sizeof(pte_t), PAGE_SIZE);

for (i = 0; i < n_pages; ++i)

@@ -674,7 +674,7 @@ static bool bfq_symmetric_scenario(struct bfq_data *bfqd)
* at least two nodes.
*/
return !(varied_queue_weights || multiple_classes_busy
#ifdef BFQ_GROUP_IOSCHED_ENABLED
#ifdef CONFIG_BFQ_GROUP_IOSCHED
|| bfqd->num_groups_with_pending_reqs > 0
#endif
);

@@ -1012,7 +1012,7 @@ static void __bfq_activate_entity(struct bfq_entity *entity,
entity->on_st = true;
}

#ifdef BFQ_GROUP_IOSCHED_ENABLED
#ifdef CONFIG_BFQ_GROUP_IOSCHED
if (!bfq_entity_to_bfqq(entity)) { /* bfq_group */
struct bfq_group *bfqg =
container_of(entity, struct bfq_group, entity);

@@ -1245,8 +1245,6 @@ static int blk_cloned_rq_check_limits(struct request_queue *q,
*/
blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
{
blk_qc_t unused;

if (blk_cloned_rq_check_limits(q, rq))
return BLK_STS_IOERR;

@@ -1262,7 +1260,7 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *
* bypass a potential scheduler on the bottom device for
* insert.
*/
return blk_mq_try_issue_directly(rq->mq_hctx, rq, &unused, true, true);
return blk_mq_request_issue_directly(rq, true);
}
EXPORT_SYMBOL_GPL(blk_insert_cloned_request);

@@ -423,10 +423,12 @@ void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
* busy in case of 'none' scheduler, and this way may save
* us one extra enqueue & dequeue to sw queue.
*/
if (!hctx->dispatch_busy && !e && !run_queue_async)
if (!hctx->dispatch_busy && !e && !run_queue_async) {
blk_mq_try_issue_list_directly(hctx, list);
else
blk_mq_insert_requests(hctx, ctx, list);
if (list_empty(list))
return;
}
blk_mq_insert_requests(hctx, ctx, list);
}

blk_mq_run_hw_queue(hctx, run_queue_async);

block/blk-mq.c
@@ -1711,11 +1711,12 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
unsigned int depth;

list_splice_init(&plug->mq_list, &list);
plug->rq_count = 0;

if (plug->rq_count > 2 && plug->multiple_queues)
list_sort(NULL, &list, plug_rq_cmp);

plug->rq_count = 0;

this_q = NULL;
this_hctx = NULL;
this_ctx = NULL;

@@ -1800,74 +1801,76 @@ static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
return ret;
}

blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
struct request *rq,
blk_qc_t *cookie,
bool bypass, bool last)
bool bypass_insert, bool last)
{
struct request_queue *q = rq->q;
bool run_queue = true;
blk_status_t ret = BLK_STS_RESOURCE;
int srcu_idx;
bool force = false;

hctx_lock(hctx, &srcu_idx);
/*
* hctx_lock is needed before checking quiesced flag.
* RCU or SRCU read lock is needed before checking quiesced flag.
*
* When queue is stopped or quiesced, ignore 'bypass', insert
* and return BLK_STS_OK to caller, and avoid driver to try to
* dispatch again.
* When queue is stopped or quiesced, ignore 'bypass_insert' from
* blk_mq_request_issue_directly(), and return BLK_STS_OK to caller,
* and avoid driver to try to dispatch again.
*/
if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q))) {
if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
run_queue = false;
bypass = false;
goto out_unlock;
bypass_insert = false;
goto insert;
}

if (unlikely(q->elevator && !bypass))
goto out_unlock;
if (q->elevator && !bypass_insert)
goto insert;

if (!blk_mq_get_dispatch_budget(hctx))
goto out_unlock;
goto insert;

if (!blk_mq_get_driver_tag(rq)) {
blk_mq_put_dispatch_budget(hctx);
goto out_unlock;
goto insert;
}

/*
* Always add a request that has been through
* .queue_rq() to the hardware dispatch list.
*/
force = true;
ret = __blk_mq_issue_directly(hctx, rq, cookie, last);
out_unlock:
return __blk_mq_issue_directly(hctx, rq, cookie, last);
insert:
if (bypass_insert)
return BLK_STS_RESOURCE;

blk_mq_request_bypass_insert(rq, run_queue);
return BLK_STS_OK;
}

static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
struct request *rq, blk_qc_t *cookie)
{
blk_status_t ret;
int srcu_idx;

might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);

hctx_lock(hctx, &srcu_idx);

ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true);
if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
blk_mq_request_bypass_insert(rq, true);
else if (ret != BLK_STS_OK)
blk_mq_end_request(rq, ret);

hctx_unlock(hctx, srcu_idx);
}

blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
{
blk_status_t ret;
int srcu_idx;
blk_qc_t unused_cookie;
struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

hctx_lock(hctx, &srcu_idx);
ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true, last);
hctx_unlock(hctx, srcu_idx);
switch (ret) {
case BLK_STS_OK:
break;
case BLK_STS_DEV_RESOURCE:
case BLK_STS_RESOURCE:
if (force) {
blk_mq_request_bypass_insert(rq, run_queue);
/*
* We have to return BLK_STS_OK for the DM
* to avoid livelock. Otherwise, we return
* the real result to indicate whether the
* request is direct-issued successfully.
*/
ret = bypass ? BLK_STS_OK : ret;
} else if (!bypass) {
blk_mq_sched_insert_request(rq, false,
run_queue, false);
}
break;
default:
if (!bypass)
blk_mq_end_request(rq, ret);
break;
}

return ret;
}

@@ -1875,20 +1878,22 @@ out_unlock:
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
struct list_head *list)
{
blk_qc_t unused;
blk_status_t ret = BLK_STS_OK;

while (!list_empty(list)) {
blk_status_t ret;
struct request *rq = list_first_entry(list, struct request,
queuelist);

list_del_init(&rq->queuelist);
if (ret == BLK_STS_OK)
ret = blk_mq_try_issue_directly(hctx, rq, &unused,
false,
ret = blk_mq_request_issue_directly(rq, list_empty(list));
if (ret != BLK_STS_OK) {
if (ret == BLK_STS_RESOURCE ||
ret == BLK_STS_DEV_RESOURCE) {
blk_mq_request_bypass_insert(rq,
list_empty(list));
else
blk_mq_sched_insert_request(rq, false, true, false);
break;
}
blk_mq_end_request(rq, ret);
}
}

/*

@@ -1896,7 +1901,7 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
* the driver there was more coming, but that turned out to
* be a lie.
*/
if (ret != BLK_STS_OK && hctx->queue->mq_ops->commit_rqs)
if (!list_empty(list) && hctx->queue->mq_ops->commit_rqs)
hctx->queue->mq_ops->commit_rqs(hctx);
}

@@ -2003,19 +2008,21 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
plug->rq_count--;
}
blk_add_rq_to_plug(plug, rq);
trace_block_plug(q);

blk_mq_put_ctx(data.ctx);

if (same_queue_rq) {
data.hctx = same_queue_rq->mq_hctx;
trace_block_unplug(q, 1, true);
blk_mq_try_issue_directly(data.hctx, same_queue_rq,
&cookie, false, true);
&cookie);
}
} else if ((q->nr_hw_queues > 1 && is_sync) || (!q->elevator &&
!data.hctx->dispatch_busy)) {
blk_mq_put_ctx(data.ctx);
blk_mq_bio_to_request(rq, bio);
blk_mq_try_issue_directly(data.hctx, rq, &cookie, false, true);
blk_mq_try_issue_directly(data.hctx, rq, &cookie);
} else {
blk_mq_put_ctx(data.ctx);
blk_mq_bio_to_request(rq, bio);

@@ -2332,7 +2339,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
return 0;

free_fq:
kfree(hctx->fq);
blk_free_flush_queue(hctx->fq);
exit_hctx:
if (set->ops->exit_hctx)
set->ops->exit_hctx(hctx, hctx_idx);

@@ -70,10 +70,8 @@ void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
struct list_head *list);

blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
struct request *rq,
blk_qc_t *cookie,
bool bypass, bool last);
/* Used by blk_insert_cloned_request() to issue request directly */
blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last);
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
struct list_head *list);

@@ -1748,6 +1748,11 @@ static int __init null_init(void)
return -EINVAL;
}

if (g_home_node != NUMA_NO_NODE && g_home_node >= nr_online_nodes) {
pr_err("null_blk: invalid home_node value\n");
g_home_node = NUMA_NO_NODE;
}

if (g_queue_mode == NULL_Q_RQ) {
pr_err("null_blk: legacy IO path no longer available\n");
return -EINVAL;

@@ -314,6 +314,7 @@ static void pcd_init_units(void)
disk->queue = blk_mq_init_sq_queue(&cd->tag_set, &pcd_mq_ops,
1, BLK_MQ_F_SHOULD_MERGE);
if (IS_ERR(disk->queue)) {
put_disk(disk);
disk->queue = NULL;
continue;
}

@@ -750,6 +751,8 @@ static int pcd_detect(void)

printk("%s: No CD-ROM drive found\n", name);
for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
if (!cd->disk)
continue;
blk_cleanup_queue(cd->disk->queue);
cd->disk->queue = NULL;
blk_mq_free_tag_set(&cd->tag_set);

@@ -1010,8 +1013,14 @@ static int __init pcd_init(void)
pcd_probe_capabilities();

if (register_blkdev(major, name)) {
for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++)
for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
if (!cd->disk)
continue;

blk_cleanup_queue(cd->disk->queue);
blk_mq_free_tag_set(&cd->tag_set);
put_disk(cd->disk);
}
return -EBUSY;
}

@@ -1032,6 +1041,9 @@ static void __exit pcd_exit(void)
int unit;

for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
if (!cd->disk)
continue;

if (cd->present) {
del_gendisk(cd->disk);
pi_release(cd->pi);

@@ -762,6 +762,8 @@ static int pf_detect(void)

printk("%s: No ATAPI disk detected\n", name);
for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
if (!pf->disk)
continue;
blk_cleanup_queue(pf->disk->queue);
pf->disk->queue = NULL;
blk_mq_free_tag_set(&pf->tag_set);

@@ -1029,8 +1031,13 @@ static int __init pf_init(void)
pf_busy = 0;

if (register_blkdev(major, name)) {
for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++)
for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
if (!pf->disk)
continue;
blk_cleanup_queue(pf->disk->queue);
blk_mq_free_tag_set(&pf->tag_set);
put_disk(pf->disk);
}
return -EBUSY;
}

@@ -1051,6 +1058,9 @@ static void __exit pf_exit(void)
int unit;
unregister_blkdev(major, name);
for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
if (!pf->disk)
continue;

if (pf->present)
del_gendisk(pf->disk);

@@ -1090,6 +1090,8 @@ static int ace_setup(struct ace_device *ace)
return 0;

err_read:
/* prevent double queue cleanup */
ace->gd->queue = NULL;
put_disk(ace->gd);
err_alloc_disk:
blk_cleanup_queue(ace->queue);

@@ -37,8 +37,8 @@
*
* Returns size of the event. If it is an invalid event, returns 0.
*/
static int calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
struct tcg_pcr_event *event_header)
static size_t calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
struct tcg_pcr_event *event_header)
{
struct tcg_efi_specid_event_head *efispecid;
struct tcg_event_field *event_field;

@@ -233,12 +233,19 @@ __poll_t tpm_common_poll(struct file *file, poll_table *wait)
__poll_t mask = 0;

poll_wait(file, &priv->async_wait, wait);
mutex_lock(&priv->buffer_mutex);

if (!priv->response_read || priv->response_length)
/*
* The response_length indicates if there is still response
* (or part of it) to be consumed. Partial reads decrease it
* by the number of bytes read, and write resets it the zero.
*/
if (priv->response_length)
mask = EPOLLIN | EPOLLRDNORM;
else
mask = EPOLLOUT | EPOLLWRNORM;

mutex_unlock(&priv->buffer_mutex);
return mask;
}

@@ -402,15 +402,13 @@ int tpm_pm_suspend(struct device *dev)
if (chip->flags & TPM_CHIP_FLAG_ALWAYS_POWERED)
return 0;

if (chip->flags & TPM_CHIP_FLAG_TPM2) {
mutex_lock(&chip->tpm_mutex);
if (!tpm_chip_start(chip)) {
if (!tpm_chip_start(chip)) {
if (chip->flags & TPM_CHIP_FLAG_TPM2)
tpm2_shutdown(chip, TPM2_SU_STATE);
tpm_chip_stop(chip);
}
mutex_unlock(&chip->tpm_mutex);
} else {
rc = tpm1_pm_suspend(chip, tpm_suspend_pcr);
else
rc = tpm1_pm_suspend(chip, tpm_suspend_pcr);

tpm_chip_stop(chip);
}

return rc;

@@ -865,19 +865,18 @@ static int ahash_update_ctx(struct ahash_request *req)
if (ret)
goto unmap_ctx;

if (mapped_nents) {
if (mapped_nents)
sg_to_sec4_sg_last(req->src, mapped_nents,
edesc->sec4_sg + sec4_sg_src_index,
0);
if (*next_buflen)
scatterwalk_map_and_copy(next_buf, req->src,
to_hash - *buflen,
*next_buflen, 0);
} else {
else
sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
1);
}

if (*next_buflen)
scatterwalk_map_and_copy(next_buf, req->src,
to_hash - *buflen,
*next_buflen, 0);
desc = edesc->hw_desc;

edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,

@@ -1169,11 +1169,13 @@ static int i2c_imx_probe(struct platform_device *pdev)
/* Init DMA config if supported */
ret = i2c_imx_dma_request(i2c_imx, phy_addr);
if (ret < 0)
goto clk_notifier_unregister;
goto del_adapter;

dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n");
return 0; /* Return OK */

del_adapter:
i2c_del_adapter(&i2c_imx->adapter);
clk_notifier_unregister:
clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb);
rpm_disable:

@@ -115,6 +115,7 @@ struct mapped_device {
struct srcu_struct io_barrier;
};

void disable_discard(struct mapped_device *md);
void disable_write_same(struct mapped_device *md);
void disable_write_zeroes(struct mapped_device *md);

@@ -36,7 +36,7 @@ struct dm_device {
struct list_head list;
};

const char *dm_allowed_targets[] __initconst = {
const char * const dm_allowed_targets[] __initconst = {
"crypt",
"delay",
"linear",

@@ -913,7 +913,7 @@ static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsig
static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2)
{
return range1->logical_sector < range2->logical_sector + range2->n_sectors &&
range2->logical_sector + range2->n_sectors > range2->logical_sector;
range1->logical_sector + range1->n_sectors > range2->logical_sector;
}

static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting)

@@ -959,8 +959,6 @@ static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity
struct dm_integrity_range *last_range =
list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry);
struct task_struct *last_range_task;
if (!ranges_overlap(range, last_range))
break;
last_range_task = last_range->task;
list_del(&last_range->wait_entry);
if (!add_new_range(ic, last_range, false)) {

@@ -3185,7 +3183,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
journal_watermark = val;
else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1)
sync_msec = val;
else if (!memcmp(opt_string, "meta_device:", strlen("meta_device:"))) {
else if (!strncmp(opt_string, "meta_device:", strlen("meta_device:"))) {
if (ic->meta_dev) {
dm_put_device(ti, ic->meta_dev);
ic->meta_dev = NULL;

@@ -3204,17 +3202,17 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
}
ic->sectors_per_block = val >> SECTOR_SHIFT;
} else if (!memcmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
} else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
"Invalid internal_hash argument");
if (r)
goto bad;
} else if (!memcmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
} else if (!strncmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error,
"Invalid journal_crypt argument");
if (r)
goto bad;
} else if (!memcmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
} else if (!strncmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error,
"Invalid journal_mac argument");
if (r)

@@ -3616,7 +3614,7 @@ static struct target_type integrity_target = {
.io_hints = dm_integrity_io_hints,
};

int __init dm_integrity_init(void)
static int __init dm_integrity_init(void)
{
int r;

@@ -3635,7 +3633,7 @@ int __init dm_integrity_init(void)
return r;
}

void dm_integrity_exit(void)
static void __exit dm_integrity_exit(void)
{
dm_unregister_target(&integrity_target);
kmem_cache_destroy(journal_io_cache);

@@ -222,11 +222,14 @@ static void dm_done(struct request *clone, blk_status_t error, bool mapped)
}

if (unlikely(error == BLK_STS_TARGET)) {
if (req_op(clone) == REQ_OP_WRITE_SAME &&
!clone->q->limits.max_write_same_sectors)
if (req_op(clone) == REQ_OP_DISCARD &&
!clone->q->limits.max_discard_sectors)
disable_discard(tio->md);
else if (req_op(clone) == REQ_OP_WRITE_SAME &&
!clone->q->limits.max_write_same_sectors)
disable_write_same(tio->md);
if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
!clone->q->limits.max_write_zeroes_sectors)
else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
!clone->q->limits.max_write_zeroes_sectors)
disable_write_zeroes(tio->md);
}

@@ -1844,6 +1844,36 @@ static bool dm_table_supports_secure_erase(struct dm_table *t)
return true;
}

static int device_requires_stable_pages(struct dm_target *ti,
struct dm_dev *dev, sector_t start,
sector_t len, void *data)
{
struct request_queue *q = bdev_get_queue(dev->bdev);

return q && bdi_cap_stable_pages_required(q->backing_dev_info);
}

/*
* If any underlying device requires stable pages, a table must require
* them as well. Only targets that support iterate_devices are considered:
* don't want error, zero, etc to require stable pages.
*/
static bool dm_table_requires_stable_pages(struct dm_table *t)
{
struct dm_target *ti;
unsigned i;

for (i = 0; i < dm_table_get_num_targets(t); i++) {
ti = dm_table_get_target(t, i);

if (ti->type->iterate_devices &&
ti->type->iterate_devices(ti, device_requires_stable_pages, NULL))
return true;
}

return false;
}

void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
struct queue_limits *limits)
{

@@ -1896,6 +1926,15 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,

dm_table_verify_integrity(t);

/*
* Some devices don't use blk_integrity but still want stable pages
* because they do their own checksumming.
*/
if (dm_table_requires_stable_pages(t))
q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
else
q->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES;

/*
* Determine whether or not this queue's I/O timings contribute
* to the entropy pool, Only request-based targets use this.

@@ -945,6 +945,15 @@ static void dec_pending(struct dm_io *io, blk_status_t error)
}
}

void disable_discard(struct mapped_device *md)
{
struct queue_limits *limits = dm_get_queue_limits(md);

/* device doesn't really support DISCARD, disable it */
limits->max_discard_sectors = 0;
blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue);
}

void disable_write_same(struct mapped_device *md)
{
struct queue_limits *limits = dm_get_queue_limits(md);

@@ -970,11 +979,14 @@ static void clone_endio(struct bio *bio)
dm_endio_fn endio = tio->ti->type->end_io;

if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
if (bio_op(bio) == REQ_OP_WRITE_SAME &&
!bio->bi_disk->queue->limits.max_write_same_sectors)
if (bio_op(bio) == REQ_OP_DISCARD &&
!bio->bi_disk->queue->limits.max_discard_sectors)
disable_discard(md);
else if (bio_op(bio) == REQ_OP_WRITE_SAME &&
!bio->bi_disk->queue->limits.max_write_same_sectors)
disable_write_same(md);
if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
!bio->bi_disk->queue->limits.max_write_zeroes_sectors)
else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
!bio->bi_disk->queue->limits.max_write_zeroes_sectors)
disable_write_zeroes(md);
}

@@ -1042,15 +1054,7 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
return -EINVAL;
}

/*
* BIO based queue uses its own splitting. When multipage bvecs
* is switched on, size of the incoming bio may be too big to
* be handled in some targets, such as crypt.
*
* When these targets are ready for the big bio, we can remove
* the limit.
*/
ti->max_io_len = min_t(uint32_t, len, BIO_MAX_PAGES * PAGE_SIZE);
ti->max_io_len = (uint32_t) len;

return 0;
}

@@ -1876,7 +1876,11 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
continue;
}

if (time_after(jiffies, timeo) && !chip_ready(map, adr))
/*
* We check "time_after" and "!chip_good" before checking "chip_good" to avoid
* the failure due to scheduling.
*/
if (time_after(jiffies, timeo) && !chip_good(map, adr, datum))
break;

if (chip_good(map, adr, datum)) {

@@ -1133,6 +1133,8 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
tpa_info = &rxr->rx_tpa[agg_id];

if (unlikely(cons != rxr->rx_next_cons)) {
netdev_warn(bp->dev, "TPA cons %x != expected cons %x\n",
cons, rxr->rx_next_cons);
bnxt_sched_reset(bp, rxr);
return;
}

@@ -1585,15 +1587,17 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
}

cons = rxcmp->rx_cmp_opaque;
rx_buf = &rxr->rx_buf_ring[cons];
data = rx_buf->data;
data_ptr = rx_buf->data_ptr;
if (unlikely(cons != rxr->rx_next_cons)) {
int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp);

netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
cons, rxr->rx_next_cons);
bnxt_sched_reset(bp, rxr);
return rc1;
}
rx_buf = &rxr->rx_buf_ring[cons];
data = rx_buf->data;
data_ptr = rx_buf->data_ptr;
prefetch(data_ptr);

misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);

@@ -1610,11 +1614,17 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,

rx_buf->data = NULL;
if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);

bnxt_reuse_rx_data(rxr, cons, data);
if (agg_bufs)
bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);

rc = -EIO;
if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
netdev_warn(bp->dev, "RX buffer error %x\n", rx_err);
bnxt_sched_reset(bp, rxr);
}
goto next_rx;
}

@@ -4283,7 +4283,7 @@ static void tg3_power_down(struct tg3 *tp)
pci_set_power_state(tp->pdev, PCI_D3hot);
}

static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex)
{
switch (val & MII_TG3_AUX_STAT_SPDMASK) {
case MII_TG3_AUX_STAT_10HALF:

@@ -4787,7 +4787,7 @@ static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
bool current_link_up;
u32 bmsr, val;
u32 lcl_adv, rmt_adv;
u16 current_speed;
u32 current_speed;
u8 current_duplex;
int i, err;

@@ -5719,7 +5719,7 @@ out:
static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
{
u32 orig_pause_cfg;
u16 orig_active_speed;
u32 orig_active_speed;
u8 orig_active_duplex;
u32 mac_status;
bool current_link_up;

@@ -5823,7 +5823,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
{
int err = 0;
u32 bmsr, bmcr;
u16 current_speed = SPEED_UNKNOWN;
u32 current_speed = SPEED_UNKNOWN;
u8 current_duplex = DUPLEX_UNKNOWN;
bool current_link_up = false;
u32 local_adv, remote_adv, sgsr;

@@ -2873,7 +2873,7 @@ struct tg3_tx_ring_info {
struct tg3_link_config {
/* Describes what we're trying to get. */
u32 advertising;
u16 speed;
u32 speed;
u8 duplex;
u8 autoneg;
u8 flowctrl;

@@ -2882,7 +2882,7 @@ struct tg3_link_config {
u8 active_flowctrl;

u8 active_duplex;
u16 active_speed;
u32 active_speed;
u32 rmt_adv;
};

@@ -898,7 +898,9 @@ static void macb_tx_interrupt(struct macb_queue *queue)

/* First, update TX stats if needed */
if (skb) {
if (gem_ptp_do_txstamp(queue, skb, desc) == 0) {
if (unlikely(skb_shinfo(skb)->tx_flags &
SKBTX_HW_TSTAMP) &&
gem_ptp_do_txstamp(queue, skb, desc) == 0) {
/* skb now belongs to timestamp buffer
* and will be removed later
*/

@@ -497,7 +497,7 @@ struct qlcnic_hardware_context {
u16 board_type;
u16 supported_type;

u16 link_speed;
u32 link_speed;
u16 link_duplex;
u16 link_autoneg;
u16 module_type;

@@ -28,6 +28,7 @@
#include <linux/pm_runtime.h>
#include <linux/firmware.h>
#include <linux/prefetch.h>
#include <linux/pci-aspm.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>

@@ -7336,6 +7337,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;

/* Disable ASPM completely as that cause random device stop working
* problems as well as full system hangs for some PCIe devices users.
*/
pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);

/* enable device (incl. PCI PM wakeup and hotplug setup) */
rc = pcim_enable_device(pdev);
if (rc < 0) {

@@ -1273,8 +1273,12 @@ static void vrf_setup(struct net_device *dev)
dev->priv_flags |= IFF_NO_QUEUE;
dev->priv_flags |= IFF_NO_RX_HANDLER;

dev->min_mtu = 0;
dev->max_mtu = 0;
/* VRF devices do not care about MTU, but if the MTU is set
* too low then the ipv4 and ipv6 protocols are disabled
* which breaks networking.
*/
dev->min_mtu = IPV6_MIN_MTU;
dev->max_mtu = ETH_MAX_MTU;
}

static int vrf_validate(struct nlattr *tb[], struct nlattr *data[],

@@ -157,8 +157,12 @@
#define DBG_IRT(x...)
#endif

#ifdef CONFIG_64BIT
#define COMPARE_IRTE_ADDR(irte, hpa) ((irte)->dest_iosapic_addr == (hpa))
#else
#define COMPARE_IRTE_ADDR(irte, hpa) \
((irte)->dest_iosapic_addr == F_EXTEND(hpa))
((irte)->dest_iosapic_addr == ((hpa) | 0xffffffff00000000ULL))
#endif

#define IOSAPIC_REG_SELECT 0x00
#define IOSAPIC_REG_WINDOW 0x10

@@ -130,6 +130,7 @@ static int meson_audio_arb_probe(struct platform_device *pdev)
arb->rstc.nr_resets = ARRAY_SIZE(axg_audio_arb_reset_bits);
arb->rstc.ops = &meson_audio_arb_rstc_ops;
arb->rstc.of_node = dev->of_node;
arb->rstc.owner = THIS_MODULE;

/*
* Enable general :

@@ -667,9 +667,9 @@ config RTC_DRV_S5M
will be called rtc-s5m.

config RTC_DRV_SD3078
tristate "ZXW Crystal SD3078"
tristate "ZXW Shenzhen whwave SD3078"
help
If you say yes here you get support for the ZXW Crystal
If you say yes here you get support for the ZXW Shenzhen whwave
SD3078 RTC chips.

This driver can also be built as a module. If so, the module

@@ -298,7 +298,7 @@ static int cros_ec_rtc_suspend(struct device *dev)
struct cros_ec_rtc *cros_ec_rtc = dev_get_drvdata(&pdev->dev);

if (device_may_wakeup(dev))
enable_irq_wake(cros_ec_rtc->cros_ec->irq);
return enable_irq_wake(cros_ec_rtc->cros_ec->irq);

return 0;
}

@@ -309,7 +309,7 @@ static int cros_ec_rtc_resume(struct device *dev)
struct cros_ec_rtc *cros_ec_rtc = dev_get_drvdata(&pdev->dev);

if (device_may_wakeup(dev))
disable_irq_wake(cros_ec_rtc->cros_ec->irq);
return disable_irq_wake(cros_ec_rtc->cros_ec->irq);

return 0;
}

@@ -480,6 +480,13 @@ static int da9063_rtc_probe(struct platform_device *pdev)
da9063_data_to_tm(data, &rtc->alarm_time, rtc);
rtc->rtc_sync = false;

/*
* TODO: some models have alarms on a minute boundary but still support
* real hardware interrupts. Add this once the core supports it.
*/
if (config->rtc_data_start != RTC_SEC)
rtc->rtc_dev->uie_unsupported = 1;

irq_alarm = platform_get_irq_byname(pdev, "ALARM");
ret = devm_request_threaded_irq(&pdev->dev, irq_alarm, NULL,
da9063_alarm_event,

@@ -374,7 +374,7 @@ static int sh_rtc_set_time(struct device *dev, struct rtc_time *tm)
static inline int sh_rtc_read_alarm_value(struct sh_rtc *rtc, int reg_off)
{
unsigned int byte;
int value = 0xff; /* return 0xff for ignored values */
int value = -1; /* return -1 for ignored values */

byte = readb(rtc->regbase + reg_off);
if (byte & AR_ENB) {

@@ -3878,10 +3878,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
* wake up the thread.
*/
spin_lock(&lpfc_cmd->buf_lock);
if (unlikely(lpfc_cmd->cur_iocbq.iocb_flag & LPFC_DRIVER_ABORTED)) {
lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED;
if (lpfc_cmd->waitq)
wake_up(lpfc_cmd->waitq);
lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED;
if (lpfc_cmd->waitq) {
wake_up(lpfc_cmd->waitq);
lpfc_cmd->waitq = NULL;
}
spin_unlock(&lpfc_cmd->buf_lock);