Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

David S. Miller 2017-01-09 15:39:11 -05:00
commit bb1d303444
222 changed files with 2065 additions and 1234 deletions


@ -106,6 +106,16 @@
use by PCI
Format: <irq>,<irq>...
acpi_mask_gpe= [HW,ACPI]
Due to the existence of _Lxx/_Exx, some GPEs triggered
by unsupported hardware/firmware features can result in
GPE floodings that cannot be automatically disabled by
the GPE dispatcher.
This facility can be used to prevent such uncontrolled
GPE floodings.
Format: <int>
Support masking of GPEs numbered from 0x00 to 0x7f.
acpi_no_auto_serialize [HW,ACPI]
Disable auto-serialization of AML methods
AML control methods that contain the opcodes to create
@ -3811,10 +3821,11 @@
it if 0 is given (See Documentation/cgroup-v1/memory.txt)
swiotlb= [ARM,IA-64,PPC,MIPS,X86]
Format: { <int> | force }
Format: { <int> | force | noforce }
<int> -- Number of I/O TLB slabs
force -- force using of bounce buffers even if they
wouldn't be automatically used by the kernel
noforce -- Never use bounce buffers (for debugging)
switches= [HW,M68k]


@ -8,8 +8,9 @@ This driver provides a simple power button event via an Interrupt.
Required properties:
- compatible: should be "ti,tps65217-pwrbutton" or "ti,tps65218-pwrbutton"
Required properties for TPS65218:
Required properties:
- interrupts: should be one of the following
- <2>: For controllers compatible with tps65217
- <3 IRQ_TYPE_EDGE_BOTH>: For controllers compatible with tps65218
Examples:
@ -17,6 +18,7 @@ Examples:
&tps {
tps65217-pwrbutton {
compatible = "ti,tps65217-pwrbutton";
interrupts = <2>;
};
};


@ -2,11 +2,16 @@ TPS65217 Charger
Required Properties:
-compatible: "ti,tps65217-charger"
-interrupts: TPS65217 interrupt numbers for the AC and USB charger input change.
Should be <0> for the USB charger and <1> for the AC adapter.
-interrupt-names: Should be "USB" and "AC"
This node is a subnode of the tps65217 PMIC.
Example:
tps65217-charger {
compatible = "ti,tps65090-charger";
compatible = "ti,tps65217-charger";
interrupts = <0>, <1>;
interrupt-names = "USB", "AC";
};


@ -55,21 +55,6 @@ Device Drivers DMA Management
.. kernel-doc:: drivers/base/dma-mapping.c
:export:
Device Drivers Power Management
-------------------------------
.. kernel-doc:: drivers/base/power/main.c
:export:
Device Drivers ACPI Support
---------------------------
.. kernel-doc:: drivers/acpi/scan.c
:export:
.. kernel-doc:: drivers/acpi/scan.c
:internal:
Device drivers PnP support
--------------------------


@ -127,22 +127,22 @@ the VFIO when devices are unbound from the driver.
Physical Device Driver Interface
--------------------------------
The physical device driver interface provides the parent_ops[3] structure to
define the APIs to manage work in the mediated core driver that is related to
the physical device.
The physical device driver interface provides the mdev_parent_ops[3] structure
to define the APIs to manage work in the mediated core driver that is related
to the physical device.
The structures in the parent_ops structure are as follows:
The structures in the mdev_parent_ops structure are as follows:
* dev_attr_groups: attributes of the parent device
* mdev_attr_groups: attributes of the mediated device
* supported_config: attributes to define supported configurations
The functions in the parent_ops structure are as follows:
The functions in the mdev_parent_ops structure are as follows:
* create: allocate basic resources in a driver for a mediated device
* remove: free resources in a driver when a mediated device is destroyed
The callbacks in the parent_ops structure are as follows:
The callbacks in the mdev_parent_ops structure are as follows:
* open: open callback of mediated device
* close: close callback of mediated device
@ -151,14 +151,14 @@ The callbacks in the parent_ops structure are as follows:
* write: write emulation callback
* mmap: mmap emulation callback
A driver should use the parent_ops structure in the function call to register
itself with the mdev core driver:
A driver should use the mdev_parent_ops structure in the function call to
register itself with the mdev core driver:
extern int mdev_register_device(struct device *dev,
const struct parent_ops *ops);
const struct mdev_parent_ops *ops);
However, the parent_ops structure is not required in the function call that a
driver should use to unregister itself with the mdev core driver:
However, the mdev_parent_ops structure is not required in the function call
that a driver should use to unregister itself with the mdev core driver:
extern void mdev_unregister_device(struct device *dev);
@ -223,6 +223,9 @@ Directories and files under the sysfs for Each Physical Device
sprintf(buf, "%s-%s", dev_driver_string(parent->dev), group->name);
(or using mdev_parent_dev(mdev) to arrive at the parent device outside
of the core mdev code)
* device_api
This attribute should show which device API is being created, for example,
@ -394,5 +397,5 @@ References
[1] See Documentation/vfio.txt for more information on VFIO.
[2] struct mdev_driver in include/linux/mdev.h
[3] struct parent_ops in include/linux/mdev.h
[3] struct mdev_parent_ops in include/linux/mdev.h
[4] struct vfio_iommu_driver_ops in include/linux/vfio.h
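
For illustration only, a minimal sketch (not part of this merge) of how a parent driver would use the renamed structure: the my_* names are hypothetical, the create() prototype mirrors the kvmgt code further down in this merge, and the remove() prototype is assumed.

#include <linux/device.h>
#include <linux/kobject.h>
#include <linux/mdev.h>

/* Hypothetical callbacks: create() follows the (kobject, mdev_device)
 * prototype used by kvmgt below; remove() is assumed to take the mdev only.
 */
static int my_mdev_create(struct kobject *kobj, struct mdev_device *mdev)
{
	/* allocate per-mediated-device state, keyed off the type in kobj */
	return 0;
}

static int my_mdev_remove(struct mdev_device *mdev)
{
	/* release whatever my_mdev_create() set up */
	return 0;
}

static const struct mdev_parent_ops my_parent_ops = {
	.create = my_mdev_create,
	.remove = my_mdev_remove,
	/* open/close/read/write/mmap callbacks would be filled in as needed */
};

static int my_parent_register(struct device *dev)
{
	/* hand the physical device and its ops to the mdev core */
	return mdev_register_device(dev, &my_parent_ops);
}

static void my_parent_unregister(struct device *dev)
{
	/* no mdev_parent_ops argument is needed here */
	mdev_unregister_device(dev);
}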


@ -3806,6 +3806,7 @@ F: include/linux/devcoredump.h
DEVICE FREQUENCY (DEVFREQ)
M: MyungJoo Ham <myungjoo.ham@samsung.com>
M: Kyungmin Park <kyungmin.park@samsung.com>
R: Chanwoo Choi <cw00.choi@samsung.com>
L: linux-pm@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mzx/devfreq.git
S: Maintained
@ -5512,6 +5513,7 @@ M: Alex Elder <elder@kernel.org>
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
S: Maintained
F: drivers/staging/greybus/
L: greybus-dev@lists.linaro.org
GREYBUS AUDIO PROTOCOLS DRIVERS
M: Vaibhav Agarwal <vaibhav.sr@gmail.com>
@ -5969,6 +5971,7 @@ F: drivers/media/platform/sti/hva
Hyper-V CORE AND DRIVERS
M: "K. Y. Srinivasan" <kys@microsoft.com>
M: Haiyang Zhang <haiyangz@microsoft.com>
M: Stephen Hemminger <sthemmin@microsoft.com>
L: devel@linuxdriverproject.org
S: Maintained
F: arch/x86/include/asm/mshyperv.h
@ -9855,7 +9858,7 @@ M: Mark Rutland <mark.rutland@arm.com>
M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
L: linux-arm-kernel@lists.infradead.org
S: Maintained
F: drivers/firmware/psci.c
F: drivers/firmware/psci*.c
F: include/linux/psci.h
F: include/uapi/linux/psci.h


@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 10
SUBLEVEL = 0
EXTRAVERSION = -rc2
EXTRAVERSION = -rc3
NAME = Roaring Lionus
# *DOCUMENTATION*


@ -1502,8 +1502,7 @@ source kernel/Kconfig.preempt
config HZ_FIXED
int
default 200 if ARCH_EBSA110 || ARCH_S3C24XX || \
ARCH_S5PV210 || ARCH_EXYNOS4
default 200 if ARCH_EBSA110
default 128 if SOC_AT91RM9200
default 0


@ -501,6 +501,7 @@ dtb-$(CONFIG_ARCH_OMAP3) += \
am3517-evm.dtb \
am3517_mt_ventoux.dtb \
logicpd-torpedo-37xx-devkit.dtb \
logicpd-som-lv-37xx-devkit.dtb \
omap3430-sdp.dtb \
omap3-beagle.dtb \
omap3-beagle-xm.dtb \


@ -6,8 +6,6 @@
* published by the Free Software Foundation.
*/
#include <dt-bindings/mfd/tps65217.h>
/ {
cpus {
cpu@0 {
@ -319,13 +317,13 @@
ti,pmic-shutdown-controller;
charger {
interrupts = <TPS65217_IRQ_AC>, <TPS65217_IRQ_USB>;
interrupts-names = "AC", "USB";
interrupts = <0>, <1>;
interrupt-names = "USB", "AC";
status = "okay";
};
pwrbutton {
interrupts = <TPS65217_IRQ_PB>;
interrupts = <2>;
status = "okay";
};


@ -16,6 +16,7 @@
interrupt-parent = <&intc>;
#address-cells = <1>;
#size-cells = <1>;
chosen { };
aliases {
i2c0 = &i2c0;


@ -16,6 +16,7 @@
interrupt-parent = <&wakeupgen>;
#address-cells = <1>;
#size-cells = <1>;
chosen { };
memory@0 {
device_type = "memory";


@ -62,11 +62,6 @@
linux,default-trigger = "mmc0";
};
};
extcon_usb2: extcon_usb2 {
compatible = "linux,extcon-usb-gpio";
id-gpio = <&gpio5 7 GPIO_ACTIVE_HIGH>;
};
};
&mmc1 {
@ -79,3 +74,8 @@
&omap_dwc3_2 {
extcon = <&extcon_usb2>;
};
&extcon_usb2 {
id-gpio = <&gpio5 7 GPIO_ACTIVE_HIGH>;
vbus-gpio = <&gpio7 22 GPIO_ACTIVE_HIGH>;
};


@ -23,11 +23,6 @@
reg = <0x0 0x80000000 0x0 0x80000000>;
};
extcon_usb2: extcon_usb2 {
compatible = "linux,extcon-usb-gpio";
id-gpio = <&gpio3 16 GPIO_ACTIVE_HIGH>;
};
status-leds {
compatible = "gpio-leds";
cpu0-led {
@ -76,6 +71,11 @@
extcon = <&extcon_usb2>;
};
&extcon_usb2 {
id-gpio = <&gpio3 16 GPIO_ACTIVE_HIGH>;
vbus-gpio = <&gpio3 26 GPIO_ACTIVE_HIGH>;
};
&mmc1 {
status = "okay";
vmmc-supply = <&v3_3d>;
@ -87,3 +87,7 @@
&sn65hvs882 {
load-gpios = <&gpio3 19 GPIO_ACTIVE_LOW>;
};
&pcie1 {
gpios = <&gpio3 23 GPIO_ACTIVE_HIGH>;
};


@ -303,6 +303,13 @@
gpio-controller;
#gpio-cells = <2>;
};
extcon_usb2: tps659038_usb {
compatible = "ti,palmas-usb-vid";
ti,enable-vbus-detection;
ti,enable-id-detection;
/* ID & VBUS GPIOs provided in board dts */
};
};
};
@ -369,7 +376,7 @@
};
&usb2 {
dr_mode = "otg";
dr_mode = "peripheral";
};
&mmc2 {


@ -12,6 +12,7 @@
interrupt-parent = <&intc>;
#address-cells = <1>;
#size-cells = <1>;
chosen { };
aliases {
i2c0 = &i2c1;


@ -12,6 +12,7 @@
interrupt-parent = <&intc>;
#address-cells = <1>;
#size-cells = <1>;
chosen { };
aliases {
i2c0 = &i2c1;


@ -18,6 +18,7 @@
compatible = "ti,dra7xx";
interrupt-parent = <&crossbar_mpu>;
chosen { };
aliases {
i2c0 = &i2c1;


@ -132,3 +132,19 @@
ti,palmas-long-press-seconds = <6>;
};
};
&usb2_phy1 {
phy-supply = <&ldo4_reg>;
};
&usb2_phy2 {
phy-supply = <&ldo4_reg>;
};
&dss {
vdda_video-supply = <&ldo5_reg>;
};
&mmc1 {
vmmc_aux-supply = <&ldo1_reg>;
};


@ -31,11 +31,11 @@
};
};
avic: avic-interrupt-controller@60000000 {
avic: interrupt-controller@68000000 {
compatible = "fsl,imx31-avic", "fsl,avic";
interrupt-controller;
#interrupt-cells = <1>;
reg = <0x60000000 0x100000>;
reg = <0x68000000 0x100000>;
};
soc {


@ -533,7 +533,6 @@
MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x17071
MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x17071
MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x17071
MX6QDL_PAD_NANDF_CS2__GPIO6_IO15 0x000b0
>;
};


@ -1100,6 +1100,7 @@
interrupts = <0 14 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6QDL_CLK_EIM_SLOW>;
fsl,weim-cs-gpr = <&gpr>;
status = "disabled";
};
ocotp: ocotp@021bc000 {


@ -900,6 +900,7 @@
reg = <0x021b8000 0x4000>;
interrupts = <0 14 IRQ_TYPE_LEVEL_HIGH>;
fsl,weim-cs-gpr = <&gpr>;
status = "disabled";
};
ocotp: ocotp@021bc000 {


@ -977,6 +977,7 @@
interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6SX_CLK_EIM_SLOW>;
fsl,weim-cs-gpr = <&gpr>;
status = "disabled";
};
ocotp: ocotp@021bc000 {


@ -17,6 +17,7 @@
interrupt-parent = <&intc>;
#address-cells = <1>;
#size-cells = <1>;
chosen { };
aliases {
serial0 = &uart1;


@ -734,6 +734,8 @@
vmmc_aux-supply = <&vsim>;
bus-width = <8>;
non-removable;
no-sdio;
no-sd;
};
&mmc3 {


@ -17,6 +17,7 @@
interrupt-parent = <&intc>;
#address-cells = <1>;
#size-cells = <1>;
chosen { };
aliases {
i2c0 = &i2c1;


@ -15,6 +15,7 @@
interrupt-parent = <&wakeupgen>;
#address-cells = <1>;
#size-cells = <1>;
chosen { };
aliases {
i2c0 = &i2c1;


@ -17,6 +17,7 @@
compatible = "ti,omap5";
interrupt-parent = <&wakeupgen>;
chosen { };
aliases {
i2c0 = &i2c1;


@ -4,6 +4,7 @@
#include <dt-bindings/clock/qcom,gcc-msm8960.h>
#include <dt-bindings/reset/qcom,gcc-msm8960.h>
#include <dt-bindings/clock/qcom,mmcc-msm8960.h>
#include <dt-bindings/clock/qcom,rpmcc.h>
#include <dt-bindings/soc/qcom,gsbi.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
@ -303,6 +304,9 @@
firmware {
scm {
compatible = "qcom,scm-apq8064";
clocks = <&rpmcc RPM_DAYTONA_FABRIC_CLK>;
clock-names = "core";
};
};


@ -81,7 +81,7 @@
#address-cells = <0>;
interrupt-controller;
reg = <0 0x2c001000 0 0x1000>,
<0 0x2c002000 0 0x1000>,
<0 0x2c002000 0 0x2000>,
<0 0x2c004000 0 0x2000>,
<0 0x2c006000 0 0x2000>;
interrupts = <1 9 0xf04>;


@ -131,7 +131,7 @@
#address-cells = <0>;
interrupt-controller;
reg = <0 0x2c001000 0 0x1000>,
<0 0x2c002000 0 0x1000>,
<0 0x2c002000 0 0x2000>,
<0 0x2c004000 0 0x2000>,
<0 0x2c006000 0 0x2000>;
interrupts = <1 9 0xf04>;


@ -153,7 +153,8 @@
switch0phy1: switch1phy0@1 {
reg = <1>;
interrupt-parent = <&switch0>;
interrupts = <1 IRQ_TYPE_LEVEL_HIGH>; };
interrupts = <1 IRQ_TYPE_LEVEL_HIGH>;
};
switch0phy2: switch1phy0@2 {
reg = <2>;
interrupt-parent = <&switch0>;


@ -31,10 +31,10 @@ static LIST_HEAD(clocks);
static DEFINE_MUTEX(clocks_mutex);
static DEFINE_SPINLOCK(clockfw_lock);
static void __clk_enable(struct clk *clk)
void davinci_clk_enable(struct clk *clk)
{
if (clk->parent)
__clk_enable(clk->parent);
davinci_clk_enable(clk->parent);
if (clk->usecount++ == 0) {
if (clk->flags & CLK_PSC)
davinci_psc_config(clk->domain, clk->gpsc, clk->lpsc,
@ -44,7 +44,7 @@ static void __clk_enable(struct clk *clk)
}
}
static void __clk_disable(struct clk *clk)
void davinci_clk_disable(struct clk *clk)
{
if (WARN_ON(clk->usecount == 0))
return;
@ -56,7 +56,7 @@ static void __clk_disable(struct clk *clk)
clk->clk_disable(clk);
}
if (clk->parent)
__clk_disable(clk->parent);
davinci_clk_disable(clk->parent);
}
int davinci_clk_reset(struct clk *clk, bool reset)
@ -103,7 +103,7 @@ int clk_enable(struct clk *clk)
return -EINVAL;
spin_lock_irqsave(&clockfw_lock, flags);
__clk_enable(clk);
davinci_clk_enable(clk);
spin_unlock_irqrestore(&clockfw_lock, flags);
return 0;
@ -118,7 +118,7 @@ void clk_disable(struct clk *clk)
return;
spin_lock_irqsave(&clockfw_lock, flags);
__clk_disable(clk);
davinci_clk_disable(clk);
spin_unlock_irqrestore(&clockfw_lock, flags);
}
EXPORT_SYMBOL(clk_disable);


@ -132,6 +132,8 @@ int davinci_set_sysclk_rate(struct clk *clk, unsigned long rate);
int davinci_set_refclk_rate(unsigned long rate);
int davinci_simple_set_rate(struct clk *clk, unsigned long rate);
int davinci_clk_reset(struct clk *clk, bool reset);
void davinci_clk_enable(struct clk *clk);
void davinci_clk_disable(struct clk *clk);
extern struct platform_device davinci_wdt_device;
extern void davinci_watchdog_reset(struct platform_device *);


@ -319,6 +319,16 @@ static struct clk emac_clk = {
.gpsc = 1,
};
/*
* In order to avoid adding the emac_clk to the clock lookup table twice (and
* screwing up the linked list in the process) create a separate clock for
* mdio inheriting the rate from emac_clk.
*/
static struct clk mdio_clk = {
.name = "mdio",
.parent = &emac_clk,
};
static struct clk mcasp_clk = {
.name = "mcasp",
.parent = &async3_clk,
@ -367,6 +377,16 @@ static struct clk aemif_clk = {
.flags = ALWAYS_ENABLED,
};
/*
* In order to avoid adding the aemif_clk to the clock lookup table twice (and
* screwing up the linked list in the process) create a separate clock for
* nand inheriting the rate from aemif_clk.
*/
static struct clk aemif_nand_clk = {
.name = "nand",
.parent = &aemif_clk,
};
static struct clk usb11_clk = {
.name = "usb11",
.parent = &pll0_sysclk4,
@ -529,7 +549,7 @@ static struct clk_lookup da850_clks[] = {
CLK(NULL, "arm", &arm_clk),
CLK(NULL, "rmii", &rmii_clk),
CLK("davinci_emac.1", NULL, &emac_clk),
CLK("davinci_mdio.0", "fck", &emac_clk),
CLK("davinci_mdio.0", "fck", &mdio_clk),
CLK("davinci-mcasp.0", NULL, &mcasp_clk),
CLK("davinci-mcbsp.0", NULL, &mcbsp0_clk),
CLK("davinci-mcbsp.1", NULL, &mcbsp1_clk),
@ -537,7 +557,15 @@ static struct clk_lookup da850_clks[] = {
CLK("da830-mmc.0", NULL, &mmcsd0_clk),
CLK("da830-mmc.1", NULL, &mmcsd1_clk),
CLK("ti-aemif", NULL, &aemif_clk),
CLK(NULL, "aemif", &aemif_clk),
/*
* The only user of this clock is davinci_nand and it gets it through
* con_id. The nand node itself is created from within the aemif
* driver to guarantee that it's probed after the aemif timing
* parameters are configured. of_dev_auxdata is not accessible from
* the aemif driver and can't be passed to of_platform_populate(). For
* that reason we're leaving the dev_id here as NULL.
*/
CLK(NULL, "aemif", &aemif_nand_clk),
CLK("ohci-da8xx", "usb11", &usb11_clk),
CLK("musb-da8xx", "usb20", &usb20_clk),
CLK("spi_davinci.0", NULL, &spi0_clk),


@ -22,6 +22,8 @@
#define DA8XX_USB0_BASE 0x01e00000
#define DA8XX_USB1_BASE 0x01e25000
static struct clk *usb20_clk;
static struct platform_device da8xx_usb_phy = {
.name = "da8xx-usb-phy",
.id = -1,
@ -158,26 +160,13 @@ int __init da8xx_register_usb_refclkin(int rate)
static void usb20_phy_clk_enable(struct clk *clk)
{
struct clk *usb20_clk;
int err;
u32 val;
u32 timeout = 500000; /* 500 msec */
val = readl(DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP2_REG));
usb20_clk = clk_get(&da8xx_usb20_dev.dev, "usb20");
if (IS_ERR(usb20_clk)) {
pr_err("could not get usb20 clk: %ld\n", PTR_ERR(usb20_clk));
return;
}
/* The USB 2.O PLL requires that the USB 2.O PSC is enabled as well. */
err = clk_prepare_enable(usb20_clk);
if (err) {
pr_err("failed to enable usb20 clk: %d\n", err);
clk_put(usb20_clk);
return;
}
davinci_clk_enable(usb20_clk);
/*
* Turn on the USB 2.0 PHY, but just the PLL, and not OTG. The USB 1.1
@ -197,8 +186,7 @@ static void usb20_phy_clk_enable(struct clk *clk)
pr_err("Timeout waiting for USB 2.0 PHY clock good\n");
done:
clk_disable_unprepare(usb20_clk);
clk_put(usb20_clk);
davinci_clk_disable(usb20_clk);
}
static void usb20_phy_clk_disable(struct clk *clk)
@ -285,11 +273,19 @@ static struct clk_lookup usb20_phy_clk_lookup =
int __init da8xx_register_usb20_phy_clk(bool use_usb_refclkin)
{
struct clk *parent;
int ret = 0;
int ret;
usb20_clk = clk_get(&da8xx_usb20_dev.dev, "usb20");
ret = PTR_ERR_OR_ZERO(usb20_clk);
if (ret)
return ret;
parent = clk_get(NULL, use_usb_refclkin ? "usb_refclkin" : "pll0_aux");
if (IS_ERR(parent))
return PTR_ERR(parent);
ret = PTR_ERR_OR_ZERO(parent);
if (ret) {
clk_put(usb20_clk);
return ret;
}
usb20_phy_clk.parent = parent;
ret = clk_register(&usb20_phy_clk);


@ -385,36 +385,6 @@ fail:
return pen_release != -1 ? ret : 0;
}
/*
* Initialise the CPU possible map early - this describes the CPUs
* which may be present or become present in the system.
*/
static void __init exynos_smp_init_cpus(void)
{
void __iomem *scu_base = scu_base_addr();
unsigned int i, ncores;
if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
ncores = scu_base ? scu_get_core_count(scu_base) : 1;
else
/*
* CPU Nodes are passed thru DT and set_cpu_possible
* is set by "arm_dt_init_cpu_maps".
*/
return;
/* sanity check */
if (ncores > nr_cpu_ids) {
pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
ncores, nr_cpu_ids);
ncores = nr_cpu_ids;
}
for (i = 0; i < ncores; i++)
set_cpu_possible(i, true);
}
static void __init exynos_smp_prepare_cpus(unsigned int max_cpus)
{
int i;
@ -479,7 +449,6 @@ static void exynos_cpu_die(unsigned int cpu)
#endif /* CONFIG_HOTPLUG_CPU */
const struct smp_operations exynos_smp_ops __initconst = {
.smp_init_cpus = exynos_smp_init_cpus,
.smp_prepare_cpus = exynos_smp_prepare_cpus,
.smp_secondary_init = exynos_secondary_init,
.smp_boot_secondary = exynos_boot_secondary,


@ -37,7 +37,6 @@ static const char * const imx1_dt_board_compat[] __initconst = {
};
DT_MACHINE_START(IMX1_DT, "Freescale i.MX1 (Device Tree Support)")
.map_io = debug_ll_io_init,
.init_early = imx1_init_early,
.init_irq = imx1_init_irq,
.dt_compat = imx1_dt_board_compat,


@ -7,7 +7,7 @@ ccflags-y := -I$(srctree)/$(src)/include \
# Common support
obj-y := id.o io.o control.o devices.o fb.o timer.o pm.o \
common.o gpio.o dma.o wd_timer.o display.o i2c.o hdq1w.o omap_hwmod.o \
common.o dma.o wd_timer.o display.o i2c.o hdq1w.o omap_hwmod.o \
omap_device.o omap-headsmp.o sram.o drm.o
hwmod-common = omap_hwmod.o omap_hwmod_reset.o \


@ -304,7 +304,7 @@ DT_MACHINE_START(AM43_DT, "Generic AM43 (Flattened Device Tree)")
.init_late = am43xx_init_late,
.init_irq = omap_gic_of_init,
.init_machine = omap_generic_init,
.init_time = omap4_local_timer_init,
.init_time = omap3_gptimer_timer_init,
.dt_compat = am43_boards_compat,
.restart = omap44xx_restart,
MACHINE_END


@ -1,160 +0,0 @@
/*
* OMAP2+ specific gpio initialization
*
* Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
*
* Author:
* Charulatha V <charu@ti.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation version 2.
*
* This program is distributed "as is" WITHOUT ANY WARRANTY of any
* kind, whether express or implied; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/gpio.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/platform_data/gpio-omap.h>
#include "soc.h"
#include "omap_hwmod.h"
#include "omap_device.h"
#include "omap-pm.h"
#include "powerdomain.h"
static int __init omap2_gpio_dev_init(struct omap_hwmod *oh, void *unused)
{
struct platform_device *pdev;
struct omap_gpio_platform_data *pdata;
struct omap_gpio_dev_attr *dev_attr;
char *name = "omap_gpio";
int id;
struct powerdomain *pwrdm;
/*
* extract the device id from name field available in the
* hwmod database and use the same for constructing ids for
* gpio devices.
* CAUTION: Make sure the name in the hwmod database does
* not change. If changed, make corresponding change here
* or make use of static variable mechanism to handle this.
*/
sscanf(oh->name, "gpio%d", &id);
pdata = kzalloc(sizeof(struct omap_gpio_platform_data), GFP_KERNEL);
if (!pdata) {
pr_err("gpio%d: Memory allocation failed\n", id);
return -ENOMEM;
}
dev_attr = (struct omap_gpio_dev_attr *)oh->dev_attr;
pdata->bank_width = dev_attr->bank_width;
pdata->dbck_flag = dev_attr->dbck_flag;
pdata->get_context_loss_count = omap_pm_get_dev_context_loss_count;
pdata->regs = kzalloc(sizeof(struct omap_gpio_reg_offs), GFP_KERNEL);
if (!pdata->regs) {
pr_err("gpio%d: Memory allocation failed\n", id);
kfree(pdata);
return -ENOMEM;
}
switch (oh->class->rev) {
case 0:
if (id == 1)
/* non-wakeup GPIO pins for OMAP2 Bank1 */
pdata->non_wakeup_gpios = 0xe203ffc0;
else if (id == 2)
/* non-wakeup GPIO pins for OMAP2 Bank2 */
pdata->non_wakeup_gpios = 0x08700040;
/* fall through */
case 1:
pdata->regs->revision = OMAP24XX_GPIO_REVISION;
pdata->regs->direction = OMAP24XX_GPIO_OE;
pdata->regs->datain = OMAP24XX_GPIO_DATAIN;
pdata->regs->dataout = OMAP24XX_GPIO_DATAOUT;
pdata->regs->set_dataout = OMAP24XX_GPIO_SETDATAOUT;
pdata->regs->clr_dataout = OMAP24XX_GPIO_CLEARDATAOUT;
pdata->regs->irqstatus = OMAP24XX_GPIO_IRQSTATUS1;
pdata->regs->irqstatus2 = OMAP24XX_GPIO_IRQSTATUS2;
pdata->regs->irqenable = OMAP24XX_GPIO_IRQENABLE1;
pdata->regs->irqenable2 = OMAP24XX_GPIO_IRQENABLE2;
pdata->regs->set_irqenable = OMAP24XX_GPIO_SETIRQENABLE1;
pdata->regs->clr_irqenable = OMAP24XX_GPIO_CLEARIRQENABLE1;
pdata->regs->debounce = OMAP24XX_GPIO_DEBOUNCE_VAL;
pdata->regs->debounce_en = OMAP24XX_GPIO_DEBOUNCE_EN;
pdata->regs->ctrl = OMAP24XX_GPIO_CTRL;
pdata->regs->wkup_en = OMAP24XX_GPIO_WAKE_EN;
pdata->regs->leveldetect0 = OMAP24XX_GPIO_LEVELDETECT0;
pdata->regs->leveldetect1 = OMAP24XX_GPIO_LEVELDETECT1;
pdata->regs->risingdetect = OMAP24XX_GPIO_RISINGDETECT;
pdata->regs->fallingdetect = OMAP24XX_GPIO_FALLINGDETECT;
break;
case 2:
pdata->regs->revision = OMAP4_GPIO_REVISION;
pdata->regs->direction = OMAP4_GPIO_OE;
pdata->regs->datain = OMAP4_GPIO_DATAIN;
pdata->regs->dataout = OMAP4_GPIO_DATAOUT;
pdata->regs->set_dataout = OMAP4_GPIO_SETDATAOUT;
pdata->regs->clr_dataout = OMAP4_GPIO_CLEARDATAOUT;
pdata->regs->irqstatus_raw0 = OMAP4_GPIO_IRQSTATUSRAW0;
pdata->regs->irqstatus_raw1 = OMAP4_GPIO_IRQSTATUSRAW1;
pdata->regs->irqstatus = OMAP4_GPIO_IRQSTATUS0;
pdata->regs->irqstatus2 = OMAP4_GPIO_IRQSTATUS1;
pdata->regs->irqenable = OMAP4_GPIO_IRQSTATUSSET0;
pdata->regs->irqenable2 = OMAP4_GPIO_IRQSTATUSSET1;
pdata->regs->set_irqenable = OMAP4_GPIO_IRQSTATUSSET0;
pdata->regs->clr_irqenable = OMAP4_GPIO_IRQSTATUSCLR0;
pdata->regs->debounce = OMAP4_GPIO_DEBOUNCINGTIME;
pdata->regs->debounce_en = OMAP4_GPIO_DEBOUNCENABLE;
pdata->regs->ctrl = OMAP4_GPIO_CTRL;
pdata->regs->wkup_en = OMAP4_GPIO_IRQWAKEN0;
pdata->regs->leveldetect0 = OMAP4_GPIO_LEVELDETECT0;
pdata->regs->leveldetect1 = OMAP4_GPIO_LEVELDETECT1;
pdata->regs->risingdetect = OMAP4_GPIO_RISINGDETECT;
pdata->regs->fallingdetect = OMAP4_GPIO_FALLINGDETECT;
break;
default:
WARN(1, "Invalid gpio bank_type\n");
kfree(pdata->regs);
kfree(pdata);
return -EINVAL;
}
pwrdm = omap_hwmod_get_pwrdm(oh);
pdata->loses_context = pwrdm_can_ever_lose_context(pwrdm);
pdev = omap_device_build(name, id - 1, oh, pdata, sizeof(*pdata));
kfree(pdata);
if (IS_ERR(pdev)) {
WARN(1, "Can't build omap_device for %s:%s.\n",
name, oh->name);
return PTR_ERR(pdev);
}
return 0;
}
/*
* gpio_init needs to be done before
* machine_init functions access gpio APIs.
* Hence gpio_init is a omap_postcore_initcall.
*/
static int __init omap2_gpio_init(void)
{
/* If dtb is there, the devices will be created dynamically */
if (of_have_populated_dt())
return -ENODEV;
return omap_hwmod_for_each_by_class("gpio", omap2_gpio_dev_init, NULL);
}
omap_postcore_initcall(omap2_gpio_init);


@ -741,14 +741,14 @@ static int _init_main_clk(struct omap_hwmod *oh)
int ret = 0;
char name[MOD_CLK_MAX_NAME_LEN];
struct clk *clk;
static const char modck[] = "_mod_ck";
/* +7 magic comes from '_mod_ck' suffix */
if (strlen(oh->name) + 7 > MOD_CLK_MAX_NAME_LEN)
if (strlen(oh->name) >= MOD_CLK_MAX_NAME_LEN - strlen(modck))
pr_warn("%s: warning: cropping name for %s\n", __func__,
oh->name);
strncpy(name, oh->name, MOD_CLK_MAX_NAME_LEN - 7);
strcat(name, "_mod_ck");
strlcpy(name, oh->name, MOD_CLK_MAX_NAME_LEN - strlen(modck));
strlcat(name, modck, MOD_CLK_MAX_NAME_LEN);
clk = clk_get(NULL, name);
if (!IS_ERR(clk)) {


@ -121,10 +121,6 @@ extern struct omap_hwmod_irq_info omap2_uart3_mpu_irqs[];
extern struct omap_hwmod_irq_info omap2_dispc_irqs[];
extern struct omap_hwmod_irq_info omap2_i2c1_mpu_irqs[];
extern struct omap_hwmod_irq_info omap2_i2c2_mpu_irqs[];
extern struct omap_hwmod_irq_info omap2_gpio1_irqs[];
extern struct omap_hwmod_irq_info omap2_gpio2_irqs[];
extern struct omap_hwmod_irq_info omap2_gpio3_irqs[];
extern struct omap_hwmod_irq_info omap2_gpio4_irqs[];
extern struct omap_hwmod_irq_info omap2_dma_system_irqs[];
extern struct omap_hwmod_irq_info omap2_mcspi1_mpu_irqs[];
extern struct omap_hwmod_irq_info omap2_mcspi2_mpu_irqs[];


@ -295,10 +295,8 @@ int omap_prcm_register_chain_handler(struct omap_prcm_irq_setup *irq_setup)
GFP_KERNEL);
if (!prcm_irq_chips || !prcm_irq_setup->saved_mask ||
!prcm_irq_setup->priority_mask) {
pr_err("PRCM: kzalloc failed\n");
!prcm_irq_setup->priority_mask)
goto err;
}
memset(mask, 0, sizeof(mask));


@ -510,18 +510,19 @@ void __init omap3_secure_sync32k_timer_init(void)
}
#endif /* CONFIG_ARCH_OMAP3 */
#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_SOC_AM33XX)
#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_SOC_AM33XX) || \
defined(CONFIG_SOC_AM43XX)
void __init omap3_gptimer_timer_init(void)
{
__omap_sync32k_timer_init(2, "timer_sys_ck", NULL,
1, "timer_sys_ck", "ti,timer-alwon", true);
if (of_have_populated_dt())
clocksource_probe();
}
#endif
#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM43XX)
defined(CONFIG_SOC_DRA7XX)
static void __init omap4_sync32k_timer_init(void)
{
__omap_sync32k_timer_init(1, "timer_32k_ck", "ti,timer-alwon",


@ -345,10 +345,40 @@ static struct s3c24xx_dma_channel s3c2410_dma_channels[DMACH_MAX] = {
[DMACH_USB_EP4] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(4, 3), },
};
static const struct dma_slave_map s3c2410_dma_slave_map[] = {
{ "s3c2410-sdi", "rx-tx", (void *)DMACH_SDI },
{ "s3c2410-spi.0", "rx", (void *)DMACH_SPI0_RX },
{ "s3c2410-spi.0", "tx", (void *)DMACH_SPI0_TX },
{ "s3c2410-spi.1", "rx", (void *)DMACH_SPI1_RX },
{ "s3c2410-spi.1", "tx", (void *)DMACH_SPI1_TX },
/*
* The DMA request source[1] (DMACH_UARTx_SRC2) are
* not used in the UART driver.
*/
{ "s3c2410-uart.0", "rx", (void *)DMACH_UART0 },
{ "s3c2410-uart.0", "tx", (void *)DMACH_UART0 },
{ "s3c2410-uart.1", "rx", (void *)DMACH_UART1 },
{ "s3c2410-uart.1", "tx", (void *)DMACH_UART1 },
{ "s3c2410-uart.2", "rx", (void *)DMACH_UART2 },
{ "s3c2410-uart.2", "tx", (void *)DMACH_UART2 },
{ "s3c24xx-iis", "rx", (void *)DMACH_I2S_IN },
{ "s3c24xx-iis", "tx", (void *)DMACH_I2S_OUT },
{ "s3c-hsudc", "rx0", (void *)DMACH_USB_EP1 },
{ "s3c-hsudc", "tx0", (void *)DMACH_USB_EP1 },
{ "s3c-hsudc", "rx1", (void *)DMACH_USB_EP2 },
{ "s3c-hsudc", "tx1", (void *)DMACH_USB_EP2 },
{ "s3c-hsudc", "rx2", (void *)DMACH_USB_EP3 },
{ "s3c-hsudc", "tx2", (void *)DMACH_USB_EP3 },
{ "s3c-hsudc", "rx3", (void *)DMACH_USB_EP4 },
{ "s3c-hsudc", "tx3", (void *)DMACH_USB_EP4 }
};
static struct s3c24xx_dma_platdata s3c2410_dma_platdata = {
.num_phy_channels = 4,
.channels = s3c2410_dma_channels,
.num_channels = DMACH_MAX,
.slave_map = s3c2410_dma_slave_map,
.slavecnt = ARRAY_SIZE(s3c2410_dma_slave_map),
};
struct platform_device s3c2410_device_dma = {
@ -388,10 +418,36 @@ static struct s3c24xx_dma_channel s3c2412_dma_channels[DMACH_MAX] = {
[DMACH_USB_EP4] = { S3C24XX_DMA_APB, true, 16 },
};
static const struct dma_slave_map s3c2412_dma_slave_map[] = {
{ "s3c2412-sdi", "rx-tx", (void *)DMACH_SDI },
{ "s3c2412-spi.0", "rx", (void *)DMACH_SPI0_RX },
{ "s3c2412-spi.0", "tx", (void *)DMACH_SPI0_TX },
{ "s3c2412-spi.1", "rx", (void *)DMACH_SPI1_RX },
{ "s3c2412-spi.1", "tx", (void *)DMACH_SPI1_TX },
{ "s3c2440-uart.0", "rx", (void *)DMACH_UART0 },
{ "s3c2440-uart.0", "tx", (void *)DMACH_UART0 },
{ "s3c2440-uart.1", "rx", (void *)DMACH_UART1 },
{ "s3c2440-uart.1", "tx", (void *)DMACH_UART1 },
{ "s3c2440-uart.2", "rx", (void *)DMACH_UART2 },
{ "s3c2440-uart.2", "tx", (void *)DMACH_UART2 },
{ "s3c2412-iis", "rx", (void *)DMACH_I2S_IN },
{ "s3c2412-iis", "tx", (void *)DMACH_I2S_OUT },
{ "s3c-hsudc", "rx0", (void *)DMACH_USB_EP1 },
{ "s3c-hsudc", "tx0", (void *)DMACH_USB_EP1 },
{ "s3c-hsudc", "rx1", (void *)DMACH_USB_EP2 },
{ "s3c-hsudc", "tx1", (void *)DMACH_USB_EP2 },
{ "s3c-hsudc", "rx2", (void *)DMACH_USB_EP3 },
{ "s3c-hsudc", "tx2", (void *)DMACH_USB_EP3 },
{ "s3c-hsudc", "rx3", (void *)DMACH_USB_EP4 },
{ "s3c-hsudc", "tx3", (void *)DMACH_USB_EP4 }
};
static struct s3c24xx_dma_platdata s3c2412_dma_platdata = {
.num_phy_channels = 4,
.channels = s3c2412_dma_channels,
.num_channels = DMACH_MAX,
.slave_map = s3c2412_dma_slave_map,
.slavecnt = ARRAY_SIZE(s3c2412_dma_slave_map),
};
struct platform_device s3c2412_device_dma = {
@ -534,10 +590,30 @@ static struct s3c24xx_dma_channel s3c2443_dma_channels[DMACH_MAX] = {
[DMACH_MIC_IN] = { S3C24XX_DMA_APB, true, 29 },
};
static const struct dma_slave_map s3c2443_dma_slave_map[] = {
{ "s3c2440-sdi", "rx-tx", (void *)DMACH_SDI },
{ "s3c2443-spi.0", "rx", (void *)DMACH_SPI0_RX },
{ "s3c2443-spi.0", "tx", (void *)DMACH_SPI0_TX },
{ "s3c2443-spi.1", "rx", (void *)DMACH_SPI1_RX },
{ "s3c2443-spi.1", "tx", (void *)DMACH_SPI1_TX },
{ "s3c2440-uart.0", "rx", (void *)DMACH_UART0 },
{ "s3c2440-uart.0", "tx", (void *)DMACH_UART0 },
{ "s3c2440-uart.1", "rx", (void *)DMACH_UART1 },
{ "s3c2440-uart.1", "tx", (void *)DMACH_UART1 },
{ "s3c2440-uart.2", "rx", (void *)DMACH_UART2 },
{ "s3c2440-uart.2", "tx", (void *)DMACH_UART2 },
{ "s3c2440-uart.3", "rx", (void *)DMACH_UART3 },
{ "s3c2440-uart.3", "tx", (void *)DMACH_UART3 },
{ "s3c24xx-iis", "rx", (void *)DMACH_I2S_IN },
{ "s3c24xx-iis", "tx", (void *)DMACH_I2S_OUT },
};
static struct s3c24xx_dma_platdata s3c2443_dma_platdata = {
.num_phy_channels = 6,
.channels = s3c2443_dma_channels,
.num_channels = DMACH_MAX,
.slave_map = s3c2443_dma_slave_map,
.slavecnt = ARRAY_SIZE(s3c2443_dma_slave_map),
};
struct platform_device s3c2443_device_dma = {


@ -356,5 +356,21 @@
status = "disabled";
};
};
vpu: vpu@d0100000 {
compatible = "amlogic,meson-gx-vpu";
reg = <0x0 0xd0100000 0x0 0x100000>,
<0x0 0xc883c000 0x0 0x1000>,
<0x0 0xc8838000 0x0 0x1000>;
reg-names = "vpu", "hhi", "dmc";
interrupts = <GIC_SPI 3 IRQ_TYPE_EDGE_RISING>;
#address-cells = <1>;
#size-cells = <0>;
/* CVBS VDAC output port */
cvbs_vdac_port: port@0 {
reg = <0>;
};
};
};
};


@ -142,6 +142,16 @@
clocks = <&wifi32k>;
clock-names = "ext_clock";
};
cvbs-connector {
compatible = "composite-video-connector";
port {
cvbs_connector_in: endpoint {
remote-endpoint = <&cvbs_vdac_out>;
};
};
};
};
&uart_AO {
@ -229,3 +239,9 @@
clocks = <&clkc CLKID_FCLK_DIV4>;
clock-names = "clkin0";
};
&cvbs_vdac_port {
cvbs_vdac_out: endpoint {
remote-endpoint = <&cvbs_connector_in>;
};
};


@ -125,6 +125,16 @@
clocks = <&wifi32k>;
clock-names = "ext_clock";
};
cvbs-connector {
compatible = "composite-video-connector";
port {
cvbs_connector_in: endpoint {
remote-endpoint = <&cvbs_vdac_out>;
};
};
};
};
/* This UART is brought out to the DB9 connector */
@ -234,3 +244,9 @@
clocks = <&clkc CLKID_FCLK_DIV4>;
clock-names = "clkin0";
};
&cvbs_vdac_port {
cvbs_vdac_out: endpoint {
remote-endpoint = <&cvbs_connector_in>;
};
};


@ -506,3 +506,7 @@
<&clkc CLKID_FCLK_DIV2>;
clock-names = "core", "clkin0", "clkin1";
};
&vpu {
compatible = "amlogic,meson-gxbb-vpu", "amlogic,meson-gx-vpu";
};


@ -117,6 +117,16 @@
clocks = <&wifi32k>;
clock-names = "ext_clock";
};
cvbs-connector {
compatible = "composite-video-connector";
port {
cvbs_connector_in: endpoint {
remote-endpoint = <&cvbs_vdac_out>;
};
};
};
};
&uart_AO {
@ -203,3 +213,9 @@
clocks = <&clkc CLKID_FCLK_DIV4>;
clock-names = "clkin0";
};
&cvbs_vdac_port {
cvbs_vdac_out: endpoint {
remote-endpoint = <&cvbs_connector_in>;
};
};


@ -43,7 +43,7 @@
#include "meson-gx.dtsi"
#include <dt-bindings/clock/gxbb-clkc.h>
#include <dt-bindings/gpio/meson-gxbb-gpio.h>
#include <dt-bindings/gpio/meson-gxl-gpio.h>
/ {
compatible = "amlogic,meson-gxl";
@ -299,3 +299,7 @@
<&clkc CLKID_FCLK_DIV2>;
clock-names = "core", "clkin0", "clkin1";
};
&vpu {
compatible = "amlogic,meson-gxl-vpu", "amlogic,meson-gx-vpu";
};


@ -90,6 +90,16 @@
compatible = "mmc-pwrseq-emmc";
reset-gpios = <&gpio BOOT_9 GPIO_ACTIVE_LOW>;
};
cvbs-connector {
compatible = "composite-video-connector";
port {
cvbs_connector_in: endpoint {
remote-endpoint = <&cvbs_vdac_out>;
};
};
};
};
/* This UART is brought out to the DB9 connector */
@ -167,3 +177,9 @@
max-speed = <1000>;
};
};
&cvbs_vdac_port {
cvbs_vdac_out: endpoint {
remote-endpoint = <&cvbs_connector_in>;
};
};


@ -112,3 +112,7 @@
};
};
};
&vpu {
compatible = "amlogic,meson-gxm-vpu", "amlogic,meson-gx-vpu";
};


@ -81,7 +81,7 @@
#address-cells = <0>;
interrupt-controller;
reg = <0x0 0x2c001000 0 0x1000>,
<0x0 0x2c002000 0 0x1000>,
<0x0 0x2c002000 0 0x2000>,
<0x0 0x2c004000 0 0x2000>,
<0x0 0x2c006000 0 0x2000>;
interrupts = <1 9 0xf04>;


@ -64,6 +64,16 @@
reg = <0x0 0x86000000 0x0 0x200000>;
no-map;
};
memory@85800000 {
reg = <0x0 0x85800000 0x0 0x800000>;
no-map;
};
memory@86200000 {
reg = <0x0 0x86200000 0x0 0x2600000>;
no-map;
};
};
cpus {


@ -169,7 +169,7 @@
power-source = <3300>;
};
sdhi0_pins_uhs: sd0 {
sdhi0_pins_uhs: sd0_uhs {
groups = "sdhi0_data4", "sdhi0_ctrl";
function = "sdhi0";
power-source = <1800>;


@ -331,6 +331,7 @@ CONFIG_DRM_VC4=m
CONFIG_DRM_PANEL_SIMPLE=m
CONFIG_DRM_I2C_ADV7511=m
CONFIG_DRM_HISI_KIRIN=m
CONFIG_DRM_MESON=m
CONFIG_FB=y
CONFIG_FB_ARMCLCD=y
CONFIG_BACKLIGHT_GENERIC=m


@ -9,9 +9,17 @@
struct task_struct;
/*
* We don't use read_sysreg() as we want the compiler to cache the value where
* possible.
*/
static __always_inline struct task_struct *get_current(void)
{
return (struct task_struct *)read_sysreg(sp_el0);
unsigned long sp_el0;
asm ("mrs %0, sp_el0" : "=r" (sp_el0));
return (struct task_struct *)sp_el0;
}
#define current get_current()


@ -524,7 +524,8 @@ EXPORT_SYMBOL(dummy_dma_ops);
static int __init arm64_dma_init(void)
{
if (swiotlb_force || max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
if (swiotlb_force == SWIOTLB_FORCE ||
max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
swiotlb = 1;
return atomic_pool_init();


@ -88,21 +88,21 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
break;
pud = pud_offset(pgd, addr);
printk(", *pud=%016llx", pud_val(*pud));
pr_cont(", *pud=%016llx", pud_val(*pud));
if (pud_none(*pud) || pud_bad(*pud))
break;
pmd = pmd_offset(pud, addr);
printk(", *pmd=%016llx", pmd_val(*pmd));
pr_cont(", *pmd=%016llx", pmd_val(*pmd));
if (pmd_none(*pmd) || pmd_bad(*pmd))
break;
pte = pte_offset_map(pmd, addr);
printk(", *pte=%016llx", pte_val(*pte));
pr_cont(", *pte=%016llx", pte_val(*pte));
pte_unmap(pte);
} while(0);
printk("\n");
pr_cont("\n");
}
#ifdef CONFIG_ARM64_HW_AFDBM


@ -401,7 +401,8 @@ static void __init free_unused_memmap(void)
*/
void __init mem_init(void)
{
if (swiotlb_force || max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
if (swiotlb_force == SWIOTLB_FORCE ||
max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
swiotlb_init(1);
set_max_mapnr(pfn_to_page(max_pfn) - mem_map);


@ -521,6 +521,9 @@ void *kvm_mips_build_exit(void *addr)
uasm_i_and(&p, V0, V0, AT);
uasm_i_lui(&p, AT, ST0_CU0 >> 16);
uasm_i_or(&p, V0, V0, AT);
#ifdef CONFIG_64BIT
uasm_i_ori(&p, V0, V0, ST0_SX | ST0_UX);
#endif
uasm_i_mtc0(&p, V0, C0_STATUS);
uasm_i_ehb(&p);
@ -643,7 +646,7 @@ static void *kvm_mips_build_ret_to_guest(void *addr)
/* Setup status register for running guest in UM */
uasm_i_ori(&p, V1, V1, ST0_EXL | KSU_USER | ST0_IE);
UASM_i_LA(&p, AT, ~(ST0_CU0 | ST0_MX));
UASM_i_LA(&p, AT, ~(ST0_CU0 | ST0_MX | ST0_SX | ST0_UX));
uasm_i_and(&p, V1, V1, AT);
uasm_i_mtc0(&p, V1, C0_STATUS);
uasm_i_ehb(&p);


@ -360,7 +360,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
dump_handler("kvm_exit", gebase + 0x2000, vcpu->arch.vcpu_run);
/* Invalidate the icache for these ranges */
local_flush_icache_range((unsigned long)gebase,
flush_icache_range((unsigned long)gebase,
(unsigned long)gebase + ALIGN(size, PAGE_SIZE));
/*


@ -68,12 +68,10 @@ static struct dma_map_ops swiotlb_dma_ops = {
*/
int __init pci_swiotlb_detect_override(void)
{
int use_swiotlb = swiotlb | swiotlb_force;
if (swiotlb_force)
if (swiotlb_force == SWIOTLB_FORCE)
swiotlb = 1;
return use_swiotlb;
return swiotlb;
}
IOMMU_INIT_FINISH(pci_swiotlb_detect_override,
pci_xen_swiotlb_detect,


@ -132,12 +132,6 @@ module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
#define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5
#define VMX_VPID_EXTENT_SUPPORTED_MASK \
(VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT | \
VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT | \
VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT | \
VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)
/*
* Hyper-V requires all of these, so mark them as supported even though
* they are just treated the same as all-context.
@ -10473,12 +10467,12 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
!nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4)) {
nested_vmx_entry_failure(vcpu, vmcs12,
EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
goto out;
return 1;
}
if (vmcs12->vmcs_link_pointer != -1ull) {
nested_vmx_entry_failure(vcpu, vmcs12,
EXIT_REASON_INVALID_STATE, ENTRY_FAIL_VMCS_LINK_PTR);
goto out;
return 1;
}
/*
@ -10498,7 +10492,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME))) {
nested_vmx_entry_failure(vcpu, vmcs12,
EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
goto out;
return 1;
}
}
@ -10516,7 +10510,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
ia32e != !!(vmcs12->host_ia32_efer & EFER_LME)) {
nested_vmx_entry_failure(vcpu, vmcs12,
EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
goto out;
return 1;
}
}


@ -3070,6 +3070,8 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
memset(&events->reserved, 0, sizeof(events->reserved));
}
static void kvm_set_hflags(struct kvm_vcpu *vcpu, unsigned emul_flags);
static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
struct kvm_vcpu_events *events)
{
@ -3106,10 +3108,13 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
vcpu->arch.apic->sipi_vector = events->sipi_vector;
if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
u32 hflags = vcpu->arch.hflags;
if (events->smi.smm)
vcpu->arch.hflags |= HF_SMM_MASK;
hflags |= HF_SMM_MASK;
else
vcpu->arch.hflags &= ~HF_SMM_MASK;
hflags &= ~HF_SMM_MASK;
kvm_set_hflags(vcpu, hflags);
vcpu->arch.smi_pending = events->smi.pending;
if (events->smi.smm_inside_nmi)
vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;


@ -1172,6 +1172,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
set_memory_ro((unsigned long)header, header->pages);
prog->bpf_func = (void *)image;
prog->jited = 1;
} else {
prog = orig_prog;
}
out_addrs:


@ -48,7 +48,7 @@ int __init pci_xen_swiotlb_detect(void)
* activate this IOMMU. If running as PV privileged, activate it
* irregardless.
*/
if ((xen_initial_domain() || swiotlb || swiotlb_force))
if (xen_initial_domain() || swiotlb || swiotlb_force == SWIOTLB_FORCE)
xen_swiotlb = 1;
/* If we are running under Xen, we MUST disable the native SWIOTLB.


@ -713,10 +713,9 @@ static void __init xen_reserve_xen_mfnlist(void)
size = PFN_PHYS(xen_start_info->nr_p2m_frames);
}
if (!xen_is_e820_reserved(start, size)) {
memblock_reserve(start, size);
if (!xen_is_e820_reserved(start, size))
return;
}
#ifdef CONFIG_X86_32
/*
@ -727,6 +726,7 @@ static void __init xen_reserve_xen_mfnlist(void)
BUG();
#else
xen_relocate_p2m();
memblock_free(start, size);
#endif
}


@ -114,7 +114,7 @@ void __init acpi_watchdog_init(void)
pdev = platform_device_register_simple("wdat_wdt", PLATFORM_DEVID_NONE,
resources, nresources);
if (IS_ERR(pdev))
pr_err("Failed to create platform device\n");
pr_err("Device creation failed: %ld\n", PTR_ERR(pdev));
kfree(resources);


@ -98,7 +98,15 @@ static int find_child_checks(struct acpi_device *adev, bool check_children)
if (check_children && list_empty(&adev->children))
return -ENODEV;
return sta_present ? FIND_CHILD_MAX_SCORE : FIND_CHILD_MIN_SCORE;
/*
* If the device has a _HID (or _CID) returning a valid ACPI/PNP
* device ID, it is better to make it look less attractive here, so that
* the other device with the same _ADR value (that may not have a valid
* device ID) can be matched going forward. [This means a second spec
* violation in a row, so whatever we do here is best effort anyway.]
*/
return sta_present && list_empty(&adev->pnp.ids) ?
FIND_CHILD_MAX_SCORE : FIND_CHILD_MIN_SCORE;
}
struct acpi_device *acpi_find_child_device(struct acpi_device *parent,
@ -250,7 +258,6 @@ int acpi_bind_one(struct device *dev, struct acpi_device *acpi_dev)
return 0;
err:
acpi_dma_deconfigure(dev);
ACPI_COMPANION_SET(dev, NULL);
put_device(dev);
put_device(&acpi_dev->dev);


@ -37,6 +37,7 @@ void acpi_amba_init(void);
static inline void acpi_amba_init(void) {}
#endif
int acpi_sysfs_init(void);
void acpi_gpe_apply_masked_gpes(void);
void acpi_container_init(void);
void acpi_memory_hotplug_init(void);
#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC


@ -2074,6 +2074,7 @@ int __init acpi_scan_init(void)
}
}
acpi_gpe_apply_masked_gpes();
acpi_update_all_gpes();
acpi_ec_ecdt_start();


@ -708,6 +708,62 @@ end:
return result ? result : size;
}
/*
* A Quirk Mechanism for GPE Flooding Prevention:
*
* Quirks may be needed to prevent GPE flooding on a specific GPE. The
* flooding typically cannot be detected and automatically prevented by
* ACPI_GPE_DISPATCH_NONE check because there is a _Lxx/_Exx prepared in
* the AML tables. This normally indicates a feature gap in Linux, thus
* instead of providing endless quirk tables, we provide a boot parameter
* for those who want this quirk. For example, if the users want to prevent
* the GPE flooding for GPE 00, they need to specify the following boot
* parameter:
* acpi_mask_gpe=0x00
* The masking status can be modified by the following runtime controlling
* interface:
* echo unmask > /sys/firmware/acpi/interrupts/gpe00
*/
/*
* Currently, the GPE flooding prevention only supports to mask the GPEs
* numbered from 00 to 7f.
*/
#define ACPI_MASKABLE_GPE_MAX 0x80
static u64 __initdata acpi_masked_gpes;
static int __init acpi_gpe_set_masked_gpes(char *val)
{
u8 gpe;
if (kstrtou8(val, 0, &gpe) || gpe > ACPI_MASKABLE_GPE_MAX)
return -EINVAL;
acpi_masked_gpes |= ((u64)1<<gpe);
return 1;
}
__setup("acpi_mask_gpe=", acpi_gpe_set_masked_gpes);
void __init acpi_gpe_apply_masked_gpes(void)
{
acpi_handle handle;
acpi_status status;
u8 gpe;
for (gpe = 0;
gpe < min_t(u8, ACPI_MASKABLE_GPE_MAX, acpi_current_gpe_count);
gpe++) {
if (acpi_masked_gpes & ((u64)1<<gpe)) {
status = acpi_get_gpe_device(gpe, &handle);
if (ACPI_SUCCESS(status)) {
pr_info("Masking GPE 0x%x.\n", gpe);
(void)acpi_mask_gpe(handle, gpe, TRUE);
}
}
}
}
void acpi_irq_stats_init(void)
{
acpi_status status;


@ -626,6 +626,7 @@ static int genpd_runtime_resume(struct device *dev)
out:
/* Measure resume latency. */
time_start = 0;
if (timed && runtime_pm)
time_start = ktime_get();


@ -768,5 +768,5 @@ fail:
kfree(clks);
iounmap(base);
}
CLK_OF_DECLARE(stm32f42xx_rcc, "st,stm32f42xx-rcc", stm32f4_rcc_init);
CLK_OF_DECLARE(stm32f46xx_rcc, "st,stm32f469-rcc", stm32f4_rcc_init);
CLK_OF_DECLARE_DRIVER(stm32f42xx_rcc, "st,stm32f42xx-rcc", stm32f4_rcc_init);
CLK_OF_DECLARE_DRIVER(stm32f46xx_rcc, "st,stm32f469-rcc", stm32f4_rcc_init);


@ -37,12 +37,14 @@
* @smstpcr: module stop control register
* @mstpsr: module stop status register (optional)
* @lock: protects writes to SMSTPCR
* @width_8bit: registers are 8-bit, not 32-bit
*/
struct mstp_clock_group {
struct clk_onecell_data data;
void __iomem *smstpcr;
void __iomem *mstpsr;
spinlock_t lock;
bool width_8bit;
};
/**
@ -59,6 +61,18 @@ struct mstp_clock {
#define to_mstp_clock(_hw) container_of(_hw, struct mstp_clock, hw)
static inline u32 cpg_mstp_read(struct mstp_clock_group *group,
u32 __iomem *reg)
{
return group->width_8bit ? readb(reg) : clk_readl(reg);
}
static inline void cpg_mstp_write(struct mstp_clock_group *group, u32 val,
u32 __iomem *reg)
{
group->width_8bit ? writeb(val, reg) : clk_writel(val, reg);
}
static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable)
{
struct mstp_clock *clock = to_mstp_clock(hw);
@ -70,12 +84,12 @@ static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable)
spin_lock_irqsave(&group->lock, flags);
value = clk_readl(group->smstpcr);
value = cpg_mstp_read(group, group->smstpcr);
if (enable)
value &= ~bitmask;
else
value |= bitmask;
clk_writel(value, group->smstpcr);
cpg_mstp_write(group, value, group->smstpcr);
spin_unlock_irqrestore(&group->lock, flags);
@ -83,7 +97,7 @@ static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable)
return 0;
for (i = 1000; i > 0; --i) {
if (!(clk_readl(group->mstpsr) & bitmask))
if (!(cpg_mstp_read(group, group->mstpsr) & bitmask))
break;
cpu_relax();
}
@ -114,9 +128,9 @@ static int cpg_mstp_clock_is_enabled(struct clk_hw *hw)
u32 value;
if (group->mstpsr)
value = clk_readl(group->mstpsr);
value = cpg_mstp_read(group, group->mstpsr);
else
value = clk_readl(group->smstpcr);
value = cpg_mstp_read(group, group->smstpcr);
return !(value & BIT(clock->bit_index));
}
@ -188,6 +202,9 @@ static void __init cpg_mstp_clocks_init(struct device_node *np)
return;
}
if (of_device_is_compatible(np, "renesas,r7s72100-mstp-clocks"))
group->width_8bit = true;
for (i = 0; i < MSTP_MAX_CLOCKS; ++i)
clks[i] = ERR_PTR(-ENOENT);


@ -26,6 +26,8 @@ static const struct of_device_id machines[] __initconst = {
{ .compatible = "allwinner,sun8i-a83t", },
{ .compatible = "allwinner,sun8i-h3", },
{ .compatible = "apm,xgene-shadowcat", },
{ .compatible = "arm,integrator-ap", },
{ .compatible = "arm,integrator-cp", },


@ -857,13 +857,13 @@ static struct freq_attr *hwp_cpufreq_attrs[] = {
NULL,
};
static void intel_pstate_hwp_set(const struct cpumask *cpumask)
static void intel_pstate_hwp_set(struct cpufreq_policy *policy)
{
int min, hw_min, max, hw_max, cpu, range, adj_range;
struct perf_limits *perf_limits = limits;
u64 value, cap;
for_each_cpu(cpu, cpumask) {
for_each_cpu(cpu, policy->cpus) {
int max_perf_pct, min_perf_pct;
struct cpudata *cpu_data = all_cpu_data[cpu];
s16 epp;
@ -949,7 +949,7 @@ skip_epp:
static int intel_pstate_hwp_set_policy(struct cpufreq_policy *policy)
{
if (hwp_active)
intel_pstate_hwp_set(policy->cpus);
intel_pstate_hwp_set(policy);
return 0;
}
@ -968,19 +968,28 @@ static int intel_pstate_hwp_save_state(struct cpufreq_policy *policy)
static int intel_pstate_resume(struct cpufreq_policy *policy)
{
int ret;
if (!hwp_active)
return 0;
mutex_lock(&intel_pstate_limits_lock);
all_cpu_data[policy->cpu]->epp_policy = 0;
return intel_pstate_hwp_set_policy(policy);
ret = intel_pstate_hwp_set_policy(policy);
mutex_unlock(&intel_pstate_limits_lock);
return ret;
}
static void intel_pstate_hwp_set_online_cpus(void)
static void intel_pstate_update_policies(void)
{
get_online_cpus();
intel_pstate_hwp_set(cpu_online_mask);
put_online_cpus();
int cpu;
for_each_possible_cpu(cpu)
cpufreq_update_policy(cpu);
}
/************************** debugfs begin ************************/
@ -1018,10 +1027,6 @@ static void __init intel_pstate_debug_expose_params(void)
struct dentry *debugfs_parent;
int i = 0;
if (hwp_active ||
pstate_funcs.get_target_pstate == get_target_pstate_use_cpu_load)
return;
debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
if (IS_ERR_OR_NULL(debugfs_parent))
return;
@ -1105,11 +1110,10 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
limits->no_turbo = clamp_t(int, input, 0, 1);
if (hwp_active)
intel_pstate_hwp_set_online_cpus();
mutex_unlock(&intel_pstate_limits_lock);
intel_pstate_update_policies();
return count;
}
@ -1134,11 +1138,10 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
limits->max_perf_pct);
limits->max_perf = div_ext_fp(limits->max_perf_pct, 100);
if (hwp_active)
intel_pstate_hwp_set_online_cpus();
mutex_unlock(&intel_pstate_limits_lock);
intel_pstate_update_policies();
return count;
}
@ -1163,11 +1166,10 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
limits->min_perf_pct);
limits->min_perf = div_ext_fp(limits->min_perf_pct, 100);
if (hwp_active)
intel_pstate_hwp_set_online_cpus();
mutex_unlock(&intel_pstate_limits_lock);
intel_pstate_update_policies();
return count;
}
@ -2153,8 +2155,12 @@ static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
if (per_cpu_limits)
perf_limits = cpu->perf_limits;
mutex_lock(&intel_pstate_limits_lock);
intel_pstate_update_perf_limits(policy, perf_limits);
mutex_unlock(&intel_pstate_limits_lock);
return 0;
}
@ -2487,7 +2493,10 @@ hwp_cpu_matched:
if (rc)
goto out;
if (intel_pstate_driver == &intel_pstate && !hwp_active &&
pstate_funcs.get_target_pstate != get_target_pstate_use_cpu_load)
intel_pstate_debug_expose_params();
intel_pstate_sysfs_expose_params();
if (hwp_active)


@ -593,11 +593,16 @@ struct devfreq *devfreq_add_device(struct device *dev,
list_add(&devfreq->node, &devfreq_list);
governor = find_devfreq_governor(devfreq->governor_name);
if (!IS_ERR(governor))
if (IS_ERR(governor)) {
dev_err(dev, "%s: Unable to find governor for the device\n",
__func__);
err = PTR_ERR(governor);
goto err_init;
}
devfreq->governor = governor;
if (devfreq->governor)
err = devfreq->governor->event_handler(devfreq,
DEVFREQ_GOV_START, NULL);
err = devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_START,
NULL);
if (err) {
dev_err(dev, "%s: Unable to start governor for the device\n",
__func__);

View File

@ -497,7 +497,7 @@ passive:
if (IS_ERR(bus->devfreq)) {
dev_err(dev,
"failed to add devfreq dev with passive governor\n");
ret = -EPROBE_DEFER;
ret = PTR_ERR(bus->devfreq);
goto err;
}


@ -721,11 +721,17 @@ static int scpi_sensor_get_value(u16 sensor, u64 *val)
ret = scpi_send_message(CMD_SENSOR_VALUE, &id, sizeof(id),
&buf, sizeof(buf));
if (!ret)
if (ret)
return ret;
if (scpi_info->is_legacy)
/* only 32-bits supported, hi_val can be junk */
*val = le32_to_cpu(buf.lo_val);
else
*val = (u64)le32_to_cpu(buf.hi_val) << 32 |
le32_to_cpu(buf.lo_val);
return ret;
return 0;
}
static int scpi_device_get_power_state(u16 dev_id)


@ -270,8 +270,7 @@ static int suspend_test_thread(void *arg)
struct cpuidle_device *dev;
struct cpuidle_driver *drv;
/* No need for an actual callback, we just want to wake up the CPU. */
struct timer_list wakeup_timer =
TIMER_INITIALIZER(dummy_callback, 0, 0);
struct timer_list wakeup_timer;
/* Wait for the main thread to give the start signal. */
wait_for_completion(&suspend_threads_started);
@ -287,6 +286,7 @@ static int suspend_test_thread(void *arg)
pr_info("CPU %d entering suspend cycles, states 1 through %d\n",
cpu, drv->state_count - 1);
setup_timer_on_stack(&wakeup_timer, dummy_callback, 0);
for (i = 0; i < NUM_SUSPEND_CYCLE; ++i) {
int index;
/*


@ -123,6 +123,7 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu,
u8 changed = old ^ new;
int ret;
memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
if (!(changed & PCI_COMMAND_MEMORY))
return 0;
@ -142,7 +143,6 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu,
return ret;
}
memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
return 0;
}
@ -240,7 +240,7 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
if (WARN_ON(bytes > 4))
return -EINVAL;
if (WARN_ON(offset + bytes >= INTEL_GVT_MAX_CFG_SPACE_SZ))
if (WARN_ON(offset + bytes > INTEL_GVT_MAX_CFG_SPACE_SZ))
return -EINVAL;
/* First check if it's PCI_COMMAND */


@ -1998,6 +1998,8 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
INIT_LIST_HEAD(&gtt->oos_page_list_head);
INIT_LIST_HEAD(&gtt->post_shadow_list_head);
intel_vgpu_reset_ggtt(vgpu);
ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT,
NULL, 1, 0);
if (IS_ERR(ggtt_mm)) {
@ -2206,6 +2208,7 @@ int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
int intel_gvt_init_gtt(struct intel_gvt *gvt)
{
int ret;
void *page_addr;
gvt_dbg_core("init gtt\n");
@ -2218,6 +2221,23 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
return -ENODEV;
}
gvt->gtt.scratch_ggtt_page =
alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO);
if (!gvt->gtt.scratch_ggtt_page) {
gvt_err("fail to allocate scratch ggtt page\n");
return -ENOMEM;
}
page_addr = page_address(gvt->gtt.scratch_ggtt_page);
gvt->gtt.scratch_ggtt_mfn =
intel_gvt_hypervisor_virt_to_mfn(page_addr);
if (gvt->gtt.scratch_ggtt_mfn == INTEL_GVT_INVALID_ADDR) {
gvt_err("fail to translate scratch ggtt page\n");
__free_page(gvt->gtt.scratch_ggtt_page);
return -EFAULT;
}
if (enable_out_of_sync) {
ret = setup_spt_oos(gvt);
if (ret) {
@ -2239,6 +2259,41 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
*/
void intel_gvt_clean_gtt(struct intel_gvt *gvt)
{
__free_page(gvt->gtt.scratch_ggtt_page);
if (enable_out_of_sync)
clean_spt_oos(gvt);
}
/**
* intel_vgpu_reset_ggtt - reset the GGTT entry
* @vgpu: a vGPU
*
* This function is called at the vGPU create stage
* to reset all the GGTT entries.
*
*/
void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
u32 index;
u32 offset;
u32 num_entries;
struct intel_gvt_gtt_entry e;
memset(&e, 0, sizeof(struct intel_gvt_gtt_entry));
e.type = GTT_TYPE_GGTT_PTE;
ops->set_pfn(&e, gvt->gtt.scratch_ggtt_mfn);
e.val64 |= _PAGE_PRESENT;
index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
for (offset = 0; offset < num_entries; offset++)
ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
for (offset = 0; offset < num_entries; offset++)
ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
}


@ -81,6 +81,9 @@ struct intel_gvt_gtt {
struct list_head oos_page_use_list_head;
struct list_head oos_page_free_list_head;
struct list_head mm_lru_list_head;
struct page *scratch_ggtt_page;
unsigned long scratch_ggtt_mfn;
};
enum {
@ -202,6 +205,7 @@ struct intel_vgpu_gtt {
extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu);
extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu);
extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
extern void intel_gvt_clean_gtt(struct intel_gvt *gvt);


@ -175,6 +175,7 @@ struct intel_vgpu {
struct notifier_block group_notifier;
struct kvm *kvm;
struct work_struct release_work;
atomic_t released;
} vdev;
#endif
};


@ -114,12 +114,15 @@ out:
static kvm_pfn_t gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
{
struct gvt_dma *entry;
kvm_pfn_t pfn;
mutex_lock(&vgpu->vdev.cache_lock);
entry = __gvt_cache_find(vgpu, gfn);
mutex_unlock(&vgpu->vdev.cache_lock);
return entry == NULL ? 0 : entry->pfn;
entry = __gvt_cache_find(vgpu, gfn);
pfn = (entry == NULL) ? 0 : entry->pfn;
mutex_unlock(&vgpu->vdev.cache_lock);
return pfn;
}
static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kvm_pfn_t pfn)
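The gvt_cache_find() rewrite above matters because the node returned by __gvt_cache_find() is only protected while cache_lock is held; the old code dereferenced entry->pfn after the unlock and could race with gvt_cache_remove(). A sketch of the copy-out-under-lock pattern, using hypothetical structures rather than the real gvt ones:

#include <linux/mutex.h>
#include <linux/rbtree.h>

struct cache_entry {
	struct rb_node node;
	unsigned long gfn;
	unsigned long pfn;
};

/* Walk the tree; caller must hold the lock that protects it. */
static struct cache_entry *cache_lookup(struct rb_root *root, unsigned long gfn)
{
	struct rb_node *n = root->rb_node;

	while (n) {
		struct cache_entry *e = rb_entry(n, struct cache_entry, node);

		if (gfn < e->gfn)
			n = n->rb_left;
		else if (gfn > e->gfn)
			n = n->rb_right;
		else
			return e;
	}
	return NULL;
}

static unsigned long cache_find_pfn(struct rb_root *root, struct mutex *lock,
				    unsigned long gfn)
{
	struct cache_entry *entry;
	unsigned long pfn;

	mutex_lock(lock);
	entry = cache_lookup(root, gfn);
	pfn = entry ? entry->pfn : 0;	/* copy out while the entry cannot be freed */
	mutex_unlock(lock);

	return pfn;			/* the entry itself may be gone by now */
}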
@ -166,7 +169,7 @@ static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn)
{
struct device *dev = &vgpu->vdev.mdev->dev;
struct device *dev = mdev_dev(vgpu->vdev.mdev);
struct gvt_dma *this;
unsigned long g1;
int rc;
@ -195,7 +198,7 @@ static void gvt_cache_destroy(struct intel_vgpu *vgpu)
{
struct gvt_dma *dma;
struct rb_node *node = NULL;
struct device *dev = &vgpu->vdev.mdev->dev;
struct device *dev = mdev_dev(vgpu->vdev.mdev);
unsigned long gfn;
mutex_lock(&vgpu->vdev.cache_lock);
@ -396,7 +399,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
struct device *pdev;
void *gvt;
pdev = mdev->parent->dev;
pdev = mdev_parent_dev(mdev);
gvt = kdev_to_i915(pdev)->gvt;
type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
@ -418,7 +421,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
mdev_set_drvdata(mdev, vgpu);
gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
dev_name(&mdev->dev));
dev_name(mdev_dev(mdev)));
return 0;
}
@ -482,7 +485,7 @@ static int intel_vgpu_open(struct mdev_device *mdev)
vgpu->vdev.group_notifier.notifier_call = intel_vgpu_group_notifier;
events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
ret = vfio_register_notifier(&mdev->dev, VFIO_IOMMU_NOTIFY, &events,
ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events,
&vgpu->vdev.iommu_notifier);
if (ret != 0) {
gvt_err("vfio_register_notifier for iommu failed: %d\n", ret);
@ -490,17 +493,26 @@ static int intel_vgpu_open(struct mdev_device *mdev)
}
events = VFIO_GROUP_NOTIFY_SET_KVM;
ret = vfio_register_notifier(&mdev->dev, VFIO_GROUP_NOTIFY, &events,
ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events,
&vgpu->vdev.group_notifier);
if (ret != 0) {
gvt_err("vfio_register_notifier for group failed: %d\n", ret);
goto undo_iommu;
}
return kvmgt_guest_init(mdev);
ret = kvmgt_guest_init(mdev);
if (ret)
goto undo_group;
atomic_set(&vgpu->vdev.released, 0);
return ret;
undo_group:
vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
&vgpu->vdev.group_notifier);
undo_iommu:
vfio_unregister_notifier(&mdev->dev, VFIO_IOMMU_NOTIFY,
vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
&vgpu->vdev.iommu_notifier);
out:
return ret;
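intel_vgpu_open() now unwinds in strict reverse order when kvmgt_guest_init() fails: the new undo_group label drops the group notifier and falls through to undo_iommu. The shape of that unwinding, reduced to stubs (the helper names below are placeholders, not real kvmgt functions):

/* Hypothetical stand-ins for the two notifier registrations and guest init. */
static int register_iommu_nb(void)    { return 0; }	/* stub */
static int register_group_nb(void)    { return 0; }	/* stub */
static int init_guest_state(void)     { return 0; }	/* stub */
static void unregister_iommu_nb(void) { }		/* stub */
static void unregister_group_nb(void) { }		/* stub */

static int open_with_unwind(void)
{
	int ret;

	ret = register_iommu_nb();
	if (ret)
		goto out;

	ret = register_group_nb();
	if (ret)
		goto undo_iommu;

	ret = init_guest_state();
	if (ret)
		goto undo_group;

	return 0;	/* everything registered, guest initialized */

undo_group:
	unregister_group_nb();
undo_iommu:
	unregister_iommu_nb();
out:
	return ret;
}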
@ -509,17 +521,26 @@ out:
static void __intel_vgpu_release(struct intel_vgpu *vgpu)
{
struct kvmgt_guest_info *info;
int ret;
if (!handle_valid(vgpu->handle))
return;
vfio_unregister_notifier(&vgpu->vdev.mdev->dev, VFIO_IOMMU_NOTIFY,
if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
return;
ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY,
&vgpu->vdev.iommu_notifier);
vfio_unregister_notifier(&vgpu->vdev.mdev->dev, VFIO_GROUP_NOTIFY,
WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret);
ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_GROUP_NOTIFY,
&vgpu->vdev.group_notifier);
WARN(ret, "vfio_unregister_notifier for group failed: %d\n", ret);
info = (struct kvmgt_guest_info *)vgpu->handle;
kvmgt_guest_exit(info);
vgpu->vdev.kvm = NULL;
vgpu->handle = 0;
}
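The new released flag makes __intel_vgpu_release() safe to reach more than once, presumably because the release work item and the direct release path can both get here; atomic_cmpxchg() lets only the first caller proceed. The guard in isolation, with hypothetical names:

#include <linux/atomic.h>

struct guarded_device {
	atomic_t released;	/* 0 = live, 1 = teardown already ran */
	/* ... */
};

static void guarded_release(struct guarded_device *d)
{
	/* Exactly one caller flips 0 -> 1; later callers see 1 and bail out. */
	if (atomic_cmpxchg(&d->released, 0, 1))
		return;

	/* ... teardown that must run at most once ... */
}

static void guarded_open(struct guarded_device *d)
{
	/* ... setup ... */
	atomic_set(&d->released, 0);	/* re-arm, mirroring intel_vgpu_open() */
}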
@ -534,6 +555,7 @@ static void intel_vgpu_release_work(struct work_struct *work)
{
struct intel_vgpu *vgpu = container_of(work, struct intel_vgpu,
vdev.release_work);
__intel_vgpu_release(vgpu);
}
@ -1089,7 +1111,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
return 0;
}
static const struct parent_ops intel_vgpu_ops = {
static const struct mdev_parent_ops intel_vgpu_ops = {
.supported_type_groups = intel_vgpu_type_groups,
.create = intel_vgpu_create,
.remove = intel_vgpu_remove,
@ -1134,6 +1156,10 @@ static int kvmgt_write_protect_add(unsigned long handle, u64 gfn)
idx = srcu_read_lock(&kvm->srcu);
slot = gfn_to_memslot(kvm, gfn);
if (!slot) {
srcu_read_unlock(&kvm->srcu, idx);
return -EINVAL;
}
spin_lock(&kvm->mmu_lock);
@ -1164,6 +1190,10 @@ static int kvmgt_write_protect_remove(unsigned long handle, u64 gfn)
idx = srcu_read_lock(&kvm->srcu);
slot = gfn_to_memslot(kvm, gfn);
if (!slot) {
srcu_read_unlock(&kvm->srcu, idx);
return -EINVAL;
}
spin_lock(&kvm->mmu_lock);
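Both slot checks added above fix the same leak: returning -EINVAL without dropping the SRCU read lock taken just before gfn_to_memslot() left the read-side critical section open. The balanced shape, sketched generically (the lookup itself is elided):

#include <linux/srcu.h>
#include <linux/errno.h>

/* Generic shape of a balanced SRCU read section with an early-error exit. */
static int with_srcu_slot(struct srcu_struct *srcu, const void *slot)
{
	int idx = srcu_read_lock(srcu);

	if (!slot) {
		srcu_read_unlock(srcu, idx);	/* unlock on every exit path */
		return -EINVAL;
	}

	/* ... use the SRCU-protected data ... */

	srcu_read_unlock(srcu, idx);
	return 0;
}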
@ -1311,18 +1341,14 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
{
struct intel_vgpu *vgpu;
if (!info) {
gvt_err("kvmgt_guest_info invalid\n");
return false;
}
vgpu = info->vgpu;
kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
kvmgt_protect_table_destroy(info);
gvt_cache_destroy(vgpu);
gvt_cache_destroy(info->vgpu);
vfree(info);
return true;
@ -1372,7 +1398,7 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
return pfn;
pfn = INTEL_GVT_INVALID_ADDR;
dev = &info->vgpu->vdev.mdev->dev;
dev = mdev_dev(info->vgpu->vdev.mdev);
rc = vfio_pin_pages(dev, &gfn, 1, IOMMU_READ | IOMMU_WRITE, &pfn);
if (rc != 1) {
gvt_err("vfio_pin_pages failed for gfn 0x%lx: %d\n", gfn, rc);

View File

@ -65,7 +65,7 @@ static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
int i, ret;
for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) {
mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)
mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va
+ i * PAGE_SIZE);
if (mfn == INTEL_GVT_INVALID_ADDR) {
gvt_err("fail to get MFN from VA\n");

View File

@ -244,14 +244,16 @@ err_phys:
static void
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
struct sg_table *pages)
struct sg_table *pages,
bool needs_clflush)
{
GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
if (obj->mm.madv == I915_MADV_DONTNEED)
obj->mm.dirty = false;
if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
if (needs_clflush &&
(obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
drm_clflush_sg(pages);
@ -263,7 +265,7 @@ static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
__i915_gem_object_release_shmem(obj, pages);
__i915_gem_object_release_shmem(obj, pages, false);
if (obj->mm.dirty) {
struct address_space *mapping = obj->base.filp->f_mapping;
@ -2231,7 +2233,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
struct sgt_iter sgt_iter;
struct page *page;
__i915_gem_object_release_shmem(obj, pages);
__i915_gem_object_release_shmem(obj, pages, true);
i915_gem_gtt_finish_pages(obj, pages);
@ -2304,15 +2306,6 @@ unlock:
mutex_unlock(&obj->mm.lock);
}
static unsigned int swiotlb_max_size(void)
{
#if IS_ENABLED(CONFIG_SWIOTLB)
return rounddown(swiotlb_nr_tbl() << IO_TLB_SHIFT, PAGE_SIZE);
#else
return 0;
#endif
}
static void i915_sg_trim(struct sg_table *orig_st)
{
struct sg_table new_st;
@ -2322,7 +2315,7 @@ static void i915_sg_trim(struct sg_table *orig_st)
if (orig_st->nents == orig_st->orig_nents)
return;
if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL))
if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN))
return;
new_sg = new_st.sgl;
@ -2360,7 +2353,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
max_segment = swiotlb_max_size();
max_segment = swiotlb_max_segment();
if (!max_segment)
max_segment = rounddown(UINT_MAX, PAGE_SIZE);
@ -2728,6 +2721,7 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
struct drm_i915_gem_request *request;
struct i915_gem_context *incomplete_ctx;
struct intel_timeline *timeline;
unsigned long flags;
bool ring_hung;
if (engine->irq_seqno_barrier)
@ -2763,13 +2757,20 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
if (i915_gem_context_is_default(incomplete_ctx))
return;
timeline = i915_gem_context_lookup_timeline(incomplete_ctx, engine);
spin_lock_irqsave(&engine->timeline->lock, flags);
spin_lock(&timeline->lock);
list_for_each_entry_continue(request, &engine->timeline->requests, link)
if (request->ctx == incomplete_ctx)
reset_request(request);
timeline = i915_gem_context_lookup_timeline(incomplete_ctx, engine);
list_for_each_entry(request, &timeline->requests, link)
reset_request(request);
spin_unlock(&timeline->lock);
spin_unlock_irqrestore(&engine->timeline->lock, flags);
}
void i915_gem_reset(struct drm_i915_private *dev_priv)
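The reset path above now holds the engine timeline lock (IRQ-safe) and nests the per-context timeline lock inside it while marking requests, rather than walking the context timeline unlocked. The nesting, reduced to a generic sketch with placeholder locks:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(engine_list_lock);	/* placeholder for the outer lock */
static DEFINE_SPINLOCK(context_list_lock);	/* placeholder for the inner lock */

static void walk_both_lists(void)
{
	unsigned long flags;

	spin_lock_irqsave(&engine_list_lock, flags);	/* IRQ-safe outer lock */
	spin_lock(&context_list_lock);			/* nested; IRQs already off */

	/* ... mark requests on the engine list, then on the context timeline ... */

	spin_unlock(&context_list_lock);
	spin_unlock_irqrestore(&engine_list_lock, flags);
}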

View File

@ -413,6 +413,25 @@ i915_gem_active_set(struct i915_gem_active *active,
rcu_assign_pointer(active->request, request);
}
/**
* i915_gem_active_set_retire_fn - updates the retirement callback
* @active: the active tracker
* @fn: the routine called when the request is retired
* @mutex: struct_mutex used to guard retirements
*
* i915_gem_active_set_retire_fn() updates the function pointer that
* is called when the final request associated with the @active tracker
* is retired.
*/
static inline void
i915_gem_active_set_retire_fn(struct i915_gem_active *active,
i915_gem_retire_fn fn,
struct mutex *mutex)
{
lockdep_assert_held(mutex);
active->retire = fn ?: i915_gem_retire_noop;
}
static inline struct drm_i915_gem_request *
__i915_gem_active_peek(const struct i915_gem_active *active)
{
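A usage sketch for the new setter, in the same spirit as the intel_overlay hunk further down; this is a fragment, assuming the usual i915_gem_request.h declarations are in scope, not standalone code:

/* Swap in a retirement callback (under struct_mutex), then track and emit the request. */
static void queue_tracked_request(struct i915_gem_active *active,
				  struct drm_i915_gem_request *req,
				  i915_gem_retire_fn retire,
				  struct mutex *struct_mutex)
{
	i915_gem_active_set_retire_fn(active, retire, struct_mutex);
	i915_gem_active_set(active, req);
	i915_add_request(req);
}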

View File

@ -16791,7 +16791,6 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
for_each_intel_crtc(dev, crtc) {
struct intel_crtc_state *crtc_state = crtc->config;
int pixclk = 0;
__drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
memset(crtc_state, 0, sizeof(*crtc_state));
@ -16803,23 +16802,9 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
crtc->base.enabled = crtc_state->base.enable;
crtc->active = crtc_state->base.active;
if (crtc_state->base.active) {
if (crtc_state->base.active)
dev_priv->active_crtcs |= 1 << crtc->pipe;
if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
pixclk = ilk_pipe_pixel_rate(crtc_state);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
pixclk = crtc_state->base.adjusted_mode.crtc_clock;
else
WARN_ON(dev_priv->display.modeset_calc_cdclk);
/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
pixclk = DIV_ROUND_UP(pixclk * 100, 95);
}
dev_priv->min_pixclk[crtc->pipe] = pixclk;
readout_plane_state(crtc);
DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
@ -16892,6 +16877,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
}
for_each_intel_crtc(dev, crtc) {
int pixclk = 0;
crtc->base.hwmode = crtc->config->base.adjusted_mode;
memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
@ -16919,10 +16906,23 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
*/
crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;
if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
pixclk = ilk_pipe_pixel_rate(crtc->config);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
pixclk = crtc->config->base.adjusted_mode.crtc_clock;
else
WARN_ON(dev_priv->display.modeset_calc_cdclk);
/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
if (IS_BROADWELL(dev_priv) && crtc->config->ips_enabled)
pixclk = DIV_ROUND_UP(pixclk * 100, 95);
drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
update_scanline_offset(crtc);
}
dev_priv->min_pixclk[crtc->pipe] = pixclk;
intel_pipe_config_sanity_check(dev_priv, crtc->config);
}
}

View File

@ -355,7 +355,8 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
struct intel_dp *intel_dp);
struct intel_dp *intel_dp,
bool force_disable_vdd);
static void
intel_dp_pps_init(struct drm_device *dev, struct intel_dp *intel_dp);
@ -516,7 +517,7 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
/* init power sequencer on this pipe and port */
intel_dp_init_panel_power_sequencer(dev, intel_dp);
intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, true);
/*
* Even vdd force doesn't work until we've made
@ -553,7 +554,7 @@ bxt_power_sequencer_idx(struct intel_dp *intel_dp)
* Only the HW needs to be reprogrammed, the SW state is fixed and
* has been setup during connector init.
*/
intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);
return 0;
}
@ -636,7 +637,7 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
port_name(port), pipe_name(intel_dp->pps_pipe));
intel_dp_init_panel_power_sequencer(dev, intel_dp);
intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);
}
void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
@ -2912,7 +2913,7 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
/* init power sequencer on this pipe and port */
intel_dp_init_panel_power_sequencer(dev, intel_dp);
intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, true);
}
static void vlv_pre_enable_dp(struct intel_encoder *encoder,
@ -5055,7 +5056,8 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
struct intel_dp *intel_dp)
struct intel_dp *intel_dp,
bool force_disable_vdd)
{
struct drm_i915_private *dev_priv = to_i915(dev);
u32 pp_on, pp_off, pp_div, port_sel = 0;
@ -5068,6 +5070,31 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
intel_pps_get_registers(dev_priv, intel_dp, &regs);
/*
* On some VLV machines the BIOS can leave the VDD
* enabled even on power sequencers which aren't
* hooked up to any port. This would mess up the
* power domain tracking the first time we pick
* one of these power sequencers for use since
* edp_panel_vdd_on() would notice that the VDD was
* already on and therefore wouldn't grab the power
* domain reference. Disable VDD first to avoid this.
* This also avoids spuriously turning the VDD on as
* soon as the new power sequencer gets initialized.
*/
if (force_disable_vdd) {
u32 pp = ironlake_get_pp_control(intel_dp);
WARN(pp & PANEL_POWER_ON, "Panel power already on\n");
if (pp & EDP_FORCE_VDD)
DRM_DEBUG_KMS("VDD already on, disabling first\n");
pp &= ~EDP_FORCE_VDD;
I915_WRITE(regs.pp_ctrl, pp);
}
pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
(seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
@ -5122,7 +5149,7 @@ static void intel_dp_pps_init(struct drm_device *dev,
vlv_initial_power_sequencer_setup(intel_dp);
} else {
intel_dp_init_panel_power_sequencer(dev, intel_dp);
intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);
}
}

View File

@ -216,7 +216,8 @@ static void intel_overlay_submit_request(struct intel_overlay *overlay,
{
GEM_BUG_ON(i915_gem_active_peek(&overlay->last_flip,
&overlay->i915->drm.struct_mutex));
overlay->last_flip.retire = retire;
i915_gem_active_set_retire_fn(&overlay->last_flip, retire,
&overlay->i915->drm.struct_mutex);
i915_gem_active_set(&overlay->last_flip, req);
i915_add_request(req);
}
@ -839,8 +840,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
if (ret)
goto out_unpin;
i915_gem_track_fb(overlay->vma->obj, new_bo,
INTEL_FRONTBUFFER_OVERLAY(pipe));
i915_gem_track_fb(overlay->vma ? overlay->vma->obj : NULL,
vma->obj, INTEL_FRONTBUFFER_OVERLAY(pipe));
overlay->old_vma = overlay->vma;
overlay->vma = vma;
@ -1430,6 +1431,8 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
overlay->contrast = 75;
overlay->saturation = 146;
init_request_active(&overlay->last_flip, NULL);
regs = intel_overlay_map_regs(overlay);
if (!regs)
goto out_unpin_bo;

View File

@ -1036,7 +1036,7 @@ static const u8 lm90_temp_emerg_index[3] = {
};
static const u8 lm90_min_alarm_bits[3] = { 5, 3, 11 };
static const u8 lm90_max_alarm_bits[3] = { 0, 4, 12 };
static const u8 lm90_max_alarm_bits[3] = { 6, 4, 12 };
static const u8 lm90_crit_alarm_bits[3] = { 0, 1, 9 };
static const u8 lm90_emergency_alarm_bits[3] = { 15, 13, 14 };
static const u8 lm90_fault_bits[3] = { 0, 2, 10 };

View File

@ -353,12 +353,12 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
[0] = {
.num = ST_ACCEL_FS_AVL_2G,
.value = 0x00,
.gain = IIO_G_TO_M_S_2(1024),
.gain = IIO_G_TO_M_S_2(1000),
},
[1] = {
.num = ST_ACCEL_FS_AVL_6G,
.value = 0x01,
.gain = IIO_G_TO_M_S_2(340),
.gain = IIO_G_TO_M_S_2(3000),
},
},
},
@ -366,6 +366,14 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.addr = 0x21,
.mask = 0x40,
},
/*
* Data Alignment Setting - needs to be set to get
* left-justified data like all other sensors.
*/
.das = {
.addr = 0x21,
.mask = 0x01,
},
.drdy_irq = {
.addr = 0x21,
.mask_int1 = 0x04,

View File

@ -561,7 +561,7 @@ config TI_ADS8688
config TI_AM335X_ADC
tristate "TI's AM335X ADC driver"
depends on MFD_TI_AM335X_TSCADC
depends on MFD_TI_AM335X_TSCADC && HAS_DMA
select IIO_BUFFER
select IIO_KFIFO_BUF
help

View File

@ -30,7 +30,9 @@ static int st_sensors_get_buffer_element(struct iio_dev *indio_dev, u8 *buf)
for_each_set_bit(i, indio_dev->active_scan_mask, num_data_channels) {
const struct iio_chan_spec *channel = &indio_dev->channels[i];
unsigned int bytes_to_read = channel->scan_type.realbits >> 3;
unsigned int bytes_to_read =
DIV_ROUND_UP(channel->scan_type.realbits +
channel->scan_type.shift, 8);
unsigned int storage_bytes =
channel->scan_type.storagebits >> 3;
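The new bytes_to_read accounts for scan_type.shift: a hypothetical 12-bit channel left-justified by 4 bits in a 16-bit word needs two bytes from the bus, which the old realbits >> 3 computation would have truncated to one. The arithmetic spelled out:

#include <linux/kernel.h>	/* DIV_ROUND_UP() */

/* Hypothetical channel layout: 12 significant bits, shifted up by 4. */
#define CHAN_REALBITS	12
#define CHAN_SHIFT	4

static unsigned int old_bytes_to_read(void)
{
	return CHAN_REALBITS >> 3;				/* = 1: under-reads the 16-bit sample */
}

static unsigned int new_bytes_to_read(void)
{
	return DIV_ROUND_UP(CHAN_REALBITS + CHAN_SHIFT, 8);	/* = 2: full sample */
}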

Some files were not shown because too many files have changed in this diff.