Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Simple overlapping changes in bpf land wrt. bpf_helper_defs.h handling.

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 31d518f35e
@@ -24,19 +24,16 @@ The wrapper can be run with:
 For more information on this wrapper (also called kunit_tool) checkout the
 :doc:`kunit-tool` page.
 
-Creating a kunitconfig
-======================
+Creating a .kunitconfig
+=======================
 The Python script is a thin wrapper around Kbuild. As such, it needs to be
-configured with a ``kunitconfig`` file. This file essentially contains the
+configured with a ``.kunitconfig`` file. This file essentially contains the
 regular Kernel config, with the specific test targets as well.
 
 .. code-block:: bash
 
-	git clone -b master https://kunit.googlesource.com/kunitconfig $PATH_TO_KUNITCONFIG_REPO
-	cd $PATH_TO_LINUX_REPO
-	ln -s $PATH_TO_KUNIT_CONFIG_REPO/kunitconfig kunitconfig
-
-You may want to add kunitconfig to your local gitignore.
+	cp arch/um/configs/kunit_defconfig .kunitconfig
 
 Verifying KUnit Works
 ---------------------

@@ -151,7 +148,7 @@ and the following to ``drivers/misc/Makefile``:
 
 	obj-$(CONFIG_MISC_EXAMPLE_TEST) += example-test.o
 
-Now add it to your ``kunitconfig``:
+Now add it to your ``.kunitconfig``:
 
 .. code-block:: none
 
@@ -230,12 +230,6 @@ simultaneously on two ports. The driver checks the consistency of the schedules
 against this restriction and errors out when appropriate. Schedule analysis is
 needed to avoid this, which is outside the scope of the document.
 
-At the moment, the time-aware scheduler can only be triggered based on a
-standalone clock and not based on PTP time. This means the base-time argument
-from tc-taprio is ignored and the schedule starts right away. It also means it
-is more difficult to phase-align the scheduler with the other devices in the
-network.
-
 Device Tree bindings and board design
 =====================================
 
@@ -771,6 +771,8 @@ F:	drivers/thermal/thermal_mmio.c
 
 AMAZON ETHERNET DRIVERS
 M:	Netanel Belgazal <netanel@amazon.com>
+M:	Arthur Kiyanovski <akiyano@amazon.com>
 R:	Guy Tzalik <gtzalik@amazon.com>
 R:	Saeed Bishara <saeedb@amazon.com>
 R:	Zorik Machulsky <zorik@amazon.com>
+L:	netdev@vger.kernel.org

@@ -7034,6 +7036,7 @@ L:	linux-acpi@vger.kernel.org
 S:	Maintained
 F:	Documentation/firmware-guide/acpi/gpio-properties.rst
 F:	drivers/gpio/gpiolib-acpi.c
+F:	drivers/gpio/gpiolib-acpi.h
 
 GPIO IR Transmitter
 M:	Sean Young <sean@mess.org>
Makefile

@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 5
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc4
 NAME = Kleptomaniac Octopus
 
 # *DOCUMENTATION*
@@ -246,6 +246,7 @@ check_syscall_nr:
 	 */
 	li t1, -1
 	beq a7, t1, ret_from_syscall_rejected
+	blt a7, t1, 1f
 	/* Call syscall */
 	la s0, sys_call_table
 	slli t0, a7, RISCV_LGPTR
@@ -9,8 +9,5 @@
 /*
  * Assembly functions that may be used (directly or indirectly) by modules
  */
-EXPORT_SYMBOL(__clear_user);
-EXPORT_SYMBOL(__asm_copy_to_user);
-EXPORT_SYMBOL(__asm_copy_from_user);
 EXPORT_SYMBOL(memset);
 EXPORT_SYMBOL(memcpy);
@@ -1,4 +1,5 @@
 #include <linux/linkage.h>
+#include <asm-generic/export.h>
 #include <asm/asm.h>
 #include <asm/csr.h>
 

@@ -66,6 +67,8 @@ ENTRY(__asm_copy_from_user)
 	j 3b
 ENDPROC(__asm_copy_to_user)
 ENDPROC(__asm_copy_from_user)
+EXPORT_SYMBOL(__asm_copy_to_user)
+EXPORT_SYMBOL(__asm_copy_from_user)
 
 
 ENTRY(__clear_user)

@@ -108,6 +111,7 @@ ENTRY(__clear_user)
 	bltu a0, a3, 5b
 	j 3b
 ENDPROC(__clear_user)
+EXPORT_SYMBOL(__clear_user)
 
 	.section .fixup,"ax"
 	.balign 4
@@ -22,6 +22,7 @@ void flush_icache_all(void)
 	else
 		on_each_cpu(ipi_remote_fence_i, NULL, 1);
 }
+EXPORT_SYMBOL(flush_icache_all);
 
 /*
  * Performs an icache flush for the given MM context. RISC-V has no direct
@@ -6,6 +6,7 @@
 #include <linux/compat.h>
 #include <linux/elevator.h>
 #include <linux/hdreg.h>
+#include <linux/pr.h>
 #include <linux/slab.h>
 #include <linux/syscalls.h>
 #include <linux/types.h>

@@ -354,6 +355,13 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 	 * but we call blkdev_ioctl, which gets the lock for us
 	 */
 	case BLKRRPART:
+	case BLKREPORTZONE:
+	case BLKRESETZONE:
+	case BLKOPENZONE:
+	case BLKCLOSEZONE:
+	case BLKFINISHZONE:
+	case BLKGETZONESZ:
+	case BLKGETNRZONES:
 		return blkdev_ioctl(bdev, mode, cmd,
 				(unsigned long)compat_ptr(arg));
 	case BLKBSZSET_32:

@@ -401,6 +409,14 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 	case BLKTRACETEARDOWN: /* compatible */
 		ret = blk_trace_ioctl(bdev, cmd, compat_ptr(arg));
 		return ret;
+	case IOC_PR_REGISTER:
+	case IOC_PR_RESERVE:
+	case IOC_PR_RELEASE:
+	case IOC_PR_PREEMPT:
+	case IOC_PR_PREEMPT_ABORT:
+	case IOC_PR_CLEAR:
+		return blkdev_ioctl(bdev, mode, cmd,
+				(unsigned long)compat_ptr(arg));
 	default:
 		if (disk->fops->compat_ioctl)
 			ret = disk->fops->compat_ioctl(bdev, mode, cmd, arg);
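For readers unfamiliar with the compat path, the pattern the new case blocks rely on is sketched below. This is an illustrative fragment, not part of the patch: the zone and persistent-reservation ioctls take a pointer argument whose layout is the same for 32-bit and 64-bit callers, so after widening the pointer with compat_ptr() the native blkdev_ioctl() can service the request unchanged.

	/* Illustrative sketch only (not from this merge): forwarding a
	 * layout-compatible ioctl from the compat entry point to the
	 * native handler.
	 */
	#include <linux/blkdev.h>
	#include <linux/compat.h>

	static long example_fwd_ioctl(struct block_device *bdev, fmode_t mode,
				      unsigned int cmd, unsigned long arg)
	{
		switch (cmd) {
		case BLKREPORTZONE:	/* same struct on 32- and 64-bit */
			return blkdev_ioctl(bdev, mode, cmd,
					    (unsigned long)compat_ptr(arg));
		default:
			return -ENOIOCTLCMD;
		}
	}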
@@ -76,8 +76,7 @@ enum brcm_ahci_version {
 };
 
 enum brcm_ahci_quirks {
-	BRCM_AHCI_QUIRK_NO_NCQ		= BIT(0),
-	BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE	= BIT(1),
+	BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE	= BIT(0),
 };
 
 struct brcm_ahci_priv {

@@ -213,19 +212,12 @@ static void brcm_sata_phys_disable(struct brcm_ahci_priv *priv)
 		brcm_sata_phy_disable(priv, i);
 }
 
-static u32 brcm_ahci_get_portmask(struct platform_device *pdev,
+static u32 brcm_ahci_get_portmask(struct ahci_host_priv *hpriv,
 				  struct brcm_ahci_priv *priv)
 {
-	void __iomem *ahci;
-	struct resource *res;
 	u32 impl;
 
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ahci");
-	ahci = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(ahci))
-		return 0;
-
-	impl = readl(ahci + HOST_PORTS_IMPL);
+	impl = readl(hpriv->mmio + HOST_PORTS_IMPL);
 
 	if (fls(impl) > SATA_TOP_MAX_PHYS)
 		dev_warn(priv->dev, "warning: more ports than PHYs (%#x)\n",

@@ -233,9 +225,6 @@ static u32 brcm_ahci_get_portmask(struct platform_device *pdev,
 	else if (!impl)
 		dev_info(priv->dev, "no ports found\n");
 
-	devm_iounmap(&pdev->dev, ahci);
-	devm_release_mem_region(&pdev->dev, res->start, resource_size(res));
-
 	return impl;
 }
 

@@ -285,6 +274,13 @@ static unsigned int brcm_ahci_read_id(struct ata_device *dev,
 	/* Perform the SATA PHY reset sequence */
 	brcm_sata_phy_disable(priv, ap->port_no);
 
+	/* Reset the SATA clock */
+	ahci_platform_disable_clks(hpriv);
+	msleep(10);
+
+	ahci_platform_enable_clks(hpriv);
+	msleep(10);
+
 	/* Bring the PHY back on */
 	brcm_sata_phy_enable(priv, ap->port_no);
 
@@ -347,11 +343,10 @@ static int brcm_ahci_suspend(struct device *dev)
 	struct ata_host *host = dev_get_drvdata(dev);
 	struct ahci_host_priv *hpriv = host->private_data;
 	struct brcm_ahci_priv *priv = hpriv->plat_data;
-	int ret;
 
-	ret = ahci_platform_suspend(dev);
 	brcm_sata_phys_disable(priv);
-	return ret;
+
+	return ahci_platform_suspend(dev);
 }
 
 static int brcm_ahci_resume(struct device *dev)

@@ -359,11 +354,44 @@ static int brcm_ahci_resume(struct device *dev)
 	struct ata_host *host = dev_get_drvdata(dev);
 	struct ahci_host_priv *hpriv = host->private_data;
 	struct brcm_ahci_priv *priv = hpriv->plat_data;
+	int ret;
+
+	/* Make sure clocks are turned on before re-configuration */
+	ret = ahci_platform_enable_clks(hpriv);
+	if (ret)
+		return ret;
 
 	brcm_sata_init(priv);
 	brcm_sata_phys_enable(priv);
 	brcm_sata_alpm_init(hpriv);
-	return ahci_platform_resume(dev);
+
+	/* Since we had to enable clocks earlier on, we cannot use
+	 * ahci_platform_resume() as-is since a second call to
+	 * ahci_platform_enable_resources() would bump up the resources
+	 * (regulators, clocks, PHYs) count artificially so we copy the part
+	 * after ahci_platform_enable_resources().
+	 */
+	ret = ahci_platform_enable_phys(hpriv);
+	if (ret)
+		goto out_disable_phys;
+
+	ret = ahci_platform_resume_host(dev);
+	if (ret)
+		goto out_disable_platform_phys;
+
+	/* We resumed so update PM runtime state */
+	pm_runtime_disable(dev);
+	pm_runtime_set_active(dev);
+	pm_runtime_enable(dev);
+
+	return 0;
+
+out_disable_platform_phys:
+	ahci_platform_disable_phys(hpriv);
+out_disable_phys:
+	brcm_sata_phys_disable(priv);
+	ahci_platform_disable_clks(hpriv);
+	return ret;
 }
 #endif
 
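The comment block in the new resume path is worth unpacking. A hedged illustration of the reference-counting hazard it describes (generic clk API calls, not driver code):

	/* Illustrative only: clock enables are counted, so enabling early
	 * and then calling a helper that enables again leaves the count
	 * unbalanced.
	 */
	clk_prepare_enable(clk);	/* count 0 -> 1, needed before init */
	clk_prepare_enable(clk);	/* count 1 -> 2, hidden in a helper */
	clk_disable_unprepare(clk);	/* count 2 -> 1: clock never gates */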
@@ -410,44 +438,71 @@ static int brcm_ahci_probe(struct platform_device *pdev)
 	if (!IS_ERR_OR_NULL(priv->rcdev))
 		reset_control_deassert(priv->rcdev);
 
-	if ((priv->version == BRCM_SATA_BCM7425) ||
-		(priv->version == BRCM_SATA_NSP)) {
-		priv->quirks |= BRCM_AHCI_QUIRK_NO_NCQ;
-		priv->quirks |= BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE;
+	hpriv = ahci_platform_get_resources(pdev, 0);
+	if (IS_ERR(hpriv)) {
+		ret = PTR_ERR(hpriv);
+		goto out_reset;
 	}
 
+	hpriv->plat_data = priv;
+	hpriv->flags = AHCI_HFLAG_WAKE_BEFORE_STOP | AHCI_HFLAG_NO_WRITE_TO_RO;
+
+	switch (priv->version) {
+	case BRCM_SATA_BCM7425:
+		hpriv->flags |= AHCI_HFLAG_DELAY_ENGINE;
+		/* fall through */
+	case BRCM_SATA_NSP:
+		hpriv->flags |= AHCI_HFLAG_NO_NCQ;
+		priv->quirks |= BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE;
+		break;
+	default:
+		break;
+	}
+
+	ret = ahci_platform_enable_clks(hpriv);
+	if (ret)
+		goto out_reset;
+
 	/* Must be first so as to configure endianness including that
 	 * of the standard AHCI register space.
 	 */
 	brcm_sata_init(priv);
 
-	priv->port_mask = brcm_ahci_get_portmask(pdev, priv);
-	if (!priv->port_mask)
-		return -ENODEV;
+	/* Initializes priv->port_mask which is used below */
+	priv->port_mask = brcm_ahci_get_portmask(hpriv, priv);
+	if (!priv->port_mask) {
+		ret = -ENODEV;
+		goto out_disable_clks;
+	}
 
+	/* Must be done before ahci_platform_enable_phys() */
 	brcm_sata_phys_enable(priv);
 
-	hpriv = ahci_platform_get_resources(pdev, 0);
-	if (IS_ERR(hpriv))
-		return PTR_ERR(hpriv);
-	hpriv->plat_data = priv;
-	hpriv->flags = AHCI_HFLAG_WAKE_BEFORE_STOP;
-
 	brcm_sata_alpm_init(hpriv);
 
-	ret = ahci_platform_enable_resources(hpriv);
+	ret = ahci_platform_enable_phys(hpriv);
 	if (ret)
-		return ret;
-
-	if (priv->quirks & BRCM_AHCI_QUIRK_NO_NCQ)
-		hpriv->flags |= AHCI_HFLAG_NO_NCQ;
-	hpriv->flags |= AHCI_HFLAG_NO_WRITE_TO_RO;
+		goto out_disable_phys;
 
 	ret = ahci_platform_init_host(pdev, hpriv, &ahci_brcm_port_info,
 				      &ahci_platform_sht);
 	if (ret)
-		return ret;
+		goto out_disable_platform_phys;
 
 	dev_info(dev, "Broadcom AHCI SATA3 registered\n");
 
 	return 0;
+
+out_disable_platform_phys:
+	ahci_platform_disable_phys(hpriv);
+out_disable_phys:
+	brcm_sata_phys_disable(priv);
+out_disable_clks:
+	ahci_platform_disable_clks(hpriv);
+out_reset:
+	if (!IS_ERR_OR_NULL(priv->rcdev))
+		reset_control_assert(priv->rcdev);
+	return ret;
 }
 
 static int brcm_ahci_remove(struct platform_device *pdev)
@@ -457,12 +512,12 @@ static int brcm_ahci_remove(struct platform_device *pdev)
 	struct brcm_ahci_priv *priv = hpriv->plat_data;
 	int ret;
 
+	brcm_sata_phys_disable(priv);
+
 	ret = ata_platform_remove_one(pdev);
 	if (ret)
 		return ret;
 
-	brcm_sata_phys_disable(priv);
-
 	return 0;
 }
 
@@ -43,7 +43,7 @@ EXPORT_SYMBOL_GPL(ahci_platform_ops);
  * RETURNS:
  * 0 on success otherwise a negative error code
  */
-static int ahci_platform_enable_phys(struct ahci_host_priv *hpriv)
+int ahci_platform_enable_phys(struct ahci_host_priv *hpriv)
 {
 	int rc, i;
 

@@ -74,6 +74,7 @@ disable_phys:
 	}
 	return rc;
 }
+EXPORT_SYMBOL_GPL(ahci_platform_enable_phys);
 
 /**
  * ahci_platform_disable_phys - Disable PHYs

@@ -81,7 +82,7 @@ disable_phys:
  *
  * This function disables all PHYs found in hpriv->phys.
  */
-static void ahci_platform_disable_phys(struct ahci_host_priv *hpriv)
+void ahci_platform_disable_phys(struct ahci_host_priv *hpriv)
 {
 	int i;
 

@@ -90,6 +91,7 @@ static void ahci_platform_disable_phys(struct ahci_host_priv *hpriv)
 		phy_exit(hpriv->phys[i]);
 	}
 }
+EXPORT_SYMBOL_GPL(ahci_platform_disable_phys);
 
 /**
  * ahci_platform_enable_clks - Enable platform clocks
@@ -5328,6 +5328,30 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
 	}
 }
 
+/**
+ *	ata_qc_get_active - get bitmask of active qcs
+ *	@ap: port in question
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ *
+ *	RETURNS:
+ *	Bitmask of active qcs
+ */
+u64 ata_qc_get_active(struct ata_port *ap)
+{
+	u64 qc_active = ap->qc_active;
+
+	/* ATA_TAG_INTERNAL is sent to hw as tag 0 */
+	if (qc_active & (1ULL << ATA_TAG_INTERNAL)) {
+		qc_active |= (1 << 0);
+		qc_active &= ~(1ULL << ATA_TAG_INTERNAL);
+	}
+
+	return qc_active;
+}
+EXPORT_SYMBOL_GPL(ata_qc_get_active);
+
 /**
  *	ata_qc_complete_multiple - Complete multiple qcs successfully
  *	@ap: port in question
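A quick worked example of the fold the new helper performs. Illustrative only; the concrete value of ATA_TAG_INTERNAL (32 here) is an assumption of this sketch, not stated in the hunk:

	u64 qc_active = (1ULL << 32) | (1ULL << 5);	/* internal + NCQ tag 5 */

	if (qc_active & (1ULL << 32)) {			/* ATA_TAG_INTERNAL */
		qc_active |= 1ULL << 0;			/* issued to hw as tag 0 */
		qc_active &= ~(1ULL << 32);
	}
	/* qc_active == 0x21: bits 0 and 5, matching what the controller
	 * reports in its done mask -- which is why the drivers below must
	 * XOR against ata_qc_get_active() rather than raw ap->qc_active.
	 */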
@@ -1280,7 +1280,7 @@ static void sata_fsl_host_intr(struct ata_port *ap)
 			     i, ioread32(hcr_base + CC),
 			     ioread32(hcr_base + CA));
 		}
-		ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
+		ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
 		return;
 
 	} else if ((ap->qc_active & (1ULL << ATA_TAG_INTERNAL))) {
@@ -2829,7 +2829,7 @@ static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp
 	}
 
 	if (work_done) {
-		ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
+		ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
 
 		/* Update the software queue position index in hardware */
 		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
@@ -984,7 +984,7 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
 					check_commands = 0;
 				check_commands &= ~(1 << pos);
 			}
-			ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
+			ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
 		}
 	}
 
@@ -2707,7 +2707,7 @@ static const struct block_device_operations pktcdvd_ops = {
 	.release =		pkt_close,
 	.ioctl =		pkt_ioctl,
 #ifdef CONFIG_COMPAT
-	.ioctl =		pkt_compat_ioctl,
+	.compat_ioctl =		pkt_compat_ioctl,
 #endif
 	.check_events =		pkt_check_events,
 };
@@ -83,7 +83,6 @@ config ARM_EXYNOS_BUS_DEVFREQ
 	select DEVFREQ_GOV_PASSIVE
 	select DEVFREQ_EVENT_EXYNOS_PPMU
 	select PM_DEVFREQ_EVENT
-	select PM_OPP
 	help
 	  This adds the common DEVFREQ driver for Exynos Memory bus. Exynos
 	  Memory bus has one more group of memory bus (e.g, MIF and INT block).

@@ -98,7 +97,7 @@ config ARM_TEGRA_DEVFREQ
 		ARCH_TEGRA_132_SOC || ARCH_TEGRA_124_SOC || \
 		ARCH_TEGRA_210_SOC || \
 		COMPILE_TEST
-	select PM_OPP
+	depends on COMMON_CLK
 	help
 	  This adds the DEVFREQ driver for the Tegra family of SoCs.
 	  It reads ACTMON counters of memory controllers and adjusts the

@@ -109,7 +108,6 @@ config ARM_TEGRA20_DEVFREQ
 	depends on (TEGRA_MC && TEGRA20_EMC) || COMPILE_TEST
 	depends on COMMON_CLK
 	select DEVFREQ_GOV_SIMPLE_ONDEMAND
-	select PM_OPP
 	help
 	  This adds the DEVFREQ driver for the Tegra20 family of SoCs.
 	  It reads Memory Controller counters and adjusts the operating

@@ -121,7 +119,6 @@ config ARM_RK3399_DMC_DEVFREQ
 	select DEVFREQ_EVENT_ROCKCHIP_DFI
 	select DEVFREQ_GOV_SIMPLE_ONDEMAND
 	select PM_DEVFREQ_EVENT
-	select PM_OPP
 	help
 	  This adds the DEVFREQ driver for the RK3399 DMC(Dynamic Memory Controller).
 	  It sets the frequency for the memory controller and reads the usage counts
@@ -553,8 +553,8 @@ config GPIO_TEGRA
 
 config GPIO_TEGRA186
 	tristate "NVIDIA Tegra186 GPIO support"
-	default ARCH_TEGRA_186_SOC
-	depends on ARCH_TEGRA_186_SOC || COMPILE_TEST
+	default ARCH_TEGRA_186_SOC || ARCH_TEGRA_194_SOC
+	depends on ARCH_TEGRA_186_SOC || ARCH_TEGRA_194_SOC || COMPILE_TEST
 	depends on OF_GPIO
 	select GPIOLIB_IRQCHIP
 	select IRQ_DOMAIN_HIERARCHY
@@ -107,7 +107,7 @@ static void __iomem *bank_reg(struct aspeed_sgpio *gpio,
 		return gpio->base + bank->irq_regs + GPIO_IRQ_STATUS;
 	default:
 		/* acturally if code runs to here, it's an error case */
-		BUG_ON(1);
+		BUG();
 	}
 }
 
@@ -226,7 +226,7 @@ static int gpio_mockup_get_direction(struct gpio_chip *gc, unsigned int offset)
 	int direction;
 
 	mutex_lock(&chip->lock);
-	direction = !chip->lines[offset].dir;
+	direction = chip->lines[offset].dir;
 	mutex_unlock(&chip->lock);
 
 	return direction;

@@ -395,7 +395,7 @@ static int gpio_mockup_probe(struct platform_device *pdev)
 	struct gpio_chip *gc;
 	struct device *dev;
 	const char *name;
-	int rv, base;
+	int rv, base, i;
 	u16 ngpio;
 
 	dev = &pdev->dev;

@@ -447,6 +447,9 @@ static int gpio_mockup_probe(struct platform_device *pdev)
 	if (!chip->lines)
 		return -ENOMEM;
 
+	for (i = 0; i < gc->ngpio; i++)
+		chip->lines[i].dir = GPIO_LINE_DIRECTION_IN;
+
 	if (device_property_read_bool(dev, "named-gpio-lines")) {
 		rv = gpio_mockup_name_lines(dev, chip);
 		if (rv)
@@ -346,6 +346,7 @@ static int mpc8xxx_probe(struct platform_device *pdev)
 		return -ENOMEM;
 
 	gc = &mpc8xxx_gc->gc;
+	gc->parent = &pdev->dev;
 
 	if (of_property_read_bool(np, "little-endian")) {
 		ret = bgpio_init(gc, &pdev->dev, 4,
@@ -568,16 +568,18 @@ static void pca953x_irq_mask(struct irq_data *d)
 {
 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
 	struct pca953x_chip *chip = gpiochip_get_data(gc);
+	irq_hw_number_t hwirq = irqd_to_hwirq(d);
 
-	chip->irq_mask[d->hwirq / BANK_SZ] &= ~BIT(d->hwirq % BANK_SZ);
+	clear_bit(hwirq, chip->irq_mask);
 }
 
 static void pca953x_irq_unmask(struct irq_data *d)
 {
 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
 	struct pca953x_chip *chip = gpiochip_get_data(gc);
+	irq_hw_number_t hwirq = irqd_to_hwirq(d);
 
-	chip->irq_mask[d->hwirq / BANK_SZ] |= BIT(d->hwirq % BANK_SZ);
+	set_bit(hwirq, chip->irq_mask);
 }
 
 static int pca953x_irq_set_wake(struct irq_data *d, unsigned int on)

@@ -635,8 +637,7 @@ static int pca953x_irq_set_type(struct irq_data *d, unsigned int type)
 {
 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
 	struct pca953x_chip *chip = gpiochip_get_data(gc);
-	int bank_nb = d->hwirq / BANK_SZ;
-	u8 mask = BIT(d->hwirq % BANK_SZ);
+	irq_hw_number_t hwirq = irqd_to_hwirq(d);
 
 	if (!(type & IRQ_TYPE_EDGE_BOTH)) {
 		dev_err(&chip->client->dev, "irq %d: unsupported type %d\n",

@@ -644,15 +645,8 @@ static int pca953x_irq_set_type(struct irq_data *d, unsigned int type)
 		return -EINVAL;
 	}
 
-	if (type & IRQ_TYPE_EDGE_FALLING)
-		chip->irq_trig_fall[bank_nb] |= mask;
-	else
-		chip->irq_trig_fall[bank_nb] &= ~mask;
-
-	if (type & IRQ_TYPE_EDGE_RISING)
-		chip->irq_trig_raise[bank_nb] |= mask;
-	else
-		chip->irq_trig_raise[bank_nb] &= ~mask;
+	assign_bit(hwirq, chip->irq_trig_fall, type & IRQ_TYPE_EDGE_FALLING);
+	assign_bit(hwirq, chip->irq_trig_raise, type & IRQ_TYPE_EDGE_RISING);
 
 	return 0;
 }

@@ -661,10 +655,10 @@ static void pca953x_irq_shutdown(struct irq_data *d)
 {
 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
 	struct pca953x_chip *chip = gpiochip_get_data(gc);
-	u8 mask = BIT(d->hwirq % BANK_SZ);
+	irq_hw_number_t hwirq = irqd_to_hwirq(d);
 
-	chip->irq_trig_raise[d->hwirq / BANK_SZ] &= ~mask;
-	chip->irq_trig_fall[d->hwirq / BANK_SZ] &= ~mask;
+	clear_bit(hwirq, chip->irq_trig_raise);
+	clear_bit(hwirq, chip->irq_trig_fall);
 }
 
 static bool pca953x_irq_pending(struct pca953x_chip *chip, unsigned long *pending)
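The conversion in these four hunks is mechanical once you see that the old per-bank byte arithmetic is exactly what the generic bitmap helpers do internally. A hedged before/after sketch (BANK_SZ is 8 for these expanders):

	/* Old style: caller does the bank/offset math by hand on a u8 array */
	chip->irq_trig_fall[hwirq / BANK_SZ] |=  BIT(hwirq % BANK_SZ);
	chip->irq_trig_fall[hwirq / BANK_SZ] &= ~BIT(hwirq % BANK_SZ);

	/* New style: irq_trig_fall is a DECLARE_BITMAP(); set_bit() and
	 * clear_bit() hide the word/offset split, and assign_bit()
	 * collapses the set-or-clear if/else into a single call.
	 */
	set_bit(hwirq, chip->irq_trig_fall);
	clear_bit(hwirq, chip->irq_trig_fall);
	assign_bit(hwirq, chip->irq_trig_fall, type & IRQ_TYPE_EDGE_FALLING);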
@@ -280,7 +280,7 @@ static int iproc_gpio_probe(struct platform_device *pdev)
 	return 0;
 }
 
-static int __exit iproc_gpio_remove(struct platform_device *pdev)
+static int iproc_gpio_remove(struct platform_device *pdev)
 {
 	struct iproc_gpio_chip *chip;
 
@@ -44,15 +44,14 @@ static inline unsigned long enable_cp(unsigned long *cpenable)
 	unsigned long flags;
 
 	local_irq_save(flags);
-	RSR_CPENABLE(*cpenable);
-	WSR_CPENABLE(*cpenable | BIT(XCHAL_CP_ID_XTIOP));
-
+	*cpenable = xtensa_get_sr(cpenable);
+	xtensa_set_sr(*cpenable | BIT(XCHAL_CP_ID_XTIOP), cpenable);
 	return flags;
 }
 
 static inline void disable_cp(unsigned long flags, unsigned long cpenable)
 {
-	WSR_CPENABLE(cpenable);
+	xtensa_set_sr(cpenable, cpenable);
 	local_irq_restore(flags);
 }
 
@@ -220,6 +220,14 @@ int gpiod_get_direction(struct gpio_desc *desc)
 	chip = gpiod_to_chip(desc);
 	offset = gpio_chip_hwgpio(desc);
 
+	/*
+	 * Open drain emulation using input mode may incorrectly report
+	 * input here, fix that up.
+	 */
+	if (test_bit(FLAG_OPEN_DRAIN, &desc->flags) &&
+	    test_bit(FLAG_IS_OUT, &desc->flags))
+		return 0;
+
 	if (!chip->get_direction)
 		return -ENOTSUPP;
 

@@ -4472,8 +4480,9 @@ static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id,
 
 		if (chip->ngpio <= p->chip_hwnum) {
 			dev_err(dev,
-				"requested GPIO %d is out of range [0..%d] for chip %s\n",
-				idx, chip->ngpio, chip->label);
+				"requested GPIO %u (%u) is out of range [0..%u] for chip %s\n",
+				idx, p->chip_hwnum, chip->ngpio - 1,
+				chip->label);
 			return ERR_PTR(-EINVAL);
 		}
 
@@ -15112,7 +15112,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
 		return ret;
 
 	fb_obj_bump_render_priority(obj);
-	intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_DIRTYFB);
+	i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB);
 
 	if (!new_plane_state->base.fence) { /* implicit fencing */
 		struct dma_fence *fence;
@@ -229,11 +229,11 @@ static void frontbuffer_release(struct kref *ref)
 	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
 	spin_unlock(&obj->vma.lock);
 
-	obj->frontbuffer = NULL;
+	RCU_INIT_POINTER(obj->frontbuffer, NULL);
 	spin_unlock(&to_i915(obj->base.dev)->fb_tracking.lock);
 
 	i915_gem_object_put(obj);
-	kfree(front);
+	kfree_rcu(front, rcu);
 }
 
 struct intel_frontbuffer *

@@ -242,11 +242,7 @@ intel_frontbuffer_get(struct drm_i915_gem_object *obj)
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	struct intel_frontbuffer *front;
 
-	spin_lock(&i915->fb_tracking.lock);
-	front = obj->frontbuffer;
-	if (front)
-		kref_get(&front->ref);
-	spin_unlock(&i915->fb_tracking.lock);
+	front = __intel_frontbuffer_get(obj);
 	if (front)
 		return front;
 

@@ -262,13 +258,13 @@ intel_frontbuffer_get(struct drm_i915_gem_object *obj)
 			 i915_active_may_sleep(frontbuffer_retire));
 
 	spin_lock(&i915->fb_tracking.lock);
-	if (obj->frontbuffer) {
+	if (rcu_access_pointer(obj->frontbuffer)) {
 		kfree(front);
-		front = obj->frontbuffer;
+		front = rcu_dereference_protected(obj->frontbuffer, true);
 		kref_get(&front->ref);
 	} else {
 		i915_gem_object_get(obj);
-		obj->frontbuffer = front;
+		rcu_assign_pointer(obj->frontbuffer, front);
 	}
 	spin_unlock(&i915->fb_tracking.lock);
 
@@ -27,10 +27,10 @@
 #include <linux/atomic.h>
 #include <linux/kref.h>
 
+#include "gem/i915_gem_object_types.h"
 #include "i915_active.h"
 
 struct drm_i915_private;
-struct drm_i915_gem_object;
 
 enum fb_op_origin {
 	ORIGIN_GTT,

@@ -45,6 +45,7 @@ struct intel_frontbuffer {
 	atomic_t bits;
 	struct i915_active write;
 	struct drm_i915_gem_object *obj;
+	struct rcu_head rcu;
 };
 
 void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915,

@@ -54,6 +55,35 @@ void intel_frontbuffer_flip_complete(struct drm_i915_private *i915,
 void intel_frontbuffer_flip(struct drm_i915_private *i915,
 			    unsigned frontbuffer_bits);
 
+void intel_frontbuffer_put(struct intel_frontbuffer *front);
+
+static inline struct intel_frontbuffer *
+__intel_frontbuffer_get(const struct drm_i915_gem_object *obj)
+{
+	struct intel_frontbuffer *front;
+
+	if (likely(!rcu_access_pointer(obj->frontbuffer)))
+		return NULL;
+
+	rcu_read_lock();
+	do {
+		front = rcu_dereference(obj->frontbuffer);
+		if (!front)
+			break;
+
+		if (unlikely(!kref_get_unless_zero(&front->ref)))
+			continue;
+
+		if (likely(front == rcu_access_pointer(obj->frontbuffer)))
+			break;
+
+		intel_frontbuffer_put(front);
+	} while (1);
+	rcu_read_unlock();
+
+	return front;
+}
+
 struct intel_frontbuffer *
 intel_frontbuffer_get(struct drm_i915_gem_object *obj);
 

@@ -119,6 +149,4 @@ void intel_frontbuffer_track(struct intel_frontbuffer *old,
 			     struct intel_frontbuffer *new,
 			     unsigned int frontbuffer_bits);
 
-void intel_frontbuffer_put(struct intel_frontbuffer *front);
-
 #endif /* __INTEL_FRONTBUFFER_H__ */
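__intel_frontbuffer_get() above is an instance of a standard RCU lookup idiom, generalized below as a sketch (obj, slot and obj_put() are placeholder names, not i915 API): an object found through an RCU-protected pointer is only safely usable once kref_get_unless_zero() succeeds and the slot still points at the same object; otherwise you may have grabbed a dying object that frontbuffer_release() is about to hand to kfree_rcu().

	/* Generic sketch of the RCU + kref lookup loop (placeholder names) */
	rcu_read_lock();
	do {
		obj = rcu_dereference(slot);
		if (!obj)
			break;				/* nothing published */
		if (unlikely(!kref_get_unless_zero(&obj->ref)))
			continue;			/* dying; re-read slot */
		if (likely(obj == rcu_access_pointer(slot)))
			break;				/* stable reference */
		obj_put(obj);				/* raced; retry */
	} while (1);
	rcu_read_unlock();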
@@ -279,12 +279,21 @@ static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
 				       struct i915_vma *vma)
 {
 	enum pipe pipe = overlay->crtc->pipe;
+	struct intel_frontbuffer *from = NULL, *to = NULL;
 
 	WARN_ON(overlay->old_vma);
 
-	intel_frontbuffer_track(overlay->vma ? overlay->vma->obj->frontbuffer : NULL,
-				vma ? vma->obj->frontbuffer : NULL,
-				INTEL_FRONTBUFFER_OVERLAY(pipe));
+	if (overlay->vma)
+		from = intel_frontbuffer_get(overlay->vma->obj);
+	if (vma)
+		to = intel_frontbuffer_get(vma->obj);
+
+	intel_frontbuffer_track(from, to, INTEL_FRONTBUFFER_OVERLAY(pipe));
+
+	if (to)
+		intel_frontbuffer_put(to);
+	if (from)
+		intel_frontbuffer_put(from);
 
 	intel_frontbuffer_flip_prepare(overlay->i915,
 				       INTEL_FRONTBUFFER_OVERLAY(pipe));

@@ -766,7 +775,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 		ret = PTR_ERR(vma);
 		goto out_pin_section;
 	}
-	intel_frontbuffer_flush(new_bo->frontbuffer, ORIGIN_DIRTYFB);
+	i915_gem_object_flush_frontbuffer(new_bo, ORIGIN_DIRTYFB);
 
 	if (!overlay->active) {
 		u32 oconfig;
@@ -20,7 +20,8 @@ static void __do_clflush(struct drm_i915_gem_object *obj)
 {
 	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
 	drm_clflush_sg(obj->mm.pages);
-	intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
+
+	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
 }
 
 static int clflush_work(struct dma_fence_work *base)
@@ -664,7 +664,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 	i915_gem_object_unlock(obj);
 
 	if (write_domain)
-		intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
+		i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
 
 out_unpin:
 	i915_gem_object_unpin_pages(obj);

@@ -784,7 +784,7 @@ int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
 	}
 
 out:
-	intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
+	i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
 	obj->mm.dirty = true;
 	/* return with the pages pinned */
 	return 0;
@@ -280,7 +280,7 @@ i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
 		for_each_ggtt_vma(vma, obj)
 			intel_gt_flush_ggtt_writes(vma->vm->gt);
 
-		intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
+		i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
 
 		for_each_ggtt_vma(vma, obj) {
 			if (vma->iomap)

@@ -308,6 +308,30 @@ i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
 	obj->write_domain = 0;
 }
 
+void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
+					 enum fb_op_origin origin)
+{
+	struct intel_frontbuffer *front;
+
+	front = __intel_frontbuffer_get(obj);
+	if (front) {
+		intel_frontbuffer_flush(front, origin);
+		intel_frontbuffer_put(front);
+	}
+}
+
+void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
+					      enum fb_op_origin origin)
+{
+	struct intel_frontbuffer *front;
+
+	front = __intel_frontbuffer_get(obj);
+	if (front) {
+		intel_frontbuffer_invalidate(front, origin);
+		intel_frontbuffer_put(front);
+	}
+}
+
 void i915_gem_init__objects(struct drm_i915_private *i915)
 {
 	INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
@@ -13,8 +13,8 @@
 
 #include <drm/i915_drm.h>
 
+#include "display/intel_frontbuffer.h"
 #include "i915_gem_object_types.h"
-
 #include "i915_gem_gtt.h"
 
 void i915_gem_init__objects(struct drm_i915_private *i915);

@@ -463,4 +463,25 @@ int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
 				  unsigned int flags,
 				  const struct i915_sched_attr *attr);
 
+void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
+					 enum fb_op_origin origin);
+void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
+					      enum fb_op_origin origin);
+
+static inline void
+i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
+				  enum fb_op_origin origin)
+{
+	if (unlikely(rcu_access_pointer(obj->frontbuffer)))
+		__i915_gem_object_flush_frontbuffer(obj, origin);
+}
+
+static inline void
+i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
+				       enum fb_op_origin origin)
+{
+	if (unlikely(rcu_access_pointer(obj->frontbuffer)))
+		__i915_gem_object_invalidate_frontbuffer(obj, origin);
+}
+
 #endif
@@ -150,7 +150,7 @@ struct drm_i915_gem_object {
 	 */
 	u16 write_domain;
 
-	struct intel_frontbuffer *frontbuffer;
+	struct intel_frontbuffer __rcu *frontbuffer;
 
 	/** Current tiling stride for the object, if it's tiled. */
 	unsigned int tiling_and_stride;
@@ -94,8 +94,9 @@ static int __gt_park(struct intel_wakeref *wf)
 		intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
 	}
 
+	/* Defer dropping the display power well for 100ms, it's slow! */
 	GEM_BUG_ON(!wakeref);
-	intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref);
+	intel_display_power_put_async(i915, POWER_DOMAIN_GT_IRQ, wakeref);
 
 	i915_globals_park();
 
@@ -161,7 +161,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
 	 * We manually control the domain here and pretend that it
 	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
 	 */
-	intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
+	i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
 
 	if (copy_from_user(vaddr, user_data, args->size))
 		return -EFAULT;

@@ -169,7 +169,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
 	drm_clflush_virt_range(vaddr, args->size);
 	intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
 
-	intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
+	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
 	return 0;
 }
 
@@ -589,7 +589,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 		goto out_unpin;
 	}
 
-	intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
+	i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
 
 	user_data = u64_to_user_ptr(args->data_ptr);
 	offset = args->offset;

@@ -631,7 +631,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 		user_data += page_length;
 		offset += page_length;
 	}
-	intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
+	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
 
 	i915_gem_object_unlock_fence(obj, fence);
 out_unpin:

@@ -721,7 +721,7 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
 		offset = 0;
 	}
 
-	intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
+	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
 	i915_gem_object_unlock_fence(obj, fence);
 
 	return ret;
@@ -144,61 +144,40 @@ static inline s64 ktime_since(const ktime_t kt)
 	return ktime_to_ns(ktime_sub(ktime_get(), kt));
 }
 
-static u64 __pmu_estimate_rc6(struct i915_pmu *pmu)
-{
-	u64 val;
-
-	/*
-	 * We think we are runtime suspended.
-	 *
-	 * Report the delta from when the device was suspended to now,
-	 * on top of the last known real value, as the approximated RC6
-	 * counter value.
-	 */
-	val = ktime_since(pmu->sleep_last);
-	val += pmu->sample[__I915_SAMPLE_RC6].cur;
-
-	pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
-
-	return val;
-}
-
-static u64 __pmu_update_rc6(struct i915_pmu *pmu, u64 val)
-{
-	/*
-	 * If we are coming back from being runtime suspended we must
-	 * be careful not to report a larger value than returned
-	 * previously.
-	 */
-	if (val >= pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
-		pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0;
-		pmu->sample[__I915_SAMPLE_RC6].cur = val;
-	} else {
-		val = pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
-	}
-
-	return val;
-}
-
 static u64 get_rc6(struct intel_gt *gt)
 {
 	struct drm_i915_private *i915 = gt->i915;
 	struct i915_pmu *pmu = &i915->pmu;
 	unsigned long flags;
+	bool awake = false;
 	u64 val;
 
-	val = 0;
 	if (intel_gt_pm_get_if_awake(gt)) {
 		val = __get_rc6(gt);
 		intel_gt_pm_put_async(gt);
+		awake = true;
 	}
 
 	spin_lock_irqsave(&pmu->lock, flags);
 
-	if (val)
-		val = __pmu_update_rc6(pmu, val);
+	if (awake) {
+		pmu->sample[__I915_SAMPLE_RC6].cur = val;
+	} else {
+		/*
+		 * We think we are runtime suspended.
+		 *
+		 * Report the delta from when the device was suspended to now,
+		 * on top of the last known real value, as the approximated RC6
+		 * counter value.
+		 */
+		val = ktime_since(pmu->sleep_last);
+		val += pmu->sample[__I915_SAMPLE_RC6].cur;
+	}
+
+	if (val < pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur)
+		val = pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur;
 	else
-		val = __pmu_estimate_rc6(pmu);
+		pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = val;
 
 	spin_unlock_irqrestore(&pmu->lock, flags);
 

@@ -210,20 +189,11 @@ static void park_rc6(struct drm_i915_private *i915)
 	struct i915_pmu *pmu = &i915->pmu;
 
 	if (pmu->enable & config_enabled_mask(I915_PMU_RC6_RESIDENCY))
-		__pmu_update_rc6(pmu, __get_rc6(&i915->gt));
+		pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
 
 	pmu->sleep_last = ktime_get();
 }
 
-static void unpark_rc6(struct drm_i915_private *i915)
-{
-	struct i915_pmu *pmu = &i915->pmu;
-
-	/* Estimate how long we slept and accumulate that into rc6 counters */
-	if (pmu->enable & config_enabled_mask(I915_PMU_RC6_RESIDENCY))
-		__pmu_estimate_rc6(pmu);
-}
-
 #else
 
 static u64 get_rc6(struct intel_gt *gt)

@@ -232,7 +202,6 @@ static u64 get_rc6(struct intel_gt *gt)
 }
 
 static void park_rc6(struct drm_i915_private *i915) {}
-static void unpark_rc6(struct drm_i915_private *i915) {}
 
 #endif
 

@@ -281,8 +250,6 @@ void i915_pmu_gt_unparked(struct drm_i915_private *i915)
 	 */
 	__i915_pmu_maybe_start_timer(pmu);
 
-	unpark_rc6(i915);
-
 	spin_unlock_irq(&pmu->lock);
 }
 
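The rework replaces the two estimate/update helpers with inline logic plus one invariant, isolated below as a sketch: whatever mix of real reads and sleep-time estimates feeds the counter, the value reported to perf never moves backwards.

	/* Sketch of the __I915_SAMPLE_RC6_LAST_REPORTED clamp */
	static u64 report_monotonic(u64 *last_reported, u64 val)
	{
		if (val < *last_reported)
			val = *last_reported;	/* earlier estimate overshot */
		else
			*last_reported = val;
		return val;
	}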
@@ -18,7 +18,7 @@ enum {
 	__I915_SAMPLE_FREQ_ACT = 0,
 	__I915_SAMPLE_FREQ_REQ,
 	__I915_SAMPLE_RC6,
-	__I915_SAMPLE_RC6_ESTIMATED,
+	__I915_SAMPLE_RC6_LAST_REPORTED,
 	__I915_NUM_PMU_SAMPLERS
 };
 
@@ -1104,8 +1104,14 @@ int i915_vma_move_to_active(struct i915_vma *vma,
 		return err;
 
 	if (flags & EXEC_OBJECT_WRITE) {
-		if (intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CS))
-			i915_active_add_request(&obj->frontbuffer->write, rq);
+		struct intel_frontbuffer *front;
+
+		front = __intel_frontbuffer_get(obj);
+		if (unlikely(front)) {
+			if (intel_frontbuffer_invalidate(front, ORIGIN_CS))
+				i915_active_add_request(&front->write, rq);
+			intel_frontbuffer_put(front);
+		}
 
 		dma_resv_add_excl_fence(vma->resv, &rq->fence);
 		obj->write_domain = I915_GEM_DOMAIN_RENDER;
@@ -358,7 +358,7 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
 		return -EINVAL;
 	}
 
-	ip_frag = be32_to_cpu(fs->m_ext.data[0]);
+	ip_frag = !!(be32_to_cpu(fs->h_ext.data[0]) & 1);
 
 	/* Locate the first rule available */
 	if (fs->location == RX_CLS_LOC_ANY)

@@ -569,7 +569,7 @@ static int bcm_sf2_cfp_rule_cmp(struct bcm_sf2_priv *priv, int port,
 
 		if (rule->fs.flow_type != fs->flow_type ||
 		    rule->fs.ring_cookie != fs->ring_cookie ||
-		    rule->fs.m_ext.data[0] != fs->m_ext.data[0])
+		    rule->fs.h_ext.data[0] != fs->h_ext.data[0])
 			continue;
 
 		switch (fs->flow_type & ~FLOW_EXT) {

@@ -621,7 +621,7 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
 		return -EINVAL;
 	}
 
-	ip_frag = be32_to_cpu(fs->m_ext.data[0]);
+	ip_frag = !!(be32_to_cpu(fs->h_ext.data[0]) & 1);
 
 	layout = &udf_tcpip6_layout;
 	slice_num = bcm_sf2_get_slice_number(layout, 0);
@@ -1569,8 +1569,8 @@ static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
 
 	if (enabled) {
 		/* Enable VLAN filtering. */
-		tpid  = ETH_P_8021AD;
-		tpid2 = ETH_P_8021Q;
+		tpid  = ETH_P_8021Q;
+		tpid2 = ETH_P_8021AD;
 	} else {
 		/* Disable VLAN filtering. */
 		tpid  = ETH_P_SJA1105;

@@ -1579,9 +1579,9 @@ static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
 
 	table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
 	general_params = table->entries;
-	/* EtherType used to identify outer tagged (S-tag) VLAN traffic */
-	general_params->tpid = tpid;
+	/* EtherType used to identify inner tagged (C-tag) VLAN traffic */
+	general_params->tpid = tpid;
+	/* EtherType used to identify outer tagged (S-tag) VLAN traffic */
 	general_params->tpid2 = tpid2;
 	/* When VLAN filtering is on, we need to at least be able to
 	 * decode management traffic through the "backup plan".

@@ -1855,7 +1855,7 @@ static netdev_tx_t sja1105_port_deferred_xmit(struct dsa_switch *ds, int port,
 	if (!clone)
 		goto out;
 
-	sja1105_ptp_txtstamp_skb(ds, slot, clone);
+	sja1105_ptp_txtstamp_skb(ds, port, clone);
 
 out:
 	mutex_unlock(&priv->mgmt_lock);
@@ -237,7 +237,7 @@ int sja1105_ptp_commit(struct dsa_switch *ds, struct sja1105_ptp_cmd *cmd,
 	if (rw == SPI_WRITE)
 		priv->info->ptp_cmd_packing(buf, cmd, PACK);
 
-	rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->ptp_control, buf,
+	rc = sja1105_xfer_buf(priv, rw, regs->ptp_control, buf,
 			      SJA1105_SIZE_PTP_CMD);
 
 	if (rw == SPI_READ)

@@ -659,7 +659,7 @@ void sja1105_ptp_clock_unregister(struct dsa_switch *ds)
 	ptp_data->clock = NULL;
 }
 
-void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int slot,
+void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int port,
 			      struct sk_buff *skb)
 {
 	struct sja1105_private *priv = ds->priv;

@@ -679,7 +679,7 @@ void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int slot,
 		goto out;
 	}
 
-	rc = sja1105_ptpegr_ts_poll(ds, slot, &ts);
+	rc = sja1105_ptpegr_ts_poll(ds, port, &ts);
 	if (rc < 0) {
 		dev_err(ds->dev, "timed out polling for tstamp\n");
 		kfree_skb(skb);
@@ -142,6 +142,9 @@ static size_t sja1105et_general_params_entry_packing(void *buf, void *entry_ptr,
 	return size;
 }
 
+/* TPID and TPID2 are intentionally reversed so that semantic
+ * compatibility with E/T is kept.
+ */
 static size_t
 sja1105pqrs_general_params_entry_packing(void *buf, void *entry_ptr,
 					 enum packing_op op)

@@ -166,9 +169,9 @@ sja1105pqrs_general_params_entry_packing(void *buf, void *entry_ptr,
 	sja1105_packing(buf, &entry->mirr_port, 141, 139, size, op);
 	sja1105_packing(buf, &entry->vlmarker, 138, 107, size, op);
 	sja1105_packing(buf, &entry->vlmask, 106, 75, size, op);
-	sja1105_packing(buf, &entry->tpid, 74, 59, size, op);
+	sja1105_packing(buf, &entry->tpid2, 74, 59, size, op);
 	sja1105_packing(buf, &entry->ignore2stf, 58, 58, size, op);
-	sja1105_packing(buf, &entry->tpid2, 57, 42, size, op);
+	sja1105_packing(buf, &entry->tpid, 57, 42, size, op);
 	sja1105_packing(buf, &entry->queue_ts, 41, 41, size, op);
 	sja1105_packing(buf, &entry->egrmirrvid, 40, 29, size, op);
 	sja1105_packing(buf, &entry->egrmirrpcp, 28, 26, size, op);
@@ -477,11 +477,6 @@ int sja1105_setup_tc_taprio(struct dsa_switch *ds, int port,
 	if (admin->cycle_time_extension)
 		return -ENOTSUPP;
 
-	if (!ns_to_sja1105_delta(admin->base_time)) {
-		dev_err(ds->dev, "A base time of zero is not hardware-allowed\n");
-		return -ERANGE;
-	}
-
 	for (i = 0; i < admin->num_entries; i++) {
 		s64 delta_ns = admin->entries[i].interval;
 		s64 delta_cycles = ns_to_sja1105_delta(delta_ns);
@@ -1536,8 +1536,11 @@ void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
 	((MAX_MAC_CREDIT_E2 - GET_NUM_VFS_PER_PATH(bp) * VF_MAC_CREDIT_CNT) / \
 	 func_num + GET_NUM_VFS_PER_PF(bp) * VF_MAC_CREDIT_CNT)
 
+#define BNX2X_VFS_VLAN_CREDIT(bp)	\
+	(GET_NUM_VFS_PER_PATH(bp) * VF_VLAN_CREDIT_CNT)
+
 #define PF_VLAN_CREDIT_E2(bp, func_num)				 \
-	((MAX_MAC_CREDIT_E2 - GET_NUM_VFS_PER_PATH(bp) * VF_VLAN_CREDIT_CNT) / \
+	((MAX_VLAN_CREDIT_E2 - 1 - BNX2X_VFS_VLAN_CREDIT(bp)) /	\
 	 func_num + GET_NUM_VFS_PER_PF(bp) * VF_VLAN_CREDIT_CNT)
 
 #endif /* BNX2X_SP_VERBS */
@@ -515,6 +515,7 @@ struct link_config {
 
 	enum cc_pause  requested_fc;     /* flow control user has requested */
 	enum cc_pause  fc;               /* actual link flow control */
+	enum cc_pause  advertised_fc;    /* actual advertised flow control */
 
 	enum cc_fec    requested_fec;    /* Forward Error Correction: */
 	enum cc_fec    fec;              /* requested and actual in use */
@@ -807,8 +807,8 @@ static void get_pauseparam(struct net_device *dev,
 	struct port_info *p = netdev_priv(dev);
 
 	epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
-	epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
-	epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
+	epause->rx_pause = (p->link_cfg.advertised_fc & PAUSE_RX) != 0;
+	epause->tx_pause = (p->link_cfg.advertised_fc & PAUSE_TX) != 0;
 }
 
 static int set_pauseparam(struct net_device *dev,
@@ -4089,7 +4089,8 @@ static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause)
 		if (cc_pause & PAUSE_TX)
 			fw_pause |= FW_PORT_CAP32_802_3_PAUSE;
 		else
-			fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR;
+			fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR |
+				    FW_PORT_CAP32_802_3_PAUSE;
 	} else if (cc_pause & PAUSE_TX) {
 		fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR;
 	}

@@ -8563,17 +8564,17 @@ static fw_port_cap32_t lstatus_to_fwcap(u32 lstatus)
 void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
 {
 	const struct fw_port_cmd *cmd = (const void *)rpl;
-	int action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16));
-	struct adapter *adapter = pi->adapter;
-	struct link_config *lc = &pi->link_cfg;
-	int link_ok, linkdnrc;
-	enum fw_port_type port_type;
-	enum fw_port_module_type mod_type;
-	unsigned int speed, fc, fec;
 	fw_port_cap32_t pcaps, acaps, lpacaps, linkattr;
+	struct link_config *lc = &pi->link_cfg;
+	struct adapter *adapter = pi->adapter;
+	unsigned int speed, fc, fec, adv_fc;
+	enum fw_port_module_type mod_type;
+	int action, link_ok, linkdnrc;
+	enum fw_port_type port_type;
 
 	/* Extract the various fields from the Port Information message.
 	 */
+	action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16));
 	switch (action) {
 	case FW_PORT_ACTION_GET_PORT_INFO: {
 		u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype);

@@ -8611,6 +8612,7 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
 	}
 
 	fec = fwcap_to_cc_fec(acaps);
+	adv_fc = fwcap_to_cc_pause(acaps);
 	fc = fwcap_to_cc_pause(linkattr);
 	speed = fwcap_to_speed(linkattr);
 

@@ -8667,7 +8669,9 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
 	}
 
 	if (link_ok != lc->link_ok || speed != lc->speed ||
-	    fc != lc->fc || fec != lc->fec) {	/* something changed */
+	    fc != lc->fc || adv_fc != lc->advertised_fc ||
+	    fec != lc->fec) {
+		/* something changed */
 		if (!link_ok && lc->link_ok) {
 			lc->link_down_rc = linkdnrc;
 			dev_warn_ratelimited(adapter->pdev_dev,

@@ -8677,6 +8681,7 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
 		}
 		lc->link_ok = link_ok;
 		lc->speed = speed;
+		lc->advertised_fc = adv_fc;
 		lc->fc = fc;
 		lc->fec = fec;
 
@@ -1690,8 +1690,8 @@ static void cxgb4vf_get_pauseparam(struct net_device *dev,
 	struct port_info *pi = netdev_priv(dev);
 
 	pauseparam->autoneg = (pi->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
-	pauseparam->rx_pause = (pi->link_cfg.fc & PAUSE_RX) != 0;
-	pauseparam->tx_pause = (pi->link_cfg.fc & PAUSE_TX) != 0;
+	pauseparam->rx_pause = (pi->link_cfg.advertised_fc & PAUSE_RX) != 0;
+	pauseparam->tx_pause = (pi->link_cfg.advertised_fc & PAUSE_TX) != 0;
 }
 
 /*
@@ -135,6 +135,7 @@ struct link_config {
 
 	enum cc_pause	requested_fc;	/* flow control user has requested */
 	enum cc_pause	fc;		/* actual link flow control */
+	enum cc_pause	advertised_fc;	/* actual advertised flow control */
 
 	enum cc_fec	auto_fec;	/* Forward Error Correction: */
 	enum cc_fec	requested_fec;	/*   "automatic" (IEEE 802.3), */
@@ -1913,16 +1913,16 @@ static const char *t4vf_link_down_rc_str(unsigned char link_down_rc)
 static void t4vf_handle_get_port_info(struct port_info *pi,
 				      const struct fw_port_cmd *cmd)
 {
-	int action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16));
-	struct adapter *adapter = pi->adapter;
-	struct link_config *lc = &pi->link_cfg;
-	int link_ok, linkdnrc;
-	enum fw_port_type port_type;
-	enum fw_port_module_type mod_type;
-	unsigned int speed, fc, fec;
 	fw_port_cap32_t pcaps, acaps, lpacaps, linkattr;
+	struct link_config *lc = &pi->link_cfg;
+	struct adapter *adapter = pi->adapter;
+	unsigned int speed, fc, fec, adv_fc;
+	enum fw_port_module_type mod_type;
+	int action, link_ok, linkdnrc;
+	enum fw_port_type port_type;
 
 	/* Extract the various fields from the Port Information message. */
+	action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16));
 	switch (action) {
 	case FW_PORT_ACTION_GET_PORT_INFO: {
 		u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype);

@@ -1982,6 +1982,7 @@ static void t4vf_handle_get_port_info(struct port_info *pi,
 	}
 
 	fec = fwcap_to_cc_fec(acaps);
+	adv_fc = fwcap_to_cc_pause(acaps);
 	fc = fwcap_to_cc_pause(linkattr);
 	speed = fwcap_to_speed(linkattr);
 

@@ -2012,7 +2013,9 @@ static void t4vf_handle_get_port_info(struct port_info *pi,
 	}
 
 	if (link_ok != lc->link_ok || speed != lc->speed ||
-	    fc != lc->fc || fec != lc->fec) {	/* something changed */
+	    fc != lc->fc || adv_fc != lc->advertised_fc ||
+	    fec != lc->fec) {
+		/* something changed */
 		if (!link_ok && lc->link_ok) {
 			lc->link_down_rc = linkdnrc;
 			dev_warn_ratelimited(adapter->pdev_dev,

@@ -2022,6 +2025,7 @@ static void t4vf_handle_get_port_info(struct port_info *pi,
 		}
 		lc->link_ok = link_ok;
 		lc->speed = speed;
+		lc->advertised_fc = adv_fc;
 		lc->fc = fc;
 		lc->fec = fec;
 
@@ -1719,7 +1719,7 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
 	int page_offset;
 	unsigned int sz;
 	int *count_ptr;
-	int i;
+	int i, j;
 
 	vaddr = phys_to_virt(addr);
 	WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));

@@ -1736,14 +1736,14 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
 		WARN_ON(!IS_ALIGNED((unsigned long)sg_vaddr,
 				    SMP_CACHE_BYTES));
 
+		dma_unmap_page(priv->rx_dma_dev, sg_addr,
+			       DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
+
 		/* We may use multiple Rx pools */
 		dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
 		if (!dpaa_bp)
 			goto free_buffers;
 
-		count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
-		dma_unmap_page(priv->rx_dma_dev, sg_addr,
-			       DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
 		if (!skb) {
 			sz = dpaa_bp->size +
 				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

@@ -1786,7 +1786,9 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
 			skb_add_rx_frag(skb, i - 1, head_page, frag_off,
 					frag_len, dpaa_bp->size);
 		}
+
 		/* Update the pool count for the current {cpu x bpool} */
+		count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
 		(*count_ptr)--;
 
 		if (qm_sg_entry_is_final(&sgt[i]))

@@ -1800,26 +1802,25 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
 	return skb;
 
 free_buffers:
-	/* compensate sw bpool counter changes */
-	for (i--; i >= 0; i--) {
-		dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
-		if (dpaa_bp) {
-			count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
-			(*count_ptr)++;
-		}
-	}
 	/* free all the SG entries */
-	for (i = 0; i < DPAA_SGT_MAX_ENTRIES ; i++) {
-		sg_addr = qm_sg_addr(&sgt[i]);
+	for (j = 0; j < DPAA_SGT_MAX_ENTRIES ; j++) {
+		sg_addr = qm_sg_addr(&sgt[j]);
 		sg_vaddr = phys_to_virt(sg_addr);
+		/* all pages 0..i were unmaped */
+		if (j > i)
+			dma_unmap_page(priv->rx_dma_dev, qm_sg_addr(&sgt[j]),
+				       DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
 		free_pages((unsigned long)sg_vaddr, 0);
-		dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
-		if (dpaa_bp) {
-			count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
-			(*count_ptr)--;
+		/* counters 0..i-1 were decremented */
+		if (j >= i) {
+			dpaa_bp = dpaa_bpid2pool(sgt[j].bpid);
+			if (dpaa_bp) {
+				count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
+				(*count_ptr)--;
+			}
 		}
 
-		if (qm_sg_entry_is_final(&sgt[i]))
+		if (qm_sg_entry_is_final(&sgt[j]))
 			break;
 	}
 	/* free the SGT fragment */
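The error-path rewrite above is a generic unwind pattern worth naming: the main loop advances i through several per-entry stages, so cleanup walks the whole array with a second index j and applies each undo step only to entries that actually reached the matching stage. A schematic version with hypothetical helper names:

	/* Schematic two-index unwind (unmap_entry(), free_entry(),
	 * undo_count() and is_final() are hypothetical helpers)
	 */
	for (j = 0; j < MAX_ENTRIES; j++) {
		if (j > i)		/* entries 0..i were already unmapped */
			unmap_entry(j);
		free_entry(j);		/* every entry owns a page to free */
		if (j >= i)		/* counters 0..i-1 already decremented */
			undo_count(j);
		if (is_final(j))
			break;
	}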
@@ -6,6 +6,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/netlink.h>
+#include <linux/vmalloc.h>
 #include <linux/xz.h>
 #include "mlxfw_mfa2.h"
 #include "mlxfw_mfa2_file.h"

@@ -548,7 +549,7 @@ mlxfw_mfa2_file_component_get(const struct mlxfw_mfa2_file *mfa2_file,
 	comp_size = be32_to_cpu(comp->size);
 	comp_buf_size = comp_size + mlxfw_mfa2_comp_magic_len;
 
-	comp_data = kmalloc(sizeof(*comp_data) + comp_buf_size, GFP_KERNEL);
+	comp_data = vzalloc(sizeof(*comp_data) + comp_buf_size);
 	if (!comp_data)
 		return ERR_PTR(-ENOMEM);
 	comp_data->comp.data_size = comp_size;

@@ -570,7 +571,7 @@ mlxfw_mfa2_file_component_get(const struct mlxfw_mfa2_file *mfa2_file,
 	comp_data->comp.data = comp_data->buff + mlxfw_mfa2_comp_magic_len;
 	return &comp_data->comp;
 err_out:
-	kfree(comp_data);
+	vfree(comp_data);
 	return ERR_PTR(err);
 }
 

@@ -579,7 +580,7 @@ void mlxfw_mfa2_file_component_put(struct mlxfw_mfa2_component *comp)
 	const struct mlxfw_mfa2_comp_data *comp_data;
 
 	comp_data = container_of(comp, struct mlxfw_mfa2_comp_data, comp);
-	kfree(comp_data);
+	vfree(comp_data);
 }
 
 void mlxfw_mfa2_file_fini(struct mlxfw_mfa2_file *mfa2_file)
@@ -5471,6 +5471,7 @@ enum mlxsw_reg_htgt_trap_group {
 	MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR,
 	MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0,
 	MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1,
+	MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP,
 
 	__MLXSW_REG_HTGT_TRAP_GROUP_MAX,
 	MLXSW_REG_HTGT_TRAP_GROUP_MAX = __MLXSW_REG_HTGT_TRAP_GROUP_MAX - 1
@@ -4543,8 +4543,8 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
 	MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false),
 	MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false),
 	MLXSW_SP_RXL_MARK(DECAP_ECN0, TRAP_TO_CPU, ROUTER_EXP, false),
-	MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
-	MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
+	MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, VRRP, false),
+	MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, VRRP, false),
 	/* PKT Sample trap */
 	MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
 		  false, SP_IP2ME, DISCARD),
@@ -4627,6 +4627,10 @@ static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
 			rate = 19 * 1024;
 			burst_size = 12;
 			break;
+		case MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP:
+			rate = 360;
+			burst_size = 7;
+			break;
 		default:
 			continue;
 		}
@@ -4666,6 +4670,7 @@ static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
 		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0:
+		case MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP:
 			priority = 5;
 			tc = 5;
 			break;
@@ -6787,6 +6787,9 @@ static int mlxsw_sp_router_port_check_rif_addr(struct mlxsw_sp *mlxsw_sp,
 
 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
 		rif = mlxsw_sp->router->rifs[i];
+		if (rif && rif->ops &&
+		    rif->ops->type == MLXSW_SP_RIF_TYPE_IPIP_LB)
+			continue;
 		if (rif && rif->dev && rif->dev != dev &&
 		    !ether_addr_equal_masked(rif->dev->dev_addr, dev_addr,
 					     mlxsw_sp->mac_mask)) {
@@ -112,6 +112,14 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
 	struct device *dev = dwmac->dev;
 	const char *parent_name, *mux_parent_names[MUX_CLK_NUM_PARENTS];
 	struct meson8b_dwmac_clk_configs *clk_configs;
+	static const struct clk_div_table div_table[] = {
+		{ .div = 2, .val = 2, },
+		{ .div = 3, .val = 3, },
+		{ .div = 4, .val = 4, },
+		{ .div = 5, .val = 5, },
+		{ .div = 6, .val = 6, },
+		{ .div = 7, .val = 7, },
+	};
 
 	clk_configs = devm_kzalloc(dev, sizeof(*clk_configs), GFP_KERNEL);
 	if (!clk_configs)
@@ -146,9 +154,9 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
 	clk_configs->m250_div.reg = dwmac->regs + PRG_ETH0;
 	clk_configs->m250_div.shift = PRG_ETH0_CLK_M250_DIV_SHIFT;
 	clk_configs->m250_div.width = PRG_ETH0_CLK_M250_DIV_WIDTH;
-	clk_configs->m250_div.flags = CLK_DIVIDER_ONE_BASED |
-				      CLK_DIVIDER_ALLOW_ZERO |
+	clk_configs->m250_div.table = div_table;
+	clk_configs->m250_div.flags = CLK_DIVIDER_ALLOW_ZERO |
 				      CLK_DIVIDER_ROUND_CLOSEST;
 	clk = meson8b_dwmac_register_clk(dwmac, "m250_div", &parent_name, 1,
 					 &clk_divider_ops,
 					 &clk_configs->m250_div.hw);
@@ -540,7 +540,7 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
 		mtu = dst_mtu(&rt->dst);
 	}
 
-	rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu);
+	rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu, false);
 
 	if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) &&
 	    mtu < ntohs(iph->tot_len)) {
@@ -627,6 +627,8 @@ static struct phy_driver aqr_driver[] = {
 	.config_intr	= aqr_config_intr,
 	.ack_interrupt	= aqr_ack_interrupt,
 	.read_status	= aqr_read_status,
+	.suspend	= aqr107_suspend,
+	.resume		= aqr107_resume,
 },
 {
 	PHY_ID_MATCH_MODEL(PHY_ID_AQR106),
@@ -166,9 +166,9 @@ static struct posix_clock_operations ptp_clock_ops = {
 	.read		= ptp_read,
 };
 
-static void delete_ptp_clock(struct posix_clock *pc)
+static void ptp_clock_release(struct device *dev)
 {
-	struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
+	struct ptp_clock *ptp = container_of(dev, struct ptp_clock, dev);
 
 	mutex_destroy(&ptp->tsevq_mux);
 	mutex_destroy(&ptp->pincfg_mux);
@@ -213,7 +213,6 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
 	}
 
 	ptp->clock.ops = ptp_clock_ops;
-	ptp->clock.release = delete_ptp_clock;
 	ptp->info = info;
 	ptp->devid = MKDEV(major, index);
 	ptp->index = index;
@@ -236,15 +235,6 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
 	if (err)
 		goto no_pin_groups;
 
-	/* Create a new device in our class. */
-	ptp->dev = device_create_with_groups(ptp_class, parent, ptp->devid,
-					     ptp, ptp->pin_attr_groups,
-					     "ptp%d", ptp->index);
-	if (IS_ERR(ptp->dev)) {
-		err = PTR_ERR(ptp->dev);
-		goto no_device;
-	}
-
 	/* Register a new PPS source. */
 	if (info->pps) {
 		struct pps_source_info pps;
@@ -260,8 +250,18 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
 		}
 	}
 
-	/* Create a posix clock. */
-	err = posix_clock_register(&ptp->clock, ptp->devid);
+	/* Initialize a new device of our class in our clock structure. */
+	device_initialize(&ptp->dev);
+	ptp->dev.devt = ptp->devid;
+	ptp->dev.class = ptp_class;
+	ptp->dev.parent = parent;
+	ptp->dev.groups = ptp->pin_attr_groups;
+	ptp->dev.release = ptp_clock_release;
+	dev_set_drvdata(&ptp->dev, ptp);
+	dev_set_name(&ptp->dev, "ptp%d", ptp->index);
+
+	/* Create a posix clock and link it to the device. */
+	err = posix_clock_register(&ptp->clock, &ptp->dev);
 	if (err) {
 		pr_err("failed to create posix clock\n");
 		goto no_clock;
@@ -273,8 +273,6 @@ no_clock:
 	if (ptp->pps_source)
 		pps_unregister_source(ptp->pps_source);
 no_pps:
-	device_destroy(ptp_class, ptp->devid);
-no_device:
 	ptp_cleanup_pin_groups(ptp);
 no_pin_groups:
 	if (ptp->kworker)
@@ -304,7 +302,6 @@ int ptp_clock_unregister(struct ptp_clock *ptp)
 	if (ptp->pps_source)
 		pps_unregister_source(ptp->pps_source);
 
-	device_destroy(ptp_class, ptp->devid);
 	ptp_cleanup_pin_groups(ptp);
 
 	posix_clock_unregister(&ptp->clock);
@@ -28,7 +28,7 @@ struct timestamp_event_queue {
 
 struct ptp_clock {
 	struct posix_clock clock;
-	struct device *dev;
+	struct device dev;
 	struct ptp_clock_info *info;
 	dev_t devid;
 	int index; /* index into clocks.map */
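The ptp_clock changes above convert the clock from a bare kref to an embedded struct device whose release callback frees the containing object. A hedged, self-contained sketch of that lifetime pattern follows; my_obj, my_release and my_obj_create are illustrative names, not kernel APIs, while the device_* calls are the real driver-core interface:

	#include <linux/device.h>
	#include <linux/slab.h>

	struct my_obj {
		struct device dev;	/* embedded, not a pointer */
	};

	/* Called by the driver core when the last reference is dropped. */
	static void my_release(struct device *dev)
	{
		struct my_obj *obj = container_of(dev, struct my_obj, dev);

		kfree(obj);
	}

	static struct my_obj *my_obj_create(struct device *parent)
	{
		struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

		if (!obj)
			return NULL;
		device_initialize(&obj->dev);	/* refcount starts at 1 */
		obj->dev.parent = parent;
		obj->dev.release = my_release;
		dev_set_name(&obj->dev, "myobj");
		return obj;	/* drop with put_device(&obj->dev) */
	}

The point of the conversion is that an open chardev file can hold a reference that outlives unregistration, which a plain kfree on unregister could not accommodate.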
@@ -2473,50 +2473,46 @@ static int qeth_mpc_initialize(struct qeth_card *card)
 	rc = qeth_cm_enable(card);
 	if (rc) {
 		QETH_CARD_TEXT_(card, 2, "2err%d", rc);
-		goto out_qdio;
+		return rc;
 	}
 	rc = qeth_cm_setup(card);
 	if (rc) {
 		QETH_CARD_TEXT_(card, 2, "3err%d", rc);
-		goto out_qdio;
+		return rc;
 	}
 	rc = qeth_ulp_enable(card);
 	if (rc) {
 		QETH_CARD_TEXT_(card, 2, "4err%d", rc);
-		goto out_qdio;
+		return rc;
 	}
 	rc = qeth_ulp_setup(card);
 	if (rc) {
 		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
-		goto out_qdio;
+		return rc;
 	}
 	rc = qeth_alloc_qdio_queues(card);
 	if (rc) {
 		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
-		goto out_qdio;
+		return rc;
 	}
 	rc = qeth_qdio_establish(card);
 	if (rc) {
 		QETH_CARD_TEXT_(card, 2, "6err%d", rc);
 		qeth_free_qdio_queues(card);
-		goto out_qdio;
+		return rc;
 	}
 	rc = qeth_qdio_activate(card);
 	if (rc) {
 		QETH_CARD_TEXT_(card, 2, "7err%d", rc);
-		goto out_qdio;
+		return rc;
 	}
 	rc = qeth_dm_act(card);
 	if (rc) {
 		QETH_CARD_TEXT_(card, 2, "8err%d", rc);
-		goto out_qdio;
+		return rc;
 	}
 
 	return 0;
-out_qdio:
-	qeth_qdio_clear_card(card, !IS_IQD(card));
-	qdio_free(CARD_DDEV(card));
-	return rc;
 }
 
 void qeth_print_status_message(struct qeth_card *card)
@@ -3419,11 +3415,6 @@ int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
 		goto out;
 	}
 
-	if (card->state != CARD_STATE_DOWN) {
-		rc = -1;
-		goto out;
-	}
-
 	qeth_free_qdio_queues(card);
 	card->options.cq = cq;
 	rc = 0;
@@ -5023,10 +5014,8 @@ retriable:
 	}
 	if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
 		rc = qeth_query_setdiagass(card);
-		if (rc < 0) {
+		if (rc)
 			QETH_CARD_TEXT_(card, 2, "8err%d", rc);
-			goto out;
-		}
 	}
 
 	if (!qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP) ||
@@ -287,12 +287,12 @@ static void qeth_l2_stop_card(struct qeth_card *card)
 		card->state = CARD_STATE_HARDSETUP;
 	}
 	if (card->state == CARD_STATE_HARDSETUP) {
-		qeth_qdio_clear_card(card, 0);
 		qeth_drain_output_queues(card);
 		qeth_clear_working_pool_list(card);
 		card->state = CARD_STATE_DOWN;
 	}
 
+	qeth_qdio_clear_card(card, 0);
 	flush_workqueue(card->event_wq);
 	card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
 	card->info.promisc_mode = 0;
@@ -1912,8 +1912,7 @@ int qeth_l2_vnicc_get_timeout(struct qeth_card *card, u32 *timeout)
 /* check if VNICC is currently enabled */
 bool qeth_l2_vnicc_is_in_use(struct qeth_card *card)
 {
-	/* if everything is turned off, VNICC is not active */
-	if (!card->options.vnicc.cur_chars)
+	if (!card->options.vnicc.sup_chars)
 		return false;
 	/* default values are only OK if rx_bcast was not enabled by user
 	 * or the card is offline.
@@ -2000,8 +1999,9 @@ static void qeth_l2_vnicc_init(struct qeth_card *card)
 	/* enforce assumed default values and recover settings, if changed */
 	error |= qeth_l2_vnicc_recover_timeout(card, QETH_VNICC_LEARNING,
 					       timeout);
-	chars_tmp = card->options.vnicc.wanted_chars ^ QETH_VNICC_DEFAULT;
-	chars_tmp |= QETH_VNICC_BRIDGE_INVISIBLE;
+	/* Change chars, if necessary */
+	chars_tmp = card->options.vnicc.wanted_chars ^
+		    card->options.vnicc.cur_chars;
 	chars_len = sizeof(card->options.vnicc.wanted_chars) * BITS_PER_BYTE;
 	for_each_set_bit(i, &chars_tmp, chars_len) {
 		vnicc = BIT(i);
@@ -1183,12 +1183,12 @@ static void qeth_l3_stop_card(struct qeth_card *card)
 		card->state = CARD_STATE_HARDSETUP;
 	}
 	if (card->state == CARD_STATE_HARDSETUP) {
-		qeth_qdio_clear_card(card, 0);
 		qeth_drain_output_queues(card);
 		qeth_clear_working_pool_list(card);
 		card->state = CARD_STATE_DOWN;
 	}
 
+	qeth_qdio_clear_card(card, 0);
 	flush_workqueue(card->event_wq);
 	card->info.promisc_mode = 0;
 }
@@ -242,21 +242,33 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t count)
 {
 	struct qeth_card *card = dev_get_drvdata(dev);
+	int rc = 0;
 	char *tmp;
-	int rc;
 
 	if (!IS_IQD(card))
 		return -EPERM;
-	if (card->state != CARD_STATE_DOWN)
-		return -EPERM;
-	if (card->options.sniffer)
-		return -EPERM;
-	if (card->options.cq == QETH_CQ_NOTAVAILABLE)
-		return -EPERM;
+
+	mutex_lock(&card->conf_mutex);
+	if (card->state != CARD_STATE_DOWN) {
+		rc = -EPERM;
+		goto out;
+	}
+
+	if (card->options.sniffer) {
+		rc = -EPERM;
+		goto out;
+	}
+
+	if (card->options.cq == QETH_CQ_NOTAVAILABLE) {
+		rc = -EPERM;
+		goto out;
+	}
 
 	tmp = strsep((char **)&buf, "\n");
-	if (strlen(tmp) > 8)
-		return -EINVAL;
+	if (strlen(tmp) > 8) {
+		rc = -EINVAL;
+		goto out;
+	}
 
 	if (card->options.hsuid[0])
 		/* delete old ip address */
@@ -267,11 +279,13 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
 		card->options.hsuid[0] = '\0';
 		memcpy(card->dev->perm_addr, card->options.hsuid, 9);
 		qeth_configure_cq(card, QETH_CQ_DISABLED);
-		return count;
+		goto out;
 	}
 
-	if (qeth_configure_cq(card, QETH_CQ_ENABLED))
-		return -EPERM;
+	if (qeth_configure_cq(card, QETH_CQ_ENABLED)) {
+		rc = -EPERM;
+		goto out;
+	}
 
 	snprintf(card->options.hsuid, sizeof(card->options.hsuid),
 		 "%-8s", tmp);
@@ -280,6 +294,8 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
 
 	rc = qeth_l3_modify_hsuid(card, true);
 
+out:
+	mutex_unlock(&card->conf_mutex);
 	return rc ? rc : count;
 }
@@ -121,7 +121,8 @@ static inline void cxgbi_device_destroy(struct cxgbi_device *cdev)
 		"cdev 0x%p, p# %u.\n", cdev, cdev->nports);
 	cxgbi_hbas_remove(cdev);
 	cxgbi_device_portmap_cleanup(cdev);
-	cxgbi_ppm_release(cdev->cdev2ppm(cdev));
+	if (cdev->cdev2ppm)
+		cxgbi_ppm_release(cdev->cdev2ppm(cdev));
 	if (cdev->pmap.max_connect)
 		cxgbi_free_big_mem(cdev->pmap.port_csk);
 	kfree(cdev);
@@ -5385,7 +5385,6 @@ static const struct file_operations lpfc_debugfs_ras_log = {
 	.read =         lpfc_debugfs_read,
 	.release =      lpfc_debugfs_ras_log_release,
 };
-#endif
 
 #undef lpfc_debugfs_op_dumpHBASlim
 static const struct file_operations lpfc_debugfs_op_dumpHBASlim = {
@@ -5557,7 +5556,7 @@ static const struct file_operations lpfc_idiag_op_extAcc = {
 	.write =        lpfc_idiag_extacc_write,
 	.release =      lpfc_idiag_cmd_release,
 };
-
+#endif
 
 /* lpfc_idiag_mbxacc_dump_bsg_mbox - idiag debugfs dump bsg mailbox command
 * @phba: Pointer to HBA context object.
@@ -5883,7 +5883,7 @@ void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
 			break;
 		default:
 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-					"1804 Invalid asynchrous event code: "
+					"1804 Invalid asynchronous event code: "
 					"x%x\n", bf_get(lpfc_trailer_code,
 					&cq_event->cqe.mcqe_cmpl));
 			break;
@@ -8555,7 +8555,7 @@ lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
 	psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
 	spin_unlock_irq(&phba->hbalock);
 
-	/* wake up worker thread to post asynchronlous mailbox command */
+	/* wake up worker thread to post asynchronous mailbox command */
 	lpfc_worker_wake_up(phba);
 }
 
@@ -8823,7 +8823,7 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
 		return rc;
 	}
 
-	/* Now, interrupt mode asynchrous mailbox command */
+	/* Now, interrupt mode asynchronous mailbox command */
 	rc = lpfc_mbox_cmd_check(phba, mboxq);
 	if (rc) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
@@ -13112,11 +13112,11 @@ lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
 }
 
 /**
- * lpfc_sli4_sp_handle_async_event - Handle an asynchroous event
+ * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
 * @phba: Pointer to HBA context object.
 * @cqe: Pointer to mailbox completion queue entry.
 *
- * This routine process a mailbox completion queue entry with asynchrous
+ * This routine process a mailbox completion queue entry with asynchronous
 * event.
 *
 * Return: true if work posted to worker thread, otherwise false.
@@ -13270,7 +13270,7 @@ out_no_mqe_complete:
 * @cqe: Pointer to mailbox completion queue entry.
 *
 * This routine process a mailbox completion queue entry, it invokes the
- * proper mailbox complete handling or asynchrous event handling routine
+ * proper mailbox complete handling or asynchronous event handling routine
 * according to the MCQE's async bit.
 *
 * Return: true if work posted to worker thread, otherwise false.
@@ -5248,7 +5248,6 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 					&ct->chain_buffer_dma);
 			if (!ct->chain_buffer) {
 				ioc_err(ioc, "chain_lookup: pci_pool_alloc failed\n");
-				_base_release_memory_pools(ioc);
 				goto out;
 			}
 		}
@@ -646,7 +646,9 @@ iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio,
 	}
 
 	bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
-	bip_set_seed(bip, bio->bi_iter.bi_sector);
+	/* virtual start sector must be in integrity interval units */
+	bip_set_seed(bip, bio->bi_iter.bi_sector >>
+				  (bi->interval_exp - SECTOR_SHIFT));
 
 	pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
 		 (unsigned long long)bip->bip_iter.bi_sector);
@@ -1693,6 +1693,7 @@ struct cifs_fattr {
 	struct timespec64 cf_atime;
 	struct timespec64 cf_mtime;
 	struct timespec64 cf_ctime;
+	u32             cf_cifstag;
 };
 
 static inline void free_dfs_info_param(struct dfs_info3_param *param)
@@ -139,6 +139,28 @@ retry:
 	dput(dentry);
 }
 
+static bool reparse_file_needs_reval(const struct cifs_fattr *fattr)
+{
+	if (!(fattr->cf_cifsattrs & ATTR_REPARSE))
+		return false;
+	/*
+	 * The DFS tags should be only intepreted by server side as per
+	 * MS-FSCC 2.1.2.1, but let's include them anyway.
+	 *
+	 * Besides, if cf_cifstag is unset (0), then we still need it to be
+	 * revalidated to know exactly what reparse point it is.
+	 */
+	switch (fattr->cf_cifstag) {
+	case IO_REPARSE_TAG_DFS:
+	case IO_REPARSE_TAG_DFSR:
+	case IO_REPARSE_TAG_SYMLINK:
+	case IO_REPARSE_TAG_NFS:
+	case 0:
+		return true;
+	}
+	return false;
+}
+
 static void
 cifs_fill_common_info(struct cifs_fattr *fattr, struct cifs_sb_info *cifs_sb)
 {
@@ -158,7 +180,7 @@ cifs_fill_common_info(struct cifs_fattr *fattr, struct cifs_sb_info *cifs_sb)
 	 * is a symbolic link, DFS referral or a reparse point with a direct
 	 * access like junctions, deduplicated files, NFS symlinks.
 	 */
-	if (fattr->cf_cifsattrs & ATTR_REPARSE)
+	if (reparse_file_needs_reval(fattr))
 		fattr->cf_flags |= CIFS_FATTR_NEED_REVAL;
 
 	/* non-unix readdir doesn't provide nlink */
@@ -194,19 +216,37 @@ cifs_fill_common_info(struct cifs_fattr *fattr, struct cifs_sb_info *cifs_sb)
 	}
 }
 
+static void __dir_info_to_fattr(struct cifs_fattr *fattr, const void *info)
+{
+	const FILE_DIRECTORY_INFO *fi = info;
+
+	memset(fattr, 0, sizeof(*fattr));
+	fattr->cf_cifsattrs = le32_to_cpu(fi->ExtFileAttributes);
+	fattr->cf_eof = le64_to_cpu(fi->EndOfFile);
+	fattr->cf_bytes = le64_to_cpu(fi->AllocationSize);
+	fattr->cf_createtime = le64_to_cpu(fi->CreationTime);
+	fattr->cf_atime = cifs_NTtimeToUnix(fi->LastAccessTime);
+	fattr->cf_ctime = cifs_NTtimeToUnix(fi->ChangeTime);
+	fattr->cf_mtime = cifs_NTtimeToUnix(fi->LastWriteTime);
+}
+
 void
 cifs_dir_info_to_fattr(struct cifs_fattr *fattr, FILE_DIRECTORY_INFO *info,
 		       struct cifs_sb_info *cifs_sb)
 {
-	memset(fattr, 0, sizeof(*fattr));
-	fattr->cf_cifsattrs = le32_to_cpu(info->ExtFileAttributes);
-	fattr->cf_eof = le64_to_cpu(info->EndOfFile);
-	fattr->cf_bytes = le64_to_cpu(info->AllocationSize);
-	fattr->cf_createtime = le64_to_cpu(info->CreationTime);
-	fattr->cf_atime = cifs_NTtimeToUnix(info->LastAccessTime);
-	fattr->cf_ctime = cifs_NTtimeToUnix(info->ChangeTime);
-	fattr->cf_mtime = cifs_NTtimeToUnix(info->LastWriteTime);
+	__dir_info_to_fattr(fattr, info);
 	cifs_fill_common_info(fattr, cifs_sb);
 }
 
+static void cifs_fulldir_info_to_fattr(struct cifs_fattr *fattr,
+				       SEARCH_ID_FULL_DIR_INFO *info,
+				       struct cifs_sb_info *cifs_sb)
+{
+	__dir_info_to_fattr(fattr, info);
+
+	/* See MS-FSCC 2.4.18 FileIdFullDirectoryInformation */
+	if (fattr->cf_cifsattrs & ATTR_REPARSE)
+		fattr->cf_cifstag = le32_to_cpu(info->EaSize);
+	cifs_fill_common_info(fattr, cifs_sb);
+}
+
@@ -755,6 +795,11 @@ static int cifs_filldir(char *find_entry, struct file *file,
 				       (FIND_FILE_STANDARD_INFO *)find_entry,
 				       cifs_sb);
 		break;
+	case SMB_FIND_FILE_ID_FULL_DIR_INFO:
+		cifs_fulldir_info_to_fattr(&fattr,
+					   (SEARCH_ID_FULL_DIR_INFO *)find_entry,
+					   cifs_sb);
+		break;
 	default:
 		cifs_dir_info_to_fattr(&fattr,
 				       (FILE_DIRECTORY_INFO *)find_entry,
@@ -67,7 +67,7 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms,
 		goto out;
 
 
-	 if (oparms->tcon->use_resilient) {
+	if (oparms->tcon->use_resilient) {
 		/* default timeout is 0, servers pick default (120 seconds) */
 		nr_ioctl_req.Timeout =
 			cpu_to_le32(oparms->tcon->handle_timeout);
@@ -35,11 +35,11 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused)
 		spin_unlock(&inode->i_lock);
 		spin_unlock(&sb->s_inode_list_lock);
 
-		cond_resched();
 		invalidate_mapping_pages(inode->i_mapping, 0, -1);
 		iput(toput_inode);
 		toput_inode = inode;
 
+		cond_resched();
 		spin_lock(&sb->s_inode_list_lock);
 	}
 	spin_unlock(&sb->s_inode_list_lock);
@@ -676,6 +676,7 @@ int invalidate_inodes(struct super_block *sb, bool kill_dirty)
 	struct inode *inode, *next;
 	LIST_HEAD(dispose);
 
+again:
 	spin_lock(&sb->s_inode_list_lock);
 	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
 		spin_lock(&inode->i_lock);
@@ -698,6 +699,12 @@ int invalidate_inodes(struct super_block *sb, bool kill_dirty)
 		inode_lru_list_del(inode);
 		spin_unlock(&inode->i_lock);
 		list_add(&inode->i_lru, &dispose);
+		if (need_resched()) {
+			spin_unlock(&sb->s_inode_list_lock);
+			cond_resched();
+			dispose_list(&dispose);
+			goto again;
+		}
 	}
 	spin_unlock(&sb->s_inode_list_lock);
fs/io-wq.c (10 changes)

@@ -92,7 +92,6 @@ struct io_wqe {
 	struct io_wqe_acct acct[2];
 
 	struct hlist_nulls_head free_list;
-	struct hlist_nulls_head busy_list;
 	struct list_head all_list;
 
 	struct io_wq *wq;
@@ -327,7 +326,6 @@ static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker,
 	if (worker->flags & IO_WORKER_F_FREE) {
 		worker->flags &= ~IO_WORKER_F_FREE;
 		hlist_nulls_del_init_rcu(&worker->nulls_node);
-		hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->busy_list);
 	}
 
 	/*
@@ -365,7 +363,6 @@ static bool __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker)
 {
 	if (!(worker->flags & IO_WORKER_F_FREE)) {
 		worker->flags |= IO_WORKER_F_FREE;
-		hlist_nulls_del_init_rcu(&worker->nulls_node);
 		hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
 	}
 
@@ -432,6 +429,8 @@ next:
 		if (signal_pending(current))
 			flush_signals(current);
 
+		cond_resched();
+
 		spin_lock_irq(&worker->lock);
 		worker->cur_work = work;
 		spin_unlock_irq(&worker->lock);
@@ -798,10 +797,6 @@ void io_wq_cancel_all(struct io_wq *wq)
 
 	set_bit(IO_WQ_BIT_CANCEL, &wq->state);
 
-	/*
-	 * Browse both lists, as there's a gap between handing work off
-	 * to a worker and the worker putting itself on the busy_list
-	 */
 	rcu_read_lock();
 	for_each_node(node) {
 		struct io_wqe *wqe = wq->wqes[node];
@@ -1049,7 +1044,6 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
 		spin_lock_init(&wqe->lock);
 		INIT_WQ_LIST(&wqe->work_list);
 		INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0);
-		INIT_HLIST_NULLS_HEAD(&wqe->busy_list, 1);
 		INIT_LIST_HEAD(&wqe->all_list);
 	}
fs/io_uring.c (690 changes): file diff suppressed because it is too large.
@@ -2853,7 +2853,7 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl,
 	}
 	if (inode) {
 		/* userspace relies on this representation of dev_t */
-		seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
+		seq_printf(f, "%d %02x:%02x:%lu ", fl_pid,
 			   MAJOR(inode->i_sb->s_dev),
 			   MINOR(inode->i_sb->s_dev), inode->i_ino);
 	} else {
@@ -57,6 +57,9 @@ static void fsnotify_unmount_inodes(struct super_block *sb)
 		 * doing an __iget/iput with SB_ACTIVE clear would actually
 		 * evict all inodes with zero i_count from icache which is
 		 * unnecessarily violent and may in fact be illegal to do.
+		 * However, we should have been called /after/ evict_inodes
+		 * removed all zero refcount inodes, in any case. Test to
+		 * be sure.
 		 */
 		if (!atomic_read(&inode->i_count)) {
 			spin_unlock(&inode->i_lock);
@@ -77,6 +80,7 @@ static void fsnotify_unmount_inodes(struct super_block *sb)
 
 		iput_inode = inode;
 
+		cond_resched();
 		spin_lock(&sb->s_inode_list_lock);
 	}
 	spin_unlock(&sb->s_inode_list_lock);
@@ -984,6 +984,7 @@ static int add_dquot_ref(struct super_block *sb, int type)
 		 * later.
 		 */
 		old_inode = inode;
+		cond_resched();
 		spin_lock(&sb->s_inode_list_lock);
 	}
 	spin_unlock(&sb->s_inode_list_lock);
@@ -448,10 +448,12 @@ void generic_shutdown_super(struct super_block *sb)
 		sync_filesystem(sb);
 		sb->s_flags &= ~SB_ACTIVE;
 
-		fsnotify_sb_delete(sb);
 		cgroup_writeback_umount();
 
+		/* evict all inodes with zero refcount */
 		evict_inodes(sb);
+		/* only nonzero refcount inodes can have marks */
+		fsnotify_sb_delete(sb);
 
 		if (sb->s_dio_done_wq) {
 			destroy_workqueue(sb->s_dio_done_wq);
@@ -19,6 +19,8 @@ struct ahci_host_priv;
 struct platform_device;
 struct scsi_host_template;
 
+int ahci_platform_enable_phys(struct ahci_host_priv *hpriv);
+void ahci_platform_disable_phys(struct ahci_host_priv *hpriv);
 int ahci_platform_enable_clks(struct ahci_host_priv *hpriv);
 void ahci_platform_disable_clks(struct ahci_host_priv *hpriv);
 int ahci_platform_enable_regulators(struct ahci_host_priv *hpriv);
@@ -1175,6 +1175,7 @@ extern unsigned int ata_do_dev_read_id(struct ata_device *dev,
 					struct ata_taskfile *tf, u16 *id);
 extern void ata_qc_complete(struct ata_queued_cmd *qc);
 extern int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active);
+extern u64 ata_qc_get_active(struct ata_port *ap);
 extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd);
 extern int ata_std_bios_param(struct scsi_device *sdev,
 			      struct block_device *bdev,
@@ -55,7 +55,7 @@ static inline int of_mdio_parse_addr(struct device *dev,
 }
 
 #else /* CONFIG_OF_MDIO */
-static bool of_mdiobus_child_is_phy(struct device_node *child)
+static inline bool of_mdiobus_child_is_phy(struct device_node *child)
 {
 	return false;
 }
@@ -69,29 +69,32 @@ struct posix_clock_operations {
 *
 * @ops:     Functional interface to the clock
 * @cdev:    Character device instance for this clock
- * @kref:    Reference count.
+ * @dev:     Pointer to the clock's device.
 * @rwsem:   Protects the 'zombie' field from concurrent access.
 * @zombie:  If 'zombie' is true, then the hardware has disappeared.
- * @release: A function to free the structure when the reference count reaches
- *           zero. May be NULL if structure is statically allocated.
 *
 * Drivers should embed their struct posix_clock within a private
 * structure, obtaining a reference to it during callbacks using
 * container_of().
+ *
+ * Drivers should supply an initialized but not exposed struct device
+ * to posix_clock_register(). It is used to manage lifetime of the
+ * driver's private structure. It's 'release' field should be set to
+ * a release function for this private structure.
 */
 struct posix_clock {
 	struct posix_clock_operations ops;
 	struct cdev cdev;
-	struct kref kref;
+	struct device *dev;
 	struct rw_semaphore rwsem;
 	bool zombie;
-	void (*release)(struct posix_clock *clk);
 };
 
 /**
 * posix_clock_register() - register a new clock
- * @clk:   Pointer to the clock. Caller must provide 'ops' and 'release'
- * @devid: Allocated device id
+ * @clk:   Pointer to the clock. Caller must provide 'ops' field
+ * @dev:   Pointer to the initialized device. Caller must provide
+ *         'release' field
 *
 * A clock driver calls this function to register itself with the
 * clock device subsystem. If 'clk' points to dynamically allocated
@@ -100,7 +103,7 @@ struct posix_clock {
 *
 * Returns zero on success, non-zero otherwise.
 */
-int posix_clock_register(struct posix_clock *clk, dev_t devid);
+int posix_clock_register(struct posix_clock *clk, struct device *dev);
 
 /**
 * posix_clock_unregister() - unregister a clock
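The updated kernel-doc above states the new contract: the caller hands posix_clock_register() an initialized but not yet added struct device whose release callback frees the driver's private structure. A hedged sketch of a caller following that contract, modeled on the ptp_clock.c conversion in this same merge (my_clk, my_dev_release and my_clk_register are illustrative names; the rest is the documented API):

	struct my_clk {
		struct posix_clock clock;	/* caller fills clock.ops */
		struct device dev;		/* initialized, not yet added */
	};

	static void my_dev_release(struct device *dev)
	{
		struct my_clk *c = container_of(dev, struct my_clk, dev);

		kfree(c);
	}

	static int my_clk_register(struct my_clk *c, dev_t devid)
	{
		device_initialize(&c->dev);
		c->dev.devt = devid;
		c->dev.release = my_dev_release;	/* mandatory now */
		dev_set_name(&c->dev, "myclk");

		/* posix_clock_register() takes the device instead of a
		 * dev_t and pins it with get_device() on every open.
		 */
		return posix_clock_register(&c->clock, &c->dev);
	}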
@@ -516,7 +516,16 @@ static inline void skb_dst_update_pmtu(struct sk_buff *skb, u32 mtu)
 	struct dst_entry *dst = skb_dst(skb);
 
 	if (dst && dst->ops->update_pmtu)
-		dst->ops->update_pmtu(dst, NULL, skb, mtu);
+		dst->ops->update_pmtu(dst, NULL, skb, mtu, true);
+}
+
+/* update dst pmtu but not do neighbor confirm */
+static inline void skb_dst_update_pmtu_no_confirm(struct sk_buff *skb, u32 mtu)
+{
+	struct dst_entry *dst = skb_dst(skb);
+
+	if (dst && dst->ops->update_pmtu)
+		dst->ops->update_pmtu(dst, NULL, skb, mtu, false);
 }
 
 static inline void skb_tunnel_check_pmtu(struct sk_buff *skb,
@@ -526,7 +535,7 @@ static inline void skb_tunnel_check_pmtu(struct sk_buff *skb,
 	u32 encap_mtu = dst_mtu(encap_dst);
 
 	if (skb->len > encap_mtu - headroom)
-		skb_dst_update_pmtu(skb, encap_mtu - headroom);
+		skb_dst_update_pmtu_no_confirm(skb, encap_mtu - headroom);
 }
 
 #endif /* _NET_DST_H */
@@ -27,7 +27,8 @@ struct dst_ops {
 	struct dst_entry *	(*negative_advice)(struct dst_entry *);
 	void			(*link_failure)(struct sk_buff *);
 	void			(*update_pmtu)(struct dst_entry *dst, struct sock *sk,
-					       struct sk_buff *skb, u32 mtu);
+					       struct sk_buff *skb, u32 mtu,
+					       bool confirm_neigh);
 	void			(*redirect)(struct dst_entry *dst, struct sock *sk,
 					    struct sk_buff *skb);
 	int			(*local_out)(struct net *net, struct sock *sk, struct sk_buff *skb);
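The new confirm_neigh argument lets update_pmtu() callers say whether the PMTU update may also confirm the neighbour entry. A short hedged usage sketch of the two helpers introduced in net/dst.h above (adjust_pmtu and the local flag are illustrative, the two skb_dst_* calls are the API from this diff):

	static void adjust_pmtu(struct sk_buff *skb, u32 mtu, bool local)
	{
		if (local)
			/* locally generated traffic: confirming the
			 * neighbour is legitimate
			 */
			skb_dst_update_pmtu(skb, mtu);
		else
			/* MTU learned from an external packet, e.g. in a
			 * tunnel path: must not confirm the neighbour
			 */
			skb_dst_update_pmtu_no_confirm(skb, mtu);
	}

The design point is that a forged ICMP "fragmentation needed" packet must not be able to keep a stale neighbour entry alive, which is why the tunnel path in skb_tunnel_check_pmtu() switches to the no-confirm variant.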
@@ -308,6 +308,7 @@ struct tcf_proto_ops {
 	int			(*delete)(struct tcf_proto *tp, void *arg,
 					  bool *last, bool rtnl_held,
 					  struct netlink_ext_ack *);
+	bool			(*delete_empty)(struct tcf_proto *tp);
 	void			(*walk)(struct tcf_proto *tp,
 					struct tcf_walker *arg, bool rtnl_held);
 	int			(*reoffload)(struct tcf_proto *tp, bool add,
@@ -336,6 +337,10 @@ struct tcf_proto_ops {
 	int			flags;
 };
 
+/* Classifiers setting TCF_PROTO_OPS_DOIT_UNLOCKED in tcf_proto_ops->flags
+ * are expected to implement tcf_proto_ops->delete_empty(), otherwise race
+ * conditions can occur when filters are inserted/deleted simultaneously.
+ */
 enum tcf_proto_ops_flags {
 	TCF_PROTO_OPS_DOIT_UNLOCKED = 1,
 };
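A hedged sketch of what the comment above demands of an unlocked classifier: advertise TCF_PROTO_OPS_DOIT_UNLOCKED and also report emptiness through delete_empty(). struct my_head, its filters list and the "my_cls" ops are illustrative names, not an in-tree classifier:

	static bool my_cls_delete_empty(struct tcf_proto *tp)
	{
		struct my_head *head = rtnl_dereference(tp->root);

		/* true tells the core the tcf_proto can be torn down */
		return !head || list_empty(&head->filters);
	}

	static struct tcf_proto_ops my_cls_ops = {
		.kind		= "my_cls",
		.delete_empty	= my_cls_delete_empty,
		.flags		= TCF_PROTO_OPS_DOIT_UNLOCKED,
		/* .classify, .init, .change, .delete, .walk etc. elided */
	};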
@@ -907,7 +907,8 @@ static const int caller_saved[CALLER_SAVED_REGS] = {
 	BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
 };
 
-static void __mark_reg_not_init(struct bpf_reg_state *reg);
+static void __mark_reg_not_init(const struct bpf_verifier_env *env,
+				struct bpf_reg_state *reg);
 
 /* Mark the unknown part of a register (variable offset or scalar value) as
 * known to have the value @imm.
@@ -945,7 +946,7 @@ static void mark_reg_known_zero(struct bpf_verifier_env *env,
 		verbose(env, "mark_reg_known_zero(regs, %u)\n", regno);
 		/* Something bad happened, let's kill all regs */
 		for (regno = 0; regno < MAX_BPF_REG; regno++)
-			__mark_reg_not_init(regs + regno);
+			__mark_reg_not_init(env, regs + regno);
 		return;
 	}
 	__mark_reg_known_zero(regs + regno);
@@ -1054,7 +1055,8 @@ static void __mark_reg_unbounded(struct bpf_reg_state *reg)
 }
 
 /* Mark a register as having a completely unknown (scalar) value. */
-static void __mark_reg_unknown(struct bpf_reg_state *reg)
+static void __mark_reg_unknown(const struct bpf_verifier_env *env,
+			       struct bpf_reg_state *reg)
 {
 	/*
 	 * Clear type, id, off, and union(map_ptr, range) and
@@ -1064,6 +1066,8 @@ static void __mark_reg_unknown(struct bpf_reg_state *reg)
 	reg->type = SCALAR_VALUE;
 	reg->var_off = tnum_unknown;
 	reg->frameno = 0;
+	reg->precise = env->subprog_cnt > 1 || !env->allow_ptr_leaks ?
+		       true : false;
 	__mark_reg_unbounded(reg);
 }
 
@@ -1074,19 +1078,16 @@ static void mark_reg_unknown(struct bpf_verifier_env *env,
 		verbose(env, "mark_reg_unknown(regs, %u)\n", regno);
 		/* Something bad happened, let's kill all regs except FP */
 		for (regno = 0; regno < BPF_REG_FP; regno++)
-			__mark_reg_not_init(regs + regno);
+			__mark_reg_not_init(env, regs + regno);
 		return;
 	}
-	regs += regno;
-	__mark_reg_unknown(regs);
-	/* constant backtracking is enabled for root without bpf2bpf calls */
-	regs->precise = env->subprog_cnt > 1 || !env->allow_ptr_leaks ?
-			true : false;
+	__mark_reg_unknown(env, regs + regno);
 }
 
-static void __mark_reg_not_init(struct bpf_reg_state *reg)
+static void __mark_reg_not_init(const struct bpf_verifier_env *env,
+				struct bpf_reg_state *reg)
 {
-	__mark_reg_unknown(reg);
+	__mark_reg_unknown(env, reg);
 	reg->type = NOT_INIT;
 }
 
@@ -1097,10 +1098,10 @@ static void mark_reg_not_init(struct bpf_verifier_env *env,
 		verbose(env, "mark_reg_not_init(regs, %u)\n", regno);
 		/* Something bad happened, let's kill all regs except FP */
 		for (regno = 0; regno < BPF_REG_FP; regno++)
-			__mark_reg_not_init(regs + regno);
+			__mark_reg_not_init(env, regs + regno);
 		return;
 	}
-	__mark_reg_not_init(regs + regno);
+	__mark_reg_not_init(env, regs + regno);
 }
 
 #define DEF_NOT_SUBREG	(0)
@@ -3234,7 +3235,7 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
 		}
 		if (state->stack[spi].slot_type[0] == STACK_SPILL &&
 		    state->stack[spi].spilled_ptr.type == SCALAR_VALUE) {
-			__mark_reg_unknown(&state->stack[spi].spilled_ptr);
+			__mark_reg_unknown(env, &state->stack[spi].spilled_ptr);
 			for (j = 0; j < BPF_REG_SIZE; j++)
 				state->stack[spi].slot_type[j] = STACK_MISC;
 			goto mark;
@@ -3892,7 +3893,7 @@ static void __clear_all_pkt_pointers(struct bpf_verifier_env *env,
 		if (!reg)
 			continue;
 		if (reg_is_pkt_pointer_any(reg))
-			__mark_reg_unknown(reg);
+			__mark_reg_unknown(env, reg);
 	}
 }
 
@@ -3920,7 +3921,7 @@ static void release_reg_references(struct bpf_verifier_env *env,
 		if (!reg)
 			continue;
 		if (reg->ref_obj_id == ref_obj_id)
-			__mark_reg_unknown(reg);
+			__mark_reg_unknown(env, reg);
 	}
 }
 
@@ -4582,7 +4583,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 		/* Taint dst register if offset had invalid bounds derived from
 		 * e.g. dead branches.
 		 */
-		__mark_reg_unknown(dst_reg);
+		__mark_reg_unknown(env, dst_reg);
 		return 0;
 	}
 
@@ -4834,13 +4835,13 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 		/* Taint dst register if offset had invalid bounds derived from
 		 * e.g. dead branches.
 		 */
-		__mark_reg_unknown(dst_reg);
+		__mark_reg_unknown(env, dst_reg);
 		return 0;
 	}
 
 	if (!src_known &&
 	    opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
-		__mark_reg_unknown(dst_reg);
+		__mark_reg_unknown(env, dst_reg);
 		return 0;
 	}
 
@@ -6982,7 +6983,7 @@ static void clean_func_state(struct bpf_verifier_env *env,
 			/* since the register is unused, clear its state
 			 * to make further comparison simpler
 			 */
-			__mark_reg_not_init(&st->regs[i]);
+			__mark_reg_not_init(env, &st->regs[i]);
 		}
 
 	for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) {
@@ -6990,7 +6991,7 @@ static void clean_func_state(struct bpf_verifier_env *env,
 		/* liveness must not touch this stack slot anymore */
 		st->stack[i].spilled_ptr.live |= REG_LIVE_DONE;
 		if (!(live & REG_LIVE_READ)) {
-			__mark_reg_not_init(&st->stack[i].spilled_ptr);
+			__mark_reg_not_init(env, &st->stack[i].spilled_ptr);
 			for (j = 0; j < BPF_REG_SIZE; j++)
 				st->stack[i].slot_type[j] = STACK_INVALID;
 		}
@@ -14,8 +14,6 @@
 
 #include "posix-timers.h"
 
-static void delete_clock(struct kref *kref);
-
 /*
 * Returns NULL if the posix_clock instance attached to 'fp' is old and stale.
 */
@@ -125,7 +123,7 @@ static int posix_clock_open(struct inode *inode, struct file *fp)
 		err = 0;
 
 	if (!err) {
-		kref_get(&clk->kref);
+		get_device(clk->dev);
 		fp->private_data = clk;
 	}
 out:
@@ -141,7 +139,7 @@ static int posix_clock_release(struct inode *inode, struct file *fp)
 	if (clk->ops.release)
 		err = clk->ops.release(clk);
 
-	kref_put(&clk->kref, delete_clock);
+	put_device(clk->dev);
 
 	fp->private_data = NULL;
 
@@ -161,38 +159,35 @@ static const struct file_operations posix_clock_file_operations = {
 #endif
 };
 
-int posix_clock_register(struct posix_clock *clk, dev_t devid)
+int posix_clock_register(struct posix_clock *clk, struct device *dev)
 {
 	int err;
 
-	kref_init(&clk->kref);
 	init_rwsem(&clk->rwsem);
 
 	cdev_init(&clk->cdev, &posix_clock_file_operations);
+	err = cdev_device_add(&clk->cdev, dev);
+	if (err) {
+		pr_err("%s unable to add device %d:%d\n",
			dev_name(dev), MAJOR(dev->devt), MINOR(dev->devt));
+		return err;
+	}
 	clk->cdev.owner = clk->ops.owner;
-	err = cdev_add(&clk->cdev, devid, 1);
+	clk->dev = dev;
 
-	return err;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(posix_clock_register);
 
-static void delete_clock(struct kref *kref)
-{
-	struct posix_clock *clk = container_of(kref, struct posix_clock, kref);
-
-	if (clk->release)
-		clk->release(clk);
-}
-
 void posix_clock_unregister(struct posix_clock *clk)
 {
-	cdev_del(&clk->cdev);
+	cdev_device_del(&clk->cdev, clk->dev);
 
 	down_write(&clk->rwsem);
 	clk->zombie = true;
 	up_write(&clk->rwsem);
 
-	kref_put(&clk->kref, delete_clock);
+	put_device(clk->dev);
 }
 EXPORT_SYMBOL_GPL(posix_clock_unregister);
@@ -1222,11 +1222,12 @@ EXPORT_SYMBOL(iov_iter_discard);
 
 unsigned long iov_iter_alignment(const struct iov_iter *i)
 {
-	unsigned int p_mask = i->pipe->ring_size - 1;
 	unsigned long res = 0;
 	size_t size = i->count;
 
 	if (unlikely(iov_iter_is_pipe(i))) {
+		unsigned int p_mask = i->pipe->ring_size - 1;
+
 		if (size && i->iov_offset && allocated(&i->pipe->bufs[i->head & p_mask]))
 			return size | i->iov_offset;
 		return size;
@@ -22,7 +22,8 @@
 #endif
 
 static void fake_update_pmtu(struct dst_entry *dst, struct sock *sk,
-			     struct sk_buff *skb, u32 mtu)
+			     struct sk_buff *skb, u32 mtu,
+			     bool confirm_neigh)
 {
 }
@@ -1867,7 +1867,7 @@ static int ebt_buf_count(struct ebt_entries_buf_state *state, unsigned int sz)
 }
 
 static int ebt_buf_add(struct ebt_entries_buf_state *state,
-		       void *data, unsigned int sz)
+		       const void *data, unsigned int sz)
 {
 	if (state->buf_kern_start == NULL)
 		goto count_only;
@@ -1901,7 +1901,7 @@ enum compat_mwt {
 	EBT_COMPAT_TARGET,
 };
 
-static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
+static int compat_mtw_from_user(const struct compat_ebt_entry_mwt *mwt,
 				enum compat_mwt compat_mwt,
 				struct ebt_entries_buf_state *state,
 				const unsigned char *base)
@@ -1979,22 +1979,23 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
 /* return size of all matches, watchers or target, including necessary
 * alignment and padding.
 */
-static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
+static int ebt_size_mwt(const struct compat_ebt_entry_mwt *match32,
 			unsigned int size_left, enum compat_mwt type,
 			struct ebt_entries_buf_state *state, const void *base)
 {
+	const char *buf = (const char *)match32;
 	int growth = 0;
-	char *buf;
 
 	if (size_left == 0)
 		return 0;
 
-	buf = (char *) match32;
-
-	while (size_left >= sizeof(*match32)) {
+	do {
 		struct ebt_entry_match *match_kern;
 		int ret;
 
+		if (size_left < sizeof(*match32))
+			return -EINVAL;
+
 		match_kern = (struct ebt_entry_match *) state->buf_kern_start;
 		if (match_kern) {
 			char *tmp;
@@ -2031,22 +2032,18 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
 		if (match_kern)
 			match_kern->match_size = ret;
 
-		/* rule should have no remaining data after target */
-		if (type == EBT_COMPAT_TARGET && size_left)
-			return -EINVAL;
-
 		match32 = (struct compat_ebt_entry_mwt *) buf;
-	}
+	} while (size_left);
 
 	return growth;
 }
 
 /* called for all ebt_entry structures. */
-static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
+static int size_entry_mwt(const struct ebt_entry *entry, const unsigned char *base,
 			  unsigned int *total,
 			  struct ebt_entries_buf_state *state)
 {
-	unsigned int i, j, startoff, new_offset = 0;
+	unsigned int i, j, startoff, next_expected_off, new_offset = 0;
 	/* stores match/watchers/targets & offset of next struct ebt_entry: */
 	unsigned int offsets[4];
 	unsigned int *offsets_update = NULL;
@@ -2132,11 +2129,13 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
 			return ret;
 	}
 
-	startoff = state->buf_user_offset - startoff;
-
-	if (WARN_ON(*total < startoff))
+	next_expected_off = state->buf_user_offset - startoff;
+	if (next_expected_off != entry->next_offset)
 		return -EINVAL;
-	*total -= startoff;
+
+	if (*total < entry->next_offset)
+		return -EINVAL;
+	*total -= entry->next_offset;
 	return 0;
 }
@@ -110,7 +110,8 @@ static void dn_dst_ifdown(struct dst_entry *, struct net_device *dev, int how);
 static struct dst_entry *dn_dst_negative_advice(struct dst_entry *);
 static void dn_dst_link_failure(struct sk_buff *);
 static void dn_dst_update_pmtu(struct dst_entry *dst, struct sock *sk,
-			       struct sk_buff *skb , u32 mtu);
+			       struct sk_buff *skb , u32 mtu,
+			       bool confirm_neigh);
 static void dn_dst_redirect(struct dst_entry *dst, struct sock *sk,
 			    struct sk_buff *skb);
 static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst,
@@ -251,7 +252,8 @@ static int dn_dst_gc(struct dst_ops *ops)
 * advertise to the other end).
 */
 static void dn_dst_update_pmtu(struct dst_entry *dst, struct sock *sk,
-			       struct sk_buff *skb, u32 mtu)
+			       struct sk_buff *skb, u32 mtu,
+			       bool confirm_neigh)
 {
 	struct dn_route *rt = (struct dn_route *) dst;
 	struct neighbour *n = rt->n;
@@ -20,6 +20,8 @@
 #include "hsr_main.h"
 #include "hsr_framereg.h"
 
+static struct dentry *hsr_debugfs_root_dir;
+
 static void print_mac_address(struct seq_file *sfp, unsigned char *mac)
 {
 	seq_printf(sfp, "%02x:%02x:%02x:%02x:%02x:%02x:",
@@ -63,8 +65,20 @@ hsr_node_table_open(struct inode *inode, struct file *filp)
 	return single_open(filp, hsr_node_table_show, inode->i_private);
 }
 
+void hsr_debugfs_rename(struct net_device *dev)
+{
+	struct hsr_priv *priv = netdev_priv(dev);
+	struct dentry *d;
+
+	d = debugfs_rename(hsr_debugfs_root_dir, priv->node_tbl_root,
+			   hsr_debugfs_root_dir, dev->name);
+	if (IS_ERR(d))
+		netdev_warn(dev, "failed to rename\n");
+	else
+		priv->node_tbl_root = d;
+}
+
 static const struct file_operations hsr_fops = {
 	.owner	= THIS_MODULE,
 	.open	= hsr_node_table_open,
 	.read	= seq_read,
 	.llseek = seq_lseek,
@@ -78,15 +92,14 @@ static const struct file_operations hsr_fops = {
 * When debugfs is configured this routine sets up the node_table file per
 * hsr device for dumping the node_table entries
 */
-int hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev)
+void hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev)
 {
-	int rc = -1;
 	struct dentry *de = NULL;
 
-	de = debugfs_create_dir(hsr_dev->name, NULL);
-	if (!de) {
-		pr_err("Cannot create hsr debugfs root\n");
-		return rc;
+	de = debugfs_create_dir(hsr_dev->name, hsr_debugfs_root_dir);
+	if (IS_ERR(de)) {
+		pr_err("Cannot create hsr debugfs directory\n");
+		return;
 	}
 
 	priv->node_tbl_root = de;
@@ -94,13 +107,13 @@ int hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev)
 	de = debugfs_create_file("node_table", S_IFREG | 0444,
 				 priv->node_tbl_root, priv,
 				 &hsr_fops);
-	if (!de) {
-		pr_err("Cannot create hsr node_table directory\n");
-		return rc;
+	if (IS_ERR(de)) {
+		pr_err("Cannot create hsr node_table file\n");
+		debugfs_remove(priv->node_tbl_root);
+		priv->node_tbl_root = NULL;
+		return;
 	}
 	priv->node_tbl_file = de;
-
-	return 0;
 }
 
 /* hsr_debugfs_term - Tear down debugfs intrastructure
@@ -117,3 +130,18 @@ hsr_debugfs_term(struct hsr_priv *priv)
 	debugfs_remove(priv->node_tbl_root);
 	priv->node_tbl_root = NULL;
 }
+
+void hsr_debugfs_create_root(void)
+{
+	hsr_debugfs_root_dir = debugfs_create_dir("hsr", NULL);
+	if (IS_ERR(hsr_debugfs_root_dir)) {
+		pr_err("Cannot create hsr debugfs root directory\n");
+		hsr_debugfs_root_dir = NULL;
+	}
+}
+
+void hsr_debugfs_remove_root(void)
+{
+	/* debugfs_remove() internally checks NULL and ERROR */
+	debugfs_remove(hsr_debugfs_root_dir);
+}
@@ -272,6 +272,8 @@ static void send_hsr_supervision_frame(struct hsr_port *master,
 			    skb->dev->dev_addr, skb->len) <= 0)
 		goto out;
 	skb_reset_mac_header(skb);
+	skb_reset_network_header(skb);
+	skb_reset_transport_header(skb);
 
 	if (hsr_ver > 0) {
 		hsr_tag = skb_put(skb, sizeof(struct hsr_tag));
@@ -368,7 +370,7 @@ static void hsr_dev_destroy(struct net_device *hsr_dev)
 	del_timer_sync(&hsr->prune_timer);
 	del_timer_sync(&hsr->announce_timer);
 
-	hsr_del_self_node(&hsr->self_node_db);
+	hsr_del_self_node(hsr);
 	hsr_del_nodes(&hsr->node_db);
 }
 
@@ -440,11 +442,12 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
 	INIT_LIST_HEAD(&hsr->ports);
 	INIT_LIST_HEAD(&hsr->node_db);
 	INIT_LIST_HEAD(&hsr->self_node_db);
+	spin_lock_init(&hsr->list_lock);
 
 	ether_addr_copy(hsr_dev->dev_addr, slave[0]->dev_addr);
 
 	/* Make sure we recognize frames from ourselves in hsr_rcv() */
-	res = hsr_create_self_node(&hsr->self_node_db, hsr_dev->dev_addr,
+	res = hsr_create_self_node(hsr, hsr_dev->dev_addr,
 				   slave[1]->dev_addr);
 	if (res < 0)
 		return res;
@@ -477,31 +480,32 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
 
 	res = hsr_add_port(hsr, hsr_dev, HSR_PT_MASTER);
 	if (res)
-		goto err_add_port;
+		goto err_add_master;
 
 	res = register_netdevice(hsr_dev);
 	if (res)
-		goto fail;
+		goto err_unregister;
 
 	res = hsr_add_port(hsr, slave[0], HSR_PT_SLAVE_A);
 	if (res)
-		goto fail;
+		goto err_add_slaves;
 
 	res = hsr_add_port(hsr, slave[1], HSR_PT_SLAVE_B);
 	if (res)
-		goto fail;
+		goto err_add_slaves;
 
+	hsr_debugfs_init(hsr, hsr_dev);
 	mod_timer(&hsr->prune_timer, jiffies + msecs_to_jiffies(PRUNE_PERIOD));
-	res = hsr_debugfs_init(hsr, hsr_dev);
-	if (res)
-		goto fail;
 
 	return 0;
 
-fail:
+err_add_slaves:
+	unregister_netdevice(hsr_dev);
+err_unregister:
 	list_for_each_entry_safe(port, tmp, &hsr->ports, port_list)
 		hsr_del_port(port);
-err_add_port:
-	hsr_del_self_node(&hsr->self_node_db);
+err_add_master:
+	hsr_del_self_node(hsr);
 
 	return res;
 }
@@ -75,10 +75,11 @@ static struct hsr_node *find_node_by_addr_A(struct list_head *node_db,
 /* Helper for device init; the self_node_db is used in hsr_rcv() to recognize
 * frames from self that's been looped over the HSR ring.
 */
-int hsr_create_self_node(struct list_head *self_node_db,
+int hsr_create_self_node(struct hsr_priv *hsr,
 			 unsigned char addr_a[ETH_ALEN],
 			 unsigned char addr_b[ETH_ALEN])
 {
+	struct list_head *self_node_db = &hsr->self_node_db;
 	struct hsr_node *node, *oldnode;
 
 	node = kmalloc(sizeof(*node), GFP_KERNEL);
@@ -88,33 +89,33 @@ int hsr_create_self_node(struct list_head *self_node_db,
 	ether_addr_copy(node->macaddress_A, addr_a);
 	ether_addr_copy(node->macaddress_B, addr_b);
 
-	rcu_read_lock();
+	spin_lock_bh(&hsr->list_lock);
 	oldnode = list_first_or_null_rcu(self_node_db,
 					 struct hsr_node, mac_list);
 	if (oldnode) {
 		list_replace_rcu(&oldnode->mac_list, &node->mac_list);
-		rcu_read_unlock();
-		synchronize_rcu();
-		kfree(oldnode);
+		spin_unlock_bh(&hsr->list_lock);
+		kfree_rcu(oldnode, rcu_head);
 	} else {
-		rcu_read_unlock();
 		list_add_tail_rcu(&node->mac_list, self_node_db);
+		spin_unlock_bh(&hsr->list_lock);
 	}
 
 	return 0;
 }
 
-void hsr_del_self_node(struct list_head *self_node_db)
+void hsr_del_self_node(struct hsr_priv *hsr)
 {
+	struct list_head *self_node_db = &hsr->self_node_db;
 	struct hsr_node *node;
 
-	rcu_read_lock();
+	spin_lock_bh(&hsr->list_lock);
 	node = list_first_or_null_rcu(self_node_db, struct hsr_node, mac_list);
-	rcu_read_unlock();
 	if (node) {
 		list_del_rcu(&node->mac_list);
-		kfree(node);
+		kfree_rcu(node, rcu_head);
 	}
+	spin_unlock_bh(&hsr->list_lock);
 }
 
 void hsr_del_nodes(struct list_head *node_db)
@@ -130,30 +131,43 @@ void hsr_del_nodes(struct list_head *node_db)
 * seq_out is used to initialize filtering of outgoing duplicate frames
 * originating from the newly added node.
 */
-struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[],
-			      u16 seq_out)
+static struct hsr_node *hsr_add_node(struct hsr_priv *hsr,
+				     struct list_head *node_db,
+				     unsigned char addr[],
+				     u16 seq_out)
 {
-	struct hsr_node *node;
+	struct hsr_node *new_node, *node;
 	unsigned long now;
 	int i;
 
-	node = kzalloc(sizeof(*node), GFP_ATOMIC);
-	if (!node)
+	new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
+	if (!new_node)
 		return NULL;
 
-	ether_addr_copy(node->macaddress_A, addr);
+	ether_addr_copy(new_node->macaddress_A, addr);
 
 	/* We are only interested in time diffs here, so use current jiffies
 	 * as initialization. (0 could trigger an spurious ring error warning).
 	 */
 	now = jiffies;
 	for (i = 0; i < HSR_PT_PORTS; i++)
-		node->time_in[i] = now;
+		new_node->time_in[i] = now;
 	for (i = 0; i < HSR_PT_PORTS; i++)
-		node->seq_out[i] = seq_out;
-
-	list_add_tail_rcu(&node->mac_list, node_db);
+		new_node->seq_out[i] = seq_out;
 
+	spin_lock_bh(&hsr->list_lock);
+	list_for_each_entry_rcu(node, node_db, mac_list) {
+		if (ether_addr_equal(node->macaddress_A, addr))
+			goto out;
+		if (ether_addr_equal(node->macaddress_B, addr))
+			goto out;
+	}
+	list_add_tail_rcu(&new_node->mac_list, node_db);
+	spin_unlock_bh(&hsr->list_lock);
+	return new_node;
+out:
+	spin_unlock_bh(&hsr->list_lock);
+	kfree(new_node);
 	return node;
 }
 
@@ -163,6 +177,7 @@ struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
 			      bool is_sup)
 {
 	struct list_head *node_db = &port->hsr->node_db;
+	struct hsr_priv *hsr = port->hsr;
 	struct hsr_node *node;
 	struct ethhdr *ethhdr;
 	u16 seq_out;
@@ -196,7 +211,7 @@ struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
 		seq_out = HSR_SEQNR_START;
 	}
 
-	return hsr_add_node(node_db, ethhdr->h_source, seq_out);
+	return hsr_add_node(hsr, node_db, ethhdr->h_source, seq_out);
 }
 
 /* Use the Supervision frame's info about an eventual macaddress_B for merging
@@ -206,10 +221,11 @@ struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
 void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr,
 			  struct hsr_port *port_rcv)
 {
-	struct ethhdr *ethhdr;
-	struct hsr_node *node_real;
+	struct hsr_priv *hsr = port_rcv->hsr;
 	struct hsr_sup_payload *hsr_sp;
+	struct hsr_node *node_real;
 	struct list_head *node_db;
+	struct ethhdr *ethhdr;
 	int i;
 
 	ethhdr = (struct ethhdr *)skb_mac_header(skb);
@@ -231,7 +247,7 @@ void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr,
 	node_real = find_node_by_addr_A(node_db, hsr_sp->macaddress_A);
 	if (!node_real)
 		/* No frame received from AddrA of this node yet */
-		node_real = hsr_add_node(node_db, hsr_sp->macaddress_A,
+		node_real = hsr_add_node(hsr, node_db, hsr_sp->macaddress_A,
 					 HSR_SEQNR_START - 1);
 	if (!node_real)
 		goto done; /* No mem */
@@ -252,7 +268,9 @@ void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr,
 	}
 	node_real->addr_B_port = port_rcv->type;
 
+	spin_lock_bh(&hsr->list_lock);
 	list_del_rcu(&node_curr->mac_list);
+	spin_unlock_bh(&hsr->list_lock);
 	kfree_rcu(node_curr, rcu_head);
 
 done:
@@ -368,12 +386,13 @@ void hsr_prune_nodes(struct timer_list *t)
 {
 	struct hsr_priv *hsr = from_timer(hsr, t, prune_timer);
 	struct hsr_node *node;
+	struct hsr_node *tmp;
 	struct hsr_port *port;
 	unsigned long timestamp;
 	unsigned long time_a, time_b;
 
-	rcu_read_lock();
-	list_for_each_entry_rcu(node, &hsr->node_db, mac_list) {
+	spin_lock_bh(&hsr->list_lock);
+	list_for_each_entry_safe(node, tmp, &hsr->node_db, mac_list) {
 		/* Don't prune own node. Neither time_in[HSR_PT_SLAVE_A]
 		 * nor time_in[HSR_PT_SLAVE_B], will ever be updated for
 		 * the master port. Thus the master node will be repeatedly
@@ -421,7 +440,7 @@ void hsr_prune_nodes(struct timer_list *t)
 			kfree_rcu(node, rcu_head);
 		}
 	}
-	rcu_read_unlock();
+	spin_unlock_bh(&hsr->list_lock);
 
 	/* Restart timer */
 	mod_timer(&hsr->prune_timer,
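The node-list changes above replace "RCU read lock plus synchronize_rcu on the writer" with a scheme where readers stay lockless under RCU, all list writers serialize on hsr->list_lock, and freed nodes go through kfree_rcu() so in-flight readers never touch reclaimed memory. A hedged sketch of the two sides, using the hsr field names from this diff; use() is an illustrative placeholder:

	/* writer side: serialize against other writers, then defer the free */
	spin_lock_bh(&hsr->list_lock);
	list_del_rcu(&node->mac_list);
	spin_unlock_bh(&hsr->list_lock);
	kfree_rcu(node, rcu_head);	/* freed after a grace period */

	/* reader side: no lock taken, safe against concurrent deletion */
	rcu_read_lock();
	list_for_each_entry_rcu(node, &hsr->node_db, mac_list)
		use(node);
	rcu_read_unlock();

Compared with the old code, this also removes the synchronize_rcu() from the write path, which matters because hsr_add_node() can run in softirq context where blocking is not allowed.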
@@ -12,10 +12,8 @@
 
 struct hsr_node;
 
-void hsr_del_self_node(struct list_head *self_node_db);
+void hsr_del_self_node(struct hsr_priv *hsr);
 void hsr_del_nodes(struct list_head *node_db);
-struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[],
-			      u16 seq_out);
 struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
 			      bool is_sup);
 void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr,
@@ -33,7 +31,7 @@ int hsr_register_frame_out(struct hsr_port *port, struct hsr_node *node,
 
 void hsr_prune_nodes(struct timer_list *t);
 
-int hsr_create_self_node(struct list_head *self_node_db,
+int hsr_create_self_node(struct hsr_priv *hsr,
 			 unsigned char addr_a[ETH_ALEN],
 			 unsigned char addr_b[ETH_ALEN]);
@@ -45,6 +45,10 @@ static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event,
 	case NETDEV_CHANGE:	/* Link (carrier) state changes */
 		hsr_check_carrier_and_operstate(hsr);
 		break;
+	case NETDEV_CHANGENAME:
+		if (is_hsr_master(dev))
+			hsr_debugfs_rename(dev);
+		break;
 	case NETDEV_CHANGEADDR:
 		if (port->type == HSR_PT_MASTER) {
 			/* This should not happen since there's no
@@ -64,7 +68,7 @@ static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event,
 
 		/* Make sure we recognize frames from ourselves in hsr_rcv() */
 		port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
-		res = hsr_create_self_node(&hsr->self_node_db,
+		res = hsr_create_self_node(hsr,
 					   master->dev->dev_addr,
 					   port ?
 					   port->dev->dev_addr :
@@ -123,6 +127,7 @@ static void __exit hsr_exit(void)
 {
 	unregister_netdevice_notifier(&hsr_nb);
 	hsr_netlink_exit();
+	hsr_debugfs_remove_root();
 }
 
 module_init(hsr_init);
(Some files were not shown because too many files changed in this diff.)