Merge remote-tracking branch 'torvalds/master' into perf/core

To pick up fixes from perf/urgent.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>

commit c5468a28ef

.mailmap | 2
@@ -205,6 +205,7 @@ Juha Yrjola <at solidboot.com>
 Juha Yrjola <juha.yrjola@nokia.com>
 Juha Yrjola <juha.yrjola@solidboot.com>
 Julien Thierry <julien.thierry.kdev@gmail.com> <julien.thierry@arm.com>
 Kalle Valo <kvalo@kernel.org> <kvalo@codeaurora.org>
+Kalyan Thota <quic_kalyant@quicinc.com> <kalyan_t@codeaurora.org>
 Kay Sievers <kay.sievers@vrfy.org>
 Kees Cook <keescook@chromium.org> <kees.cook@canonical.com>

@@ -250,6 +251,7 @@ Mark Yao <markyao0591@gmail.com> <mark.yao@rock-chips.com>
 Martin Kepplinger <martink@posteo.de> <martin.kepplinger@ginzinger.com>
 Martin Kepplinger <martink@posteo.de> <martin.kepplinger@puri.sm>
 Martin Kepplinger <martink@posteo.de> <martin.kepplinger@theobroma-systems.com>
+Martyna Szapar-Mudlaw <martyna.szapar-mudlaw@linux.intel.com> <martyna.szapar-mudlaw@intel.com>
 Mathieu Othacehe <m.othacehe@gmail.com>
 Matthew Wilcox <willy@infradead.org> <matthew.r.wilcox@intel.com>
 Matthew Wilcox <willy@infradead.org> <matthew@wil.cx>

@@ -189,6 +189,9 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | Qualcomm Tech. | Kryo4xx Silver  | N/A             | ARM64_ERRATUM_1024718       |
 +----------------+-----------------+-----------------+-----------------------------+
+| Qualcomm Tech. | Kryo4xx Gold    | N/A             | ARM64_ERRATUM_1286807       |
++----------------+-----------------+-----------------+-----------------------------+
+
 +----------------+-----------------+-----------------+-----------------------------+
 | Fujitsu        | A64FX           | E#010001        | FUJITSU_ERRATUM_010001      |
 +----------------+-----------------+-----------------+-----------------------------+

@@ -244,10 +244,11 @@ disclosure of a particular issue, unless requested by a response team or by
 an involved disclosed party. The current ambassadors list:

   ============= ========================================================
-  ARM           Grant Likely <grant.likely@arm.com>
   AMD           Tom Lendacky <tom.lendacky@amd.com>
-  IBM Z         Christian Borntraeger <borntraeger@de.ibm.com>
-  IBM Power     Anton Blanchard <anton@linux.ibm.com>
+  Ampere        Darren Hart <darren@os.amperecomputing.com>
+  ARM           Catalin Marinas <catalin.marinas@arm.com>
+  IBM Power     Anton Blanchard <anton@linux.ibm.com>
+  IBM Z         Christian Borntraeger <borntraeger@de.ibm.com>
   Intel         Tony Luck <tony.luck@intel.com>
   Qualcomm      Trilok Soni <tsoni@codeaurora.org>

@@ -3571,8 +3571,9 @@ M: Andy Gospodarek <andy@greyhouse.net>
 L:	netdev@vger.kernel.org
 S:	Supported
 W:	http://sourceforge.net/projects/bonding/
 F:	Documentation/networking/bonding.rst
 F:	drivers/net/bonding/
-F:	include/net/bonding.h
+F:	include/net/bond*
 F:	include/uapi/linux/if_bonding.h

 BOSCH SENSORTEC BMA400 ACCELEROMETER IIO DRIVER

@@ -5439,6 +5440,7 @@ F: net/ax25/sysctl_net_ax25.c

 DATA ACCESS MONITOR
 M:	SeongJae Park <sj@kernel.org>
+L:	damon@lists.linux.dev
 L:	linux-mm@kvack.org
 S:	Maintained
 F:	Documentation/ABI/testing/sysfs-kernel-mm-damon

@@ -10131,7 +10133,7 @@ S: Supported
 F:	drivers/net/wireless/intel/iwlegacy/

 INTEL WIRELESS WIFI LINK (iwlwifi)
-M:	Luca Coelho <luciano.coelho@intel.com>
+M:	Gregory Greenman <gregory.greenman@intel.com>
 L:	linux-wireless@vger.kernel.org
 S:	Supported
 W:	https://wireless.wiki.kernel.org/en/users/drivers/iwlwifi

@@ -15475,7 +15477,8 @@ F: tools/perf/
 PERFORMANCE EVENTS TOOLING ARM64
 R:	John Garry <john.garry@huawei.com>
 R:	Will Deacon <will@kernel.org>
 R:	Mathieu Poirier <mathieu.poirier@linaro.org>
+R:	James Clark <james.clark@arm.com>
 R:	Mike Leach <mike.leach@linaro.org>
 R:	Leo Yan <leo.yan@linaro.org>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Supported

Makefile | 2

@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 18
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Superb Owl

 # *DOCUMENTATION*

@@ -440,6 +440,9 @@ extern void pci_iounmap(struct pci_dev *dev, void __iomem *addr);
 #define ARCH_HAS_VALID_PHYS_ADDR_RANGE
 extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
 extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
+extern bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
+					unsigned long flags);
+#define arch_memremap_can_ram_remap arch_memremap_can_ram_remap
 #endif

 /*

@@ -493,3 +493,11 @@ void __init early_ioremap_init(void)
 {
 	early_ioremap_setup();
 }
+
+bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
+				 unsigned long flags)
+{
+	unsigned long pfn = PHYS_PFN(offset);
+
+	return memblock_is_map_memory(pfn);
+}

@@ -192,4 +192,8 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
 extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
 extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);

+extern bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
+					unsigned long flags);
+#define arch_memremap_can_ram_remap arch_memremap_can_ram_remap
+
 #endif	/* __ASM_IO_H */

@@ -75,6 +75,10 @@ obj-$(CONFIG_ARM64_MTE) += mte.o
 obj-y					+= vdso-wrap.o
 obj-$(CONFIG_COMPAT_VDSO)		+= vdso32-wrap.o

+# Force dependency (vdso*-wrap.S includes vdso.so through incbin)
+$(obj)/vdso-wrap.o: $(obj)/vdso/vdso.so
+$(obj)/vdso32-wrap.o: $(obj)/vdso32/vdso.so
+
 obj-y					+= probes/
 head-y					:= head.o
 extra-y					+= $(head-y) vmlinux.lds

@@ -208,6 +208,8 @@ static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
 #ifdef CONFIG_ARM64_ERRATUM_1286807
 	{
 		ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
+		/* Kryo4xx Gold (rcpe to rfpe) => (r0p0 to r3p0) */
+		ERRATA_MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xe),
 	},
 #endif
 	{},

@@ -654,7 +654,6 @@ static const struct __ftr_reg_entry {
 	ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0),
 	ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1,
 			       &id_aa64isar1_override),
-	ARM64_FTR_REG(SYS_ID_AA64ISAR2_EL1, ftr_id_aa64isar2),
 	ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64ISAR2_EL1, ftr_id_aa64isar2,
 			       &id_aa64isar2_override),

@@ -810,7 +809,7 @@ static void __init sort_ftr_regs(void)
		 * to sys_id for subsequent binary search in get_arm64_ftr_reg()
		 * to work correctly.
		 */
-		BUG_ON(arm64_ftr_regs[i].sys_id < arm64_ftr_regs[i - 1].sys_id);
+		BUG_ON(arm64_ftr_regs[i].sys_id <= arm64_ftr_regs[i - 1].sys_id);
	}
 }

@@ -52,9 +52,6 @@ GCOV_PROFILE := n
 targets += vdso.lds
 CPPFLAGS_vdso.lds += -P -C -U$(ARCH)

-# Force dependency (incbin is bad)
-$(obj)/vdso.o : $(obj)/vdso.so
-
 # Link rule for the .so file, .lds has to be first
 $(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE
	$(call if_changed,vdsold_and_vdso_check)

@@ -131,9 +131,6 @@ obj-vdso := $(c-obj-vdso) $(c-obj-vdso-gettimeofday) $(asm-obj-vdso)
 targets += vdso.lds
 CPPFLAGS_vdso.lds += -P -C -U$(ARCH)

-# Force dependency (vdso.s includes vdso.so through incbin)
-$(obj)/vdso.o: $(obj)/vdso.so
-
 include/generated/vdso32-offsets.h: $(obj)/vdso.so.dbg FORCE
	$(call if_changed,vdsosym)

@@ -99,3 +99,11 @@ void __init early_ioremap_init(void)
 {
	early_ioremap_setup();
 }
+
+bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
+				 unsigned long flags)
+{
+	unsigned long pfn = PHYS_PFN(offset);
+
+	return pfn_is_map_memory(pfn);
+}

@@ -122,11 +122,27 @@

	/* 0x0 - 0xb */

-	/* 'current->mm' needs to be in r4 */
-	tophys(r4, r2)
-	lwz	r4, MM(r4)
-	tophys(r4, r4)
-	/* This only clobbers r0, r3, r4 and r5 */
+	/* switch_mmu_context() needs paging, let's enable it */
+	mfmsr	r9
+	ori	r11, r9, MSR_DR
+	mtmsr	r11
+	sync
+
+	/* switch_mmu_context() clobbers r12, rescue it */
+	SAVE_GPR(12, r1)
+
+	/* Calling switch_mmu_context(<inv>, current->mm, <inv>); */
+	lwz	r4, MM(r2)
	bl	switch_mmu_context
+
+	/* restore r12 */
+	REST_GPR(12, r1)
+
+	/* Disable paging again */
+	mfmsr	r9
+	li	r6, MSR_DR
+	andc	r9, r9, r6
+	mtmsr	r9
+	sync

 .endm

@@ -902,6 +902,8 @@ static void __meminit vmemmap_use_sub_pmd(unsigned long start, unsigned long end)

 static void __meminit vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
 {
+	const unsigned long page = ALIGN_DOWN(start, PMD_SIZE);
+
	vmemmap_flush_unused_pmd();

	/*
@@ -914,8 +916,7 @@ static void __meminit vmemmap_use_new_sub_pmd(unsigned long start, unsigned long
	 * Mark with PAGE_UNUSED the unused parts of the new memmap range
	 */
-	if (!IS_ALIGNED(start, PMD_SIZE))
-		memset((void *)start, PAGE_UNUSED,
-		       start - ALIGN_DOWN(start, PMD_SIZE));
+	memset((void *)page, PAGE_UNUSED, start - page);

	/*
	 * We want to avoid memset(PAGE_UNUSED) when populating the vmemmap of

@@ -735,6 +735,8 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
		  size_t offset, u32 opt_flags)
 {
	struct firmware *fw = NULL;
+	struct cred *kern_cred = NULL;
+	const struct cred *old_cred;
	bool nondirect = false;
	int ret;

@@ -751,6 +753,18 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
	if (ret <= 0) /* error or already assigned */
		goto out;

+	/*
+	 * We are about to try to access the firmware file. Because we may have been
+	 * called by a driver when serving an unrelated request from userland, we use
+	 * the kernel credentials to read the file.
+	 */
+	kern_cred = prepare_kernel_cred(NULL);
+	if (!kern_cred) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	old_cred = override_creds(kern_cred);
+
	ret = fw_get_filesystem_firmware(device, fw->priv, "", NULL);

	/* Only full reads can support decompression, platform, and sysfs. */
@@ -776,6 +790,9 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
	} else
		ret = assign_fw(fw, device);

+	revert_creds(old_cred);
+	put_cred(kern_cred);
+
 out:
	if (ret < 0) {
		fw_abort_batch_reqs(fw);

@@ -543,10 +543,6 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
	file->f_mode |= FMODE_LSEEK;
	dmabuf->file = file;

-	ret = dma_buf_stats_setup(dmabuf);
-	if (ret)
-		goto err_sysfs;
-
	mutex_init(&dmabuf->lock);
	INIT_LIST_HEAD(&dmabuf->attachments);

@@ -554,6 +550,10 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
	list_add(&dmabuf->list_node, &db_list.head);
	mutex_unlock(&db_list.lock);

+	ret = dma_buf_stats_setup(dmabuf);
+	if (ret)
+		goto err_sysfs;
+
	return dmabuf;

 err_sysfs:

@@ -296,6 +296,7 @@ static int amdgpu_ctx_set_stable_pstate(struct amdgpu_ctx *ctx,
 {
	struct amdgpu_device *adev = ctx->adev;
	enum amd_dpm_forced_level level;
+	u32 current_stable_pstate;
	int r;

	mutex_lock(&adev->pm.stable_pstate_ctx_lock);
@@ -304,6 +305,10 @@ static int amdgpu_ctx_set_stable_pstate(struct amdgpu_ctx *ctx,
		goto done;
	}

+	r = amdgpu_ctx_get_stable_pstate(ctx, &current_stable_pstate);
+	if (r || (stable_pstate == current_stable_pstate))
+		goto done;
+
	switch (stable_pstate) {
	case AMDGPU_CTX_STABLE_PSTATE_NONE:
		level = AMD_DPM_FORCED_LEVEL_AUTO;

@@ -81,6 +81,10 @@
 #include "mxgpu_vi.h"
 #include "amdgpu_dm.h"

+#if IS_ENABLED(CONFIG_X86)
+#include <asm/intel-family.h>
+#endif
+
 #define ixPCIE_LC_L1_PM_SUBSTATE	0x100100C6
 #define PCIE_LC_L1_PM_SUBSTATE__LC_L1_SUBSTATES_OVERRIDE_EN_MASK	0x00000001L
 #define PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_2_OVERRIDE_MASK	0x00000002L

@@ -1134,13 +1138,24 @@ static void vi_enable_aspm(struct amdgpu_device *adev)
	WREG32_PCIE(ixPCIE_LC_CNTL, data);
 }

+static bool aspm_support_quirk_check(void)
+{
+#if IS_ENABLED(CONFIG_X86)
+	struct cpuinfo_x86 *c = &cpu_data(0);
+
+	return !(c->x86 == 6 && c->x86_model == INTEL_FAM6_ALDERLAKE);
+#else
+	return true;
+#endif
+}
+
 static void vi_program_aspm(struct amdgpu_device *adev)
 {
	u32 data, data1, orig;
	bool bL1SS = false;
	bool bClkReqSupport = true;

-	if (!amdgpu_device_should_use_aspm(adev))
+	if (!amdgpu_device_should_use_aspm(adev) || !aspm_support_quirk_check())
		return;

	if (adev->flags & AMD_IS_APU ||

@@ -153,9 +153,4 @@ void dcn31_hw_sequencer_construct(struct dc *dc)
		dc->hwss.init_hw = dcn20_fpga_init_hw;
		dc->hwseq->funcs.init_pipes = NULL;
	}
-	if (dc->debug.disable_z10) {
-		/*hw not support z10 or sw disable it*/
-		dc->hwss.z10_restore = NULL;
-		dc->hwss.z10_save_init = NULL;
-	}
 }

@@ -1351,14 +1351,8 @@ static int smu_disable_dpms(struct smu_context *smu)
 {
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;
-	/*
-	 * TODO: (adev->in_suspend && !adev->in_s0ix) is added to pair
-	 * the workaround which always reset the asic in suspend.
-	 * It's likely that workaround will be dropped in the future.
-	 * Then the change here should be dropped together.
-	 */
	bool use_baco = !smu->is_apu &&
-		(((amdgpu_in_reset(adev) || (adev->in_suspend && !adev->in_s0ix)) &&
+		((amdgpu_in_reset(adev) &&
		  (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
		 ((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev)));

@@ -1605,17 +1605,17 @@ void i915_vma_close(struct i915_vma *vma)

 static void __i915_vma_remove_closed(struct i915_vma *vma)
 {
-	struct intel_gt *gt = vma->vm->gt;
-
-	spin_lock_irq(&gt->closed_lock);
	list_del_init(&vma->closed_link);
-	spin_unlock_irq(&gt->closed_lock);
 }

 void i915_vma_reopen(struct i915_vma *vma)
 {
+	struct intel_gt *gt = vma->vm->gt;
+
+	spin_lock_irq(&gt->closed_lock);
	if (i915_vma_is_closed(vma))
		__i915_vma_remove_closed(vma);
+	spin_unlock_irq(&gt->closed_lock);
 }

 void i915_vma_release(struct kref *ref)
@@ -1641,6 +1641,7 @@ static void force_unbind(struct i915_vma *vma)
 static void release_references(struct i915_vma *vma)
 {
	struct drm_i915_gem_object *obj = vma->obj;
+	struct intel_gt *gt = vma->vm->gt;

	GEM_BUG_ON(i915_vma_is_active(vma));

@@ -1650,7 +1651,9 @@ static void release_references(struct i915_vma *vma)
	rb_erase(&vma->obj_node, &obj->vma.tree);
	spin_unlock(&obj->vma.lock);

+	spin_lock_irq(&gt->closed_lock);
	__i915_vma_remove_closed(vma);
+	spin_unlock_irq(&gt->closed_lock);

	__i915_vma_put(vma);
 }

@@ -46,8 +46,9 @@ static bool
 nouveau_get_backlight_name(char backlight_name[BL_NAME_SIZE],
			   struct nouveau_backlight *bl)
 {
-	const int nb = ida_simple_get(&bl_ida, 0, 0, GFP_KERNEL);
-	if (nb < 0 || nb >= 100)
+	const int nb = ida_alloc_max(&bl_ida, 99, GFP_KERNEL);
+
+	if (nb < 0)
		return false;
	if (nb > 0)
		snprintf(backlight_name, BL_NAME_SIZE, "nv_backlight%d", nb);
@@ -414,7 +415,7 @@ nouveau_backlight_init(struct drm_connector *connector)
				    nv_encoder, ops, &props);
	if (IS_ERR(bl->dev)) {
		if (bl->id >= 0)
-			ida_simple_remove(&bl_ida, bl->id);
+			ida_free(&bl_ida, bl->id);
		ret = PTR_ERR(bl->dev);
		goto fail_alloc;
	}
@@ -442,7 +443,7 @@ nouveau_backlight_fini(struct drm_connector *connector)
		return;

	if (bl->id >= 0)
-		ida_simple_remove(&bl_ida, bl->id);
+		ida_free(&bl_ida, bl->id);

	backlight_device_unregister(bl->dev);
	nv_conn->backlight = NULL;

@@ -123,7 +123,7 @@ nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)

	mutex_init(&tdev->iommu.mutex);

-	if (iommu_present(&platform_bus_type)) {
+	if (device_iommu_mapped(dev)) {
		tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type);
		if (!tdev->iommu.domain)
			goto error;

@@ -38,6 +38,7 @@
 #include <drm/drm_scdc_helper.h>
 #include <linux/clk.h>
 #include <linux/component.h>
+#include <linux/gpio/consumer.h>
 #include <linux/i2c.h>
 #include <linux/of_address.h>
 #include <linux/of_gpio.h>

@@ -528,7 +528,7 @@ int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
		*seqno = atomic_add_return(1, &dev_priv->marker_seq);
	} while (*seqno == 0);

-	if (!(vmw_fifo_caps(dev_priv) & SVGA_FIFO_CAP_FENCE)) {
+	if (!vmw_has_fences(dev_priv)) {

		/*
		 * Don't request hardware to send a fence. The
@@ -675,11 +675,14 @@ int vmw_cmd_emit_dummy_query(struct vmw_private *dev_priv,
 */
 bool vmw_cmd_supported(struct vmw_private *vmw)
 {
-	if ((vmw->capabilities & (SVGA_CAP_COMMAND_BUFFERS |
-				  SVGA_CAP_CMD_BUFFERS_2)) != 0)
-		return true;
+	bool has_cmdbufs =
+		(vmw->capabilities & (SVGA_CAP_COMMAND_BUFFERS |
+				      SVGA_CAP_CMD_BUFFERS_2)) != 0;
+	if (vmw_is_svga_v3(vmw))
+		return (has_cmdbufs &&
+			(vmw->capabilities & SVGA_CAP_GBOBJECTS) != 0);
	/*
	 * We have FIFO cmd's
	 */
-	return vmw->fifo_mem != NULL;
+	return has_cmdbufs || vmw->fifo_mem != NULL;
 }

@@ -1679,4 +1679,12 @@ static inline void vmw_irq_status_write(struct vmw_private *vmw,
		outl(status, vmw->io_start + SVGA_IRQSTATUS_PORT);
 }

+static inline bool vmw_has_fences(struct vmw_private *vmw)
+{
+	if ((vmw->capabilities & (SVGA_CAP_COMMAND_BUFFERS |
+				  SVGA_CAP_CMD_BUFFERS_2)) != 0)
+		return true;
+	return (vmw_fifo_caps(vmw) & SVGA_FIFO_CAP_FENCE) != 0;
+}
+
 #endif

@@ -483,7 +483,7 @@ static int vmw_fb_kms_detach(struct vmw_fb_par *par,

 static int vmw_fb_kms_framebuffer(struct fb_info *info)
 {
-	struct drm_mode_fb_cmd2 mode_cmd;
+	struct drm_mode_fb_cmd2 mode_cmd = {0};
	struct vmw_fb_par *par = info->par;
	struct fb_var_screeninfo *var = &info->var;
	struct drm_framebuffer *cur_fb;

@@ -82,6 +82,22 @@ fman_from_fence(struct vmw_fence_obj *fence)
	return container_of(fence->base.lock, struct vmw_fence_manager, lock);
 }

+static u32 vmw_fence_goal_read(struct vmw_private *vmw)
+{
+	if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
+		return vmw_read(vmw, SVGA_REG_FENCE_GOAL);
+	else
+		return vmw_fifo_mem_read(vmw, SVGA_FIFO_FENCE_GOAL);
+}
+
+static void vmw_fence_goal_write(struct vmw_private *vmw, u32 value)
+{
+	if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
+		vmw_write(vmw, SVGA_REG_FENCE_GOAL, value);
+	else
+		vmw_fifo_mem_write(vmw, SVGA_FIFO_FENCE_GOAL, value);
+}
+
 /*
  * Note on fencing subsystem usage of irqs:
  * Typically the vmw_fences_update function is called
@@ -392,7 +408,7 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
	if (likely(!fman->seqno_valid))
		return false;

-	goal_seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE_GOAL);
+	goal_seqno = vmw_fence_goal_read(fman->dev_priv);
	if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
		return false;

@@ -400,9 +416,8 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
	list_for_each_entry(fence, &fman->fence_list, head) {
		if (!list_empty(&fence->seq_passed_actions)) {
			fman->seqno_valid = true;
-			vmw_fifo_mem_write(fman->dev_priv,
-					   SVGA_FIFO_FENCE_GOAL,
-					   fence->base.seqno);
+			vmw_fence_goal_write(fman->dev_priv,
+					     fence->base.seqno);
			break;
		}
	}

@@ -434,13 +449,12 @@ static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
	if (dma_fence_is_signaled_locked(&fence->base))
		return false;

-	goal_seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE_GOAL);
+	goal_seqno = vmw_fence_goal_read(fman->dev_priv);
	if (likely(fman->seqno_valid &&
		   goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
		return false;

-	vmw_fifo_mem_write(fman->dev_priv, SVGA_FIFO_FENCE_GOAL,
-			   fence->base.seqno);
+	vmw_fence_goal_write(fman->dev_priv, fence->base.seqno);
	fman->seqno_valid = true;

	return true;

@@ -32,6 +32,14 @@

 #define VMW_FENCE_WRAP (1 << 24)

+static u32 vmw_irqflag_fence_goal(struct vmw_private *vmw)
+{
+	if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
+		return SVGA_IRQFLAG_REG_FENCE_GOAL;
+	else
+		return SVGA_IRQFLAG_FENCE_GOAL;
+}
+
 /**
  * vmw_thread_fn - Deferred (process context) irq handler
  *
@@ -96,7 +104,7 @@ static irqreturn_t vmw_irq_handler(int irq, void *arg)
		wake_up_all(&dev_priv->fifo_queue);

	if ((masked_status & (SVGA_IRQFLAG_ANY_FENCE |
-			      SVGA_IRQFLAG_FENCE_GOAL)) &&
+			      vmw_irqflag_fence_goal(dev_priv))) &&
	    !test_and_set_bit(VMW_IRQTHREAD_FENCE, dev_priv->irqthread_pending))
		ret = IRQ_WAKE_THREAD;

@@ -137,8 +145,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
		return true;

-	if (!(vmw_fifo_caps(dev_priv) & SVGA_FIFO_CAP_FENCE) &&
-	    vmw_fifo_idle(dev_priv, seqno))
+	if (!vmw_has_fences(dev_priv) && vmw_fifo_idle(dev_priv, seqno))
		return true;

	/**
@@ -160,6 +167,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
		      unsigned long timeout)
 {
	struct vmw_fifo_state *fifo_state = dev_priv->fifo;
+	bool fifo_down = false;

	uint32_t count = 0;
	uint32_t signal_seq;
@@ -176,12 +184,14 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
	 */

	if (fifo_idle) {
-		down_read(&fifo_state->rwsem);
		if (dev_priv->cman) {
			ret = vmw_cmdbuf_idle(dev_priv->cman, interruptible,
					      10*HZ);
			if (ret)
				goto out_err;
+		} else if (fifo_state) {
+			down_read(&fifo_state->rwsem);
+			fifo_down = true;
		}
	}

@@ -218,12 +228,12 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
		}
	}
	finish_wait(&dev_priv->fence_queue, &__wait);
-	if (ret == 0 && fifo_idle)
+	if (ret == 0 && fifo_idle && fifo_state)
		vmw_fence_write(dev_priv, signal_seq);

	wake_up_all(&dev_priv->fence_queue);
 out_err:
-	if (fifo_idle)
+	if (fifo_down)
		up_read(&fifo_state->rwsem);

	return ret;
@@ -266,13 +276,13 @@ void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)

 void vmw_goal_waiter_add(struct vmw_private *dev_priv)
 {
-	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_FENCE_GOAL,
+	vmw_generic_waiter_add(dev_priv, vmw_irqflag_fence_goal(dev_priv),
			       &dev_priv->goal_queue_waiters);
 }

 void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
 {
-	vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_FENCE_GOAL,
+	vmw_generic_waiter_remove(dev_priv, vmw_irqflag_fence_goal(dev_priv),
				  &dev_priv->goal_queue_waiters);
 }

@@ -1344,7 +1344,6 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
		ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
						      mode_cmd,
						      is_bo_proxy);
-
	/*
	 * vmw_create_bo_proxy() adds a reference that is no longer
	 * needed
@@ -1385,13 +1384,16 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
	ret = vmw_user_lookup_handle(dev_priv, file_priv,
				     mode_cmd->handles[0],
				     &surface, &bo);
-	if (ret)
+	if (ret) {
+		DRM_ERROR("Invalid buffer object handle %u (0x%x).\n",
+			  mode_cmd->handles[0], mode_cmd->handles[0]);
		goto err_out;
+	}

	if (!bo &&
	    !vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) {
-		DRM_ERROR("Surface size cannot exceed %dx%d",
+		DRM_ERROR("Surface size cannot exceed %dx%d\n",
			  dev_priv->texture_max_width,
			  dev_priv->texture_max_height);
		goto err_out;

@@ -960,7 +960,7 @@ config SENSORS_LTC4261

 config SENSORS_LTQ_CPUTEMP
	bool "Lantiq cpu temperature sensor driver"
-	depends on LANTIQ
+	depends on SOC_XWAY
	help
	  If you say yes here you get support for the temperature
	  sensor inside your CPU.

@@ -708,10 +708,21 @@ static int tmp401_probe(struct i2c_client *client)
	return 0;
 }

+static const struct of_device_id __maybe_unused tmp4xx_of_match[] = {
+	{ .compatible = "ti,tmp401", },
+	{ .compatible = "ti,tmp411", },
+	{ .compatible = "ti,tmp431", },
+	{ .compatible = "ti,tmp432", },
+	{ .compatible = "ti,tmp435", },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, tmp4xx_of_match);
+
 static struct i2c_driver tmp401_driver = {
	.class		= I2C_CLASS_HWMON,
	.driver = {
		.name	= "tmp401",
+		.of_match_table = of_match_ptr(tmp4xx_of_match),
	},
	.probe_new	= tmp401_probe,
	.id_table	= tmp401_id,

@@ -1087,9 +1087,15 @@ static int of_count_icc_providers(struct device_node *np)
 {
	struct device_node *child;
	int count = 0;
+	const struct of_device_id __maybe_unused ignore_list[] = {
+		{ .compatible = "qcom,sc7180-ipa-virt" },
+		{ .compatible = "qcom,sdx55-ipa-virt" },
+		{}
+	};

	for_each_available_child_of_node(np, child) {
-		if (of_property_read_bool(child, "#interconnect-cells"))
+		if (of_property_read_bool(child, "#interconnect-cells") &&
+		    likely(!of_match_node(ignore_list, child)))
			count++;
		count += of_count_icc_providers(child);
	}

@@ -809,6 +809,9 @@ static void bcm_sf2_sw_mac_link_down(struct dsa_switch *ds, int port,
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 reg, offset;

+	if (priv->wol_ports_mask & BIT(port))
+		return;
+
	if (port != core_readl(priv, CORE_IMP0_PRT_ID)) {
		if (priv->type == BCM4908_DEVICE_ID ||
		    priv->type == BCM7445_DEVICE_ID)

@@ -403,6 +403,7 @@ static int felix_update_trapping_destinations(struct dsa_switch *ds,
 {
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
+	struct ocelot_vcap_block *block_vcap_is2;
	struct ocelot_vcap_filter *trap;
	enum ocelot_mask_mode mask_mode;
	unsigned long port_mask;
@@ -422,9 +423,13 @@ static int felix_update_trapping_destinations(struct dsa_switch *ds,
	/* We are sure that "cpu" was found, otherwise
	 * dsa_tree_setup_default_cpu() would have failed earlier.
	 */
+	block_vcap_is2 = &ocelot->block[VCAP_IS2];

	/* Make sure all traps are set up for that destination */
-	list_for_each_entry(trap, &ocelot->traps, trap_list) {
+	list_for_each_entry(trap, &block_vcap_is2->rules, list) {
+		if (!trap->is_trap)
+			continue;
+
		/* Figure out the current trapping destination */
		if (using_tag_8021q) {
			/* Redirect to the tag_8021q CPU port. If timestamps

@@ -449,7 +449,7 @@ static int aq_pm_freeze(struct device *dev)

 static int aq_pm_suspend_poweroff(struct device *dev)
 {
-	return aq_suspend_common(dev, false);
+	return aq_suspend_common(dev, true);
 }

 static int aq_pm_thaw(struct device *dev)
@@ -459,7 +459,7 @@ static int aq_pm_thaw(struct device *dev)

 static int aq_pm_resume_restore(struct device *dev)
 {
-	return atl_resume_common(dev, false);
+	return atl_resume_common(dev, true);
 }

 static const struct dev_pm_ops aq_pm_ops = {

@@ -346,7 +346,6 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
		     int budget)
 {
	struct net_device *ndev = aq_nic_get_ndev(self->aq_nic);
-	bool is_rsc_completed = true;
	int err = 0;

	for (; (self->sw_head != self->hw_head) && budget;
@@ -364,12 +363,17 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
			continue;

		if (!buff->is_eop) {
+			unsigned int frag_cnt = 0U;
			buff_ = buff;
			do {
+				bool is_rsc_completed = true;
+
				if (buff_->next >= self->size) {
					err = -EIO;
					goto err_exit;
				}
+
+				frag_cnt++;
				next_ = buff_->next,
				buff_ = &self->buff_ring[next_];
				is_rsc_completed =
@@ -377,18 +381,17 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
						    next_,
						    self->hw_head);

-				if (unlikely(!is_rsc_completed))
-					break;
+				if (unlikely(!is_rsc_completed) ||
+				    frag_cnt > MAX_SKB_FRAGS) {
+					err = 0;
+					goto err_exit;
+				}

				buff->is_error |= buff_->is_error;
				buff->is_cso_err |= buff_->is_cso_err;

			} while (!buff_->is_eop);

-			if (!is_rsc_completed) {
-				err = 0;
-				goto err_exit;
-			}
			if (buff->is_error ||
			    (buff->is_lro && buff->is_cso_err)) {
				buff_ = buff;
@@ -446,7 +449,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
				       ALIGN(hdr_len, sizeof(long)));

		if (buff->len - hdr_len > 0) {
-			skb_add_rx_frag(skb, 0, buff->rxdata.page,
+			skb_add_rx_frag(skb, i++, buff->rxdata.page,
					buff->rxdata.pg_off + hdr_len,
					buff->len - hdr_len,
					AQ_CFG_RX_FRAME_MAX);
@@ -455,7 +458,6 @@ int aq_ring_rx_clean(struct aq_ring_s *self,

		if (!buff->is_eop) {
			buff_ = buff;
-			i = 1U;
			do {
				next_ = buff_->next;
				buff_ = &self->buff_ring[next_];

@@ -889,6 +889,13 @@ int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
		err = -ENXIO;
		goto err_exit;
	}

+	/* Validate that the new hw_head_ is reasonable. */
+	if (hw_head_ >= ring->size) {
+		err = -ENXIO;
+		goto err_exit;
+	}
+
	ring->hw_head = hw_head_;
	err = aq_hw_err_from_flags(self);

@@ -3999,6 +3999,10 @@ static int bcmgenet_probe(struct platform_device *pdev)
		goto err;
	}
	priv->wol_irq = platform_get_irq_optional(pdev, 2);
+	if (priv->wol_irq == -EPROBE_DEFER) {
+		err = priv->wol_irq;
+		goto err;
+	}

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base)) {

@@ -2793,14 +2793,14 @@ int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
		goto out;
	na = ret;

-	memcpy(p->id, vpd + id, min_t(int, id_len, ID_LEN));
+	memcpy(p->id, vpd + id, min_t(unsigned int, id_len, ID_LEN));
	strim(p->id);
-	memcpy(p->sn, vpd + sn, min_t(int, sn_len, SERNUM_LEN));
+	memcpy(p->sn, vpd + sn, min_t(unsigned int, sn_len, SERNUM_LEN));
	strim(p->sn);
-	memcpy(p->pn, vpd + pn, min_t(int, pn_len, PN_LEN));
+	memcpy(p->pn, vpd + pn, min_t(unsigned int, pn_len, PN_LEN));
	strim(p->pn);
-	memcpy(p->na, vpd + na, min_t(int, na_len, MACADDR_LEN));
-	strim((char *)p->na);
+	memcpy(p->na, vpd + na, min_t(unsigned int, na_len, MACADDR_LEN));
+	strim(p->na);

 out:
	vfree(vpd);

@@ -1399,8 +1399,10 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)

	/* alloc_etherdev ensures aligned and zeroed private structures */
	dev = alloc_etherdev (sizeof (*tp));
-	if (!dev)
+	if (!dev) {
+		pci_disable_device(pdev);
		return -ENOMEM;
+	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
@@ -1785,6 +1787,7 @@ err_out_free_res:

 err_out_free_netdev:
	free_netdev (dev);
+	pci_disable_device(pdev);
	return -ENODEV;
 }

@@ -7549,42 +7549,43 @@ static void i40e_free_macvlan_channels(struct i40e_vsi *vsi)
 static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev,
			    struct i40e_fwd_adapter *fwd)
 {
+	struct i40e_channel *ch = NULL, *ch_tmp, *iter;
	int ret = 0, num_tc = 1, i, aq_err;
-	struct i40e_channel *ch, *ch_tmp;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;

-	if (list_empty(&vsi->macvlan_list))
-		return -EINVAL;
-
	/* Go through the list and find an available channel */
-	list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
-		if (!i40e_is_channel_macvlan(ch)) {
-			ch->fwd = fwd;
+	list_for_each_entry_safe(iter, ch_tmp, &vsi->macvlan_list, list) {
+		if (!i40e_is_channel_macvlan(iter)) {
+			iter->fwd = fwd;
			/* record configuration for macvlan interface in vdev */
			for (i = 0; i < num_tc; i++)
				netdev_bind_sb_channel_queue(vsi->netdev, vdev,
							     i,
-							     ch->num_queue_pairs,
-							     ch->base_queue);
-			for (i = 0; i < ch->num_queue_pairs; i++) {
+							     iter->num_queue_pairs,
+							     iter->base_queue);
+			for (i = 0; i < iter->num_queue_pairs; i++) {
				struct i40e_ring *tx_ring, *rx_ring;
				u16 pf_q;

-				pf_q = ch->base_queue + i;
+				pf_q = iter->base_queue + i;

				/* Get to TX ring ptr */
				tx_ring = vsi->tx_rings[pf_q];
-				tx_ring->ch = ch;
+				tx_ring->ch = iter;

				/* Get the RX ring ptr */
				rx_ring = vsi->rx_rings[pf_q];
-				rx_ring->ch = ch;
+				rx_ring->ch = iter;
			}
+			ch = iter;
			break;
		}
	}

+	if (!ch)
+		return -EINVAL;
+
	/* Guarantee all rings are updated before we update the
	 * MAC address filter.
	 */

@@ -540,6 +540,7 @@ struct ice_pf {
	struct mutex avail_q_mutex;	/* protects access to avail_[rx|tx]qs */
	struct mutex sw_mutex;		/* lock for protecting VSI alloc flow */
	struct mutex tc_mutex;		/* lock to protect TC changes */
+	struct mutex adev_mutex;	/* lock to protect aux device access */
	u32 msg_enable;
	struct ice_ptp ptp;
	struct tty_driver *ice_gnss_tty_driver;

@@ -37,14 +37,17 @@ void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event)
	if (WARN_ON_ONCE(!in_task()))
		return;

+	mutex_lock(&pf->adev_mutex);
	if (!pf->adev)
-		return;
+		goto finish;

	device_lock(&pf->adev->dev);
	iadrv = ice_get_auxiliary_drv(pf);
	if (iadrv && iadrv->event_handler)
		iadrv->event_handler(pf, event);
	device_unlock(&pf->adev->dev);
+finish:
+	mutex_unlock(&pf->adev_mutex);
 }

 /**
@@ -290,7 +293,6 @@ int ice_plug_aux_dev(struct ice_pf *pf)
		return -ENOMEM;

	adev = &iadev->adev;
-	pf->adev = adev;
	iadev->pf = pf;

	adev->id = pf->aux_idx;
@@ -300,18 +302,20 @@ int ice_plug_aux_dev(struct ice_pf *pf)

	ret = auxiliary_device_init(adev);
	if (ret) {
-		pf->adev = NULL;
		kfree(iadev);
		return ret;
	}

	ret = auxiliary_device_add(adev);
	if (ret) {
-		pf->adev = NULL;
		auxiliary_device_uninit(adev);
		return ret;
	}

+	mutex_lock(&pf->adev_mutex);
+	pf->adev = adev;
+	mutex_unlock(&pf->adev_mutex);
+
	return 0;
 }

@@ -320,12 +324,17 @@ int ice_plug_aux_dev(struct ice_pf *pf)
 */
 void ice_unplug_aux_dev(struct ice_pf *pf)
 {
-	if (!pf->adev)
-		return;
+	struct auxiliary_device *adev;

-	auxiliary_device_delete(pf->adev);
-	auxiliary_device_uninit(pf->adev);
+	mutex_lock(&pf->adev_mutex);
+	adev = pf->adev;
	pf->adev = NULL;
+	mutex_unlock(&pf->adev_mutex);
+
+	if (adev) {
+		auxiliary_device_delete(adev);
+		auxiliary_device_uninit(adev);
+	}
 }

 /**

@@ -3769,6 +3769,7 @@ u16 ice_get_avail_rxq_count(struct ice_pf *pf)
 static void ice_deinit_pf(struct ice_pf *pf)
 {
	ice_service_task_stop(pf);
+	mutex_destroy(&pf->adev_mutex);
	mutex_destroy(&pf->sw_mutex);
	mutex_destroy(&pf->tc_mutex);
	mutex_destroy(&pf->avail_q_mutex);
@@ -3847,6 +3848,7 @@ static int ice_init_pf(struct ice_pf *pf)

	mutex_init(&pf->sw_mutex);
	mutex_init(&pf->tc_mutex);
+	mutex_init(&pf->adev_mutex);

	INIT_HLIST_HEAD(&pf->aq_wait_list);
	spin_lock_init(&pf->aq_wait_lock);

@@ -2287,6 +2287,7 @@ ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)

 /**
  * ice_ptp_tx_tstamp_cleanup - Cleanup old timestamp requests that got dropped
+ * @hw: pointer to the hw struct
  * @tx: PTP Tx tracker to clean up
  *
  * Loop through the Tx timestamp requests and see if any of them have been
@@ -2295,7 +2296,7 @@ ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
  * timestamp will never be captured. This might happen if the packet gets
  * discarded before it reaches the PHY timestamping block.
  */
-static void ice_ptp_tx_tstamp_cleanup(struct ice_ptp_tx *tx)
+static void ice_ptp_tx_tstamp_cleanup(struct ice_hw *hw, struct ice_ptp_tx *tx)
 {
	u8 idx;

@@ -2304,11 +2305,16 @@ static void ice_ptp_tx_tstamp_cleanup(struct ice_ptp_tx *tx)

	for_each_set_bit(idx, tx->in_use, tx->len) {
		struct sk_buff *skb;
+		u64 raw_tstamp;

		/* Check if this SKB has been waiting for too long */
		if (time_is_after_jiffies(tx->tstamps[idx].start + 2 * HZ))
			continue;

+		/* Read tstamp to be able to use this register again */
+		ice_read_phy_tstamp(hw, tx->quad, idx + tx->quad_offset,
+				    &raw_tstamp);
+
		spin_lock(&tx->lock);
		skb = tx->tstamps[idx].skb;
		tx->tstamps[idx].skb = NULL;
@@ -2330,7 +2336,7 @@ static void ice_ptp_periodic_work(struct kthread_work *work)

	ice_ptp_update_cached_phctime(pf);

-	ice_ptp_tx_tstamp_cleanup(&pf->ptp.port.tx);
+	ice_ptp_tx_tstamp_cleanup(&pf->hw, &pf->ptp.port.tx);

	/* Run twice a second */
	kthread_queue_delayed_work(ptp->kworker, &ptp->work,

@@ -1307,13 +1307,52 @@ error_param:
				     NULL, 0);
 }

+/**
+ * ice_vf_vsi_dis_single_txq - disable a single Tx queue
+ * @vf: VF to disable queue for
+ * @vsi: VSI for the VF
+ * @q_id: VF relative (0-based) queue ID
+ *
+ * Attempt to disable the Tx queue passed in. If the Tx queue was successfully
+ * disabled then clear q_id bit in the enabled queues bitmap and return
+ * success. Otherwise return error.
+ */
+static int
+ice_vf_vsi_dis_single_txq(struct ice_vf *vf, struct ice_vsi *vsi, u16 q_id)
+{
+	struct ice_txq_meta txq_meta = { 0 };
+	struct ice_tx_ring *ring;
+	int err;
+
+	if (!test_bit(q_id, vf->txq_ena))
+		dev_dbg(ice_pf_to_dev(vsi->back), "Queue %u on VSI %u is not enabled, but stopping it anyway\n",
+			q_id, vsi->vsi_num);
+
+	ring = vsi->tx_rings[q_id];
+	if (!ring)
+		return -EINVAL;
+
+	ice_fill_txq_meta(vsi, ring, &txq_meta);
+
+	err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id, ring, &txq_meta);
+	if (err) {
+		dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
+			q_id, vsi->vsi_num);
+		return err;
+	}
+
+	/* Clear enabled queues flag */
+	clear_bit(q_id, vf->txq_ena);
+
+	return 0;
+}
+
 /**
  * ice_vc_dis_qs_msg
  * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
  *
- * called from the VF to disable all or specific
- * queue(s)
+ * called from the VF to disable all or specific queue(s)
  */
 static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
 {
@@ -1350,30 +1389,15 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
		q_map = vqs->tx_queues;

		for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
-			struct ice_tx_ring *ring = vsi->tx_rings[vf_q_id];
-			struct ice_txq_meta txq_meta = { 0 };
-
			if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
				goto error_param;
			}

-			if (!test_bit(vf_q_id, vf->txq_ena))
-				dev_dbg(ice_pf_to_dev(vsi->back), "Queue %u on VSI %u is not enabled, but stopping it anyway\n",
-					vf_q_id, vsi->vsi_num);
-
-			ice_fill_txq_meta(vsi, ring, &txq_meta);
-
-			if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id,
-						 ring, &txq_meta)) {
-				dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
-					vf_q_id, vsi->vsi_num);
+			if (ice_vf_vsi_dis_single_txq(vf, vsi, vf_q_id)) {
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
				goto error_param;
			}
-
-			/* Clear enabled queues flag */
-			clear_bit(vf_q_id, vf->txq_ena);
		}
	}

@@ -1622,6 +1646,14 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
		if (qpi->txq.ring_len > 0) {
			vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
			vsi->tx_rings[i]->count = qpi->txq.ring_len;

+			/* Disable any existing queue first */
+			if (ice_vf_vsi_dis_single_txq(vf, vsi, q_idx)) {
+				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+				goto error_param;
+			}
+
+			/* Configure a queue with the requested settings */
			if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) {
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
				goto error_param;

@@ -395,7 +395,7 @@ static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
	static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 };
	int i, k;

-	memset(ppe->foe_table, 0, MTK_PPE_ENTRIES * sizeof(ppe->foe_table));
+	memset(ppe->foe_table, 0, MTK_PPE_ENTRIES * sizeof(*ppe->foe_table));

	if (!IS_ENABLED(CONFIG_SOC_MT7621))
		return;

@@ -568,10 +568,8 @@ static int
 mlxsw_sp2_ipip_rem_addr_set_gre6(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_ipip_entry *ipip_entry)
 {
-	struct __ip6_tnl_parm parms6;
-
-	parms6 = mlxsw_sp_ipip_netdev_parms6(ipip_entry->ol_dev);
-	return mlxsw_sp_ipv6_addr_kvdl_index_get(mlxsw_sp, &parms6.raddr,
+	return mlxsw_sp_ipv6_addr_kvdl_index_get(mlxsw_sp,
+						 &ipip_entry->parms.daddr.addr6,
						 &ipip_entry->dip_kvdl_index);
 }

@@ -579,10 +577,7 @@ static void
 mlxsw_sp2_ipip_rem_addr_unset_gre6(struct mlxsw_sp *mlxsw_sp,
				   const struct mlxsw_sp_ipip_entry *ipip_entry)
 {
-	struct __ip6_tnl_parm parms6;
-
-	parms6 = mlxsw_sp_ipip_netdev_parms6(ipip_entry->ol_dev);
-	mlxsw_sp_ipv6_addr_put(mlxsw_sp, &parms6.raddr);
+	mlxsw_sp_ipv6_addr_put(mlxsw_sp, &ipip_entry->parms.daddr.addr6);
 }

 static const struct mlxsw_sp_ipip_ops mlxsw_sp2_ipip_gre6_ops = {

@@ -1622,7 +1622,7 @@ int ocelot_trap_add(struct ocelot *ocelot, int port,
		trap->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
		trap->action.port_mask = 0;
		trap->take_ts = take_ts;
-		list_add_tail(&trap->trap_list, &ocelot->traps);
+		trap->is_trap = true;
		new = true;
	}

@@ -1634,10 +1634,8 @@ int ocelot_trap_add(struct ocelot *ocelot, int port,
	err = ocelot_vcap_filter_replace(ocelot, trap);
	if (err) {
		trap->ingress_port_mask &= ~BIT(port);
-		if (!trap->ingress_port_mask) {
-			list_del(&trap->trap_list);
+		if (!trap->ingress_port_mask)
			kfree(trap);
-		}
		return err;
	}

@@ -1657,11 +1655,8 @@ int ocelot_trap_del(struct ocelot *ocelot, int port, unsigned long cookie)
		return 0;

	trap->ingress_port_mask &= ~BIT(port);
-	if (!trap->ingress_port_mask) {
-		list_del(&trap->trap_list);
-
+	if (!trap->ingress_port_mask)
		return ocelot_vcap_filter_del(ocelot, trap);
-	}

	return ocelot_vcap_filter_replace(ocelot, trap);
 }

@@ -280,9 +280,10 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
		filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
		break;
	case FLOW_ACTION_TRAP:
-		if (filter->block_id != VCAP_IS2) {
+		if (filter->block_id != VCAP_IS2 ||
+		    filter->lookup != 0) {
			NL_SET_ERR_MSG_MOD(extack,
-					   "Trap action can only be offloaded to VCAP IS2");
+					   "Trap action can only be offloaded to VCAP IS2 lookup 0");
			return -EOPNOTSUPP;
		}
		if (filter->goto_target != -1) {
@@ -295,7 +296,7 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
		filter->action.cpu_copy_ena = true;
		filter->action.cpu_qu_num = 0;
		filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
-		list_add_tail(&filter->trap_list, &ocelot->traps);
+		filter->is_trap = true;
		break;
	case FLOW_ACTION_POLICE:
		if (filter->block_id == PSFP_BLOCK_ID) {
@@ -878,8 +879,6 @@ int ocelot_cls_flower_replace(struct ocelot *ocelot, int port,

	ret = ocelot_flower_parse(ocelot, port, ingress, f, filter);
	if (ret) {
-		if (!list_empty(&filter->trap_list))
-			list_del(&filter->trap_list);
		kfree(filter);
		return ret;
	}

@@ -374,7 +374,6 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
			 OCELOT_VCAP_BIT_0);
	vcap_key_set(vcap, &data, VCAP_IS2_HK_IGR_PORT_MASK, 0,
		     ~filter->ingress_port_mask);
-	vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_FIRST, OCELOT_VCAP_BIT_ANY);
	vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_HOST_MATCH,
			 OCELOT_VCAP_BIT_ANY);
	vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L2_MC, filter->dmac_mc);
@@ -1217,6 +1216,8 @@ int ocelot_vcap_filter_add(struct ocelot *ocelot,
		struct ocelot_vcap_filter *tmp;

		tmp = ocelot_vcap_block_find_filter_by_index(block, i);
+		/* Read back the filter's counters before moving it */
+		vcap_entry_get(ocelot, i - 1, tmp);
		vcap_entry_set(ocelot, i, tmp);
	}

@@ -1250,7 +1251,11 @@ int ocelot_vcap_filter_del(struct ocelot *ocelot,
	struct ocelot_vcap_filter del_filter;
	int i, index;

+	/* Need to inherit the block_id so that vcap_entry_set()
+	 * does not get confused and knows where to install it.
+	 */
	memset(&del_filter, 0, sizeof(del_filter));
+	del_filter.block_id = filter->block_id;

	/* Gets index of the filter */
	index = ocelot_vcap_block_get_filter_index(block, filter);
@@ -1265,6 +1270,8 @@ int ocelot_vcap_filter_del(struct ocelot *ocelot,
		struct ocelot_vcap_filter *tmp;

		tmp = ocelot_vcap_block_find_filter_by_index(block, i);
+		/* Read back the filter's counters before moving it */
+		vcap_entry_get(ocelot, i + 1, tmp);
		vcap_entry_set(ocelot, i, tmp);
	}

@@ -256,7 +256,7 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)

	err = ionic_map_bars(ionic);
	if (err)
-		goto err_out_pci_disable_device;
+		goto err_out_pci_release_regions;

	/* Configure the device */
	err = ionic_setup(ionic);
@@ -360,6 +360,7 @@ err_out_teardown:

 err_out_unmap_bars:
	ionic_unmap_bars(ionic);
+err_out_pci_release_regions:
	pci_release_regions(pdev);
 err_out_pci_disable_device:
	pci_disable_device(pdev);

@@ -3579,6 +3579,11 @@ static int efx_ef10_mtd_probe(struct efx_nic *efx)
		n_parts++;
	}

+	if (!n_parts) {
+		kfree(parts);
+		return 0;
+	}
+
	rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
 fail:
	if (rc)

@@ -867,7 +867,9 @@ static void efx_set_xdp_channels(struct efx_nic *efx)

 int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
 {
-	struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
+	struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel,
+			   *ptp_channel = efx_ptp_channel(efx);
+	struct efx_ptp_data *ptp_data = efx->ptp_data;
	unsigned int i, next_buffer_table = 0;
	u32 old_rxq_entries, old_txq_entries;
	int rc, rc2;
@@ -938,6 +940,7 @@ int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)

	efx_set_xdp_channels(efx);
 out:
+	efx->ptp_data = NULL;
	/* Destroy unused channel structures */
	for (i = 0; i < efx->n_channels; i++) {
		channel = other_channel[i];
@@ -948,6 +951,7 @@ out:
		}
	}

+	efx->ptp_data = ptp_data;
	rc2 = efx_soft_enable_interrupts(efx);
	if (rc2) {
		rc = rc ? rc : rc2;
@@ -966,6 +970,7 @@ rollback:
	efx->txq_entries = old_txq_entries;
	for (i = 0; i < efx->n_channels; i++)
		swap(efx->channel[i], other_channel[i]);
+	efx_ptp_update_channel(efx, ptp_channel);
	goto out;
 }

@@ -45,6 +45,7 @@
 #include "farch_regs.h"
 #include "tx.h"
 #include "nic.h" /* indirectly includes ptp.h */
+#include "efx_channels.h"

 /* Maximum number of events expected to make up a PTP event */
 #define	MAX_EVENT_FRAGS 3
@@ -541,6 +542,12 @@ struct efx_channel *efx_ptp_channel(struct efx_nic *efx)
	return efx->ptp_data ? efx->ptp_data->channel : NULL;
 }

+void efx_ptp_update_channel(struct efx_nic *efx, struct efx_channel *channel)
+{
+	if (efx->ptp_data)
+		efx->ptp_data->channel = channel;
+}
+
 static u32 last_sync_timestamp_major(struct efx_nic *efx)
 {
	struct efx_channel *channel = efx_ptp_channel(efx);
@@ -1443,6 +1450,11 @@ int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel)
	int rc = 0;
	unsigned int pos;

+	if (efx->ptp_data) {
+		efx->ptp_data->channel = channel;
+		return 0;
+	}
+
	ptp = kzalloc(sizeof(struct efx_ptp_data), GFP_KERNEL);
	efx->ptp_data = ptp;
	if (!efx->ptp_data)
@@ -2176,7 +2188,7 @@ static const struct efx_channel_type efx_ptp_channel_type = {
	.pre_probe		= efx_ptp_probe_channel,
	.post_remove		= efx_ptp_remove_channel,
	.get_name		= efx_ptp_get_channel_name,
-	/* no copy operation; there is no need to reallocate this channel */
+	.copy			= efx_copy_channel,
	.receive_skb		= efx_ptp_rx,
	.want_txqs		= efx_ptp_want_txqs,
	.keep_eventq		= false,

@@ -16,6 +16,7 @@ struct ethtool_ts_info;
 int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel);
 void efx_ptp_defer_probe_with_channel(struct efx_nic *efx);
 struct efx_channel *efx_ptp_channel(struct efx_nic *efx);
+void efx_ptp_update_channel(struct efx_nic *efx, struct efx_channel *channel);
 void efx_ptp_remove(struct efx_nic *efx);
 int efx_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr);
 int efx_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr);

@@ -181,7 +181,7 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
		return -ENOMEM;

	/* Enable pci device */
-	ret = pci_enable_device(pdev);
+	ret = pcim_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n",
			__func__);
@@ -241,8 +241,6 @@ static void stmmac_pci_remove(struct pci_dev *pdev)
		pcim_iounmap_regions(pdev, BIT(i));
		break;
	}
-
-	pci_disable_device(pdev);
 }

 static int __maybe_unused stmmac_pci_suspend(struct device *dev)

|
@ -1743,7 +1743,7 @@ static int ksz886x_cable_test_get_status(struct phy_device *phydev,
|
|||
|
||||
static int lanphy_read_page_reg(struct phy_device *phydev, int page, u32 addr)
|
||||
{
|
||||
u32 data;
|
||||
int data;
|
||||
|
||||
phy_lock_mdio_bus(phydev);
|
||||
__phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL, page);
|
||||
|
@ -2444,8 +2444,7 @@ static int lan8804_config_init(struct phy_device *phydev)
|
|||
|
||||
static irqreturn_t lan8814_handle_interrupt(struct phy_device *phydev)
|
||||
{
|
||||
u16 tsu_irq_status;
|
||||
int irq_status;
|
||||
int irq_status, tsu_irq_status;
|
||||
|
||||
irq_status = phy_read(phydev, LAN8814_INTS);
|
||||
if (irq_status > 0 && (irq_status & LAN8814_INT_LINK))
|
||||
|
@ -2657,6 +2656,7 @@ static struct phy_driver ksphy_driver[] = {
|
|||
.name = "Micrel KS8737",
|
||||
/* PHY_BASIC_FEATURES */
|
||||
.driver_data = &ks8737_type,
|
||||
.probe = kszphy_probe,
|
||||
.config_init = kszphy_config_init,
|
||||
.config_intr = kszphy_config_intr,
|
||||
.handle_interrupt = kszphy_handle_interrupt,
|
||||
|
@ -2782,8 +2782,8 @@ static struct phy_driver ksphy_driver[] = {
|
|||
.config_init = ksz8061_config_init,
|
||||
.config_intr = kszphy_config_intr,
|
||||
.handle_interrupt = kszphy_handle_interrupt,
|
||||
.suspend = kszphy_suspend,
|
||||
.resume = kszphy_resume,
|
||||
.suspend = genphy_suspend,
|
||||
.resume = genphy_resume,
|
||||
}, {
|
||||
.phy_id = PHY_ID_KSZ9021,
|
||||
.phy_id_mask = 0x000ffffe,
|
||||
|
|
|
@@ -970,8 +970,13 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
 {
	struct phy_device *phydev = phy_dat;
	struct phy_driver *drv = phydev->drv;
+	irqreturn_t ret;

-	return drv->handle_interrupt(phydev);
+	mutex_lock(&phydev->lock);
+	ret = drv->handle_interrupt(phydev);
+	mutex_unlock(&phydev->lock);
+
+	return ret;
 }

 /**

@@ -1288,6 +1288,7 @@ static void ath11k_core_restart(struct work_struct *work)

		ieee80211_stop_queues(ar->hw);
		ath11k_mac_drain_tx(ar);
+		complete(&ar->completed_11d_scan);
		complete(&ar->scan.started);
		complete(&ar->scan.completed);
		complete(&ar->peer_assoc_done);

@@ -38,6 +38,8 @@

 extern unsigned int ath11k_frame_mode;

+#define ATH11K_SCAN_TIMEOUT_HZ (20 * HZ)
+
 #define ATH11K_MON_TIMER_INTERVAL  10

 enum ath11k_supported_bw {
@@ -189,6 +191,12 @@ enum ath11k_scan_state {
	ATH11K_SCAN_ABORTING,
 };

+enum ath11k_11d_state {
+	ATH11K_11D_IDLE,
+	ATH11K_11D_PREPARING,
+	ATH11K_11D_RUNNING,
+};
+
 enum ath11k_dev_flags {
	ATH11K_CAC_RUNNING,
	ATH11K_FLAG_CORE_REGISTERED,
@@ -607,9 +615,8 @@ struct ath11k {
	bool dfs_block_radar_events;
	struct ath11k_thermal thermal;
	u32 vdev_id_11d_scan;
-	struct completion finish_11d_scan;
-	struct completion finish_11d_ch_list;
-	bool pending_11d;
+	struct completion completed_11d_scan;
+	enum ath11k_11d_state state_11d;
	bool regdom_set_by_user;
	int hw_rate_code;
	u8 twt_enabled;

@@ -3601,26 +3601,6 @@ static int ath11k_mac_op_hw_scan(struct ieee80211_hw *hw,
 	if (ret)
 		goto exit;

-	/* Currently the pending_11d=true only happened 1 time while
-	 * wlan interface up in ath11k_mac_11d_scan_start(), it is called by
-	 * ath11k_mac_op_add_interface(), after wlan interface up,
-	 * pending_11d=false always.
-	 * If remove below wait, it always happened scan fail and lead connect
-	 * fail while wlan interface up, because it has a 11d scan which is running
-	 * in firmware, and lead this scan failed.
-	 */
-	if (ar->pending_11d) {
-		long time_left;
-		unsigned long timeout = 5 * HZ;
-
-		if (ar->supports_6ghz)
-			timeout += 5 * HZ;
-
-		time_left = wait_for_completion_timeout(&ar->finish_11d_ch_list, timeout);
-		ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
-			   "mac wait 11d channel list time left %ld\n", time_left);
-	}
-
 	memset(&arg, 0, sizeof(arg));
 	ath11k_wmi_start_scan_init(ar, &arg);
 	arg.vdev_id = arvif->vdev_id;
@@ -3686,6 +3666,10 @@ exit:
 	kfree(arg.extraie.ptr);

 	mutex_unlock(&ar->conf_mutex);
+
+	if (ar->state_11d == ATH11K_11D_PREPARING)
+		ath11k_mac_11d_scan_start(ar, arvif->vdev_id);
+
 	return ret;
 }
@@ -5814,7 +5798,7 @@ static int ath11k_mac_op_start(struct ieee80211_hw *hw)

 	/* TODO: Do we need to enable ANI? */

-	ath11k_reg_update_chan_list(ar);
+	ath11k_reg_update_chan_list(ar, false);

 	ar->num_started_vdevs = 0;
 	ar->num_created_vdevs = 0;
@@ -5881,6 +5865,11 @@ static void ath11k_mac_op_stop(struct ieee80211_hw *hw)
 	cancel_work_sync(&ar->ab->update_11d_work);
 	cancel_work_sync(&ar->ab->rfkill_work);

+	if (ar->state_11d == ATH11K_11D_PREPARING) {
+		ar->state_11d = ATH11K_11D_IDLE;
+		complete(&ar->completed_11d_scan);
+	}
+
 	spin_lock_bh(&ar->data_lock);
 	list_for_each_entry_safe(ppdu_stats, tmp, &ar->ppdu_stats_info, list) {
 		list_del(&ppdu_stats->list);
@@ -6051,7 +6040,7 @@ static bool ath11k_mac_vif_ap_active_any(struct ath11k_base *ab)
 	return false;
 }

-void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id, bool wait)
+void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id)
 {
 	struct wmi_11d_scan_start_params param;
 	int ret;
@@ -6079,28 +6068,22 @@ void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id, bool wait)

 	ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac start 11d scan\n");

-	if (wait)
-		reinit_completion(&ar->finish_11d_scan);
-
 	ret = ath11k_wmi_send_11d_scan_start_cmd(ar, &param);
 	if (ret) {
 		ath11k_warn(ar->ab, "failed to start 11d scan vdev %d ret: %d\n",
 			    vdev_id, ret);
 	} else {
 		ar->vdev_id_11d_scan = vdev_id;
-		if (wait) {
-			ar->pending_11d = true;
-			ret = wait_for_completion_timeout(&ar->finish_11d_scan,
-							  5 * HZ);
-			ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
-				   "mac 11d scan left time %d\n", ret);
-
-			if (!ret)
-				ar->pending_11d = false;
-		}
+		if (ar->state_11d == ATH11K_11D_PREPARING)
+			ar->state_11d = ATH11K_11D_RUNNING;
 	}

 fin:
+	if (ar->state_11d == ATH11K_11D_PREPARING) {
+		ar->state_11d = ATH11K_11D_IDLE;
+		complete(&ar->completed_11d_scan);
+	}
+
 	mutex_unlock(&ar->ab->vdev_id_11d_lock);
 }
@@ -6123,12 +6106,15 @@ void ath11k_mac_11d_scan_stop(struct ath11k *ar)
 		vdev_id = ar->vdev_id_11d_scan;

 		ret = ath11k_wmi_send_11d_scan_stop_cmd(ar, vdev_id);
-		if (ret)
+		if (ret) {
 			ath11k_warn(ar->ab,
 				    "failed to stopt 11d scan vdev %d ret: %d\n",
 				    vdev_id, ret);
-		else
+		} else {
 			ar->vdev_id_11d_scan = ATH11K_11D_INVALID_VDEV_ID;
+			ar->state_11d = ATH11K_11D_IDLE;
+			complete(&ar->completed_11d_scan);
+		}
 	}
 	mutex_unlock(&ar->ab->vdev_id_11d_lock);
 }
@@ -6324,8 +6310,10 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
 			goto err_peer_del;
 		}

-		ath11k_mac_11d_scan_start(ar, arvif->vdev_id, true);
-
+		if (test_bit(WMI_TLV_SERVICE_11D_OFFLOAD, ab->wmi_ab.svc_map)) {
+			reinit_completion(&ar->completed_11d_scan);
+			ar->state_11d = ATH11K_11D_PREPARING;
+		}
 		break;
 	case WMI_VDEV_TYPE_MONITOR:
 		set_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
@@ -7190,7 +7178,7 @@ ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
 	}

 	if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
-		ath11k_mac_11d_scan_start(ar, arvif->vdev_id, false);
+		ath11k_mac_11d_scan_start(ar, arvif->vdev_id);

 	mutex_unlock(&ar->conf_mutex);
 }
@@ -8671,8 +8659,7 @@ int ath11k_mac_allocate(struct ath11k_base *ab)
 		ar->monitor_vdev_id = -1;
 		clear_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
 		ar->vdev_id_11d_scan = ATH11K_11D_INVALID_VDEV_ID;
-		init_completion(&ar->finish_11d_scan);
-		init_completion(&ar->finish_11d_ch_list);
+		init_completion(&ar->completed_11d_scan);
 	}

 	return 0;
@@ -130,7 +130,7 @@ extern const struct htt_rx_ring_tlv_filter ath11k_mac_mon_status_filter_default;
 #define ATH11K_SCAN_11D_INTERVAL		600000
 #define ATH11K_11D_INVALID_VDEV_ID		0xFFFF

-void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id, bool wait);
+void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id);
 void ath11k_mac_11d_scan_stop(struct ath11k *ar);
 void ath11k_mac_11d_scan_stop_all(struct ath11k_base *ab);
@@ -102,7 +102,7 @@ ath11k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
 		ar->regdom_set_by_user = true;
 }

-int ath11k_reg_update_chan_list(struct ath11k *ar)
+int ath11k_reg_update_chan_list(struct ath11k *ar, bool wait)
 {
 	struct ieee80211_supported_band **bands;
 	struct scan_chan_list_params *params;
@@ -111,7 +111,32 @@ int ath11k_reg_update_chan_list(struct ath11k *ar)
 	struct channel_param *ch;
 	enum nl80211_band band;
 	int num_channels = 0;
-	int i, ret;
+	int i, ret, left;
+
+	if (wait && ar->state_11d != ATH11K_11D_IDLE) {
+		left = wait_for_completion_timeout(&ar->completed_11d_scan,
+						   ATH11K_SCAN_TIMEOUT_HZ);
+		if (!left) {
+			ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+				   "failed to receive 11d scan complete: timed out\n");
+			ar->state_11d = ATH11K_11D_IDLE;
+		}
+		ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+			   "reg 11d scan wait left time %d\n", left);
+	}
+
+	if (wait &&
+	    (ar->scan.state == ATH11K_SCAN_STARTING ||
+	     ar->scan.state == ATH11K_SCAN_RUNNING)) {
+		left = wait_for_completion_timeout(&ar->scan.completed,
+						   ATH11K_SCAN_TIMEOUT_HZ);
+		if (!left)
+			ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+				   "failed to receive hw scan complete: timed out\n");
+
+		ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+			   "reg hw scan wait left time %d\n", left);
+	}

 	bands = hw->wiphy->bands;
 	for (band = 0; band < NUM_NL80211_BANDS; band++) {
@@ -193,11 +218,6 @@ int ath11k_reg_update_chan_list(struct ath11k *ar)
 	ret = ath11k_wmi_send_scan_chan_list_cmd(ar, params);
 	kfree(params);

-	if (ar->pending_11d) {
-		complete(&ar->finish_11d_ch_list);
-		ar->pending_11d = false;
-	}
-
 	return ret;
 }
@@ -263,15 +283,8 @@ int ath11k_regd_update(struct ath11k *ar)
 		goto err;
 	}

-	if (ar->pending_11d)
-		complete(&ar->finish_11d_scan);
-
 	rtnl_lock();
 	wiphy_lock(ar->hw->wiphy);
-
-	if (ar->pending_11d)
-		reinit_completion(&ar->finish_11d_ch_list);
-
 	ret = regulatory_set_wiphy_regd_sync(ar->hw->wiphy, regd_copy);
 	wiphy_unlock(ar->hw->wiphy);
 	rtnl_unlock();
@@ -282,7 +295,7 @@ int ath11k_regd_update(struct ath11k *ar)
 		goto err;

 	if (ar->state == ATH11K_STATE_ON) {
-		ret = ath11k_reg_update_chan_list(ar);
+		ret = ath11k_reg_update_chan_list(ar, true);
 		if (ret)
 			goto err;
 	}
@@ -32,5 +32,5 @@ struct ieee80211_regdomain *
 ath11k_reg_build_regd(struct ath11k_base *ab,
 		      struct cur_regulatory_info *reg_info, bool intersect);
 int ath11k_regd_update(struct ath11k *ar);
-int ath11k_reg_update_chan_list(struct ath11k *ar);
+int ath11k_reg_update_chan_list(struct ath11k *ar, bool wait);
 #endif
@@ -2015,7 +2015,10 @@ void ath11k_wmi_start_scan_init(struct ath11k *ar,
 {
 	/* setup commonly used values */
 	arg->scan_req_id = 1;
-	arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
+	if (ar->state_11d == ATH11K_11D_PREPARING)
+		arg->scan_priority = WMI_SCAN_PRIORITY_MEDIUM;
+	else
+		arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
 	arg->dwell_time_active = 50;
 	arg->dwell_time_active_2g = 0;
 	arg->dwell_time_passive = 150;
@@ -6350,8 +6353,10 @@ static void ath11k_wmi_op_ep_tx_credits(struct ath11k_base *ab)
 static int ath11k_reg_11d_new_cc_event(struct ath11k_base *ab, struct sk_buff *skb)
 {
 	const struct wmi_11d_new_cc_ev *ev;
+	struct ath11k *ar;
+	struct ath11k_pdev *pdev;
 	const void **tb;
-	int ret;
+	int ret, i;

 	tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
 	if (IS_ERR(tb)) {
@@ -6377,6 +6382,13 @@ static int ath11k_reg_11d_new_cc_event(struct ath11k_base *ab, struct sk_buff *skb)

 	kfree(tb);

+	for (i = 0; i < ab->num_radios; i++) {
+		pdev = &ab->pdevs[i];
+		ar = pdev->ar;
+		ar->state_11d = ATH11K_11D_IDLE;
+		complete(&ar->completed_11d_scan);
+	}
+
 	queue_work(ab->workqueue, &ab->update_11d_work);

 	return 0;
@@ -371,7 +371,7 @@ void iwl_dbg_tlv_del_timers(struct iwl_trans *trans)
 	struct iwl_dbg_tlv_timer_node *node, *tmp;

 	list_for_each_entry_safe(node, tmp, timer_list, list) {
-		del_timer(&node->timer);
+		del_timer_sync(&node->timer);
 		list_del(&node->list);
 		kfree(node);
 	}
@@ -2202,11 +2202,14 @@ mac80211_hwsim_sta_rc_update(struct ieee80211_hw *hw,
 	if (!data->use_chanctx) {
 		confbw = data->bw;
 	} else {
-		struct ieee80211_chanctx_conf *chanctx_conf =
-			rcu_dereference(vif->chanctx_conf);
+		struct ieee80211_chanctx_conf *chanctx_conf;
+
+		rcu_read_lock();
+		chanctx_conf = rcu_dereference(vif->chanctx_conf);

 		if (!WARN_ON(!chanctx_conf))
 			confbw = chanctx_conf->def.width;
+		rcu_read_unlock();
 	}

 	WARN(bw > hwsim_get_chanwidth(confbw),
@@ -2475,11 +2478,13 @@ static void hw_scan_work(struct work_struct *work)
 			if (req->ie_len)
 				skb_put_data(probe, req->ie, req->ie_len);

+			rcu_read_lock();
 			if (!ieee80211_tx_prepare_skb(hwsim->hw,
 						      hwsim->hw_scan_vif,
 						      probe,
 						      hwsim->tmp_chan->band,
 						      NULL)) {
+				rcu_read_unlock();
 				kfree_skb(probe);
 				continue;
 			}
@@ -2487,6 +2492,7 @@ static void hw_scan_work(struct work_struct *work)
 			local_bh_disable();
 			mac80211_hwsim_tx_frame(hwsim->hw, probe,
 						hwsim->tmp_chan);
+			rcu_read_unlock();
 			local_bh_enable();
 		}
 	}
@@ -1557,7 +1557,7 @@ ptp_ocp_signal_set(struct ptp_ocp *bp, int gen, struct ptp_ocp_signal *s)
 	start_ns = ktime_set(ts.tv_sec, ts.tv_nsec) + NSEC_PER_MSEC;
 	if (!s->start) {
 		/* roundup() does not work on 32-bit systems */
-		s->start = DIV_ROUND_UP_ULL(start_ns, s->period);
+		s->start = DIV64_U64_ROUND_UP(start_ns, s->period);
 		s->start = ktime_add(s->start, s->phase);
 	}
@@ -626,8 +626,6 @@ static void mpc_rcvd_sweep_resp(struct mpcg_info *mpcginfo)
 		ctcm_clear_busy_do(dev);
 	}

-	kfree(mpcginfo);
-
 	return;
 }
@@ -1192,10 +1190,10 @@ static void ctcmpc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
 				CTCM_FUNTAIL, dev->name);
 			priv->stats.rx_dropped++;
 			/* mpcginfo only used for non-data transfers */
-			kfree(mpcginfo);
 			if (do_debug_data)
 				ctcmpc_dump_skb(pskb, -8);
 		}
+		kfree(mpcginfo);
 	}
 done:
@@ -1977,7 +1975,6 @@ static void mpc_action_rcvd_xid0(fsm_instance *fsm, int event, void *arg)
 		}
 		break;
 	}
-	kfree(mpcginfo);

 	CTCM_PR_DEBUG("ctcmpc:%s() %s xid2:%i xid7:%i xidt_p2:%i \n",
 		__func__, ch->id, grp->outstanding_xid2,
@@ -2038,7 +2035,6 @@ static void mpc_action_rcvd_xid7(fsm_instance *fsm, int event, void *arg)
 		mpc_validate_xid(mpcginfo);
 		break;
 	}
-	kfree(mpcginfo);
 	return;
 }
@@ -39,11 +39,12 @@ static ssize_t ctcm_buffer_write(struct device *dev,
 	struct ctcm_priv *priv = dev_get_drvdata(dev);
 	int rc;

-	ndev = priv->channel[CTCM_READ]->netdev;
-	if (!(priv && priv->channel[CTCM_READ] && ndev)) {
+	if (!(priv && priv->channel[CTCM_READ] &&
+	      priv->channel[CTCM_READ]->netdev)) {
 		CTCM_DBF_TEXT(SETUP, CTC_DBF_ERROR, "bfnondev");
 		return -ENODEV;
 	}
+	ndev = priv->channel[CTCM_READ]->netdev;

 	rc = kstrtouint(buf, 0, &bs1);
 	if (rc)
@@ -1736,10 +1736,11 @@ lcs_get_control(struct lcs_card *card, struct lcs_cmd *cmd)
 			lcs_schedule_recovery(card);
 			break;
 		case LCS_CMD_STOPLAN:
-			pr_warn("Stoplan for %s initiated by LGW\n",
-				card->dev->name);
-			if (card->dev)
+			if (card->dev) {
+				pr_warn("Stoplan for %s initiated by LGW\n",
+					card->dev->name);
 				netif_carrier_off(card->dev);
+			}
 			break;
 		default:
 			LCS_DBF_TEXT(5, trace, "noLGWcmd");
@@ -1172,9 +1172,8 @@ static blk_status_t alua_prep_fn(struct scsi_device *sdev, struct request *req)
 	case SCSI_ACCESS_STATE_OPTIMAL:
 	case SCSI_ACCESS_STATE_ACTIVE:
 	case SCSI_ACCESS_STATE_LBA:
-		return BLK_STS_OK;
 	case SCSI_ACCESS_STATE_TRANSITIONING:
-		return BLK_STS_AGAIN;
+		return BLK_STS_OK;
 	default:
 		req->rq_flags |= RQF_QUIET;
 		return BLK_STS_IOERR;
@@ -1330,7 +1330,7 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
 	    LPFC_SLI_INTF_IF_TYPE_0) {
 		/* FLOGI needs to be 3 for WQE FCFI */
-		ct = ((SLI4_CT_FCFI >> 1) & 1) | (SLI4_CT_FCFI & 1);
+		ct = SLI4_CT_FCFI;
 		bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);

 		/* Set the fcfi to the fcfi we registered with */
@@ -10720,10 +10720,10 @@ __lpfc_sli_prep_gen_req_s4(struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp,

 	/* Words 0 - 2 */
 	bde = (struct ulp_bde64_le *)&cmdwqe->generic.bde;
-	bde->addr_low = cpu_to_le32(putPaddrLow(bmp->phys));
-	bde->addr_high = cpu_to_le32(putPaddrHigh(bmp->phys));
+	bde->addr_low = bpl->addr_low;
+	bde->addr_high = bpl->addr_high;
 	bde->type_size = cpu_to_le32(xmit_len);
-	bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BLP_64);
+	bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);

 	/* Word 3 */
 	cmdwqe->gen_req.request_payload_len = xmit_len;
@@ -3826,6 +3826,9 @@ int qlt_abort_cmd(struct qla_tgt_cmd *cmd)

 	spin_lock_irqsave(&cmd->cmd_lock, flags);
 	if (cmd->aborted) {
+		if (cmd->sg_mapped)
+			qlt_unmap_sg(vha, cmd);
+
 		spin_unlock_irqrestore(&cmd->cmd_lock, flags);
 		/*
 		 * It's normal to see 2 calls in this path:
@@ -510,9 +510,9 @@ static int qcom_slim_probe(struct platform_device *pdev)
 	}

 	ctrl->irq = platform_get_irq(pdev, 0);
-	if (!ctrl->irq) {
+	if (ctrl->irq < 0) {
 		dev_err(&pdev->dev, "no slimbus IRQ\n");
-		return -ENODEV;
+		return ctrl->irq;
 	}

 	sctrl = &ctrl->ctrl;
@@ -137,6 +137,7 @@ struct gsm_dlci {
 	int retries;
 	/* Uplink tty if active */
 	struct tty_port port;	/* The tty bound to this DLCI if there is one */
+#define TX_SIZE		4096	/* Must be power of 2. */
 	struct kfifo fifo;	/* Queue fifo for the DLCI */
 	int adaption;		/* Adaption layer in use */
 	int prev_adaption;
@@ -1658,6 +1659,7 @@ static void gsm_dlci_data(struct gsm_dlci *dlci, const u8 *data, int clen)
 		if (len == 0)
 			return;
 	}
+	len--;
 	slen++;
 	tty = tty_port_tty_get(port);
 	if (tty) {
@@ -1730,7 +1732,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
 		return NULL;
 	spin_lock_init(&dlci->lock);
 	mutex_init(&dlci->mutex);
-	if (kfifo_alloc(&dlci->fifo, 4096, GFP_KERNEL) < 0) {
+	if (kfifo_alloc(&dlci->fifo, TX_SIZE, GFP_KERNEL) < 0) {
 		kfree(dlci);
 		return NULL;
 	}
@@ -2351,6 +2353,7 @@ static void gsm_copy_config_values(struct gsm_mux *gsm,

 static int gsm_config(struct gsm_mux *gsm, struct gsm_config *c)
 {
+	int ret = 0;
 	int need_close = 0;
 	int need_restart = 0;

@@ -2418,10 +2421,13 @@ static int gsm_config(struct gsm_mux *gsm, struct gsm_config *c)
 	 * FIXME: We need to separate activation/deactivation from adding
 	 * and removing from the mux array
 	 */
-	if (need_restart)
-		gsm_activate_mux(gsm);
-	if (gsm->initiator && need_close)
-		gsm_dlci_begin_open(gsm->dlci[0]);
+	if (gsm->dead) {
+		ret = gsm_activate_mux(gsm);
+		if (ret)
+			return ret;
+		if (gsm->initiator)
+			gsm_dlci_begin_open(gsm->dlci[0]);
+	}
 	return 0;
 }
@@ -2971,8 +2977,6 @@ static struct tty_ldisc_ops tty_ldisc_packet = {
 *	Virtual tty side
 */

-#define TX_SIZE		512
-
 /**
 *	gsm_modem_upd_via_data	-	send modem bits via convergence layer
 *	@dlci: channel
@@ -3212,7 +3216,7 @@ static unsigned int gsmtty_write_room(struct tty_struct *tty)
 	struct gsm_dlci *dlci = tty->driver_data;
 	if (dlci->state == DLCI_CLOSED)
 		return 0;
-	return TX_SIZE - kfifo_len(&dlci->fifo);
+	return kfifo_avail(&dlci->fifo);
 }

 static unsigned int gsmtty_chars_in_buffer(struct tty_struct *tty)
@@ -37,6 +37,7 @@
 #define MTK_UART_IER_RTSI	0x40	/* Enable RTS Modem status interrupt */
 #define MTK_UART_IER_CTSI	0x80	/* Enable CTS Modem status interrupt */

+#define MTK_UART_EFR		38	/* I/O: Extended Features Register */
 #define MTK_UART_EFR_EN		0x10	/* Enable enhancement feature */
 #define MTK_UART_EFR_RTS	0x40	/* Enable hardware rx flow control */
 #define MTK_UART_EFR_CTS	0x80	/* Enable hardware tx flow control */
@@ -53,6 +54,12 @@
 #define MTK_UART_TX_TRIGGER	1
 #define MTK_UART_RX_TRIGGER	MTK_UART_RX_SIZE

+#define MTK_UART_FEATURE_SEL	39	/* Feature Selection register */
+#define MTK_UART_FEAT_NEWRMAP	BIT(0)	/* Use new register map */
+
+#define MTK_UART_XON1		40	/* I/O: Xon character 1 */
+#define MTK_UART_XOFF1		42	/* I/O: Xoff character 1 */
+
 #ifdef CONFIG_SERIAL_8250_DMA
 enum dma_rx_status {
 	DMA_RX_START = 0,
@@ -169,7 +176,7 @@ static void mtk8250_dma_enable(struct uart_8250_port *up)
 		   MTK_UART_DMA_EN_RX | MTK_UART_DMA_EN_TX);

 	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
-	serial_out(up, UART_EFR, UART_EFR_ECB);
+	serial_out(up, MTK_UART_EFR, UART_EFR_ECB);
 	serial_out(up, UART_LCR, lcr);

 	if (dmaengine_slave_config(dma->rxchan, &dma->rxconf) != 0)
@@ -232,7 +239,7 @@ static void mtk8250_set_flow_ctrl(struct uart_8250_port *up, int mode)
 	int lcr = serial_in(up, UART_LCR);

 	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
-	serial_out(up, UART_EFR, UART_EFR_ECB);
+	serial_out(up, MTK_UART_EFR, UART_EFR_ECB);
 	serial_out(up, UART_LCR, lcr);
 	lcr = serial_in(up, UART_LCR);

@@ -241,7 +248,7 @@ static void mtk8250_set_flow_ctrl(struct uart_8250_port *up, int mode)
 		serial_out(up, MTK_UART_ESCAPE_DAT, MTK_UART_ESCAPE_CHAR);
 		serial_out(up, MTK_UART_ESCAPE_EN, 0x00);
 		serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
-		serial_out(up, UART_EFR, serial_in(up, UART_EFR) &
+		serial_out(up, MTK_UART_EFR, serial_in(up, MTK_UART_EFR) &
 			(~(MTK_UART_EFR_HW_FC | MTK_UART_EFR_SW_FC_MASK)));
 		serial_out(up, UART_LCR, lcr);
 		mtk8250_disable_intrs(up, MTK_UART_IER_XOFFI |
@@ -255,8 +262,8 @@ static void mtk8250_set_flow_ctrl(struct uart_8250_port *up, int mode)
 		serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);

 		/*enable hw flow control*/
-		serial_out(up, UART_EFR, MTK_UART_EFR_HW_FC |
-			(serial_in(up, UART_EFR) &
+		serial_out(up, MTK_UART_EFR, MTK_UART_EFR_HW_FC |
+			(serial_in(up, MTK_UART_EFR) &
 			(~(MTK_UART_EFR_HW_FC | MTK_UART_EFR_SW_FC_MASK))));

 		serial_out(up, UART_LCR, lcr);
@@ -270,12 +277,12 @@ static void mtk8250_set_flow_ctrl(struct uart_8250_port *up, int mode)
 		serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);

 		/*enable sw flow control */
-		serial_out(up, UART_EFR, MTK_UART_EFR_XON1_XOFF1 |
-			(serial_in(up, UART_EFR) &
+		serial_out(up, MTK_UART_EFR, MTK_UART_EFR_XON1_XOFF1 |
+			(serial_in(up, MTK_UART_EFR) &
 			(~(MTK_UART_EFR_HW_FC | MTK_UART_EFR_SW_FC_MASK))));

-		serial_out(up, UART_XON1, START_CHAR(port->state->port.tty));
-		serial_out(up, UART_XOFF1, STOP_CHAR(port->state->port.tty));
+		serial_out(up, MTK_UART_XON1, START_CHAR(port->state->port.tty));
+		serial_out(up, MTK_UART_XOFF1, STOP_CHAR(port->state->port.tty));
 		serial_out(up, UART_LCR, lcr);
 		mtk8250_disable_intrs(up, MTK_UART_IER_CTSI|MTK_UART_IER_RTSI);
 		mtk8250_enable_intrs(up, MTK_UART_IER_XOFFI);
@@ -568,6 +575,10 @@ static int mtk8250_probe(struct platform_device *pdev)
 	uart.dma = data->dma;
 #endif

+	/* Set AP UART new register map */
+	writel(MTK_UART_FEAT_NEWRMAP, uart.port.membase +
+	       (MTK_UART_FEATURE_SEL << uart.port.regshift));
+
 	/* Disable Rate Fix function */
 	writel(0x0, uart.port.membase +
 	       (MTK_UART_RATE_FIX << uart.port.regshift));
@@ -471,11 +471,10 @@ static int digicolor_uart_probe(struct platform_device *pdev)
 	if (IS_ERR(uart_clk))
 		return PTR_ERR(uart_clk);

-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	dp->port.mapbase = res->start;
-	dp->port.membase = devm_ioremap_resource(&pdev->dev, res);
+	dp->port.membase = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
 	if (IS_ERR(dp->port.membase))
 		return PTR_ERR(dp->port.membase);
+	dp->port.mapbase = res->start;

 	irq = platform_get_irq(pdev, 0);
 	if (irq < 0)
@@ -2664,6 +2664,7 @@ static int lpuart_probe(struct platform_device *pdev)
 	struct device_node *np = pdev->dev.of_node;
 	struct lpuart_port *sport;
 	struct resource *res;
+	irq_handler_t handler;
 	int ret;

 	sport = devm_kzalloc(&pdev->dev, sizeof(*sport), GFP_KERNEL);
@@ -2741,17 +2742,11 @@ static int lpuart_probe(struct platform_device *pdev)

 	if (lpuart_is_32(sport)) {
 		lpuart_reg.cons = LPUART32_CONSOLE;
-		ret = devm_request_irq(&pdev->dev, sport->port.irq, lpuart32_int, 0,
-					DRIVER_NAME, sport);
+		handler = lpuart32_int;
 	} else {
 		lpuart_reg.cons = LPUART_CONSOLE;
-		ret = devm_request_irq(&pdev->dev, sport->port.irq, lpuart_int, 0,
-					DRIVER_NAME, sport);
+		handler = lpuart_int;
 	}

-	if (ret)
-		goto failed_irq_request;
-
 	ret = uart_add_one_port(&lpuart_reg, &sport->port);
 	if (ret)
 		goto failed_attach_port;
@@ -2773,13 +2768,18 @@ static int lpuart_probe(struct platform_device *pdev)

 	sport->port.rs485_config(&sport->port, &sport->port.rs485);

+	ret = devm_request_irq(&pdev->dev, sport->port.irq, handler, 0,
+				DRIVER_NAME, sport);
+	if (ret)
+		goto failed_irq_request;
+
 	return 0;

+failed_irq_request:
 failed_get_rs485:
 failed_reset:
 	uart_remove_one_port(&lpuart_reg, &sport->port);
 failed_attach_port:
-failed_irq_request:
 	lpuart_disable_clks(sport);
 failed_clock_enable:
 failed_out_of_range:
@@ -774,6 +774,7 @@ static int wdm_release(struct inode *inode, struct file *file)
 			poison_urbs(desc);
 			spin_lock_irq(&desc->iuspin);
 			desc->resp_count = 0;
+			clear_bit(WDM_RESPONDING, &desc->flags);
 			spin_unlock_irq(&desc->iuspin);
 			desc->manage_power(desc->intf, 0);
 			unpoison_urbs(desc);
@@ -890,13 +890,37 @@ static void uvc_function_unbind(struct usb_configuration *c,
 {
 	struct usb_composite_dev *cdev = c->cdev;
 	struct uvc_device *uvc = to_uvc(f);
+	long wait_ret = 1;

 	uvcg_info(f, "%s()\n", __func__);

+	/* If we know we're connected via v4l2, then there should be a cleanup
+	 * of the device from userspace either via UVC_EVENT_DISCONNECT or
+	 * though the video device removal uevent. Allow some time for the
+	 * application to close out before things get deleted.
+	 */
+	if (uvc->func_connected) {
+		uvcg_dbg(f, "waiting for clean disconnect\n");
+		wait_ret = wait_event_interruptible_timeout(uvc->func_connected_queue,
+				uvc->func_connected == false, msecs_to_jiffies(500));
+		uvcg_dbg(f, "done waiting with ret: %ld\n", wait_ret);
+	}
+
 	device_remove_file(&uvc->vdev.dev, &dev_attr_function_name);
 	video_unregister_device(&uvc->vdev);
 	v4l2_device_unregister(&uvc->v4l2_dev);

+	if (uvc->func_connected) {
+		/* Wait for the release to occur to ensure there are no longer any
+		 * pending operations that may cause panics when resources are cleaned
+		 * up.
+		 */
+		uvcg_warn(f, "%s no clean disconnect, wait for release\n", __func__);
+		wait_ret = wait_event_interruptible_timeout(uvc->func_connected_queue,
+				uvc->func_connected == false, msecs_to_jiffies(1000));
+		uvcg_dbg(f, "done waiting for release with ret: %ld\n", wait_ret);
+	}
+
 	usb_ep_free_request(cdev->gadget->ep0, uvc->control_req);
 	kfree(uvc->control_buf);
@@ -915,6 +939,7 @@ static struct usb_function *uvc_alloc(struct usb_function_instance *fi)

 	mutex_init(&uvc->video.mutex);
 	uvc->state = UVC_STATE_DISCONNECTED;
+	init_waitqueue_head(&uvc->func_connected_queue);
 	opts = fi_to_f_uvc_opts(fi);

 	mutex_lock(&opts->lock);
@@ -14,6 +14,7 @@
 #include <linux/spinlock.h>
 #include <linux/usb/composite.h>
 #include <linux/videodev2.h>
+#include <linux/wait.h>

 #include <media/v4l2-device.h>
 #include <media/v4l2-dev.h>
@@ -129,6 +130,7 @@ struct uvc_device {
 	struct usb_function func;
 	struct uvc_video video;
 	bool func_connected;
+	wait_queue_head_t func_connected_queue;

 	/* Descriptors */
 	struct {
@@ -253,10 +253,11 @@ uvc_v4l2_subscribe_event(struct v4l2_fh *fh,

 static void uvc_v4l2_disable(struct uvc_device *uvc)
 {
-	uvc->func_connected = false;
 	uvc_function_disconnect(uvc);
 	uvcg_video_enable(&uvc->video, 0);
 	uvcg_free_buffers(&uvc->video.queue);
+	uvc->func_connected = false;
+	wake_up_interruptible(&uvc->func_connected_queue);
 }

 static int
@@ -145,6 +145,7 @@ enum dev_state {
 	STATE_DEV_INVALID = 0,
 	STATE_DEV_OPENED,
 	STATE_DEV_INITIALIZED,
+	STATE_DEV_REGISTERING,
 	STATE_DEV_RUNNING,
 	STATE_DEV_CLOSED,
 	STATE_DEV_FAILED
@@ -508,6 +509,7 @@ static int raw_ioctl_run(struct raw_dev *dev, unsigned long value)
 		ret = -EINVAL;
 		goto out_unlock;
 	}
+	dev->state = STATE_DEV_REGISTERING;
 	spin_unlock_irqrestore(&dev->lock, flags);

 	ret = usb_gadget_probe_driver(&dev->driver);
@@ -19,11 +19,6 @@
 #define HS_BW_BOUNDARY	6144
 /* usb2 spec section11.18.1: at most 188 FS bytes per microframe */
 #define FS_PAYLOAD_MAX 188
-/*
- * max number of microframes for split transfer,
- * for fs isoc in : 1 ss + 1 idle + 7 cs
- */
-#define TT_MICROFRAMES_MAX 9

 #define DBG_BUF_EN	64
@@ -242,28 +237,17 @@ static void drop_tt(struct usb_device *udev)

 static struct mu3h_sch_ep_info *
 create_sch_ep(struct xhci_hcd_mtk *mtk, struct usb_device *udev,
-	      struct usb_host_endpoint *ep, struct xhci_ep_ctx *ep_ctx)
+	      struct usb_host_endpoint *ep)
 {
 	struct mu3h_sch_ep_info *sch_ep;
 	struct mu3h_sch_bw_info *bw_info;
 	struct mu3h_sch_tt *tt = NULL;
-	u32 len_bw_budget_table;

 	bw_info = get_bw_info(mtk, udev, ep);
 	if (!bw_info)
 		return ERR_PTR(-ENODEV);

-	if (is_fs_or_ls(udev->speed))
-		len_bw_budget_table = TT_MICROFRAMES_MAX;
-	else if ((udev->speed >= USB_SPEED_SUPER)
-		 && usb_endpoint_xfer_isoc(&ep->desc))
-		len_bw_budget_table = get_esit(ep_ctx);
-	else
-		len_bw_budget_table = 1;
-
-	sch_ep = kzalloc(struct_size(sch_ep, bw_budget_table,
-			 len_bw_budget_table),
-			 GFP_KERNEL);
+	sch_ep = kzalloc(sizeof(*sch_ep), GFP_KERNEL);
 	if (!sch_ep)
 		return ERR_PTR(-ENOMEM);
@@ -295,8 +279,6 @@ static void setup_sch_info(struct xhci_ep_ctx *ep_ctx,
 	u32 mult;
 	u32 esit_pkts;
 	u32 max_esit_payload;
-	u32 *bwb_table = sch_ep->bw_budget_table;
-	int i;

 	ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2));
 	maxpkt = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
@@ -332,7 +314,6 @@ static void setup_sch_info(struct xhci_ep_ctx *ep_ctx,
 		 */
 		sch_ep->pkts = max_burst + 1;
 		sch_ep->bw_cost_per_microframe = maxpkt * sch_ep->pkts;
-		bwb_table[0] = sch_ep->bw_cost_per_microframe;
 	} else if (sch_ep->speed >= USB_SPEED_SUPER) {
 		/* usb3_r1 spec section4.4.7 & 4.4.8 */
 		sch_ep->cs_count = 0;
@@ -349,7 +330,6 @@ static void setup_sch_info(struct xhci_ep_ctx *ep_ctx,
 		if (ep_type == INT_IN_EP || ep_type == INT_OUT_EP) {
 			sch_ep->pkts = esit_pkts;
 			sch_ep->num_budget_microframes = 1;
-			bwb_table[0] = maxpkt * sch_ep->pkts;
 		}

 		if (ep_type == ISOC_IN_EP || ep_type == ISOC_OUT_EP) {
@@ -366,15 +346,8 @@ static void setup_sch_info(struct xhci_ep_ctx *ep_ctx,
 				DIV_ROUND_UP(esit_pkts, sch_ep->pkts);

 			sch_ep->repeat = !!(sch_ep->num_budget_microframes > 1);
-			sch_ep->bw_cost_per_microframe = maxpkt * sch_ep->pkts;
-
-			for (i = 0; i < sch_ep->num_budget_microframes - 1; i++)
-				bwb_table[i] = sch_ep->bw_cost_per_microframe;
-
-			/* last one <= bw_cost_per_microframe */
-			bwb_table[i] = maxpkt * esit_pkts
-				       - i * sch_ep->bw_cost_per_microframe;
 		}
+		sch_ep->bw_cost_per_microframe = maxpkt * sch_ep->pkts;
 	} else if (is_fs_or_ls(sch_ep->speed)) {
 		sch_ep->pkts = 1; /* at most one packet for each microframe */

@@ -384,28 +357,7 @@ static void setup_sch_info(struct xhci_ep_ctx *ep_ctx,
 		 */
 		sch_ep->cs_count = DIV_ROUND_UP(maxpkt, FS_PAYLOAD_MAX);
 		sch_ep->num_budget_microframes = sch_ep->cs_count;
-		sch_ep->bw_cost_per_microframe =
-			(maxpkt < FS_PAYLOAD_MAX) ? maxpkt : FS_PAYLOAD_MAX;
-
-		/* init budget table */
-		if (ep_type == ISOC_OUT_EP) {
-			for (i = 0; i < sch_ep->num_budget_microframes; i++)
-				bwb_table[i] = sch_ep->bw_cost_per_microframe;
-		} else if (ep_type == INT_OUT_EP) {
-			/* only first one consumes bandwidth, others as zero */
-			bwb_table[0] = sch_ep->bw_cost_per_microframe;
-		} else { /* INT_IN_EP or ISOC_IN_EP */
-			bwb_table[0] = 0; /* start split */
-			bwb_table[1] = 0; /* idle */
-			/*
-			 * due to cs_count will be updated according to cs
-			 * position, assign all remainder budget array
-			 * elements as @bw_cost_per_microframe, but only first
-			 * @num_budget_microframes elements will be used later
-			 */
-			for (i = 2; i < TT_MICROFRAMES_MAX; i++)
-				bwb_table[i] = sch_ep->bw_cost_per_microframe;
-		}
+		sch_ep->bw_cost_per_microframe = min_t(u32, maxpkt, FS_PAYLOAD_MAX);
 	}
 }
@@ -422,7 +374,7 @@ static u32 get_max_bw(struct mu3h_sch_bw_info *sch_bw,

 		for (j = 0; j < sch_ep->num_budget_microframes; j++) {
 			k = XHCI_MTK_BW_INDEX(base + j);
-			bw = sch_bw->bus_bw[k] + sch_ep->bw_budget_table[j];
+			bw = sch_bw->bus_bw[k] + sch_ep->bw_cost_per_microframe;
 			if (bw > max_bw)
 				max_bw = bw;
 		}
@@ -433,18 +385,16 @@ static u32 get_max_bw(struct mu3h_sch_bw_info *sch_bw,
 static void update_bus_bw(struct mu3h_sch_bw_info *sch_bw,
 	struct mu3h_sch_ep_info *sch_ep, bool used)
 {
+	int bw_updated;
 	u32 base;
-	int i, j, k;
+	int i, j;
+
+	bw_updated = sch_ep->bw_cost_per_microframe * (used ? 1 : -1);

 	for (i = 0; i < sch_ep->num_esit; i++) {
 		base = sch_ep->offset + i * sch_ep->esit;
-		for (j = 0; j < sch_ep->num_budget_microframes; j++) {
-			k = XHCI_MTK_BW_INDEX(base + j);
-			if (used)
-				sch_bw->bus_bw[k] += sch_ep->bw_budget_table[j];
-			else
-				sch_bw->bus_bw[k] -= sch_ep->bw_budget_table[j];
-		}
+		for (j = 0; j < sch_ep->num_budget_microframes; j++)
+			sch_bw->bus_bw[XHCI_MTK_BW_INDEX(base + j)] += bw_updated;
 	}
 }
@@ -464,7 +414,7 @@ static int check_fs_bus_bw(struct mu3h_sch_ep_info *sch_ep, int offset)
 	 */
 	for (j = 0; j < sch_ep->num_budget_microframes; j++) {
 		k = XHCI_MTK_BW_INDEX(base + j);
-		tmp = tt->fs_bus_bw[k] + sch_ep->bw_budget_table[j];
+		tmp = tt->fs_bus_bw[k] + sch_ep->bw_cost_per_microframe;
 		if (tmp > FS_PAYLOAD_MAX)
 			return -ESCH_BW_OVERFLOW;
 	}
@@ -538,19 +488,17 @@ static int check_sch_tt(struct mu3h_sch_ep_info *sch_ep, u32 offset)
 static void update_sch_tt(struct mu3h_sch_ep_info *sch_ep, bool used)
 {
 	struct mu3h_sch_tt *tt = sch_ep->sch_tt;
+	int bw_updated;
 	u32 base;
-	int i, j, k;
+	int i, j;
+
+	bw_updated = sch_ep->bw_cost_per_microframe * (used ? 1 : -1);

 	for (i = 0; i < sch_ep->num_esit; i++) {
 		base = sch_ep->offset + i * sch_ep->esit;

-		for (j = 0; j < sch_ep->num_budget_microframes; j++) {
-			k = XHCI_MTK_BW_INDEX(base + j);
-			if (used)
-				tt->fs_bus_bw[k] += sch_ep->bw_budget_table[j];
-			else
-				tt->fs_bus_bw[k] -= sch_ep->bw_budget_table[j];
-		}
+		for (j = 0; j < sch_ep->num_budget_microframes; j++)
+			tt->fs_bus_bw[XHCI_MTK_BW_INDEX(base + j)] += bw_updated;
 	}

 	if (used)
@@ -710,7 +658,7 @@ static int add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,

 	xhci_dbg(xhci, "%s %s\n", __func__, decode_ep(ep, udev->speed));

-	sch_ep = create_sch_ep(mtk, udev, ep, ep_ctx);
+	sch_ep = create_sch_ep(mtk, udev, ep);
 	if (IS_ERR_OR_NULL(sch_ep))
 		return -ENOMEM;

@@ -83,7 +83,6 @@ struct mu3h_sch_bw_info {
 *		times; 1: distribute the (bMaxBurst+1)*(Mult+1) packets
 *		according to @pkts and @repeat. normal mode is used by
 *		default
- * @bw_budget_table: table to record bandwidth budget per microframe
 */
 struct mu3h_sch_ep_info {
 	u32 esit;
@@ -109,7 +108,6 @@ struct mu3h_sch_ep_info {
 	u32 pkts;
 	u32 cs_count;
 	u32 burst_mode;
-	u32 bw_budget_table[];
 };

 #define MU3C_U3_PORT_MAX	4
@@ -2123,10 +2123,14 @@ static const struct usb_device_id option_ids[] = {
 	  .driver_info = RSVD(3) },
 	{ USB_DEVICE(0x1508, 0x1001),				/* Fibocom NL668 (IOT version) */
 	  .driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
+	{ USB_DEVICE(0x1782, 0x4d10) },				/* Fibocom L610 (AT mode) */
+	{ USB_DEVICE_INTERFACE_CLASS(0x1782, 0x4d11, 0xff) },	/* Fibocom L610 (ECM/RNDIS mode) */
 	{ USB_DEVICE(0x2cb7, 0x0104),				/* Fibocom NL678 series */
 	  .driver_info = RSVD(4) | RSVD(5) },
 	{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff),	/* Fibocom NL678 series */
 	  .driver_info = RSVD(6) },
+	{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0106, 0xff) },	/* Fibocom MA510 (ECM mode w/ diag intf.) */
+	{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x010a, 0xff) },	/* Fibocom MA510 (ECM mode) */
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) },	/* Fibocom FG150 Diag */
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) },	/* Fibocom FG150 AT */
 	{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) },	/* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
@@ -106,6 +106,7 @@ static const struct usb_device_id id_table[] = {
 	{ USB_DEVICE(HP_VENDOR_ID, HP_LCM220_PRODUCT_ID) },
 	{ USB_DEVICE(HP_VENDOR_ID, HP_LCM960_PRODUCT_ID) },
 	{ USB_DEVICE(HP_VENDOR_ID, HP_LM920_PRODUCT_ID) },
+	{ USB_DEVICE(HP_VENDOR_ID, HP_LM930_PRODUCT_ID) },
 	{ USB_DEVICE(HP_VENDOR_ID, HP_LM940_PRODUCT_ID) },
 	{ USB_DEVICE(HP_VENDOR_ID, HP_TD620_PRODUCT_ID) },
 	{ USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) },
@@ -135,6 +135,7 @@
 #define HP_TD620_PRODUCT_ID	0x0956
 #define HP_LD960_PRODUCT_ID	0x0b39
 #define HP_LD381_PRODUCT_ID	0x0f7f
+#define HP_LM930_PRODUCT_ID	0x0f9b
 #define HP_LCM220_PRODUCT_ID	0x3139
 #define HP_LCM960_PRODUCT_ID	0x3239
 #define HP_LD220_PRODUCT_ID	0x3524
@@ -166,6 +166,8 @@ static const struct usb_device_id id_table[] = {
 	{DEVICE_SWI(0x1199, 0x9090)},	/* Sierra Wireless EM7565 QDL */
 	{DEVICE_SWI(0x1199, 0x9091)},	/* Sierra Wireless EM7565 */
 	{DEVICE_SWI(0x1199, 0x90d2)},	/* Sierra Wireless EM9191 QDL */
+	{DEVICE_SWI(0x1199, 0xc080)},	/* Sierra Wireless EM7590 QDL */
+	{DEVICE_SWI(0x1199, 0xc081)},	/* Sierra Wireless EM7590 */
 	{DEVICE_SWI(0x413c, 0x81a2)},	/* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
 	{DEVICE_SWI(0x413c, 0x81a3)},	/* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
 	{DEVICE_SWI(0x413c, 0x81a4)},	/* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
@@ -877,7 +877,7 @@ static int tcpci_remove(struct i2c_client *client)
 	/* Disable chip interrupts before unregistering port */
 	err = tcpci_write16(chip->tcpci, TCPC_ALERT_MASK, 0);
 	if (err < 0)
-		return err;
+		dev_warn(&client->dev, "Failed to disable irqs (%pe)\n", ERR_PTR(err));

 	tcpci_unregister_port(chip->tcpci);

@@ -15,6 +15,9 @@

 #include "tcpci.h"

+#define MT6360_REG_PHYCTRL1	0x80
+#define MT6360_REG_PHYCTRL3	0x82
+#define MT6360_REG_PHYCTRL7	0x86
 #define MT6360_REG_VCONNCTRL1	0x8C
 #define MT6360_REG_MODECTRL2	0x8F
 #define MT6360_REG_SWRESET	0xA0
@@ -22,6 +25,8 @@
 #define MT6360_REG_DRPCTRL1	0xA2
 #define MT6360_REG_DRPCTRL2	0xA3
 #define MT6360_REG_I2CTORST	0xBF
+#define MT6360_REG_PHYCTRL11	0xCA
+#define MT6360_REG_RXCTRL1	0xCE
 #define MT6360_REG_RXCTRL2	0xCF
 #define MT6360_REG_CTDCTRL2	0xEC
@@ -106,6 +111,27 @@ static int mt6360_tcpc_init(struct tcpci *tcpci, struct tcpci_data *tdata)
 	if (ret)
 		return ret;

+	/* BMC PHY */
+	ret = mt6360_tcpc_write16(regmap, MT6360_REG_PHYCTRL1, 0x3A70);
+	if (ret)
+		return ret;
+
+	ret = regmap_write(regmap, MT6360_REG_PHYCTRL3, 0x82);
+	if (ret)
+		return ret;
+
+	ret = regmap_write(regmap, MT6360_REG_PHYCTRL7, 0x36);
+	if (ret)
+		return ret;
+
+	ret = mt6360_tcpc_write16(regmap, MT6360_REG_PHYCTRL11, 0x3C60);
+	if (ret)
+		return ret;
+
+	ret = regmap_write(regmap, MT6360_REG_RXCTRL1, 0xE8);
+	if (ret)
+		return ret;
+
 	/* Set shipping mode off, AUTOIDLE on */
 	return regmap_write(regmap, MT6360_REG_MODECTRL2, 0x7A);
 }
@@ -1434,10 +1434,7 @@ fb_release(struct inode *inode, struct file *file)
 __acquires(&info->lock)
 __releases(&info->lock)
 {
-	struct fb_info * const info = file_fb_info(file);
-
-	if (!info)
-		return -ENODEV;
+	struct fb_info * const info = file->private_data;

 	lock_fb_info(info);
 	if (info->fbops->fb_release)
@@ -80,6 +80,10 @@ void framebuffer_release(struct fb_info *info)
 {
 	if (!info)
 		return;
+
+	if (WARN_ON(refcount_read(&info->count)))
+		return;
+
 	kfree(info->apertures);
 	kfree(info);
 }
@@ -243,6 +243,10 @@ error:
 static inline void efifb_show_boot_graphics(struct fb_info *info) {}
 #endif

+/*
+ * fb_ops.fb_destroy is called by the last put_fb_info() call at the end
+ * of unregister_framebuffer() or fb_release(). Do any cleanup here.
+ */
 static void efifb_destroy(struct fb_info *info)
 {
 	if (efifb_pci_dev)
@@ -254,10 +258,13 @@ static void efifb_destroy(struct fb_info *info)
 		else
 			memunmap(info->screen_base);
 	}
+
 	if (request_mem_succeeded)
 		release_mem_region(info->apertures->ranges[0].base,
 				   info->apertures->ranges[0].size);
 	fb_dealloc_cmap(&info->cmap);
+
+	framebuffer_release(info);
 }

 static const struct fb_ops efifb_ops = {
@@ -620,9 +627,9 @@ static int efifb_remove(struct platform_device *pdev)
 {
 	struct fb_info *info = platform_get_drvdata(pdev);

+	/* efifb_destroy takes care of info cleanup */
 	unregister_framebuffer(info);
 	sysfs_remove_groups(&pdev->dev.kobj, efifb_groups);
-	framebuffer_release(info);

 	return 0;
 }