Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Minor conflict in drivers/s390/net/qeth_l2_main.c, kept the lock
from commit c8183f5489 ("s390/qeth: fix potential deadlock on
workqueue flush"), removed the code which was removed by commit
9897d583b0 ("s390/qeth: consolidate some duplicated HW cmd code").

Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
commit a9f852e92e (Jakub Kicinski, 2019-11-22 16:27:24 -08:00)
93 changed files with 715 additions and 303 deletions


@@ -643,7 +643,7 @@ F:	drivers/net/ethernet/alacritech/*
 FORCEDETH GIGABIT ETHERNET DRIVER
 M:	Rain River <rain.1986.08.12@gmail.com>
-M:	Zhu Yanjun <yanjun.zhu@oracle.com>
+M:	Zhu Yanjun <zyjzyj2000@gmail.com>
 L:	netdev@vger.kernel.org
 S:	Maintained
 F:	drivers/net/ethernet/nvidia/*
@@ -8311,11 +8311,14 @@ F:	drivers/hid/intel-ish-hid/
 INTEL IOMMU (VT-d)
 M:	David Woodhouse <dwmw2@infradead.org>
+M:	Lu Baolu <baolu.lu@linux.intel.com>
 L:	iommu@lists.linux-foundation.org
-T:	git git://git.infradead.org/iommu-2.6.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
 S:	Supported
-F:	drivers/iommu/intel-iommu.c
+F:	drivers/iommu/dmar.c
+F:	drivers/iommu/intel*.[ch]
 F:	include/linux/intel-iommu.h
+F:	include/linux/intel-svm.h
 
 INTEL IOP-ADMA DMA DRIVER
 R:	Dan Williams <dan.j.williams@intel.com>
@@ -17227,6 +17230,7 @@ F:	virt/lib/
 VIRTIO AND VHOST VSOCK DRIVER
 M:	Stefan Hajnoczi <stefanha@redhat.com>
+M:	Stefano Garzarella <sgarzare@redhat.com>
 L:	kvm@vger.kernel.org
 L:	virtualization@lists.linux-foundation.org
 L:	netdev@vger.kernel.org


@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 4
 SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION = -rc8
 NAME = Kleptomaniac Octopus
 
 # *DOCUMENTATION*


@@ -58,23 +58,6 @@ alternative_else_nop_endif
 	.endm
 #endif
 
-/*
- * These macros are no-ops when UAO is present.
- */
-	.macro	uaccess_disable_not_uao, tmp1, tmp2
-	uaccess_ttbr0_disable \tmp1, \tmp2
-alternative_if ARM64_ALT_PAN_NOT_UAO
-	SET_PSTATE_PAN(1)
-alternative_else_nop_endif
-	.endm
-
-	.macro	uaccess_enable_not_uao, tmp1, tmp2, tmp3
-	uaccess_ttbr0_enable \tmp1, \tmp2, \tmp3
-alternative_if ARM64_ALT_PAN_NOT_UAO
-	SET_PSTATE_PAN(0)
-alternative_else_nop_endif
-	.endm
-
 /*
  * Remove the address tag from a virtual address, if present.
  */


@@ -378,20 +378,34 @@ do {									\
 extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
 #define raw_copy_from_user(to, from, n)					\
 ({									\
-	__arch_copy_from_user((to), __uaccess_mask_ptr(from), (n));	\
+	unsigned long __acfu_ret;					\
+	uaccess_enable_not_uao();					\
+	__acfu_ret = __arch_copy_from_user((to),			\
+				      __uaccess_mask_ptr(from), (n));	\
+	uaccess_disable_not_uao();					\
+	__acfu_ret;							\
 })
 
 extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
 #define raw_copy_to_user(to, from, n)					\
 ({									\
-	__arch_copy_to_user(__uaccess_mask_ptr(to), (from), (n));	\
+	unsigned long __actu_ret;					\
+	uaccess_enable_not_uao();					\
+	__actu_ret = __arch_copy_to_user(__uaccess_mask_ptr(to),	\
+					 (from), (n));			\
+	uaccess_disable_not_uao();					\
+	__actu_ret;							\
 })
 
 extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n);
 #define raw_copy_in_user(to, from, n)					\
 ({									\
-	__arch_copy_in_user(__uaccess_mask_ptr(to),			\
-			    __uaccess_mask_ptr(from), (n));		\
+	unsigned long __aciu_ret;					\
+	uaccess_enable_not_uao();					\
+	__aciu_ret = __arch_copy_in_user(__uaccess_mask_ptr(to),	\
+					 __uaccess_mask_ptr(from), (n)); \
+	uaccess_disable_not_uao();					\
+	__aciu_ret;							\
 })
 
 #define INLINE_COPY_TO_USER
@@ -400,8 +414,11 @@ extern unsigned long __must_check __arch_copy_in_user(void __user *to, const voi
 extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n);
 static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
 {
-	if (access_ok(to, n))
+	if (access_ok(to, n)) {
+		uaccess_enable_not_uao();
 		n = __arch_clear_user(__uaccess_mask_ptr(to), n);
+		uaccess_disable_not_uao();
+	}
 	return n;
 }
 #define clear_user	__clear_user
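
A note on the construct used above (not part of the diff): the rewritten macros rely on GNU C statement expressions, where a ({ ... }) block is an expression whose value is its last statement. That is what lets a macro run setup and teardown around a call while still "returning" the call's result. A minimal sketch, with hypothetical setup()/teardown() standing in for the uaccess toggles:

#define guarded_call(fn, arg)						\
({									\
	long __ret;							\
	setup();	/* hypothetical, cf. uaccess_enable_not_uao() */ \
	__ret = (fn)(arg);						\
	teardown();	/* hypothetical, cf. uaccess_disable_not_uao() */ \
	__ret;		/* value of the whole ({ ... }) expression */	\
})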


@@ -20,7 +20,6 @@
  * Alignment fixed up by hardware.
  */
 ENTRY(__arch_clear_user)
-	uaccess_enable_not_uao x2, x3, x4
 	mov	x2, x1			// save the size for fixup return
 	subs	x1, x1, #8
 	b.mi	2f
@@ -40,7 +39,6 @@ uao_user_alternative 9f, strh, sttrh, wzr, x0, 2
 	b.mi	5f
 uao_user_alternative 9f, strb, sttrb, wzr, x0, 0
 5:	mov	x0, #0
-	uaccess_disable_not_uao x2, x3
 	ret
 ENDPROC(__arch_clear_user)
 EXPORT_SYMBOL(__arch_clear_user)


@@ -54,10 +54,8 @@
 end	.req	x5
 
 ENTRY(__arch_copy_from_user)
-	uaccess_enable_not_uao x3, x4, x5
 	add	end, x0, x2
 #include "copy_template.S"
-	uaccess_disable_not_uao x3, x4
 	mov	x0, #0				// Nothing to copy
 	ret
 ENDPROC(__arch_copy_from_user)


@@ -56,10 +56,8 @@
 end	.req	x5
 
 ENTRY(__arch_copy_in_user)
-	uaccess_enable_not_uao x3, x4, x5
 	add	end, x0, x2
 #include "copy_template.S"
-	uaccess_disable_not_uao x3, x4
 	mov	x0, #0
 	ret
 ENDPROC(__arch_copy_in_user)


@@ -53,10 +53,8 @@
 end	.req	x5
 
 ENTRY(__arch_copy_to_user)
-	uaccess_enable_not_uao x3, x4, x5
 	add	end, x0, x2
 #include "copy_template.S"
-	uaccess_disable_not_uao x3, x4
 	mov	x0, #0
 	ret
 ENDPROC(__arch_copy_to_user)


@@ -28,7 +28,11 @@ void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
 unsigned long __copy_user_flushcache(void *to, const void __user *from,
 				     unsigned long n)
 {
-	unsigned long rc = __arch_copy_from_user(to, from, n);
+	unsigned long rc;
+
+	uaccess_enable_not_uao();
+	rc = __arch_copy_from_user(to, from, n);
+	uaccess_disable_not_uao();
 
 	/* See above */
 	__clean_dcache_area_pop(to, n - rc);


@@ -993,6 +993,7 @@ static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd,
 	if (sock->ops->shutdown == sock_no_shutdown) {
 		dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n");
 		*err = -EINVAL;
+		sockfd_put(sock);
 		return NULL;
 	}


@@ -25,13 +25,13 @@ static int bd70528_set_debounce(struct bd70528_gpio *bdgpio,
 	case 0:
 		val = BD70528_DEBOUNCE_DISABLE;
 		break;
-	case 1 ... 15:
+	case 1 ... 15000:
 		val = BD70528_DEBOUNCE_15MS;
 		break;
-	case 16 ... 30:
+	case 15001 ... 30000:
 		val = BD70528_DEBOUNCE_30MS;
 		break;
-	case 31 ... 50:
+	case 30001 ... 50000:
 		val = BD70528_DEBOUNCE_50MS;
 		break;
 	default:


@@ -192,13 +192,13 @@ static int max77620_gpio_set_debounce(struct max77620_gpio *mgpio,
 	case 0:
 		val = MAX77620_CNFG_GPIO_DBNC_None;
 		break;
-	case 1000 ... 8000:
+	case 1 ... 8000:
 		val = MAX77620_CNFG_GPIO_DBNC_8ms;
 		break;
-	case 9000 ... 16000:
+	case 8001 ... 16000:
 		val = MAX77620_CNFG_GPIO_DBNC_16ms;
 		break;
-	case 17000 ... 32000:
+	case 16001 ... 32000:
 		val = MAX77620_CNFG_GPIO_DBNC_32ms;
 		break;
 	default:
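
Both debounce fixes above correct the same unit mismatch: the gpiolib set_config() path hands drivers the debounce period in microseconds, while the original case ranges were written as if the argument were something else (raw milliseconds for bd70528, and ranges that excluded 1-999 us for max77620). A sketch of the corrected bucketing, with DBNC_* as placeholders for the chip-specific register values:

/* Sketch only: DBNC_* stand in for values such as BD70528_DEBOUNCE_15MS. */
static u8 debounce_usecs_to_reg(unsigned int usecs)
{
	if (usecs == 0)
		return DBNC_DISABLE;
	if (usecs <= 15000)		/* up to 15 ms */
		return DBNC_15MS;
	if (usecs <= 30000)		/* up to 30 ms */
		return DBNC_30MS;
	return DBNC_50MS;		/* caller rejects > 50 ms */
}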


@@ -1304,11 +1304,28 @@ late_initcall_sync(acpi_gpio_handle_deferred_request_irqs);
 static const struct dmi_system_id run_edge_events_on_boot_blacklist[] = {
 	{
+		/*
+		 * The Minix Neo Z83-4 has a micro-USB-B id-pin handler for
+		 * a non existing micro-USB-B connector which puts the HDMI
+		 * DDC pins in GPIO mode, breaking HDMI support.
+		 */
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "MINIX"),
 			DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"),
 		}
 	},
+	{
+		/*
+		 * The Terra Pad 1061 has a micro-USB-B id-pin handler, which
+		 * instead of controlling the actual micro-USB-B turns the 5V
+		 * boost for its USB-A connector off. The actual micro-USB-B
+		 * connector is wired for charging only.
+		 */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Wortmann_AG"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "TERRA_PAD_1061"),
+		}
+	},
 	{} /* Terminating entry */
 };


@@ -511,7 +511,7 @@ uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
 	 * Also, don't allow GTT domain if the BO doens't have USWC falg set.
 	 */
 	if (adev->asic_type >= CHIP_CARRIZO &&
-	    adev->asic_type <= CHIP_RAVEN &&
+	    adev->asic_type < CHIP_RAVEN &&
 	    (adev->flags & AMD_IS_APU) &&
 	    (bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) &&
 	    amdgpu_bo_support_uswc(bo_flags) &&


@@ -1013,10 +1013,10 @@ static const struct pci_device_id pciidlist[] = {
 	{0x1002, 0x731B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
 	{0x1002, 0x731F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
 	/* Navi14 */
-	{0x1002, 0x7340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
-	{0x1002, 0x7341, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
-	{0x1002, 0x7347, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
-	{0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
+	{0x1002, 0x7340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
+	{0x1002, 0x7341, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
+	{0x1002, 0x7347, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
+	{0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
 	/* Renoir */
 	{0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU|AMD_EXP_HW_SUPPORT},


@@ -649,15 +649,19 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 			return -ENOMEM;
 		alloc_size = info->read_mmr_reg.count * sizeof(*regs);
 
-		for (i = 0; i < info->read_mmr_reg.count; i++)
+		amdgpu_gfx_off_ctrl(adev, false);
+		for (i = 0; i < info->read_mmr_reg.count; i++) {
 			if (amdgpu_asic_read_register(adev, se_num, sh_num,
 						      info->read_mmr_reg.dword_offset + i,
 						      &regs[i])) {
 				DRM_DEBUG_KMS("unallowed offset %#x\n",
 					      info->read_mmr_reg.dword_offset + i);
 				kfree(regs);
+				amdgpu_gfx_off_ctrl(adev, true);
 				return -EFAULT;
 			}
+		}
+		amdgpu_gfx_off_ctrl(adev, true);
 		n = copy_to_user(out, regs, min(size, alloc_size));
 		kfree(regs);
 		return n ? -EFAULT : 0;


@@ -1038,8 +1038,13 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
 	case CHIP_VEGA20:
 		break;
 	case CHIP_RAVEN:
-		if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
-			&&((adev->gfx.rlc_fw_version != 106 &&
+		/* Disable GFXOFF on original raven.  There are combinations
+		 * of sbios and platforms that are not stable.
+		 */
+		if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8))
+			adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+		else if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
+			 &&((adev->gfx.rlc_fw_version != 106 &&
 			     adev->gfx.rlc_fw_version < 531) ||
 			    (adev->gfx.rlc_fw_version == 53815) ||
 			    (adev->gfx.rlc_feature_version < 1) ||


@@ -688,7 +688,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 	 */
 	if (adev->flags & AMD_IS_APU &&
 	    adev->asic_type >= CHIP_CARRIZO &&
-	    adev->asic_type <= CHIP_RAVEN)
+	    adev->asic_type < CHIP_RAVEN)
 		init_data.flags.gpu_vm_support = true;
 
 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)


@@ -3478,18 +3478,31 @@ static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr,
 
 static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query)
 {
+	struct amdgpu_device *adev = hwmgr->adev;
 	int i;
 	u32 tmp = 0;
 
 	if (!query)
 		return -EINVAL;
 
-	smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0);
-	tmp = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
-	*query = tmp;
+	/*
+	 * PPSMC_MSG_GetCurrPkgPwr is not supported on:
+	 *  - Hawaii
+	 *  - Bonaire
+	 *  - Fiji
+	 *  - Tonga
+	 */
+	if ((adev->asic_type != CHIP_HAWAII) &&
+	    (adev->asic_type != CHIP_BONAIRE) &&
+	    (adev->asic_type != CHIP_FIJI) &&
+	    (adev->asic_type != CHIP_TONGA)) {
+		smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0);
+		tmp = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+		*query = tmp;
 
-	if (tmp != 0)
-		return 0;
+		if (tmp != 0)
+			return 0;
+	}
 
 	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart);
 	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,


@@ -759,6 +759,12 @@ static int navi10_force_clk_levels(struct smu_context *smu,
 	case SMU_UCLK:
 	case SMU_DCEFCLK:
 	case SMU_FCLK:
+		/* There is only 2 levels for fine grained DPM */
+		if (navi10_is_support_fine_grained_dpm(smu, clk_type)) {
+			soft_max_level = (soft_max_level >= 1 ? 1 : 0);
+			soft_min_level = (soft_min_level >= 1 ? 1 : 0);
+		}
+
 		ret = smu_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
 		if (ret)
 			return size;


@@ -201,6 +201,7 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
 	crtc_state->update_wm_post = false;
 	crtc_state->fb_changed = false;
 	crtc_state->fifo_changed = false;
+	crtc_state->preload_luts = false;
 	crtc_state->wm.need_postvbl_update = false;
 	crtc_state->fb_bits = 0;
 	crtc_state->update_planes = 0;


@@ -990,6 +990,55 @@ void intel_color_commit(const struct intel_crtc_state *crtc_state)
 	dev_priv->display.color_commit(crtc_state);
 }
 
+static bool intel_can_preload_luts(const struct intel_crtc_state *new_crtc_state)
+{
+	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+	struct intel_atomic_state *state =
+		to_intel_atomic_state(new_crtc_state->base.state);
+	const struct intel_crtc_state *old_crtc_state =
+		intel_atomic_get_old_crtc_state(state, crtc);
+
+	return !old_crtc_state->base.gamma_lut &&
+		!old_crtc_state->base.degamma_lut;
+}
+
+static bool chv_can_preload_luts(const struct intel_crtc_state *new_crtc_state)
+{
+	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+	struct intel_atomic_state *state =
+		to_intel_atomic_state(new_crtc_state->base.state);
+	const struct intel_crtc_state *old_crtc_state =
+		intel_atomic_get_old_crtc_state(state, crtc);
+
+	/*
+	 * CGM_PIPE_MODE is itself single buffered. We'd have to
+	 * somehow split it out from chv_load_luts() if we wanted
+	 * the ability to preload the CGM LUTs/CSC without tearing.
+	 */
+	if (old_crtc_state->cgm_mode || new_crtc_state->cgm_mode)
+		return false;
+
+	return !old_crtc_state->base.gamma_lut;
+}
+
+static bool glk_can_preload_luts(const struct intel_crtc_state *new_crtc_state)
+{
+	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+	struct intel_atomic_state *state =
+		to_intel_atomic_state(new_crtc_state->base.state);
+	const struct intel_crtc_state *old_crtc_state =
+		intel_atomic_get_old_crtc_state(state, crtc);
+
+	/*
+	 * The hardware degamma is active whenever the pipe
+	 * CSC is active. Thus even if the old state has no
+	 * software degamma we need to avoid clobbering the
+	 * linear hardware degamma mid scanout.
+	 */
+	return !old_crtc_state->csc_enable &&
+		!old_crtc_state->base.gamma_lut;
+}
+
 int intel_color_check(struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
@@ -1133,6 +1182,8 @@ static int i9xx_color_check(struct intel_crtc_state *crtc_state)
 	if (ret)
 		return ret;
 
+	crtc_state->preload_luts = intel_can_preload_luts(crtc_state);
+
 	return 0;
 }
 
@@ -1185,6 +1236,8 @@ static int chv_color_check(struct intel_crtc_state *crtc_state)
 	if (ret)
 		return ret;
 
+	crtc_state->preload_luts = chv_can_preload_luts(crtc_state);
+
 	return 0;
 }
 
@@ -1224,6 +1277,8 @@ static int ilk_color_check(struct intel_crtc_state *crtc_state)
 	if (ret)
 		return ret;
 
+	crtc_state->preload_luts = intel_can_preload_luts(crtc_state);
+
 	return 0;
 }
 
@@ -1281,6 +1336,8 @@ static int ivb_color_check(struct intel_crtc_state *crtc_state)
 	if (ret)
 		return ret;
 
+	crtc_state->preload_luts = intel_can_preload_luts(crtc_state);
+
 	return 0;
 }
 
@@ -1319,6 +1376,8 @@ static int glk_color_check(struct intel_crtc_state *crtc_state)
 	if (ret)
 		return ret;
 
+	crtc_state->preload_luts = glk_can_preload_luts(crtc_state);
+
 	return 0;
 }
 
@@ -1368,6 +1427,8 @@ static int icl_color_check(struct intel_crtc_state *crtc_state)
 
 	crtc_state->csc_mode = icl_csc_mode(crtc_state);
 
+	crtc_state->preload_luts = intel_can_preload_luts(crtc_state);
+
 	return 0;
 }


@@ -2504,6 +2504,9 @@ u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
 	 * the highest stride limits of them all.
 	 */
 	crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
+	if (!crtc)
+		return 0;
+
 	plane = to_intel_plane(crtc->base.primary);
 
 	return plane->max_stride(plane, pixel_format, modifier,
@@ -13740,6 +13743,11 @@ static void intel_update_crtc(struct intel_crtc *crtc,
 		/* vblanks work again, re-enable pipe CRC. */
 		intel_crtc_enable_pipe_crc(crtc);
 	} else {
+		if (new_crtc_state->preload_luts &&
+		    (new_crtc_state->base.color_mgmt_changed ||
+		     new_crtc_state->update_pipe))
+			intel_color_load_luts(new_crtc_state);
+
 		intel_pre_plane_update(old_crtc_state, new_crtc_state);
 
 		if (new_crtc_state->update_pipe)
@@ -14034,6 +14042,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
 		if (new_crtc_state->base.active &&
 		    !needs_modeset(new_crtc_state) &&
+		    !new_crtc_state->preload_luts &&
 		    (new_crtc_state->base.color_mgmt_changed ||
 		     new_crtc_state->update_pipe))
 			intel_color_load_luts(new_crtc_state);


@@ -761,6 +761,7 @@ struct intel_crtc_state {
 	bool update_wm_pre, update_wm_post; /* watermarks are updated */
 	bool fb_changed; /* fb on any of the planes is changed */
 	bool fifo_changed; /* FIFO split is changed */
+	bool preload_luts;
 
 	/* Pipe source size (ie. panel fitter input size)
 	 * All planes will be positioned inside this space,


@@ -235,6 +235,11 @@ static int intelfb_create(struct drm_fb_helper *helper,
 	info->apertures->ranges[0].base = ggtt->gmadr.start;
 	info->apertures->ranges[0].size = ggtt->mappable_end;
 
+	/* Our framebuffer is the entirety of fbdev's system memory */
+	info->fix.smem_start =
+		(unsigned long)(ggtt->gmadr.start + vma->node.start);
+	info->fix.smem_len = vma->node.size;
+
 	vaddr = i915_vma_pin_iomap(vma);
 	if (IS_ERR(vaddr)) {
 		DRM_ERROR("Failed to remap framebuffer into virtual memory\n");
@@ -244,10 +249,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
 	info->screen_base = vaddr;
 	info->screen_size = vma->node.size;
 
-	/* Our framebuffer is the entirety of fbdev's system memory */
-	info->fix.smem_start = (unsigned long)info->screen_base;
-	info->fix.smem_len = info->screen_size;
-
 	drm_fb_helper_fill_info(info, &ifbdev->helper, sizes);
 
 	/* If the object is shmemfs backed, it will have given us zeroed pages.


@@ -671,8 +671,28 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
 		obj->mm.dirty = false;
 
 	for_each_sgt_page(page, sgt_iter, pages) {
-		if (obj->mm.dirty)
+		if (obj->mm.dirty && trylock_page(page)) {
+			/*
+			 * As this may not be anonymous memory (e.g. shmem)
+			 * but exist on a real mapping, we have to lock
+			 * the page in order to dirty it -- holding
+			 * the page reference is not sufficient to
+			 * prevent the inode from being truncated.
+			 * Play safe and take the lock.
+			 *
+			 * However...!
+			 *
+			 * The mmu-notifier can be invalidated for a
+			 * migrate_page, that is alreadying holding the lock
+			 * on the page. Such a try_to_unmap() will result
+			 * in us calling put_pages() and so recursively try
+			 * to lock the page. We avoid that deadlock with
+			 * a trylock_page() and in exchange we risk missing
+			 * some page dirtying.
+			 */
 			set_page_dirty(page);
+			unlock_page(page);
+		}
 
 		mark_page_accessed(page);
 		put_page(page);


@@ -103,6 +103,8 @@ node_create(struct intel_engine_pool *pool, size_t sz)
 		return ERR_CAST(obj);
 	}
 
+	i915_gem_object_set_readonly(obj);
+
 	node->obj = obj;
 	return node;
 }


@@ -843,8 +843,8 @@ create_event_attributes(struct i915_pmu *pmu)
 		const char *name;
 		const char *unit;
 	} events[] = {
-		__event(I915_PMU_ACTUAL_FREQUENCY, "actual-frequency", "MHz"),
-		__event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "MHz"),
+		__event(I915_PMU_ACTUAL_FREQUENCY, "actual-frequency", "M"),
+		__event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "M"),
 		__event(I915_PMU_INTERRUPTS, "interrupts", NULL),
 		__event(I915_PMU_RC6_RESIDENCY, "rc6-residency", "ns"),
 	};


@@ -177,9 +177,37 @@ static inline int rq_prio(const struct i915_request *rq)
 	return rq->sched.attr.priority | __NO_PREEMPTION;
 }
 
-static void kick_submission(struct intel_engine_cs *engine, int prio)
+static inline bool need_preempt(int prio, int active)
 {
-	const struct i915_request *inflight = *engine->execlists.active;
+	/*
+	 * Allow preemption of low -> normal -> high, but we do
+	 * not allow low priority tasks to preempt other low priority
+	 * tasks under the impression that latency for low priority
+	 * tasks does not matter (as much as background throughput),
+	 * so kiss.
+	 */
+	return prio >= max(I915_PRIORITY_NORMAL, active);
+}
+
+static void kick_submission(struct intel_engine_cs *engine,
+			    const struct i915_request *rq,
+			    int prio)
+{
+	const struct i915_request *inflight;
+
+	/*
+	 * We only need to kick the tasklet once for the high priority
+	 * new context we add into the queue.
+	 */
+	if (prio <= engine->execlists.queue_priority_hint)
+		return;
+
+	rcu_read_lock();
+
+	/* Nothing currently active? We're overdue for a submission! */
+	inflight = execlists_active(&engine->execlists);
+	if (!inflight)
+		goto unlock;
 
 	/*
 	 * If we are already the currently executing context, don't
@@ -188,10 +216,15 @@ static void kick_submission(struct intel_engine_cs *engine, int prio)
 	 * tasklet, i.e. we have not change the priority queue
 	 * sufficiently to oust the running context.
 	 */
-	if (!inflight || !i915_scheduler_need_preempt(prio, rq_prio(inflight)))
-		return;
+	if (inflight->hw_context == rq->hw_context)
+		goto unlock;
 
-	tasklet_hi_schedule(&engine->execlists.tasklet);
+	engine->execlists.queue_priority_hint = prio;
+	if (need_preempt(prio, rq_prio(inflight)))
+		tasklet_hi_schedule(&engine->execlists.tasklet);
+
+unlock:
+	rcu_read_unlock();
 }
 
 static void __i915_schedule(struct i915_sched_node *node,
@@ -317,13 +350,8 @@ static void __i915_schedule(struct i915_sched_node *node,
 			list_move_tail(&node->link, cache.priolist);
 		}
 
-		if (prio <= engine->execlists.queue_priority_hint)
-			continue;
-
-		engine->execlists.queue_priority_hint = prio;
-
 		/* Defer (tasklet) submission until after all of our updates. */
-		kick_submission(engine, prio);
+		kick_submission(engine, node_to_request(node), prio);
 	}
 
 	spin_unlock(&engine->active.lock);
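
For reference, the need_preempt() rule above collapses to "only kick the tasklet if the new request is at least normal priority and at least the in-flight priority". Assuming I915_PRIORITY_NORMAL is 0, as it is in this era of the driver, a few illustrative evaluations:

/* need_preempt(prio, active) == prio >= max(0, active):
 *   need_preempt(-10, -20) -> false  (low never preempts low)
 *   need_preempt(  0, -20) -> true   (normal preempts low)
 *   need_preempt( 10,   0) -> true   (high preempts normal)
 *   need_preempt( 10,  20) -> false  (never preempts higher prio)
 */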


@@ -39,6 +39,7 @@ struct i2c_acpi_lookup {
 	int index;
 	u32 speed;
 	u32 min_speed;
+	u32 force_speed;
 };
 
 /**
@@ -285,6 +286,19 @@ i2c_acpi_match_device(const struct acpi_device_id *matches,
 	return acpi_match_device(matches, &client->dev);
 }
 
+static const struct acpi_device_id i2c_acpi_force_400khz_device_ids[] = {
+	/*
+	 * These Silead touchscreen controllers only work at 400KHz, for
+	 * some reason they do not work at 100KHz. On some devices the ACPI
+	 * tables list another device at their bus as only being capable
+	 * of 100KHz, testing has shown that these other devices work fine
+	 * at 400KHz (as can be expected of any recent i2c hw) so we force
+	 * the speed of the bus to 400 KHz if a Silead device is present.
+	 */
+	{ "MSSL1680", 0 },
+	{}
+};
+
 static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level,
 					 void *data, void **return_value)
 {
@@ -303,6 +317,9 @@ static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level,
 	if (lookup->speed <= lookup->min_speed)
 		lookup->min_speed = lookup->speed;
 
+	if (acpi_match_device_ids(adev, i2c_acpi_force_400khz_device_ids) == 0)
+		lookup->force_speed = 400000;
+
 	return AE_OK;
 }
 
@@ -340,7 +357,16 @@ u32 i2c_acpi_find_bus_speed(struct device *dev)
 		return 0;
 	}
 
-	return lookup.min_speed != UINT_MAX ? lookup.min_speed : 0;
+	if (lookup.force_speed) {
+		if (lookup.force_speed != lookup.min_speed)
+			dev_warn(dev, FW_BUG "DSDT uses known not-working I2C bus speed %d, forcing it to %d\n",
+				 lookup.min_speed, lookup.force_speed);
+		return lookup.force_speed;
+	} else if (lookup.min_speed != UINT_MAX) {
+		return lookup.min_speed;
+	} else {
+		return 0;
+	}
 }
 EXPORT_SYMBOL_GPL(i2c_acpi_find_bus_speed);


@@ -245,14 +245,14 @@ static int of_i2c_notify(struct notifier_block *nb, unsigned long action,
 		}
 
 		client = of_i2c_register_device(adap, rd->dn);
-		put_device(&adap->dev);
-
 		if (IS_ERR(client)) {
 			dev_err(&adap->dev, "failed to create client for '%pOF'\n",
 				rd->dn);
+			put_device(&adap->dev);
 			of_node_clear_flag(rd->dn, OF_POPULATED);
 			return notifier_from_errno(PTR_ERR(client));
 		}
+		put_device(&adap->dev);
 		break;
 	case OF_RECONFIG_CHANGE_REMOVE:
 		/* already depopulated? */


@@ -107,7 +107,7 @@ static int m_can_plat_probe(struct platform_device *pdev)
 	mcan_class->is_peripheral = false;
 
-	platform_set_drvdata(pdev, mcan_class->dev);
+	platform_set_drvdata(pdev, mcan_class->net);
 
 	m_can_init_ram(mcan_class);
 
@@ -166,8 +166,6 @@ static int __maybe_unused m_can_runtime_resume(struct device *dev)
 	if (err)
 		clk_disable_unprepare(mcan_class->hclk);
 
-	m_can_class_resume(dev);
-
 	return err;
 }


@@ -3636,6 +3636,11 @@ fec_drv_remove(struct platform_device *pdev)
 	struct net_device *ndev = platform_get_drvdata(pdev);
 	struct fec_enet_private *fep = netdev_priv(ndev);
 	struct device_node *np = pdev->dev.of_node;
+	int ret;
+
+	ret = pm_runtime_get_sync(&pdev->dev);
+	if (ret < 0)
+		return ret;
 
 	cancel_work_sync(&fep->tx_timeout_work);
 	fec_ptp_stop(pdev);
@@ -3643,15 +3648,17 @@ fec_drv_remove(struct platform_device *pdev)
 	fec_enet_mii_remove(fep);
 	if (fep->reg_phy)
 		regulator_disable(fep->reg_phy);
-	pm_runtime_put(&pdev->dev);
-	pm_runtime_disable(&pdev->dev);
-	clk_disable_unprepare(fep->clk_ahb);
-	clk_disable_unprepare(fep->clk_ipg);
+
 	if (of_phy_is_fixed_link(np))
 		of_phy_deregister_fixed_link(np);
 	of_node_put(fep->phy_node);
 	free_netdev(ndev);
 
+	clk_disable_unprepare(fep->clk_ahb);
+	clk_disable_unprepare(fep->clk_ipg);
+	pm_runtime_put_noidle(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+
 	return 0;
 }
} }


@@ -393,12 +393,13 @@ static void gve_tx_fill_seg_desc(union gve_tx_desc *seg_desc,
 static void gve_dma_sync_for_device(struct device *dev, dma_addr_t *page_buses,
 				    u64 iov_offset, u64 iov_len)
 {
+	u64 last_page = (iov_offset + iov_len - 1) / PAGE_SIZE;
+	u64 first_page = iov_offset / PAGE_SIZE;
 	dma_addr_t dma;
-	u64 addr;
+	u64 page;
 
-	for (addr = iov_offset; addr < iov_offset + iov_len;
-	     addr += PAGE_SIZE) {
-		dma = page_buses[addr / PAGE_SIZE];
+	for (page = first_page; page <= last_page; page++) {
+		dma = page_buses[page];
 		dma_sync_single_for_device(dev, dma, PAGE_SIZE, DMA_TO_DEVICE);
 	}
 }
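
A worked example of the bug this hunk fixes (numbers are illustrative, not from the source):

/* With PAGE_SIZE = 4096, iov_offset = 4000 and iov_len = 200 the
 * buffer spans pages 0 and 1.  Old loop: addr = 4000 < 4200 syncs
 * page 4000/4096 = 0, then addr += 4096 -> 8096 ends the loop, so
 * page 1 (bytes 4096..4199) is never synced.  New loop:
 * first_page = 4000/4096 = 0, last_page = (4000+200-1)/4096 = 1,
 * so both pages are synced.
 */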


@@ -165,7 +165,7 @@ enum HLCGE_PORT_TYPE {
 #define HCLGE_GLOBAL_RESET_BIT		0
 #define HCLGE_CORE_RESET_BIT		1
 #define HCLGE_IMP_RESET_BIT		2
-#define HCLGE_RESET_INT_M		GENMASK(2, 0)
+#define HCLGE_RESET_INT_M		GENMASK(7, 5)
 #define HCLGE_FUN_RST_ING		0x20C00
 #define HCLGE_FUN_RST_ING_B		0


@@ -2878,10 +2878,15 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
 
 	if (test_bit(0, &adapter->resetting) &&
 	    adapter->reset_reason == VNIC_RESET_MOBILITY) {
-		struct irq_desc *desc = irq_to_desc(scrq->irq);
-		struct irq_chip *chip = irq_desc_get_chip(desc);
+		u64 val = (0xff000000) | scrq->hw_irq;
 
-		chip->irq_eoi(&desc->irq_data);
+		rc = plpar_hcall_norets(H_EOI, val);
+		/* H_EOI would fail with rc = H_FUNCTION when running
+		 * in XIVE mode which is expected, but not an error.
+		 */
+		if (rc && (rc != H_FUNCTION))
+			dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
+				val, rc);
 	}
 
 	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,


@@ -1745,6 +1745,7 @@ static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
 		err = mlx4_en_get_flow(dev, cmd, cmd->fs.location);
 		break;
 	case ETHTOOL_GRXCLSRLALL:
+		cmd->data = MAX_NUM_OF_FS_RULES;
 		while ((!err || err == -ENOENT) && priority < cmd->rule_cnt) {
 			err = mlx4_en_get_flow(dev, cmd, i);
 			if (!err)
@@ -1811,6 +1812,7 @@ static int mlx4_en_set_channels(struct net_device *dev,
 	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_en_port_profile new_prof;
 	struct mlx4_en_priv *tmp;
+	int total_tx_count;
 	int port_up = 0;
 	int xdp_count;
 	int err = 0;
@@ -1825,13 +1827,12 @@ static int mlx4_en_set_channels(struct net_device *dev,
 	mutex_lock(&mdev->state_lock);
 	xdp_count = priv->tx_ring_num[TX_XDP] ? channel->rx_count : 0;
-	if (channel->tx_count * priv->prof->num_up + xdp_count >
-	    priv->mdev->profile.max_num_tx_rings_p_up * priv->prof->num_up) {
+	total_tx_count = channel->tx_count * priv->prof->num_up + xdp_count;
+	if (total_tx_count > MAX_TX_RINGS) {
 		err = -EINVAL;
 		en_err(priv,
 		       "Total number of TX and XDP rings (%d) exceeds the maximum supported (%d)\n",
-		       channel->tx_count * priv->prof->num_up + xdp_count,
-		       MAX_TX_RINGS);
+		       total_tx_count, MAX_TX_RINGS);
 		goto out;
 	}


@@ -91,6 +91,7 @@ int mlx4_en_alloc_tx_queue_per_tc(struct net_device *dev, u8 tc)
 	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_en_port_profile new_prof;
 	struct mlx4_en_priv *tmp;
+	int total_count;
 	int port_up = 0;
 	int err = 0;
 
@@ -104,6 +105,14 @@ int mlx4_en_alloc_tx_queue_per_tc(struct net_device *dev, u8 tc)
 				      MLX4_EN_NUM_UP_HIGH;
 	new_prof.tx_ring_num[TX] = new_prof.num_tx_rings_p_up *
 				   new_prof.num_up;
+	total_count = new_prof.tx_ring_num[TX] + new_prof.tx_ring_num[TX_XDP];
+	if (total_count > MAX_TX_RINGS) {
+		err = -EINVAL;
+		en_err(priv,
+		       "Total number of TX and XDP rings (%d) exceeds the maximum supported (%d)\n",
+		       total_count, MAX_TX_RINGS);
+		goto out;
+	}
 	err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
 	if (err)
 		goto out;


@@ -708,9 +708,9 @@ static int get_fec_supported_advertised(struct mlx5_core_dev *dev,
 
 static void ptys2ethtool_supported_advertised_port(struct ethtool_link_ksettings *link_ksettings,
 						   u32 eth_proto_cap,
-						   u8 connector_type)
+						   u8 connector_type, bool ext)
 {
-	if (!connector_type || connector_type >= MLX5E_CONNECTOR_TYPE_NUMBER) {
+	if ((!connector_type && !ext) || connector_type >= MLX5E_CONNECTOR_TYPE_NUMBER) {
 		if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_10GBASE_CR)
 				   | MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
 				   | MLX5E_PROT_MASK(MLX5E_40GBASE_CR4)
@@ -842,9 +842,9 @@ static int ptys2connector_type[MLX5E_CONNECTOR_TYPE_NUMBER] = {
 		[MLX5E_PORT_OTHER]              = PORT_OTHER,
 	};
 
-static u8 get_connector_port(u32 eth_proto, u8 connector_type)
+static u8 get_connector_port(u32 eth_proto, u8 connector_type, bool ext)
 {
-	if (connector_type && connector_type < MLX5E_CONNECTOR_TYPE_NUMBER)
+	if ((connector_type || ext) && connector_type < MLX5E_CONNECTOR_TYPE_NUMBER)
 		return ptys2connector_type[connector_type];
 
 	if (eth_proto &
@@ -945,9 +945,9 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
 	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
 
 	link_ksettings->base.port = get_connector_port(eth_proto_oper,
-						       connector_type);
+						       connector_type, ext);
 	ptys2ethtool_supported_advertised_port(link_ksettings, eth_proto_admin,
-					       connector_type);
+					       connector_type, ext);
 	get_lp_advertising(mdev, eth_proto_lp, link_ksettings);
 
 	if (an_status == MLX5_AN_COMPLETE)


@@ -4250,9 +4250,12 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
 
 	switch (proto) {
 	case IPPROTO_GRE:
+		return features;
 	case IPPROTO_IPIP:
 	case IPPROTO_IPV6:
-		return features;
+		if (mlx5e_tunnel_proto_supported(priv->mdev, IPPROTO_IPIP))
+			return features;
+		break;
 	case IPPROTO_UDP:
 		udph = udp_hdr(skb);
 		port = be16_to_cpu(udph->dest);


@@ -3291,7 +3291,20 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
 			action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
 				  MLX5_FLOW_CONTEXT_ACTION_COUNT;
 
-			if (netdev_port_same_parent_id(priv->netdev, out_dev)) {
+			if (encap) {
+				parse_attr->mirred_ifindex[attr->out_count] =
+					out_dev->ifindex;
+				parse_attr->tun_info[attr->out_count] = dup_tun_info(info);
+				if (!parse_attr->tun_info[attr->out_count])
+					return -ENOMEM;
+				encap = false;
+				attr->dests[attr->out_count].flags |=
+					MLX5_ESW_DEST_ENCAP;
+				attr->out_count++;
+				/* attr->dests[].rep is resolved when we
+				 * handle encap
+				 */
+			} else if (netdev_port_same_parent_id(priv->netdev, out_dev)) {
 				struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
 				struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
 				struct net_device *uplink_upper;
@@ -3333,19 +3346,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
 				attr->dests[attr->out_count].rep = rpriv->rep;
 				attr->dests[attr->out_count].mdev = out_priv->mdev;
 				attr->out_count++;
-			} else if (encap) {
-				parse_attr->mirred_ifindex[attr->out_count] =
-					out_dev->ifindex;
-				parse_attr->tun_info[attr->out_count] = dup_tun_info(info);
-				if (!parse_attr->tun_info[attr->out_count])
-					return -ENOMEM;
-				encap = false;
-				attr->dests[attr->out_count].flags |=
-					MLX5_ESW_DEST_ENCAP;
-				attr->out_count++;
-				/* attr->dests[].rep is resolved when we
-				 * handle encap
-				 */
 			} else if (parse_attr->filter_dev != priv->netdev) {
 				/* All mlx5 devices are called to configure
 				 * high level device filters. Therefore, the
@@ -4035,9 +4035,8 @@ int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
 				struct tc_cls_matchall_offload *ma)
 {
 	struct netlink_ext_ack *extack = ma->common.extack;
-	int prio = TC_H_MAJ(ma->common.prio) >> 16;
 
-	if (prio != 1) {
+	if (ma->common.prio != 1) {
 		NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported");
 		return -EINVAL;
 	}


@@ -2245,7 +2245,7 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
 
 unlock:
 	mutex_unlock(&esw->state_lock);
-	return 0;
+	return err;
 }
 
 int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,


@@ -587,7 +587,7 @@ static void del_sw_flow_group(struct fs_node *node)
 	rhashtable_destroy(&fg->ftes_hash);
 	ida_destroy(&fg->fte_allocator);
-	if (ft->autogroup.active)
+	if (ft->autogroup.active && fg->max_ftes == ft->autogroup.group_size)
 		ft->autogroup.num_groups--;
 	err = rhltable_remove(&ft->fgs_hash,
 			      &fg->hash,
@@ -1134,6 +1134,8 @@ mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
 
 	ft->autogroup.active = true;
 	ft->autogroup.required_groups = max_num_groups;
+	/* We save place for flow groups in addition to max types */
+	ft->autogroup.group_size = ft->max_fte / (max_num_groups + 1);
 
 	return ft;
 }
@@ -1336,8 +1338,7 @@ static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft
 		return ERR_PTR(-ENOENT);
 
 	if (ft->autogroup.num_groups < ft->autogroup.required_groups)
-		/* We save place for flow groups in addition to max types */
-		group_size = ft->max_fte / (ft->autogroup.required_groups + 1);
+		group_size = ft->autogroup.group_size;
 
 	/* ft->max_fte == ft->autogroup.max_types */
 	if (group_size == 0)
@@ -1364,7 +1365,8 @@ static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft
 	if (IS_ERR(fg))
 		goto out;
 
-	ft->autogroup.num_groups++;
+	if (group_size == ft->autogroup.group_size)
+		ft->autogroup.num_groups++;
 
 out:
 	return fg;
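
The arithmetic being cached here, with illustrative numbers (real tables size max_fte from log2 parameters):

/* Example: a table created with max_fte = 64 and max_num_groups = 3
 * gets group_size = 64 / (3 + 1) = 16, i.e. three 16-entry
 * autogroups plus 16 entries held in reserve.  Caching group_size at
 * table creation lets del_sw_flow_group() tell standard-size
 * autogroups (fg->max_ftes == group_size) apart from odd-sized
 * groups, so num_groups is only decremented for the former.
 */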


@@ -162,6 +162,7 @@ struct mlx5_flow_table {
 	struct {
 		bool			active;
 		unsigned int		required_groups;
+		unsigned int		group_size;
 		unsigned int		num_groups;
 	} autogroup;
 	/* Protect fwd_rules */


@@ -1564,6 +1564,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
 	{ PCI_VDEVICE(MELLANOX, 0x101c), MLX5_PCI_DEV_IS_VF},	/* ConnectX-6 VF */
 	{ PCI_VDEVICE(MELLANOX, 0x101d) },			/* ConnectX-6 Dx */
 	{ PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF},	/* ConnectX Family mlx5Gen Virtual Function */
+	{ PCI_VDEVICE(MELLANOX, 0x101f) },			/* ConnectX-6 LX */
 	{ PCI_VDEVICE(MELLANOX, 0xa2d2) },			/* BlueField integrated ConnectX-5 network controller */
 	{ PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF},	/* BlueField integrated ConnectX-5 network controller VF */
 	{ PCI_VDEVICE(MELLANOX, 0xa2d6) },			/* BlueField-2 integrated ConnectX-6 Dx network controller */


@@ -595,6 +595,18 @@ static void dr_rule_clean_rule_members(struct mlx5dr_rule *rule,
 	}
 }
 
+static u16 dr_get_bits_per_mask(u16 byte_mask)
+{
+	u16 bits = 0;
+
+	while (byte_mask) {
+		byte_mask = byte_mask & (byte_mask - 1);
+		bits++;
+	}
+
+	return bits;
+}
+
 static bool dr_rule_need_enlarge_hash(struct mlx5dr_ste_htbl *htbl,
 				      struct mlx5dr_domain *dmn,
 				      struct mlx5dr_domain_rx_tx *nic_dmn)
@@ -607,6 +619,9 @@ static bool dr_rule_need_enlarge_hash(struct mlx5dr_ste_htbl *htbl,
 	if (!ctrl->may_grow)
 		return false;
 
+	if (dr_get_bits_per_mask(htbl->byte_mask) * BITS_PER_BYTE <= htbl->chunk_size)
+		return false;
+
 	if (ctrl->num_of_collisions >= ctrl->increase_threshold &&
 	    (ctrl->num_of_valid_entries - ctrl->num_of_collisions) >= ctrl->increase_threshold)
 		return true;
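
dr_get_bits_per_mask() is the classic Kernighan popcount: byte_mask & (byte_mask - 1) clears the lowest set bit, so the loop runs once per set bit. A short trace:

/* byte_mask = 0b1011:
 *   0b1011 & 0b1010 = 0b1010   bits = 1
 *   0b1010 & 0b1001 = 0b1000   bits = 2
 *   0b1000 & 0b0111 = 0b0000   bits = 3
 * Three masked bytes contribute 3 * BITS_PER_BYTE usable index bits,
 * so the new check stops growing the hash table once its size would
 * exceed what the mask can actually address.
 */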


@@ -700,6 +700,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
 	unsigned int irqn;
 	void *cqc, *in;
 	__be64 *pas;
+	int vector;
 	u32 i;
 
 	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
@@ -728,7 +729,8 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
 	if (!in)
 		goto err_cqwq;
 
-	err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn, &irqn);
+	vector = smp_processor_id() % mlx5_comp_vectors_count(mdev);
+	err = mlx5_vector2eqn(mdev, vector, &eqn, &irqn);
 	if (err) {
 		kvfree(in);
 		goto err_cqwq;


@@ -568,18 +568,6 @@ bool mlx5dr_ste_not_used_ste(struct mlx5dr_ste *ste)
 	return !refcount_read(&ste->refcount);
 }
 
-static u16 get_bits_per_mask(u16 byte_mask)
-{
-	u16 bits = 0;
-
-	while (byte_mask) {
-		byte_mask = byte_mask & (byte_mask - 1);
-		bits++;
-	}
-
-	return bits;
-}
-
 /* Init one ste as a pattern for ste data array */
 void mlx5dr_ste_set_formatted_ste(u16 gvmi,
 				  struct mlx5dr_domain_rx_tx *nic_dmn,
@@ -628,20 +616,12 @@ int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
 	struct mlx5dr_ste_htbl *next_htbl;
 
 	if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste->ste_chain_location)) {
-		u32 bits_in_mask;
 		u8 next_lu_type;
 		u16 byte_mask;
 
 		next_lu_type = MLX5_GET(ste_general, hw_ste, next_lu_type);
 		byte_mask = MLX5_GET(ste_general, hw_ste, byte_mask);
 
-		/* Don't allocate table more than required,
-		 * the size of the table defined via the byte_mask, so no need
-		 * to allocate more than that.
-		 */
-		bits_in_mask = get_bits_per_mask(byte_mask) * BITS_PER_BYTE;
-		log_table_size = min(log_table_size, bits_in_mask);
-
 		next_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
 						  log_table_size,
 						  next_lu_type,
@@ -679,7 +659,7 @@ static void dr_ste_set_ctrl(struct mlx5dr_ste_htbl *htbl)
 
 	htbl->ctrl.may_grow = true;
 
-	if (htbl->chunk_size == DR_CHUNK_SIZE_MAX - 1)
+	if (htbl->chunk_size == DR_CHUNK_SIZE_MAX - 1 || !htbl->byte_mask)
 		htbl->ctrl.may_grow = false;
 
 	/* Threshold is 50%, one is added to table of size 1 */


@@ -66,6 +66,8 @@ retry:
 		return err;
 
 	if (fsm_state_err != MLXFW_FSM_STATE_ERR_OK) {
+		fsm_state_err = min_t(enum mlxfw_fsm_state_err,
+				      fsm_state_err, MLXFW_FSM_STATE_ERR_MAX);
 		pr_err("Firmware flash failed: %s\n",
 		       mlxfw_fsm_state_err_str[fsm_state_err]);
 		NL_SET_ERR_MSG_MOD(extack, "Firmware flash failed");


@@ -998,7 +998,7 @@ u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
 	if (d)
 		return l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
 	else
-		return l3mdev_fib_table(ol_dev) ? : RT_TABLE_MAIN;
+		return RT_TABLE_MAIN;
 }
 
 static struct mlxsw_sp_rif *
@@ -1602,27 +1602,10 @@ static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
 {
 	struct mlxsw_sp_ipip_entry *ipip_entry =
 		mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
-	enum mlxsw_sp_l3proto ul_proto;
-	union mlxsw_sp_l3addr saddr;
-	u32 ul_tb_id;
 
 	if (!ipip_entry)
 		return 0;
 
-	/* For flat configuration cases, moving overlay to a different VRF might
-	 * cause local address conflict, and the conflicting tunnels need to be
-	 * demoted.
-	 */
-	ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
-	ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
-	saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
-	if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
-						 saddr, ul_tb_id,
-						 ipip_entry)) {
-		mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
-		return 0;
-	}
-
 	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
 						   true, false, false, extack);
 }


@@ -363,7 +363,7 @@ qcaspi_receive(struct qcaspi *qca)
 		netdev_dbg(net_dev, "qcaspi_receive: SPI_REG_RDBUF_BYTE_AVA: Value: %08x\n",
 			   available);
 
-		if (available > QCASPI_HW_BUF_LEN) {
+		if (available > QCASPI_HW_BUF_LEN + QCASPI_HW_PKT_LEN) {
 			/* This could only happen by interferences on the SPI line.
 			 * So retry later ...
 			 */
@@ -496,7 +496,6 @@ qcaspi_qca7k_sync(struct qcaspi *qca, int event)
 	u16 signature = 0;
 	u16 spi_config;
 	u16 wrbuf_space = 0;
-	static u16 reset_count;
 
 	if (event == QCASPI_EVENT_CPUON) {
 		/* Read signature twice, if not valid
@@ -549,13 +548,13 @@ qcaspi_qca7k_sync(struct qcaspi *qca, int event)
 
 		qca->sync = QCASPI_SYNC_RESET;
 		qca->stats.trig_reset++;
-		reset_count = 0;
+		qca->reset_count = 0;
 		break;
 	case QCASPI_SYNC_RESET:
-		reset_count++;
+		qca->reset_count++;
 		netdev_dbg(qca->net_dev, "sync: waiting for CPU on, count %u.\n",
-			   reset_count);
-		if (reset_count >= QCASPI_RESET_TIMEOUT) {
+			   qca->reset_count);
+		if (qca->reset_count >= QCASPI_RESET_TIMEOUT) {
 			/* reset did not seem to take place, try again */
 			qca->sync = QCASPI_SYNC_UNKNOWN;
 			qca->stats.reset_timeout++;


@@ -94,6 +94,7 @@ struct qcaspi {
 	unsigned int intr_req;
 	unsigned int intr_svc;
+	u16 reset_count;
 
 #ifdef CONFIG_DEBUG_FS
 	struct dentry *device_root;
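
The qca_spi change above is a standard fix for a multi-instance bug: a function-local static is a single variable shared by every device the driver binds, so two QCA7000 adapters would step on each other's reset counter. A minimal illustration of the pitfall and the fix:

/* Anti-pattern: one counter shared by all callers and all devices. */
static void bad_sync(struct qcaspi *qca)
{
	static u16 reset_count;		/* shared across every instance! */

	reset_count++;
}

/* Fix: per-device state lives in the per-device struct. */
static void good_sync(struct qcaspi *qca)
{
	qca->reset_count++;		/* private to this adapter */
}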


@@ -6952,8 +6952,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		dev->gso_max_segs = RTL_GSO_MAX_SEGS_V1;
 	}
 
-	/* RTL8168e-vl has a HW issue with TSO */
-	if (tp->mac_version == RTL_GIGA_MAC_VER_34) {
+	/* RTL8168e-vl and one RTL8168c variant are known to have a
+	 * HW issue with TSO.
+	 */
+	if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
+	    tp->mac_version == RTL_GIGA_MAC_VER_22) {
 		dev->vlan_features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);
 		dev->hw_features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);
 		dev->features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);


@@ -1531,7 +1531,8 @@ void efx_ptp_remove(struct efx_nic *efx)
 	(void)efx_ptp_disable(efx);
 
 	cancel_work_sync(&efx->ptp_data->work);
-	cancel_work_sync(&efx->ptp_data->pps_work);
+	if (efx->ptp_data->pps_workwq)
+		cancel_work_sync(&efx->ptp_data->pps_work);
 
 	skb_queue_purge(&efx->ptp_data->rxq);
 	skb_queue_purge(&efx->ptp_data->txq);


@@ -609,7 +609,8 @@ struct nvsp_5_send_indirect_table {
 	/* The number of entries in the send indirection table */
 	u32 count;
 
-	/* The offset of the send indirection table from top of this struct.
+	/* The offset of the send indirection table from the beginning of
+	 * struct nvsp_message.
 	 * The send indirection table tells which channel to put the send
 	 * traffic on. Each entry is a channel number.
 	 */


@@ -1178,20 +1178,39 @@ static int netvsc_receive(struct net_device *ndev,
 }
 
 static void netvsc_send_table(struct net_device *ndev,
-			      const struct nvsp_message *nvmsg)
+			      struct netvsc_device *nvscdev,
+			      const struct nvsp_message *nvmsg,
+			      u32 msglen)
 {
 	struct net_device_context *net_device_ctx = netdev_priv(ndev);
-	u32 count, *tab;
+	u32 count, offset, *tab;
 	int i;
 
 	count = nvmsg->msg.v5_msg.send_table.count;
+	offset = nvmsg->msg.v5_msg.send_table.offset;
+
 	if (count != VRSS_SEND_TAB_SIZE) {
 		netdev_err(ndev, "Received wrong send-table size:%u\n", count);
 		return;
 	}
 
-	tab = (u32 *)((unsigned long)&nvmsg->msg.v5_msg.send_table +
-		      nvmsg->msg.v5_msg.send_table.offset);
+	/* If negotiated version <= NVSP_PROTOCOL_VERSION_6, the offset may be
+	 * wrong due to a host bug. So fix the offset here.
+	 */
+	if (nvscdev->nvsp_version <= NVSP_PROTOCOL_VERSION_6 &&
+	    msglen >= sizeof(struct nvsp_message_header) +
+	    sizeof(union nvsp_6_message_uber) + count * sizeof(u32))
+		offset = sizeof(struct nvsp_message_header) +
+			 sizeof(union nvsp_6_message_uber);
+
+	/* Boundary check for all versions */
+	if (offset > msglen - count * sizeof(u32)) {
+		netdev_err(ndev, "Received send-table offset too big:%u\n",
+			   offset);
+		return;
+	}
+
+	tab = (void *)nvmsg + offset;
 
 	for (i = 0; i < count; i++)
 		net_device_ctx->tx_table[i] = tab[i];
@@ -1209,12 +1228,14 @@ static void netvsc_send_vf(struct net_device *ndev,
 		    net_device_ctx->vf_alloc ? "added" : "removed");
 }
 
 static void netvsc_receive_inband(struct net_device *ndev,
-				  const struct nvsp_message *nvmsg)
+				  struct netvsc_device *nvscdev,
+				  const struct nvsp_message *nvmsg,
+				  u32 msglen)
 {
 	switch (nvmsg->hdr.msg_type) {
 	case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
-		netvsc_send_table(ndev, nvmsg);
+		netvsc_send_table(ndev, nvscdev, nvmsg, msglen);
 		break;
 
 	case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
@@ -1232,6 +1253,7 @@ static int netvsc_process_raw_pkt(struct hv_device *device,
 {
 	struct vmbus_channel *channel = nvchan->channel;
 	const struct nvsp_message *nvmsg = hv_pkt_data(desc);
+	u32 msglen = hv_pkt_datalen(desc);
 
 	trace_nvsp_recv(ndev, channel, nvmsg);
 
@@ -1247,7 +1269,7 @@ static int netvsc_process_raw_pkt(struct hv_device *device,
 		break;
 
 	case VM_PKT_DATA_INBAND:
-		netvsc_receive_inband(ndev, nvmsg);
+		netvsc_receive_inband(ndev, net_device, nvmsg, msglen);
 		break;
 
 	default:

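Editor's note: the boundary check is the heart of this fix. A hedged, overflow-safe restatement of the arithmetic as a standalone sketch (not the kernel's exact code, which relies on count already being validated):

    #include <stdint.h>

    /* The indirection table is `count` u32 entries starting at `offset`
     * inside a message of `msglen` bytes; it must fit entirely inside
     * what the host actually sent. E.g. fits(40, 16, 104) is true
     * because 40 + 16 * 4 == 104.
     */
    static int fits(uint32_t offset, uint32_t count, uint32_t msglen)
    {
            uint64_t need = (uint64_t)count * sizeof(uint32_t);

            return need <= msglen && offset <= msglen - need;
    }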
View File

@@ -145,8 +145,11 @@ err_out_free_mdiobus:
 static int sun4i_mdio_remove(struct platform_device *pdev)
 {
 	struct mii_bus *bus = platform_get_drvdata(pdev);
+	struct sun4i_mdio_data *data = bus->priv;

 	mdiobus_unregister(bus);
+	if (data->regulator)
+		regulator_disable(data->regulator);
 	mdiobus_free(bus);

 	return 0;
View File

@@ -65,7 +65,7 @@ static int mdiobus_register_reset(struct mdio_device *mdiodev)
 		reset = devm_reset_control_get_exclusive(&mdiodev->dev,
 							 "phy");
 		if (IS_ERR(reset)) {
-			if (PTR_ERR(reset) == -ENOENT || PTR_ERR(reset) == -ENOSYS)
+			if (PTR_ERR(reset) == -ENOENT || PTR_ERR(reset) == -ENOTSUPP)
 				reset = NULL;
 			else
 				return PTR_ERR(reset);
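Editor's note: treating -ENOENT and -ENOTSUPP as "no reset line present" is the optional-resource idiom. The reset framework also provides a dedicated helper for it; whether that variant fits this call site is a judgment the patch does not make, so the following is only a sketch:

    struct reset_control *reset;

    /* devm_reset_control_get_optional_exclusive() returns NULL (not
     * -ENOENT) when no "phy" reset is described, so only real errors
     * remain. reset_control_assert()/deassert() accept a NULL handle.
     */
    reset = devm_reset_control_get_optional_exclusive(&mdiodev->dev, "phy");
    if (IS_ERR(reset))
            return PTR_ERR(reset);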
View File

@@ -593,6 +593,8 @@ static int phylink_register_sfp(struct phylink *pl,
  * Create a new phylink instance, and parse the link parameters found in @np.
  * This will parse in-band modes, fixed-link or SFP configuration.
  *
+ * Note: the rtnl lock must not be held when calling this function.
+ *
  * Returns a pointer to a &struct phylink, or an error-pointer value. Users
  * must use IS_ERR() to check for errors from this function.
  */

@@ -670,6 +672,8 @@ EXPORT_SYMBOL_GPL(phylink_create);
 *
 * Destroy a phylink instance. Any PHY that has been attached must have been
 * cleaned up via phylink_disconnect_phy() prior to calling this function.
+ *
+ * Note: the rtnl lock must not be held when calling this function.
 */
 void phylink_destroy(struct phylink *pl)
 {

@@ -1242,7 +1246,13 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
 	pl->link_config.duplex = our_kset.base.duplex;
 	pl->link_config.an_enabled = our_kset.base.autoneg != AUTONEG_DISABLE;

-	if (!test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state)) {
+	/* If we have a PHY, phylib will call our link state function if the
+	 * mode has changed, which will trigger a resolve and update the MAC
+	 * configuration. For a fixed link, this isn't able to change any
+	 * parameters, which just leaves inband mode.
+	 */
+	if (pl->link_an_mode == MLO_AN_INBAND &&
+	    !test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state)) {
 		phylink_mac_config(pl, &pl->link_config);
 		phylink_mac_an_restart(pl);
 	}

@@ -1322,15 +1332,16 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl,
 	if (pause->tx_pause)
 		config->pause |= MLO_PAUSE_TX;

-	if (!test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state)) {
+	/* If we have a PHY, phylib will call our link state function if the
+	 * mode has changed, which will trigger a resolve and update the MAC
+	 * configuration.
+	 */
+	if (pl->phydev) {
+		phy_set_asym_pause(pl->phydev, pause->rx_pause,
+				   pause->tx_pause);
+	} else if (!test_bit(PHYLINK_DISABLE_STOPPED,
+			     &pl->phylink_disable_state)) {
 		switch (pl->link_an_mode) {
-		case MLO_AN_PHY:
-			/* Silently mark the carrier down, and then trigger a resolve */
-			if (pl->netdev)
-				netif_carrier_off(pl->netdev);
-			phylink_run_resolve(pl);
-			break;
-
 		case MLO_AN_FIXED:
 			/* Should we allow fixed links to change against the config? */
 			phylink_resolve_flow(pl, config);
View File

@@ -5249,10 +5249,10 @@ static int rtl8152_close(struct net_device *netdev)
 	unregister_pm_notifier(&tp->pm_notifier);
 #endif
 	tasklet_disable(&tp->tx_tl);
-	napi_disable(&tp->napi);
 	clear_bit(WORK_ENABLE, &tp->flags);
 	usb_kill_urb(tp->intr_urb);
 	cancel_delayed_work_sync(&tp->schedule);
+	napi_disable(&tp->napi);
 	netif_stop_queue(netdev);

 	res = usb_autopm_get_interface(tp->intf);

@@ -5518,10 +5518,10 @@ static int rtl8152_pre_reset(struct usb_interface *intf)
 	netif_stop_queue(netdev);
 	tasklet_disable(&tp->tx_tl);
-	napi_disable(&tp->napi);
 	clear_bit(WORK_ENABLE, &tp->flags);
 	usb_kill_urb(tp->intr_urb);
 	cancel_delayed_work_sync(&tp->schedule);
+	napi_disable(&tp->napi);
 	if (netif_carrier_ok(netdev)) {
 		mutex_lock(&tp->control);
 		tp->rtl_ops.disable(tp);

@@ -5639,7 +5639,7 @@ static int rtl8152_system_resume(struct r8152 *tp)
 	netif_device_attach(netdev);

-	if (netif_running(netdev) && netdev->flags & IFF_UP) {
+	if (netif_running(netdev) && (netdev->flags & IFF_UP)) {
 		tp->rtl_ops.up(tp);
 		netif_carrier_off(netdev);
 		set_bit(WORK_ENABLE, &tp->flags);

@@ -6213,9 +6213,15 @@ static int rtl8152_set_tunable(struct net_device *netdev,
 		}

 		if (tp->rx_copybreak != val) {
-			napi_disable(&tp->napi);
-			tp->rx_copybreak = val;
-			napi_enable(&tp->napi);
+			if (netdev->flags & IFF_UP) {
+				mutex_lock(&tp->control);
+				napi_disable(&tp->napi);
+				tp->rx_copybreak = val;
+				napi_enable(&tp->napi);
+				mutex_unlock(&tp->control);
+			} else {
+				tp->rx_copybreak = val;
+			}
 		}
 		break;
 	default:

@@ -6243,9 +6249,15 @@ static int rtl8152_set_ringparam(struct net_device *netdev,
 		return -EINVAL;

 	if (tp->rx_pending != ring->rx_pending) {
-		napi_disable(&tp->napi);
-		tp->rx_pending = ring->rx_pending;
-		napi_enable(&tp->napi);
+		if (netdev->flags & IFF_UP) {
+			mutex_lock(&tp->control);
+			napi_disable(&tp->napi);
+			tp->rx_pending = ring->rx_pending;
+			napi_enable(&tp->napi);
+			mutex_unlock(&tp->control);
+		} else {
+			tp->rx_pending = ring->rx_pending;
+		}
 	}

 	return 0;
View File

@@ -783,7 +783,7 @@ static int port100_send_frame_async(struct port100 *dev, struct sk_buff *out,

 	rc = port100_submit_urb_for_ack(dev, GFP_KERNEL);
 	if (rc)
-		usb_unlink_urb(dev->out_urb);
+		usb_kill_urb(dev->out_urb);

 exit:
 	mutex_unlock(&dev->out_urb_lock);
View File

@@ -845,6 +845,7 @@ struct qeth_card {
 	struct service_level qeth_service_level;
 	struct qdio_ssqd_desc ssqd;
 	debug_info_t *debug;
+	struct mutex sbp_lock;
 	struct mutex conf_mutex;
 	struct mutex discipline_mutex;
 	struct napi_struct napi;
View File

@@ -901,30 +901,30 @@ static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
 			 CCW_DEVID(cdev), dstat, cstat);
 		print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
 			       16, 1, irb, 64, 1);
-		return 1;
+		return -EIO;
 	}

 	if (dstat & DEV_STAT_UNIT_CHECK) {
 		if (sense[SENSE_RESETTING_EVENT_BYTE] &
 		    SENSE_RESETTING_EVENT_FLAG) {
 			QETH_CARD_TEXT(card, 2, "REVIND");
-			return 1;
+			return -EIO;
 		}
 		if (sense[SENSE_COMMAND_REJECT_BYTE] &
 		    SENSE_COMMAND_REJECT_FLAG) {
 			QETH_CARD_TEXT(card, 2, "CMDREJi");
-			return 1;
+			return -EIO;
 		}
 		if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
 			QETH_CARD_TEXT(card, 2, "AFFE");
-			return 1;
+			return -EIO;
 		}
 		if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
 			QETH_CARD_TEXT(card, 2, "ZEROSEN");
 			return 0;
 		}
 		QETH_CARD_TEXT(card, 2, "DGENCHK");
-		return 1;
+		return -EIO;
 	}
 	return 0;
 }
View File

@@ -457,10 +457,14 @@ static void qeth_l2_set_promisc_mode(struct qeth_card *card)
 	if (card->info.promisc_mode == enable)
 		return;

-	if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
+	if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE)) {
 		qeth_setadp_promisc_mode(card, enable);
-	else if (card->options.sbp.reflect_promisc)
-		qeth_l2_promisc_to_bridge(card, enable);
+	} else {
+		mutex_lock(&card->sbp_lock);
+		if (card->options.sbp.reflect_promisc)
+			qeth_l2_promisc_to_bridge(card, enable);
+		mutex_unlock(&card->sbp_lock);
+	}
 }

 /* New MAC address is added to the hash table and marked to be written on card

@@ -621,6 +625,7 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
 	int rc;

 	qeth_l2_vnicc_set_defaults(card);
+	mutex_init(&card->sbp_lock);

 	if (gdev->dev.type == &qeth_generic_devtype) {
 		rc = qeth_l2_create_device_attributes(&gdev->dev);

@@ -779,10 +784,12 @@ static int qeth_l2_set_online(struct ccwgroup_device *gdev)
 		goto out_remove;
 	}

+	mutex_lock(&card->sbp_lock);
 	qeth_bridgeport_query_support(card);
 	if (card->options.sbp.supported_funcs)
 		dev_info(&card->gdev->dev,
 			 "The device represents a Bridge Capable Port\n");
+	mutex_unlock(&card->sbp_lock);

 	qeth_l2_register_dev_addr(card);

@@ -1131,9 +1138,9 @@ static void qeth_bridge_state_change_worker(struct work_struct *work)

 	/* Role should not change by itself, but if it did, */
 	/* information from the hardware is authoritative.  */
-	mutex_lock(&data->card->conf_mutex);
+	mutex_lock(&data->card->sbp_lock);
 	data->card->options.sbp.role = entry->role;
-	mutex_unlock(&data->card->conf_mutex);
+	mutex_unlock(&data->card->sbp_lock);

 	snprintf(env_locrem, sizeof(env_locrem), "BRIDGEPORT=statechange");
 	snprintf(env_role, sizeof(env_role), "ROLE=%s",

@@ -1199,9 +1206,9 @@ static void qeth_bridge_host_event_worker(struct work_struct *work)
 			  : (data->hostevs.lost_event_mask == 0x02)
 			  ? "Bridge port state change"
 			  : "Unknown reason");
-		mutex_lock(&data->card->conf_mutex);
+		mutex_lock(&data->card->sbp_lock);
 		data->card->options.sbp.hostnotification = 0;
-		mutex_unlock(&data->card->conf_mutex);
+		mutex_unlock(&data->card->sbp_lock);
 		qeth_bridge_emit_host_event(data->card, anev_abort,
 					    0, NULL, NULL);
 	} else
View File

@@ -21,6 +21,7 @@ static ssize_t qeth_bridge_port_role_state_show(struct device *dev,
 	if (qeth_l2_vnicc_is_in_use(card))
 		return sprintf(buf, "n/a (VNIC characteristics)\n");

+	mutex_lock(&card->sbp_lock);
 	if (qeth_card_hw_is_reachable(card) &&
 	    card->options.sbp.supported_funcs)
 		rc = qeth_bridgeport_query_ports(card,

@@ -54,6 +55,7 @@ static ssize_t qeth_bridge_port_role_state_show(struct device *dev,
 		else
 			rc = sprintf(buf, "%s\n", word);
 	}
+	mutex_unlock(&card->sbp_lock);

 	return rc;
 }

@@ -86,6 +88,7 @@ static ssize_t qeth_bridge_port_role_store(struct device *dev,
 		return -EINVAL;

 	mutex_lock(&card->conf_mutex);
+	mutex_lock(&card->sbp_lock);

 	if (qeth_l2_vnicc_is_in_use(card))
 		rc = -EBUSY;

@@ -99,6 +102,7 @@ static ssize_t qeth_bridge_port_role_store(struct device *dev,
 	} else
 		card->options.sbp.role = role;

+	mutex_unlock(&card->sbp_lock);
 	mutex_unlock(&card->conf_mutex);

 	return rc ? rc : count;

@@ -147,6 +151,7 @@ static ssize_t qeth_bridgeport_hostnotification_store(struct device *dev,
 		return rc;

 	mutex_lock(&card->conf_mutex);
+	mutex_lock(&card->sbp_lock);

 	if (qeth_l2_vnicc_is_in_use(card))
 		rc = -EBUSY;

@@ -157,6 +162,7 @@ static ssize_t qeth_bridgeport_hostnotification_store(struct device *dev,
 	} else
 		card->options.sbp.hostnotification = enable;

+	mutex_unlock(&card->sbp_lock);
 	mutex_unlock(&card->conf_mutex);

 	return rc ? rc : count;

@@ -206,6 +212,7 @@ static ssize_t qeth_bridgeport_reflect_store(struct device *dev,
 		return -EINVAL;

 	mutex_lock(&card->conf_mutex);
+	mutex_lock(&card->sbp_lock);

 	if (qeth_l2_vnicc_is_in_use(card))
 		rc = -EBUSY;

@@ -217,6 +224,7 @@ static ssize_t qeth_bridgeport_reflect_store(struct device *dev,
 		rc = 0;
 	}

+	mutex_unlock(&card->sbp_lock);
 	mutex_unlock(&card->conf_mutex);

 	return rc ? rc : count;

@@ -252,6 +260,8 @@ void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card)
 		return;
 	if (!card->options.sbp.supported_funcs)
 		return;
+
+	mutex_lock(&card->sbp_lock);
 	if (card->options.sbp.role != QETH_SBP_ROLE_NONE) {
 		/* Conditional to avoid spurious error messages */
 		qeth_bridgeport_setrole(card, card->options.sbp.role);

@@ -263,8 +273,10 @@ void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card)
 		rc = qeth_bridgeport_an_set(card, 1);
 		if (rc)
 			card->options.sbp.hostnotification = 0;
-	} else
+	} else {
 		qeth_bridgeport_an_set(card, 0);
+	}
+	mutex_unlock(&card->sbp_lock);
 }

 /* VNIC CHARS support */
View File

@@ -312,7 +312,6 @@ void afs_break_callbacks(struct afs_server *server, size_t count,
 	_enter("%p,%zu,", server, count);

 	ASSERT(server != NULL);
-	ASSERTCMP(count, <=, AFSCBMAX);

 	/* TODO: Sort the callback break list by volume ID */
View File

@@ -637,6 +637,7 @@ long afs_wait_for_call_to_complete(struct afs_call *call,
 			call->need_attention = false;
 			__set_current_state(TASK_RUNNING);
 			afs_deliver_to_call(call);
+			timeout = rtt2;
 			continue;
 		}
View File

@@ -435,6 +435,7 @@ static int afs_fill_super(struct super_block *sb, struct afs_fs_context *ctx)
 	/* fill in the superblock */
 	sb->s_blocksize		= PAGE_SIZE;
 	sb->s_blocksize_bits	= PAGE_SHIFT;
+	sb->s_maxbytes		= MAX_LFS_FILESIZE;
 	sb->s_magic		= AFS_FS_MAGIC;
 	sb->s_op		= &afs_super_ops;
 	if (!as->dyn_root)
View File

@@ -1490,6 +1490,18 @@ static int ocfs2_xa_check_space(struct ocfs2_xa_loc *loc,
 	return loc->xl_ops->xlo_check_space(loc, xi);
 }

+static void ocfs2_xa_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash)
+{
+	loc->xl_ops->xlo_add_entry(loc, name_hash);
+	loc->xl_entry->xe_name_hash = cpu_to_le32(name_hash);
+	/*
+	 * We can't leave the new entry's xe_name_offset at zero or
+	 * add_namevalue() will go nuts.  We set it to the size of our
+	 * storage so that it can never be less than any other entry.
+	 */
+	loc->xl_entry->xe_name_offset = cpu_to_le16(loc->xl_size);
+}
+
 static void ocfs2_xa_add_namevalue(struct ocfs2_xa_loc *loc,
 				   struct ocfs2_xattr_info *xi)
 {

@@ -2121,31 +2133,29 @@ static int ocfs2_xa_prepare_entry(struct ocfs2_xa_loc *loc,
 	if (rc)
 		goto out;

-	if (!loc->xl_entry) {
-		rc = -EINVAL;
-		goto out;
-	}
-
-	if (ocfs2_xa_can_reuse_entry(loc, xi)) {
-		orig_value_size = loc->xl_entry->xe_value_size;
-		rc = ocfs2_xa_reuse_entry(loc, xi, ctxt);
-		if (rc)
-			goto out;
-		goto alloc_value;
-	}
+	if (loc->xl_entry) {
+		if (ocfs2_xa_can_reuse_entry(loc, xi)) {
+			orig_value_size = loc->xl_entry->xe_value_size;
+			rc = ocfs2_xa_reuse_entry(loc, xi, ctxt);
+			if (rc)
+				goto out;
+			goto alloc_value;
+		}

-	if (!ocfs2_xattr_is_local(loc->xl_entry)) {
-		orig_clusters = ocfs2_xa_value_clusters(loc);
-		rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
-		if (rc) {
-			mlog_errno(rc);
-			ocfs2_xa_cleanup_value_truncate(loc,
-							"overwriting",
-							orig_clusters);
-			goto out;
+		if (!ocfs2_xattr_is_local(loc->xl_entry)) {
+			orig_clusters = ocfs2_xa_value_clusters(loc);
+			rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
+			if (rc) {
+				mlog_errno(rc);
+				ocfs2_xa_cleanup_value_truncate(loc,
+								"overwriting",
+								orig_clusters);
+				goto out;
+			}
 		}
-	}
-	ocfs2_xa_wipe_namevalue(loc);
+		ocfs2_xa_wipe_namevalue(loc);
+	} else
+		ocfs2_xa_add_entry(loc, name_hash);

 	/*
 	 * If we get here, we have a blank entry.  Fill it.  We grow our
View File

@@ -336,7 +336,8 @@ enum {
 #define QI_DEV_IOTLB_SID(sid)	((u64)((sid) & 0xffff) << 32)
 #define QI_DEV_IOTLB_QDEP(qdep)	(((qdep) & 0x1f) << 16)
 #define QI_DEV_IOTLB_ADDR(addr)	((u64)(addr) & VTD_PAGE_MASK)
-#define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52))
+#define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \
+				   ((u64)((pfsid >> 4) & 0xfff) << 52))
 #define QI_DEV_IOTLB_SIZE	1
 #define QI_DEV_IOTLB_MAX_INVS	32

@@ -360,7 +361,8 @@ enum {
 #define QI_DEV_EIOTLB_PASID(p)	(((u64)p) << 32)
 #define QI_DEV_EIOTLB_SID(sid)	((u64)((sid) & 0xffff) << 16)
 #define QI_DEV_EIOTLB_QDEP(qd)	((u64)((qd) & 0x1f) << 4)
-#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52))
+#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \
+				    ((u64)((pfsid >> 4) & 0xfff) << 52))
 #define QI_DEV_EIOTLB_MAX_INVS	32

 /* Page group response descriptor QW0 */
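Editor's note: the bug being fixed is that the old macros wrote `pfsid & 0xfff` (bits 0-11) into descriptor bits 63:52, where PFSID bits 15:4 belong. A worked example of the corrected split, as a standalone sketch:

    #include <stdint.h>

    /* PFSID is 16 bits, stored split across the descriptor:
     * bits [3:0] at descriptor bits 15:12, bits [15:4] at bits 63:52.
     * For pfsid = 0xabcd: low nibble 0xd -> 0xd000,
     * upper bits 0xabc -> 0xabc << 52.
     */
    static uint64_t qi_pfsid(uint64_t pfsid)
    {
            return ((pfsid & 0xf) << 12) | (((pfsid >> 4) & 0xfff) << 52);
    }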
View File

@@ -4171,12 +4171,18 @@ static inline void skb_ext_reset(struct sk_buff *skb)
 		skb->active_extensions = 0;
 	}
 }
+
+static inline bool skb_has_extensions(struct sk_buff *skb)
+{
+	return unlikely(skb->active_extensions);
+}
 #else
 static inline void skb_ext_put(struct sk_buff *skb) {}
 static inline void skb_ext_reset(struct sk_buff *skb) {}
 static inline void skb_ext_del(struct sk_buff *skb, int unused) {}
 static inline void __skb_ext_copy(struct sk_buff *d, const struct sk_buff *s) {}
 static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *s) {}
+static inline bool skb_has_extensions(struct sk_buff *skb) { return false; }
 #endif /* CONFIG_SKB_EXTENSIONS */

 static inline void nf_reset_ct(struct sk_buff *skb)
View File

@@ -337,6 +337,8 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx);
 void tls_sw_strparser_arm(struct sock *sk, struct tls_context *ctx);
 void tls_sw_strparser_done(struct tls_context *tls_ctx);
 int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
+int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
+			   int offset, size_t size, int flags);
 int tls_sw_sendpage(struct sock *sk, struct page *page,
 		    int offset, size_t size, int flags);
 void tls_sw_cancel_work_tx(struct tls_context *tls_ctx);
View File

@@ -678,8 +678,10 @@ bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv)
 	down_write(&bpf_devs_lock);
 	if (!offdevs_inited) {
 		err = rhashtable_init(&offdevs, &offdevs_params);
-		if (err)
+		if (err) {
+			up_write(&bpf_devs_lock);
 			return ERR_PTR(err);
+		}
 		offdevs_inited = true;
 	}
 	up_write(&bpf_devs_lock);
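Editor's note: the fix simply pairs the early return with the missing up_write(). The same logic is often written with a single unlock site; a hedged sketch of that alternative shape (same identifiers as above, but not the committed code):

    static int offdevs_init_once(void)
    {
            int err = 0;

            down_write(&bpf_devs_lock);
            if (!offdevs_inited) {
                    err = rhashtable_init(&offdevs, &offdevs_params);
                    if (!err)
                            offdevs_inited = true;
            }
            up_write(&bpf_devs_lock); /* single exit keeps lock/unlock paired */
            return err;
    }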
View File

@@ -1708,11 +1708,11 @@ static void pidfd_show_fdinfo(struct seq_file *m, struct file *f)
 /*
  * Poll support for process exit notification.
  */
-static unsigned int pidfd_poll(struct file *file, struct poll_table_struct *pts)
+static __poll_t pidfd_poll(struct file *file, struct poll_table_struct *pts)
 {
 	struct task_struct *task;
 	struct pid *pid = file->private_data;
-	int poll_flags = 0;
+	__poll_t poll_flags = 0;

 	poll_wait(file, &pid->wait_pidfd, pts);

@@ -1724,7 +1724,7 @@ static __poll_t pidfd_poll(struct file *file, struct poll_table_struct *pts)
 	 * group, then poll(2) should block, similar to the wait(2) family.
 	 */
 	if (!task || (task->exit_state && thread_group_empty(task)))
-		poll_flags = POLLIN | POLLRDNORM;
+		poll_flags = EPOLLIN | EPOLLRDNORM;
 	rcu_read_unlock();

 	return poll_flags;
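Editor's note: with the annotations fixed, the observable behaviour is unchanged: a pidfd becomes readable when the process exits. A hedged userspace sketch of what that looks like (assumes a kernel and headers where the pidfd_open syscall, new in 5.3, is available):

    #include <poll.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    static int wait_for_exit(pid_t pid)
    {
            int pidfd = syscall(SYS_pidfd_open, pid, 0);
            struct pollfd pfd = { .fd = pidfd, .events = POLLIN };

            if (pidfd < 0)
                    return -1;
            /* blocks until the kernel side reports EPOLLIN (exit) */
            if (poll(&pfd, 1, -1) == 1 && (pfd.revents & POLLIN))
                    printf("pid %d exited\n", (int)pid);
            close(pidfd);
            return 0;
    }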
View File

@@ -814,6 +814,8 @@ EXPORT_SYMBOL_GPL(freq_qos_update_request);
  */
 int freq_qos_remove_request(struct freq_qos_request *req)
 {
+	int ret;
+
 	if (!req)
 		return -EINVAL;

@@ -821,7 +823,11 @@ int freq_qos_remove_request(struct freq_qos_request *req)
 		 "%s() called for unknown object\n", __func__))
 		return -EINVAL;

-	return freq_qos_apply(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+	ret = freq_qos_apply(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+	req->qos = NULL;
+	req->type = 0;
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(freq_qos_remove_request);
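Editor's note: clearing req->qos and req->type is what makes the request object reusable; without it, a later freq_qos_add_request() on a removed request still sees the stale back-pointer and warns. A hedged usage sketch (assuming `qos` points at an initialized struct freq_qos_constraints):

    struct freq_qos_request req = { 0 };

    freq_qos_add_request(qos, &req, FREQ_QOS_MAX, 2000000);
    freq_qos_remove_request(&req);  /* now leaves req->qos == NULL */
    /* re-adding the same object no longer trips the sanity check */
    freq_qos_add_request(qos, &req, FREQ_QOS_MAX, 1500000);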
View File

@@ -1065,7 +1065,7 @@ uclamp_update_active(struct task_struct *p, enum uclamp_id clamp_id)
 	 * affecting a valid clamp bucket, the next time it's enqueued,
 	 * it will already see the updated clamp bucket value.
 	 */
-	if (!p->uclamp[clamp_id].active) {
+	if (p->uclamp[clamp_id].active) {
 		uclamp_rq_dec_id(rq, p, clamp_id);
 		uclamp_rq_inc_id(rq, p, clamp_id);
 	}

@@ -6019,10 +6019,11 @@ void init_idle(struct task_struct *idle, int cpu)
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long flags;

+	__sched_fork(0, idle);
+
 	raw_spin_lock_irqsave(&idle->pi_lock, flags);
 	raw_spin_lock(&rq->lock);

-	__sched_fork(0, idle);
 	idle->state = TASK_RUNNING;
 	idle->se.exec_start = sched_clock();
 	idle->flags |= PF_IDLE;
View File

@@ -7547,6 +7547,19 @@ static void update_blocked_averages(int cpu)
 	rq_lock_irqsave(rq, &rf);
 	update_rq_clock(rq);

+	/*
+	 * update_cfs_rq_load_avg() can call cpufreq_update_util(). Make sure
+	 * that RT, DL and IRQ signals have been updated before updating CFS.
+	 */
+	curr_class = rq->curr->sched_class;
+	update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class);
+	update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class);
+	update_irq_load_avg(rq, 0);
+
+	/* Don't need periodic decay once load/util_avg are null */
+	if (others_have_blocked(rq))
+		done = false;
+
 	/*
 	 * Iterates the task_group tree in a bottom up fashion, see
 	 * list_add_leaf_cfs_rq() for details.

@@ -7574,14 +7587,6 @@ static void update_blocked_averages(int cpu)
 			done = false;
 	}

-	curr_class = rq->curr->sched_class;
-	update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class);
-	update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class);
-	update_irq_load_avg(rq, 0);
-	/* Don't need periodic decay once load/util_avg are null */
-	if (others_have_blocked(rq))
-		done = false;
-
 	update_blocked_load_status(rq, !done);
 	rq_unlock_irqrestore(rq, &rf);
 }

@@ -7642,12 +7647,18 @@ static inline void update_blocked_averages(int cpu)
 	rq_lock_irqsave(rq, &rf);
 	update_rq_clock(rq);
-	update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq);

+	/*
+	 * update_cfs_rq_load_avg() can call cpufreq_update_util(). Make sure
+	 * that RT, DL and IRQ signals have been updated before updating CFS.
+	 */
 	curr_class = rq->curr->sched_class;
 	update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class);
 	update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class);
 	update_irq_load_avg(rq, 0);
+
+	update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq);
+
 	update_blocked_load_status(rq, cfs_rq_has_blocked(cfs_rq) || others_have_blocked(rq));
 	rq_unlock_irqrestore(rq, &rf);
 }
View File

@@ -885,13 +885,13 @@ static int remove_stable_node(struct stable_node *stable_node)
 		return 0;
 	}

-	if (WARN_ON_ONCE(page_mapped(page))) {
-		/*
-		 * This should not happen: but if it does, just refuse to let
-		 * merge_across_nodes be switched - there is no need to panic.
-		 */
-		err = -EBUSY;
-	} else {
+	/*
+	 * Page could be still mapped if this races with __mmput() running in
+	 * between ksm_exit() and exit_mmap(). Just refuse to let
+	 * merge_across_nodes/max_page_sharing be switched.
+	 */
+	err = -EBUSY;
+	if (!page_mapped(page)) {
 		/*
 		 * The stable node did not yet appear stale to get_ksm_page(),
 		 * since that allows for an unmapped ksm page to be recognized
View File

@@ -331,7 +331,7 @@ static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
 				     unsigned long end_pfn)
 {
 	for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SUBSECTION) {
-		if (unlikely(!pfn_valid(start_pfn)))
+		if (unlikely(!pfn_to_online_page(start_pfn)))
 			continue;

 		if (unlikely(pfn_to_nid(start_pfn) != nid))

@@ -356,7 +356,7 @@ static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
 	/* pfn is the end pfn of a memory section. */
 	pfn = end_pfn - 1;
 	for (; pfn >= start_pfn; pfn -= PAGES_PER_SUBSECTION) {
-		if (unlikely(!pfn_valid(pfn)))
+		if (unlikely(!pfn_to_online_page(pfn)))
 			continue;

 		if (unlikely(pfn_to_nid(pfn) != nid))

@@ -415,7 +415,7 @@ static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
 	 */
 	pfn = zone_start_pfn;
 	for (; pfn < zone_end_pfn; pfn += PAGES_PER_SUBSECTION) {
-		if (unlikely(!pfn_valid(pfn)))
+		if (unlikely(!pfn_to_online_page(pfn)))
 			continue;

 		if (page_zone(pfn_to_page(pfn)) != zone)

@@ -471,6 +471,16 @@ static void __remove_zone(struct zone *zone, unsigned long start_pfn,
 	struct pglist_data *pgdat = zone->zone_pgdat;
 	unsigned long flags;

+#ifdef CONFIG_ZONE_DEVICE
+	/*
+	 * Zone shrinking code cannot properly deal with ZONE_DEVICE. So
+	 * we will not try to shrink the zones - which is okay as
+	 * set_zone_contiguous() cannot deal with ZONE_DEVICE either way.
+	 */
+	if (zone_idx(zone) == ZONE_DEVICE)
+		return;
+#endif
+
 	pgdat_resize_lock(zone->zone_pgdat, &flags);
 	shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
 	update_pgdat_span(pgdat);
View File

@@ -89,7 +89,7 @@ static void unlink_clip_vcc(struct clip_vcc *clip_vcc)
 	struct clip_vcc **walk;

 	if (!entry) {
-		pr_crit("!clip_vcc->entry (clip_vcc %p)\n", clip_vcc);
+		pr_err("!clip_vcc->entry (clip_vcc %p)\n", clip_vcc);
 		return;
 	}
 	netif_tx_lock_bh(entry->neigh->dev);	/* block clip_start_xmit() */

@@ -109,10 +109,10 @@ static void unlink_clip_vcc(struct clip_vcc *clip_vcc)
 		error = neigh_update(entry->neigh, NULL, NUD_NONE,
 				     NEIGH_UPDATE_F_ADMIN, 0);
 		if (error)
-			pr_crit("neigh_update failed with %d\n", error);
+			pr_err("neigh_update failed with %d\n", error);
 		goto out;
 	}
-	pr_crit("ATMARP: failed (entry %p, vcc 0x%p)\n", entry, clip_vcc);
+	pr_err("ATMARP: failed (entry %p, vcc 0x%p)\n", entry, clip_vcc);
 out:
 	netif_tx_unlock_bh(entry->neigh->dev);
 }
View File

@@ -923,21 +923,23 @@ static int rx_queue_add_kobject(struct net_device *dev, int index)
 	error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
 				     "rx-%u", index);
 	if (error)
-		return error;
+		goto err;

 	dev_hold(queue->dev);

 	if (dev->sysfs_rx_queue_group) {
 		error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group);
-		if (error) {
-			kobject_put(kobj);
-			return error;
-		}
+		if (error)
+			goto err;
 	}

 	kobject_uevent(kobj, KOBJ_ADD);

 	return error;
+
+err:
+	kobject_put(kobj);
+	return error;
 }
 #endif /* CONFIG_SYSFS */

@@ -1461,21 +1463,22 @@ static int netdev_queue_add_kobject(struct net_device *dev, int index)
 	error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
 				     "tx-%u", index);
 	if (error)
-		return error;
+		goto err;

 	dev_hold(queue->dev);

 #ifdef CONFIG_BQL
 	error = sysfs_create_group(kobj, &dql_group);
-	if (error) {
-		kobject_put(kobj);
-		return error;
-	}
+	if (error)
+		goto err;
 #endif

 	kobject_uevent(kobj, KOBJ_ADD);

 	return 0;
+
+err:
+	kobject_put(kobj);
+	return error;
 }
 #endif /* CONFIG_SYSFS */
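Editor's note: both hunks apply the same kobject rule: once kobject_init_and_add() has run, even unsuccessfully, the object holds a reference that must be dropped with kobject_put(), never leaked or freed directly. A minimal sketch of the pattern (hypothetical function and name format, not this file's code):

    static int add_one_kobject(struct kobject *kobj, struct kobj_type *ktype,
                               int index)
    {
            int error = kobject_init_and_add(kobj, ktype, NULL, "q-%d", index);

            if (error)
                    kobject_put(kobj); /* drops the ref, runs ->release() */
            return error;
    }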
View File

@@ -2251,6 +2251,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
 	if (tb[IFLA_VF_MAC]) {
 		struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]);

+		if (ivm->vf >= INT_MAX)
+			return -EINVAL;
 		err = -EOPNOTSUPP;
 		if (ops->ndo_set_vf_mac)
 			err = ops->ndo_set_vf_mac(dev, ivm->vf,

@@ -2262,6 +2264,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
 	if (tb[IFLA_VF_VLAN]) {
 		struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]);

+		if (ivv->vf >= INT_MAX)
+			return -EINVAL;
 		err = -EOPNOTSUPP;
 		if (ops->ndo_set_vf_vlan)
 			err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan,

@@ -2294,6 +2298,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
 		if (len == 0)
 			return -EINVAL;

+		if (ivvl[0]->vf >= INT_MAX)
+			return -EINVAL;
 		err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan,
 					   ivvl[0]->qos, ivvl[0]->vlan_proto);
 		if (err < 0)

@@ -2304,6 +2310,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
 		struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]);
 		struct ifla_vf_info ivf;

+		if (ivt->vf >= INT_MAX)
+			return -EINVAL;
 		err = -EOPNOTSUPP;
 		if (ops->ndo_get_vf_config)
 			err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf);

@@ -2322,6 +2330,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
 	if (tb[IFLA_VF_RATE]) {
 		struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]);

+		if (ivt->vf >= INT_MAX)
+			return -EINVAL;
 		err = -EOPNOTSUPP;
 		if (ops->ndo_set_vf_rate)
 			err = ops->ndo_set_vf_rate(dev, ivt->vf,

@@ -2334,6 +2344,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
 	if (tb[IFLA_VF_SPOOFCHK]) {
 		struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]);

+		if (ivs->vf >= INT_MAX)
+			return -EINVAL;
 		err = -EOPNOTSUPP;
 		if (ops->ndo_set_vf_spoofchk)
 			err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,

@@ -2345,6 +2357,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
 	if (tb[IFLA_VF_LINK_STATE]) {
 		struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]);

+		if (ivl->vf >= INT_MAX)
+			return -EINVAL;
 		err = -EOPNOTSUPP;
 		if (ops->ndo_set_vf_link_state)
 			err = ops->ndo_set_vf_link_state(dev, ivl->vf,

@@ -2358,6 +2372,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
 		err = -EOPNOTSUPP;
 		ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]);
+		if (ivrssq_en->vf >= INT_MAX)
+			return -EINVAL;
 		if (ops->ndo_set_vf_rss_query_en)
 			err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf,
 							   ivrssq_en->setting);

@@ -2368,6 +2384,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
 	if (tb[IFLA_VF_TRUST]) {
 		struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]);

+		if (ivt->vf >= INT_MAX)
+			return -EINVAL;
 		err = -EOPNOTSUPP;
 		if (ops->ndo_set_vf_trust)
 			err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting);

@@ -2378,15 +2396,18 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
 	if (tb[IFLA_VF_IB_NODE_GUID]) {
 		struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_NODE_GUID]);

+		if (ivt->vf >= INT_MAX)
+			return -EINVAL;
 		if (!ops->ndo_set_vf_guid)
 			return -EOPNOTSUPP;
 		return handle_vf_guid(dev, ivt, IFLA_VF_IB_NODE_GUID);
 	}

 	if (tb[IFLA_VF_IB_PORT_GUID]) {
 		struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_PORT_GUID]);

+		if (ivt->vf >= INT_MAX)
+			return -EINVAL;
 		if (!ops->ndo_set_vf_guid)
 			return -EOPNOTSUPP;
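Editor's note: every check added above guards the same conversion: the netlink attribute carries the VF index as a u32, while the driver ndo_set_vf_*() callbacks take it as an int. A standalone sketch of why values at or above INT_MAX must be rejected:

    #include <errno.h>
    #include <limits.h>
    #include <stdint.h>

    static int driver_set_vf(int vf); /* stand-in for an ndo_set_vf_*() op */

    static int set_vf_checked(uint32_t vf)
    {
            if (vf >= INT_MAX)
                    return -EINVAL; /* would wrap negative in the cast */
            return driver_set_vf((int)vf);
    }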
View File

@@ -1041,7 +1041,7 @@ static struct ctl_table ipv4_net_table[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_fib_multipath_hash_policy,
 		.extra1		= SYSCTL_ZERO,
-		.extra2		= SYSCTL_ONE,
+		.extra2		= &two,
 	},
 #endif
 	{
View File

@@ -1297,6 +1297,27 @@ out:

 #define UDP_SKB_IS_STATELESS 0x80000000

+/* all head states (dst, sk, nf conntrack) except skb extensions are
+ * cleared by udp_rcv().
+ *
+ * We need to preserve secpath, if present, to eventually process
+ * IP_CMSG_PASSSEC at recvmsg() time.
+ *
+ * Other extensions can be cleared.
+ */
+static bool udp_try_make_stateless(struct sk_buff *skb)
+{
+	if (!skb_has_extensions(skb))
+		return true;
+
+	if (!secpath_exists(skb)) {
+		skb_ext_reset(skb);
+		return true;
+	}
+
+	return false;
+}
+
 static void udp_set_dev_scratch(struct sk_buff *skb)
 {
 	struct udp_dev_scratch *scratch = udp_skb_scratch(skb);

@@ -1308,11 +1329,7 @@ static void udp_set_dev_scratch(struct sk_buff *skb)
 	scratch->csum_unnecessary = !!skb_csum_unnecessary(skb);
 	scratch->is_linear = !skb_is_nonlinear(skb);
 #endif
-	/* all head states execept sp (dst, sk, nf) are always cleared by
-	 * udp_rcv() and we need to preserve secpath, if present, to eventually
-	 * process IP_CMSG_PASSSEC at recvmsg() time
-	 */
-	if (likely(!skb_sec_path(skb)))
+	if (udp_try_make_stateless(skb))
 		scratch->_tsize_state |= UDP_SKB_IS_STATELESS;
 }
View File

@@ -363,8 +363,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
 		break;

 	case IPV6_TRANSPARENT:
-		if (valbool && !ns_capable(net->user_ns, CAP_NET_ADMIN) &&
-		    !ns_capable(net->user_ns, CAP_NET_RAW)) {
+		if (valbool && !ns_capable(net->user_ns, CAP_NET_RAW) &&
+		    !ns_capable(net->user_ns, CAP_NET_ADMIN)) {
 			retv = -EPERM;
 			break;
 		}
View File

@@ -634,7 +634,7 @@ static void rt6_probe(struct fib6_nh *fib6_nh)
 	 * Router Reachability Probe MUST be rate-limited
 	 * to no more than one per minute.
 	 */
-	if (fib6_nh->fib_nh_gw_family)
+	if (!fib6_nh->fib_nh_gw_family)
 		return;

 	nh_gw = &fib6_nh->fib_nh_gw6;
View File

@@ -43,7 +43,7 @@ static struct tcf_pedit_key_ex *tcf_pedit_keys_ex_parse(struct nlattr *nla,
 	int err = -EINVAL;
 	int rem;

-	if (!nla || !n)
+	if (!nla)
 		return NULL;

 	keys_ex = kcalloc(n, sizeof(*k), GFP_KERNEL);

@@ -171,6 +171,10 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
 	}

 	parm = nla_data(pattr);
+	if (!parm->nkeys) {
+		NL_SET_ERR_MSG_MOD(extack, "Pedit requires keys to be passed");
+		return -EINVAL;
+	}
 	ksize = parm->nkeys * sizeof(struct tc_pedit_key);
 	if (nla_len(pattr) < sizeof(*parm) + ksize) {
 		NL_SET_ERR_MSG_ATTR(extack, pattr, "Length of TCA_PEDIT_PARMS or TCA_PEDIT_PARMS_EX pedit attribute is invalid");

@@ -184,12 +188,6 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
 	index = parm->index;
 	err = tcf_idr_check_alloc(tn, &index, a, bind);
 	if (!err) {
-		if (!parm->nkeys) {
-			tcf_idr_cleanup(tn, index);
-			NL_SET_ERR_MSG_MOD(extack, "Pedit requires keys to be passed");
-			ret = -EINVAL;
-			goto out_free;
-		}
 		ret = tcf_idr_create(tn, index, est, a,
 				     &act_pedit_ops, bind, false, 0);
 		if (ret) {
View File

@@ -237,6 +237,10 @@ static int tunnel_key_copy_opts(const struct nlattr *nla, u8 *dst,
 		if (opt_len < 0)
 			return opt_len;
 		opts_len += opt_len;
+		if (opts_len > IP_TUNNEL_OPTS_MAX) {
+			NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size");
+			return -EINVAL;
+		}
 		if (dst) {
 			dst_len -= opt_len;
 			dst += opt_len;
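Editor's note: the added check caps the running total inside the parsing loop, before any further option is processed. A hedged standalone sketch of that cumulative-bound pattern (hypothetical next_opt_len() helper, not this file's code):

    #include <errno.h>

    static int next_opt_len(const unsigned char *p, int remaining);

    static int copy_opts_bounded(const unsigned char *p, int len, int max_total)
    {
            int total = 0;

            while (len > 0) {
                    int one = next_opt_len(p, len); /* length of next option */

                    if (one <= 0)
                            return -EINVAL;
                    total += one;
                    if (total > max_total)
                            return -EINVAL; /* reject before copying more */
                    p += one;
                    len -= one;
            }
            return total;
    }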
View File

@@ -922,7 +922,7 @@ static int taprio_parse_mqprio_opt(struct net_device *dev,
 	}

 	/* Verify priority mapping uses valid tcs */
-	for (i = 0; i < TC_BITMASK + 1; i++) {
+	for (i = 0; i <= TC_BITMASK; i++) {
 		if (qopt->prio_tc_map[i] >= qopt->num_tc) {
 			NL_SET_ERR_MSG(extack, "Invalid traffic class in priority to traffic class mapping");
 			return -EINVAL;

@@ -1347,6 +1347,26 @@ out:
 	return err;
 }

+static int taprio_mqprio_cmp(const struct net_device *dev,
+			     const struct tc_mqprio_qopt *mqprio)
+{
+	int i;
+
+	if (!mqprio || mqprio->num_tc != dev->num_tc)
+		return -1;
+
+	for (i = 0; i < mqprio->num_tc; i++)
+		if (dev->tc_to_txq[i].count != mqprio->count[i] ||
+		    dev->tc_to_txq[i].offset != mqprio->offset[i])
+			return -1;
+
+	for (i = 0; i <= TC_BITMASK; i++)
+		if (dev->prio_tc_map[i] != mqprio->prio_tc_map[i])
+			return -1;
+
+	return 0;
+}
+
 static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
 			 struct netlink_ext_ack *extack)
 {

@@ -1398,6 +1418,10 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
 	admin = rcu_dereference(q->admin_sched);
 	rcu_read_unlock();

+	/* no changes - no new mqprio settings */
+	if (!taprio_mqprio_cmp(dev, mqprio))
+		mqprio = NULL;
+
 	if (mqprio && (oper || admin)) {
 		NL_SET_ERR_MSG(extack, "Changing the traffic mapping of a running schedule is not supported");
 		err = -ENOTSUPP;

@@ -1455,7 +1479,7 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
 					    mqprio->offset[i]);

 		/* Always use supplied priority mappings */
-		for (i = 0; i < TC_BITMASK + 1; i++)
+		for (i = 0; i <= TC_BITMASK; i++)
 			netdev_set_prio_tc_map(dev, i,
 					       mqprio->prio_tc_map[i]);
 	}
View File

@@ -861,6 +861,7 @@ static int __init tls_register(void)
 	tls_sw_proto_ops = inet_stream_ops;
 	tls_sw_proto_ops.splice_read = tls_sw_splice_read;
+	tls_sw_proto_ops.sendpage_locked = tls_sw_sendpage_locked,

 	tls_device_init();
 	tcp_register_ulp(&tcp_tls_ulp_ops);
View File

@@ -1209,6 +1209,17 @@ sendpage_end:
 	return copied ? copied : ret;
 }

+int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
+			   int offset, size_t size, int flags)
+{
+	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
+		      MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY |
+		      MSG_NO_SHARED_FRAGS))
+		return -ENOTSUPP;
+
+	return tls_sw_do_sendpage(sk, page, offset, size, flags);
+}
+
 int tls_sw_sendpage(struct sock *sk, struct page *page,
 		    int offset, size_t size, int flags)
 {
View File

@@ -1,3 +1,4 @@
+gpio-utils-y += gpio-utils.o
 lsgpio-y += lsgpio.o gpio-utils.o
 gpio-hammer-y += gpio-hammer.o gpio-utils.o
 gpio-event-mon-y += gpio-event-mon.o gpio-utils.o
View File

@@ -35,11 +35,15 @@ $(OUTPUT)include/linux/gpio.h: ../../include/uapi/linux/gpio.h

 prepare: $(OUTPUT)include/linux/gpio.h

+GPIO_UTILS_IN := $(output)gpio-utils-in.o
+$(GPIO_UTILS_IN): prepare FORCE
+	$(Q)$(MAKE) $(build)=gpio-utils
+
 #
 # lsgpio
 #
 LSGPIO_IN := $(OUTPUT)lsgpio-in.o
-$(LSGPIO_IN): prepare FORCE
+$(LSGPIO_IN): prepare FORCE $(OUTPUT)gpio-utils-in.o
 	$(Q)$(MAKE) $(build)=lsgpio
 $(OUTPUT)lsgpio: $(LSGPIO_IN)
 	$(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@

@@ -48,7 +52,7 @@ $(OUTPUT)lsgpio: $(LSGPIO_IN)
 # gpio-hammer
 #
 GPIO_HAMMER_IN := $(OUTPUT)gpio-hammer-in.o
-$(GPIO_HAMMER_IN): prepare FORCE
+$(GPIO_HAMMER_IN): prepare FORCE $(OUTPUT)gpio-utils-in.o
 	$(Q)$(MAKE) $(build)=gpio-hammer
 $(OUTPUT)gpio-hammer: $(GPIO_HAMMER_IN)
 	$(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@

@@ -57,7 +61,7 @@ $(OUTPUT)gpio-hammer: $(GPIO_HAMMER_IN)
 # gpio-event-mon
 #
 GPIO_EVENT_MON_IN := $(OUTPUT)gpio-event-mon-in.o
-$(GPIO_EVENT_MON_IN): prepare FORCE
+$(GPIO_EVENT_MON_IN): prepare FORCE $(OUTPUT)gpio-utils-in.o
 	$(Q)$(MAKE) $(build)=gpio-event-mon
 $(OUTPUT)gpio-event-mon: $(GPIO_EVENT_MON_IN)
 	$(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@