Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

No conflicts.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Jakub Kicinski 2022-01-05 14:36:10 -08:00
commit b9adba350a
88 changed files with 934 additions and 420 deletions

View File

@ -11,9 +11,11 @@ systems. Some systems use variants that don't meet branding requirements,
and so are not advertised as being I2C but come under different names,
e.g. TWI (Two Wire Interface), IIC.
The official I2C specification is the `"I2C-bus specification and user
manual" (UM10204) <https://www.nxp.com/docs/en/user-guide/UM10204.pdf>`_
published by NXP Semiconductors.
The latest official I2C specification is the `"I2C-bus specification and user
manual" (UM10204) <https://www.nxp.com/webapp/Download?colCode=UM10204>`_
published by NXP Semiconductors. However, you need to log in to the site to
access the PDF. An older version of the specification (revision 6) is archived
`here <https://web.archive.org/web/20210813122132/https://www.nxp.com/docs/en/user-guide/UM10204.pdf>`_.
SMBus (System Management Bus) is based on the I2C protocol, and is mostly
a subset of I2C protocols and signaling. Many I2C devices will work on an
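
A minimal, hypothetical sketch of the I2C/SMBus overlap described above (the device, register offset and function name are illustrative assumptions, not taken from this commit): because SMBus is largely a subset of I2C, a kernel client driver can often read such a device through the SMBus helpers.

#include <linux/i2c.h>

/* Read one byte from a hypothetical register 0x00 of an I2C/SMBus device. */
static int example_read_reg(struct i2c_client *client)
{
	s32 val = i2c_smbus_read_byte_data(client, 0x00);

	if (val < 0)
		return val;	/* bus or protocol error */

	dev_info(&client->dev, "register 0x00 = 0x%02x\n", (u8)val);
	return 0;
}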

View File

@ -3769,7 +3769,8 @@ S: Supported
F: drivers/net/wireless/broadcom/brcm80211/
BROADCOM BRCMSTB GPIO DRIVER
M: Gregory Fong <gregory.0xf0@gmail.com>
M: Doug Berger <opendmb@gmail.com>
M: Florian Fainelli <f.fainelli@gmail.com>
L: bcm-kernel-feedback-list@broadcom.com
S: Supported
F: Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.txt

View File

@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 16
SUBLEVEL = 0
EXTRAVERSION = -rc7
EXTRAVERSION = -rc8
NAME = Gobble Gobble
# *DOCUMENTATION*

View File

@ -68,7 +68,7 @@ static const char * const sym_regex_kernel[S_NSYMTYPES] = {
"(__parainstructions|__alt_instructions)(_end)?|"
"(__iommu_table|__apicdrivers|__smp_locks)(_end)?|"
"__(start|end)_pci_.*|"
#if CONFIG_FW_LOADER_BUILTIN
#if CONFIG_FW_LOADER
"__(start|end)_builtin_fw|"
#endif
"__(start|stop)___ksymtab(_gpl)?|"

View File

@ -395,7 +395,7 @@ static void aspeed_sgpio_irq_handler(struct irq_desc *desc)
reg = ioread32(bank_reg(data, bank, reg_irq_status));
for_each_set_bit(p, &reg, 32)
generic_handle_domain_irq(gc->irq.domain, i * 32 + p * 2);
generic_handle_domain_irq(gc->irq.domain, (i * 32 + p) * 2);
}
chained_irq_exit(ic, desc);

View File

@ -3166,6 +3166,12 @@ static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
{
switch (asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
case CHIP_HAINAN:
#endif
case CHIP_TOPAZ:
/* chips with no display hardware */
return false;
#if defined(CONFIG_DRM_AMD_DC)
case CHIP_TAHITI:
case CHIP_PITCAIRN:
@ -4461,7 +4467,7 @@ int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
struct amdgpu_reset_context *reset_context)
{
int i, j, r = 0;
int i, r = 0;
struct amdgpu_job *job = NULL;
bool need_full_reset =
test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
@ -4483,15 +4489,8 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
/*clear job fence from fence drv to avoid force_completion
*leave NULL and vm flush fence in fence drv */
for (j = 0; j <= ring->fence_drv.num_fences_mask; j++) {
struct dma_fence *old, **ptr;
amdgpu_fence_driver_clear_job_fences(ring);
ptr = &ring->fence_drv.fences[j];
old = rcu_dereference_protected(*ptr, 1);
if (old && test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &old->flags)) {
RCU_INIT_POINTER(*ptr, NULL);
}
}
/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
amdgpu_fence_driver_force_completion(ring);
}

View File

@ -526,10 +526,15 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
}
}
union gc_info {
struct gc_info_v1_0 v1;
struct gc_info_v2_0 v2;
};
int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
{
struct binary_header *bhdr;
struct gc_info_v1_0 *gc_info;
union gc_info *gc_info;
if (!adev->mman.discovery_bin) {
DRM_ERROR("ip discovery uninitialized\n");
@ -537,28 +542,55 @@ int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
}
bhdr = (struct binary_header *)adev->mman.discovery_bin;
gc_info = (struct gc_info_v1_0 *)(adev->mman.discovery_bin +
gc_info = (union gc_info *)(adev->mman.discovery_bin +
le16_to_cpu(bhdr->table_list[GC].offset));
adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->gc_num_se);
adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->gc_num_wgp0_per_sa) +
le32_to_cpu(gc_info->gc_num_wgp1_per_sa));
adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->gc_num_sa_per_se);
adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->gc_num_rb_per_se);
adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->gc_num_gl2c);
adev->gfx.config.max_gprs = le32_to_cpu(gc_info->gc_num_gprs);
adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->gc_num_max_gs_thds);
adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->gc_gs_table_depth);
adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->gc_gsprim_buff_depth);
adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->gc_double_offchip_lds_buffer);
adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->gc_wave_size);
adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->gc_max_waves_per_simd);
adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->gc_max_scratch_slots_per_cu);
adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->gc_lds_size);
adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->gc_num_sc_per_se) /
le32_to_cpu(gc_info->gc_num_sa_per_se);
adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->gc_num_packer_per_sc);
switch (gc_info->v1.header.version_major) {
case 1:
adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se);
adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) +
le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa));
adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se);
adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c);
adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs);
adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds);
adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth);
adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth);
adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer);
adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size);
adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd);
adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu);
adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size);
adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) /
le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc);
break;
case 2:
adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se);
adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh);
adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se);
adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs);
adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs);
adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds);
adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth);
adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth);
adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer);
adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size);
adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd);
adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu);
adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size);
adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) /
le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc);
break;
default:
dev_err(adev->dev,
"Unhandled GC info table %d.%d\n",
gc_info->v1.header.version_major,
gc_info->v1.header.version_minor);
return -EINVAL;
}
return 0;
}

View File

@ -384,7 +384,7 @@ amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
struct amdgpu_vm_bo_base *bo_base;
int r;
if (bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
if (!bo->tbo.resource || bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
return;
r = ttm_bo_validate(&bo->tbo, &placement, &ctx);

View File

@ -328,10 +328,11 @@ module_param_named(aspm, amdgpu_aspm, int, 0444);
/**
* DOC: runpm (int)
* Override for runtime power management control for dGPUs in PX/HG laptops. The amdgpu driver can dynamically power down
* the dGPU on PX/HG laptops when it is idle. The default is -1 (auto enable). Setting the value to 0 disables this functionality.
* Override for runtime power management control for dGPUs. The amdgpu driver can dynamically power down
* the dGPUs when they are idle if supported. The default is -1 (auto enable).
* Setting the value to 0 disables this functionality.
*/
MODULE_PARM_DESC(runpm, "PX runtime pm (2 = force enable with BAMACO, 1 = force enable with BACO, 0 = disable, -1 = PX only default)");
MODULE_PARM_DESC(runpm, "PX runtime pm (2 = force enable with BAMACO, 1 = force enable with BACO, 0 = disable, -1 = auto)");
module_param_named(runpm, amdgpu_runtime_pm, int, 0444);
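
Illustrative usage (an assumption, not part of this patch): since runpm is a regular module parameter, it could be forced off at boot with amdgpu.runpm=0 on the kernel command line, or through a modprobe configuration file, e.g.:

options amdgpu runpm=0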
/**
@ -2153,7 +2154,10 @@ static int amdgpu_pmops_suspend(struct device *dev)
adev->in_s3 = true;
r = amdgpu_device_suspend(drm_dev, true);
adev->in_s3 = false;
if (r)
return r;
if (!adev->in_s0ix)
r = amdgpu_asic_reset(adev);
return r;
}
@ -2234,12 +2238,27 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
if (amdgpu_device_supports_px(drm_dev))
drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
/*
* By setting mp1_state as PP_MP1_STATE_UNLOAD, MP1 will do some
* proper cleanups and put itself into a state ready for PNP. That
* can address some random resume failures observed on BOCO-capable
* platforms.
* TODO: this may also be needed for PX-capable platforms.
*/
if (amdgpu_device_supports_boco(drm_dev))
adev->mp1_state = PP_MP1_STATE_UNLOAD;
ret = amdgpu_device_suspend(drm_dev, false);
if (ret) {
adev->in_runpm = false;
if (amdgpu_device_supports_boco(drm_dev))
adev->mp1_state = PP_MP1_STATE_NONE;
return ret;
}
if (amdgpu_device_supports_boco(drm_dev))
adev->mp1_state = PP_MP1_STATE_NONE;
if (amdgpu_device_supports_px(drm_dev)) {
/* Only need to handle PCI state in the driver for ATPX
* PCI core handles it for _PR3.

View File

@ -77,11 +77,13 @@ void amdgpu_fence_slab_fini(void)
* Cast helper
*/
static const struct dma_fence_ops amdgpu_fence_ops;
static const struct dma_fence_ops amdgpu_job_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
{
struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);
if (__f->base.ops == &amdgpu_fence_ops)
if (__f->base.ops == &amdgpu_fence_ops ||
__f->base.ops == &amdgpu_job_fence_ops)
return __f;
return NULL;
@ -158,19 +160,18 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amd
}
seq = ++ring->fence_drv.sync_seq;
if (job != NULL && job->job_run_counter) {
if (job && job->job_run_counter) {
/* reinit seq for resubmitted jobs */
fence->seqno = seq;
} else {
dma_fence_init(fence, &amdgpu_fence_ops,
&ring->fence_drv.lock,
adev->fence_context + ring->idx,
seq);
}
if (job != NULL) {
/* mark this fence has a parent job */
set_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &fence->flags);
if (job)
dma_fence_init(fence, &amdgpu_job_fence_ops,
&ring->fence_drv.lock,
adev->fence_context + ring->idx, seq);
else
dma_fence_init(fence, &amdgpu_fence_ops,
&ring->fence_drv.lock,
adev->fence_context + ring->idx, seq);
}
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
@ -620,6 +621,25 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
}
}
/**
* amdgpu_fence_driver_clear_job_fences - clear job embedded fences of ring
*
* @ring: ring whose job-embedded fences should be cleared
*
*/
void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring)
{
int i;
struct dma_fence *old, **ptr;
for (i = 0; i <= ring->fence_drv.num_fences_mask; i++) {
ptr = &ring->fence_drv.fences[i];
old = rcu_dereference_protected(*ptr, 1);
if (old && old->ops == &amdgpu_job_fence_ops)
RCU_INIT_POINTER(*ptr, NULL);
}
}
/**
* amdgpu_fence_driver_force_completion - force signal latest fence of ring
*
@ -643,16 +663,14 @@ static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
{
struct amdgpu_ring *ring;
return (const char *)to_amdgpu_fence(f)->ring->name;
}
if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
static const char *amdgpu_job_fence_get_timeline_name(struct dma_fence *f)
{
struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
ring = to_amdgpu_ring(job->base.sched);
} else {
ring = to_amdgpu_fence(f)->ring;
}
return (const char *)ring->name;
return (const char *)to_amdgpu_ring(job->base.sched)->name;
}
/**
@ -665,18 +683,25 @@ static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
*/
static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
{
struct amdgpu_ring *ring;
if (!timer_pending(&to_amdgpu_fence(f)->ring->fence_drv.fallback_timer))
amdgpu_fence_schedule_fallback(to_amdgpu_fence(f)->ring);
if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
return true;
}
ring = to_amdgpu_ring(job->base.sched);
} else {
ring = to_amdgpu_fence(f)->ring;
}
/**
* amdgpu_job_fence_enable_signaling - enable signalling on job fence
* @f: fence
*
* This is similar to amdgpu_fence_enable_signaling above, but it
* only handles the job-embedded fence.
*/
static bool amdgpu_job_fence_enable_signaling(struct dma_fence *f)
{
struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
if (!timer_pending(&ring->fence_drv.fallback_timer))
amdgpu_fence_schedule_fallback(ring);
if (!timer_pending(&to_amdgpu_ring(job->base.sched)->fence_drv.fallback_timer))
amdgpu_fence_schedule_fallback(to_amdgpu_ring(job->base.sched));
return true;
}
@ -692,19 +717,23 @@ static void amdgpu_fence_free(struct rcu_head *rcu)
{
struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
/* free job if fence has a parent job */
struct amdgpu_job *job;
job = container_of(f, struct amdgpu_job, hw_fence);
kfree(job);
} else {
/* free fence_slab if it's separated fence*/
struct amdgpu_fence *fence;
kmem_cache_free(amdgpu_fence_slab, to_amdgpu_fence(f));
}
fence = to_amdgpu_fence(f);
kmem_cache_free(amdgpu_fence_slab, fence);
}
/**
* amdgpu_job_fence_free - free up the job with embedded fence
*
* @rcu: RCU callback head
*
* Free up the job with embedded fence after the RCU grace period.
*/
static void amdgpu_job_fence_free(struct rcu_head *rcu)
{
struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
/* free job if fence has a parent job */
kfree(container_of(f, struct amdgpu_job, hw_fence));
}
/**
@ -720,6 +749,19 @@ static void amdgpu_fence_release(struct dma_fence *f)
call_rcu(&f->rcu, amdgpu_fence_free);
}
/**
* amdgpu_job_fence_release - callback that job embedded fence can be freed
*
* @f: fence
*
* This is similar to amdgpu_fence_release above, but it
* only handles the job-embedded fence.
*/
static void amdgpu_job_fence_release(struct dma_fence *f)
{
call_rcu(&f->rcu, amdgpu_job_fence_free);
}
static const struct dma_fence_ops amdgpu_fence_ops = {
.get_driver_name = amdgpu_fence_get_driver_name,
.get_timeline_name = amdgpu_fence_get_timeline_name,
@ -727,6 +769,12 @@ static const struct dma_fence_ops amdgpu_fence_ops = {
.release = amdgpu_fence_release,
};
static const struct dma_fence_ops amdgpu_job_fence_ops = {
.get_driver_name = amdgpu_fence_get_driver_name,
.get_timeline_name = amdgpu_job_fence_get_timeline_name,
.enable_signaling = amdgpu_job_fence_enable_signaling,
.release = amdgpu_job_fence_release,
};
/*
* Fence debugfs

View File

@ -53,9 +53,6 @@ enum amdgpu_ring_priority_level {
#define AMDGPU_FENCE_FLAG_INT (1 << 1)
#define AMDGPU_FENCE_FLAG_TC_WB_ONLY (1 << 2)
/* fence flag bit to indicate the fence is embedded in a job */
#define AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT (DMA_FENCE_FLAG_USER_BITS + 1)
#define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched)
#define AMDGPU_IB_POOL_SIZE (1024 * 1024)
@ -114,6 +111,7 @@ struct amdgpu_fence_driver {
struct dma_fence **fences;
};
void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring);
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,

View File

@ -246,6 +246,13 @@ static int vcn_v1_0_suspend(void *handle)
{
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool idle_work_unexecuted;
idle_work_unexecuted = cancel_delayed_work_sync(&adev->vcn.idle_work);
if (idle_work_unexecuted) {
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_uvd(adev, false);
}
r = vcn_v1_0_hw_fini(adev);
if (r)

View File

@ -158,6 +158,7 @@ static void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
union display_idle_optimization_u idle_info = { 0 };
idle_info.idle_info.df_request_disabled = 1;
idle_info.idle_info.phy_ref_clk_off = 1;
idle_info.idle_info.s0i2_rdy = 1;
dcn31_smu_set_display_idle_optimization(clk_mgr, idle_info.data);
/* update power state */
clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;

View File

@ -3948,9 +3948,6 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_PHY ||
pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
link_enc = pipe_ctx->stream->link->link_enc;
config.dio_output_type = pipe_ctx->stream->link->ep_type;
config.dio_output_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_PHY)
link_enc = pipe_ctx->stream->link->link_enc;
else if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)

View File

@ -78,6 +78,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.get_clock = dcn10_get_clock,
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
.calc_vupdate_position = dcn10_calc_vupdate_position,
.power_down = dce110_power_down,
.set_backlight_level = dce110_set_backlight_level,
.set_abm_immediate_disable = dce110_set_abm_immediate_disable,
.set_pipe = dce110_set_pipe,

View File

@ -1069,7 +1069,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = true,
.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
.pipe_split_policy = MPC_SPLIT_DYNAMIC,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,

View File

@ -603,7 +603,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = true,
.pipe_split_policy = MPC_SPLIT_AVOID,
.pipe_split_policy = MPC_SPLIT_DYNAMIC,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,

View File

@ -874,7 +874,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.clock_trace = true,
.disable_pplib_clock_request = true,
.min_disp_clk_khz = 100000,
.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
.pipe_split_policy = MPC_SPLIT_DYNAMIC,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,

View File

@ -840,7 +840,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = true,
.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
.pipe_split_policy = MPC_SPLIT_DYNAMIC,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,

View File

@ -686,7 +686,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_clock_gate = true,
.disable_pplib_clock_request = true,
.disable_pplib_wm_range = true,
.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
.pipe_split_policy = MPC_SPLIT_DYNAMIC,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,

View File

@ -211,7 +211,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = true,
.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
.pipe_split_policy = MPC_SPLIT_DYNAMIC,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,

View File

@ -193,7 +193,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = true,
.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
.pipe_split_policy = MPC_SPLIT_DYNAMIC,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,

View File

@ -101,6 +101,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
.z10_restore = dcn31_z10_restore,
.z10_save_init = dcn31_z10_save_init,
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
.optimize_pwr_state = dcn21_optimize_pwr_state,
.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
.update_visual_confirm_color = dcn20_update_visual_confirm_color,
};

View File

@ -355,6 +355,14 @@ static const struct dce110_clk_src_regs clk_src_regs[] = {
clk_src_regs(3, D),
clk_src_regs(4, E)
};
/* pll_id is being remapped in dmub; in the driver it is the logical instance */
static const struct dce110_clk_src_regs clk_src_regs_b0[] = {
clk_src_regs(0, A),
clk_src_regs(1, B),
clk_src_regs(2, F),
clk_src_regs(3, G),
clk_src_regs(4, E)
};
static const struct dce110_clk_src_shift cs_shift = {
CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
@ -994,7 +1002,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = false,
.pipe_split_policy = MPC_SPLIT_AVOID,
.pipe_split_policy = MPC_SPLIT_DYNAMIC,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
@ -2276,14 +2284,27 @@ static bool dcn31_resource_construct(
dcn30_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL1,
&clk_src_regs[1], false);
pool->base.clock_sources[DCN31_CLK_SRC_PLL2] =
/*move phypllx_pixclk_resync to dmub next*/
if (dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
pool->base.clock_sources[DCN31_CLK_SRC_PLL2] =
dcn30_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL2,
&clk_src_regs_b0[2], false);
pool->base.clock_sources[DCN31_CLK_SRC_PLL3] =
dcn30_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL3,
&clk_src_regs_b0[3], false);
} else {
pool->base.clock_sources[DCN31_CLK_SRC_PLL2] =
dcn30_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL2,
&clk_src_regs[2], false);
pool->base.clock_sources[DCN31_CLK_SRC_PLL3] =
pool->base.clock_sources[DCN31_CLK_SRC_PLL3] =
dcn30_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL3,
&clk_src_regs[3], false);
}
pool->base.clock_sources[DCN31_CLK_SRC_PLL4] =
dcn30_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL4,

View File

@ -49,4 +49,35 @@ struct resource_pool *dcn31_create_resource_pool(
const struct dc_init_data *init_data,
struct dc *dc);
/*temp: B0 specific before switch to dcn313 headers*/
#ifndef regPHYPLLF_PIXCLK_RESYNC_CNTL
#define regPHYPLLF_PIXCLK_RESYNC_CNTL 0x007e
#define regPHYPLLF_PIXCLK_RESYNC_CNTL_BASE_IDX 1
#define regPHYPLLG_PIXCLK_RESYNC_CNTL 0x005f
#define regPHYPLLG_PIXCLK_RESYNC_CNTL_BASE_IDX 1
//PHYPLLF_PIXCLK_RESYNC_CNTL
#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_RESYNC_ENABLE__SHIFT 0x0
#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DEEP_COLOR_DTO_ENABLE_STATUS__SHIFT 0x1
#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4
#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_ENABLE__SHIFT 0x8
#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_DOUBLE_RATE_ENABLE__SHIFT 0x9
#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_RESYNC_ENABLE_MASK 0x00000001L
#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DEEP_COLOR_DTO_ENABLE_STATUS_MASK 0x00000002L
#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DCCG_DEEP_COLOR_CNTL_MASK 0x00000030L
#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_ENABLE_MASK 0x00000100L
#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_DOUBLE_RATE_ENABLE_MASK 0x00000200L
//PHYPLLG_PIXCLK_RESYNC_CNTL
#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_RESYNC_ENABLE__SHIFT 0x0
#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DEEP_COLOR_DTO_ENABLE_STATUS__SHIFT 0x1
#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4
#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_ENABLE__SHIFT 0x8
#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_DOUBLE_RATE_ENABLE__SHIFT 0x9
#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_RESYNC_ENABLE_MASK 0x00000001L
#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DEEP_COLOR_DTO_ENABLE_STATUS_MASK 0x00000002L
#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DCCG_DEEP_COLOR_CNTL_MASK 0x00000030L
#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_ENABLE_MASK 0x00000100L
#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_DOUBLE_RATE_ENABLE_MASK 0x00000200L
#endif
#endif /* _DCN31_RESOURCE_H_ */

View File

@ -143,6 +143,55 @@ struct gc_info_v1_0 {
uint32_t gc_num_gl2a;
};
struct gc_info_v1_1 {
struct gpu_info_header header;
uint32_t gc_num_se;
uint32_t gc_num_wgp0_per_sa;
uint32_t gc_num_wgp1_per_sa;
uint32_t gc_num_rb_per_se;
uint32_t gc_num_gl2c;
uint32_t gc_num_gprs;
uint32_t gc_num_max_gs_thds;
uint32_t gc_gs_table_depth;
uint32_t gc_gsprim_buff_depth;
uint32_t gc_parameter_cache_depth;
uint32_t gc_double_offchip_lds_buffer;
uint32_t gc_wave_size;
uint32_t gc_max_waves_per_simd;
uint32_t gc_max_scratch_slots_per_cu;
uint32_t gc_lds_size;
uint32_t gc_num_sc_per_se;
uint32_t gc_num_sa_per_se;
uint32_t gc_num_packer_per_sc;
uint32_t gc_num_gl2a;
uint32_t gc_num_tcp_per_sa;
uint32_t gc_num_sdp_interface;
uint32_t gc_num_tcps;
};
struct gc_info_v2_0 {
struct gpu_info_header header;
uint32_t gc_num_se;
uint32_t gc_num_cu_per_sh;
uint32_t gc_num_sh_per_se;
uint32_t gc_num_rb_per_se;
uint32_t gc_num_tccs;
uint32_t gc_num_gprs;
uint32_t gc_num_max_gs_thds;
uint32_t gc_gs_table_depth;
uint32_t gc_gsprim_buff_depth;
uint32_t gc_parameter_cache_depth;
uint32_t gc_double_offchip_lds_buffer;
uint32_t gc_wave_size;
uint32_t gc_max_waves_per_simd;
uint32_t gc_max_scratch_slots_per_cu;
uint32_t gc_lds_size;
uint32_t gc_num_sc_per_se;
uint32_t gc_num_packer_per_sc;
};
typedef struct harvest_info_header {
uint32_t signature; /* Table Signature */
uint32_t version; /* Table Version */

View File

@ -1568,9 +1568,7 @@ static int smu_suspend(void *handle)
smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
/* skip CGPG when in S0ix */
if (smu->is_apu && !adev->in_s0ix)
smu_set_gfx_cgpg(&adev->smu, false);
smu_set_gfx_cgpg(&adev->smu, false);
return 0;
}
@ -1601,8 +1599,7 @@ static int smu_resume(void *handle)
return ret;
}
if (smu->is_apu)
smu_set_gfx_cgpg(&adev->smu, true);
smu_set_gfx_cgpg(&adev->smu, true);
smu->disable_uclk_switch = 0;

View File

@ -120,7 +120,8 @@ int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate)
int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
{
if (!(smu->adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
/* Until now SMU12 is only implemented for the Renoir series, so no APU check is needed here. */
if (!(smu->adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) || smu->adev->in_s0ix)
return 0;
return smu_cmn_send_smc_msg_with_param(smu,

View File

@ -1621,7 +1621,7 @@ static int aldebaran_allow_xgmi_power_down(struct smu_context *smu, bool en)
{
return smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_GmiPwrDnControl,
en ? 1 : 0,
en ? 0 : 1,
NULL);
}

View File

@ -564,6 +564,7 @@ set_proto_ctx_engines_parallel_submit(struct i915_user_extension __user *base,
container_of_user(base, typeof(*ext), base);
const struct set_proto_ctx_engines *set = data;
struct drm_i915_private *i915 = set->i915;
struct i915_engine_class_instance prev_engine;
u64 flags;
int err = 0, n, i, j;
u16 slot, width, num_siblings;
@ -629,7 +630,6 @@ set_proto_ctx_engines_parallel_submit(struct i915_user_extension __user *base,
/* Create contexts / engines */
for (i = 0; i < width; ++i) {
intel_engine_mask_t current_mask = 0;
struct i915_engine_class_instance prev_engine;
for (j = 0; j < num_siblings; ++j) {
struct i915_engine_class_instance ci;

View File

@ -3017,7 +3017,7 @@ eb_composite_fence_create(struct i915_execbuffer *eb, int out_fence_fd)
fence_array = dma_fence_array_create(eb->num_batches,
fences,
eb->context->parallel.fence_context,
eb->context->parallel.seqno,
eb->context->parallel.seqno++,
false);
if (!fence_array) {
kfree(fences);

View File

@ -353,34 +353,16 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
if (ret)
return ret;
fobj = NULL;
} else {
fobj = dma_resv_shared_list(resv);
}
fobj = dma_resv_shared_list(resv);
fence = dma_resv_excl_fence(resv);
if (fence) {
struct nouveau_channel *prev = NULL;
bool must_wait = true;
f = nouveau_local_fence(fence, chan->drm);
if (f) {
rcu_read_lock();
prev = rcu_dereference(f->channel);
if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0))
must_wait = false;
rcu_read_unlock();
}
if (must_wait)
ret = dma_fence_wait(fence, intr);
return ret;
}
if (!exclusive || !fobj)
return ret;
for (i = 0; i < fobj->shared_count && !ret; ++i) {
/* Waiting for the exclusive fence first causes performance regressions
* under some circumstances. So manually wait for the shared ones first.
*/
for (i = 0; i < (fobj ? fobj->shared_count : 0) && !ret; ++i) {
struct nouveau_channel *prev = NULL;
bool must_wait = true;
@ -400,6 +382,26 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
ret = dma_fence_wait(fence, intr);
}
fence = dma_resv_excl_fence(resv);
if (fence) {
struct nouveau_channel *prev = NULL;
bool must_wait = true;
f = nouveau_local_fence(fence, chan->drm);
if (f) {
rcu_read_lock();
prev = rcu_dereference(f->channel);
if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0))
must_wait = false;
rcu_read_unlock();
}
if (must_wait)
ret = dma_fence_wait(fence, intr);
return ret;
}
return ret;
}

View File

@ -535,6 +535,9 @@ static long compat_i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned lo
sizeof(rdwr_arg)))
return -EFAULT;
if (!rdwr_arg.msgs || rdwr_arg.nmsgs == 0)
return -EINVAL;
if (rdwr_arg.nmsgs > I2C_RDWR_IOCTL_MAX_MSGS)
return -EINVAL;

View File

@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/input.h>
#include <linux/serio.h>
#include <asm/unaligned.h>
#define DRIVER_DESC "SpaceTec SpaceBall 2003/3003/4000 FLX driver"
@ -75,9 +76,15 @@ static void spaceball_process_packet(struct spaceball* spaceball)
case 'D': /* Ball data */
if (spaceball->idx != 15) return;
for (i = 0; i < 6; i++)
/*
* Skip first three bytes; read six axes worth of data.
* Axis values are signed 16-bit big-endian.
*/
data += 3;
for (i = 0; i < ARRAY_SIZE(spaceball_axes); i++) {
input_report_abs(dev, spaceball_axes[i],
(__s16)((data[2 * i + 3] << 8) | data[2 * i + 2]));
(__s16)get_unaligned_be16(&data[i * 2]));
}
break;
case 'K': /* Button data */

View File

@ -916,6 +916,8 @@ static int atp_probe(struct usb_interface *iface,
set_bit(BTN_TOOL_TRIPLETAP, input_dev->keybit);
set_bit(BTN_LEFT, input_dev->keybit);
INIT_WORK(&dev->work, atp_reinit);
error = input_register_device(dev->input);
if (error)
goto err_free_buffer;
@ -923,8 +925,6 @@ static int atp_probe(struct usb_interface *iface,
/* save our data pointer in this interface device */
usb_set_intfdata(iface, dev);
INIT_WORK(&dev->work, atp_reinit);
return 0;
err_free_buffer:

View File

@ -1288,26 +1288,22 @@ static int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
{
struct ena_tx_buffer *tx_info = NULL;
struct ena_tx_buffer *tx_info;
if (likely(req_id < tx_ring->ring_size)) {
tx_info = &tx_ring->tx_buffer_info[req_id];
if (likely(tx_info->skb))
return 0;
}
tx_info = &tx_ring->tx_buffer_info[req_id];
if (likely(tx_info->skb))
return 0;
return handle_invalid_req_id(tx_ring, req_id, tx_info, false);
}
static int validate_xdp_req_id(struct ena_ring *xdp_ring, u16 req_id)
{
struct ena_tx_buffer *tx_info = NULL;
struct ena_tx_buffer *tx_info;
if (likely(req_id < xdp_ring->ring_size)) {
tx_info = &xdp_ring->tx_buffer_info[req_id];
if (likely(tx_info->xdpf))
return 0;
}
tx_info = &xdp_ring->tx_buffer_info[req_id];
if (likely(tx_info->xdpf))
return 0;
return handle_invalid_req_id(xdp_ring, req_id, tx_info, true);
}
@ -1332,9 +1328,14 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
&req_id);
if (rc)
if (rc) {
if (unlikely(rc == -EINVAL))
handle_invalid_req_id(tx_ring, req_id, NULL,
false);
break;
}
/* validate that the request id points to a valid skb */
rc = validate_tx_req_id(tx_ring, req_id);
if (rc)
break;
@ -1427,6 +1428,7 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
u16 *next_to_clean)
{
struct ena_rx_buffer *rx_info;
struct ena_adapter *adapter;
u16 len, req_id, buf = 0;
struct sk_buff *skb;
void *page_addr;
@ -1439,8 +1441,14 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
rx_info = &rx_ring->rx_buffer_info[req_id];
if (unlikely(!rx_info->page)) {
netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
"Page is NULL\n");
adapter = rx_ring->adapter;
netif_err(adapter, rx_err, rx_ring->netdev,
"Page is NULL. qid %u req_id %u\n", rx_ring->qid, req_id);
ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1, &rx_ring->syncp);
adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
/* Make sure reset reason is set before triggering the reset */
smp_mb__before_atomic();
set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
return NULL;
}
@ -1896,9 +1904,14 @@ static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget)
rc = ena_com_tx_comp_req_id_get(xdp_ring->ena_com_io_cq,
&req_id);
if (rc)
if (rc) {
if (unlikely(rc == -EINVAL))
handle_invalid_req_id(xdp_ring, req_id, NULL,
true);
break;
}
/* validate that the request id points to a valid xdp_frame */
rc = validate_xdp_req_id(xdp_ring, req_id);
if (rc)
break;
@ -4013,10 +4026,6 @@ static u32 ena_calc_max_io_queue_num(struct pci_dev *pdev,
max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_cq_num);
/* 1 IRQ for mgmnt and 1 IRQ for each IO direction */
max_num_io_queues = min_t(u32, max_num_io_queues, pci_msix_vec_count(pdev) - 1);
if (unlikely(!max_num_io_queues)) {
dev_err(&pdev->dev, "The device doesn't have io queues\n");
return -EFAULT;
}
return max_num_io_queues;
}

View File

@ -47,7 +47,6 @@ struct tgec_mdio_controller {
#define MDIO_CTL_READ BIT(15)
#define MDIO_DATA(x) (x & 0xffff)
#define MDIO_DATA_BSY BIT(31)
struct mdio_fsl_priv {
struct tgec_mdio_controller __iomem *mdio_base;

View File

@ -99,6 +99,24 @@ MODULE_LICENSE("GPL v2");
static struct workqueue_struct *i40e_wq;
static void netdev_hw_addr_refcnt(struct i40e_mac_filter *f,
struct net_device *netdev, int delta)
{
struct netdev_hw_addr *ha;
if (!f || !netdev)
return;
netdev_for_each_mc_addr(ha, netdev) {
if (ether_addr_equal(ha->addr, f->macaddr)) {
ha->refcount += delta;
if (ha->refcount <= 0)
ha->refcount = 1;
break;
}
}
}
/**
* i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
* @hw: pointer to the HW structure
@ -2036,6 +2054,7 @@ static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
hlist_for_each_entry_safe(new, h, from, hlist) {
/* We can simply free the wrapper structure */
hlist_del(&new->hlist);
netdev_hw_addr_refcnt(new->f, vsi->netdev, -1);
kfree(new);
}
}
@ -2383,6 +2402,10 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
&tmp_add_list,
&tmp_del_list,
vlan_filters);
hlist_for_each_entry(new, &tmp_add_list, hlist)
netdev_hw_addr_refcnt(new->f, vsi->netdev, 1);
if (retval)
goto err_no_memory_locked;
@ -2515,6 +2538,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
if (new->f->state == I40E_FILTER_NEW)
new->f->state = new->state;
hlist_del(&new->hlist);
netdev_hw_addr_refcnt(new->f, vsi->netdev, -1);
kfree(new);
}
spin_unlock_bh(&vsi->mac_filter_hash_lock);
@ -8716,6 +8740,27 @@ int i40e_open(struct net_device *netdev)
return 0;
}
/**
* i40e_netif_set_realnum_tx_rx_queues - Update number of tx/rx queues
* @vsi: vsi structure
*
* This updates netdev's number of tx/rx queues
*
* Returns status of setting tx/rx queues
**/
static int i40e_netif_set_realnum_tx_rx_queues(struct i40e_vsi *vsi)
{
int ret;
ret = netif_set_real_num_rx_queues(vsi->netdev,
vsi->num_queue_pairs);
if (ret)
return ret;
return netif_set_real_num_tx_queues(vsi->netdev,
vsi->num_queue_pairs);
}
/**
* i40e_vsi_open -
* @vsi: the VSI to open
@ -8752,13 +8797,7 @@ int i40e_vsi_open(struct i40e_vsi *vsi)
goto err_setup_rx;
/* Notify the stack of the actual queue counts. */
err = netif_set_real_num_tx_queues(vsi->netdev,
vsi->num_queue_pairs);
if (err)
goto err_set_queues;
err = netif_set_real_num_rx_queues(vsi->netdev,
vsi->num_queue_pairs);
err = i40e_netif_set_realnum_tx_rx_queues(vsi);
if (err)
goto err_set_queues;
@ -14149,6 +14188,9 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
case I40E_VSI_MAIN:
case I40E_VSI_VMDQ2:
ret = i40e_config_netdev(vsi);
if (ret)
goto err_netdev;
ret = i40e_netif_set_realnum_tx_rx_queues(vsi);
if (ret)
goto err_netdev;
ret = register_netdev(vsi->netdev);
@ -15451,8 +15493,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw))
dev_info(&pdev->dev,
"The driver for the device detected a newer version of the NVM image v%u.%u than expected v%u.%u. Please install the most recent version of the network driver.\n",
dev_dbg(&pdev->dev,
"The driver for the device detected a newer version of the NVM image v%u.%u than v%u.%u.\n",
hw->aq.api_maj_ver,
hw->aq.api_min_ver,
I40E_FW_API_VERSION_MAJOR,

View File

@ -1877,17 +1877,19 @@ sriov_configure_out:
/***********************virtual channel routines******************/
/**
* i40e_vc_send_msg_to_vf
* i40e_vc_send_msg_to_vf_ex
* @vf: pointer to the VF info
* @v_opcode: virtual channel opcode
* @v_retval: virtual channel return value
* @msg: pointer to the msg buffer
* @msglen: msg length
* @is_quiet: true for not printing unsuccessful return values, false otherwise
*
* send msg to VF
**/
static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
u32 v_retval, u8 *msg, u16 msglen)
static int i40e_vc_send_msg_to_vf_ex(struct i40e_vf *vf, u32 v_opcode,
u32 v_retval, u8 *msg, u16 msglen,
bool is_quiet)
{
struct i40e_pf *pf;
struct i40e_hw *hw;
@ -1903,7 +1905,7 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
/* single place to detect unsuccessful return values */
if (v_retval) {
if (v_retval && !is_quiet) {
vf->num_invalid_msgs++;
dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
vf->vf_id, v_opcode, v_retval);
@ -1933,6 +1935,23 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
return 0;
}
/**
* i40e_vc_send_msg_to_vf
* @vf: pointer to the VF info
* @v_opcode: virtual channel opcode
* @v_retval: virtual channel return value
* @msg: pointer to the msg buffer
* @msglen: msg length
*
* send msg to VF
**/
static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
u32 v_retval, u8 *msg, u16 msglen)
{
return i40e_vc_send_msg_to_vf_ex(vf, v_opcode, v_retval,
msg, msglen, false);
}
/**
* i40e_vc_send_resp_to_vf
* @vf: pointer to the VF info
@ -2695,6 +2714,7 @@ error_param:
* i40e_check_vf_permission
* @vf: pointer to the VF info
* @al: MAC address list from virtchnl
* @is_quiet: set true for printing msg without opcode info, false otherwise
*
* Check that the given list of MAC addresses is allowed. Will return -EPERM
* if any address in the list is not valid. Checks the following conditions:
@ -2709,13 +2729,15 @@ error_param:
* addresses might not be accurate.
**/
static inline int i40e_check_vf_permission(struct i40e_vf *vf,
struct virtchnl_ether_addr_list *al)
struct virtchnl_ether_addr_list *al,
bool *is_quiet)
{
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
int mac2add_cnt = 0;
int i;
*is_quiet = false;
for (i = 0; i < al->num_elements; i++) {
struct i40e_mac_filter *f;
u8 *addr = al->list[i].addr;
@ -2739,6 +2761,7 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
!ether_addr_equal(addr, vf->default_lan_addr.addr)) {
dev_err(&pf->pdev->dev,
"VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
*is_quiet = true;
return -EPERM;
}
@ -2775,6 +2798,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
(struct virtchnl_ether_addr_list *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
bool is_quiet = false;
i40e_status ret = 0;
int i;
@ -2791,7 +2815,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
*/
spin_lock_bh(&vsi->mac_filter_hash_lock);
ret = i40e_check_vf_permission(vf, al);
ret = i40e_check_vf_permission(vf, al, &is_quiet);
if (ret) {
spin_unlock_bh(&vsi->mac_filter_hash_lock);
goto error_param;
@ -2829,8 +2853,8 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
error_param:
/* send the response to the VF */
return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
ret);
return i40e_vc_send_msg_to_vf_ex(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
ret, NULL, 0, is_quiet);
}
/**

View File

@ -3123,8 +3123,11 @@ static int iavf_validate_ch_config(struct iavf_adapter *adapter,
total_max_rate += tx_rate;
num_qps += mqprio_qopt->qopt.count[i];
}
if (num_qps > IAVF_MAX_REQ_QUEUES)
if (num_qps > adapter->num_active_queues) {
dev_err(&adapter->pdev->dev,
"Cannot support requested number of queues\n");
return -EINVAL;
}
ret = iavf_validate_tx_bandwidth(adapter, total_max_rate);
return ret;

View File

@ -110,6 +110,8 @@ static struct page *ef4_reuse_page(struct ef4_rx_queue *rx_queue)
struct ef4_rx_page_state *state;
unsigned index;
if (unlikely(!rx_queue->page_ring))
return NULL;
index = rx_queue->page_remove & rx_queue->page_ptr_mask;
page = rx_queue->page_ring[index];
if (page == NULL)
@ -293,6 +295,9 @@ static void ef4_recycle_rx_pages(struct ef4_channel *channel,
{
struct ef4_rx_queue *rx_queue = ef4_channel_get_rx_queue(channel);
if (unlikely(!rx_queue->page_ring))
return;
do {
ef4_recycle_rx_page(channel, rx_buf);
rx_buf = ef4_rx_buf_next(rx_queue, rx_buf);

View File

@ -45,6 +45,8 @@ static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
unsigned int index;
struct page *page;
if (unlikely(!rx_queue->page_ring))
return NULL;
index = rx_queue->page_remove & rx_queue->page_ptr_mask;
page = rx_queue->page_ring[index];
if (page == NULL)
@ -114,6 +116,9 @@ void efx_recycle_rx_pages(struct efx_channel *channel,
{
struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
if (unlikely(!rx_queue->page_ring))
return;
do {
efx_recycle_rx_page(channel, rx_buf);
rx_buf = efx_rx_buf_next(rx_queue, rx_buf);

View File

@ -93,7 +93,9 @@ static int atusb_control_msg(struct atusb *atusb, unsigned int pipe,
ret = usb_control_msg(usb_dev, pipe, request, requesttype,
value, index, data, size, timeout);
if (ret < 0) {
if (ret < size) {
ret = ret < 0 ? ret : -ENODATA;
atusb->err = ret;
dev_err(&usb_dev->dev,
"%s: req 0x%02x val 0x%x idx 0x%x, error %d\n",
@ -861,9 +863,9 @@ static int atusb_get_and_show_build(struct atusb *atusb)
if (!build)
return -ENOMEM;
ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
ATUSB_BUILD, ATUSB_REQ_FROM_DEV, 0, 0,
build, ATUSB_BUILD_SIZE, 1000);
/* We cannot call atusb_control_msg() here, since this request may read variable-length data */
ret = usb_control_msg(atusb->usb_dev, usb_rcvctrlpipe(usb_dev, 0), ATUSB_BUILD,
ATUSB_REQ_FROM_DEV, 0, 0, build, ATUSB_BUILD_SIZE, 1000);
if (ret >= 0) {
build[ret] = 0;
dev_info(&usb_dev->dev, "Firmware: build %s\n", build);

View File

@ -239,8 +239,8 @@ static struct phy_device *__fixed_phy_register(unsigned int irq,
/* Check if we have a GPIO associated with this fixed phy */
if (!gpiod) {
gpiod = fixed_phy_get_gpiod(np);
if (!gpiod)
return ERR_PTR(-EINVAL);
if (IS_ERR(gpiod))
return ERR_CAST(gpiod);
}
/* Get the next available PHY address, up to PHY_MAX_ADDR */

View File

@ -9642,9 +9642,12 @@ static int rtl8152_probe(struct usb_interface *intf,
netdev->hw_features &= ~NETIF_F_RXCSUM;
}
if (udev->parent &&
le16_to_cpu(udev->parent->descriptor.idVendor) == VENDOR_ID_LENOVO) {
tp->lenovo_macpassthru = 1;
if (le16_to_cpu(udev->descriptor.idVendor) == VENDOR_ID_LENOVO) {
switch (le16_to_cpu(udev->descriptor.idProduct)) {
case DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2:
case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2:
tp->lenovo_macpassthru = 1;
}
}
if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 && udev->serial &&

View File

@ -608,6 +608,11 @@ static const struct usb_device_id products [] = {
USB_DEVICE_AND_INTERFACE_INFO(0x1630, 0x0042,
USB_CLASS_COMM, 2 /* ACM */, 0x0ff),
.driver_info = (unsigned long) &rndis_poll_status_info,
}, {
/* Hytera Communications DMR radios' "Radio to PC Network" */
USB_VENDOR_AND_INTERFACE_INFO(0x238b,
USB_CLASS_COMM, 2 /* ACM */, 0x0ff),
.driver_info = (unsigned long)&rndis_info,
}, {
/* RNDIS is MSFT's un-official variant of CDC ACM */
USB_INTERFACE_INFO(USB_CLASS_COMM, 2 /* ACM */, 0x0ff),

View File

@ -3100,6 +3100,8 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
{
struct iscsi_conn *conn = cls_conn->dd_data;
struct iscsi_session *session = conn->session;
char *tmp_persistent_address = conn->persistent_address;
char *tmp_local_ipaddr = conn->local_ipaddr;
del_timer_sync(&conn->transport_timer);
@ -3121,8 +3123,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
spin_lock_bh(&session->frwd_lock);
free_pages((unsigned long) conn->data,
get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));
kfree(conn->persistent_address);
kfree(conn->local_ipaddr);
/* regular RX path uses back_lock */
spin_lock_bh(&session->back_lock);
kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task,
@ -3134,6 +3134,8 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
mutex_unlock(&session->eh_mutex);
iscsi_destroy_conn(cls_conn);
kfree(tmp_persistent_address);
kfree(tmp_local_ipaddr);
}
EXPORT_SYMBOL_GPL(iscsi_conn_teardown);

View File

@ -2954,8 +2954,8 @@ lpfc_debugfs_nvmeio_trc_write(struct file *file, const char __user *buf,
char mybuf[64];
char *pbuf;
if (nbytes > 64)
nbytes = 64;
if (nbytes > 63)
nbytes = 63;
memset(mybuf, 0, sizeof(mybuf));

View File

@ -586,9 +586,12 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
* Commands like INQUIRY may transfer less data than
* requested by the initiator via bufflen. Set residual
* count to make upper layer aware of the actual amount
* of data returned.
* of data returned. There are cases when the controller
* returns zero dataLen with non-zero data; do not set the
* residual count in that case.
*/
scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen);
if (e->dataLen && (e->dataLen < scsi_bufflen(cmd)))
scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen);
cmd->result = (DID_OK << 16);
break;

View File

@ -4263,12 +4263,11 @@ SYSCALL_DEFINE5(mount_setattr, int, dfd, const char __user *, path,
return err;
err = user_path_at(dfd, path, kattr.lookup_flags, &target);
if (err)
return err;
err = do_mount_setattr(&target, &kattr);
if (!err) {
err = do_mount_setattr(&target, &kattr);
path_put(&target);
}
finish_mount_kattr(&kattr);
path_put(&target);
return err;
}

View File

@ -133,6 +133,7 @@ struct inet6_skb_parm {
__u16 dsthao;
#endif
__u16 frag_max_size;
__u16 srhoff;
#define IP6SKB_XFRM_TRANSFORMED 1
#define IP6SKB_FORWARDED 2
@ -142,6 +143,7 @@ struct inet6_skb_parm {
#define IP6SKB_HOPBYHOP 32
#define IP6SKB_L3SLAVE 64
#define IP6SKB_JUMBOGRAM 128
#define IP6SKB_SEG6 256
};
#if defined(CONFIG_NET_L3_MASTER_DEV)

View File

@ -277,6 +277,7 @@ enum vmscan_throttle_state {
VMSCAN_THROTTLE_WRITEBACK,
VMSCAN_THROTTLE_ISOLATED,
VMSCAN_THROTTLE_NOPROGRESS,
VMSCAN_THROTTLE_CONGESTED,
NR_VMSCAN_THROTTLE,
};

View File

@ -112,8 +112,7 @@ struct sctp_transport *sctp_transport_get_next(struct net *net,
struct rhashtable_iter *iter);
struct sctp_transport *sctp_transport_get_idx(struct net *net,
struct rhashtable_iter *iter, int pos);
int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *),
struct net *net,
int sctp_transport_lookup_process(sctp_callback_t cb, struct net *net,
const union sctp_addr *laddr,
const union sctp_addr *paddr, void *p);
int sctp_transport_traverse_process(sctp_callback_t cb, sctp_callback_t cb_done,

View File

@ -58,9 +58,30 @@ extern int seg6_local_init(void);
extern void seg6_local_exit(void);
extern bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len, bool reduced);
extern struct ipv6_sr_hdr *seg6_get_srh(struct sk_buff *skb, int flags);
extern void seg6_icmp_srh(struct sk_buff *skb, struct inet6_skb_parm *opt);
extern int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh,
int proto);
extern int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh);
extern int seg6_lookup_nexthop(struct sk_buff *skb, struct in6_addr *nhaddr,
u32 tbl_id);
/* If the packet which invoked an ICMP error contains an SRH, return
* the true destination address from within the SRH, otherwise use the
* destination address in the IP header.
*/
static inline const struct in6_addr *seg6_get_daddr(struct sk_buff *skb,
struct inet6_skb_parm *opt)
{
struct ipv6_sr_hdr *srh;
if (opt->flags & IP6SKB_SEG6) {
srh = (struct ipv6_sr_hdr *)(skb->data + opt->srhoff);
return &srh->segments[0];
}
return NULL;
}
#endif

View File

@ -30,12 +30,14 @@
#define _VMSCAN_THROTTLE_WRITEBACK (1 << VMSCAN_THROTTLE_WRITEBACK)
#define _VMSCAN_THROTTLE_ISOLATED (1 << VMSCAN_THROTTLE_ISOLATED)
#define _VMSCAN_THROTTLE_NOPROGRESS (1 << VMSCAN_THROTTLE_NOPROGRESS)
#define _VMSCAN_THROTTLE_CONGESTED (1 << VMSCAN_THROTTLE_CONGESTED)
#define show_throttle_flags(flags) \
(flags) ? __print_flags(flags, "|", \
{_VMSCAN_THROTTLE_WRITEBACK, "VMSCAN_THROTTLE_WRITEBACK"}, \
{_VMSCAN_THROTTLE_ISOLATED, "VMSCAN_THROTTLE_ISOLATED"}, \
{_VMSCAN_THROTTLE_NOPROGRESS, "VMSCAN_THROTTLE_NOPROGRESS"} \
{_VMSCAN_THROTTLE_NOPROGRESS, "VMSCAN_THROTTLE_NOPROGRESS"}, \
{_VMSCAN_THROTTLE_CONGESTED, "VMSCAN_THROTTLE_CONGESTED"} \
) : "VMSCAN_THROTTLE_NONE"

View File

@ -353,6 +353,7 @@ static ssize_t dbgfs_target_ids_write(struct file *file,
const char __user *buf, size_t count, loff_t *ppos)
{
struct damon_ctx *ctx = file->private_data;
struct damon_target *t, *next_t;
bool id_is_pid = true;
char *kbuf, *nrs;
unsigned long *targets;
@ -397,8 +398,12 @@ static ssize_t dbgfs_target_ids_write(struct file *file,
goto unlock_out;
}
/* remove targets with previously-set primitive */
damon_set_targets(ctx, NULL, 0);
/* remove previously set targets */
damon_for_each_target_safe(t, next_t, ctx) {
if (targetid_is_pid(ctx))
put_pid((struct pid *)t->id);
damon_destroy_target(t);
}
/* Configure the context for the address space type */
if (id_is_pid)

View File

@ -1021,6 +1021,39 @@ static void handle_write_error(struct address_space *mapping,
unlock_page(page);
}
static bool skip_throttle_noprogress(pg_data_t *pgdat)
{
int reclaimable = 0, write_pending = 0;
int i;
/*
* If kswapd is disabled, reschedule if necessary but do not
* throttle as the system is likely near OOM.
*/
if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
return true;
/*
* If there are a lot of dirty/writeback pages then do not
* throttle as throttling will occur when the pages cycle
* towards the end of the LRU if still under writeback.
*/
for (i = 0; i < MAX_NR_ZONES; i++) {
struct zone *zone = pgdat->node_zones + i;
if (!populated_zone(zone))
continue;
reclaimable += zone_reclaimable_pages(zone);
write_pending += zone_page_state_snapshot(zone,
NR_ZONE_WRITE_PENDING);
}
if (2 * write_pending <= reclaimable)
return true;
return false;
}
void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason)
{
wait_queue_head_t *wqh = &pgdat->reclaim_wait[reason];
@ -1056,8 +1089,16 @@ void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason)
}
break;
case VMSCAN_THROTTLE_CONGESTED:
fallthrough;
case VMSCAN_THROTTLE_NOPROGRESS:
timeout = HZ/2;
if (skip_throttle_noprogress(pgdat)) {
cond_resched();
return;
}
timeout = 1;
break;
case VMSCAN_THROTTLE_ISOLATED:
timeout = HZ/50;
@ -3321,7 +3362,7 @@ again:
if (!current_is_kswapd() && current_may_throttle() &&
!sc->hibernation_mode &&
test_bit(LRUVEC_CONGESTED, &target_lruvec->flags))
reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK);
reclaim_throttle(pgdat, VMSCAN_THROTTLE_CONGESTED);
if (should_continue_reclaim(pgdat, sc->nr_reclaimed - nr_reclaimed,
sc))
@ -3386,16 +3427,16 @@ static void consider_reclaim_throttle(pg_data_t *pgdat, struct scan_control *sc)
}
/*
* Do not throttle kswapd on NOPROGRESS as it will throttle on
* VMSCAN_THROTTLE_WRITEBACK if there are too many pages under
* writeback and marked for immediate reclaim at the tail of
* the LRU.
* Do not throttle kswapd or cgroup reclaim on NOPROGRESS as it will
* throttle on VMSCAN_THROTTLE_WRITEBACK if there are too many pages
* under writeback and marked for immediate reclaim at the tail of the
* LRU.
*/
if (current_is_kswapd())
if (current_is_kswapd() || cgroup_reclaim(sc))
return;
/* Throttle if making no progress at high priorities. */
if (sc->priority < DEF_PRIORITY - 2)
if (sc->priority == 1 && !sc->nr_reclaimed)
reclaim_throttle(pgdat, VMSCAN_THROTTLE_NOPROGRESS);
}
@ -3415,6 +3456,7 @@ static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
unsigned long nr_soft_scanned;
gfp_t orig_mask;
pg_data_t *last_pgdat = NULL;
pg_data_t *first_pgdat = NULL;
/*
* If the number of buffer_heads in the machine exceeds the maximum
@ -3478,14 +3520,19 @@ static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
/* need some check for avoid more shrink_zone() */
}
if (!first_pgdat)
first_pgdat = zone->zone_pgdat;
/* See comment about same check for global reclaim above */
if (zone->zone_pgdat == last_pgdat)
continue;
last_pgdat = zone->zone_pgdat;
shrink_node(zone->zone_pgdat, sc);
consider_reclaim_throttle(zone->zone_pgdat, sc);
}
if (first_pgdat)
consider_reclaim_throttle(first_pgdat, sc);
/*
* Restore to original mask to avoid the impact on the caller if we
* promoted it to __GFP_HIGHMEM.

View File

@ -1339,6 +1339,7 @@ batadv_mcast_forw_rtr_node_get(struct batadv_priv *bat_priv,
* @bat_priv: the bat priv with all the soft interface information
* @skb: The multicast packet to check
* @orig: an originator to be set to forward the skb to
* @is_routable: stores whether the destination is routable
*
* Return: the forwarding mode as enum batadv_forw_mode and in case of
* BATADV_FORW_SINGLE set the orig to the single originator the skb
@ -1346,17 +1347,16 @@ batadv_mcast_forw_rtr_node_get(struct batadv_priv *bat_priv,
*/
enum batadv_forw_mode
batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
struct batadv_orig_node **orig)
struct batadv_orig_node **orig, int *is_routable)
{
int ret, tt_count, ip_count, unsnoop_count, total_count;
bool is_unsnoopable = false;
unsigned int mcast_fanout;
struct ethhdr *ethhdr;
int is_routable = 0;
int rtr_count = 0;
ret = batadv_mcast_forw_mode_check(bat_priv, skb, &is_unsnoopable,
&is_routable);
is_routable);
if (ret == -ENOMEM)
return BATADV_FORW_NONE;
else if (ret < 0)
@ -1369,7 +1369,7 @@ batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
ip_count = batadv_mcast_forw_want_all_ip_count(bat_priv, ethhdr);
unsnoop_count = !is_unsnoopable ? 0 :
atomic_read(&bat_priv->mcast.num_want_all_unsnoopables);
rtr_count = batadv_mcast_forw_rtr_count(bat_priv, is_routable);
rtr_count = batadv_mcast_forw_rtr_count(bat_priv, *is_routable);
total_count = tt_count + ip_count + unsnoop_count + rtr_count;
@ -1689,6 +1689,7 @@ batadv_mcast_forw_want_rtr(struct batadv_priv *bat_priv,
* @bat_priv: the bat priv with all the soft interface information
* @skb: the multicast packet to transmit
* @vid: the vlan identifier
* @is_routable: stores whether the destination is routable
*
* Sends copies of a frame with multicast destination to any node that signaled
* interest in it, that is either via the translation table or the according
@ -1701,7 +1702,7 @@ batadv_mcast_forw_want_rtr(struct batadv_priv *bat_priv,
* is neither IPv4 nor IPv6. NET_XMIT_SUCCESS otherwise.
*/
int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
unsigned short vid)
unsigned short vid, int is_routable)
{
int ret;
@ -1717,12 +1718,16 @@ int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
return ret;
}
if (!is_routable)
goto skip_mc_router;
ret = batadv_mcast_forw_want_rtr(bat_priv, skb, vid);
if (ret != NET_XMIT_SUCCESS) {
kfree_skb(skb);
return ret;
}
skip_mc_router:
consume_skb(skb);
return ret;
}


@ -43,7 +43,8 @@ enum batadv_forw_mode {
enum batadv_forw_mode
batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
struct batadv_orig_node **mcast_single_orig);
struct batadv_orig_node **mcast_single_orig,
int *is_routable);
int batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv,
struct sk_buff *skb,
@ -51,7 +52,7 @@ int batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node);
int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
unsigned short vid);
unsigned short vid, int is_routable);
void batadv_mcast_init(struct batadv_priv *bat_priv);
@ -68,7 +69,8 @@ void batadv_mcast_purge_orig(struct batadv_orig_node *orig_node);
static inline enum batadv_forw_mode
batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
struct batadv_orig_node **mcast_single_orig)
struct batadv_orig_node **mcast_single_orig,
int *is_routable)
{
return BATADV_FORW_ALL;
}
@ -85,7 +87,7 @@ batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv,
static inline int
batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
unsigned short vid)
unsigned short vid, int is_routable)
{
kfree_skb(skb);
return NET_XMIT_DROP;


@ -198,6 +198,7 @@ static netdev_tx_t batadv_interface_tx(struct sk_buff *skb,
int gw_mode;
enum batadv_forw_mode forw_mode = BATADV_FORW_SINGLE;
struct batadv_orig_node *mcast_single_orig = NULL;
int mcast_is_routable = 0;
int network_offset = ETH_HLEN;
__be16 proto;
@ -300,7 +301,8 @@ static netdev_tx_t batadv_interface_tx(struct sk_buff *skb,
send:
if (do_bcast && !is_broadcast_ether_addr(ethhdr->h_dest)) {
forw_mode = batadv_mcast_forw_mode(bat_priv, skb,
&mcast_single_orig);
&mcast_single_orig,
&mcast_is_routable);
if (forw_mode == BATADV_FORW_NONE)
goto dropped;
@ -359,7 +361,8 @@ send:
ret = batadv_mcast_forw_send_orig(bat_priv, skb, vid,
mcast_single_orig);
} else if (forw_mode == BATADV_FORW_SOME) {
ret = batadv_mcast_forw_send(bat_priv, skb, vid);
ret = batadv_mcast_forw_send(bat_priv, skb, vid,
mcast_is_routable);
} else {
if (batadv_dat_snoop_outgoing_arp_request(bat_priv,
skb))


@ -197,6 +197,10 @@ int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int remaining,
nla_entype = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
if (nla_entype) {
if (nla_len(nla_entype) < sizeof(u16)) {
NL_SET_ERR_MSG(extack, "Invalid RTA_ENCAP_TYPE");
return -EINVAL;
}
encap_type = nla_get_u16(nla_entype);
if (lwtunnel_valid_encap_type(encap_type,


@ -662,6 +662,19 @@ static int fib_count_nexthops(struct rtnexthop *rtnh, int remaining,
return nhs;
}
static int fib_gw_from_attr(__be32 *gw, struct nlattr *nla,
struct netlink_ext_ack *extack)
{
if (nla_len(nla) < sizeof(*gw)) {
NL_SET_ERR_MSG(extack, "Invalid IPv4 address in RTA_GATEWAY");
return -EINVAL;
}
*gw = nla_get_in_addr(nla);
return 0;
}
/* only called when fib_nh is integrated into fib_info */
static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
int remaining, struct fib_config *cfg,
@ -704,7 +717,11 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
return -EINVAL;
}
if (nla) {
fib_cfg.fc_gw4 = nla_get_in_addr(nla);
ret = fib_gw_from_attr(&fib_cfg.fc_gw4, nla,
extack);
if (ret)
goto errout;
if (fib_cfg.fc_gw4)
fib_cfg.fc_gw_family = AF_INET;
} else if (nlav) {
@ -714,10 +731,18 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
}
nla = nla_find(attrs, attrlen, RTA_FLOW);
if (nla)
if (nla) {
if (nla_len(nla) < sizeof(u32)) {
NL_SET_ERR_MSG(extack, "Invalid RTA_FLOW");
return -EINVAL;
}
fib_cfg.fc_flow = nla_get_u32(nla);
}
fib_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
/* RTA_ENCAP_TYPE length checked in
* lwtunnel_valid_encap_type_attr
*/
nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
if (nla)
fib_cfg.fc_encap_type = nla_get_u16(nla);
@ -902,6 +927,7 @@ int fib_nh_match(struct net *net, struct fib_config *cfg, struct fib_info *fi,
attrlen = rtnh_attrlen(rtnh);
if (attrlen > 0) {
struct nlattr *nla, *nlav, *attrs = rtnh_attrs(rtnh);
int err;
nla = nla_find(attrs, attrlen, RTA_GATEWAY);
nlav = nla_find(attrs, attrlen, RTA_VIA);
@ -912,12 +938,17 @@ int fib_nh_match(struct net *net, struct fib_config *cfg, struct fib_info *fi,
}
if (nla) {
__be32 gw;
err = fib_gw_from_attr(&gw, nla, extack);
if (err)
return err;
if (nh->fib_nh_gw_family != AF_INET ||
nla_get_in_addr(nla) != nh->fib_nh_gw4)
gw != nh->fib_nh_gw4)
return 1;
} else if (nlav) {
struct fib_config cfg2;
int err;
err = fib_gw_from_via(&cfg2, nlav, extack);
if (err)
@ -940,8 +971,14 @@ int fib_nh_match(struct net *net, struct fib_config *cfg, struct fib_info *fi,
#ifdef CONFIG_IP_ROUTE_CLASSID
nla = nla_find(attrs, attrlen, RTA_FLOW);
if (nla && nla_get_u32(nla) != nh->nh_tclassid)
return 1;
if (nla) {
if (nla_len(nla) < sizeof(u32)) {
NL_SET_ERR_MSG(extack, "Invalid RTA_FLOW");
return -EINVAL;
}
if (nla_get_u32(nla) != nh->nh_tclassid)
return 1;
}
#endif
}
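
fib_gw_from_attr() and the RTA_FLOW check above apply the same rule: verify nla_len() covers the fixed-size value before reading it, instead of trusting the attribute length. A hedged userspace sketch of the idea, using a simplified made-up TLV layout rather than the real struct nlattr:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified TLV attribute for illustration only; not struct nlattr. */
struct tlv_attr {
	uint16_t len;                   /* length of data[] actually present */
	uint16_t type;
	unsigned char data[8];
};

/* Same idea as fib_gw_from_attr(): refuse to read a fixed-size value out
 * of an attribute that is shorter than that value. */
static int gw_from_attr(uint32_t *gw, const struct tlv_attr *attr)
{
	if (attr->len < sizeof(*gw))
		return -1;              /* would read past the payload */
	memcpy(gw, attr->data, sizeof(*gw));
	return 0;
}

int main(void)
{
	struct tlv_attr too_short = { .len = 2, .type = 1 };
	struct tlv_attr ok = { .len = 4, .type = 1, .data = { 192, 0, 2, 1 } };
	uint32_t gw;

	printf("short: %d\n", gw_from_attr(&gw, &too_short)); /* -1 */
	printf("ok:    %d\n", gw_from_attr(&gw, &ok));         /* 0 */
	return 0;
}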


@ -57,6 +57,7 @@
#include <net/protocol.h>
#include <net/raw.h>
#include <net/rawv6.h>
#include <net/seg6.h>
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
@ -820,6 +821,7 @@ out_bh_enable:
void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
{
struct inet6_skb_parm *opt = IP6CB(skb);
const struct inet6_protocol *ipprot;
int inner_offset;
__be16 frag_off;
@ -829,6 +831,8 @@ void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
goto out;
seg6_icmp_srh(skb, opt);
nexthdr = ((struct ipv6hdr *)skb->data)->nexthdr;
if (ipv6_ext_hdr(nexthdr)) {
/* now skip over extension headers */
@ -853,7 +857,7 @@ void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
ipprot = rcu_dereference(inet6_protos[nexthdr]);
if (ipprot && ipprot->err_handler)
ipprot->err_handler(skb, NULL, type, code, inner_offset, info);
ipprot->err_handler(skb, opt, type, code, inner_offset, info);
raw6_icmp_error(skb, nexthdr, type, code, inner_offset, info);
return;


@ -5210,6 +5210,19 @@ out:
return should_notify;
}
static int fib6_gw_from_attr(struct in6_addr *gw, struct nlattr *nla,
struct netlink_ext_ack *extack)
{
if (nla_len(nla) < sizeof(*gw)) {
NL_SET_ERR_MSG(extack, "Invalid IPv6 address in RTA_GATEWAY");
return -EINVAL;
}
*gw = nla_get_in6_addr(nla);
return 0;
}
static int ip6_route_multipath_add(struct fib6_config *cfg,
struct netlink_ext_ack *extack)
{
@ -5250,10 +5263,18 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
nla = nla_find(attrs, attrlen, RTA_GATEWAY);
if (nla) {
r_cfg.fc_gateway = nla_get_in6_addr(nla);
err = fib6_gw_from_attr(&r_cfg.fc_gateway, nla,
extack);
if (err)
goto cleanup;
r_cfg.fc_flags |= RTF_GATEWAY;
}
r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
/* RTA_ENCAP_TYPE length checked in
* lwtunnel_valid_encap_type_attr
*/
nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
if (nla)
r_cfg.fc_encap_type = nla_get_u16(nla);
@ -5420,7 +5441,13 @@ static int ip6_route_multipath_del(struct fib6_config *cfg,
nla = nla_find(attrs, attrlen, RTA_GATEWAY);
if (nla) {
nla_memcpy(&r_cfg.fc_gateway, nla, 16);
err = fib6_gw_from_attr(&r_cfg.fc_gateway, nla,
extack);
if (err) {
last_err = err;
goto next_rtnh;
}
r_cfg.fc_flags |= RTF_GATEWAY;
}
}
@ -5428,6 +5455,7 @@ static int ip6_route_multipath_del(struct fib6_config *cfg,
if (err)
last_err = err;
next_rtnh:
rtnh = rtnh_next(rtnh, &remaining);
}


@ -75,6 +75,65 @@ bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len, bool reduced)
return true;
}
struct ipv6_sr_hdr *seg6_get_srh(struct sk_buff *skb, int flags)
{
struct ipv6_sr_hdr *srh;
int len, srhoff = 0;
if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, &flags) < 0)
return NULL;
if (!pskb_may_pull(skb, srhoff + sizeof(*srh)))
return NULL;
srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
len = (srh->hdrlen + 1) << 3;
if (!pskb_may_pull(skb, srhoff + len))
return NULL;
/* note that pskb_may_pull may change pointers in header;
* for this reason it is necessary to reload them when needed.
*/
srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
if (!seg6_validate_srh(srh, len, true))
return NULL;
return srh;
}
/* Determine if an ICMP invoking packet contains a segment routing
* header. If it does, extract the offset to the true destination
* address, which is in the first segment address.
*/
void seg6_icmp_srh(struct sk_buff *skb, struct inet6_skb_parm *opt)
{
__u16 network_header = skb->network_header;
struct ipv6_sr_hdr *srh;
/* Update network header to point to the invoking packet
* inside the ICMP packet, so we can use the seg6_get_srh()
* helper.
*/
skb_reset_network_header(skb);
srh = seg6_get_srh(skb, 0);
if (!srh)
goto out;
if (srh->type != IPV6_SRCRT_TYPE_4)
goto out;
opt->flags |= IP6SKB_SEG6;
opt->srhoff = (unsigned char *)srh - skb->data;
out:
/* Restore the network header back to the ICMP packet */
skb->network_header = network_header;
}
static struct genl_family seg6_genl_family;
static const struct nla_policy seg6_genl_policy[SEG6_ATTR_MAX + 1] = {
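
seg6_get_srh() reloads the srh pointer after each pskb_may_pull() because the pull may relocate the packet data, as the comment above notes. A userspace analogue of the same rule, with realloc() standing in for the call that may move the buffer (illustrative only, not kernel code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	size_t off = 4;
	char *buf = malloc(8);

	if (!buf)
		return 1;
	memcpy(buf, "abcdefg", 8);

	char *hdr = buf + off;               /* pointer into the buffer */

	char *bigger = realloc(buf, 4096);   /* may move the allocation */
	if (!bigger) {
		free(buf);
		return 1;
	}
	buf = bigger;
	hdr = buf + off;                     /* reload from the (possibly new) base */

	printf("%c\n", *hdr);                /* 'e' */
	free(buf);
	return 0;
}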


@ -151,40 +151,11 @@ static struct seg6_local_lwt *seg6_local_lwtunnel(struct lwtunnel_state *lwt)
return (struct seg6_local_lwt *)lwt->data;
}
static struct ipv6_sr_hdr *get_srh(struct sk_buff *skb, int flags)
{
struct ipv6_sr_hdr *srh;
int len, srhoff = 0;
if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, &flags) < 0)
return NULL;
if (!pskb_may_pull(skb, srhoff + sizeof(*srh)))
return NULL;
srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
len = (srh->hdrlen + 1) << 3;
if (!pskb_may_pull(skb, srhoff + len))
return NULL;
/* note that pskb_may_pull may change pointers in header;
* for this reason it is necessary to reload them when needed.
*/
srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
if (!seg6_validate_srh(srh, len, true))
return NULL;
return srh;
}
static struct ipv6_sr_hdr *get_and_validate_srh(struct sk_buff *skb)
{
struct ipv6_sr_hdr *srh;
srh = get_srh(skb, IP6_FH_F_SKIP_RH);
srh = seg6_get_srh(skb, IP6_FH_F_SKIP_RH);
if (!srh)
return NULL;
@ -201,7 +172,7 @@ static bool decap_and_validate(struct sk_buff *skb, int proto)
struct ipv6_sr_hdr *srh;
unsigned int off = 0;
srh = get_srh(skb, 0);
srh = seg6_get_srh(skb, 0);
if (srh && srh->segments_left > 0)
return false;


@ -41,6 +41,7 @@
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/raw.h>
#include <net/seg6.h>
#include <net/tcp_states.h>
#include <net/ip6_checksum.h>
#include <net/ip6_tunnel.h>
@ -562,7 +563,7 @@ int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
struct ipv6_pinfo *np;
const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
const struct in6_addr *saddr = &hdr->saddr;
const struct in6_addr *daddr = &hdr->daddr;
const struct in6_addr *daddr = seg6_get_daddr(skb, opt) ? : &hdr->daddr;
struct udphdr *uh = (struct udphdr *)(skb->data+offset);
bool tunnel = false;
struct sock *sk;
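
__udp6_lib_err() above selects the SRH-derived destination with the GNU "?:" extension, falling back to the header address when seg6_get_daddr() returns NULL. A small sketch of that fallback pattern (needs GCC or Clang; lookup() is a made-up stand-in):

#include <stdio.h>

static const char *lookup(int have_override)
{
	return have_override ? "overridden destination" : NULL;
}

int main(void)
{
	const char *fallback = "header destination";

	/* GNU extension: x ?: y evaluates x once and yields it when it is
	 * non-zero/non-NULL, otherwise yields y. */
	const char *daddr = lookup(0) ?: fallback;
	printf("%s\n", daddr);               /* header destination */

	daddr = lookup(1) ?: fallback;
	printf("%s\n", daddr);               /* overridden destination */
	return 0;
}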


@ -647,6 +647,26 @@ struct mesh_csa_settings {
struct cfg80211_csa_settings settings;
};
/**
* struct mesh_table
*
* @known_gates: list of known mesh gates and their mpaths by the station. The
* gate's mpath may or may not be resolved and active.
* @gates_lock: protects updates to known_gates
* @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr
* @walk_head: linked list containing all mesh_path objects
* @walk_lock: lock protecting walk_head
* @entries: number of entries in the table
*/
struct mesh_table {
struct hlist_head known_gates;
spinlock_t gates_lock;
struct rhashtable rhead;
struct hlist_head walk_head;
spinlock_t walk_lock;
atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */
};
struct ieee80211_if_mesh {
struct timer_list housekeeping_timer;
struct timer_list mesh_path_timer;
@ -721,8 +741,8 @@ struct ieee80211_if_mesh {
/* offset from skb->data while building IE */
int meshconf_offset;
struct mesh_table *mesh_paths;
struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */
struct mesh_table mesh_paths;
struct mesh_table mpp_paths; /* Store paths for MPP&MAP */
int mesh_paths_generation;
int mpp_paths_generation;
};


@ -127,26 +127,6 @@ struct mesh_path {
u32 path_change_count;
};
/**
* struct mesh_table
*
* @known_gates: list of known mesh gates and their mpaths by the station. The
* gate's mpath may or may not be resolved and active.
* @gates_lock: protects updates to known_gates
* @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr
* @walk_head: linked list containing all mesh_path objects
* @walk_lock: lock protecting walk_head
* @entries: number of entries in the table
*/
struct mesh_table {
struct hlist_head known_gates;
spinlock_t gates_lock;
struct rhashtable rhead;
struct hlist_head walk_head;
spinlock_t walk_lock;
atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */
};
/* Recent multicast cache */
/* RMC_BUCKETS must be a power of 2, maximum 256 */
#define RMC_BUCKETS 256
@ -308,7 +288,7 @@ int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata,
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta);
void mesh_path_flush_pending(struct mesh_path *mpath);
void mesh_path_tx_pending(struct mesh_path *mpath);
int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata);
void mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata);
void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata);
int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr);
void mesh_path_timer(struct timer_list *t);


@ -47,32 +47,24 @@ static void mesh_path_rht_free(void *ptr, void *tblptr)
mesh_path_free_rcu(tbl, mpath);
}
static struct mesh_table *mesh_table_alloc(void)
static void mesh_table_init(struct mesh_table *tbl)
{
struct mesh_table *newtbl;
INIT_HLIST_HEAD(&tbl->known_gates);
INIT_HLIST_HEAD(&tbl->walk_head);
atomic_set(&tbl->entries, 0);
spin_lock_init(&tbl->gates_lock);
spin_lock_init(&tbl->walk_lock);
newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
if (!newtbl)
return NULL;
INIT_HLIST_HEAD(&newtbl->known_gates);
INIT_HLIST_HEAD(&newtbl->walk_head);
atomic_set(&newtbl->entries, 0);
spin_lock_init(&newtbl->gates_lock);
spin_lock_init(&newtbl->walk_lock);
if (rhashtable_init(&newtbl->rhead, &mesh_rht_params)) {
kfree(newtbl);
return NULL;
}
return newtbl;
/* rhashtable_init() may fail only in case of wrong
* mesh_rht_params
*/
WARN_ON(rhashtable_init(&tbl->rhead, &mesh_rht_params));
}
static void mesh_table_free(struct mesh_table *tbl)
{
rhashtable_free_and_destroy(&tbl->rhead,
mesh_path_rht_free, tbl);
kfree(tbl);
}
/**
@ -238,13 +230,13 @@ static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
struct mesh_path *
mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
return mpath_lookup(sdata->u.mesh.mesh_paths, dst, sdata);
return mpath_lookup(&sdata->u.mesh.mesh_paths, dst, sdata);
}
struct mesh_path *
mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
return mpath_lookup(sdata->u.mesh.mpp_paths, dst, sdata);
return mpath_lookup(&sdata->u.mesh.mpp_paths, dst, sdata);
}
static struct mesh_path *
@ -281,7 +273,7 @@ __mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
struct mesh_path *
mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
return __mesh_path_lookup_by_idx(sdata->u.mesh.mesh_paths, idx);
return __mesh_path_lookup_by_idx(&sdata->u.mesh.mesh_paths, idx);
}
/**
@ -296,7 +288,7 @@ mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
struct mesh_path *
mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
return __mesh_path_lookup_by_idx(sdata->u.mesh.mpp_paths, idx);
return __mesh_path_lookup_by_idx(&sdata->u.mesh.mpp_paths, idx);
}
/**
@ -309,7 +301,7 @@ int mesh_path_add_gate(struct mesh_path *mpath)
int err;
rcu_read_lock();
tbl = mpath->sdata->u.mesh.mesh_paths;
tbl = &mpath->sdata->u.mesh.mesh_paths;
spin_lock_bh(&mpath->state_lock);
if (mpath->is_gate) {
@ -418,7 +410,7 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
if (!new_mpath)
return ERR_PTR(-ENOMEM);
tbl = sdata->u.mesh.mesh_paths;
tbl = &sdata->u.mesh.mesh_paths;
spin_lock_bh(&tbl->walk_lock);
mpath = rhashtable_lookup_get_insert_fast(&tbl->rhead,
&new_mpath->rhash,
@ -460,7 +452,7 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,
return -ENOMEM;
memcpy(new_mpath->mpp, mpp, ETH_ALEN);
tbl = sdata->u.mesh.mpp_paths;
tbl = &sdata->u.mesh.mpp_paths;
spin_lock_bh(&tbl->walk_lock);
ret = rhashtable_lookup_insert_fast(&tbl->rhead,
@ -489,7 +481,7 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,
void mesh_plink_broken(struct sta_info *sta)
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
struct mesh_table *tbl = &sdata->u.mesh.mesh_paths;
static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
struct mesh_path *mpath;
@ -548,7 +540,7 @@ static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath)
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
struct mesh_table *tbl = &sdata->u.mesh.mesh_paths;
struct mesh_path *mpath;
struct hlist_node *n;
@ -563,7 +555,7 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
const u8 *proxy)
{
struct mesh_table *tbl = sdata->u.mesh.mpp_paths;
struct mesh_table *tbl = &sdata->u.mesh.mpp_paths;
struct mesh_path *mpath;
struct hlist_node *n;
@ -597,8 +589,8 @@ static void table_flush_by_iface(struct mesh_table *tbl)
*/
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
table_flush_by_iface(sdata->u.mesh.mesh_paths);
table_flush_by_iface(sdata->u.mesh.mpp_paths);
table_flush_by_iface(&sdata->u.mesh.mesh_paths);
table_flush_by_iface(&sdata->u.mesh.mpp_paths);
}
/**
@ -644,7 +636,7 @@ int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
/* flush relevant mpp entries first */
mpp_flush_by_proxy(sdata, addr);
err = table_path_del(sdata->u.mesh.mesh_paths, sdata, addr);
err = table_path_del(&sdata->u.mesh.mesh_paths, sdata, addr);
sdata->u.mesh.mesh_paths_generation++;
return err;
}
@ -682,7 +674,7 @@ int mesh_path_send_to_gates(struct mesh_path *mpath)
struct mesh_path *gate;
bool copy = false;
tbl = sdata->u.mesh.mesh_paths;
tbl = &sdata->u.mesh.mesh_paths;
rcu_read_lock();
hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
@ -762,29 +754,10 @@ void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
mesh_path_tx_pending(mpath);
}
int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata)
void mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata)
{
struct mesh_table *tbl_path, *tbl_mpp;
int ret;
tbl_path = mesh_table_alloc();
if (!tbl_path)
return -ENOMEM;
tbl_mpp = mesh_table_alloc();
if (!tbl_mpp) {
ret = -ENOMEM;
goto free_path;
}
sdata->u.mesh.mesh_paths = tbl_path;
sdata->u.mesh.mpp_paths = tbl_mpp;
return 0;
free_path:
mesh_table_free(tbl_path);
return ret;
mesh_table_init(&sdata->u.mesh.mesh_paths);
mesh_table_init(&sdata->u.mesh.mpp_paths);
}
static
@ -806,12 +779,12 @@ void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
mesh_path_tbl_expire(sdata, sdata->u.mesh.mesh_paths);
mesh_path_tbl_expire(sdata, sdata->u.mesh.mpp_paths);
mesh_path_tbl_expire(sdata, &sdata->u.mesh.mesh_paths);
mesh_path_tbl_expire(sdata, &sdata->u.mesh.mpp_paths);
}
void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata)
{
mesh_table_free(sdata->u.mesh.mesh_paths);
mesh_table_free(sdata->u.mesh.mpp_paths);
mesh_table_free(&sdata->u.mesh.mesh_paths);
mesh_table_free(&sdata->u.mesh.mpp_paths);
}
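
mesh_table_init() can return void above because the two tables are now embedded in the interface structure rather than allocated, which removes the -ENOMEM unwind path from mesh_pathtbl_init(). A toy sketch of that trade-off using hypothetical types, not the mac80211 structures:

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for struct mesh_table. */
struct toy_table {
	int entries;
};

/* Embedded members: initialisation cannot fail, unlike a variant that
 * would have to allocate each table and unwind on failure. */
struct toy_iface {
	struct toy_table mesh_paths;
	struct toy_table mpp_paths;
};

static void toy_table_init(struct toy_table *tbl)
{
	memset(tbl, 0, sizeof(*tbl));
}

int main(void)
{
	struct toy_iface iface;

	toy_table_init(&iface.mesh_paths);
	toy_table_init(&iface.mpp_paths);
	printf("%d %d\n", iface.mesh_paths.entries, iface.mpp_paths.entries);
	return 0;
}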


@ -5279,7 +5279,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
*/
if (new_sta) {
u32 rates = 0, basic_rates = 0;
bool have_higher_than_11mbit;
bool have_higher_than_11mbit = false;
int min_rate = INT_MAX, min_rate_index = -1;
const struct cfg80211_bss_ies *ies;
int shift = ieee80211_vif_get_shift(&sdata->vif);


@ -85,8 +85,8 @@ void mctp_neigh_remove_dev(struct mctp_dev *mdev)
mutex_unlock(&net->mctp.neigh_lock);
}
// TODO: add a "source" flag so netlink can only delete static neighbours?
static int mctp_neigh_remove(struct mctp_dev *mdev, mctp_eid_t eid)
static int mctp_neigh_remove(struct mctp_dev *mdev, mctp_eid_t eid,
enum mctp_neigh_source source)
{
struct net *net = dev_net(mdev->dev);
struct mctp_neigh *neigh, *tmp;
@ -94,7 +94,8 @@ static int mctp_neigh_remove(struct mctp_dev *mdev, mctp_eid_t eid)
mutex_lock(&net->mctp.neigh_lock);
list_for_each_entry_safe(neigh, tmp, &net->mctp.neighbours, list) {
if (neigh->dev == mdev && neigh->eid == eid) {
if (neigh->dev == mdev && neigh->eid == eid &&
neigh->source == source) {
list_del_rcu(&neigh->list);
/* TODO: immediate RTM_DELNEIGH */
call_rcu(&neigh->rcu, __mctp_neigh_free);
@ -202,7 +203,7 @@ static int mctp_rtm_delneigh(struct sk_buff *skb, struct nlmsghdr *nlh,
if (!mdev)
return -ENODEV;
return mctp_neigh_remove(mdev, eid);
return mctp_neigh_remove(mdev, eid, MCTP_NEIGH_STATIC);
}
static int mctp_fill_neigh(struct sk_buff *skb, u32 portid, u32 seq, int event,


@ -306,7 +306,7 @@ static int nr_setsockopt(struct socket *sock, int level, int optname,
if (optlen < sizeof(unsigned int))
return -EINVAL;
if (copy_from_sockptr(&opt, optval, sizeof(unsigned int)))
if (copy_from_sockptr(&opt, optval, sizeof(unsigned long)))
return -EFAULT;
switch (optname) {
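
The nr_setsockopt() change above makes the copy length match the destination variable (an unsigned long at this point in the tree); copying fewer bytes than the destination holds leaves its upper bytes stale. A userspace illustration of that mismatch on an LP64 system:

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned int  user_val = 42;     /* 4 bytes of user-supplied data */
	unsigned long dest = ~0UL;       /* pretend this is uninitialised */

	/* Copying only sizeof(unsigned int) into an unsigned long leaves the
	 * upper bytes of dest untouched on 64-bit, so dest is not 42. */
	memcpy(&dest, &user_val, sizeof(unsigned int));
	printf("partial copy: %lu\n", dest);

	/* Copying the full width from a matching type is well defined. */
	unsigned long full_val = 42;
	memcpy(&dest, &full_val, sizeof(unsigned long));
	printf("full copy:    %lu\n", dest); /* 42 */
	return 0;
}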


@ -1421,10 +1421,8 @@ static int qfq_init_qdisc(struct Qdisc *sch, struct nlattr *opt,
if (err < 0)
return err;
if (qdisc_dev(sch)->tx_queue_len + 1 > QFQ_MAX_AGG_CLASSES)
max_classes = QFQ_MAX_AGG_CLASSES;
else
max_classes = qdisc_dev(sch)->tx_queue_len + 1;
max_classes = min_t(u64, (u64)qdisc_dev(sch)->tx_queue_len + 1,
QFQ_MAX_AGG_CLASSES);
/* max_cl_shift = floor(log_2(max_classes)) */
max_cl_shift = __fls(max_classes);
q->max_agg_classes = 1<<max_cl_shift;
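
qfq_init_qdisc() now clamps tx_queue_len + 1 with min_t() and then takes floor(log2) with __fls(). A userspace equivalent, with a portable bit loop standing in for __fls() (illustrative only):

#include <stdio.h>

#define TOY_MAX_AGG_CLASSES 8           /* stand-in for QFQ_MAX_AGG_CLASSES */

/* floor(log2(x)) for x > 0; __fls() returns the same index in the kernel. */
static unsigned int floor_log2(unsigned long x)
{
	unsigned int shift = 0;

	while (x >>= 1)
		shift++;
	return shift;
}

int main(void)
{
	unsigned long tx_queue_len = 1000;  /* example device queue length */
	unsigned long max_classes = tx_queue_len + 1;
	unsigned int max_cl_shift;

	/* Clamp first (min_t() above), then round down to a power of two. */
	if (max_classes > TOY_MAX_AGG_CLASSES)
		max_classes = TOY_MAX_AGG_CLASSES;

	max_cl_shift = floor_log2(max_classes);
	printf("max_agg_classes = %lu\n", 1UL << max_cl_shift); /* 8 */
	return 0;
}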


@ -245,48 +245,44 @@ static size_t inet_assoc_attr_size(struct sctp_association *asoc)
+ 64;
}
static int sctp_tsp_dump_one(struct sctp_transport *tsp, void *p)
static int sctp_sock_dump_one(struct sctp_endpoint *ep, struct sctp_transport *tsp, void *p)
{
struct sctp_association *assoc = tsp->asoc;
struct sock *sk = tsp->asoc->base.sk;
struct sctp_comm_param *commp = p;
struct sk_buff *in_skb = commp->skb;
struct sock *sk = ep->base.sk;
const struct inet_diag_req_v2 *req = commp->r;
const struct nlmsghdr *nlh = commp->nlh;
struct net *net = sock_net(in_skb->sk);
struct sk_buff *skb = commp->skb;
struct sk_buff *rep;
int err;
err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
if (err)
goto out;
return err;
err = -ENOMEM;
rep = nlmsg_new(inet_assoc_attr_size(assoc), GFP_KERNEL);
if (!rep)
goto out;
return -ENOMEM;
lock_sock(sk);
if (sk != assoc->base.sk) {
release_sock(sk);
sk = assoc->base.sk;
lock_sock(sk);
}
err = inet_sctp_diag_fill(sk, assoc, rep, req,
sk_user_ns(NETLINK_CB(in_skb).sk),
NETLINK_CB(in_skb).portid,
nlh->nlmsg_seq, 0, nlh,
commp->net_admin);
release_sock(sk);
if (err < 0) {
WARN_ON(err == -EMSGSIZE);
kfree_skb(rep);
if (ep != assoc->ep) {
err = -EAGAIN;
goto out;
}
err = nlmsg_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid);
err = inet_sctp_diag_fill(sk, assoc, rep, req, sk_user_ns(NETLINK_CB(skb).sk),
NETLINK_CB(skb).portid, commp->nlh->nlmsg_seq, 0,
commp->nlh, commp->net_admin);
if (err < 0) {
WARN_ON(err == -EMSGSIZE);
goto out;
}
release_sock(sk);
return nlmsg_unicast(sock_net(skb->sk)->diag_nlsk, rep, NETLINK_CB(skb).portid);
out:
release_sock(sk);
kfree_skb(rep);
return err;
}
@ -429,15 +425,15 @@ static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
static int sctp_diag_dump_one(struct netlink_callback *cb,
const struct inet_diag_req_v2 *req)
{
struct sk_buff *in_skb = cb->skb;
struct net *net = sock_net(in_skb->sk);
struct sk_buff *skb = cb->skb;
struct net *net = sock_net(skb->sk);
const struct nlmsghdr *nlh = cb->nlh;
union sctp_addr laddr, paddr;
struct sctp_comm_param commp = {
.skb = in_skb,
.skb = skb,
.r = req,
.nlh = nlh,
.net_admin = netlink_net_capable(in_skb, CAP_NET_ADMIN),
.net_admin = netlink_net_capable(skb, CAP_NET_ADMIN),
};
if (req->sdiag_family == AF_INET) {
@ -460,7 +456,7 @@ static int sctp_diag_dump_one(struct netlink_callback *cb,
paddr.v6.sin6_family = AF_INET6;
}
return sctp_transport_lookup_process(sctp_tsp_dump_one,
return sctp_transport_lookup_process(sctp_sock_dump_one,
net, &laddr, &paddr, &commp);
}

View File

@ -5312,23 +5312,31 @@ int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *),
}
EXPORT_SYMBOL_GPL(sctp_for_each_endpoint);
int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *),
struct net *net,
int sctp_transport_lookup_process(sctp_callback_t cb, struct net *net,
const union sctp_addr *laddr,
const union sctp_addr *paddr, void *p)
{
struct sctp_transport *transport;
int err;
struct sctp_endpoint *ep;
int err = -ENOENT;
rcu_read_lock();
transport = sctp_addrs_lookup_transport(net, laddr, paddr);
if (!transport) {
rcu_read_unlock();
return err;
}
ep = transport->asoc->ep;
if (!sctp_endpoint_hold(ep)) { /* asoc can be peeled off */
sctp_transport_put(transport);
rcu_read_unlock();
return err;
}
rcu_read_unlock();
if (!transport)
return -ENOENT;
err = cb(transport, p);
err = cb(ep, transport, p);
sctp_endpoint_put(ep);
sctp_transport_put(transport);
return err;
}
EXPORT_SYMBOL_GPL(sctp_transport_lookup_process);
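
sctp_transport_lookup_process() now takes references on both the transport and its endpoint before leaving the RCU read-side section, so the callback runs on objects that cannot be freed underneath it. A much-simplified sketch of that take-a-reference-while-liveness-is-guaranteed pattern, using a plain mutex and counter instead of RCU (toy types, not the SCTP code):

#include <pthread.h>
#include <stdio.h>

struct obj {
	int refcnt;                     /* protected by lookup_lock here */
	int payload;
};

static pthread_mutex_t lookup_lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj table_entry = { .refcnt = 1, .payload = 42 };

/* Take the extra reference while the lock that keeps the object alive is
 * still held; only then is it safe to use the object after unlocking. */
static struct obj *lookup_and_hold(void)
{
	struct obj *o;

	pthread_mutex_lock(&lookup_lock);
	o = &table_entry;
	o->refcnt++;
	pthread_mutex_unlock(&lookup_lock);
	return o;
}

static void put_obj(struct obj *o)
{
	pthread_mutex_lock(&lookup_lock);
	o->refcnt--;                    /* a real version would free at zero */
	pthread_mutex_unlock(&lookup_lock);
}

int main(void)
{
	struct obj *o = lookup_and_hold();

	printf("payload %d, refcnt %d\n", o->payload, o->refcnt);
	put_obj(o);
	return 0;
}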


@ -1461,6 +1461,8 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
msg_set_syn(hdr, 1);
}
memset(&skaddr, 0, sizeof(skaddr));
/* Determine destination */
if (atype == TIPC_SERVICE_RANGE) {
return tipc_sendmcast(sock, ua, m, dlen, timeout);


@ -677,6 +677,8 @@ static __poll_t xsk_poll(struct file *file, struct socket *sock,
struct xdp_sock *xs = xdp_sk(sk);
struct xsk_buff_pool *pool;
sock_poll_wait(file, sock, wait);
if (unlikely(!xsk_is_bound(xs)))
return mask;
@ -688,8 +690,6 @@ static __poll_t xsk_poll(struct file *file, struct socket *sock,
else
/* Poll needs to drive Tx also in copy mode */
__xsk_sendmsg(sk);
} else {
sock_poll_wait(file, sock, wait);
}
if (xs->rx && !xskq_prod_is_empty(xs->rx))


@ -2473,7 +2473,7 @@ static int process_switch_event(struct perf_tool *tool,
if (perf_event__process_switch(tool, event, sample, machine) < 0)
return -1;
if (scripting_ops && scripting_ops->process_switch)
if (scripting_ops && scripting_ops->process_switch && !filter_cpu(sample))
scripting_ops->process_switch(event, sample, machine);
if (!script->show_switch_events)


@ -32,8 +32,7 @@ try:
except:
broken_pipe_exception = IOError
glb_switch_str = None
glb_switch_printed = True
glb_switch_str = {}
glb_insn = False
glb_disassembler = None
glb_src = False
@ -70,6 +69,7 @@ def trace_begin():
ap = argparse.ArgumentParser(usage = "", add_help = False)
ap.add_argument("--insn-trace", action='store_true')
ap.add_argument("--src-trace", action='store_true')
ap.add_argument("--all-switch-events", action='store_true')
global glb_args
global glb_insn
global glb_src
@ -256,10 +256,6 @@ def print_srccode(comm, param_dict, sample, symbol, dso, with_insn):
print(start_str, src_str)
def do_process_event(param_dict):
global glb_switch_printed
if not glb_switch_printed:
print(glb_switch_str)
glb_switch_printed = True
event_attr = param_dict["attr"]
sample = param_dict["sample"]
raw_buf = param_dict["raw_buf"]
@ -274,6 +270,11 @@ def do_process_event(param_dict):
dso = get_optional(param_dict, "dso")
symbol = get_optional(param_dict, "symbol")
cpu = sample["cpu"]
if cpu in glb_switch_str:
print(glb_switch_str[cpu])
del glb_switch_str[cpu]
if name[0:12] == "instructions":
if glb_src:
print_srccode(comm, param_dict, sample, symbol, dso, True)
@ -336,8 +337,6 @@ def auxtrace_error(typ, code, cpu, pid, tid, ip, ts, msg, cpumode, *x):
sys.exit(1)
def context_switch(ts, cpu, pid, tid, np_pid, np_tid, machine_pid, out, out_preempt, *x):
global glb_switch_printed
global glb_switch_str
if out:
out_str = "Switch out "
else:
@ -350,6 +349,10 @@ def context_switch(ts, cpu, pid, tid, np_pid, np_tid, machine_pid, out, out_pree
machine_str = ""
else:
machine_str = "machine PID %d" % machine_pid
glb_switch_str = "%16s %5d/%-5d [%03u] %9u.%09u %5d/%-5d %s %s" % \
switch_str = "%16s %5d/%-5d [%03u] %9u.%09u %5d/%-5d %s %s" % \
(out_str, pid, tid, cpu, ts / 1000000000, ts %1000000000, np_pid, np_tid, machine_str, preempt_str)
glb_switch_printed = False
if glb_args.all_switch_events:
print(switch_str);
else:
global glb_switch_str
glb_switch_str[cpu] = switch_str


@ -170,9 +170,11 @@ void ui__exit(bool wait_for_ok)
"Press any key...", 0);
SLtt_set_cursor_visibility(1);
SLsmg_refresh();
SLsmg_reset_smg();
if (!pthread_mutex_trylock(&ui__lock)) {
SLsmg_refresh();
SLsmg_reset_smg();
pthread_mutex_unlock(&ui__lock);
}
SLang_reset_tty();
perf_error__unregister(&perf_tui_eops);
}
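
ui__exit() now only touches the SLang screen state when the UI lock can be taken without blocking, so exiting from a path that already holds the lock cannot deadlock. A minimal pthread_mutex_trylock() sketch of that guard (hypothetical toy_ui_exit(), not the perf TUI code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ui_lock = PTHREAD_MUTEX_INITIALIZER;

/* Only run the cleanup that needs the lock if it can be taken right now;
 * otherwise skip it rather than block (or deadlock) while exiting. */
static void toy_ui_exit(void)
{
	if (!pthread_mutex_trylock(&ui_lock)) {
		printf("refreshing and resetting the screen\n");
		pthread_mutex_unlock(&ui_lock);
	} else {
		printf("lock busy, skipping screen reset\n");
	}
	printf("resetting the tty\n");
}

int main(void)
{
	toy_ui_exit();                   /* lock free: full cleanup */

	pthread_mutex_lock(&ui_lock);    /* simulate exiting with the lock held */
	toy_ui_exit();                   /* trylock fails: partial cleanup only */
	pthread_mutex_unlock(&ui_lock);
	return 0;
}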


@ -66,7 +66,12 @@ static bool key_equal(const void *key1, const void *key2,
struct hashmap *ids__new(void)
{
return hashmap__new(key_hash, key_equal, NULL);
struct hashmap *hash;
hash = hashmap__new(key_hash, key_equal, NULL);
if (IS_ERR(hash))
return NULL;
return hash;
}
void ids__free(struct hashmap *ids)
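
hashmap__new() reports failure with an encoded error pointer rather than NULL, so ids__new() now converts that into the NULL its own callers test for. A toy sketch of the error-pointer convention with simplified macros (the real helpers live under tools/include in the kernel tree):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified error-pointer helpers, for illustration only. */
#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static void *hashmap_new_stub(int fail)
{
	if (fail)
		return ERR_PTR(-ENOMEM); /* encoded error, not NULL */
	return malloc(16);
}

/* Callers of this wrapper only check for NULL, so translate the error. */
static void *ids_new_stub(int fail)
{
	void *hash = hashmap_new_stub(fail);

	if (IS_ERR(hash))
		return NULL;
	return hash;
}

int main(void)
{
	void *ok = ids_new_stub(0);
	void *bad = ids_new_stub(1);

	printf("ok=%p bad=%p\n", ok, bad); /* bad is NULL, not a bogus pointer */
	free(ok);
	return 0;
}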


@ -3625,6 +3625,7 @@ static int intel_pt_parse_vm_tm_corr_arg(struct intel_pt *pt, char **args)
*args = p;
return 0;
}
p += 1;
while (1) {
vmcs = strtoull(p, &p, 0);
if (errno)


@ -1659,6 +1659,21 @@ bool is_pmu_core(const char *name)
return !strcmp(name, "cpu") || is_arm_pmu_core(name);
}
static bool pmu_alias_is_duplicate(struct sevent *alias_a,
struct sevent *alias_b)
{
/* Different names -> never duplicates */
if (strcmp(alias_a->name, alias_b->name))
return false;
/* Don't remove duplicates for hybrid PMUs */
if (perf_pmu__is_hybrid(alias_a->pmu) &&
perf_pmu__is_hybrid(alias_b->pmu))
return false;
return true;
}
void print_pmu_events(const char *event_glob, bool name_only, bool quiet_flag,
bool long_desc, bool details_flag, bool deprecated,
const char *pmu_name)
@ -1744,12 +1759,8 @@ void print_pmu_events(const char *event_glob, bool name_only, bool quiet_flag,
qsort(aliases, len, sizeof(struct sevent), cmp_sevent);
for (j = 0; j < len; j++) {
/* Skip duplicates */
if (j > 0 && !strcmp(aliases[j].name, aliases[j - 1].name)) {
if (!aliases[j].pmu || !aliases[j - 1].pmu ||
!strcmp(aliases[j].pmu, aliases[j - 1].pmu)) {
continue;
}
}
if (j > 0 && pmu_alias_is_duplicate(&aliases[j], &aliases[j - 1]))
continue;
if (name_only) {
printf("%s ", aliases[j].name);
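
print_pmu_events() relies on the alias array having been qsort()ed so equal names sit next to each other, and pmu_alias_is_duplicate() then compares each entry with its predecessor. A standalone version of that sort-then-skip-adjacent-duplicates pattern:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int cmp_str(const void *a, const void *b)
{
	return strcmp(*(const char * const *)a, *(const char * const *)b);
}

int main(void)
{
	const char *names[] = { "cycles", "branches", "cycles", "instructions" };
	size_t len = sizeof(names) / sizeof(names[0]);
	size_t j;

	/* Sorting first guarantees that equal names end up adjacent. */
	qsort(names, len, sizeof(names[0]), cmp_str);

	for (j = 0; j < len; j++) {
		/* Skip an entry that duplicates its immediate predecessor. */
		if (j > 0 && !strcmp(names[j], names[j - 1]))
			continue;
		printf("%s\n", names[j]);
	}
	return 0;
}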


@ -1078,7 +1078,7 @@
.errstr_unpriv = "R0 pointer -= pointer prohibited",
},
{
"map access: trying to leak tained dst reg",
"map access: trying to leak tainted dst reg",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),

tools/testing/selftests/net/amt.sh Normal file → Executable file


@ -193,7 +193,8 @@ for family in 4 6; do
SUFFIX="64 nodad"
VXDEV=vxlan6
IPT=ip6tables
PING="ping6"
# Use ping6 on systems where ping doesn't handle IPv6
ping -w 1 -c 1 ::1 > /dev/null 2>&1 || PING="ping6"
fi
echo "IPv$family"


@ -87,7 +87,7 @@ static bool test_uffdio_minor = false;
static bool map_shared;
static int shm_fd;
static int huge_fd;
static int huge_fd = -1; /* only used for hugetlb_shared test */
static char *huge_fd_off0;
static unsigned long long *count_verify;
static int uffd = -1;
@ -223,6 +223,9 @@ static void noop_alias_mapping(__u64 *start, size_t len, unsigned long offset)
static void hugetlb_release_pages(char *rel_area)
{
if (huge_fd == -1)
return;
if (fallocate(huge_fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
rel_area == huge_fd_off0 ? 0 : nr_pages * page_size,
nr_pages * page_size))
@ -235,16 +238,17 @@ static void hugetlb_allocate_area(void **alloc_area)
char **alloc_area_alias;
*alloc_area = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE,
(map_shared ? MAP_SHARED : MAP_PRIVATE) |
MAP_HUGETLB,
huge_fd, *alloc_area == area_src ? 0 :
nr_pages * page_size);
map_shared ? MAP_SHARED :
MAP_PRIVATE | MAP_HUGETLB |
(*alloc_area == area_src ? 0 : MAP_NORESERVE),
huge_fd,
*alloc_area == area_src ? 0 : nr_pages * page_size);
if (*alloc_area == MAP_FAILED)
err("mmap of hugetlbfs file failed");
if (map_shared) {
area_alias = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_HUGETLB,
MAP_SHARED,
huge_fd, *alloc_area == area_src ? 0 :
nr_pages * page_size);
if (area_alias == MAP_FAILED)