commit 7869e58894

    Merge remote-tracking branch 'tip/perf/urgent' into perf/core

    To pick up fixes.

    Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
@@ -10,6 +10,7 @@ Required properties:
      "brcm,bcm53128"
      "brcm,bcm5365"
      "brcm,bcm5395"
      "brcm,bcm5389"
      "brcm,bcm5397"
      "brcm,bcm5398"
@@ -2,7 +2,7 @@ Kernel driver i2c-ocores

Supported adapters:
  * OpenCores.org I2C controller by Richard Herveille (see datasheet link)
    Datasheet: http://www.opencores.org/projects.cgi/web/i2c/overview
    https://opencores.org/project/i2c/overview

Author: Peter Korsgaard <jacmet@sunsite.dk>
@@ -15513,6 +15513,14 @@ L: linux-kernel@vger.kernel.org
S: Supported
F: drivers/char/xillybus/

XLP9XX I2C DRIVER
M: George Cherian <george.cherian@cavium.com>
M: Jan Glauber <jglauber@cavium.com>
L: linux-i2c@vger.kernel.org
W: http://www.cavium.com
S: Supported
F: drivers/i2c/busses/i2c-xlp9xx.c

XRA1403 GPIO EXPANDER
M: Nandor Han <nandor.han@ge.com>
M: Semi Malinen <semi.malinen@ge.com>
@@ -721,6 +721,10 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
	if (value & ~known_bits)
		return -EOPNOTSUPP;

	/* Setting FRE without FR is not supported. */
	if ((value & (PR_FP_MODE_FR | PR_FP_MODE_FRE)) == PR_FP_MODE_FRE)
		return -EOPNOTSUPP;

	/* Avoid inadvertently triggering emulation */
	if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu &&
	    !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64))
@@ -818,7 +818,7 @@ long arch_ptrace(struct task_struct *child, long request,
				break;
			}
#endif
			tmp = get_fpr32(&fregs[addr - FPR_BASE], 0);
			tmp = get_fpr64(&fregs[addr - FPR_BASE], 0);
			break;
		case PC:
			tmp = regs->cp0_epc;
@@ -109,7 +109,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
					       addr & 1);
				break;
			}
			tmp = get_fpr32(&fregs[addr - FPR_BASE], 0);
			tmp = get_fpr64(&fregs[addr - FPR_BASE], 0);
			break;
		case PC:
			tmp = regs->cp0_epc;
@@ -21,7 +21,7 @@ LDFLAGS_purgatory.ro += -z nodefaultlib
KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes
KBUILD_CFLAGS += -Wno-pointer-sign -Wno-sign-compare
KBUILD_CFLAGS += -fno-zero-initialized-in-bss -fno-builtin -ffreestanding
KBUILD_CFLAGS += -c -MD -Os -m64
KBUILD_CFLAGS += -c -MD -Os -m64 -msoft-float
KBUILD_CFLAGS += $(call cc-option,-fno-PIE)

$(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
@@ -1151,8 +1151,8 @@ static void eprom_get_byte(struct zatm_dev *zatm_dev, unsigned char *byte,
}


static unsigned char eprom_try_esi(struct atm_dev *dev, unsigned short cmd,
				   int offset, int swap)
static int eprom_try_esi(struct atm_dev *dev, unsigned short cmd, int offset,
			 int swap)
{
	unsigned char buf[ZEPROM_SIZE];
	struct zatm_dev *zatm_dev;
@@ -152,8 +152,8 @@ static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
	      EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
	writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL);

	memset(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM, 0,
	       EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32));
	memset_io(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM, 0,
		  EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32));

	eip197_write_firmware(priv, fw[FW_IFPP], EIP197_PE_ICE_FPP_CTRL,
			      EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN);
@@ -4555,8 +4555,8 @@ static int dm_update_crtcs_state(struct dc *dc,
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = NULL;
		struct amdgpu_dm_connector *aconnector = NULL;
		struct drm_connector_state *new_con_state = NULL;
		struct dm_connector_state *dm_conn_state = NULL;
		struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
		struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
		struct drm_plane_state *new_plane_state = NULL;

		new_stream = NULL;
@@ -4577,19 +4577,23 @@ static int dm_update_crtcs_state(struct dc *dc,
		/* TODO This hack should go away */
		if (aconnector && enable) {
			// Make sure fake sink is created in plug-in scenario
			new_con_state = drm_atomic_get_connector_state(state,
			drm_new_conn_state = drm_atomic_get_new_connector_state(state,
								       &aconnector->base);
			drm_old_conn_state = drm_atomic_get_old_connector_state(state,
								       &aconnector->base);

			if (IS_ERR(new_con_state)) {
				ret = PTR_ERR_OR_ZERO(new_con_state);

			if (IS_ERR(drm_new_conn_state)) {
				ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
				break;
			}

			dm_conn_state = to_dm_connector_state(new_con_state);
			dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
			dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);

			new_stream = create_stream_for_sink(aconnector,
							    &new_crtc_state->mode,
							    dm_conn_state);
							    dm_new_conn_state);

			/*
			 * we can have no stream on ACTION_SET if a display
@@ -4695,20 +4699,30 @@ next_crtc:
		 * We want to do dc stream updates that do not require a
		 * full modeset below.
		 */
		if (!enable || !aconnector || modereset_required(new_crtc_state))
		if (!(enable && aconnector && new_crtc_state->enable &&
		      new_crtc_state->active))
			continue;
		/*
		 * Given above conditions, the dc state cannot be NULL because:
		 * 1. We're attempting to enable a CRTC. Which has a...
		 * 2. Valid connector attached, and
		 * 3. User does not want to reset it (disable or mark inactive,
		 *    which can happen on a CRTC that's already disabled).
		 * => It currently exists.
		 * 1. We're in the process of enabling CRTCs (just been added
		 *    to the dc context, or already is on the context)
		 * 2. Has a valid connector attached, and
		 * 3. Is currently active and enabled.
		 * => The dc stream state currently exists.
		 */
		BUG_ON(dm_new_crtc_state->stream == NULL);

		/* Color managment settings */
		if (dm_new_crtc_state->base.color_mgmt_changed) {
		/* Scaling or underscan settings */
		if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
			update_stream_scaling_settings(
				&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);

		/*
		 * Color management settings. We also update color properties
		 * when a modeset is needed, to ensure it gets reprogrammed.
		 */
		if (dm_new_crtc_state->base.color_mgmt_changed ||
		    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
			ret = amdgpu_dm_set_regamma_lut(dm_new_crtc_state);
			if (ret)
				goto fail;
@@ -2077,7 +2077,7 @@ static irqreturn_t dw_hdmi_hardirq(int irq, void *dev_id)
	return ret;
}

void __dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense)
void dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense)
{
	mutex_lock(&hdmi->mutex);
@@ -2103,13 +2103,6 @@ void __dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense)
	}
	mutex_unlock(&hdmi->mutex);
}

void dw_hdmi_setup_rx_sense(struct device *dev, bool hpd, bool rx_sense)
{
	struct dw_hdmi *hdmi = dev_get_drvdata(dev);

	__dw_hdmi_setup_rx_sense(hdmi, hpd, rx_sense);
}
EXPORT_SYMBOL_GPL(dw_hdmi_setup_rx_sense);

static irqreturn_t dw_hdmi_irq(int irq, void *dev_id)
@@ -2145,9 +2138,9 @@ static irqreturn_t dw_hdmi_irq(int irq, void *dev_id)
	 */
	if (intr_stat &
	    (HDMI_IH_PHY_STAT0_RX_SENSE | HDMI_IH_PHY_STAT0_HPD)) {
		__dw_hdmi_setup_rx_sense(hdmi,
					 phy_stat & HDMI_PHY_HPD,
					 phy_stat & HDMI_PHY_RX_SENSE);
		dw_hdmi_setup_rx_sense(hdmi,
				       phy_stat & HDMI_PHY_HPD,
				       phy_stat & HDMI_PHY_RX_SENSE);

		if ((phy_stat & (HDMI_PHY_RX_SENSE | HDMI_PHY_HPD)) == 0)
			cec_notifier_set_phys_addr(hdmi->cec_notifier,
@@ -1145,6 +1145,7 @@ int drm_dp_psr_setup_time(const u8 psr_cap[EDP_PSR_RECEIVER_CAP_SIZE])
	static const u16 psr_setup_time_us[] = {
		PSR_SETUP_TIME(330),
		PSR_SETUP_TIME(275),
		PSR_SETUP_TIME(220),
		PSR_SETUP_TIME(165),
		PSR_SETUP_TIME(110),
		PSR_SETUP_TIME(55),
@@ -4,6 +4,8 @@
 * Copyright © 2018 Intel Corporation
 */

#include <linux/nospec.h>

#include "i915_drv.h"
#include "i915_query.h"
#include <uapi/drm/i915_drm.h>
@@ -100,7 +102,7 @@ int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)

	for (i = 0; i < args->num_items; i++, user_item_ptr++) {
		struct drm_i915_query_item item;
		u64 func_idx;
		unsigned long func_idx;
		int ret;

		if (copy_from_user(&item, user_item_ptr, sizeof(item)))
@@ -109,12 +111,17 @@ int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
		if (item.query_id == 0)
			return -EINVAL;

		if (overflows_type(item.query_id - 1, unsigned long))
			return -EINVAL;

		func_idx = item.query_id - 1;

		if (func_idx < ARRAY_SIZE(i915_query_funcs))
			ret = -EINVAL;
		if (func_idx < ARRAY_SIZE(i915_query_funcs)) {
			func_idx = array_index_nospec(func_idx,
						      ARRAY_SIZE(i915_query_funcs));
			ret = i915_query_funcs[func_idx](dev_priv, &item);
		else
			ret = -EINVAL;
		}

		/* Only write the length back to userspace if they differ. */
		if (ret != item.length && put_user(ret, &user_item_ptr->length))
@@ -574,6 +574,36 @@ exit:
	return NOTIFY_OK;
}

static int
intel_lvds_connector_register(struct drm_connector *connector)
{
	struct intel_lvds_connector *lvds = to_lvds_connector(connector);
	int ret;

	ret = intel_connector_register(connector);
	if (ret)
		return ret;

	lvds->lid_notifier.notifier_call = intel_lid_notify;
	if (acpi_lid_notifier_register(&lvds->lid_notifier)) {
		DRM_DEBUG_KMS("lid notifier registration failed\n");
		lvds->lid_notifier.notifier_call = NULL;
	}

	return 0;
}

static void
intel_lvds_connector_unregister(struct drm_connector *connector)
{
	struct intel_lvds_connector *lvds = to_lvds_connector(connector);

	if (lvds->lid_notifier.notifier_call)
		acpi_lid_notifier_unregister(&lvds->lid_notifier);

	intel_connector_unregister(connector);
}

/**
 * intel_lvds_destroy - unregister and free LVDS structures
 * @connector: connector to free
@@ -586,9 +616,6 @@ static void intel_lvds_destroy(struct drm_connector *connector)
	struct intel_lvds_connector *lvds_connector =
		to_lvds_connector(connector);

	if (lvds_connector->lid_notifier.notifier_call)
		acpi_lid_notifier_unregister(&lvds_connector->lid_notifier);

	if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
		kfree(lvds_connector->base.edid);
@@ -609,8 +636,8 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
	.fill_modes = drm_helper_probe_single_connector_modes,
	.atomic_get_property = intel_digital_connector_atomic_get_property,
	.atomic_set_property = intel_digital_connector_atomic_set_property,
	.late_register = intel_connector_register,
	.early_unregister = intel_connector_unregister,
	.late_register = intel_lvds_connector_register,
	.early_unregister = intel_lvds_connector_unregister,
	.destroy = intel_lvds_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = intel_digital_connector_duplicate_state,
@@ -827,6 +854,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
			DMI_EXACT_MATCH(DMI_BOARD_NAME, "D525MW"),
		},
	},
	{
		.callback = intel_no_lvds_dmi_callback,
		.ident = "Radiant P845",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Radiant Systems Inc"),
			DMI_MATCH(DMI_PRODUCT_NAME, "P845"),
		},
	},

	{ }	/* terminating entry */
};
@@ -1150,12 +1185,6 @@ out:

	lvds_encoder->a3_power = lvds & LVDS_A3_POWER_MASK;

	lvds_connector->lid_notifier.notifier_call = intel_lid_notify;
	if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) {
		DRM_DEBUG_KMS("lid notifier registration failed\n");
		lvds_connector->lid_notifier.notifier_call = NULL;
	}

	return;

failed:
@@ -529,7 +529,7 @@ static irqreturn_t dw_hdmi_top_thread_irq(int irq, void *dev_id)
	if (stat & HDMITX_TOP_INTR_HPD_RISE)
		hpd_connected = true;

	dw_hdmi_setup_rx_sense(dw_hdmi->dev, hpd_connected,
	dw_hdmi_setup_rx_sense(dw_hdmi->hdmi, hpd_connected,
			       hpd_connected);

	drm_helper_hpd_irq_event(dw_hdmi->encoder.dev);
@@ -82,7 +82,7 @@ static int sdi_calc_clock_div(struct sdi_device *sdi, unsigned long pclk,
			      struct dispc_clock_info *dispc_cinfo)
{
	int i;
	struct sdi_clk_calc_ctx ctx = { .sdi = sdi };
	struct sdi_clk_calc_ctx ctx;

	/*
	 * DSS fclk gives us very few possibilities, so finding a good pixel
@@ -95,6 +95,9 @@ static int sdi_calc_clock_div(struct sdi_device *sdi, unsigned long pclk,
		bool ok;

		memset(&ctx, 0, sizeof(ctx));

		ctx.sdi = sdi;

		if (pclk > 1000 * i * i * i)
			ctx.pck_min = max(pclk - 1000 * i * i * i, 0lu);
		else
@@ -733,8 +733,8 @@ err_nomem:
		/* Reset the page to write-back before releasing */
		set_memory_wb((unsigned long)win->block[i].bdesc, 1);
#endif
		dma_free_coherent(msc_dev(msc), size, win->block[i].bdesc,
				  win->block[i].addr);
		dma_free_coherent(msc_dev(msc)->parent->parent, size,
				  win->block[i].bdesc, win->block[i].addr);
	}
	kfree(win);
@@ -769,7 +769,7 @@ static void msc_buffer_win_free(struct msc *msc, struct msc_window *win)
		/* Reset the page to write-back before releasing */
		set_memory_wb((unsigned long)win->block[i].bdesc, 1);
#endif
		dma_free_coherent(msc_dev(win->msc), PAGE_SIZE,
		dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
				  win->block[i].bdesc, win->block[i].addr);
	}
@@ -19,6 +19,7 @@
#include <linux/stm.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include "stm.h"

#include <uapi/linux/stm.h>
@@ -674,7 +675,7 @@ static void stm_device_release(struct device *dev)
{
	struct stm_device *stm = to_stm_device(dev);

	kfree(stm);
	vfree(stm);
}

int stm_register_device(struct device *parent, struct stm_data *stm_data,
@@ -691,7 +692,7 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
		return -EINVAL;

	nmasters = stm_data->sw_end - stm_data->sw_start + 1;
	stm = kzalloc(sizeof(*stm) + nmasters * sizeof(void *), GFP_KERNEL);
	stm = vzalloc(sizeof(*stm) + nmasters * sizeof(void *));
	if (!stm)
		return -ENOMEM;
@@ -744,7 +745,7 @@ err_device:
	/* matches device_initialize() above */
	put_device(&stm->dev);
err_free:
	kfree(stm);
	vfree(stm);

	return err;
}
@@ -1,6 +1,6 @@
/*
 * i2c-ocores.c: I2C bus driver for OpenCores I2C controller
 * (http://www.opencores.org/projects.cgi/web/i2c/overview).
 * (https://opencores.org/project/i2c/overview)
 *
 * Peter Korsgaard <jacmet@sunsite.dk>
 *
@ -158,6 +158,7 @@ config AT91_SAMA5D2_ADC
|
|||
depends on ARCH_AT91 || COMPILE_TEST
|
||||
depends on HAS_IOMEM
|
||||
depends on HAS_DMA
|
||||
select IIO_BUFFER
|
||||
select IIO_TRIGGERED_BUFFER
|
||||
help
|
||||
Say yes here to build support for Atmel SAMA5D2 ADC which is
|
||||
|
|
|
@ -348,55 +348,6 @@ static const u16 ad7793_sample_freq_avail[16] = {0, 470, 242, 123, 62, 50, 39,
|
|||
static const u16 ad7797_sample_freq_avail[16] = {0, 0, 0, 123, 62, 50, 0,
|
||||
33, 0, 17, 16, 12, 10, 8, 6, 4};
|
||||
|
||||
static ssize_t ad7793_read_frequency(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
|
||||
struct ad7793_state *st = iio_priv(indio_dev);
|
||||
|
||||
return sprintf(buf, "%d\n",
|
||||
st->chip_info->sample_freq_avail[AD7793_MODE_RATE(st->mode)]);
|
||||
}
|
||||
|
||||
static ssize_t ad7793_write_frequency(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
const char *buf,
|
||||
size_t len)
|
||||
{
|
||||
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
|
||||
struct ad7793_state *st = iio_priv(indio_dev);
|
||||
long lval;
|
||||
int i, ret;
|
||||
|
||||
ret = kstrtol(buf, 10, &lval);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (lval == 0)
|
||||
return -EINVAL;
|
||||
|
||||
for (i = 0; i < 16; i++)
|
||||
if (lval == st->chip_info->sample_freq_avail[i])
|
||||
break;
|
||||
if (i == 16)
|
||||
return -EINVAL;
|
||||
|
||||
ret = iio_device_claim_direct_mode(indio_dev);
|
||||
if (ret)
|
||||
return ret;
|
||||
st->mode &= ~AD7793_MODE_RATE(-1);
|
||||
st->mode |= AD7793_MODE_RATE(i);
|
||||
ad_sd_write_reg(&st->sd, AD7793_REG_MODE, sizeof(st->mode), st->mode);
|
||||
iio_device_release_direct_mode(indio_dev);
|
||||
|
||||
return len;
|
||||
}
|
||||
|
||||
static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
|
||||
ad7793_read_frequency,
|
||||
ad7793_write_frequency);
|
||||
|
||||
static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(
|
||||
"470 242 123 62 50 39 33 19 17 16 12 10 8 6 4");
|
||||
|
||||
|
@ -424,7 +375,6 @@ static IIO_DEVICE_ATTR_NAMED(in_m_in_scale_available,
|
|||
ad7793_show_scale_available, NULL, 0);
|
||||
|
||||
static struct attribute *ad7793_attributes[] = {
|
||||
&iio_dev_attr_sampling_frequency.dev_attr.attr,
|
||||
&iio_const_attr_sampling_frequency_available.dev_attr.attr,
|
||||
&iio_dev_attr_in_m_in_scale_available.dev_attr.attr,
|
||||
NULL
|
||||
|
@ -435,7 +385,6 @@ static const struct attribute_group ad7793_attribute_group = {
|
|||
};
|
||||
|
||||
static struct attribute *ad7797_attributes[] = {
|
||||
&iio_dev_attr_sampling_frequency.dev_attr.attr,
|
||||
&iio_const_attr_sampling_frequency_available_ad7797.dev_attr.attr,
|
||||
NULL
|
||||
};
|
||||
|
@ -505,6 +454,10 @@ static int ad7793_read_raw(struct iio_dev *indio_dev,
|
|||
*val -= offset;
|
||||
}
|
||||
return IIO_VAL_INT;
|
||||
case IIO_CHAN_INFO_SAMP_FREQ:
|
||||
*val = st->chip_info
|
||||
->sample_freq_avail[AD7793_MODE_RATE(st->mode)];
|
||||
return IIO_VAL_INT;
|
||||
}
|
||||
return -EINVAL;
|
||||
}
|
||||
|
@ -542,6 +495,26 @@ static int ad7793_write_raw(struct iio_dev *indio_dev,
|
|||
break;
|
||||
}
|
||||
break;
|
||||
case IIO_CHAN_INFO_SAMP_FREQ:
|
||||
if (!val) {
|
||||
ret = -EINVAL;
|
||||
break;
|
||||
}
|
||||
|
||||
for (i = 0; i < 16; i++)
|
||||
if (val == st->chip_info->sample_freq_avail[i])
|
||||
break;
|
||||
|
||||
if (i == 16) {
|
||||
ret = -EINVAL;
|
||||
break;
|
||||
}
|
||||
|
||||
st->mode &= ~AD7793_MODE_RATE(-1);
|
||||
st->mode |= AD7793_MODE_RATE(i);
|
||||
ad_sd_write_reg(&st->sd, AD7793_REG_MODE, sizeof(st->mode),
|
||||
st->mode);
|
||||
break;
|
||||
default:
|
||||
ret = -EINVAL;
|
||||
}
|
||||
|
|
|
@ -333,6 +333,27 @@ static const struct iio_chan_spec at91_adc_channels[] = {
|
|||
+ AT91_SAMA5D2_DIFF_CHAN_CNT + 1),
|
||||
};
|
||||
|
||||
static int at91_adc_chan_xlate(struct iio_dev *indio_dev, int chan)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < indio_dev->num_channels; i++) {
|
||||
if (indio_dev->channels[i].scan_index == chan)
|
||||
return i;
|
||||
}
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static inline struct iio_chan_spec const *
|
||||
at91_adc_chan_get(struct iio_dev *indio_dev, int chan)
|
||||
{
|
||||
int index = at91_adc_chan_xlate(indio_dev, chan);
|
||||
|
||||
if (index < 0)
|
||||
return NULL;
|
||||
return indio_dev->channels + index;
|
||||
}
|
||||
|
||||
static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
|
||||
{
|
||||
struct iio_dev *indio = iio_trigger_get_drvdata(trig);
|
||||
|
@ -350,8 +371,10 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
|
|||
at91_adc_writel(st, AT91_SAMA5D2_TRGR, status);
|
||||
|
||||
for_each_set_bit(bit, indio->active_scan_mask, indio->num_channels) {
|
||||
struct iio_chan_spec const *chan = indio->channels + bit;
|
||||
struct iio_chan_spec const *chan = at91_adc_chan_get(indio, bit);
|
||||
|
||||
if (!chan)
|
||||
continue;
|
||||
if (state) {
|
||||
at91_adc_writel(st, AT91_SAMA5D2_CHER,
|
||||
BIT(chan->channel));
|
||||
|
@ -448,7 +471,11 @@ static int at91_adc_dma_start(struct iio_dev *indio_dev)
|
|||
|
||||
for_each_set_bit(bit, indio_dev->active_scan_mask,
|
||||
indio_dev->num_channels) {
|
||||
struct iio_chan_spec const *chan = indio_dev->channels + bit;
|
||||
struct iio_chan_spec const *chan =
|
||||
at91_adc_chan_get(indio_dev, bit);
|
||||
|
||||
if (!chan)
|
||||
continue;
|
||||
|
||||
st->dma_st.rx_buf_sz += chan->scan_type.storagebits / 8;
|
||||
}
|
||||
|
@ -526,8 +553,11 @@ static int at91_adc_buffer_predisable(struct iio_dev *indio_dev)
|
|||
*/
|
||||
for_each_set_bit(bit, indio_dev->active_scan_mask,
|
||||
indio_dev->num_channels) {
|
||||
struct iio_chan_spec const *chan = indio_dev->channels + bit;
|
||||
struct iio_chan_spec const *chan =
|
||||
at91_adc_chan_get(indio_dev, bit);
|
||||
|
||||
if (!chan)
|
||||
continue;
|
||||
if (st->dma_st.dma_chan)
|
||||
at91_adc_readl(st, chan->address);
|
||||
}
|
||||
|
@ -587,8 +617,11 @@ static void at91_adc_trigger_handler_nodma(struct iio_dev *indio_dev,
|
|||
|
||||
for_each_set_bit(bit, indio_dev->active_scan_mask,
|
||||
indio_dev->num_channels) {
|
||||
struct iio_chan_spec const *chan = indio_dev->channels + bit;
|
||||
struct iio_chan_spec const *chan =
|
||||
at91_adc_chan_get(indio_dev, bit);
|
||||
|
||||
if (!chan)
|
||||
continue;
|
||||
st->buffer[i] = at91_adc_readl(st, chan->address);
|
||||
i++;
|
||||
}
|
||||
|
|
|
@ -144,6 +144,7 @@ static int stm32_dfsdm_set_osrs(struct stm32_dfsdm_filter *fl,
|
|||
* Leave as soon as if exact resolution if reached.
|
||||
* Otherwise the higher resolution below 32 bits is kept.
|
||||
*/
|
||||
fl->res = 0;
|
||||
for (fosr = 1; fosr <= DFSDM_MAX_FL_OVERSAMPLING; fosr++) {
|
||||
for (iosr = 1; iosr <= DFSDM_MAX_INT_OVERSAMPLING; iosr++) {
|
||||
if (fast)
|
||||
|
@ -193,7 +194,7 @@ static int stm32_dfsdm_set_osrs(struct stm32_dfsdm_filter *fl,
|
|||
}
|
||||
}
|
||||
|
||||
if (!fl->fosr)
|
||||
if (!fl->res)
|
||||
return -EINVAL;
|
||||
|
||||
return 0;
|
||||
|
@ -770,7 +771,7 @@ static int stm32_dfsdm_write_raw(struct iio_dev *indio_dev,
|
|||
struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
|
||||
struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[adc->fl_id];
|
||||
struct stm32_dfsdm_channel *ch = &adc->dfsdm->ch_list[chan->channel];
|
||||
unsigned int spi_freq = adc->spi_freq;
|
||||
unsigned int spi_freq;
|
||||
int ret = -EINVAL;
|
||||
|
||||
switch (mask) {
|
||||
|
@ -784,8 +785,18 @@ static int stm32_dfsdm_write_raw(struct iio_dev *indio_dev,
|
|||
case IIO_CHAN_INFO_SAMP_FREQ:
|
||||
if (!val)
|
||||
return -EINVAL;
|
||||
if (ch->src != DFSDM_CHANNEL_SPI_CLOCK_EXTERNAL)
|
||||
|
||||
switch (ch->src) {
|
||||
case DFSDM_CHANNEL_SPI_CLOCK_INTERNAL:
|
||||
spi_freq = adc->dfsdm->spi_master_freq;
|
||||
break;
|
||||
case DFSDM_CHANNEL_SPI_CLOCK_INTERNAL_DIV2_FALLING:
|
||||
case DFSDM_CHANNEL_SPI_CLOCK_INTERNAL_DIV2_RISING:
|
||||
spi_freq = adc->dfsdm->spi_master_freq / 2;
|
||||
break;
|
||||
default:
|
||||
spi_freq = adc->spi_freq;
|
||||
}
|
||||
|
||||
if (spi_freq % val)
|
||||
dev_warn(&indio_dev->dev,
|
||||
|
|
|
@ -587,7 +587,7 @@ EXPORT_SYMBOL_GPL(iio_dma_buffer_set_bytes_per_datum);
|
|||
* Should be used as the set_length callback for iio_buffer_access_ops
|
||||
* struct for DMA buffers.
|
||||
*/
|
||||
int iio_dma_buffer_set_length(struct iio_buffer *buffer, int length)
|
||||
int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length)
|
||||
{
|
||||
/* Avoid an invalid state */
|
||||
if (length < 2)
|
||||
|
|
|
@ -22,11 +22,18 @@ struct iio_kfifo {
|
|||
#define iio_to_kfifo(r) container_of(r, struct iio_kfifo, buffer)
|
||||
|
||||
static inline int __iio_allocate_kfifo(struct iio_kfifo *buf,
|
||||
int bytes_per_datum, int length)
|
||||
size_t bytes_per_datum, unsigned int length)
|
||||
{
|
||||
if ((length == 0) || (bytes_per_datum == 0))
|
||||
return -EINVAL;
|
||||
|
||||
/*
|
||||
* Make sure we don't overflow an unsigned int after kfifo rounds up to
|
||||
* the next power of 2.
|
||||
*/
|
||||
if (roundup_pow_of_two(length) > UINT_MAX / bytes_per_datum)
|
||||
return -EINVAL;
|
||||
|
||||
return __kfifo_alloc((struct __kfifo *)&buf->kf, length,
|
||||
bytes_per_datum, GFP_KERNEL);
|
||||
}
|
||||
|
@ -67,7 +74,7 @@ static int iio_set_bytes_per_datum_kfifo(struct iio_buffer *r, size_t bpd)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int iio_set_length_kfifo(struct iio_buffer *r, int length)
|
||||
static int iio_set_length_kfifo(struct iio_buffer *r, unsigned int length)
|
||||
{
|
||||
/* Avoid an invalid state */
|
||||
if (length < 2)
|
||||
|
|
|
@ -178,14 +178,14 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state)
|
|||
#ifdef CONFIG_PM
|
||||
int ret;
|
||||
|
||||
atomic_set(&st->user_requested_state, state);
|
||||
|
||||
if (atomic_add_unless(&st->runtime_pm_enable, 1, 1))
|
||||
pm_runtime_enable(&st->pdev->dev);
|
||||
|
||||
if (state)
|
||||
if (state) {
|
||||
atomic_inc(&st->user_requested_state);
|
||||
ret = pm_runtime_get_sync(&st->pdev->dev);
|
||||
else {
|
||||
} else {
|
||||
atomic_dec(&st->user_requested_state);
|
||||
pm_runtime_mark_last_busy(&st->pdev->dev);
|
||||
pm_runtime_use_autosuspend(&st->pdev->dev);
|
||||
ret = pm_runtime_put_autosuspend(&st->pdev->dev);
|
||||
|
|
|
@ -502,7 +502,7 @@ static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
|
|||
return -EINVAL;
|
||||
|
||||
if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID)
|
||||
return -EAGAIN;
|
||||
return -EINVAL;
|
||||
|
||||
memcpy(gid, &table->data_vec[index].gid, sizeof(*gid));
|
||||
if (attr) {
|
||||
|
|
|
@ -185,12 +185,65 @@ static void bnxt_re_shutdown(void *p)
|
|||
bnxt_re_ib_unreg(rdev, false);
|
||||
}
|
||||
|
||||
static void bnxt_re_stop_irq(void *handle)
|
||||
{
|
||||
struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
|
||||
struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
|
||||
struct bnxt_qplib_nq *nq;
|
||||
int indx;
|
||||
|
||||
for (indx = BNXT_RE_NQ_IDX; indx < rdev->num_msix; indx++) {
|
||||
nq = &rdev->nq[indx - 1];
|
||||
bnxt_qplib_nq_stop_irq(nq, false);
|
||||
}
|
||||
|
||||
bnxt_qplib_rcfw_stop_irq(rcfw, false);
|
||||
}
|
||||
|
||||
static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
|
||||
{
|
||||
struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
|
||||
struct bnxt_msix_entry *msix_ent = rdev->msix_entries;
|
||||
struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
|
||||
struct bnxt_qplib_nq *nq;
|
||||
int indx, rc;
|
||||
|
||||
if (!ent) {
|
||||
/* Not setting the f/w timeout bit in rcfw.
|
||||
* During the driver unload the first command
|
||||
* to f/w will timeout and that will set the
|
||||
* timeout bit.
|
||||
*/
|
||||
dev_err(rdev_to_dev(rdev), "Failed to re-start IRQs\n");
|
||||
return;
|
||||
}
|
||||
|
||||
/* Vectors may change after restart, so update with new vectors
|
||||
* in device sctructure.
|
||||
*/
|
||||
for (indx = 0; indx < rdev->num_msix; indx++)
|
||||
rdev->msix_entries[indx].vector = ent[indx].vector;
|
||||
|
||||
bnxt_qplib_rcfw_start_irq(rcfw, msix_ent[BNXT_RE_AEQ_IDX].vector,
|
||||
false);
|
||||
for (indx = BNXT_RE_NQ_IDX ; indx < rdev->num_msix; indx++) {
|
||||
nq = &rdev->nq[indx - 1];
|
||||
rc = bnxt_qplib_nq_start_irq(nq, indx - 1,
|
||||
msix_ent[indx].vector, false);
|
||||
if (rc)
|
||||
dev_warn(rdev_to_dev(rdev),
|
||||
"Failed to reinit NQ index %d\n", indx - 1);
|
||||
}
|
||||
}
|
||||
|
||||
static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
|
||||
.ulp_async_notifier = NULL,
|
||||
.ulp_stop = bnxt_re_stop,
|
||||
.ulp_start = bnxt_re_start,
|
||||
.ulp_sriov_config = bnxt_re_sriov_config,
|
||||
.ulp_shutdown = bnxt_re_shutdown
|
||||
.ulp_shutdown = bnxt_re_shutdown,
|
||||
.ulp_irq_stop = bnxt_re_stop_irq,
|
||||
.ulp_irq_restart = bnxt_re_start_irq
|
||||
};
|
||||
|
||||
/* RoCE -> Net driver */
|
||||
|
|
|
@ -336,22 +336,32 @@ static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
|
|||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
|
||||
{
|
||||
tasklet_disable(&nq->worker);
|
||||
/* Mask h/w interrupt */
|
||||
NQ_DB(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements);
|
||||
/* Sync with last running IRQ handler */
|
||||
synchronize_irq(nq->vector);
|
||||
if (kill)
|
||||
tasklet_kill(&nq->worker);
|
||||
if (nq->requested) {
|
||||
irq_set_affinity_hint(nq->vector, NULL);
|
||||
free_irq(nq->vector, nq);
|
||||
nq->requested = false;
|
||||
}
|
||||
}
|
||||
|
||||
void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
|
||||
{
|
||||
if (nq->cqn_wq) {
|
||||
destroy_workqueue(nq->cqn_wq);
|
||||
nq->cqn_wq = NULL;
|
||||
}
|
||||
/* Make sure the HW is stopped! */
|
||||
synchronize_irq(nq->vector);
|
||||
tasklet_disable(&nq->worker);
|
||||
tasklet_kill(&nq->worker);
|
||||
|
||||
if (nq->requested) {
|
||||
irq_set_affinity_hint(nq->vector, NULL);
|
||||
free_irq(nq->vector, nq);
|
||||
nq->requested = false;
|
||||
}
|
||||
/* Make sure the HW is stopped! */
|
||||
bnxt_qplib_nq_stop_irq(nq, true);
|
||||
|
||||
if (nq->bar_reg_iomem)
|
||||
iounmap(nq->bar_reg_iomem);
|
||||
nq->bar_reg_iomem = NULL;
|
||||
|
@ -361,6 +371,40 @@ void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
|
|||
nq->vector = 0;
|
||||
}
|
||||
|
||||
int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
|
||||
int msix_vector, bool need_init)
|
||||
{
|
||||
int rc;
|
||||
|
||||
if (nq->requested)
|
||||
return -EFAULT;
|
||||
|
||||
nq->vector = msix_vector;
|
||||
if (need_init)
|
||||
tasklet_init(&nq->worker, bnxt_qplib_service_nq,
|
||||
(unsigned long)nq);
|
||||
else
|
||||
tasklet_enable(&nq->worker);
|
||||
|
||||
snprintf(nq->name, sizeof(nq->name), "bnxt_qplib_nq-%d", nq_indx);
|
||||
rc = request_irq(nq->vector, bnxt_qplib_nq_irq, 0, nq->name, nq);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
cpumask_clear(&nq->mask);
|
||||
cpumask_set_cpu(nq_indx, &nq->mask);
|
||||
rc = irq_set_affinity_hint(nq->vector, &nq->mask);
|
||||
if (rc) {
|
||||
dev_warn(&nq->pdev->dev,
|
||||
"QPLIB: set affinity failed; vector: %d nq_idx: %d\n",
|
||||
nq->vector, nq_indx);
|
||||
}
|
||||
nq->requested = true;
|
||||
NQ_DB_REARM(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
|
||||
int nq_idx, int msix_vector, int bar_reg_offset,
|
||||
int (*cqn_handler)(struct bnxt_qplib_nq *nq,
|
||||
|
@ -372,41 +416,17 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
|
|||
resource_size_t nq_base;
|
||||
int rc = -1;
|
||||
|
||||
nq->pdev = pdev;
|
||||
nq->vector = msix_vector;
|
||||
if (cqn_handler)
|
||||
nq->cqn_handler = cqn_handler;
|
||||
|
||||
if (srqn_handler)
|
||||
nq->srqn_handler = srqn_handler;
|
||||
|
||||
tasklet_init(&nq->worker, bnxt_qplib_service_nq, (unsigned long)nq);
|
||||
|
||||
/* Have a task to schedule CQ notifiers in post send case */
|
||||
nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq");
|
||||
if (!nq->cqn_wq)
|
||||
goto fail;
|
||||
return -ENOMEM;
|
||||
|
||||
nq->requested = false;
|
||||
memset(nq->name, 0, 32);
|
||||
sprintf(nq->name, "bnxt_qplib_nq-%d", nq_idx);
|
||||
rc = request_irq(nq->vector, bnxt_qplib_nq_irq, 0, nq->name, nq);
|
||||
if (rc) {
|
||||
dev_err(&nq->pdev->dev,
|
||||
"Failed to request IRQ for NQ: %#x", rc);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
cpumask_clear(&nq->mask);
|
||||
cpumask_set_cpu(nq_idx, &nq->mask);
|
||||
rc = irq_set_affinity_hint(nq->vector, &nq->mask);
|
||||
if (rc) {
|
||||
dev_warn(&nq->pdev->dev,
|
||||
"QPLIB: set affinity failed; vector: %d nq_idx: %d\n",
|
||||
nq->vector, nq_idx);
|
||||
}
|
||||
|
||||
nq->requested = true;
|
||||
nq->bar_reg = NQ_CONS_PCI_BAR_REGION;
|
||||
nq->bar_reg_off = bar_reg_offset;
|
||||
nq_base = pci_resource_start(pdev, nq->bar_reg);
|
||||
|
@ -419,7 +439,13 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
|
|||
rc = -ENOMEM;
|
||||
goto fail;
|
||||
}
|
||||
NQ_DB_REARM(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements);
|
||||
|
||||
rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
|
||||
if (rc) {
|
||||
dev_err(&nq->pdev->dev,
|
||||
"QPLIB: Failed to request irq for nq-idx %d", nq_idx);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
return 0;
|
||||
fail:
|
||||
|
|
|
@ -467,7 +467,10 @@ struct bnxt_qplib_nq_work {
|
|||
struct bnxt_qplib_cq *cq;
|
||||
};
|
||||
|
||||
void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill);
|
||||
void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq);
|
||||
int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
|
||||
int msix_vector, bool need_init);
|
||||
int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
|
||||
int nq_idx, int msix_vector, int bar_reg_offset,
|
||||
int (*cqn_handler)(struct bnxt_qplib_nq *nq,
|
||||
|
|
|
@ -582,19 +582,29 @@ fail:
|
|||
return -ENOMEM;
|
||||
}
|
||||
|
||||
void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
|
||||
void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill)
|
||||
{
|
||||
unsigned long indx;
|
||||
|
||||
/* Make sure the HW channel is stopped! */
|
||||
synchronize_irq(rcfw->vector);
|
||||
tasklet_disable(&rcfw->worker);
|
||||
tasklet_kill(&rcfw->worker);
|
||||
/* Mask h/w interrupts */
|
||||
CREQ_DB(rcfw->creq_bar_reg_iomem, rcfw->creq.cons,
|
||||
rcfw->creq.max_elements);
|
||||
/* Sync with last running IRQ-handler */
|
||||
synchronize_irq(rcfw->vector);
|
||||
if (kill)
|
||||
tasklet_kill(&rcfw->worker);
|
||||
|
||||
if (rcfw->requested) {
|
||||
free_irq(rcfw->vector, rcfw);
|
||||
rcfw->requested = false;
|
||||
}
|
||||
}
|
||||
|
||||
void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
|
||||
{
|
||||
unsigned long indx;
|
||||
|
||||
bnxt_qplib_rcfw_stop_irq(rcfw, true);
|
||||
|
||||
if (rcfw->cmdq_bar_reg_iomem)
|
||||
iounmap(rcfw->cmdq_bar_reg_iomem);
|
||||
rcfw->cmdq_bar_reg_iomem = NULL;
|
||||
|
@ -614,6 +624,31 @@ void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
|
|||
rcfw->vector = 0;
|
||||
}
|
||||
|
||||
int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
|
||||
bool need_init)
|
||||
{
|
||||
int rc;
|
||||
|
||||
if (rcfw->requested)
|
||||
return -EFAULT;
|
||||
|
||||
rcfw->vector = msix_vector;
|
||||
if (need_init)
|
||||
tasklet_init(&rcfw->worker,
|
||||
bnxt_qplib_service_creq, (unsigned long)rcfw);
|
||||
else
|
||||
tasklet_enable(&rcfw->worker);
|
||||
rc = request_irq(rcfw->vector, bnxt_qplib_creq_irq, 0,
|
||||
"bnxt_qplib_creq", rcfw);
|
||||
if (rc)
|
||||
return rc;
|
||||
rcfw->requested = true;
|
||||
CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, rcfw->creq.cons,
|
||||
rcfw->creq.max_elements);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
|
||||
struct bnxt_qplib_rcfw *rcfw,
|
||||
int msix_vector,
|
||||
|
@ -675,27 +710,17 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
|
|||
rcfw->creq_qp_event_processed = 0;
|
||||
rcfw->creq_func_event_processed = 0;
|
||||
|
||||
rcfw->vector = msix_vector;
|
||||
if (aeq_handler)
|
||||
rcfw->aeq_handler = aeq_handler;
|
||||
init_waitqueue_head(&rcfw->waitq);
|
||||
|
||||
tasklet_init(&rcfw->worker, bnxt_qplib_service_creq,
|
||||
(unsigned long)rcfw);
|
||||
|
||||
rcfw->requested = false;
|
||||
rc = request_irq(rcfw->vector, bnxt_qplib_creq_irq, 0,
|
||||
"bnxt_qplib_creq", rcfw);
|
||||
rc = bnxt_qplib_rcfw_start_irq(rcfw, msix_vector, true);
|
||||
if (rc) {
|
||||
dev_err(&rcfw->pdev->dev,
|
||||
"QPLIB: Failed to request IRQ for CREQ rc = 0x%x", rc);
|
||||
bnxt_qplib_disable_rcfw_channel(rcfw);
|
||||
return rc;
|
||||
}
|
||||
rcfw->requested = true;
|
||||
|
||||
init_waitqueue_head(&rcfw->waitq);
|
||||
|
||||
CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, 0, rcfw->creq.max_elements);
|
||||
|
||||
init.cmdq_pbl = cpu_to_le64(rcfw->cmdq.pbl[PBL_LVL_0].pg_map_arr[0]);
|
||||
init.cmdq_size_cmdq_lvl = cpu_to_le16(
|
||||
|
|
|
@ -195,7 +195,10 @@ struct bnxt_qplib_rcfw {
|
|||
void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
|
||||
int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
|
||||
struct bnxt_qplib_rcfw *rcfw, int qp_tbl_sz);
|
||||
void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill);
|
||||
void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
|
||||
int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
|
||||
bool need_init);
|
||||
int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
|
||||
struct bnxt_qplib_rcfw *rcfw,
|
||||
int msix_vector,
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
config INFINIBAND_SRPT
|
||||
tristate "InfiniBand SCSI RDMA Protocol target support"
|
||||
depends on INFINIBAND_ADDR_TRANS && TARGET_CORE
|
||||
depends on INFINIBAND && INFINIBAND_ADDR_TRANS && TARGET_CORE
|
||||
---help---
|
||||
|
||||
Support for the SCSI RDMA Protocol (SRP) Target driver. The
|
||||
|
|
|
@ -130,7 +130,7 @@ static int elan_smbus_get_baseline_data(struct i2c_client *client,
|
|||
bool max_baseline, u8 *value)
|
||||
{
|
||||
int error;
|
||||
u8 val[3];
|
||||
u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
|
||||
|
||||
error = i2c_smbus_read_block_data(client,
|
||||
max_baseline ?
|
||||
|
@ -149,7 +149,7 @@ static int elan_smbus_get_version(struct i2c_client *client,
|
|||
bool iap, u8 *version)
|
||||
{
|
||||
int error;
|
||||
u8 val[3];
|
||||
u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
|
||||
|
||||
error = i2c_smbus_read_block_data(client,
|
||||
iap ? ETP_SMBUS_IAP_VERSION_CMD :
|
||||
|
@ -170,7 +170,7 @@ static int elan_smbus_get_sm_version(struct i2c_client *client,
|
|||
u8 *clickpad)
|
||||
{
|
||||
int error;
|
||||
u8 val[3];
|
||||
u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
|
||||
|
||||
error = i2c_smbus_read_block_data(client,
|
||||
ETP_SMBUS_SM_VERSION_CMD, val);
|
||||
|
@ -188,7 +188,7 @@ static int elan_smbus_get_sm_version(struct i2c_client *client,
|
|||
static int elan_smbus_get_product_id(struct i2c_client *client, u16 *id)
|
||||
{
|
||||
int error;
|
||||
u8 val[3];
|
||||
u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
|
||||
|
||||
error = i2c_smbus_read_block_data(client,
|
||||
ETP_SMBUS_UNIQUEID_CMD, val);
|
||||
|
@ -205,7 +205,7 @@ static int elan_smbus_get_checksum(struct i2c_client *client,
|
|||
bool iap, u16 *csum)
|
||||
{
|
||||
int error;
|
||||
u8 val[3];
|
||||
u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
|
||||
|
||||
error = i2c_smbus_read_block_data(client,
|
||||
iap ? ETP_SMBUS_FW_CHECKSUM_CMD :
|
||||
|
@ -226,7 +226,7 @@ static int elan_smbus_get_max(struct i2c_client *client,
|
|||
{
|
||||
int ret;
|
||||
int error;
|
||||
u8 val[3];
|
||||
u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
|
||||
|
||||
ret = i2c_smbus_read_block_data(client, ETP_SMBUS_RANGE_CMD, val);
|
||||
if (ret != 3) {
|
||||
|
@ -246,7 +246,7 @@ static int elan_smbus_get_resolution(struct i2c_client *client,
|
|||
{
|
||||
int ret;
|
||||
int error;
|
||||
u8 val[3];
|
||||
u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
|
||||
|
||||
ret = i2c_smbus_read_block_data(client, ETP_SMBUS_RESOLUTION_CMD, val);
|
||||
if (ret != 3) {
|
||||
|
@ -267,7 +267,7 @@ static int elan_smbus_get_num_traces(struct i2c_client *client,
|
|||
{
|
||||
int ret;
|
||||
int error;
|
||||
u8 val[3];
|
||||
u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
|
||||
|
||||
ret = i2c_smbus_read_block_data(client, ETP_SMBUS_XY_TRACENUM_CMD, val);
|
||||
if (ret != 3) {
|
||||
|
@ -294,7 +294,7 @@ static int elan_smbus_iap_get_mode(struct i2c_client *client,
|
|||
{
|
||||
int error;
|
||||
u16 constant;
|
||||
u8 val[3];
|
||||
u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
|
||||
|
||||
error = i2c_smbus_read_block_data(client, ETP_SMBUS_IAP_CTRL_CMD, val);
|
||||
if (error < 0) {
|
||||
|
@ -345,7 +345,7 @@ static int elan_smbus_prepare_fw_update(struct i2c_client *client)
|
|||
int len;
|
||||
int error;
|
||||
enum tp_mode mode;
|
||||
u8 val[3];
|
||||
u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
|
||||
u8 cmd[4] = {0x0F, 0x78, 0x00, 0x06};
|
||||
u16 password;
|
||||
|
||||
|
@ -419,7 +419,7 @@ static int elan_smbus_write_fw_block(struct i2c_client *client,
|
|||
struct device *dev = &client->dev;
|
||||
int error;
|
||||
u16 result;
|
||||
u8 val[3];
|
||||
u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
|
||||
|
||||
/*
|
||||
* Due to the limitation of smbus protocol limiting
|
||||
|
|
|
@ -172,6 +172,12 @@ static const char * const smbus_pnp_ids[] = {
|
|||
"LEN0048", /* X1 Carbon 3 */
|
||||
"LEN0046", /* X250 */
|
||||
"LEN004a", /* W541 */
|
||||
"LEN0071", /* T480 */
|
||||
"LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */
|
||||
"LEN0073", /* X1 Carbon G5 (Elantech) */
|
||||
"LEN0092", /* X1 Carbon 6 */
|
||||
"LEN0096", /* X280 */
|
||||
"LEN0097", /* X280 -> ALPS trackpoint */
|
||||
"LEN200f", /* T450s */
|
||||
NULL
|
||||
};
|
||||
|
|
|
@ -1711,6 +1711,18 @@ static const struct b53_chip_data b53_switch_chips[] = {
|
|||
.cpu_port = B53_CPU_PORT_25,
|
||||
.duplex_reg = B53_DUPLEX_STAT_FE,
|
||||
},
|
||||
{
|
||||
.chip_id = BCM5389_DEVICE_ID,
|
||||
.dev_name = "BCM5389",
|
||||
.vlans = 4096,
|
||||
.enabled_ports = 0x1f,
|
||||
.arl_entries = 4,
|
||||
.cpu_port = B53_CPU_PORT,
|
||||
.vta_regs = B53_VTA_REGS,
|
||||
.duplex_reg = B53_DUPLEX_STAT_GE,
|
||||
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
|
||||
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
|
||||
},
|
||||
{
|
||||
.chip_id = BCM5395_DEVICE_ID,
|
||||
.dev_name = "BCM5395",
|
||||
|
@ -2034,6 +2046,7 @@ int b53_switch_detect(struct b53_device *dev)
|
|||
else
|
||||
dev->chip_id = BCM5365_DEVICE_ID;
|
||||
break;
|
||||
case BCM5389_DEVICE_ID:
|
||||
case BCM5395_DEVICE_ID:
|
||||
case BCM5397_DEVICE_ID:
|
||||
case BCM5398_DEVICE_ID:
|
||||
|
|
|
@ -285,6 +285,7 @@ static const struct b53_io_ops b53_mdio_ops = {
|
|||
#define B53_BRCM_OUI_1 0x0143bc00
|
||||
#define B53_BRCM_OUI_2 0x03625c00
|
||||
#define B53_BRCM_OUI_3 0x00406000
|
||||
#define B53_BRCM_OUI_4 0x01410c00
|
||||
|
||||
static int b53_mdio_probe(struct mdio_device *mdiodev)
|
||||
{
|
||||
|
@ -311,7 +312,8 @@ static int b53_mdio_probe(struct mdio_device *mdiodev)
|
|||
*/
|
||||
if ((phy_id & 0xfffffc00) != B53_BRCM_OUI_1 &&
|
||||
(phy_id & 0xfffffc00) != B53_BRCM_OUI_2 &&
|
||||
(phy_id & 0xfffffc00) != B53_BRCM_OUI_3) {
|
||||
(phy_id & 0xfffffc00) != B53_BRCM_OUI_3 &&
|
||||
(phy_id & 0xfffffc00) != B53_BRCM_OUI_4) {
|
||||
dev_err(&mdiodev->dev, "Unsupported device: 0x%08x\n", phy_id);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
@ -360,6 +362,7 @@ static const struct of_device_id b53_of_match[] = {
|
|||
{ .compatible = "brcm,bcm53125" },
|
||||
{ .compatible = "brcm,bcm53128" },
|
||||
{ .compatible = "brcm,bcm5365" },
|
||||
{ .compatible = "brcm,bcm5389" },
|
||||
{ .compatible = "brcm,bcm5395" },
|
||||
{ .compatible = "brcm,bcm5397" },
|
||||
{ .compatible = "brcm,bcm5398" },
|
||||
|
|
|
@ -48,6 +48,7 @@ struct b53_io_ops {
|
|||
enum {
|
||||
BCM5325_DEVICE_ID = 0x25,
|
||||
BCM5365_DEVICE_ID = 0x65,
|
||||
BCM5389_DEVICE_ID = 0x89,
|
||||
BCM5395_DEVICE_ID = 0x95,
|
||||
BCM5397_DEVICE_ID = 0x97,
|
||||
BCM5398_DEVICE_ID = 0x98,
|
||||
|
|
|
@ -3309,7 +3309,9 @@ void be_detect_error(struct be_adapter *adapter)
|
|||
if ((val & POST_STAGE_FAT_LOG_START)
|
||||
!= POST_STAGE_FAT_LOG_START &&
|
||||
(val & POST_STAGE_ARMFW_UE)
|
||||
!= POST_STAGE_ARMFW_UE)
|
||||
!= POST_STAGE_ARMFW_UE &&
|
||||
(val & POST_STAGE_RECOVERABLE_ERR)
|
||||
!= POST_STAGE_RECOVERABLE_ERR)
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
|
@ -9054,7 +9054,6 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter,
|
|||
{
|
||||
const struct tc_action *a;
|
||||
LIST_HEAD(actions);
|
||||
int err;
|
||||
|
||||
if (!tcf_exts_has_actions(exts))
|
||||
return -EINVAL;
|
||||
|
@ -9075,11 +9074,11 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter,
|
|||
|
||||
if (!dev)
|
||||
return -EINVAL;
|
||||
err = handle_redirect_action(adapter, dev->ifindex, queue,
|
||||
action);
|
||||
if (err == 0)
|
||||
return err;
|
||||
return handle_redirect_action(adapter, dev->ifindex,
|
||||
queue, action);
|
||||
}
|
||||
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return -EINVAL;
|
||||
|
|
|
@ -4433,6 +4433,11 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
|
|||
NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
|
||||
return -EINVAL;
|
||||
}
|
||||
if (is_vlan_dev(upper_dev) &&
|
||||
vlan_dev_vlan_id(upper_dev) == 1) {
|
||||
NL_SET_ERR_MSG_MOD(extack, "Creating a VLAN device with VID 1 is unsupported: VLAN 1 carries untagged traffic");
|
||||
return -EINVAL;
|
||||
}
|
||||
break;
|
||||
case NETDEV_CHANGEUPPER:
|
||||
upper_dev = info->upper_dev;
|
||||
|
|
|
@ -84,7 +84,7 @@ static int sonic_open(struct net_device *dev)
|
|||
for (i = 0; i < SONIC_NUM_RRS; i++) {
|
||||
dma_addr_t laddr = dma_map_single(lp->device, skb_put(lp->rx_skb[i], SONIC_RBSIZE),
|
||||
SONIC_RBSIZE, DMA_FROM_DEVICE);
|
||||
if (!laddr) {
|
||||
if (dma_mapping_error(lp->device, laddr)) {
|
||||
while(i > 0) { /* free any that were mapped successfully */
|
||||
i--;
|
||||
dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE);
|
||||
|
|
|
@ -1674,8 +1674,8 @@ static int netsec_probe(struct platform_device *pdev)
|
|||
if (ret)
|
||||
goto unreg_napi;
|
||||
|
||||
if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
|
||||
dev_warn(&pdev->dev, "Failed to enable 64-bit DMA\n");
|
||||
if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)))
|
||||
dev_warn(&pdev->dev, "Failed to set DMA mask\n");
|
||||
|
||||
ret = register_netdev(ndev);
|
||||
if (ret) {
|
||||
|
|
|
@ -1873,7 +1873,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
|
|||
if (IS_ERR(priv->txchan)) {
|
||||
dev_err(&pdev->dev, "error initializing tx dma channel\n");
|
||||
rc = PTR_ERR(priv->txchan);
|
||||
goto no_cpdma_chan;
|
||||
goto err_free_dma;
|
||||
}
|
||||
|
||||
priv->rxchan = cpdma_chan_create(priv->dma, EMAC_DEF_RX_CH,
|
||||
|
@ -1881,14 +1881,14 @@ static int davinci_emac_probe(struct platform_device *pdev)
|
|||
if (IS_ERR(priv->rxchan)) {
|
||||
dev_err(&pdev->dev, "error initializing rx dma channel\n");
|
||||
rc = PTR_ERR(priv->rxchan);
|
||||
goto no_cpdma_chan;
|
||||
goto err_free_txchan;
|
||||
}
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
|
||||
if (!res) {
|
||||
dev_err(&pdev->dev, "error getting irq res\n");
|
||||
rc = -ENOENT;
|
||||
goto no_cpdma_chan;
|
||||
goto err_free_rxchan;
|
||||
}
|
||||
ndev->irq = res->start;
|
||||
|
||||
|
@ -1914,7 +1914,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
|
|||
pm_runtime_put_noidle(&pdev->dev);
|
||||
dev_err(&pdev->dev, "%s: failed to get_sync(%d)\n",
|
||||
__func__, rc);
|
||||
goto no_cpdma_chan;
|
||||
goto err_napi_del;
|
||||
}
|
||||
|
||||
/* register the network device */
|
||||
|
@ -1924,7 +1924,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
|
|||
dev_err(&pdev->dev, "error in register_netdev\n");
|
||||
rc = -ENODEV;
|
||||
pm_runtime_put(&pdev->dev);
|
||||
goto no_cpdma_chan;
|
||||
goto err_napi_del;
|
||||
}
|
||||
|
||||
|
||||
|
@ -1937,11 +1937,13 @@ static int davinci_emac_probe(struct platform_device *pdev)
|
|||
|
||||
return 0;
|
||||
|
||||
no_cpdma_chan:
|
||||
if (priv->txchan)
|
||||
cpdma_chan_destroy(priv->txchan);
|
||||
if (priv->rxchan)
|
||||
cpdma_chan_destroy(priv->rxchan);
|
||||
err_napi_del:
|
||||
netif_napi_del(&priv->napi);
|
||||
err_free_rxchan:
|
||||
cpdma_chan_destroy(priv->rxchan);
|
||||
err_free_txchan:
|
||||
cpdma_chan_destroy(priv->txchan);
|
||||
err_free_dma:
|
||||
cpdma_ctlr_destroy(priv->dma);
|
||||
no_pdata:
|
||||
if (of_phy_is_fixed_link(np))
|
||||
|
|
|
@ -1650,7 +1650,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
|
|||
else
|
||||
*skb_xdp = 0;
|
||||
|
||||
preempt_disable();
|
||||
local_bh_disable();
|
||||
rcu_read_lock();
|
||||
xdp_prog = rcu_dereference(tun->xdp_prog);
|
||||
if (xdp_prog && !*skb_xdp) {
|
||||
|
@ -1675,7 +1675,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
|
|||
if (err)
|
||||
goto err_redirect;
|
||||
rcu_read_unlock();
|
||||
preempt_enable();
|
||||
local_bh_enable();
|
||||
return NULL;
|
||||
case XDP_TX:
|
||||
get_page(alloc_frag->page);
|
||||
|
@ -1684,7 +1684,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
|
|||
goto err_redirect;
|
||||
tun_xdp_flush(tun->dev);
|
||||
rcu_read_unlock();
|
||||
preempt_enable();
|
||||
local_bh_enable();
|
||||
return NULL;
|
||||
case XDP_PASS:
|
||||
delta = orig_data - xdp.data;
|
||||
|
@ -1703,7 +1703,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
|
|||
skb = build_skb(buf, buflen);
|
||||
if (!skb) {
|
||||
rcu_read_unlock();
|
||||
preempt_enable();
|
||||
local_bh_enable();
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
|
@ -1713,7 +1713,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
|
|||
alloc_frag->offset += buflen;
|
||||
|
||||
rcu_read_unlock();
|
||||
preempt_enable();
|
||||
local_bh_enable();
|
||||
|
||||
return skb;
|
||||
|
||||
|
@ -1721,7 +1721,7 @@ err_redirect:
|
|||
put_page(alloc_frag->page);
|
||||
err_xdp:
|
||||
rcu_read_unlock();
|
||||
preempt_enable();
|
||||
local_bh_enable();
|
||||
this_cpu_inc(tun->pcpu_stats->rx_dropped);
|
||||
return NULL;
|
||||
}
|
||||
|
@ -1917,16 +1917,19 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
|
|||
struct bpf_prog *xdp_prog;
|
||||
int ret;
|
||||
|
||||
local_bh_disable();
|
||||
rcu_read_lock();
|
||||
xdp_prog = rcu_dereference(tun->xdp_prog);
|
||||
if (xdp_prog) {
|
||||
ret = do_xdp_generic(xdp_prog, skb);
|
||||
if (ret != XDP_PASS) {
|
||||
rcu_read_unlock();
|
||||
local_bh_enable();
|
||||
return total_len;
|
||||
}
|
||||
}
|
||||
rcu_read_unlock();
|
||||
local_bh_enable();
|
||||
}
|
||||
|
||||
rcu_read_lock();
|
||||
|
|
|
@ -609,7 +609,7 @@ static const struct driver_info cdc_mbim_info_ndp_to_end = {
|
|||
*/
|
||||
static const struct driver_info cdc_mbim_info_avoid_altsetting_toggle = {
|
||||
.description = "CDC MBIM",
|
||||
.flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN,
|
||||
.flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN | FLAG_SEND_ZLP,
|
||||
.bind = cdc_mbim_bind,
|
||||
.unbind = cdc_mbim_unbind,
|
||||
.manage_power = cdc_mbim_manage_power,
|
||||
|
|
|
@ -1103,6 +1103,7 @@ static const struct usb_device_id products[] = {
|
|||
{QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
|
||||
{QMI_QUIRK_SET_DTR(0x05c6, 0x9625, 4)}, /* YUGA CLM920-NC5 */
|
||||
{QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
|
||||
{QMI_FIXED_INTF(0x0846, 0x68d3, 8)}, /* Netgear Aircard 779S */
|
||||
{QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
|
||||
{QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
|
||||
{QMI_FIXED_INTF(0x1435, 0xd181, 3)}, /* Wistron NeWeb D18Q1 */
|
||||
|
|
|
@ -1590,14 +1590,13 @@ static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
|
|||
struct iwl_trans *trans)
|
||||
{
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
int max_irqs, num_irqs, i, ret, nr_online_cpus;
|
||||
int max_irqs, num_irqs, i, ret;
|
||||
u16 pci_cmd;
|
||||
|
||||
if (!trans->cfg->mq_rx_supported)
|
||||
goto enable_msi;
|
||||
|
||||
nr_online_cpus = num_online_cpus();
|
||||
max_irqs = min_t(u32, nr_online_cpus + 2, IWL_MAX_RX_HW_QUEUES);
|
||||
max_irqs = min_t(u32, num_online_cpus() + 2, IWL_MAX_RX_HW_QUEUES);
|
||||
for (i = 0; i < max_irqs; i++)
|
||||
trans_pcie->msix_entries[i].entry = i;
|
||||
|
||||
|
@ -1623,16 +1622,17 @@ static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
|
|||
* Two interrupts less: non rx causes shared with FBQ and RSS.
|
||||
* More than two interrupts: we will use fewer RSS queues.
|
||||
*/
|
||||
if (num_irqs <= nr_online_cpus) {
|
||||
if (num_irqs <= max_irqs - 2) {
|
||||
trans_pcie->trans->num_rx_queues = num_irqs + 1;
|
||||
trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX |
|
||||
IWL_SHARED_IRQ_FIRST_RSS;
|
||||
} else if (num_irqs == nr_online_cpus + 1) {
|
||||
} else if (num_irqs == max_irqs - 1) {
|
||||
trans_pcie->trans->num_rx_queues = num_irqs;
|
||||
trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX;
|
||||
} else {
|
||||
trans_pcie->trans->num_rx_queues = num_irqs - 1;
|
||||
}
|
||||
WARN_ON(trans_pcie->trans->num_rx_queues > IWL_MAX_RX_HW_QUEUES);
|
||||
|
||||
trans_pcie->alloc_vecs = num_irqs;
|
||||
trans_pcie->msix_enabled = true;
|
||||
|
|
|
@ -372,16 +372,15 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
|
|||
|
||||
/*
|
||||
* Determine IFS values
|
||||
* - Use TXOP_BACKOFF for probe and management frames except beacons
|
||||
* - Use TXOP_BACKOFF for management frames except beacons
|
||||
* - Use TXOP_SIFS for fragment bursts
|
||||
* - Use TXOP_HTTXOP for everything else
|
||||
*
|
||||
* Note: rt2800 devices won't use CTS protection (if used)
|
||||
* for frames not transmitted with TXOP_HTTXOP
|
||||
*/
|
||||
if ((ieee80211_is_mgmt(hdr->frame_control) &&
|
||||
!ieee80211_is_beacon(hdr->frame_control)) ||
|
||||
(tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
|
||||
if (ieee80211_is_mgmt(hdr->frame_control) &&
|
||||
!ieee80211_is_beacon(hdr->frame_control))
|
||||
txdesc->u.ht.txop = TXOP_BACKOFF;
|
||||
else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
|
||||
txdesc->u.ht.txop = TXOP_SIFS;
|
||||
|
|
|
@ -27,7 +27,7 @@ config NVME_FABRICS
|
|||
|
||||
config NVME_RDMA
|
||||
tristate "NVM Express over Fabrics RDMA host driver"
|
||||
depends on INFINIBAND_ADDR_TRANS && BLOCK
|
||||
depends on INFINIBAND && INFINIBAND_ADDR_TRANS && BLOCK
|
||||
select NVME_CORE
|
||||
select NVME_FABRICS
|
||||
select SG_POOL
|
||||
|
|
|
@ -1447,8 +1447,8 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
|
|||
if (ns->lba_shift == 0)
|
||||
ns->lba_shift = 9;
|
||||
ns->noiob = le16_to_cpu(id->noiob);
|
||||
ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);
|
||||
ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms);
|
||||
ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);
|
||||
/* the PI implementation requires metadata equal t10 pi tuple size */
|
||||
if (ns->ms == sizeof(struct t10_pi_tuple))
|
||||
ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
|
||||
|
|
|
@ -27,7 +27,7 @@ config NVME_TARGET_LOOP
|
|||
|
||||
config NVME_TARGET_RDMA
|
||||
tristate "NVMe over Fabrics RDMA target support"
|
||||
depends on INFINIBAND_ADDR_TRANS
|
||||
depends on INFINIBAND && INFINIBAND_ADDR_TRANS
|
||||
depends on NVME_TARGET
|
||||
select SGL_ALLOC
|
||||
help
|
||||
|
|
|
@ -163,6 +163,16 @@ MODULE_LICENSE("GPL");
|
|||
|
||||
static const char * const ashs_ids[] = { "ATK4001", "ATK4002", NULL };
|
||||
|
||||
static bool ashs_present(void)
|
||||
{
|
||||
int i = 0;
|
||||
while (ashs_ids[i]) {
|
||||
if (acpi_dev_found(ashs_ids[i++]))
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
struct bios_args {
|
||||
u32 arg0;
|
||||
u32 arg1;
|
||||
|
@ -1025,6 +1035,9 @@ static int asus_new_rfkill(struct asus_wmi *asus,
|
|||
|
||||
static void asus_wmi_rfkill_exit(struct asus_wmi *asus)
|
||||
{
|
||||
if (asus->driver->wlan_ctrl_by_user && ashs_present())
|
||||
return;
|
||||
|
||||
asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P5");
|
||||
asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P6");
|
||||
asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P7");
|
||||
|
@ -2121,16 +2134,6 @@ static int asus_wmi_fan_init(struct asus_wmi *asus)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static bool ashs_present(void)
|
||||
{
|
||||
int i = 0;
|
||||
while (ashs_ids[i]) {
|
||||
if (acpi_dev_found(ashs_ids[i++]))
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* WMI Driver
|
||||
*/
|
||||
|
|
|
@ -3034,7 +3034,8 @@ static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx,
|
|||
cqr->callback_data = req;
|
||||
cqr->status = DASD_CQR_FILLED;
|
||||
cqr->dq = dq;
|
||||
req->completion_data = cqr;
|
||||
*((struct dasd_ccw_req **) blk_mq_rq_to_pdu(req)) = cqr;
|
||||
|
||||
blk_mq_start_request(req);
|
||||
spin_lock(&block->queue_lock);
|
||||
list_add_tail(&cqr->blocklist, &block->ccw_queue);
|
||||
|
@ -3058,12 +3059,13 @@ out:
|
|||
*/
|
||||
enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
|
||||
{
|
||||
struct dasd_ccw_req *cqr = req->completion_data;
|
||||
struct dasd_block *block = req->q->queuedata;
|
||||
struct dasd_device *device;
|
||||
struct dasd_ccw_req *cqr;
|
||||
unsigned long flags;
|
||||
int rc = 0;
|
||||
|
||||
cqr = *((struct dasd_ccw_req **) blk_mq_rq_to_pdu(req));
|
||||
if (!cqr)
|
||||
return BLK_EH_NOT_HANDLED;
|
||||
|
||||
|
@ -3169,6 +3171,7 @@ static int dasd_alloc_queue(struct dasd_block *block)
|
|||
int rc;
|
||||
|
||||
block->tag_set.ops = &dasd_mq_ops;
|
||||
block->tag_set.cmd_size = sizeof(struct dasd_ccw_req *);
|
||||
block->tag_set.nr_hw_queues = DASD_NR_HW_QUEUES;
|
||||
block->tag_set.queue_depth = DASD_MAX_LCU_DEV * DASD_REQ_PER_DEV;
|
||||
block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
|
||||
|
|
|
@ -51,6 +51,8 @@ struct srp_internal {
|
|||
struct transport_container rport_attr_cont;
|
||||
};
|
||||
|
||||
static int scsi_is_srp_rport(const struct device *dev);
|
||||
|
||||
#define to_srp_internal(tmpl) container_of(tmpl, struct srp_internal, t)
|
||||
|
||||
#define dev_to_rport(d) container_of(d, struct srp_rport, dev)
|
||||
|
@ -60,9 +62,24 @@ static inline struct Scsi_Host *rport_to_shost(struct srp_rport *r)
|
|||
return dev_to_shost(r->dev.parent);
|
||||
}
|
||||
|
||||
static int find_child_rport(struct device *dev, void *data)
|
||||
{
|
||||
struct device **child = data;
|
||||
|
||||
if (scsi_is_srp_rport(dev)) {
|
||||
WARN_ON_ONCE(*child);
|
||||
*child = dev;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline struct srp_rport *shost_to_rport(struct Scsi_Host *shost)
|
||||
{
|
||||
return transport_class_to_srp_rport(&shost->shost_gendev);
|
||||
struct device *child = NULL;
|
||||
|
||||
WARN_ON_ONCE(device_for_each_child(&shost->shost_gendev, &child,
|
||||
find_child_rport) < 0);
|
||||
return child ? dev_to_rport(child) : NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -600,7 +617,8 @@ enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd)
|
|||
struct srp_rport *rport = shost_to_rport(shost);
|
||||
|
||||
pr_debug("timeout for sdev %s\n", dev_name(&sdev->sdev_gendev));
|
||||
return rport->fast_io_fail_tmo < 0 && rport->dev_loss_tmo < 0 &&
|
||||
return rport && rport->fast_io_fail_tmo < 0 &&
|
||||
rport->dev_loss_tmo < 0 &&
|
||||
i->f->reset_timer_if_blocked && scsi_device_blocked(sdev) ?
|
||||
BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED;
|
||||
}
|
||||
|
|
|
@ -30,7 +30,6 @@ struct xway_gphy_priv {
|
|||
struct clk *gphy_clk_gate;
|
||||
struct reset_control *gphy_reset;
|
||||
struct reset_control *gphy_reset2;
|
||||
struct notifier_block gphy_reboot_nb;
|
||||
void __iomem *membase;
|
||||
char *fw_name;
|
||||
};
|
||||
|
@ -64,24 +63,6 @@ static const struct of_device_id xway_gphy_match[] = {
|
|||
};
|
||||
MODULE_DEVICE_TABLE(of, xway_gphy_match);
|
||||
|
||||
static struct xway_gphy_priv *to_xway_gphy_priv(struct notifier_block *nb)
|
||||
{
|
||||
return container_of(nb, struct xway_gphy_priv, gphy_reboot_nb);
|
||||
}
|
||||
|
||||
static int xway_gphy_reboot_notify(struct notifier_block *reboot_nb,
|
||||
unsigned long code, void *unused)
|
||||
{
|
||||
struct xway_gphy_priv *priv = to_xway_gphy_priv(reboot_nb);
|
||||
|
||||
if (priv) {
|
||||
reset_control_assert(priv->gphy_reset);
|
||||
reset_control_assert(priv->gphy_reset2);
|
||||
}
|
||||
|
||||
return NOTIFY_DONE;
|
||||
}
|
||||
|
||||
static int xway_gphy_load(struct device *dev, struct xway_gphy_priv *priv,
|
||||
dma_addr_t *dev_addr)
|
||||
{
|
||||
|
@ -205,14 +186,6 @@ static int xway_gphy_probe(struct platform_device *pdev)
|
|||
reset_control_deassert(priv->gphy_reset);
|
||||
reset_control_deassert(priv->gphy_reset2);
|
||||
|
||||
/* assert the gphy reset because it can hang after a reboot: */
|
||||
priv->gphy_reboot_nb.notifier_call = xway_gphy_reboot_notify;
|
||||
priv->gphy_reboot_nb.priority = -1;
|
||||
|
||||
ret = register_reboot_notifier(&priv->gphy_reboot_nb);
|
||||
if (ret)
|
||||
dev_warn(dev, "Failed to register reboot notifier\n");
|
||||
|
||||
platform_set_drvdata(pdev, priv);
|
||||
|
||||
return ret;
|
||||
|
@ -220,21 +193,12 @@ static int xway_gphy_probe(struct platform_device *pdev)
|
|||
|
||||
static int xway_gphy_remove(struct platform_device *pdev)
|
||||
{
|
||||
struct device *dev = &pdev->dev;
|
||||
struct xway_gphy_priv *priv = platform_get_drvdata(pdev);
|
||||
int ret;
|
||||
|
||||
reset_control_assert(priv->gphy_reset);
|
||||
reset_control_assert(priv->gphy_reset2);
|
||||
|
||||
iowrite32be(0, priv->membase);
|
||||
|
||||
clk_disable_unprepare(priv->gphy_clk_gate);
|
||||
|
||||
ret = unregister_reboot_notifier(&priv->gphy_reboot_nb);
|
||||
if (ret)
|
||||
dev_warn(dev, "Failed to unregister reboot notifier\n");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -34,7 +34,7 @@ config LNET_SELFTEST
|
|||
|
||||
config LNET_XPRT_IB
|
||||
tristate "LNET infiniband support"
|
||||
depends on LNET && PCI && INFINIBAND_ADDR_TRANS
|
||||
depends on LNET && PCI && INFINIBAND && INFINIBAND_ADDR_TRANS
|
||||
default LNET && INFINIBAND
|
||||
help
|
||||
This option allows the LNET users to use infiniband as an
|
||||
|
|
|
@ -1255,7 +1255,7 @@ static int icm_ar_get_boot_acl(struct tb *tb, uuid_t *uuids, size_t nuuids)
|
|||
/* Map empty entries to null UUID */
|
||||
uuid[0] = 0;
|
||||
uuid[1] = 0;
|
||||
} else {
|
||||
} else if (uuid[0] != 0 || uuid[1] != 0) {
|
||||
/* Upper two DWs are always one's */
|
||||
uuid[2] = 0xffffffff;
|
||||
uuid[3] = 0xffffffff;
|
||||
|
|
|
@ -404,6 +404,7 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
|
|||
{
|
||||
unsigned long pfn = 0;
|
||||
long ret, pinned = 0, lock_acct = 0;
|
||||
bool rsvd;
|
||||
dma_addr_t iova = vaddr - dma->vaddr + dma->iova;
|
||||
|
||||
/* This code path is only user initiated */
|
||||
|
@ -414,23 +415,14 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (is_invalid_reserved_pfn(*pfn_base)) {
|
||||
struct vm_area_struct *vma;
|
||||
|
||||
down_read(&current->mm->mmap_sem);
|
||||
vma = find_vma_intersection(current->mm, vaddr, vaddr + 1);
|
||||
pinned = min_t(long, npage, vma_pages(vma));
|
||||
up_read(&current->mm->mmap_sem);
|
||||
return pinned;
|
||||
}
|
||||
|
||||
pinned++;
|
||||
rsvd = is_invalid_reserved_pfn(*pfn_base);
|
||||
|
||||
/*
|
||||
* Reserved pages aren't counted against the user, externally pinned
|
||||
* pages are already counted against the user.
|
||||
*/
|
||||
if (!vfio_find_vpfn(dma, iova)) {
|
||||
if (!rsvd && !vfio_find_vpfn(dma, iova)) {
|
||||
if (!lock_cap && current->mm->locked_vm + 1 > limit) {
|
||||
put_pfn(*pfn_base, dma->prot);
|
||||
pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__,
|
||||
|
@ -450,12 +442,13 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
|
|||
if (ret)
|
||||
break;
|
||||
|
||||
if (pfn != *pfn_base + pinned) {
|
||||
if (pfn != *pfn_base + pinned ||
|
||||
rsvd != is_invalid_reserved_pfn(pfn)) {
|
||||
put_pfn(pfn, dma->prot);
|
||||
break;
|
||||
}
|
||||
|
||||
if (!vfio_find_vpfn(dma, iova)) {
|
||||
if (!rsvd && !vfio_find_vpfn(dma, iova)) {
|
||||
if (!lock_cap &&
|
||||
current->mm->locked_vm + lock_acct + 1 > limit) {
|
||||
put_pfn(pfn, dma->prot);
|
||||
|
@ -473,8 +466,10 @@ out:
|
|||
|
||||
unpin_out:
|
||||
if (ret) {
|
||||
for (pfn = *pfn_base ; pinned ; pfn++, pinned--)
|
||||
put_pfn(pfn, dma->prot);
|
||||
if (!rsvd) {
|
||||
for (pfn = *pfn_base ; pinned ; pfn++, pinned--)
|
||||
put_pfn(pfn, dma->prot);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -105,7 +105,9 @@ struct vhost_net_virtqueue {
|
|||
/* vhost zerocopy support fields below: */
|
||||
/* last used idx for outstanding DMA zerocopy buffers */
|
||||
int upend_idx;
|
||||
/* first used idx for DMA done zerocopy buffers */
|
||||
/* For TX, first used idx for DMA done zerocopy buffers
|
||||
* For RX, number of batched heads
|
||||
*/
|
||||
int done_idx;
|
||||
/* an array of userspace buffers info */
|
||||
struct ubuf_info *ubuf_info;
|
||||
|
@ -626,6 +628,18 @@ static int sk_has_rx_data(struct sock *sk)
|
|||
return skb_queue_empty(&sk->sk_receive_queue);
|
||||
}
|
||||
|
||||
static void vhost_rx_signal_used(struct vhost_net_virtqueue *nvq)
|
||||
{
|
||||
struct vhost_virtqueue *vq = &nvq->vq;
|
||||
struct vhost_dev *dev = vq->dev;
|
||||
|
||||
if (!nvq->done_idx)
|
||||
return;
|
||||
|
||||
vhost_add_used_and_signal_n(dev, vq, vq->heads, nvq->done_idx);
|
||||
nvq->done_idx = 0;
|
||||
}
|
||||
|
||||
static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk)
|
||||
{
|
||||
struct vhost_net_virtqueue *rvq = &net->vqs[VHOST_NET_VQ_RX];
|
||||
|
@ -635,6 +649,8 @@ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk)
|
|||
int len = peek_head_len(rvq, sk);
|
||||
|
||||
if (!len && vq->busyloop_timeout) {
|
||||
/* Flush batched heads first */
|
||||
vhost_rx_signal_used(rvq);
|
||||
/* Both tx vq and rx socket were polled here */
|
||||
mutex_lock_nested(&vq->mutex, 1);
|
||||
vhost_disable_notify(&net->dev, vq);
|
||||
|
@ -762,7 +778,7 @@ static void handle_rx(struct vhost_net *net)
|
|||
};
|
||||
size_t total_len = 0;
|
||||
int err, mergeable;
|
||||
s16 headcount, nheads = 0;
|
||||
s16 headcount;
|
||||
size_t vhost_hlen, sock_hlen;
|
||||
size_t vhost_len, sock_len;
|
||||
struct socket *sock;
|
||||
|
@ -790,8 +806,8 @@ static void handle_rx(struct vhost_net *net)
|
|||
while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk))) {
|
||||
sock_len += sock_hlen;
|
||||
vhost_len = sock_len + vhost_hlen;
|
||||
headcount = get_rx_bufs(vq, vq->heads + nheads, vhost_len,
|
||||
&in, vq_log, &log,
|
||||
headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx,
|
||||
vhost_len, &in, vq_log, &log,
|
||||
likely(mergeable) ? UIO_MAXIOV : 1);
|
||||
/* On error, stop handling until the next kick. */
|
||||
if (unlikely(headcount < 0))
|
||||
|
@ -862,12 +878,9 @@ static void handle_rx(struct vhost_net *net)
|
|||
vhost_discard_vq_desc(vq, headcount);
|
||||
goto out;
|
||||
}
|
||||
nheads += headcount;
|
||||
if (nheads > VHOST_RX_BATCH) {
|
||||
vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
|
||||
nheads);
|
||||
nheads = 0;
|
||||
}
|
||||
nvq->done_idx += headcount;
|
||||
if (nvq->done_idx > VHOST_RX_BATCH)
|
||||
vhost_rx_signal_used(nvq);
|
||||
if (unlikely(vq_log))
|
||||
vhost_log_write(vq, vq_log, log, vhost_len);
|
||||
total_len += vhost_len;
|
||||
|
@ -878,9 +891,7 @@ static void handle_rx(struct vhost_net *net)
|
|||
}
|
||||
vhost_net_enable_vq(net, vq);
|
||||
out:
|
||||
if (nheads)
|
||||
vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
|
||||
nheads);
|
||||
vhost_rx_signal_used(nvq);
|
||||
mutex_unlock(&vq->mutex);
|
||||
}
|
||||
|
||||
|
|
|
@ -372,18 +372,14 @@ int afs_permission(struct inode *inode, int mask)
|
|||
mask, access, S_ISDIR(inode->i_mode) ? "dir" : "file");
|
||||
|
||||
if (S_ISDIR(inode->i_mode)) {
|
||||
if (mask & MAY_EXEC) {
|
||||
if (mask & (MAY_EXEC | MAY_READ | MAY_CHDIR)) {
|
||||
if (!(access & AFS_ACE_LOOKUP))
|
||||
goto permission_denied;
|
||||
} else if (mask & MAY_READ) {
|
||||
if (!(access & AFS_ACE_LOOKUP))
|
||||
goto permission_denied;
|
||||
} else if (mask & MAY_WRITE) {
|
||||
}
|
||||
if (mask & MAY_WRITE) {
|
||||
if (!(access & (AFS_ACE_DELETE | /* rmdir, unlink, rename from */
|
||||
AFS_ACE_INSERT))) /* create, mkdir, symlink, rename to */
|
||||
goto permission_denied;
|
||||
} else {
|
||||
BUG();
|
||||
}
|
||||
} else {
|
||||
if (!(access & AFS_ACE_LOOKUP))
|
||||
|
|
|
@ -23,7 +23,7 @@ static int afs_deliver_vl_get_entry_by_name_u(struct afs_call *call)
|
|||
struct afs_uvldbentry__xdr *uvldb;
|
||||
struct afs_vldb_entry *entry;
|
||||
bool new_only = false;
|
||||
u32 tmp, nr_servers;
|
||||
u32 tmp, nr_servers, vlflags;
|
||||
int i, ret;
|
||||
|
||||
_enter("");
|
||||
|
@ -55,6 +55,7 @@ static int afs_deliver_vl_get_entry_by_name_u(struct afs_call *call)
|
|||
new_only = true;
|
||||
}
|
||||
|
||||
vlflags = ntohl(uvldb->flags);
|
||||
for (i = 0; i < nr_servers; i++) {
|
||||
struct afs_uuid__xdr *xdr;
|
||||
struct afs_uuid *uuid;
|
||||
|
@ -64,12 +65,13 @@ static int afs_deliver_vl_get_entry_by_name_u(struct afs_call *call)
|
|||
if (tmp & AFS_VLSF_DONTUSE ||
|
||||
(new_only && !(tmp & AFS_VLSF_NEWREPSITE)))
|
||||
continue;
|
||||
if (tmp & AFS_VLSF_RWVOL)
|
||||
if (tmp & AFS_VLSF_RWVOL) {
|
||||
entry->fs_mask[i] |= AFS_VOL_VTM_RW;
|
||||
if (vlflags & AFS_VLF_BACKEXISTS)
|
||||
entry->fs_mask[i] |= AFS_VOL_VTM_BAK;
|
||||
}
|
||||
if (tmp & AFS_VLSF_ROVOL)
|
||||
entry->fs_mask[i] |= AFS_VOL_VTM_RO;
|
||||
if (tmp & AFS_VLSF_BACKVOL)
|
||||
entry->fs_mask[i] |= AFS_VOL_VTM_BAK;
|
||||
if (!entry->fs_mask[i])
|
||||
continue;
|
||||
|
||||
|
@ -89,15 +91,14 @@ static int afs_deliver_vl_get_entry_by_name_u(struct afs_call *call)
|
|||
for (i = 0; i < AFS_MAXTYPES; i++)
|
||||
entry->vid[i] = ntohl(uvldb->volumeId[i]);
|
||||
|
||||
tmp = ntohl(uvldb->flags);
|
||||
if (tmp & AFS_VLF_RWEXISTS)
|
||||
if (vlflags & AFS_VLF_RWEXISTS)
|
||||
__set_bit(AFS_VLDB_HAS_RW, &entry->flags);
|
||||
if (tmp & AFS_VLF_ROEXISTS)
|
||||
if (vlflags & AFS_VLF_ROEXISTS)
|
||||
__set_bit(AFS_VLDB_HAS_RO, &entry->flags);
|
||||
if (tmp & AFS_VLF_BACKEXISTS)
|
||||
if (vlflags & AFS_VLF_BACKEXISTS)
|
||||
__set_bit(AFS_VLDB_HAS_BAK, &entry->flags);
|
||||
|
||||
if (!(tmp & (AFS_VLF_RWEXISTS | AFS_VLF_ROEXISTS | AFS_VLF_BACKEXISTS))) {
|
||||
if (!(vlflags & (AFS_VLF_RWEXISTS | AFS_VLF_ROEXISTS | AFS_VLF_BACKEXISTS))) {
|
||||
entry->error = -ENOMEDIUM;
|
||||
__set_bit(AFS_VLDB_QUERY_ERROR, &entry->flags);
|
||||
}
|
||||
|
|
|
@ -197,7 +197,7 @@ config CIFS_SMB311
|
|||
|
||||
config CIFS_SMB_DIRECT
|
||||
bool "SMB Direct support (Experimental)"
|
||||
depends on CIFS=m && INFINIBAND_ADDR_TRANS || CIFS=y && INFINIBAND_ADDR_TRANS=y
|
||||
depends on CIFS=m && INFINIBAND && INFINIBAND_ADDR_TRANS || CIFS=y && INFINIBAND=y && INFINIBAND_ADDR_TRANS=y
|
||||
help
|
||||
Enables SMB Direct experimental support for SMB 3.0, 3.02 and 3.1.1.
|
||||
SMB Direct allows transferring SMB packets over RDMA. If unsure,
|
||||
|
|
|
@ -178,6 +178,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
|
|||
mapping->a_ops = &empty_aops;
|
||||
mapping->host = inode;
|
||||
mapping->flags = 0;
|
||||
mapping->wb_err = 0;
|
||||
atomic_set(&mapping->i_mmap_writable, 0);
|
||||
mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
|
||||
mapping->private_data = NULL;
|
||||
|
|
|
@ -151,7 +151,7 @@ struct dw_hdmi *dw_hdmi_bind(struct platform_device *pdev,
|
|||
struct drm_encoder *encoder,
|
||||
const struct dw_hdmi_plat_data *plat_data);
|
||||
|
||||
void dw_hdmi_setup_rx_sense(struct device *dev, bool hpd, bool rx_sense);
|
||||
void dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense);
|
||||
|
||||
void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate);
|
||||
void dw_hdmi_audio_enable(struct dw_hdmi *hdmi);
|
||||
|
|
|
@ -53,7 +53,7 @@ struct iio_buffer_access_funcs {
|
|||
int (*request_update)(struct iio_buffer *buffer);
|
||||
|
||||
int (*set_bytes_per_datum)(struct iio_buffer *buffer, size_t bpd);
|
||||
int (*set_length)(struct iio_buffer *buffer, int length);
|
||||
int (*set_length)(struct iio_buffer *buffer, unsigned int length);
|
||||
|
||||
int (*enable)(struct iio_buffer *buffer, struct iio_dev *indio_dev);
|
||||
int (*disable)(struct iio_buffer *buffer, struct iio_dev *indio_dev);
|
||||
|
@ -72,10 +72,10 @@ struct iio_buffer_access_funcs {
|
|||
*/
|
||||
struct iio_buffer {
|
||||
/** @length: Number of datums in buffer. */
|
||||
int length;
|
||||
unsigned int length;
|
||||
|
||||
/** @bytes_per_datum: Size of individual datum including timestamp. */
|
||||
int bytes_per_datum;
|
||||
size_t bytes_per_datum;
|
||||
|
||||
/**
|
||||
* @access: Buffer access functions associated with the
|
||||
|
|
|
@ -1017,6 +1017,7 @@ struct bpf_prog_info {
|
|||
__aligned_u64 map_ids;
|
||||
char name[BPF_OBJ_NAME_LEN];
|
||||
__u32 ifindex;
|
||||
__u32 :32;
|
||||
__u64 netns_dev;
|
||||
__u64 netns_ino;
|
||||
} __attribute__((aligned(8)));
|
||||
|
@ -1030,6 +1031,7 @@ struct bpf_map_info {
|
|||
__u32 map_flags;
|
||||
char name[BPF_OBJ_NAME_LEN];
|
||||
__u32 ifindex;
|
||||
__u32 :32;
|
||||
__u64 netns_dev;
|
||||
__u64 netns_ino;
|
||||
} __attribute__((aligned(8)));
|
||||
|
|
|
@ -881,6 +881,33 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
|
|||
}
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
|
||||
static inline bool is_per_cpu_kthread(struct task_struct *p)
|
||||
{
|
||||
if (!(p->flags & PF_KTHREAD))
|
||||
return false;
|
||||
|
||||
if (p->nr_cpus_allowed != 1)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Per-CPU kthreads are allowed to run on !active && online CPUs, see
|
||||
* __set_cpus_allowed_ptr() and select_fallback_rq().
|
||||
*/
|
||||
static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
|
||||
{
|
||||
if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
|
||||
return false;
|
||||
|
||||
if (is_per_cpu_kthread(p))
|
||||
return cpu_online(cpu);
|
||||
|
||||
return cpu_active(cpu);
|
||||
}
|
||||
|
||||
/*
|
||||
* This is how migration works:
|
||||
*
|
||||
|
@ -938,16 +965,8 @@ struct migration_arg {
|
|||
static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
|
||||
struct task_struct *p, int dest_cpu)
|
||||
{
|
||||
if (p->flags & PF_KTHREAD) {
|
||||
if (unlikely(!cpu_online(dest_cpu)))
|
||||
return rq;
|
||||
} else {
|
||||
if (unlikely(!cpu_active(dest_cpu)))
|
||||
return rq;
|
||||
}
|
||||
|
||||
/* Affinity changed (again). */
|
||||
if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
|
||||
if (!is_cpu_allowed(p, dest_cpu))
|
||||
return rq;
|
||||
|
||||
update_rq_clock(rq);
|
||||
|
@ -1476,10 +1495,9 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
|
|||
for (;;) {
|
||||
/* Any allowed, online CPU? */
|
||||
for_each_cpu(dest_cpu, &p->cpus_allowed) {
|
||||
if (!(p->flags & PF_KTHREAD) && !cpu_active(dest_cpu))
|
||||
continue;
|
||||
if (!cpu_online(dest_cpu))
|
||||
if (!is_cpu_allowed(p, dest_cpu))
|
||||
continue;
|
||||
|
||||
goto out;
|
||||
}
|
||||
|
||||
|
@ -1542,8 +1560,7 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
|
|||
* [ this allows ->select_task() to simply return task_cpu(p) and
|
||||
* not worry about this generic constraint ]
|
||||
*/
|
||||
if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) ||
|
||||
!cpu_online(cpu)))
|
||||
if (unlikely(!is_cpu_allowed(p, cpu)))
|
||||
cpu = select_fallback_rq(task_cpu(p), p);
|
||||
|
||||
return cpu;
|
||||
|
|
|
@ -1259,6 +1259,9 @@ static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
|
|||
|
||||
rq = task_rq_lock(p, &rf);
|
||||
|
||||
sched_clock_tick();
|
||||
update_rq_clock(rq);
|
||||
|
||||
if (!dl_task(p) || p->state == TASK_DEAD) {
|
||||
struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
|
||||
|
||||
|
@ -1278,9 +1281,6 @@ static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
|
|||
if (dl_se->dl_non_contending == 0)
|
||||
goto unlock;
|
||||
|
||||
sched_clock_tick();
|
||||
update_rq_clock(rq);
|
||||
|
||||
sub_running_bw(dl_se, &rq->dl);
|
||||
dl_se->dl_non_contending = 0;
|
||||
unlock:
|
||||
|
|
|
@ -983,7 +983,7 @@ static inline void rq_clock_skip_update(struct rq *rq)
|
|||
}
|
||||
|
||||
/*
|
||||
* See rt task throttoling, which is the only time a skip
|
||||
* See rt task throttling, which is the only time a skip
|
||||
* request is cancelled.
|
||||
*/
|
||||
static inline void rq_clock_cancel_skipupdate(struct rq *rq)
|
||||
|
|
|
@ -893,7 +893,7 @@ int __trace_bputs(unsigned long ip, const char *str)
|
|||
EXPORT_SYMBOL_GPL(__trace_bputs);
|
||||
|
||||
#ifdef CONFIG_TRACER_SNAPSHOT
|
||||
static void tracing_snapshot_instance(struct trace_array *tr)
|
||||
void tracing_snapshot_instance(struct trace_array *tr)
|
||||
{
|
||||
struct tracer *tracer = tr->current_trace;
|
||||
unsigned long flags;
|
||||
|
@ -949,7 +949,7 @@ static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
|
|||
struct trace_buffer *size_buf, int cpu_id);
|
||||
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
|
||||
|
||||
static int alloc_snapshot(struct trace_array *tr)
|
||||
int tracing_alloc_snapshot_instance(struct trace_array *tr)
|
||||
{
|
||||
int ret;
|
||||
|
||||
|
@ -995,7 +995,7 @@ int tracing_alloc_snapshot(void)
|
|||
struct trace_array *tr = &global_trace;
|
||||
int ret;
|
||||
|
||||
ret = alloc_snapshot(tr);
|
||||
ret = tracing_alloc_snapshot_instance(tr);
|
||||
WARN_ON(ret < 0);
|
||||
|
||||
return ret;
|
||||
|
@ -5408,7 +5408,7 @@ static int tracing_set_tracer(struct trace_array *tr, const char *buf)
|
|||
|
||||
#ifdef CONFIG_TRACER_MAX_TRACE
|
||||
if (t->use_max_tr && !had_max_tr) {
|
||||
ret = alloc_snapshot(tr);
|
||||
ret = tracing_alloc_snapshot_instance(tr);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
}
|
||||
|
@ -6451,7 +6451,7 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
|
|||
}
|
||||
#endif
|
||||
if (!tr->allocated_snapshot) {
|
||||
ret = alloc_snapshot(tr);
|
||||
ret = tracing_alloc_snapshot_instance(tr);
|
||||
if (ret < 0)
|
||||
break;
|
||||
}
|
||||
|
@ -7179,7 +7179,7 @@ ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
|
|||
return ret;
|
||||
|
||||
out_reg:
|
||||
ret = alloc_snapshot(tr);
|
||||
ret = tracing_alloc_snapshot_instance(tr);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
|
|
|
@ -1817,6 +1817,17 @@ static inline void __init trace_event_init(void) { }
|
|||
static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { }
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_TRACER_SNAPSHOT
|
||||
void tracing_snapshot_instance(struct trace_array *tr);
|
||||
int tracing_alloc_snapshot_instance(struct trace_array *tr);
|
||||
#else
|
||||
static inline void tracing_snapshot_instance(struct trace_array *tr) { }
|
||||
static inline int tracing_alloc_snapshot_instance(struct trace_array *tr)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
extern struct trace_iterator *tracepoint_print_iter;
|
||||
|
||||
#endif /* _LINUX_KERNEL_TRACE_H */
|
||||
|
|
|
@ -483,9 +483,10 @@ clear_event_triggers(struct trace_array *tr)
|
|||
struct trace_event_file *file;
|
||||
|
||||
list_for_each_entry(file, &tr->events, list) {
|
||||
struct event_trigger_data *data;
|
||||
list_for_each_entry_rcu(data, &file->triggers, list) {
|
||||
struct event_trigger_data *data, *n;
|
||||
list_for_each_entry_safe(data, n, &file->triggers, list) {
|
||||
trace_event_trigger_enable_disable(file, 0);
|
||||
list_del_rcu(&data->list);
|
||||
if (data->ops->free)
|
||||
data->ops->free(data->ops, data);
|
||||
}
|
||||
|
@ -642,6 +643,7 @@ event_trigger_callback(struct event_command *cmd_ops,
|
|||
trigger_data->count = -1;
|
||||
trigger_data->ops = trigger_ops;
|
||||
trigger_data->cmd_ops = cmd_ops;
|
||||
trigger_data->private_data = file;
|
||||
INIT_LIST_HEAD(&trigger_data->list);
|
||||
INIT_LIST_HEAD(&trigger_data->named_list);
|
||||
|
||||
|
@ -1053,7 +1055,12 @@ static void
|
|||
snapshot_trigger(struct event_trigger_data *data, void *rec,
|
||||
struct ring_buffer_event *event)
|
||||
{
|
||||
tracing_snapshot();
|
||||
struct trace_event_file *file = data->private_data;
|
||||
|
||||
if (file)
|
||||
tracing_snapshot_instance(file->tr);
|
||||
else
|
||||
tracing_snapshot();
|
||||
}
|
||||
|
||||
static void
|
||||
|
@ -1076,7 +1083,7 @@ register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
|
|||
{
|
||||
int ret = register_trigger(glob, ops, data, file);
|
||||
|
||||
if (ret > 0 && tracing_alloc_snapshot() != 0) {
|
||||
if (ret > 0 && tracing_alloc_snapshot_instance(file->tr) != 0) {
|
||||
unregister_trigger(glob, ops, data, file);
|
||||
ret = 0;
|
||||
}
|
||||
|
|
|
@ -2431,7 +2431,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
|
|||
__split_huge_page_tail(head, i, lruvec, list);
|
||||
/* Some pages can be beyond i_size: drop them from page cache */
|
||||
if (head[i].index >= end) {
|
||||
__ClearPageDirty(head + i);
|
||||
ClearPageDirty(head + i);
|
||||
__delete_from_page_cache(head + i, NULL);
|
||||
if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head))
|
||||
shmem_uncharge(head->mapping->host, 1);
|
||||
|
|
|
@ -1418,7 +1418,7 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode)
|
|||
return ret;
|
||||
|
||||
mapping = page_mapping(page);
|
||||
migrate_dirty = mapping && mapping->a_ops->migratepage;
|
||||
migrate_dirty = !mapping || mapping->a_ops->migratepage;
|
||||
unlock_page(page);
|
||||
if (!migrate_dirty)
|
||||
return ret;
|
||||
|
|
|
@ -32,7 +32,7 @@ config NET_9P_XEN
|
|||
|
||||
|
||||
config NET_9P_RDMA
|
||||
depends on INET && INFINIBAND_ADDR_TRANS
|
||||
depends on INET && INFINIBAND && INFINIBAND_ADDR_TRANS
|
||||
tristate "9P RDMA Transport (Experimental)"
|
||||
help
|
||||
This builds support for an RDMA transport.
|
||||
|
|
|
@ -1954,7 +1954,8 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
|
|||
int off, pad = 0;
|
||||
unsigned int size_kern, match_size = mwt->match_size;
|
||||
|
||||
strlcpy(name, mwt->u.name, sizeof(name));
|
||||
if (strscpy(name, mwt->u.name, sizeof(name)) < 0)
|
||||
return -EINVAL;
|
||||
|
||||
if (state->buf_kern_start)
|
||||
dst = state->buf_kern_start + state->buf_kern_offset;
|
||||
|
|
|
@ -1214,9 +1214,6 @@ static ssize_t xps_cpus_show(struct netdev_queue *queue,
|
|||
cpumask_var_t mask;
|
||||
unsigned long index;
|
||||
|
||||
if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
|
||||
return -ENOMEM;
|
||||
|
||||
index = get_netdev_queue_index(queue);
|
||||
|
||||
if (dev->num_tc) {
|
||||
|
@ -1226,6 +1223,9 @@ static ssize_t xps_cpus_show(struct netdev_queue *queue,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
|
||||
return -ENOMEM;
|
||||
|
||||
rcu_read_lock();
|
||||
dev_maps = rcu_dereference(dev->xps_maps);
|
||||
if (dev_maps) {
|
||||
|
|
|
@ -328,7 +328,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
|
|||
|
||||
if (tdev) {
|
||||
hlen = tdev->hard_header_len + tdev->needed_headroom;
|
||||
mtu = tdev->mtu;
|
||||
mtu = min(tdev->mtu, IP_MAX_MTU);
|
||||
}
|
||||
|
||||
dev->needed_headroom = t_hlen + hlen;
|
||||
|
@ -362,7 +362,7 @@ static struct ip_tunnel *ip_tunnel_create(struct net *net,
|
|||
nt = netdev_priv(dev);
|
||||
t_hlen = nt->hlen + sizeof(struct iphdr);
|
||||
dev->min_mtu = ETH_MIN_MTU;
|
||||
dev->max_mtu = 0xFFF8 - dev->hard_header_len - t_hlen;
|
||||
dev->max_mtu = IP_MAX_MTU - dev->hard_header_len - t_hlen;
|
||||
ip_tunnel_add(itn, nt);
|
||||
return nt;
|
||||
|
||||
|
@ -930,7 +930,7 @@ int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict)
|
|||
{
|
||||
struct ip_tunnel *tunnel = netdev_priv(dev);
|
||||
int t_hlen = tunnel->hlen + sizeof(struct iphdr);
|
||||
int max_mtu = 0xFFF8 - dev->hard_header_len - t_hlen;
|
||||
int max_mtu = IP_MAX_MTU - dev->hard_header_len - t_hlen;
|
||||
|
||||
if (new_mtu < ETH_MIN_MTU)
|
||||
return -EINVAL;
|
||||
|
@ -1107,7 +1107,7 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
|
|||
|
||||
mtu = ip_tunnel_bind_dev(dev);
|
||||
if (tb[IFLA_MTU]) {
|
||||
unsigned int max = 0xfff8 - dev->hard_header_len - nt->hlen;
|
||||
unsigned int max = IP_MAX_MTU - dev->hard_header_len - nt->hlen;
|
||||
|
||||
mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU,
|
||||
(unsigned int)(max - sizeof(struct iphdr)));
|
||||
|
|
|
@ -1692,8 +1692,13 @@ int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
|
|||
if (new_mtu < ETH_MIN_MTU)
|
||||
return -EINVAL;
|
||||
}
|
||||
if (new_mtu > 0xFFF8 - dev->hard_header_len)
|
||||
return -EINVAL;
|
||||
if (tnl->parms.proto == IPPROTO_IPV6 || tnl->parms.proto == 0) {
|
||||
if (new_mtu > IP6_MAX_MTU - dev->hard_header_len)
|
||||
return -EINVAL;
|
||||
} else {
|
||||
if (new_mtu > IP_MAX_MTU - dev->hard_header_len)
|
||||
return -EINVAL;
|
||||
}
|
||||
dev->mtu = new_mtu;
|
||||
return 0;
|
||||
}
|
||||
|
@ -1841,7 +1846,7 @@ ip6_tnl_dev_init_gen(struct net_device *dev)
|
|||
if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
|
||||
dev->mtu -= 8;
|
||||
dev->min_mtu = ETH_MIN_MTU;
|
||||
dev->max_mtu = 0xFFF8 - dev->hard_header_len;
|
||||
dev->max_mtu = IP6_MAX_MTU - dev->hard_header_len;
|
||||
|
||||
return 0;
|
||||
|
||||
|
|
|
@ -103,7 +103,7 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
|
|||
hdrlen = (osrh->hdrlen + 1) << 3;
|
||||
tot_len = hdrlen + sizeof(*hdr);
|
||||
|
||||
err = skb_cow_head(skb, tot_len);
|
||||
err = skb_cow_head(skb, tot_len + skb->mac_len);
|
||||
if (unlikely(err))
|
||||
return err;
|
||||
|
||||
|
@ -161,7 +161,7 @@ int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh)
|
|||
|
||||
hdrlen = (osrh->hdrlen + 1) << 3;
|
||||
|
||||
err = skb_cow_head(skb, hdrlen);
|
||||
err = skb_cow_head(skb, hdrlen + skb->mac_len);
|
||||
if (unlikely(err))
|
||||
return err;
|
||||
|
||||
|
|
|
@ -1371,7 +1371,7 @@ static void ipip6_tunnel_setup(struct net_device *dev)
|
|||
dev->hard_header_len = LL_MAX_HEADER + t_hlen;
|
||||
dev->mtu = ETH_DATA_LEN - t_hlen;
|
||||
dev->min_mtu = IPV6_MIN_MTU;
|
||||
dev->max_mtu = 0xFFF8 - t_hlen;
|
||||
dev->max_mtu = IP6_MAX_MTU - t_hlen;
|
||||
dev->flags = IFF_NOARP;
|
||||
netif_keep_dst(dev);
|
||||
dev->addr_len = 4;
|
||||
|
@ -1583,7 +1583,8 @@ static int ipip6_newlink(struct net *src_net, struct net_device *dev,
|
|||
if (tb[IFLA_MTU]) {
|
||||
u32 mtu = nla_get_u32(tb[IFLA_MTU]);
|
||||
|
||||
if (mtu >= IPV6_MIN_MTU && mtu <= 0xFFF8 - dev->hard_header_len)
|
||||
if (mtu >= IPV6_MIN_MTU &&
|
||||
mtu <= IP6_MAX_MTU - dev->hard_header_len)
|
||||
dev->mtu = mtu;
|
||||
}
|
||||
|
||||
|
|
|
@ -126,7 +126,7 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
|
|||
struct flowi6 *fl6 = &fl->u.ip6;
|
||||
int onlyproto = 0;
|
||||
const struct ipv6hdr *hdr = ipv6_hdr(skb);
|
||||
u16 offset = sizeof(*hdr);
|
||||
u32 offset = sizeof(*hdr);
|
||||
struct ipv6_opt_hdr *exthdr;
|
||||
const unsigned char *nh = skb_network_header(skb);
|
||||
u16 nhoff = IP6CB(skb)->nhoff;
|
||||
|
|
|
@ -1671,7 +1671,7 @@ static struct file *kcm_clone(struct socket *osock)
|
|||
__module_get(newsock->ops->owner);
|
||||
|
||||
newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL,
|
||||
&kcm_proto, true);
|
||||
&kcm_proto, false);
|
||||
if (!newsk) {
|
||||
sock_release(newsock);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
|
|
@ -215,7 +215,7 @@ err:
|
|||
static int ncsi_pkg_info_all_nl(struct sk_buff *skb,
|
||||
struct netlink_callback *cb)
|
||||
{
|
||||
struct nlattr *attrs[NCSI_ATTR_MAX];
|
||||
struct nlattr *attrs[NCSI_ATTR_MAX + 1];
|
||||
struct ncsi_package *np, *package;
|
||||
struct ncsi_dev_priv *ndp;
|
||||
unsigned int package_id;
|
||||
|
|
|
@ -2381,8 +2381,10 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
|
|||
struct ipvs_sync_daemon_cfg cfg;
|
||||
|
||||
memset(&cfg, 0, sizeof(cfg));
|
||||
strlcpy(cfg.mcast_ifn, dm->mcast_ifn,
|
||||
sizeof(cfg.mcast_ifn));
|
||||
ret = -EINVAL;
|
||||
if (strscpy(cfg.mcast_ifn, dm->mcast_ifn,
|
||||
sizeof(cfg.mcast_ifn)) <= 0)
|
||||
goto out_dec;
|
||||
cfg.syncid = dm->syncid;
|
||||
ret = start_sync_thread(ipvs, &cfg, dm->state);
|
||||
} else {
|
||||
|
@ -2420,12 +2422,19 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
|
|||
}
|
||||
}
|
||||
|
||||
if ((cmd == IP_VS_SO_SET_ADD || cmd == IP_VS_SO_SET_EDIT) &&
|
||||
strnlen(usvc.sched_name, IP_VS_SCHEDNAME_MAXLEN) ==
|
||||
IP_VS_SCHEDNAME_MAXLEN) {
|
||||
ret = -EINVAL;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
/* Check for valid protocol: TCP or UDP or SCTP, even for fwmark!=0 */
|
||||
if (usvc.protocol != IPPROTO_TCP && usvc.protocol != IPPROTO_UDP &&
|
||||
usvc.protocol != IPPROTO_SCTP) {
|
||||
pr_err("set_ctl: invalid protocol: %d %pI4:%d %s\n",
|
||||
pr_err("set_ctl: invalid protocol: %d %pI4:%d\n",
|
||||
usvc.protocol, &usvc.addr.ip,
|
||||
ntohs(usvc.port), usvc.sched_name);
|
||||
ntohs(usvc.port));
|
||||
ret = -EFAULT;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
@ -2847,7 +2856,7 @@ static const struct nla_policy ip_vs_cmd_policy[IPVS_CMD_ATTR_MAX + 1] = {
|
|||
static const struct nla_policy ip_vs_daemon_policy[IPVS_DAEMON_ATTR_MAX + 1] = {
|
||||
[IPVS_DAEMON_ATTR_STATE] = { .type = NLA_U32 },
|
||||
[IPVS_DAEMON_ATTR_MCAST_IFN] = { .type = NLA_NUL_STRING,
|
||||
.len = IP_VS_IFNAME_MAXLEN },
|
||||
.len = IP_VS_IFNAME_MAXLEN - 1 },
|
||||
[IPVS_DAEMON_ATTR_SYNC_ID] = { .type = NLA_U32 },
|
||||
[IPVS_DAEMON_ATTR_SYNC_MAXLEN] = { .type = NLA_U16 },
|
||||
[IPVS_DAEMON_ATTR_MCAST_GROUP] = { .type = NLA_U32 },
|
||||
|
@ -2865,7 +2874,7 @@ static const struct nla_policy ip_vs_svc_policy[IPVS_SVC_ATTR_MAX + 1] = {
|
|||
[IPVS_SVC_ATTR_PORT] = { .type = NLA_U16 },
|
||||
[IPVS_SVC_ATTR_FWMARK] = { .type = NLA_U32 },
|
||||
[IPVS_SVC_ATTR_SCHED_NAME] = { .type = NLA_NUL_STRING,
|
||||
.len = IP_VS_SCHEDNAME_MAXLEN },
|
||||
.len = IP_VS_SCHEDNAME_MAXLEN - 1 },
|
||||
[IPVS_SVC_ATTR_PE_NAME] = { .type = NLA_NUL_STRING,
|
||||
.len = IP_VS_PENAME_MAXLEN },
|
||||
[IPVS_SVC_ATTR_FLAGS] = { .type = NLA_BINARY,
|
||||
|
|
|
@ -1298,8 +1298,10 @@ static void nft_chain_stats_replace(struct nft_base_chain *chain,
|
|||
rcu_assign_pointer(chain->stats, newstats);
|
||||
synchronize_rcu();
|
||||
free_percpu(oldstats);
|
||||
} else
|
||||
} else {
|
||||
rcu_assign_pointer(chain->stats, newstats);
|
||||
static_branch_inc(&nft_counters_enabled);
|
||||
}
|
||||
}
|
||||
|
||||
static void nf_tables_chain_destroy(struct nft_ctx *ctx)
|
||||
|
@ -4706,7 +4708,7 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
|
|||
if (idx > s_idx)
|
||||
memset(&cb->args[1], 0,
|
||||
sizeof(cb->args) - sizeof(cb->args[0]));
|
||||
if (filter && filter->table[0] &&
|
||||
if (filter && filter->table &&
|
||||
strcmp(filter->table, table->name))
|
||||
goto cont;
|
||||
if (filter &&
|
||||
|
@ -5380,7 +5382,7 @@ static int nf_tables_dump_flowtable(struct sk_buff *skb,
|
|||
if (idx > s_idx)
|
||||
memset(&cb->args[1], 0,
|
||||
sizeof(cb->args) - sizeof(cb->args[0]));
|
||||
if (filter && filter->table[0] &&
|
||||
if (filter && filter->table &&
|
||||
strcmp(filter->table, table->name))
|
||||
goto cont;
|
||||
|
||||
|
|
|
@ -126,15 +126,15 @@ static noinline void nft_update_chain_stats(const struct nft_chain *chain,
|
|||
if (!base_chain->stats)
|
||||
return;
|
||||
|
||||
local_bh_disable();
|
||||
stats = this_cpu_ptr(rcu_dereference(base_chain->stats));
|
||||
if (stats) {
|
||||
local_bh_disable();
|
||||
u64_stats_update_begin(&stats->syncp);
|
||||
stats->pkts++;
|
||||
stats->bytes += pkt->skb->len;
|
||||
u64_stats_update_end(&stats->syncp);
|
||||
local_bh_enable();
|
||||
}
|
||||
local_bh_enable();
|
||||
}
|
||||
|
||||
struct nft_jumpstack {
|
||||
|
|
|
@ -115,7 +115,7 @@ static int nfnl_acct_new(struct net *net, struct sock *nfnl,
|
|||
nfacct->flags = flags;
|
||||
}
|
||||
|
||||
nla_strlcpy(nfacct->name, nla_data(tb[NFACCT_NAME]), NFACCT_NAME_MAX);
|
||||
nla_strlcpy(nfacct->name, tb[NFACCT_NAME], NFACCT_NAME_MAX);
|
||||
|
||||
if (tb[NFACCT_BYTES]) {
|
||||
atomic64_set(&nfacct->bytes,
|
||||
|
|
|
@ -150,7 +150,7 @@ nfnl_cthelper_expect_policy(struct nf_conntrack_expect_policy *expect_policy,
|
|||
return -EINVAL;
|
||||
|
||||
nla_strlcpy(expect_policy->name,
|
||||
nla_data(tb[NFCTH_POLICY_NAME]), NF_CT_HELPER_NAME_LEN);
|
||||
tb[NFCTH_POLICY_NAME], NF_CT_HELPER_NAME_LEN);
|
||||
expect_policy->max_expected =
|
||||
ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_MAX]));
|
||||
if (expect_policy->max_expected > NF_CT_EXPECT_MAX_CNT)
|
||||
|
@ -235,7 +235,7 @@ nfnl_cthelper_create(const struct nlattr * const tb[],
|
|||
goto err1;
|
||||
|
||||
nla_strlcpy(helper->name,
|
||||
nla_data(tb[NFCTH_NAME]), NF_CT_HELPER_NAME_LEN);
|
||||
tb[NFCTH_NAME], NF_CT_HELPER_NAME_LEN);
|
||||
size = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN]));
|
||||
if (size > FIELD_SIZEOF(struct nf_conn_help, data)) {
|
||||
ret = -ENOMEM;
|
||||
|
|
|
@ -880,22 +880,26 @@ static int nft_ct_helper_obj_dump(struct sk_buff *skb,
|
|||
struct nft_object *obj, bool reset)
|
||||
{
|
||||
const struct nft_ct_helper_obj *priv = nft_obj_data(obj);
|
||||
const struct nf_conntrack_helper *helper = priv->helper4;
|
||||
const struct nf_conntrack_helper *helper;
|
||||
u16 family;
|
||||
|
||||
if (priv->helper4 && priv->helper6) {
|
||||
family = NFPROTO_INET;
|
||||
helper = priv->helper4;
|
||||
} else if (priv->helper6) {
|
||||
family = NFPROTO_IPV6;
|
||||
helper = priv->helper6;
|
||||
} else {
|
||||
family = NFPROTO_IPV4;
|
||||
helper = priv->helper4;
|
||||
}
|
||||
|
||||
if (nla_put_string(skb, NFTA_CT_HELPER_NAME, helper->name))
|
||||
return -1;
|
||||
|
||||
if (nla_put_u8(skb, NFTA_CT_HELPER_L4PROTO, priv->l4proto))
|
||||
return -1;
|
||||
|
||||
if (priv->helper4 && priv->helper6)
|
||||
family = NFPROTO_INET;
|
||||
else if (priv->helper6)
|
||||
family = NFPROTO_IPV6;
|
||||
else
|
||||
family = NFPROTO_IPV4;
|
||||
|
||||
if (nla_put_be16(skb, NFTA_CT_HELPER_L3PROTO, htons(family)))
|
||||
return -1;
|
||||
|
||||
|
|
|
@ -51,10 +51,13 @@ static inline bool nft_limit_eval(struct nft_limit *limit, u64 cost)
|
|||
return !limit->invert;
|
||||
}
|
||||
|
||||
/* Use same default as in iptables. */
|
||||
#define NFT_LIMIT_PKT_BURST_DEFAULT 5
|
||||
|
||||
static int nft_limit_init(struct nft_limit *limit,
|
||||
const struct nlattr * const tb[])
|
||||
const struct nlattr * const tb[], bool pkts)
|
||||
{
|
||||
u64 unit;
|
||||
u64 unit, tokens;
|
||||
|
||||
if (tb[NFTA_LIMIT_RATE] == NULL ||
|
||||
tb[NFTA_LIMIT_UNIT] == NULL)
|
||||
|
@ -68,18 +71,25 @@ static int nft_limit_init(struct nft_limit *limit,
|
|||
|
||||
if (tb[NFTA_LIMIT_BURST])
|
||||
limit->burst = ntohl(nla_get_be32(tb[NFTA_LIMIT_BURST]));
|
||||
else
|
||||
limit->burst = 0;
|
||||
|
||||
if (pkts && limit->burst == 0)
|
||||
limit->burst = NFT_LIMIT_PKT_BURST_DEFAULT;
|
||||
|
||||
if (limit->rate + limit->burst < limit->rate)
|
||||
return -EOVERFLOW;
|
||||
|
||||
/* The token bucket size limits the number of tokens can be
|
||||
* accumulated. tokens_max specifies the bucket size.
|
||||
* tokens_max = unit * (rate + burst) / rate.
|
||||
*/
|
||||
limit->tokens = div_u64(limit->nsecs * (limit->rate + limit->burst),
|
||||
limit->rate);
|
||||
if (pkts) {
|
||||
tokens = div_u64(limit->nsecs, limit->rate) * limit->burst;
|
||||
} else {
|
||||
/* The token bucket size limits the number of tokens can be
|
||||
* accumulated. tokens_max specifies the bucket size.
|
||||
* tokens_max = unit * (rate + burst) / rate.
|
||||
*/
|
||||
tokens = div_u64(limit->nsecs * (limit->rate + limit->burst),
|
||||
limit->rate);
|
||||
}
|
||||
|
||||
limit->tokens = tokens;
|
||||
limit->tokens_max = limit->tokens;
|
||||
|
||||
if (tb[NFTA_LIMIT_FLAGS]) {
|
||||
|
@ -144,7 +154,7 @@ static int nft_limit_pkts_init(const struct nft_ctx *ctx,
|
|||
struct nft_limit_pkts *priv = nft_expr_priv(expr);
|
||||
int err;
|
||||
|
||||
err = nft_limit_init(&priv->limit, tb);
|
||||
err = nft_limit_init(&priv->limit, tb, true);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
|
@ -185,7 +195,7 @@ static int nft_limit_bytes_init(const struct nft_ctx *ctx,
|
|||
{
|
||||
struct nft_limit *priv = nft_expr_priv(expr);
|
||||
|
||||
return nft_limit_init(priv, tb);
|
||||
return nft_limit_init(priv, tb, false);
|
||||
}
|
||||
|
||||
static int nft_limit_bytes_dump(struct sk_buff *skb,
|
||||
|
@ -246,7 +256,7 @@ static int nft_limit_obj_pkts_init(const struct nft_ctx *ctx,
|
|||
struct nft_limit_pkts *priv = nft_obj_data(obj);
|
||||
int err;
|
||||
|
||||
err = nft_limit_init(&priv->limit, tb);
|
||||
err = nft_limit_init(&priv->limit, tb, true);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
|
@ -289,7 +299,7 @@ static int nft_limit_obj_bytes_init(const struct nft_ctx *ctx,
|
|||
{
|
||||
struct nft_limit *priv = nft_obj_data(obj);
|
||||
|
||||
return nft_limit_init(priv, tb);
|
||||
return nft_limit_init(priv, tb, false);
|
||||
}
|
||||
|
||||
static int nft_limit_obj_bytes_dump(struct sk_buff *skb,
|
||||
|
|
|
@ -234,7 +234,7 @@ void nft_meta_set_eval(const struct nft_expr *expr,
|
|||
struct sk_buff *skb = pkt->skb;
|
||||
u32 *sreg = &regs->data[meta->sreg];
|
||||
u32 value = *sreg;
|
||||
u8 pkt_type;
|
||||
u8 value8;
|
||||
|
||||
switch (meta->key) {
|
||||
case NFT_META_MARK:
|
||||
|
@ -244,15 +244,17 @@ void nft_meta_set_eval(const struct nft_expr *expr,
|
|||
skb->priority = value;
|
||||
break;
|
||||
case NFT_META_PKTTYPE:
|
||||
pkt_type = nft_reg_load8(sreg);
|
||||
value8 = nft_reg_load8(sreg);
|
||||
|
||||
if (skb->pkt_type != pkt_type &&
|
||||
skb_pkt_type_ok(pkt_type) &&
|
||||
if (skb->pkt_type != value8 &&
|
||||
skb_pkt_type_ok(value8) &&
|
||||
skb_pkt_type_ok(skb->pkt_type))
|
||||
skb->pkt_type = pkt_type;
|
||||
skb->pkt_type = value8;
|
||||
break;
|
||||
case NFT_META_NFTRACE:
|
||||
skb->nf_trace = !!value;
|
||||
value8 = nft_reg_load8(sreg);
|
||||
|
||||
skb->nf_trace = !!value8;
|
||||
break;
|
||||
default:
|
||||
WARN_ON(1);
|
||||
|
|
|
@ -8,7 +8,7 @@ config RDS
|
|||
|
||||
config RDS_RDMA
|
||||
tristate "RDS over Infiniband"
|
||||
depends on RDS && INFINIBAND_ADDR_TRANS
|
||||
depends on RDS && INFINIBAND && INFINIBAND_ADDR_TRANS
|
||||
---help---
|
||||
Allow RDS to use Infiniband as a transport.
|
||||
This transport supports RDMA operations.
|
||||
|
|
|
@ -977,7 +977,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
|
|||
return 0;
|
||||
|
||||
errout_idr:
|
||||
if (fnew->handle)
|
||||
if (!fold)
|
||||
idr_remove(&head->handle_idr, fnew->handle);
|
||||
errout:
|
||||
tcf_exts_destroy(&fnew->exts);
|
||||
|
|
|
@ -50,7 +50,7 @@ config SUNRPC_DEBUG
|
|||
|
||||
config SUNRPC_XPRT_RDMA
|
||||
tristate "RPC-over-RDMA transport"
|
||||
depends on SUNRPC && INFINIBAND_ADDR_TRANS
|
||||
depends on SUNRPC && INFINIBAND && INFINIBAND_ADDR_TRANS
|
||||
default SUNRPC && INFINIBAND
|
||||
select SG_POOL
|
||||
help
|
||||
|
|
|
@ -1658,7 +1658,6 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
|
|||
trailer_len -= xdst_prev->u.dst.xfrm->props.trailer_len;
|
||||
}
|
||||
|
||||
out:
|
||||
return &xdst0->u.dst;
|
||||
|
||||
put_states:
|
||||
|
@ -1667,8 +1666,8 @@ put_states:
|
|||
free_dst:
|
||||
if (xdst0)
|
||||
dst_release_immediate(&xdst0->u.dst);
|
||||
xdst0 = ERR_PTR(err);
|
||||
goto out;
|
||||
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
|
||||
static int xfrm_expand_policies(const struct flowi *fl, u16 family,
|
||||
|
|
|
@ -1494,7 +1494,7 @@ static int security_context_to_sid_core(struct selinux_state *state,
|
|||
scontext_len, &context, def_sid);
|
||||
if (rc == -EINVAL && force) {
|
||||
context.str = str;
|
||||
context.len = scontext_len;
|
||||
context.len = strlen(str) + 1;
|
||||
str = NULL;
|
||||
} else if (rc)
|
||||
goto out_unlock;
|
||||
|
|
|
@ -198,7 +198,6 @@
|
|||
#define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* Cache Allocation Technology L2 */
|
||||
#define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* Code and Data Prioritization L3 */
|
||||
#define X86_FEATURE_INVPCID_SINGLE ( 7*32+ 7) /* Effectively INVPCID && CR4.PCIDE=1 */
|
||||
|
||||
#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
|
||||
#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
|
||||
#define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */
|
||||
|
@ -207,13 +206,19 @@
|
|||
#define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */
|
||||
#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
|
||||
#define X86_FEATURE_CDP_L2 ( 7*32+15) /* Code and Data Prioritization L2 */
|
||||
|
||||
#define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
|
||||
#define X86_FEATURE_SSBD ( 7*32+17) /* Speculative Store Bypass Disable */
|
||||
#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */
|
||||
#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */
|
||||
#define X86_FEATURE_SEV ( 7*32+20) /* AMD Secure Encrypted Virtualization */
|
||||
|
||||
#define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
|
||||
#define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
|
||||
#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */
|
||||
#define X86_FEATURE_LS_CFG_SSBD ( 7*32+24) /* "" AMD SSBD implementation via LS_CFG MSR */
|
||||
#define X86_FEATURE_IBRS ( 7*32+25) /* Indirect Branch Restricted Speculation */
|
||||
#define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */
|
||||
#define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */
|
||||
#define X86_FEATURE_ZEN ( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */
|
||||
|
||||
/* Virtualization flags: Linux defined, word 8 */
|
||||
#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
|
||||
|
@ -274,9 +279,10 @@
|
|||
#define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */
|
||||
#define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */
|
||||
#define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* Always save/restore FP error pointers */
|
||||
#define X86_FEATURE_IBPB (13*32+12) /* Indirect Branch Prediction Barrier */
|
||||
#define X86_FEATURE_IBRS (13*32+14) /* Indirect Branch Restricted Speculation */
|
||||
#define X86_FEATURE_STIBP (13*32+15) /* Single Thread Indirect Branch Predictors */
|
||||
#define X86_FEATURE_AMD_IBPB (13*32+12) /* "" Indirect Branch Prediction Barrier */
|
||||
#define X86_FEATURE_AMD_IBRS (13*32+14) /* "" Indirect Branch Restricted Speculation */
|
||||
#define X86_FEATURE_AMD_STIBP (13*32+15) /* "" Single Thread Indirect Branch Predictors */
|
||||
#define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */
|
||||
|
||||
/* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
|
||||
#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
|
||||
|
@ -334,6 +340,7 @@
|
|||
#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
|
||||
#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
|
||||
#define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
|
||||
#define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* "" Speculative Store Bypass Disable */
|
||||
|
||||
/*
|
||||
* BUG word(s)
|
||||
|
@ -363,5 +370,6 @@
|
|||
#define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */
|
||||
#define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
|
||||
#define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
|
||||
#define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */
|
||||
|
||||
#endif /* _ASM_X86_CPUFEATURES_H */
|
||||
|
|
|
@ -1017,6 +1017,7 @@ struct bpf_prog_info {
|
|||
__aligned_u64 map_ids;
|
||||
char name[BPF_OBJ_NAME_LEN];
|
||||
__u32 ifindex;
|
||||
__u32 :32;
|
||||
__u64 netns_dev;
|
||||
__u64 netns_ino;
|
||||
} __attribute__((aligned(8)));
|
||||
|
@ -1030,6 +1031,7 @@ struct bpf_map_info {
|
|||
__u32 map_flags;
|
||||
char name[BPF_OBJ_NAME_LEN];
|
||||
__u32 ifindex;
|
||||
__u32 :32;
|
||||
__u64 netns_dev;
|
||||
__u64 netns_ino;
|
||||
} __attribute__((aligned(8)));
|
||||
|
|