commit 6f2eadf6d3
Merge linux 6.6.42
@@ -217,7 +217,7 @@ current *struct* is::
 	int (*media_changed)(struct cdrom_device_info *, int);
 	int (*tray_move)(struct cdrom_device_info *, int);
 	int (*lock_door)(struct cdrom_device_info *, int);
-	int (*select_speed)(struct cdrom_device_info *, int);
+	int (*select_speed)(struct cdrom_device_info *, unsigned long);
 	int (*get_last_session) (struct cdrom_device_info *,
 			struct cdrom_multisession *);
 	int (*get_mcn)(struct cdrom_device_info *, struct cdrom_mcn *);
@@ -396,7 +396,7 @@ action need be taken, and the return value should be 0.

 ::

-	int select_speed(struct cdrom_device_info *cdi, int speed)
+	int select_speed(struct cdrom_device_info *cdi, unsigned long speed)

 Some CD-ROM drives are capable of changing their head-speed. There
 are several reasons for changing the speed of a CD-ROM drive. Badly
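Editor's note: the hunk above brings the documentation in line with the in-kernel hook, whose speed argument is an unsigned long. A minimal driver-side sketch of the corrected signature follows; the mydrv_* names and MYDRV_MAX_SPEED are hypothetical, purely illustrative, and not part of this diff.

	#include <linux/cdrom.h>

	#define MYDRV_MAX_SPEED 52	/* hypothetical drive maximum */

	/* Hypothetical hook matching the corrected prototype:
	 * the speed argument is an unsigned long, not an int. */
	static int mydrv_select_speed(struct cdrom_device_info *cdi,
				      unsigned long speed)
	{
		if (speed == 0)		/* 0 conventionally requests max speed */
			speed = MYDRV_MAX_SPEED;

		/* ... program the drive here; return 0 on success ... */
		return 0;
	}

	static const struct cdrom_device_ops mydrv_dops = {
		.select_speed	= mydrv_select_speed,
		/* remaining hooks elided for brevity */
	};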
Makefile | 2
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 6
 PATCHLEVEL = 6
-SUBLEVEL = 41
+SUBLEVEL = 42
 EXTRAVERSION =
 NAME = Hurr durr I'ma ninja sloth
@@ -109,16 +109,6 @@ extern int __get_user_64t_1(void *);
 extern int __get_user_64t_2(void *);
 extern int __get_user_64t_4(void *);

-#define __GUP_CLOBBER_1	"lr", "cc"
-#ifdef CONFIG_CPU_USE_DOMAINS
-#define __GUP_CLOBBER_2	"ip", "lr", "cc"
-#else
-#define __GUP_CLOBBER_2 "lr", "cc"
-#endif
-#define __GUP_CLOBBER_4	"lr", "cc"
-#define __GUP_CLOBBER_32t_8 "lr", "cc"
-#define __GUP_CLOBBER_8	"lr", "cc"
-
 #define __get_user_x(__r2, __p, __e, __l, __s)		\
	__asm__ __volatile__ (				\
		__asmeq("%0", "r0") __asmeq("%1", "r2")	\
@@ -126,7 +116,7 @@ extern int __get_user_64t_4(void *);
		"bl	__get_user_" #__s		\
		: "=&r" (__e), "=r" (__r2)		\
		: "0" (__p), "r" (__l)			\
-		: __GUP_CLOBBER_##__s)
+		: "ip", "lr", "cc")

 /* narrowing a double-word get into a single 32bit word register: */
 #ifdef __ARMEB__
@@ -148,7 +138,7 @@ extern int __get_user_64t_4(void *);
		"bl	__get_user_64t_" #__s		\
		: "=&r" (__e), "=r" (__r2)		\
		: "0" (__p), "r" (__l)			\
-		: __GUP_CLOBBER_##__s)
+		: "ip", "lr", "cc")
 #else
 #define __get_user_x_64t __get_user_x
 #endif
@@ -464,6 +464,9 @@ static int run_all_insn_set_hw_mode(unsigned int cpu)
	for (int i = 0; i < ARRAY_SIZE(insn_emulations); i++) {
		struct insn_emulation *insn = insn_emulations[i];
		bool enable = READ_ONCE(insn->current_mode) == INSN_HW;
+
+		if (insn->status == INSN_UNAVAILABLE)
+			continue;

		if (insn->set_hw_mode && insn->set_hw_mode(enable)) {
			pr_warn("CPU[%u] cannot support the emulation of %s",
				cpu, insn->name);
@@ -27,7 +27,7 @@
 17	o32	break			sys_ni_syscall
 # 18 was sys_stat
 18	o32	unused18		sys_ni_syscall
-19	o32	lseek			sys_lseek
+19	o32	lseek			sys_lseek			compat_sys_lseek
 20	o32	getpid			sys_getpid
 21	o32	mount			sys_mount
 22	o32	umount			sys_oldumount
@@ -849,6 +849,7 @@ struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe)
 {
	struct eeh_dev *edev;
	struct pci_dev *pdev;
+	struct pci_bus *bus = NULL;

	if (pe->type & EEH_PE_PHB)
		return pe->phb->bus;
@@ -859,9 +860,11 @@ struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe)

	/* Retrieve the parent PCI bus of first (top) PCI device */
	edev = list_first_entry_or_null(&pe->edevs, struct eeh_dev, entry);
+	pci_lock_rescan_remove();
	pdev = eeh_dev_to_pci_dev(edev);
	if (pdev)
-		return pdev->bus;
+		bus = pdev->bus;
+	pci_unlock_rescan_remove();

-	return NULL;
+	return bus;
 }
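Editor's note: the eeh_pe_bus_get() change also shows a general shape: once a lock is taken mid-function, early returns must give way to a single unlock-then-return exit path. A standalone sketch of the pattern, using plain pthreads rather than the kernel's API:

	#include <pthread.h>

	struct table {
		pthread_mutex_t lock;
		int value;
		int valid;
	};

	/* Compute the result under the lock, but return only after
	 * unlocking; an early return while holding t->lock would leak it. */
	int lookup_locked(struct table *t)
	{
		int val = -1;		/* default when no entry is valid */

		pthread_mutex_lock(&t->lock);
		if (t->valid)
			val = t->value;	/* no early return here */
		pthread_mutex_unlock(&t->lock);

		return val;
	}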
@@ -129,14 +129,16 @@ extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
	}
	rcu_read_unlock();

-	fdput(f);
-
-	if (!found)
+	if (!found) {
+		fdput(f);
		return -EINVAL;
+	}

	table_group = iommu_group_get_iommudata(grp);
-	if (WARN_ON(!table_group))
+	if (WARN_ON(!table_group)) {
+		fdput(f);
		return -EFAULT;
+	}

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbltmp = table_group->tables[i];
@@ -157,8 +159,10 @@ extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
			break;
		}
	}
-	if (!tbl)
+	if (!tbl) {
+		fdput(f);
		return -EINVAL;
+	}

	rcu_read_lock();
	list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
@@ -169,6 +173,7 @@ extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
			/* stit is being destroyed */
			iommu_tce_table_put(tbl);
			rcu_read_unlock();
+			fdput(f);
			return -ENOTTY;
		}
		/*
@@ -176,6 +181,7 @@ extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
		 * its KVM reference counter and can return.
		 */
		rcu_read_unlock();
+		fdput(f);
		return 0;
	}
	rcu_read_unlock();
@@ -183,6 +189,7 @@ extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
	stit = kzalloc(sizeof(*stit), GFP_KERNEL);
	if (!stit) {
		iommu_tce_table_put(tbl);
+		fdput(f);
		return -ENOMEM;
	}

@@ -191,6 +198,7 @@ extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,

	list_add_rcu(&stit->next, &stt->iommu_tables);

+	fdput(f);
	return 0;
 }
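Editor's note: the recurring shape in the fix above is that every early exit taken after fdget() must pair with fdput(). A compact, self-contained illustration of the bug class; acquire_ref()/release_ref() are hypothetical stand-ins, and the goto-based single exit path shown is the usual alternative to sprinkling a release before each return:

	#include <errno.h>

	static int acquire_ref(void) { return 1; }	/* stand-in for fdget() */
	static void release_ref(int r) { (void)r; }	/* stand-in for fdput() */

	int attach_resource(int want_valid)
	{
		int ret = 0;
		int ref = acquire_ref();

		if (ref < 0)
			return -EBADF;	/* nothing held yet, plain return is fine */

		if (!want_valid) {
			ret = -EINVAL;
			goto out;	/* don't return directly: ref is held */
		}

		/* ... main work ... */

	out:
		release_ref(ref);	/* runs on every exit path */
		return ret;
	}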
@@ -343,8 +343,8 @@ static int alloc_dispatch_log_kmem_cache(void)
 {
	void (*ctor)(void *) = get_dtl_cache_ctor();

-	dtl_cache = kmem_cache_create("dtl", DISPATCH_LOG_BYTES,
-					DISPATCH_LOG_BYTES, 0, ctor);
+	dtl_cache = kmem_cache_create_usercopy("dtl", DISPATCH_LOG_BYTES,
+						DISPATCH_LOG_BYTES, 0, 0, DISPATCH_LOG_BYTES, ctor);
	if (!dtl_cache) {
		pr_warn("Failed to create dispatch trace log buffer cache\n");
		pr_warn("Stolen time statistics will be unreliable\n");
@@ -32,6 +32,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
			     bool (*fn)(void *, unsigned long), void *arg)
 {
	unsigned long fp, sp, pc;
+	int graph_idx = 0;
	int level = 0;

	if (regs) {
@@ -68,7 +69,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
			pc = regs->ra;
		} else {
			fp = frame->fp;
-			pc = ftrace_graph_ret_addr(current, NULL, frame->ra,
+			pc = ftrace_graph_ret_addr(current, &graph_idx, frame->ra,
						   &frame->ra);
			if (pc == (unsigned long)ret_from_exception) {
				if (unlikely(!__kernel_text_address(pc) || !fn(arg, pc)))
@@ -1333,10 +1333,13 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
	if (ec->busy_polling || bits > 8)
		acpi_ec_burst_enable(ec);

-	for (i = 0; i < bytes; ++i, ++address, ++value)
+	for (i = 0; i < bytes; ++i, ++address, ++value) {
		result = (function == ACPI_READ) ?
			acpi_ec_read(ec, address, value) :
			acpi_ec_write(ec, address, *value);
+		if (result < 0)
+			break;
+	}

	if (ec->busy_polling || bits > 8)
		acpi_ec_burst_disable(ec);
@@ -1348,8 +1351,10 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
		return AE_NOT_FOUND;
	case -ETIME:
		return AE_TIME;
-	default:
+	case 0:
		return AE_OK;
+	default:
+		return AE_ERROR;
	}
 }
@@ -2013,8 +2013,8 @@ static int null_validate_conf(struct nullb_device *dev)
		return -EINVAL;
	}

-	dev->blocksize = round_down(dev->blocksize, 512);
-	dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096);
+	if (blk_validate_block_size(dev->blocksize))
+		return -EINVAL;

	if (dev->queue_mode == NULL_Q_MQ && dev->use_per_node_hctx) {
		if (dev->submit_queues != nr_online_nodes)
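Editor's note: the hunk above stops silently "fixing up" bad block sizes and rejects them instead. To my understanding (an assumption, paraphrased rather than copied from the kernel), the helper accepts only powers of two between 512 bytes and the page size. A standalone sketch of such a check:

	#include <stdbool.h>

	#define SECTOR_SIZE	512
	#define PAGE_SIZE_B	4096	/* assumption: 4 KiB pages */

	/* Valid block size: power of two within [SECTOR_SIZE, PAGE_SIZE_B].
	 * Returns 0 if valid, -1 otherwise. */
	static int validate_block_size(unsigned long bsize)
	{
		bool pow2 = bsize && !(bsize & (bsize - 1));

		if (bsize < SECTOR_SIZE || bsize > PAGE_SIZE_B || !pow2)
			return -1;
		return 0;
	}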
@@ -280,7 +280,7 @@ static u8 crc8_table[CRC8_TABLE_SIZE];

 /* Default configurations */
 #define DEFAULT_H2C_WAKEUP_MODE	WAKEUP_METHOD_BREAK
-#define DEFAULT_PS_MODE		PS_MODE_DISABLE
+#define DEFAULT_PS_MODE		PS_MODE_ENABLE
 #define FW_INIT_BAUDRATE	HCI_NXP_PRI_BAUDRATE

 static struct sk_buff *nxp_drv_send_cmd(struct hci_dev *hdev, u16 opcode,
@@ -41,6 +41,7 @@ SECTIONS
	}

	/DISCARD/ : {
		*(.discard .discard.*)
+		*(.modinfo .init.modinfo)
	}
 }
@@ -766,6 +766,8 @@ static void pca953x_irq_bus_sync_unlock(struct irq_data *d)
	int level;

	if (chip->driver_data & PCA_PCAL) {
+		guard(mutex)(&chip->i2c_lock);
+
		/* Enable latch on interrupt-enabled inputs */
		pca953x_write_regs(chip, PCAL953X_IN_LATCH, chip->irq_mask);

@@ -4290,9 +4290,10 @@ static u32 gfx_v9_4_3_get_cu_active_bitmap(struct amdgpu_device *adev, int xcc_i
 static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
				   struct amdgpu_cu_info *cu_info)
 {
-	int i, j, k, counter, xcc_id, active_cu_number = 0;
-	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
+	int i, j, k, prev_counter, counter, xcc_id, active_cu_number = 0;
+	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0, tmp;
	unsigned disable_masks[4 * 4];
+	bool is_symmetric_cus;

	if (!adev || !cu_info)
		return -EINVAL;
@@ -4310,6 +4311,7 @@ static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,

	mutex_lock(&adev->grbm_idx_mutex);
	for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++) {
+		is_symmetric_cus = true;
		for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
			for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
				mask = 1;
@@ -4337,6 +4339,15 @@ static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
				ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
				cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
			}
+			if (i && is_symmetric_cus && prev_counter != counter)
+				is_symmetric_cus = false;
+			prev_counter = counter;
		}
+		if (is_symmetric_cus) {
+			tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_DEBUG);
+			tmp = REG_SET_FIELD(tmp, CP_CPC_DEBUG, CPC_HARVESTING_RELAUNCH_DISABLE, 1);
+			tmp = REG_SET_FIELD(tmp, CP_CPC_DEBUG, CPC_HARVESTING_DISPATCH_DISABLE, 1);
+			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_DEBUG, tmp);
+		}
		gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
					    xcc_id);
@@ -10630,6 +10630,49 @@ static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
	return ret;
 }

+static void parse_edid_displayid_vrr(struct drm_connector *connector,
+				     struct edid *edid)
+{
+	u8 *edid_ext = NULL;
+	int i;
+	int j = 0;
+	u16 min_vfreq;
+	u16 max_vfreq;
+
+	if (edid == NULL || edid->extensions == 0)
+		return;
+
+	/* Find DisplayID extension */
+	for (i = 0; i < edid->extensions; i++) {
+		edid_ext = (void *)(edid + (i + 1));
+		if (edid_ext[0] == DISPLAYID_EXT)
+			break;
+	}
+
+	if (edid_ext == NULL)
+		return;
+
+	while (j < EDID_LENGTH) {
+		/* Get dynamic video timing range from DisplayID if available */
+		if (EDID_LENGTH - j > 13 && edid_ext[j] == 0x25 &&
+		    (edid_ext[j+1] & 0xFE) == 0 && (edid_ext[j+2] == 9)) {
+			min_vfreq = edid_ext[j+9];
+			if (edid_ext[j+1] & 7)
+				max_vfreq = edid_ext[j+10] + ((edid_ext[j+11] & 3) << 8);
+			else
+				max_vfreq = edid_ext[j+10];
+
+			if (max_vfreq && min_vfreq) {
+				connector->display_info.monitor_range.max_vfreq = max_vfreq;
+				connector->display_info.monitor_range.min_vfreq = min_vfreq;
+
+				return;
+			}
+		}
+		j++;
+	}
+}
+
 static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector,
			   struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
 {
@@ -10752,6 +10795,11 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
	if (!adev->dm.freesync_module)
		goto update;

+	/* Some eDP panels only have the refresh rate range info in DisplayID */
+	if ((connector->display_info.monitor_range.min_vfreq == 0 ||
+	     connector->display_info.monitor_range.max_vfreq == 0))
+		parse_edid_displayid_vrr(connector, edid);
+
	if (edid && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
		     sink->sink_signal == SIGNAL_TYPE_EDP)) {
		bool edid_check_required = false;
@@ -10759,9 +10807,11 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
		if (is_dp_capable_without_timing_msa(adev->dm.dc,
						     amdgpu_dm_connector)) {
			if (edid->features & DRM_EDID_FEATURE_CONTINUOUS_FREQ) {
-				freesync_capable = true;
				amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq;
				amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq;
+				if (amdgpu_dm_connector->max_vfreq -
+				    amdgpu_dm_connector->min_vfreq > 10)
+					freesync_capable = true;
			} else {
				edid_check_required = edid->version > 1 ||
						      (edid->version == 1 &&
@@ -3364,6 +3364,9 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
					&mode_lib->vba.UrgentBurstFactorLumaPre[k],
					&mode_lib->vba.UrgentBurstFactorChromaPre[k],
					&mode_lib->vba.NotUrgentLatencyHidingPre[k]);
+
+			v->cursor_bw_pre[k] = mode_lib->vba.NumberOfCursors[k] * mode_lib->vba.CursorWidth[k][0] * mode_lib->vba.CursorBPP[k][0] /
+					8.0 / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) * v->VRatioPreY[i][j][k];
		}

		{
@@ -202,6 +202,12 @@ static const struct dmi_system_id orientation_data[] = {
		  DMI_MATCH(DMI_BOARD_NAME, "NEXT"),
		},
		.driver_data = (void *)&lcd800x1280_rightside_up,
+	}, {	/* AYA NEO KUN */
+		.matches = {
+		  DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
+		  DMI_MATCH(DMI_BOARD_NAME, "KUN"),
+		},
+		.driver_data = (void *)&lcd1600x2560_rightside_up,
	}, {	/* Chuwi HiBook (CWI514) */
		.matches = {
		  DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
@@ -288,7 +288,6 @@ struct platform_driver dp_driver = {
	.remove = exynos_dp_remove,
	.driver = {
		.name = "exynos-dp",
-		.owner = THIS_MODULE,
		.pm = pm_ptr(&exynos_dp_pm_ops),
		.of_match_table = exynos_dp_match,
	},
@@ -926,6 +926,13 @@ static void mtk_drm_remove(struct platform_device *pdev)
		of_node_put(private->comp_node[i]);
 }

+static void mtk_drm_shutdown(struct platform_device *pdev)
+{
+	struct mtk_drm_private *private = platform_get_drvdata(pdev);
+
+	drm_atomic_helper_shutdown(private->drm);
+}
+
 static int mtk_drm_sys_prepare(struct device *dev)
 {
	struct mtk_drm_private *private = dev_get_drvdata(dev);
@@ -957,6 +964,7 @@ static const struct dev_pm_ops mtk_drm_pm_ops = {
 static struct platform_driver mtk_drm_platform_driver = {
	.probe = mtk_drm_probe,
	.remove_new = mtk_drm_remove,
+	.shutdown = mtk_drm_shutdown,
	.driver = {
		.name = "mediatek-drm",
		.pm = &mtk_drm_pm_ops,
@@ -641,7 +641,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev,
	if (r)
		goto error_unlock;

-	if (bo_va->it.start)
+	if (bo_va->it.start && bo_va->bo)
		r = radeon_vm_bo_update(rdev, bo_va, bo_va->bo->tbo.resource);

 error_unlock:
@@ -2,7 +2,7 @@
 config DRM_VMWGFX
	tristate "DRM driver for VMware Virtual GPU"
	depends on DRM && PCI && MMU
-	depends on X86 || ARM64
+	depends on (X86 && HYPERVISOR_GUEST) || ARM64
	select DRM_TTM
	select DRM_TTM_HELPER
	select MAPPING_DIRTY_HELPERS
@@ -974,6 +974,8 @@ static const char *keys[KEY_MAX + 1] = {
	[KEY_CAMERA_ACCESS_ENABLE] = "CameraAccessEnable",
	[KEY_CAMERA_ACCESS_DISABLE] = "CameraAccessDisable",
	[KEY_CAMERA_ACCESS_TOGGLE] = "CameraAccessToggle",
+	[KEY_ACCESSIBILITY] = "Accessibility",
+	[KEY_DO_NOT_DISTURB] = "DoNotDisturb",
	[KEY_DICTATE] = "Dictate",
	[KEY_MICMUTE] = "MicrophoneMute",
	[KEY_BRIGHTNESS_MIN] = "BrightnessMin",
@@ -418,6 +418,8 @@
 #define I2C_DEVICE_ID_HP_SPECTRE_X360_13_AW0020NG	0x29DF
 #define I2C_DEVICE_ID_ASUS_TP420IA_TOUCHSCREEN	0x2BC8
 #define I2C_DEVICE_ID_ASUS_GV301RA_TOUCHSCREEN	0x2C82
+#define I2C_DEVICE_ID_ASUS_UX3402_TOUCHSCREEN	0x2F2C
+#define I2C_DEVICE_ID_ASUS_UX6404_TOUCHSCREEN	0x4116
 #define USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN	0x2544
 #define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN	0x2706
 #define I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN	0x261A
@@ -377,6 +377,10 @@ static const struct hid_device_id hid_battery_quirks[] = {
	  HID_BATTERY_QUIRK_IGNORE },
	{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_ASUS_GV301RA_TOUCHSCREEN),
	  HID_BATTERY_QUIRK_IGNORE },
+	{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_ASUS_UX3402_TOUCHSCREEN),
+	  HID_BATTERY_QUIRK_IGNORE },
+	{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_ASUS_UX6404_TOUCHSCREEN),
+	  HID_BATTERY_QUIRK_IGNORE },
	{ HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN),
	  HID_BATTERY_QUIRK_IGNORE },
	{ HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN),
@@ -833,9 +837,18 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
			break;
		}

+		if ((usage->hid & 0xf0) == 0x90) { /* SystemControl*/
+			switch (usage->hid & 0xf) {
+			case 0xb: map_key_clear(KEY_DO_NOT_DISTURB); break;
+			default: goto ignore;
+			}
+			break;
+		}
+
		if ((usage->hid & 0xf0) == 0xa0) {	/* SystemControl */
			switch (usage->hid & 0xf) {
			case 0x9: map_key_clear(KEY_MICMUTE); break;
+			case 0xa: map_key_clear(KEY_ACCESSIBILITY); break;
			default: goto ignore;
			}
			break;
@@ -208,6 +208,7 @@ static const struct xpad_device {
	{ 0x0738, 0xf738, "Super SFIV FightStick TE S", 0, XTYPE_XBOX360 },
	{ 0x07ff, 0xffff, "Mad Catz GamePad", 0, XTYPE_XBOX360 },
	{ 0x0b05, 0x1a38, "ASUS ROG RAIKIRI", 0, XTYPE_XBOXONE },
+	{ 0x0b05, 0x1abb, "ASUS ROG RAIKIRI PRO", 0, XTYPE_XBOXONE },
	{ 0x0c12, 0x0005, "Intec wireless", 0, XTYPE_XBOX },
	{ 0x0c12, 0x8801, "Nyko Xbox Controller", 0, XTYPE_XBOX },
	{ 0x0c12, 0x8802, "Zeroplus Xbox Controller", 0, XTYPE_XBOX },
@@ -1476,16 +1476,47 @@ static void elantech_disconnect(struct psmouse *psmouse)
	psmouse->private = NULL;
 }

+/*
+ * Some hw_version 4 models fail to properly activate absolute mode on
+ * resume without going through disable/enable cycle.
+ */
+static const struct dmi_system_id elantech_needs_reenable[] = {
+#if defined(CONFIG_DMI) && defined(CONFIG_X86)
+	{
+		/* Lenovo N24 */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "81AF"),
+		},
+	},
+#endif
+	{ }
+};
+
 /*
  * Put the touchpad back into absolute mode when reconnecting
  */
 static int elantech_reconnect(struct psmouse *psmouse)
 {
+	int err;
+
	psmouse_reset(psmouse);

	if (elantech_detect(psmouse, 0))
		return -1;

+	if (dmi_check_system(elantech_needs_reenable)) {
+		err = ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_DISABLE);
+		if (err)
+			psmouse_warn(psmouse, "failed to deactivate mouse on %s: %d\n",
+				     psmouse->ps2dev.serio->phys, err);
+
+		err = ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_ENABLE);
+		if (err)
+			psmouse_warn(psmouse, "failed to reactivate mouse on %s: %d\n",
+				     psmouse->ps2dev.serio->phys, err);
+	}
+
	if (elantech_set_absolute_mode(psmouse)) {
		psmouse_err(psmouse,
			    "failed to put touchpad back into absolute mode.\n");
@@ -76,7 +76,7 @@ static inline void i8042_write_command(int val)
 #define SERIO_QUIRK_PROBE_DEFER		BIT(5)
 #define SERIO_QUIRK_RESET_ALWAYS	BIT(6)
 #define SERIO_QUIRK_RESET_NEVER		BIT(7)
-#define SERIO_QUIRK_DIECT		BIT(8)
+#define SERIO_QUIRK_DIRECT		BIT(8)
 #define SERIO_QUIRK_DUMBKBD		BIT(9)
 #define SERIO_QUIRK_NOLOOP		BIT(10)
 #define SERIO_QUIRK_NOTIMEOUT		BIT(11)
@@ -1332,6 +1332,20 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
		.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
					SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
	},
+	{
+		/*
+		 * The Ayaneo Kun is a handheld device where some the buttons
+		 * are handled by an AT keyboard. The keyboard is usually
+		 * detected as raw, but sometimes, usually after a cold boot,
+		 * it is detected as translated. Make sure that the keyboard
+		 * is always in raw mode.
+		 */
+		.matches = {
+			DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
+			DMI_MATCH(DMI_BOARD_NAME, "KUN"),
+		},
+		.driver_data = (void *)(SERIO_QUIRK_DIRECT)
+	},
	{ }
 };

@@ -1655,7 +1669,7 @@ static void __init i8042_check_quirks(void)
		if (quirks & SERIO_QUIRK_RESET_NEVER)
			i8042_reset = I8042_RESET_NEVER;
	}
-	if (quirks & SERIO_QUIRK_DIECT)
+	if (quirks & SERIO_QUIRK_DIRECT)
		i8042_direct = true;
	if (quirks & SERIO_QUIRK_DUMBKBD)
		i8042_dumbkbd = true;
@@ -1114,6 +1114,16 @@ static const struct of_device_id ads7846_dt_ids[] = {
 };
 MODULE_DEVICE_TABLE(of, ads7846_dt_ids);

+static const struct spi_device_id ads7846_spi_ids[] = {
+	{ "tsc2046", 7846 },
+	{ "ads7843", 7843 },
+	{ "ads7845", 7845 },
+	{ "ads7846", 7846 },
+	{ "ads7873", 7873 },
+	{ },
+};
+MODULE_DEVICE_TABLE(spi, ads7846_spi_ids);
+
 static const struct ads7846_platform_data *ads7846_get_props(struct device *dev)
 {
	struct ads7846_platform_data *pdata;
@@ -1392,10 +1402,10 @@ static struct spi_driver ads7846_driver = {
	},
	.probe		= ads7846_probe,
	.remove		= ads7846_remove,
+	.id_table	= ads7846_spi_ids,
 };

 module_spi_driver(ads7846_driver);

 MODULE_DESCRIPTION("ADS7846 TouchScreen Driver");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("spi:ads7846");
@@ -71,7 +71,6 @@ struct silead_ts_data {
	struct regulator_bulk_data regulators[2];
	char fw_name[64];
	struct touchscreen_properties prop;
-	u32 max_fingers;
	u32 chip_id;
	struct input_mt_pos pos[SILEAD_MAX_FINGERS];
	int slots[SILEAD_MAX_FINGERS];
@@ -136,7 +135,7 @@ static int silead_ts_request_input_dev(struct silead_ts_data *data)
	touchscreen_parse_properties(data->input, true, &data->prop);
	silead_apply_efi_fw_min_max(data);

-	input_mt_init_slots(data->input, data->max_fingers,
+	input_mt_init_slots(data->input, SILEAD_MAX_FINGERS,
			    INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED |
			    INPUT_MT_TRACK);

@@ -256,10 +255,10 @@ static void silead_ts_read_data(struct i2c_client *client)
		return;
	}

-	if (buf[0] > data->max_fingers) {
+	if (buf[0] > SILEAD_MAX_FINGERS) {
		dev_warn(dev, "More touches reported then supported %d > %d\n",
-			 buf[0], data->max_fingers);
-		buf[0] = data->max_fingers;
+			 buf[0], SILEAD_MAX_FINGERS);
+		buf[0] = SILEAD_MAX_FINGERS;
	}

	if (silead_ts_handle_pen_data(data, buf))
@@ -315,7 +314,6 @@ sync:

 static int silead_ts_init(struct i2c_client *client)
 {
-	struct silead_ts_data *data = i2c_get_clientdata(client);
	int error;

	error = i2c_smbus_write_byte_data(client, SILEAD_REG_RESET,
@@ -325,7 +323,7 @@ static int silead_ts_init(struct i2c_client *client)
	usleep_range(SILEAD_CMD_SLEEP_MIN, SILEAD_CMD_SLEEP_MAX);

	error = i2c_smbus_write_byte_data(client, SILEAD_REG_TOUCH_NR,
-					  data->max_fingers);
+					  SILEAD_MAX_FINGERS);
	if (error)
		goto i2c_write_err;
	usleep_range(SILEAD_CMD_SLEEP_MIN, SILEAD_CMD_SLEEP_MAX);
@@ -591,13 +589,6 @@ static void silead_ts_read_props(struct i2c_client *client)
	const char *str;
	int error;

-	error = device_property_read_u32(dev, "silead,max-fingers",
-					 &data->max_fingers);
-	if (error) {
-		dev_dbg(dev, "Max fingers read error %d\n", error);
-		data->max_fingers = 5; /* Most devices handle up-to 5 fingers */
-	}
-
	error = device_property_read_string(dev, "firmware-name", &str);
	if (!error)
		snprintf(data->fw_name, sizeof(data->fw_name),
@@ -329,7 +329,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
	}

	if (!mei_cl_is_connected(cl)) {
-		cl_err(dev, cl, "is not connected");
+		cl_dbg(dev, cl, "is not connected");
		rets = -ENODEV;
		goto out;
	}
@@ -292,7 +292,7 @@ int kvaser_usb_send_cmd_async(struct kvaser_usb_net_priv *priv, void *cmd,
	}
	usb_free_urb(urb);

-	return 0;
+	return err;
 }

 int kvaser_usb_can_rx_over_error(struct net_device *netdev)
@@ -2478,6 +2478,18 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
		(tx_pool->consumer_index + 1) % tx_pool->num_buffers;

	tx_buff = &tx_pool->tx_buff[bufidx];
+
+	/* Sanity checks on our free map to make sure it points to an index
+	 * that is not being occupied by another skb. If skb memory is
+	 * not freed then we see congestion control kick in and halt tx.
+	 */
+	if (unlikely(tx_buff->skb)) {
+		dev_warn_ratelimited(dev, "TX free map points to untracked skb (%s %d idx=%d)\n",
+				     skb_is_gso(skb) ? "tso_pool" : "tx_pool",
+				     queue_num, bufidx);
+		dev_kfree_skb_any(tx_buff->skb);
+	}
+
	tx_buff->skb = skb;
	tx_buff->index = bufidx;
	tx_buff->pool_index = queue_num;
@@ -648,14 +648,14 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for
	} else if (lvl == NIX_TXSCH_LVL_TL4) {
		parent = schq_list[NIX_TXSCH_LVL_TL3][prio];
		req->reg[0] = NIX_AF_TL4X_PARENT(schq);
-		req->regval[0] = parent << 16;
+		req->regval[0] = (u64)parent << 16;
		req->num_regs++;
		req->reg[1] = NIX_AF_TL4X_SCHEDULE(schq);
		req->regval[1] = dwrr_val;
	} else if (lvl == NIX_TXSCH_LVL_TL3) {
		parent = schq_list[NIX_TXSCH_LVL_TL2][prio];
		req->reg[0] = NIX_AF_TL3X_PARENT(schq);
-		req->regval[0] = parent << 16;
+		req->regval[0] = (u64)parent << 16;
		req->num_regs++;
		req->reg[1] = NIX_AF_TL3X_SCHEDULE(schq);
		req->regval[1] = dwrr_val;
@@ -670,11 +670,11 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for
	} else if (lvl == NIX_TXSCH_LVL_TL2) {
		parent = schq_list[NIX_TXSCH_LVL_TL1][prio];
		req->reg[0] = NIX_AF_TL2X_PARENT(schq);
-		req->regval[0] = parent << 16;
+		req->regval[0] = (u64)parent << 16;

		req->num_regs++;
		req->reg[1] = NIX_AF_TL2X_SCHEDULE(schq);
-		req->regval[1] = TXSCH_TL1_DFLT_RR_PRIO << 24 | dwrr_val;
+		req->regval[1] = (u64)hw->txschq_aggr_lvl_rr_prio << 24 | dwrr_val;

		if (lvl == hw->txschq_link_cfg_lvl) {
			req->num_regs++;
@@ -698,7 +698,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for

		req->num_regs++;
		req->reg[1] = NIX_AF_TL1X_TOPOLOGY(schq);
-		req->regval[1] = (TXSCH_TL1_DFLT_RR_PRIO << 1);
+		req->regval[1] = hw->txschq_aggr_lvl_rr_prio << 1;

		req->num_regs++;
		req->reg[2] = NIX_AF_TL1X_CIR(schq);
@@ -139,33 +139,34 @@
 #define NIX_LF_CINTX_ENA_W1C(a)		(NIX_LFBASE | 0xD50 | (a) << 12)

 /* NIX AF transmit scheduler registers */
-#define NIX_AF_SMQX_CFG(a)		(0x700 | (a) << 16)
-#define NIX_AF_TL1X_SCHEDULE(a)		(0xC00 | (a) << 16)
-#define NIX_AF_TL1X_CIR(a)		(0xC20 | (a) << 16)
-#define NIX_AF_TL1X_TOPOLOGY(a)		(0xC80 | (a) << 16)
-#define NIX_AF_TL2X_PARENT(a)		(0xE88 | (a) << 16)
-#define NIX_AF_TL2X_SCHEDULE(a)		(0xE00 | (a) << 16)
-#define NIX_AF_TL2X_TOPOLOGY(a)		(0xE80 | (a) << 16)
-#define NIX_AF_TL2X_CIR(a)		(0xE20 | (a) << 16)
-#define NIX_AF_TL2X_PIR(a)		(0xE30 | (a) << 16)
-#define NIX_AF_TL3X_PARENT(a)		(0x1088 | (a) << 16)
-#define NIX_AF_TL3X_SCHEDULE(a)		(0x1000 | (a) << 16)
-#define NIX_AF_TL3X_SHAPE(a)		(0x1010 | (a) << 16)
-#define NIX_AF_TL3X_CIR(a)		(0x1020 | (a) << 16)
-#define NIX_AF_TL3X_PIR(a)		(0x1030 | (a) << 16)
-#define NIX_AF_TL3X_TOPOLOGY(a)		(0x1080 | (a) << 16)
-#define NIX_AF_TL4X_PARENT(a)		(0x1288 | (a) << 16)
-#define NIX_AF_TL4X_SCHEDULE(a)		(0x1200 | (a) << 16)
-#define NIX_AF_TL4X_SHAPE(a)		(0x1210 | (a) << 16)
-#define NIX_AF_TL4X_CIR(a)		(0x1220 | (a) << 16)
-#define NIX_AF_TL4X_PIR(a)		(0x1230 | (a) << 16)
-#define NIX_AF_TL4X_TOPOLOGY(a)		(0x1280 | (a) << 16)
-#define NIX_AF_MDQX_SCHEDULE(a)		(0x1400 | (a) << 16)
-#define NIX_AF_MDQX_SHAPE(a)		(0x1410 | (a) << 16)
-#define NIX_AF_MDQX_CIR(a)		(0x1420 | (a) << 16)
-#define NIX_AF_MDQX_PIR(a)		(0x1430 | (a) << 16)
-#define NIX_AF_MDQX_PARENT(a)		(0x1480 | (a) << 16)
-#define NIX_AF_TL3_TL2X_LINKX_CFG(a, b)	(0x1700 | (a) << 16 | (b) << 3)
+#define NIX_AF_SMQX_CFG(a)		(0x700 | (u64)(a) << 16)
+#define NIX_AF_TL4X_SDP_LINK_CFG(a)	(0xB10 | (u64)(a) << 16)
+#define NIX_AF_TL1X_SCHEDULE(a)		(0xC00 | (u64)(a) << 16)
+#define NIX_AF_TL1X_CIR(a)		(0xC20 | (u64)(a) << 16)
+#define NIX_AF_TL1X_TOPOLOGY(a)		(0xC80 | (u64)(a) << 16)
+#define NIX_AF_TL2X_PARENT(a)		(0xE88 | (u64)(a) << 16)
+#define NIX_AF_TL2X_SCHEDULE(a)		(0xE00 | (u64)(a) << 16)
+#define NIX_AF_TL2X_TOPOLOGY(a)		(0xE80 | (u64)(a) << 16)
+#define NIX_AF_TL2X_CIR(a)		(0xE20 | (u64)(a) << 16)
+#define NIX_AF_TL2X_PIR(a)		(0xE30 | (u64)(a) << 16)
+#define NIX_AF_TL3X_PARENT(a)		(0x1088 | (u64)(a) << 16)
+#define NIX_AF_TL3X_SCHEDULE(a)		(0x1000 | (u64)(a) << 16)
+#define NIX_AF_TL3X_SHAPE(a)		(0x1010 | (u64)(a) << 16)
+#define NIX_AF_TL3X_CIR(a)		(0x1020 | (u64)(a) << 16)
+#define NIX_AF_TL3X_PIR(a)		(0x1030 | (u64)(a) << 16)
+#define NIX_AF_TL3X_TOPOLOGY(a)		(0x1080 | (u64)(a) << 16)
+#define NIX_AF_TL4X_PARENT(a)		(0x1288 | (u64)(a) << 16)
+#define NIX_AF_TL4X_SCHEDULE(a)		(0x1200 | (u64)(a) << 16)
+#define NIX_AF_TL4X_SHAPE(a)		(0x1210 | (u64)(a) << 16)
+#define NIX_AF_TL4X_CIR(a)		(0x1220 | (u64)(a) << 16)
+#define NIX_AF_TL4X_PIR(a)		(0x1230 | (u64)(a) << 16)
+#define NIX_AF_TL4X_TOPOLOGY(a)		(0x1280 | (u64)(a) << 16)
+#define NIX_AF_MDQX_SCHEDULE(a)		(0x1400 | (u64)(a) << 16)
+#define NIX_AF_MDQX_SHAPE(a)		(0x1410 | (u64)(a) << 16)
+#define NIX_AF_MDQX_CIR(a)		(0x1420 | (u64)(a) << 16)
+#define NIX_AF_MDQX_PIR(a)		(0x1430 | (u64)(a) << 16)
+#define NIX_AF_MDQX_PARENT(a)		(0x1480 | (u64)(a) << 16)
+#define NIX_AF_TL3_TL2X_LINKX_CFG(a, b)	(0x1700 | (u64)(a) << 16 | (b) << 3)

 /* LMT LF registers */
 #define LMT_LFBASE			BIT_ULL(RVU_FUNC_BLKADDR_SHIFT)
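Editor's note: the otx2_txschq_config() regval changes and the NIX_AF_* macro rewrite above are the same fix. In `(a) << 16` the operand is promoted only to int, so the shift happens in 32 bits and high bits are lost before the value is widened to u64; casting first makes the whole expression 64-bit. A standalone illustration (plain userspace C, not kernel code):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t a = 0x12345;	/* large enough that a << 16 needs >32 bits */

		uint64_t bad  = a << 16;		/* shift done in 32 bits: truncated */
		uint64_t good = (uint64_t)a << 16;	/* widened first, as in the fix */

		printf("bad  = 0x%llx\n", (unsigned long long)bad);	/* 0x23450000 */
		printf("good = 0x%llx\n", (unsigned long long)good);	/* 0x123450000 */
		return 0;
	}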
@@ -510,7 +510,7 @@ process_cqe:

 static void otx2_adjust_adaptive_coalese(struct otx2_nic *pfvf, struct otx2_cq_poll *cq_poll)
 {
-	struct dim_sample dim_sample;
+	struct dim_sample dim_sample = { 0 };
	u64 rx_frames, rx_bytes;
	u64 tx_frames, tx_bytes;

@@ -153,7 +153,6 @@ static void __otx2_qos_txschq_cfg(struct otx2_nic *pfvf,
		num_regs++;

		otx2_config_sched_shaping(pfvf, node, cfg, &num_regs);
-
	} else if (level == NIX_TXSCH_LVL_TL4) {
		otx2_config_sched_shaping(pfvf, node, cfg, &num_regs);
	} else if (level == NIX_TXSCH_LVL_TL3) {
@@ -176,7 +175,7 @@ static void __otx2_qos_txschq_cfg(struct otx2_nic *pfvf,
		/* check if node is root */
		if (node->qid == OTX2_QOS_QID_INNER && !node->parent) {
			cfg->reg[num_regs] = NIX_AF_TL2X_SCHEDULE(node->schq);
-			cfg->regval[num_regs] = TXSCH_TL1_DFLT_RR_PRIO << 24 |
+			cfg->regval[num_regs] = (u64)hw->txschq_aggr_lvl_rr_prio << 24 |
						 mtu_to_dwrr_weight(pfvf,
								    pfvf->tx_max_pktlen);
			num_regs++;
@@ -1380,6 +1380,8 @@ static const struct usb_device_id products[] = {
	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1260, 2)},	/* Telit LE910Cx */
	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1261, 2)},	/* Telit LE910Cx */
	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1900, 1)},	/* Telit LN940 series */
+	{QMI_QUIRK_SET_DTR(0x1bc7, 0x3000, 0)},	/* Telit FN912 series */
+	{QMI_QUIRK_SET_DTR(0x1bc7, 0x3001, 0)},	/* Telit FN912 series */
	{QMI_FIXED_INTF(0x1c9e, 0x9801, 3)},	/* Telewell TW-3G HSPA+ */
	{QMI_FIXED_INTF(0x1c9e, 0x9803, 4)},	/* Telewell TW-3G HSPA+ */
	{QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)},	/* XS Stick W100-2 from 4G Systems */
@@ -595,6 +595,12 @@ static void iwl_mvm_wowlan_gtk_type_iter(struct ieee80211_hw *hw,
				       void *_data)
 {
	struct wowlan_key_gtk_type_iter *data = _data;
+	__le32 *cipher = NULL;
+
+	if (key->keyidx == 4 || key->keyidx == 5)
+		cipher = &data->kek_kck_cmd->igtk_cipher;
+	if (key->keyidx == 6 || key->keyidx == 7)
+		cipher = &data->kek_kck_cmd->bigtk_cipher;

	switch (key->cipher) {
	default:
@@ -606,10 +612,13 @@ static void iwl_mvm_wowlan_gtk_type_iter(struct ieee80211_hw *hw,
		return;
	case WLAN_CIPHER_SUITE_BIP_GMAC_256:
	case WLAN_CIPHER_SUITE_BIP_GMAC_128:
-		data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_GCMP);
+		if (cipher)
+			*cipher = cpu_to_le32(STA_KEY_FLG_GCMP);
		return;
	case WLAN_CIPHER_SUITE_AES_CMAC:
-		data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_CCM);
+	case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+		if (cipher)
+			*cipher = cpu_to_le32(STA_KEY_FLG_CCM);
		return;
	case WLAN_CIPHER_SUITE_CCMP:
		if (!sta)
@@ -2102,7 +2111,8 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,

 out:
	if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP,
-				    WOWLAN_GET_STATUSES, 0) < 10) {
+				    WOWLAN_GET_STATUSES,
+				    IWL_FW_CMD_VER_UNKNOWN) < 10) {
		mvmvif->seqno_valid = true;
		/* +0x10 because the set API expects next-to-use, not last-used */
		mvmvif->seqno = status->non_qos_seq_ctr + 0x10;
@@ -600,7 +600,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
	hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES;

	if (iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_KEK_KCK_MATERIAL,
-				  IWL_FW_CMD_VER_UNKNOWN) == 3)
+				  IWL_FW_CMD_VER_UNKNOWN) >= 3)
		hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK;

	if (fw_has_api(&mvm->fw->ucode_capa,
@@ -1051,6 +1051,39 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
	RCU_INIT_POINTER(mvmvif->deflink.probe_resp_data, NULL);
 }

+static void iwl_mvm_cleanup_sta_iterator(void *data, struct ieee80211_sta *sta)
+{
+	struct iwl_mvm *mvm = data;
+	struct iwl_mvm_sta *mvm_sta;
+	struct ieee80211_vif *vif;
+	int link_id;
+
+	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+	vif = mvm_sta->vif;
+
+	if (!sta->valid_links)
+		return;
+
+	for (link_id = 0; link_id < ARRAY_SIZE((sta)->link); link_id++) {
+		struct iwl_mvm_link_sta *mvm_link_sta;
+
+		mvm_link_sta =
+			rcu_dereference_check(mvm_sta->link[link_id],
+					      lockdep_is_held(&mvm->mutex));
+		if (mvm_link_sta && !(vif->active_links & BIT(link_id))) {
+			/*
+			 * We have a link STA but the link is inactive in
+			 * mac80211. This will happen if we failed to
+			 * deactivate the link but mac80211 roll back the
+			 * deactivation of the link.
+			 * Delete the stale data to avoid issues later on.
+			 */
+			iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_link_sta,
+						  link_id, false);
+		}
+	}
+}
+
 static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
 {
	iwl_mvm_stop_device(mvm);
@@ -1073,6 +1106,10 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
	 */
	ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm);

+	/* cleanup stations as links may be gone after restart */
+	ieee80211_iterate_stations_atomic(mvm->hw,
+					  iwl_mvm_cleanup_sta_iterator, mvm);
+
	mvm->p2p_device_vif = NULL;

	iwl_mvm_reset_phy_ctxts(mvm);
@@ -6032,7 +6069,7 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
		.len[0] = sizeof(cmd),
		.data[1] = data,
		.len[1] = size,
-		.flags = sync ? 0 : CMD_ASYNC,
+		.flags = CMD_SEND_IN_RFKILL | (sync ? 0 : CMD_ASYNC),
	};
	int ret;

@@ -6057,11 +6094,9 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
	if (sync) {
		lockdep_assert_held(&mvm->mutex);
		ret = wait_event_timeout(mvm->rx_sync_waitq,
-					 READ_ONCE(mvm->queue_sync_state) == 0 ||
-					 iwl_mvm_is_radio_killed(mvm),
+					 READ_ONCE(mvm->queue_sync_state) == 0,
					 HZ);
-		WARN_ONCE(!ret && !iwl_mvm_is_radio_killed(mvm),
-			  "queue sync: failed to sync, state is 0x%lx\n",
+		WARN_ONCE(!ret, "queue sync: failed to sync, state is 0x%lx\n",
			  mvm->queue_sync_state);
	}

@@ -515,11 +515,11 @@ static int iwl_mvm_mld_cfg_sta(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
	return iwl_mvm_mld_send_sta_cmd(mvm, &cmd);
 }

-static void iwl_mvm_mld_free_sta_link(struct iwl_mvm *mvm,
-				      struct iwl_mvm_sta *mvm_sta,
-				      struct iwl_mvm_link_sta *mvm_sta_link,
-				      unsigned int link_id,
-				      bool is_in_fw)
+void iwl_mvm_mld_free_sta_link(struct iwl_mvm *mvm,
+			       struct iwl_mvm_sta *mvm_sta,
+			       struct iwl_mvm_link_sta *mvm_sta_link,
+			       unsigned int link_id,
+			       bool is_in_fw)
 {
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta_link->sta_id],
			 is_in_fw ? ERR_PTR(-EINVAL) : NULL);
@@ -1007,7 +1007,8 @@ static int iwl_mvm_mld_update_sta_baids(struct iwl_mvm *mvm,

		cmd.modify.tid = cpu_to_le32(data->tid);

-		ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cmd), &cmd);
+		ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_SEND_IN_RFKILL,
+					   sizeof(cmd), &cmd);
		data->sta_mask = new_sta_mask;
		if (ret)
			return ret;
@@ -1718,7 +1718,10 @@ iwl_mvm_umac_scan_fill_6g_chan_list(struct iwl_mvm *mvm,
				break;
		}

-		if (k == idex_b && idex_b < SCAN_BSSID_MAX_SIZE) {
+		if (k == idex_b && idex_b < SCAN_BSSID_MAX_SIZE &&
+		    !WARN_ONCE(!is_valid_ether_addr(scan_6ghz_params[j].bssid),
+			       "scan: invalid BSSID at index %u, index_b=%u\n",
+			       j, idex_b)) {
			memcpy(&pp->bssid_array[idex_b++],
			       scan_6ghz_params[j].bssid, ETH_ALEN);
		}
@@ -3239,10 +3242,11 @@ static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)

	ret = iwl_mvm_send_cmd_pdu(mvm,
				   WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_ABORT_UMAC),
-				   0, sizeof(cmd), &cmd);
+				   CMD_SEND_IN_RFKILL, sizeof(cmd), &cmd);
	if (!ret)
		mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT;

+	IWL_DEBUG_SCAN(mvm, "Scan abort: ret=%d\n", ret);
	return ret;
 }

@@ -2819,7 +2819,12 @@ static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm,
		.action = start ? cpu_to_le32(IWL_RX_BAID_ACTION_ADD) :
				  cpu_to_le32(IWL_RX_BAID_ACTION_REMOVE),
	};
-	u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD);
+	struct iwl_host_cmd hcmd = {
+		.id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD),
+		.flags = CMD_SEND_IN_RFKILL,
+		.len[0] = sizeof(cmd),
+		.data[0] = &cmd,
+	};
	int ret;

	BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid));
@@ -2831,7 +2836,7 @@ static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm,
		cmd.alloc.ssn = cpu_to_le16(ssn);
		cmd.alloc.win_size = cpu_to_le16(buf_size);
		baid = -EIO;
-	} else if (iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 1) == 1) {
+	} else if (iwl_fw_lookup_cmd_ver(mvm->fw, hcmd.id, 1) == 1) {
		cmd.remove_v1.baid = cpu_to_le32(baid);
		BUILD_BUG_ON(sizeof(cmd.remove_v1) > sizeof(cmd.remove));
	} else {
@@ -2840,8 +2845,7 @@ static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm,
		cmd.remove.tid = cpu_to_le32(tid);
	}

-	ret = iwl_mvm_send_cmd_pdu_status(mvm, cmd_id, sizeof(cmd),
-					  &cmd, &baid);
+	ret = iwl_mvm_send_cmd_status(mvm, &hcmd, &baid);
	if (ret)
		return ret;

@@ -642,6 +642,11 @@ int iwl_mvm_mld_update_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta);
 int iwl_mvm_mld_rm_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta);
+void iwl_mvm_mld_free_sta_link(struct iwl_mvm *mvm,
+			       struct iwl_mvm_sta *mvm_sta,
+			       struct iwl_mvm_link_sta *mvm_sta_link,
+			       unsigned int link_id,
+			       bool is_in_fw);
 int iwl_mvm_mld_rm_sta_id(struct iwl_mvm *mvm, u8 sta_id);
 int iwl_mvm_mld_update_sta_links(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
@@ -938,6 +938,7 @@ void nvme_cleanup_cmd(struct request *req)
			clear_bit_unlock(0, &ctrl->discard_page_busy);
		else
			kfree(bvec_virt(&req->special_vec));
+		req->rq_flags &= ~RQF_SPECIAL_PAYLOAD;
	}
 }
 EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);
@@ -485,7 +485,7 @@ static inline bool nvme_ns_head_multipath(struct nvme_ns_head *head)
 enum nvme_ns_features {
	NVME_NS_EXT_LBAS = 1 << 0, /* support extended LBA format */
	NVME_NS_METADATA_SUPPORTED = 1 << 1, /* support getting generated md */
-	NVME_NS_DEAC, /* DEAC bit in Write Zeores supported */
+	NVME_NS_DEAC = 1 << 2, /* DEAC bit in Write Zeores supported */
 };

 struct nvme_ns {
@@ -945,6 +945,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
	req->metadata_sg_cnt = 0;
	req->transfer_len = 0;
	req->metadata_len = 0;
+	req->cqe->result.u64 = 0;
	req->cqe->status = 0;
	req->cqe->sq_head = 0;
	req->ns = NULL;
@@ -332,7 +332,6 @@ done:
	pr_debug("%s: ctrl %d qid %d nvme status %x error loc %d\n",
		 __func__, ctrl->cntlid, req->sq->qid,
		 status, req->error_loc);
-	req->cqe->result.u64 = 0;
	if (req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2 &&
	    req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) {
		unsigned long auth_expire_secs = ctrl->kato ? ctrl->kato : 120;
@@ -515,8 +514,6 @@ void nvmet_execute_auth_receive(struct nvmet_req *req)
	status = nvmet_copy_to_sgl(req, 0, d, al);
	kfree(d);
 done:
-	req->cqe->result.u64 = 0;
-
	if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2)
		nvmet_auth_sq_free(req->sq);
	else if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
@@ -225,9 +225,6 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
	if (status)
		goto out;

-	/* zero out initial completion result, assign values as needed */
-	req->cqe->result.u32 = 0;
-
	if (c->recfmt != 0) {
		pr_warn("invalid connect version (%d).\n",
			le16_to_cpu(c->recfmt));
@@ -305,9 +302,6 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
	if (status)
		goto out;

-	/* zero out initial completion result, assign values as needed */
-	req->cqe->result.u32 = 0;
-
	if (c->recfmt != 0) {
		pr_warn("invalid connect version (%d).\n",
			le16_to_cpu(c->recfmt));
drivers/of/irq.c | 143
@@ -25,6 +25,8 @@
 #include <linux/string.h>
 #include <linux/slab.h>

+#include "of_private.h"
+
 /**
  * irq_of_parse_and_map - Parse and map an interrupt into linux virq space
  * @dev: Device node of the device whose interrupt is to be mapped
@@ -79,7 +81,8 @@ EXPORT_SYMBOL_GPL(of_irq_find_parent);
 /*
  * These interrupt controllers abuse interrupt-map for unspeakable
  * reasons and rely on the core code to *ignore* it (the drivers do
- * their own parsing of the property).
+ * their own parsing of the property). The PAsemi entry covers a
+ * non-sensical interrupt-map that is better left ignored.
  *
  * If you think of adding to the list for something *new*, think
  * again. There is a high chance that you will be sent back to the
@@ -93,9 +96,61 @@ static const char * const of_irq_imap_abusers[] = {
	"fsl,ls1043a-extirq",
	"fsl,ls1088a-extirq",
	"renesas,rza1-irqc",
+	"pasemi,rootbus",
	NULL,
 };

+const __be32 *of_irq_parse_imap_parent(const __be32 *imap, int len, struct of_phandle_args *out_irq)
+{
+	u32 intsize, addrsize;
+	struct device_node *np;
+
+	/* Get the interrupt parent */
+	if (of_irq_workarounds & OF_IMAP_NO_PHANDLE)
+		np = of_node_get(of_irq_dflt_pic);
+	else
+		np = of_find_node_by_phandle(be32_to_cpup(imap));
+	imap++;
+
+	/* Check if not found */
+	if (!np) {
+		pr_debug(" -> imap parent not found !\n");
+		return NULL;
+	}
+
+	/* Get #interrupt-cells and #address-cells of new parent */
+	if (of_property_read_u32(np, "#interrupt-cells",
+				 &intsize)) {
+		pr_debug(" -> parent lacks #interrupt-cells!\n");
+		of_node_put(np);
+		return NULL;
+	}
+	if (of_property_read_u32(np, "#address-cells",
+				 &addrsize))
+		addrsize = 0;
+
+	pr_debug(" -> intsize=%d, addrsize=%d\n",
+		 intsize, addrsize);
+
+	/* Check for malformed properties */
+	if (WARN_ON(addrsize + intsize > MAX_PHANDLE_ARGS)
+	    || (len < (addrsize + intsize))) {
+		of_node_put(np);
+		return NULL;
+	}
+
+	pr_debug(" -> imaplen=%d\n", len);
+
+	imap += addrsize + intsize;
+
+	out_irq->np = np;
+	for (int i = 0; i < intsize; i++)
+		out_irq->args[i] = be32_to_cpup(imap - intsize + i);
+	out_irq->args_count = intsize;
+
+	return imap;
+}
+
 /**
  * of_irq_parse_raw - Low level interrupt tree parsing
  * @addr: address specifier (start of "reg" property of the device) in be32 format
@@ -112,12 +167,12 @@ static const char * const of_irq_imap_abusers[] = {
 */
 int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
 {
-	struct device_node *ipar, *tnode, *old = NULL, *newpar = NULL;
+	struct device_node *ipar, *tnode, *old = NULL;
	__be32 initial_match_array[MAX_PHANDLE_ARGS];
	const __be32 *match_array = initial_match_array;
-	const __be32 *tmp, *imap, *imask, dummy_imask[] = { [0 ... MAX_PHANDLE_ARGS] = cpu_to_be32(~0) };
-	u32 intsize = 1, addrsize, newintsize = 0, newaddrsize = 0;
-	int imaplen, match, i, rc = -EINVAL;
+	const __be32 *tmp, dummy_imask[] = { [0 ... MAX_PHANDLE_ARGS] = cpu_to_be32(~0) };
+	u32 intsize = 1, addrsize;
+	int i, rc = -EINVAL;

 #ifdef DEBUG
	of_print_phandle_args("of_irq_parse_raw: ", out_irq);
@@ -176,6 +231,9 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)

	/* Now start the actual "proper" walk of the interrupt tree */
	while (ipar != NULL) {
+		int imaplen, match;
+		const __be32 *imap, *oldimap, *imask;
+		struct device_node *newpar;
		/*
		 * Now check if cursor is an interrupt-controller and
		 * if it is then we are done, unless there is an
@@ -216,7 +274,7 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)

		/* Parse interrupt-map */
		match = 0;
-		while (imaplen > (addrsize + intsize + 1) && !match) {
+		while (imaplen > (addrsize + intsize + 1)) {
			/* Compare specifiers */
			match = 1;
			for (i = 0; i < (addrsize + intsize); i++, imaplen--)
@@ -224,74 +282,31 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)

			pr_debug(" -> match=%d (imaplen=%d)\n", match, imaplen);

-			/* Get the interrupt parent */
-			if (of_irq_workarounds & OF_IMAP_NO_PHANDLE)
-				newpar = of_node_get(of_irq_dflt_pic);
-			else
-				newpar = of_find_node_by_phandle(be32_to_cpup(imap));
-			imap++;
-			--imaplen;
-
-			/* Check if not found */
-			if (newpar == NULL) {
-				pr_debug(" -> imap parent not found !\n");
+			oldimap = imap;
+			imap = of_irq_parse_imap_parent(oldimap, imaplen, out_irq);
+			if (!imap)
				goto fail;
-			}
-
-			if (!of_device_is_available(newpar))
-				match = 0;
-
-			/* Get #interrupt-cells and #address-cells of new
-			 * parent
-			 */
-			if (of_property_read_u32(newpar, "#interrupt-cells",
-						 &newintsize)) {
-				pr_debug(" -> parent lacks #interrupt-cells!\n");
-				goto fail;
-			}
-			if (of_property_read_u32(newpar, "#address-cells",
-						 &newaddrsize))
-				newaddrsize = 0;
-
-			pr_debug(" -> newintsize=%d, newaddrsize=%d\n",
-				 newintsize, newaddrsize);
-
-			/* Check for malformed properties */
-			if (WARN_ON(newaddrsize + newintsize > MAX_PHANDLE_ARGS)
-			    || (imaplen < (newaddrsize + newintsize))) {
-				rc = -EFAULT;
-				goto fail;
-			}
-
-			imap += newaddrsize + newintsize;
-			imaplen -= newaddrsize + newintsize;
+			match &= of_device_is_available(out_irq->np);
+			if (match)
+				break;

+			of_node_put(out_irq->np);
+			imaplen -= imap - oldimap;
			pr_debug(" -> imaplen=%d\n", imaplen);
		}
-		if (!match) {
-			if (intc) {
-				/*
-				 * The PASEMI Nemo is a known offender, so
-				 * let's only warn for anyone else.
-				 */
-				WARN(!IS_ENABLED(CONFIG_PPC_PASEMI),
-				     "%pOF interrupt-map failed, using interrupt-controller\n",
-				     ipar);
-				return 0;
-			}
-
+		if (!match)
			goto fail;
-		}

		/*
		 * Successfully parsed an interrupt-map translation; copy new
		 * interrupt specifier into the out_irq structure
		 */
-		match_array = imap - newaddrsize - newintsize;
-		for (i = 0; i < newintsize; i++)
-			out_irq->args[i] = be32_to_cpup(imap - newintsize + i);
-		out_irq->args_count = intsize = newintsize;
-		addrsize = newaddrsize;
+		match_array = oldimap + 1;
+
+		newpar = out_irq->np;
+		intsize = out_irq->args_count;
+		addrsize = (imap - match_array) - intsize;

		if (ipar == newpar) {
			pr_debug("%pOF interrupt-map entry to self\n", ipar);
@@ -300,7 +315,6 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)

	skiplevel:
		/* Iterate again with new parent */
-		out_irq->np = newpar;
		pr_debug(" -> new parent: %pOF\n", newpar);
		of_node_put(ipar);
		ipar = newpar;
@@ -310,7 +324,6 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)

 fail:
	of_node_put(ipar);
-	of_node_put(newpar);

	return rc;
 }
@@ -158,6 +158,9 @@ extern void __of_sysfs_remove_bin_file(struct device_node *np,
 extern int of_bus_n_addr_cells(struct device_node *np);
 extern int of_bus_n_size_cells(struct device_node *np);

+const __be32 *of_irq_parse_imap_parent(const __be32 *imap, int len,
+				       struct of_phandle_args *out_irq);
+
 struct bus_dma_region;
 #if defined(CONFIG_OF_ADDRESS) && defined(CONFIG_HAS_DMA)
 int of_dma_get_range(struct device_node *np,
@@ -611,7 +611,7 @@ static inline void pmu_sbi_stop_all(struct riscv_pmu *pmu)
	 * which may include counters that are not enabled yet.
	 */
	sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP,
-		  0, pmu->cmask, 0, 0, 0, 0);
+		  0, pmu->cmask, SBI_PMU_STOP_FLAG_RESET, 0, 0, 0);
 }

 static inline void pmu_sbi_stop_hw_ctrs(struct riscv_pmu *pmu)
|
@ -1198,6 +1198,7 @@ static int nvsw_sn2201_config_pre_init(struct nvsw_sn2201 *nvsw_sn2201)
|
|||
static int nvsw_sn2201_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct nvsw_sn2201 *nvsw_sn2201;
|
||||
int ret;
|
||||
|
||||
nvsw_sn2201 = devm_kzalloc(&pdev->dev, sizeof(*nvsw_sn2201), GFP_KERNEL);
|
||||
if (!nvsw_sn2201)
|
||||
|
@ -1205,8 +1206,10 @@ static int nvsw_sn2201_probe(struct platform_device *pdev)
|
|||
|
||||
nvsw_sn2201->dev = &pdev->dev;
|
||||
platform_set_drvdata(pdev, nvsw_sn2201);
|
||||
platform_device_add_resources(pdev, nvsw_sn2201_lpc_io_resources,
|
||||
ret = platform_device_add_resources(pdev, nvsw_sn2201_lpc_io_resources,
|
||||
ARRAY_SIZE(nvsw_sn2201_lpc_io_resources));
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
nvsw_sn2201->main_mux_deferred_nr = NVSW_SN2201_MAIN_MUX_DEFER_NR;
|
||||
nvsw_sn2201->main_mux_devs = nvsw_sn2201_main_mux_brdinfo;
|
||||
|
|
|
@ -39,8 +39,6 @@ MODULE_LICENSE("GPL");
#define WMI_METHOD_WMBB "2B4F501A-BD3C-4394-8DCF-00A7D2BC8210"
#define WMI_EVENT_GUID WMI_EVENT_GUID0

#define WMAB_METHOD "\\XINI.WMAB"
#define WMBB_METHOD "\\XINI.WMBB"
#define SB_GGOV_METHOD "\\_SB.GGOV"
#define GOV_TLED 0x2020008
#define WM_GET 1

@ -74,7 +72,7 @@ static u32 inited;

static int battery_limit_use_wmbb;
static struct led_classdev kbd_backlight;
static enum led_brightness get_kbd_backlight_level(void);
static enum led_brightness get_kbd_backlight_level(struct device *dev);

static const struct key_entry wmi_keymap[] = {
	{KE_KEY, 0x70, {KEY_F15} },	 /* LG control panel (F1) */

@ -84,7 +82,6 @@ static const struct key_entry wmi_keymap[] = {
					 * this key both sends an event and
					 * changes backlight level.
					 */
	{KE_KEY, 0x80, {KEY_RFKILL} },
	{KE_END, 0}
};

@ -128,11 +125,10 @@ static int ggov(u32 arg0)
	return res;
}

static union acpi_object *lg_wmab(u32 method, u32 arg1, u32 arg2)
static union acpi_object *lg_wmab(struct device *dev, u32 method, u32 arg1, u32 arg2)
{
	union acpi_object args[3];
	acpi_status status;
	acpi_handle handle;
	struct acpi_object_list arg;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };

@ -143,29 +139,22 @@ static union acpi_object *lg_wmab(u32 method, u32 arg1, u32 arg2)
	args[2].type = ACPI_TYPE_INTEGER;
	args[2].integer.value = arg2;

	status = acpi_get_handle(NULL, (acpi_string) WMAB_METHOD, &handle);
	if (ACPI_FAILURE(status)) {
		pr_err("Cannot get handle");
		return NULL;
	}

	arg.count = 3;
	arg.pointer = args;

	status = acpi_evaluate_object(handle, NULL, &arg, &buffer);
	status = acpi_evaluate_object(ACPI_HANDLE(dev), "WMAB", &arg, &buffer);
	if (ACPI_FAILURE(status)) {
		acpi_handle_err(handle, "WMAB: call failed.\n");
		dev_err(dev, "WMAB: call failed.\n");
		return NULL;
	}

	return buffer.pointer;
}

static union acpi_object *lg_wmbb(u32 method_id, u32 arg1, u32 arg2)
static union acpi_object *lg_wmbb(struct device *dev, u32 method_id, u32 arg1, u32 arg2)
{
	union acpi_object args[3];
	acpi_status status;
	acpi_handle handle;
	struct acpi_object_list arg;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	u8 buf[32];

@ -181,18 +170,12 @@ static union acpi_object *lg_wmbb(u32 method_id, u32 arg1, u32 arg2)
	args[2].buffer.length = 32;
	args[2].buffer.pointer = buf;

	status = acpi_get_handle(NULL, (acpi_string)WMBB_METHOD, &handle);
	if (ACPI_FAILURE(status)) {
		pr_err("Cannot get handle");
		return NULL;
	}

	arg.count = 3;
	arg.pointer = args;

	status = acpi_evaluate_object(handle, NULL, &arg, &buffer);
	status = acpi_evaluate_object(ACPI_HANDLE(dev), "WMBB", &arg, &buffer);
	if (ACPI_FAILURE(status)) {
		acpi_handle_err(handle, "WMAB: call failed.\n");
		dev_err(dev, "WMBB: call failed.\n");
		return NULL;
	}

@ -223,7 +206,7 @@ static void wmi_notify(u32 value, void *context)

	if (eventcode == 0x10000000) {
		led_classdev_notify_brightness_hw_changed(
			&kbd_backlight, get_kbd_backlight_level());
			&kbd_backlight, get_kbd_backlight_level(kbd_backlight.dev->parent));
	} else {
		key = sparse_keymap_entry_from_scancode(
			wmi_input_dev, eventcode);

@ -272,14 +255,7 @@ static void wmi_input_setup(void)

static void acpi_notify(struct acpi_device *device, u32 event)
{
	struct key_entry *key;

	acpi_handle_debug(device->handle, "notify: %d\n", event);
	if (inited & INIT_SPARSE_KEYMAP) {
		key = sparse_keymap_entry_from_scancode(wmi_input_dev, 0x80);
		if (key && key->type == KE_KEY)
			sparse_keymap_report_entry(wmi_input_dev, key, 1, true);
	}
}

static ssize_t fan_mode_store(struct device *dev,

@ -295,7 +271,7 @@ static ssize_t fan_mode_store(struct device *dev,
	if (ret)
		return ret;

	r = lg_wmab(WM_FAN_MODE, WM_GET, 0);
	r = lg_wmab(dev, WM_FAN_MODE, WM_GET, 0);
	if (!r)
		return -EIO;

@ -306,9 +282,9 @@ static ssize_t fan_mode_store(struct device *dev,

	m = r->integer.value;
	kfree(r);
	r = lg_wmab(WM_FAN_MODE, WM_SET, (m & 0xffffff0f) | (value << 4));
	r = lg_wmab(dev, WM_FAN_MODE, WM_SET, (m & 0xffffff0f) | (value << 4));
	kfree(r);
	r = lg_wmab(WM_FAN_MODE, WM_SET, (m & 0xfffffff0) | value);
	r = lg_wmab(dev, WM_FAN_MODE, WM_SET, (m & 0xfffffff0) | value);
	kfree(r);

	return count;

@ -320,7 +296,7 @@ static ssize_t fan_mode_show(struct device *dev,
	unsigned int status;
	union acpi_object *r;

	r = lg_wmab(WM_FAN_MODE, WM_GET, 0);
	r = lg_wmab(dev, WM_FAN_MODE, WM_GET, 0);
	if (!r)
		return -EIO;

@ -347,7 +323,7 @@ static ssize_t usb_charge_store(struct device *dev,
	if (ret)
		return ret;

	r = lg_wmbb(WMBB_USB_CHARGE, WM_SET, value);
	r = lg_wmbb(dev, WMBB_USB_CHARGE, WM_SET, value);
	if (!r)
		return -EIO;

@ -361,7 +337,7 @@ static ssize_t usb_charge_show(struct device *dev,
	unsigned int status;
	union acpi_object *r;

	r = lg_wmbb(WMBB_USB_CHARGE, WM_GET, 0);
	r = lg_wmbb(dev, WMBB_USB_CHARGE, WM_GET, 0);
	if (!r)
		return -EIO;

@ -389,7 +365,7 @@ static ssize_t reader_mode_store(struct device *dev,
	if (ret)
		return ret;

	r = lg_wmab(WM_READER_MODE, WM_SET, value);
	r = lg_wmab(dev, WM_READER_MODE, WM_SET, value);
	if (!r)
		return -EIO;

@ -403,7 +379,7 @@ static ssize_t reader_mode_show(struct device *dev,
	unsigned int status;
	union acpi_object *r;

	r = lg_wmab(WM_READER_MODE, WM_GET, 0);
	r = lg_wmab(dev, WM_READER_MODE, WM_GET, 0);
	if (!r)
		return -EIO;

@ -431,7 +407,7 @@ static ssize_t fn_lock_store(struct device *dev,
	if (ret)
		return ret;

	r = lg_wmab(WM_FN_LOCK, WM_SET, value);
	r = lg_wmab(dev, WM_FN_LOCK, WM_SET, value);
	if (!r)
		return -EIO;

@ -445,7 +421,7 @@ static ssize_t fn_lock_show(struct device *dev,
	unsigned int status;
	union acpi_object *r;

	r = lg_wmab(WM_FN_LOCK, WM_GET, 0);
	r = lg_wmab(dev, WM_FN_LOCK, WM_GET, 0);
	if (!r)
		return -EIO;

@ -475,9 +451,9 @@ static ssize_t charge_control_end_threshold_store(struct device *dev,
	union acpi_object *r;

	if (battery_limit_use_wmbb)
		r = lg_wmbb(WMBB_BATT_LIMIT, WM_SET, value);
		r = lg_wmbb(&pf_device->dev, WMBB_BATT_LIMIT, WM_SET, value);
	else
		r = lg_wmab(WM_BATT_LIMIT, WM_SET, value);
		r = lg_wmab(&pf_device->dev, WM_BATT_LIMIT, WM_SET, value);
	if (!r)
		return -EIO;

@ -496,7 +472,7 @@ static ssize_t charge_control_end_threshold_show(struct device *device,
	union acpi_object *r;

	if (battery_limit_use_wmbb) {
		r = lg_wmbb(WMBB_BATT_LIMIT, WM_GET, 0);
		r = lg_wmbb(&pf_device->dev, WMBB_BATT_LIMIT, WM_GET, 0);
		if (!r)
			return -EIO;

@ -507,7 +483,7 @@ static ssize_t charge_control_end_threshold_show(struct device *device,

		status = r->buffer.pointer[0x10];
	} else {
		r = lg_wmab(WM_BATT_LIMIT, WM_GET, 0);
		r = lg_wmab(&pf_device->dev, WM_BATT_LIMIT, WM_GET, 0);
		if (!r)
			return -EIO;

@ -586,7 +562,7 @@ static void tpad_led_set(struct led_classdev *cdev,
{
	union acpi_object *r;

	r = lg_wmab(WM_TLED, WM_SET, brightness > LED_OFF);
	r = lg_wmab(cdev->dev->parent, WM_TLED, WM_SET, brightness > LED_OFF);
	kfree(r);
}

@ -608,16 +584,16 @@ static void kbd_backlight_set(struct led_classdev *cdev,
		val = 0;
	if (brightness >= LED_FULL)
		val = 0x24;
	r = lg_wmab(WM_KEY_LIGHT, WM_SET, val);
	r = lg_wmab(cdev->dev->parent, WM_KEY_LIGHT, WM_SET, val);
	kfree(r);
}

static enum led_brightness get_kbd_backlight_level(void)
static enum led_brightness get_kbd_backlight_level(struct device *dev)
{
	union acpi_object *r;
	int val;

	r = lg_wmab(WM_KEY_LIGHT, WM_GET, 0);
	r = lg_wmab(dev, WM_KEY_LIGHT, WM_GET, 0);

	if (!r)
		return LED_OFF;

@ -645,7 +621,7 @@ static enum led_brightness get_kbd_backlight_level(void)

static enum led_brightness kbd_backlight_get(struct led_classdev *cdev)
{
	return get_kbd_backlight_level();
	return get_kbd_backlight_level(cdev->dev->parent);
}

static LED_DEVICE(kbd_backlight, 255, LED_BRIGHT_HW_CHANGED);

@ -672,6 +648,11 @@ static struct platform_driver pf_driver = {

static int acpi_add(struct acpi_device *device)
{
	struct platform_device_info pdev_info = {
		.fwnode = acpi_fwnode_handle(device),
		.name = PLATFORM_NAME,
		.id = PLATFORM_DEVID_NONE,
	};
	int ret;
	const char *product;
	int year = 2017;

@ -683,9 +664,7 @@ static int acpi_add(struct acpi_device *device)
	if (ret)
		return ret;

	pf_device = platform_device_register_simple(PLATFORM_NAME,
						    PLATFORM_DEVID_NONE,
						    NULL, 0);
	pf_device = platform_device_register_full(&pdev_info);
	if (IS_ERR(pf_device)) {
		ret = PTR_ERR(pf_device);
		pf_device = NULL;

@ -776,7 +755,7 @@ static void acpi_remove(struct acpi_device *device)
}

static const struct acpi_device_id device_ids[] = {
	{"LGEX0815", 0},
	{"LGEX0820", 0},
	{"", 0}
};
MODULE_DEVICE_TABLE(acpi, device_ids);

@ -19,6 +19,7 @@ MODULE_AUTHOR("Alex Hung");
MODULE_ALIAS("acpi*:HPQ6001:*");
MODULE_ALIAS("acpi*:WSTADEF:*");
MODULE_ALIAS("acpi*:AMDI0051:*");
MODULE_ALIAS("acpi*:LGEX0815:*");

struct wl_button {
	struct input_dev *input_dev;

@ -29,6 +30,7 @@ static const struct acpi_device_id wl_ids[] = {
	{"HPQ6001", 0},
	{"WSTADEF", 0},
	{"AMDI0051", 0},
	{"LGEX0815", 0},
	{"", 0},
};

@ -1293,6 +1293,7 @@ sclp_init(void)
fail_unregister_reboot_notifier:
	unregister_reboot_notifier(&sclp_reboot_notifier);
fail_init_state_uninitialized:
	list_del(&sclp_state_change_event.list);
	sclp_init_state = sclp_init_state_uninitialized;
	free_page((unsigned long) sclp_read_sccb);
	free_page((unsigned long) sclp_init_sccb);

@ -414,28 +414,40 @@ static char print_alua_state(unsigned char state)
	}
}

static enum scsi_disposition alua_check_sense(struct scsi_device *sdev,
					      struct scsi_sense_hdr *sense_hdr)
static void alua_handle_state_transition(struct scsi_device *sdev)
{
	struct alua_dh_data *h = sdev->handler_data;
	struct alua_port_group *pg;

	rcu_read_lock();
	pg = rcu_dereference(h->pg);
	if (pg)
		pg->state = SCSI_ACCESS_STATE_TRANSITIONING;
	rcu_read_unlock();
	alua_check(sdev, false);
}

static enum scsi_disposition alua_check_sense(struct scsi_device *sdev,
					      struct scsi_sense_hdr *sense_hdr)
{
	switch (sense_hdr->sense_key) {
	case NOT_READY:
		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0a) {
			/*
			 * LUN Not Accessible - ALUA state transition
			 */
			rcu_read_lock();
			pg = rcu_dereference(h->pg);
			if (pg)
				pg->state = SCSI_ACCESS_STATE_TRANSITIONING;
			rcu_read_unlock();
			alua_check(sdev, false);
			alua_handle_state_transition(sdev);
			return NEEDS_RETRY;
		}
		break;
	case UNIT_ATTENTION:
		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0a) {
			/*
			 * LUN Not Accessible - ALUA state transition
			 */
			alua_handle_state_transition(sdev);
			return NEEDS_RETRY;
		}
		if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00) {
			/*
			 * Power On, Reset, or Bus Device Reset.

@ -502,7 +514,8 @@ static int alua_tur(struct scsi_device *sdev)

	retval = scsi_test_unit_ready(sdev, ALUA_FAILOVER_TIMEOUT * HZ,
				      ALUA_FAILOVER_RETRIES, &sense_hdr);
	if (sense_hdr.sense_key == NOT_READY &&
	if ((sense_hdr.sense_key == NOT_READY ||
	     sense_hdr.sense_key == UNIT_ATTENTION) &&
	    sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a)
		return SCSI_DH_RETRY;
	else if (retval)

@ -133,6 +133,20 @@ static inline void sas_fail_probe(struct domain_device *dev, const char *func, i
		   func, dev->parent ? "exp-attached" :
		   "direct-attached",
		   SAS_ADDR(dev->sas_addr), err);

	/*
	 * If the device probe failed, the expander phy attached address
	 * needs to be reset so that the phy will not be treated as flutter
	 * in the next revalidation
	 */
	if (dev->parent && !dev_is_expander(dev->dev_type)) {
		struct sas_phy *phy = dev->phy;
		struct domain_device *parent = dev->parent;
		struct ex_phy *ex_phy = &parent->ex_dev.ex_phy[phy->number];

		memset(ex_phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
	}

	sas_unregister_dev(dev->port, dev);
}

@ -362,6 +362,7 @@ struct qedf_ctx {
#define QEDF_IN_RECOVERY		5
#define QEDF_DBG_STOP_IO		6
#define QEDF_PROBING			8
#define QEDF_STAG_IN_PROGRESS		9
	unsigned long flags; /* Miscellaneous state flags */
	int fipvlan_retries;
	u8 num_queues;

@ -318,11 +318,18 @@ static struct fc_seq *qedf_elsct_send(struct fc_lport *lport, u32 did,
	 */
	if (resp == fc_lport_flogi_resp) {
		qedf->flogi_cnt++;
		qedf->flogi_pending++;

		if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
			QEDF_ERR(&qedf->dbg_ctx, "Driver unloading\n");
			qedf->flogi_pending = 0;
		}

		if (qedf->flogi_pending >= QEDF_FLOGI_RETRY_CNT) {
			schedule_delayed_work(&qedf->stag_work, 2);
			return NULL;
		}
		qedf->flogi_pending++;

		return fc_elsct_send(lport, did, fp, op, qedf_flogi_resp,
				     arg, timeout);
	}

@ -911,13 +918,14 @@ void qedf_ctx_soft_reset(struct fc_lport *lport)
	struct qedf_ctx *qedf;
	struct qed_link_output if_link;

	qedf = lport_priv(lport);

	if (lport->vport) {
		clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
		printk_ratelimited("Cannot issue host reset on NPIV port.\n");
		return;
	}

	qedf = lport_priv(lport);

	qedf->flogi_pending = 0;
	/* For host reset, essentially do a soft link up/down */
	atomic_set(&qedf->link_state, QEDF_LINK_DOWN);

@ -937,6 +945,7 @@ void qedf_ctx_soft_reset(struct fc_lport *lport)
	if (!if_link.link_up) {
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
			  "Physical link is not up.\n");
		clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
		return;
	}
	/* Flush and wait to make sure link down is processed */

@ -949,6 +958,7 @@ void qedf_ctx_soft_reset(struct fc_lport *lport)
		  "Queue link up work.\n");
	queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
			   0);
	clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
}

/* Reset the host by gracefully logging out and then logging back in */

@ -3462,6 +3472,7 @@ retry_probe:
	}

	/* Start the Slowpath-process */
	memset(&slowpath_params, 0, sizeof(struct qed_slowpath_params));
	slowpath_params.int_mode = QED_INT_MODE_MSIX;
	slowpath_params.drv_major = QEDF_DRIVER_MAJOR_VER;
	slowpath_params.drv_minor = QEDF_DRIVER_MINOR_VER;

@ -3720,6 +3731,7 @@ static void __qedf_remove(struct pci_dev *pdev, int mode)
{
	struct qedf_ctx *qedf;
	int rc;
	int cnt = 0;

	if (!pdev) {
		QEDF_ERR(NULL, "pdev is NULL.\n");

@ -3737,6 +3749,17 @@ static void __qedf_remove(struct pci_dev *pdev, int mode)
		return;
	}

stag_in_prog:
	if (test_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags)) {
		QEDF_ERR(&qedf->dbg_ctx, "Stag in progress, cnt=%d.\n", cnt);
		cnt++;

		if (cnt < 5) {
			msleep(500);
			goto stag_in_prog;
		}
	}

	if (mode != QEDF_MODE_RECOVERY)
		set_bit(QEDF_UNLOADING, &qedf->flags);

@ -3996,6 +4019,24 @@ void qedf_stag_change_work(struct work_struct *work)
	struct qedf_ctx *qedf =
		container_of(work, struct qedf_ctx, stag_work.work);

	if (!qedf) {
		QEDF_ERR(&qedf->dbg_ctx, "qedf is NULL");
		return;
	}

	if (test_bit(QEDF_IN_RECOVERY, &qedf->flags)) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "Already is in recovery, hence not calling software context reset.\n");
		return;
	}

	if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
		QEDF_ERR(&qedf->dbg_ctx, "Driver unloading\n");
		return;
	}

	set_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);

	printk_ratelimited("[%s]:[%s:%d]:%d: Performing software context reset.",
			   dev_name(&qedf->pdev->dev), __func__, __LINE__,
			   qedf->dbg_ctx.host_no);

|
|||
int sr_get_last_session(struct cdrom_device_info *, struct cdrom_multisession *);
|
||||
int sr_get_mcn(struct cdrom_device_info *, struct cdrom_mcn *);
|
||||
int sr_reset(struct cdrom_device_info *);
|
||||
int sr_select_speed(struct cdrom_device_info *cdi, int speed);
|
||||
int sr_select_speed(struct cdrom_device_info *cdi, unsigned long speed);
|
||||
int sr_audio_ioctl(struct cdrom_device_info *, unsigned int, void *);
|
||||
|
||||
int sr_is_xa(Scsi_CD *);
|
||||
|
|
|
@ -425,11 +425,14 @@ int sr_reset(struct cdrom_device_info *cdi)
|
|||
return 0;
|
||||
}
|
||||
|
||||
int sr_select_speed(struct cdrom_device_info *cdi, int speed)
|
||||
int sr_select_speed(struct cdrom_device_info *cdi, unsigned long speed)
|
||||
{
|
||||
Scsi_CD *cd = cdi->handle;
|
||||
struct packet_command cgc;
|
||||
|
||||
/* avoid exceeding the max speed or overflowing integer bounds */
|
||||
speed = clamp(0, speed, 0xffff / 177);
|
||||
|
||||
if (speed == 0)
|
||||
speed = 0xffff; /* set to max */
|
||||
else
|
||||
|
|
|
@ -1050,7 +1050,7 @@ static struct spi_imx_devtype_data imx35_cspi_devtype_data = {
|
|||
.rx_available = mx31_rx_available,
|
||||
.reset = mx31_reset,
|
||||
.fifo_size = 8,
|
||||
.has_dmamode = true,
|
||||
.has_dmamode = false,
|
||||
.dynamic_burst = false,
|
||||
.has_targetmode = false,
|
||||
.devtype = IMX35_CSPI,
|
||||
|
|
|
@ -156,6 +156,7 @@ static int spi_mux_probe(struct spi_device *spi)
|
|||
/* supported modes are the same as our parent's */
|
||||
ctlr->mode_bits = spi->controller->mode_bits;
|
||||
ctlr->flags = spi->controller->flags;
|
||||
ctlr->bits_per_word_mask = spi->controller->bits_per_word_mask;
|
||||
ctlr->transfer_one_message = spi_mux_transfer_one_message;
|
||||
ctlr->setup = spi_mux_setup;
|
||||
ctlr->num_chipselect = mux_control_states(priv->mux);
|
||||
|
|
|
@ -4000,7 +4000,8 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
|
|||
return -EINVAL;
|
||||
if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
|
||||
xfer->tx_nbits != SPI_NBITS_DUAL &&
|
||||
xfer->tx_nbits != SPI_NBITS_QUAD)
|
||||
xfer->tx_nbits != SPI_NBITS_QUAD &&
|
||||
xfer->tx_nbits != SPI_NBITS_OCTAL)
|
||||
return -EINVAL;
|
||||
if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
|
||||
!(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
|
||||
|
@ -4015,7 +4016,8 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
|
|||
return -EINVAL;
|
||||
if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
|
||||
xfer->rx_nbits != SPI_NBITS_DUAL &&
|
||||
xfer->rx_nbits != SPI_NBITS_QUAD)
|
||||
xfer->rx_nbits != SPI_NBITS_QUAD &&
|
||||
xfer->rx_nbits != SPI_NBITS_OCTAL)
|
||||
return -EINVAL;
|
||||
if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
|
||||
!(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
|
||||
|
|
|
@ -657,7 +657,9 @@ static bool optee_ffa_api_is_compatbile(struct ffa_device *ffa_dev,
					const struct ffa_ops *ops)
{
	const struct ffa_msg_ops *msg_ops = ops->msg_ops;
	struct ffa_send_direct_data data = { OPTEE_FFA_GET_API_VERSION };
	struct ffa_send_direct_data data = {
		.data0 = OPTEE_FFA_GET_API_VERSION,
	};
	int rc;

	msg_ops->mode_32bit_set(ffa_dev);

@ -674,7 +676,9 @@ static bool optee_ffa_api_is_compatbile(struct ffa_device *ffa_dev,
		return false;
	}

	data = (struct ffa_send_direct_data){ OPTEE_FFA_GET_OS_VERSION };
	data = (struct ffa_send_direct_data){
		.data0 = OPTEE_FFA_GET_OS_VERSION,
	};
	rc = msg_ops->sync_send_receive(ffa_dev, &data);
	if (rc) {
		pr_err("Unexpected error %d\n", rc);

@ -694,7 +698,9 @@ static bool optee_ffa_exchange_caps(struct ffa_device *ffa_dev,
				    u32 *sec_caps,
				    unsigned int *rpc_param_count)
{
	struct ffa_send_direct_data data = { OPTEE_FFA_EXCHANGE_CAPABILITIES };
	struct ffa_send_direct_data data = {
		.data0 = OPTEE_FFA_EXCHANGE_CAPABILITIES,
	};
	int rc;

	rc = ops->msg_ops->sync_send_receive(ffa_dev, &data);

@ -496,7 +496,7 @@ static void afs_extend_writeback(struct address_space *mapping,
		if (folio_index(folio) != index)
			break;

		if (!folio_try_get_rcu(folio)) {
		if (!folio_try_get(folio)) {
			xas_reset(&xas);
			continue;
		}

@ -1228,7 +1228,7 @@ out:

int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *quota_root;
	struct btrfs_root *quota_root = NULL;
	struct btrfs_trans_handle *trans = NULL;
	int ret = 0;

@ -1323,9 +1323,9 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
	btrfs_free_tree_block(trans, btrfs_root_id(quota_root),
			      quota_root->node, 0, 1);

	btrfs_put_root(quota_root);

out:
	btrfs_put_root(quota_root);
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	if (ret && trans)
		btrfs_end_transaction(trans);

@ -8,6 +8,7 @@
#include <linux/slab.h>
#include <linux/statfs.h>
#include <linux/namei.h>
#include <trace/events/fscache.h>
#include "internal.h"

/*

@ -312,19 +313,59 @@ static void cachefiles_withdraw_objects(struct cachefiles_cache *cache)
}

/*
 * Withdraw volumes.
 * Withdraw fscache volumes.
 */
static void cachefiles_withdraw_fscache_volumes(struct cachefiles_cache *cache)
{
	struct list_head *cur;
	struct cachefiles_volume *volume;
	struct fscache_volume *vcookie;

	_enter("");
retry:
	spin_lock(&cache->object_list_lock);
	list_for_each(cur, &cache->volumes) {
		volume = list_entry(cur, struct cachefiles_volume, cache_link);

		if (atomic_read(&volume->vcookie->n_accesses) == 0)
			continue;

		vcookie = fscache_try_get_volume(volume->vcookie,
						 fscache_volume_get_withdraw);
		if (vcookie) {
			spin_unlock(&cache->object_list_lock);
			fscache_withdraw_volume(vcookie);
			fscache_put_volume(vcookie, fscache_volume_put_withdraw);
			goto retry;
		}
	}
	spin_unlock(&cache->object_list_lock);

	_leave("");
}

/*
 * Withdraw cachefiles volumes.
 */
static void cachefiles_withdraw_volumes(struct cachefiles_cache *cache)
{
	_enter("");

	for (;;) {
		struct fscache_volume *vcookie = NULL;
		struct cachefiles_volume *volume = NULL;

		spin_lock(&cache->object_list_lock);
		if (!list_empty(&cache->volumes)) {
			volume = list_first_entry(&cache->volumes,
						  struct cachefiles_volume, cache_link);
			vcookie = fscache_try_get_volume(volume->vcookie,
							 fscache_volume_get_withdraw);
			if (!vcookie) {
				spin_unlock(&cache->object_list_lock);
				cpu_relax();
				continue;
			}
			list_del_init(&volume->cache_link);
		}
		spin_unlock(&cache->object_list_lock);

@ -332,6 +373,7 @@ static void cachefiles_withdraw_volumes(struct cachefiles_cache *cache)
			break;

		cachefiles_withdraw_volume(volume);
		fscache_put_volume(vcookie, fscache_volume_put_withdraw);
	}

	_leave("");

@ -371,6 +413,7 @@ void cachefiles_withdraw_cache(struct cachefiles_cache *cache)
	pr_info("File cache on %s unregistering\n", fscache->name);

	fscache_withdraw_cache(fscache);
	cachefiles_withdraw_fscache_volumes(cache);

	/* we now have to destroy all the active objects pertaining to this
	 * cache - which we do by passing them off to thread pool to be

@ -97,12 +97,12 @@ static loff_t cachefiles_ondemand_fd_llseek(struct file *filp, loff_t pos,
}

static long cachefiles_ondemand_fd_ioctl(struct file *filp, unsigned int ioctl,
					 unsigned long arg)
					 unsigned long id)
{
	struct cachefiles_object *object = filp->private_data;
	struct cachefiles_cache *cache = object->volume->cache;
	struct cachefiles_req *req;
	unsigned long id;
	XA_STATE(xas, &cache->reqs, id);

	if (ioctl != CACHEFILES_IOC_READ_COMPLETE)
		return -EINVAL;

@ -110,10 +110,15 @@ static long cachefiles_ondemand_fd_ioctl(struct file *filp, unsigned int ioctl,
	if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
		return -EOPNOTSUPP;

	id = arg;
	req = xa_erase(&cache->reqs, id);
	if (!req)
	xa_lock(&cache->reqs);
	req = xas_load(&xas);
	if (!req || req->msg.opcode != CACHEFILES_OP_READ ||
	    req->object != object) {
		xa_unlock(&cache->reqs);
		return -EINVAL;
	}
	xas_store(&xas, NULL);
	xa_unlock(&cache->reqs);

	trace_cachefiles_ondemand_cread(object, id);
	complete(&req->done);

@ -142,6 +147,7 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
	unsigned long id;
	long size;
	int ret;
	XA_STATE(xas, &cache->reqs, 0);

	if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
		return -EOPNOTSUPP;

@ -165,10 +171,18 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
	if (ret)
		return ret;

	req = xa_erase(&cache->reqs, id);
	if (!req)
	xa_lock(&cache->reqs);
	xas.xa_index = id;
	req = xas_load(&xas);
	if (!req || req->msg.opcode != CACHEFILES_OP_OPEN ||
	    !req->object->ondemand->ondemand_id) {
		xa_unlock(&cache->reqs);
		return -EINVAL;
	}
	xas_store(&xas, NULL);
	xa_unlock(&cache->reqs);

	info = req->object->ondemand;
	/* fail OPEN request if copen format is invalid */
	ret = kstrtol(psize, 0, &size);
	if (ret) {

@ -188,7 +202,6 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
		goto out;
	}

	info = req->object->ondemand;
	spin_lock(&info->lock);
	/*
	 * The anonymous fd was closed before copen ? Fail the request.

@ -228,6 +241,11 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
	wake_up_all(&cache->daemon_pollwq);

out:
	spin_lock(&info->lock);
	/* Need to set object close to avoid reopen status continuing */
	if (info->ondemand_id == CACHEFILES_ONDEMAND_ID_CLOSED)
		cachefiles_ondemand_set_object_close(req->object);
	spin_unlock(&info->lock);
	complete(&req->done);
	return ret;
}

@ -362,6 +380,20 @@ static struct cachefiles_req *cachefiles_ondemand_select_req(struct xa_state *xa
	return NULL;
}

static inline bool cachefiles_ondemand_finish_req(struct cachefiles_req *req,
						  struct xa_state *xas, int err)
{
	if (unlikely(!xas || !req))
		return false;

	if (xa_cmpxchg(xas->xa, xas->xa_index, req, NULL, 0) != req)
		return false;

	req->error = err;
	complete(&req->done);
	return true;
}

ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
					char __user *_buffer, size_t buflen)
{

@ -425,16 +457,8 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
out:
	cachefiles_put_object(req->object, cachefiles_obj_put_read_req);
	/* Remove error request and CLOSE request has no reply */
	if (ret || msg->opcode == CACHEFILES_OP_CLOSE) {
		xas_reset(&xas);
		xas_lock(&xas);
		if (xas_load(&xas) == req) {
			req->error = ret;
			complete(&req->done);
			xas_store(&xas, NULL);
		}
		xas_unlock(&xas);
	}
	if (ret || msg->opcode == CACHEFILES_OP_CLOSE)
		cachefiles_ondemand_finish_req(req, &xas, ret);
	cachefiles_req_put(req);
	return ret ? ret : n;
}

@ -539,8 +563,18 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
		goto out;

	wake_up_all(&cache->daemon_pollwq);
	wait_for_completion(&req->done);
	ret = req->error;
wait:
	ret = wait_for_completion_killable(&req->done);
	if (!ret) {
		ret = req->error;
	} else {
		ret = -EINTR;
		if (!cachefiles_ondemand_finish_req(req, &xas, ret)) {
			/* Someone will complete it soon. */
			cpu_relax();
			goto wait;
		}
	}
	cachefiles_req_put(req);
	return ret;
out:

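cachefiles_ondemand_finish_req() leans on xa_cmpxchg() so that exactly one contender — the daemon-read error path or a killed waiter — erases the request slot and completes it. A self-contained sketch of that erase-only-if-still-ours idiom, using a C11 atomic pointer in place of the XArray (names are illustrative, not the kernel API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic(void *) slot;	/* stand-in for one XArray entry */

static bool finish_req(void *req)
{
	void *expected = req;

	/* fails if another path already erased or replaced the entry */
	return atomic_compare_exchange_strong(&slot, &expected, (void *)0);
}

int main(void)
{
	int req;

	atomic_store(&slot, &req);
	printf("first finisher:  %d\n", finish_req(&req));	/* 1: wins and completes */
	printf("second finisher: %d\n", finish_req(&req));	/* 0: already finished */
	return 0;
}
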
@ -133,7 +133,6 @@ void cachefiles_free_volume(struct fscache_volume *vcookie)

void cachefiles_withdraw_volume(struct cachefiles_volume *volume)
{
	fscache_withdraw_volume(volume->vcookie);
	cachefiles_set_volume_xattr(volume);
	__cachefiles_free_volume(volume);
}

fs/dcache.c

@ -3208,28 +3208,25 @@ EXPORT_SYMBOL(d_splice_alias);

bool is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
{
	bool result;
	bool subdir;
	unsigned seq;

	if (new_dentry == old_dentry)
		return true;

	do {
		/* for restarting inner loop in case of seq retry */
		seq = read_seqbegin(&rename_lock);
		/*
		 * Need rcu_readlock to protect against the d_parent trashing
		 * due to d_move
		 */
		rcu_read_lock();
		if (d_ancestor(old_dentry, new_dentry))
			result = true;
		else
			result = false;
		rcu_read_unlock();
	} while (read_seqretry(&rename_lock, seq));

	return result;
	/* Access d_parent under rcu as d_move() may change it. */
	rcu_read_lock();
	seq = read_seqbegin(&rename_lock);
	subdir = d_ancestor(old_dentry, new_dentry);
	/* Try lockless once... */
	if (read_seqretry(&rename_lock, seq)) {
		/* ...else acquire lock for progress even on deep chains. */
		read_seqlock_excl(&rename_lock);
		subdir = d_ancestor(old_dentry, new_dentry);
		read_sequnlock_excl(&rename_lock);
	}
	rcu_read_unlock();
	return subdir;
}
EXPORT_SYMBOL(is_subdir);

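The rewritten is_subdir() tries one lockless pass and, only if a rename raced it, retakes rename_lock exclusively for a single guaranteed pass instead of looping. A control-flow sketch with stand-in stubs (not the kernel seqlock API; the real primitives also handle odd in-progress sequence values):

#include <stdio.h>

static unsigned rename_seq;	/* stand-in for rename_lock's sequence count */

static int walk_ancestors(void)	/* stand-in for the d_ancestor() walk */
{
	return 1;
}

int main(void)
{
	unsigned seq = rename_seq;		/* read_seqbegin() */
	int subdir = walk_ancestors();		/* optimistic, lockless */

	if (seq != rename_seq) {		/* read_seqretry(): a writer raced us */
		/* read_seqlock_excl(): block writers, recompute exactly once */
		subdir = walk_ancestors();
		/* read_sequnlock_excl() */
	}
	printf("subdir=%d\n", subdir);
	return 0;
}

The bounded fallback matters on deep parent chains, where repeated optimistic retries could previously be starved by a steady stream of renames.
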
@ -723,6 +723,8 @@ int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,

	err = z_erofs_do_map_blocks(inode, map, flags);
out:
	if (err)
		map->m_llen = 0;
	trace_z_erofs_map_blocks_iter_exit(inode, map, flags, err);
	return err;
}

@ -481,12 +481,12 @@ struct files_struct init_files = {

static unsigned int find_next_fd(struct fdtable *fdt, unsigned int start)
{
	unsigned int maxfd = fdt->max_fds;
	unsigned int maxfd = fdt->max_fds; /* always multiple of BITS_PER_LONG */
	unsigned int maxbit = maxfd / BITS_PER_LONG;
	unsigned int bitbit = start / BITS_PER_LONG;

	bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG;
	if (bitbit > maxfd)
	if (bitbit >= maxfd)
		return maxfd;
	if (bitbit > start)
		start = bitbit;

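The boundary case behind the ">=" change: when every word in full_fds_bits is set, the word-level search returns maxbit, so bitbit lands exactly on maxfd. The old strict comparison let that case fall through to a pointless per-bit scan of a table already known to be full. A compilable illustration of the arithmetic (BITS_PER_LONG assumed to be 64 here):

#include <stdio.h>

int main(void)
{
	unsigned int maxfd = 64;		/* always a multiple of BITS_PER_LONG */
	unsigned int maxbit = maxfd / 64;	/* 1 */
	unsigned int bitbit = maxbit * 64;	/* "no zero word found" result */

	if (bitbit > maxfd)	/* old test: 64 > 64 is false, keeps scanning */
		printf("old: early return\n");
	if (bitbit >= maxfd)	/* new test: bails out immediately */
		printf("new: early return at maxfd\n");
	return 0;
}
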
@ -145,8 +145,6 @@ extern const struct seq_operations fscache_volumes_seq_ops;

struct fscache_volume *fscache_get_volume(struct fscache_volume *volume,
					  enum fscache_volume_trace where);
void fscache_put_volume(struct fscache_volume *volume,
			enum fscache_volume_trace where);
bool fscache_begin_volume_access(struct fscache_volume *volume,
				 struct fscache_cookie *cookie,
				 enum fscache_access_trace why);

@ -27,6 +27,19 @@ struct fscache_volume *fscache_get_volume(struct fscache_volume *volume,
	return volume;
}

struct fscache_volume *fscache_try_get_volume(struct fscache_volume *volume,
					      enum fscache_volume_trace where)
{
	int ref;

	if (!__refcount_inc_not_zero(&volume->ref, &ref))
		return NULL;

	trace_fscache_volume(volume->debug_id, ref + 1, where);
	return volume;
}
EXPORT_SYMBOL(fscache_try_get_volume);

static void fscache_see_volume(struct fscache_volume *volume,
			       enum fscache_volume_trace where)
{

@ -420,6 +433,7 @@ void fscache_put_volume(struct fscache_volume *volume,
		fscache_free_volume(volume);
	}
}
EXPORT_SYMBOL(fscache_put_volume);

/*
 * Relinquish a volume representation cookie.

@ -698,7 +698,7 @@ ssize_t hfsplus_listxattr(struct dentry *dentry, char *buffer, size_t size)
		return err;
	}

	strbuf = kmalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN +
	strbuf = kzalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN +
			XATTR_MAC_OSX_PREFIX_LEN + 1, GFP_KERNEL);
	if (!strbuf) {
		res = -ENOMEM;

@ -201,6 +201,7 @@ static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
	unsigned block_size = (1 << block_bits);
	size_t poff = offset_in_folio(folio, *pos);
	size_t plen = min_t(loff_t, folio_size(folio) - poff, length);
	size_t orig_plen = plen;
	unsigned first = poff >> block_bits;
	unsigned last = (poff + plen - 1) >> block_bits;

@ -237,7 +238,7 @@ static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
	 * handle both halves separately so that we properly zero data in the
	 * page cache for blocks that are entirely outside of i_size.
	 */
	if (orig_pos <= isize && orig_pos + length > isize) {
	if (orig_pos <= isize && orig_pos + orig_plen > isize) {
		unsigned end = offset_in_folio(folio, isize - 1) >> block_bits;

		if (first <= end && last > end)

@ -2381,8 +2381,9 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
	error = do_lock_file_wait(filp, cmd, file_lock);

	/*
	 * Attempt to detect a close/fcntl race and recover by releasing the
	 * lock that was just acquired. There is no need to do that when we're
	 * Detect close/fcntl races and recover by zapping all POSIX locks
	 * associated with this file and our files_struct, just like on
	 * filp_flush(). There is no need to do that when we're
	 * unlocking though, or for OFD locks.
	 */
	if (!error && file_lock->fl_type != F_UNLCK &&

@ -2397,9 +2398,7 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
		f = files_lookup_fd_locked(files, fd);
		spin_unlock(&files->file_lock);
		if (f != filp) {
			file_lock->fl_type = F_UNLCK;
			error = do_lock_file_wait(filp, cmd, file_lock);
			WARN_ON_ONCE(error);
			locks_remove_posix(filp, files);
			error = -EBADF;
		}
	}

fs/nfs/dir.c

@ -1625,7 +1625,16 @@ nfs_lookup_revalidate_done(struct inode *dir, struct dentry *dentry,
	switch (error) {
	case 1:
		break;
	case 0:
	case -ETIMEDOUT:
		if (inode && (IS_ROOT(dentry) ||
			      NFS_SERVER(inode)->flags & NFS_MOUNT_SOFTREVAL))
			error = 1;
		break;
	case -ESTALE:
	case -ENOENT:
		error = 0;
		fallthrough;
	default:
		/*
		 * We can't d_drop the root of a disconnected tree:
		 * its d_hash is on the s_anon list and d_drop() would hide

@ -1680,18 +1689,8 @@ static int nfs_lookup_revalidate_dentry(struct inode *dir,

	dir_verifier = nfs_save_change_attribute(dir);
	ret = NFS_PROTO(dir)->lookup(dir, dentry, fhandle, fattr);
	if (ret < 0) {
		switch (ret) {
		case -ESTALE:
		case -ENOENT:
			ret = 0;
			break;
		case -ETIMEDOUT:
			if (NFS_SERVER(inode)->flags & NFS_MOUNT_SOFTREVAL)
				ret = 1;
		}
	if (ret < 0)
		goto out;
	}

	/* Request help from readdirplus */
	nfs_lookup_advise_force_readdirplus(dir, flags);

@ -1735,7 +1734,7 @@ nfs_do_lookup_revalidate(struct inode *dir, struct dentry *dentry,
			 unsigned int flags)
{
	struct inode *inode;
	int error;
	int error = 0;

	nfs_inc_stats(dir, NFSIOS_DENTRYREVALIDATE);
	inode = d_inode(dentry);

@ -1780,7 +1779,7 @@ out_valid:
out_bad:
	if (flags & LOOKUP_RCU)
		return -ECHILD;
	return nfs_lookup_revalidate_done(dir, dentry, inode, 0);
	return nfs_lookup_revalidate_done(dir, dentry, inode, error);
}

static int

@ -6268,6 +6268,7 @@ nfs4_set_security_label(struct inode *inode, const void *buf, size_t buflen)
	if (status == 0)
		nfs_setsecurity(inode, fattr);

	nfs_free_fattr(fattr);
	return status;
}
#endif /* CONFIG_NFS_V4_SECURITY_LABEL */

@ -1545,6 +1545,11 @@ void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
				continue;
		} else if (index == prev->wb_index + 1)
			continue;
		/*
		 * We will submit more requests after these. Indicate
		 * this to the underlying layers.
		 */
		desc->pg_moreio = 1;
		nfs_pageio_complete(desc);
		break;
	}

@ -41,7 +41,7 @@ static int nfs_symlink_filler(struct file *file, struct folio *folio)
error:
	folio_set_error(folio);
	folio_unlock(folio);
	return -EIO;
	return error;
}

static const char *nfs_get_link(struct dentry *dentry,

@ -1404,7 +1404,7 @@ ssize_t cifs_file_copychunk_range(unsigned int xid,
	target_tcon = tlink_tcon(smb_file_target->tlink);

	if (src_tcon->ses != target_tcon->ses) {
		cifs_dbg(VFS, "source and target of copy not on same server\n");
		cifs_dbg(FYI, "source and target of copy not on same server\n");
		goto out;
	}

@ -2749,7 +2749,7 @@ static void cifs_extend_writeback(struct address_space *mapping,
			break;
		}

		if (!folio_try_get_rcu(folio)) {
		if (!folio_try_get(folio)) {
			xas_reset(xas);
			continue;
		}

@ -2985,7 +2985,7 @@ search_again:
		if (!folio)
			break;

		if (!folio_try_get_rcu(folio)) {
		if (!folio_try_get(folio)) {
			xas_reset(xas);
			continue;
		}

@ -917,6 +917,40 @@ struct smb2_query_directory_rsp {
	__u8   Buffer[];
} __packed;

/* DeviceType Flags */
#define FILE_DEVICE_CD_ROM		0x00000002
#define FILE_DEVICE_CD_ROM_FILE_SYSTEM	0x00000003
#define FILE_DEVICE_DFS			0x00000006
#define FILE_DEVICE_DISK		0x00000007
#define FILE_DEVICE_DISK_FILE_SYSTEM	0x00000008
#define FILE_DEVICE_FILE_SYSTEM		0x00000009
#define FILE_DEVICE_NAMED_PIPE		0x00000011
#define FILE_DEVICE_NETWORK		0x00000012
#define FILE_DEVICE_NETWORK_FILE_SYSTEM	0x00000014
#define FILE_DEVICE_NULL		0x00000015
#define FILE_DEVICE_PARALLEL_PORT	0x00000016
#define FILE_DEVICE_PRINTER		0x00000018
#define FILE_DEVICE_SERIAL_PORT		0x0000001b
#define FILE_DEVICE_STREAMS		0x0000001e
#define FILE_DEVICE_TAPE		0x0000001f
#define FILE_DEVICE_TAPE_FILE_SYSTEM	0x00000020
#define FILE_DEVICE_VIRTUAL_DISK	0x00000024
#define FILE_DEVICE_NETWORK_REDIRECTOR	0x00000028

/* Device Characteristics */
#define FILE_REMOVABLE_MEDIA			0x00000001
#define FILE_READ_ONLY_DEVICE			0x00000002
#define FILE_FLOPPY_DISKETTE			0x00000004
#define FILE_WRITE_ONCE_MEDIA			0x00000008
#define FILE_REMOTE_DEVICE			0x00000010
#define FILE_DEVICE_IS_MOUNTED			0x00000020
#define FILE_VIRTUAL_VOLUME			0x00000040
#define FILE_DEVICE_SECURE_OPEN			0x00000100
#define FILE_CHARACTERISTIC_TS_DEVICE		0x00001000
#define FILE_CHARACTERISTIC_WEBDAV_DEVICE	0x00002000
#define FILE_PORTABLE_DEVICE			0x00004000
#define FILE_DEVICE_ALLOW_APPCONTAINER_TRAVERSAL 0x00020000

/*
 * Maximum number of iovs we need for a set-info request.
 * The largest one is rename/hardlink

@ -5323,8 +5323,13 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,

		info = (struct filesystem_device_info *)rsp->Buffer;

		info->DeviceType = cpu_to_le32(stfs.f_type);
		info->DeviceCharacteristics = cpu_to_le32(0x00000020);
		info->DeviceType = cpu_to_le32(FILE_DEVICE_DISK);
		info->DeviceCharacteristics =
			cpu_to_le32(FILE_DEVICE_IS_MOUNTED);
		if (!test_tree_conn_flag(work->tcon,
					 KSMBD_TREE_CONN_FLAG_WRITABLE))
			info->DeviceCharacteristics |=
				cpu_to_le32(FILE_READ_ONLY_DEVICE);
		rsp->OutputBufferLength = cpu_to_le32(8);
		break;
	}

@ -77,7 +77,7 @@ struct cdrom_device_ops {
				     unsigned int clearing, int slot);
	int (*tray_move) (struct cdrom_device_info *, int);
	int (*lock_door) (struct cdrom_device_info *, int);
	int (*select_speed) (struct cdrom_device_info *, int);
	int (*select_speed) (struct cdrom_device_info *, unsigned long);
	int (*get_last_session) (struct cdrom_device_info *,
				 struct cdrom_multisession *);
	int (*get_mcn) (struct cdrom_device_info *,

@ -19,6 +19,7 @@
enum fscache_cache_trace;
enum fscache_cookie_trace;
enum fscache_access_trace;
enum fscache_volume_trace;

enum fscache_cache_state {
	FSCACHE_CACHE_IS_NOT_PRESENT,	/* No cache is present for this name */

@ -97,6 +98,11 @@ extern void fscache_withdraw_cookie(struct fscache_cookie *cookie);

extern void fscache_io_error(struct fscache_cache *cache);

extern struct fscache_volume *
fscache_try_get_volume(struct fscache_volume *volume,
		       enum fscache_volume_trace where);
extern void fscache_put_volume(struct fscache_volume *volume,
			       enum fscache_volume_trace where);
extern void fscache_end_volume_access(struct fscache_volume *volume,
				      struct fscache_cookie *cookie,
				      enum fscache_access_trace why);

@ -263,54 +263,9 @@ static inline bool folio_try_get(struct folio *folio)
	return folio_ref_add_unless(folio, 1, 0);
}

static inline bool folio_ref_try_add_rcu(struct folio *folio, int count)
static inline bool folio_ref_try_add(struct folio *folio, int count)
{
#ifdef CONFIG_TINY_RCU
	/*
	 * The caller guarantees the folio will not be freed from interrupt
	 * context, so (on !SMP) we only need preemption to be disabled
	 * and TINY_RCU does that for us.
	 */
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
	VM_BUG_ON_FOLIO(folio_ref_count(folio) == 0, folio);
	folio_ref_add(folio, count);
#else
	if (unlikely(!folio_ref_add_unless(folio, count, 0))) {
		/* Either the folio has been freed, or will be freed. */
		return false;
	}
#endif
	return true;
}

/**
 * folio_try_get_rcu - Attempt to increase the refcount on a folio.
 * @folio: The folio.
 *
 * This is a version of folio_try_get() optimised for non-SMP kernels.
 * If you are still holding the rcu_read_lock() after looking up the
 * page and know that the page cannot have its refcount decreased to
 * zero in interrupt context, you can use this instead of folio_try_get().
 *
 * Example users include get_user_pages_fast() (as pages are not unmapped
 * from interrupt context) and the page cache lookups (as pages are not
 * truncated from interrupt context). We also know that pages are not
 * frozen in interrupt context for the purposes of splitting or migration.
 *
 * You can also use this function if you're holding a lock that prevents
 * pages being frozen & removed; eg the i_pages lock for the page cache
 * or the mmap_lock or page table lock for page tables. In this case,
 * it will always succeed, and you could have used a plain folio_get(),
 * but it's sometimes more convenient to have a common function called
 * from both locked and RCU-protected contexts.
 *
 * Return: True if the reference count was successfully incremented.
 */
static inline bool folio_try_get_rcu(struct folio *folio)
{
	return folio_ref_try_add_rcu(folio, 1);
	return folio_ref_add_unless(folio, count, 0);
}

static inline int page_ref_freeze(struct page *page, int count)

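folio_ref_try_add() now funnels every configuration through folio_ref_add_unless(), whose one rule is: never resurrect a reference count that has already hit zero. A sketch of that primitive with C11 atomics (illustrative, not the kernel implementation):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool ref_add_unless_zero(atomic_int *ref, int count)
{
	int old = atomic_load(ref);

	while (old != 0) {
		/* on failure, `old` is reloaded and the loop re-checks it */
		if (atomic_compare_exchange_weak(ref, &old, old + count))
			return true;
	}
	return false;	/* object is already (being) freed */
}

int main(void)
{
	atomic_int live = 1, dead = 0;

	printf("live: %d, dead: %d\n",
	       ref_add_unless_zero(&live, 1), ref_add_unless_zero(&dead, 1));
	return 0;
}
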
@ -1049,12 +1049,13 @@ struct spi_transfer {
	unsigned	dummy_data:1;
	unsigned	cs_off:1;
	unsigned	cs_change:1;
	unsigned	tx_nbits:3;
	unsigned	rx_nbits:3;
	unsigned	tx_nbits:4;
	unsigned	rx_nbits:4;
	unsigned	timestamped:1;
#define	SPI_NBITS_SINGLE	0x01 /* 1-bit transfer */
#define	SPI_NBITS_DUAL		0x02 /* 2-bit transfer */
#define	SPI_NBITS_QUAD		0x04 /* 4-bit transfer */
#define	SPI_NBITS_OCTAL		0x08 /* 8-bit transfer */
	u8		bits_per_word;
	struct spi_delay	delay;
	struct spi_delay	cs_change_delay;

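The field widening is forced by the new flag value: SPI_NBITS_OCTAL is 0x08, which needs a fourth bit, while the old 3-bit tx_nbits/rx_nbits fields top out at 0x07. A compilable illustration:

#include <stdio.h>

struct xfer_old { unsigned tx_nbits : 3; };	/* can hold at most 0x07 */
struct xfer_new { unsigned tx_nbits : 4; };	/* has room for 0x08 */

int main(void)
{
	/* a 3-bit field would silently truncate 0x08 to 0, so mask explicitly */
	struct xfer_old o = { .tx_nbits = 0x08 & 0x07 };
	struct xfer_new n = { .tx_nbits = 0x08 };

	printf("3-bit field: %u, 4-bit field: %u\n", o.tx_nbits, n.tx_nbits);
	return 0;
}
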
@ -38,6 +38,8 @@ int __hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
			     const void *param, u8 event, u32 timeout,
			     struct sock *sk);
int hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
			const void *param, u32 timeout);

void hci_cmd_sync_init(struct hci_dev *hdev);
void hci_cmd_sync_clear(struct hci_dev *hdev);

@ -36,6 +36,7 @@ snd_pcm_uframes_t snd_dmaengine_pcm_pointer_no_residue(struct snd_pcm_substream
int snd_dmaengine_pcm_open(struct snd_pcm_substream *substream,
			   struct dma_chan *chan);
int snd_dmaengine_pcm_close(struct snd_pcm_substream *substream);
int snd_dmaengine_pcm_sync_stop(struct snd_pcm_substream *substream);

int snd_dmaengine_pcm_open_request_chan(struct snd_pcm_substream *substream,
					dma_filter_fn filter_fn, void *filter_data);

@ -35,12 +35,14 @@ enum fscache_volume_trace {
	fscache_volume_get_cookie,
	fscache_volume_get_create_work,
	fscache_volume_get_hash_collision,
	fscache_volume_get_withdraw,
	fscache_volume_free,
	fscache_volume_new_acquire,
	fscache_volume_put_cookie,
	fscache_volume_put_create_work,
	fscache_volume_put_hash_collision,
	fscache_volume_put_relinquish,
	fscache_volume_put_withdraw,
	fscache_volume_see_create_work,
	fscache_volume_see_hash_wake,
	fscache_volume_wait_create_work,

@ -120,12 +122,14 @@ enum fscache_access_trace {
	EM(fscache_volume_get_cookie,		"GET cook ")	\
	EM(fscache_volume_get_create_work,	"GET creat")	\
	EM(fscache_volume_get_hash_collision,	"GET hcoll")	\
	EM(fscache_volume_get_withdraw,		"GET withd")	\
	EM(fscache_volume_free,			"FREE     ")	\
	EM(fscache_volume_new_acquire,		"NEW acq  ")	\
	EM(fscache_volume_put_cookie,		"PUT cook ")	\
	EM(fscache_volume_put_create_work,	"PUT creat")	\
	EM(fscache_volume_put_hash_collision,	"PUT hcoll")	\
	EM(fscache_volume_put_relinquish,	"PUT relnq")	\
	EM(fscache_volume_put_withdraw,		"PUT withd")	\
	EM(fscache_volume_see_create_work,	"SEE creat")	\
	EM(fscache_volume_see_hash_wake,	"SEE hwake")	\
	E_(fscache_volume_wait_create_work,	"WAIT crea")

@ -618,6 +618,8 @@
#define KEY_CAMERA_ACCESS_ENABLE	0x24b	/* Enables programmatic access to camera devices. (HUTRR72) */
#define KEY_CAMERA_ACCESS_DISABLE	0x24c	/* Disables programmatic access to camera devices. (HUTRR72) */
#define KEY_CAMERA_ACCESS_TOGGLE	0x24d	/* Toggles the current state of the camera access control. (HUTRR72) */
#define KEY_ACCESSIBILITY		0x24e	/* Toggles the system bound accessibility UI/command (HUTRR116) */
#define KEY_DO_NOT_DISTURB		0x24f	/* Toggles the system-wide "Do Not Disturb" control (HUTRR94)*/

#define KEY_BRIGHTNESS_MIN		0x250	/* Set Brightness to Minimum */
#define KEY_BRIGHTNESS_MAX		0x251	/* Set Brightness to Maximum */

@ -539,13 +539,7 @@ config CPUMASK_OFFSTACK
	  stack overflow.

config FORCE_NR_CPUS
	bool "Set number of CPUs at compile time"
	depends on SMP && EXPERT && !COMPILE_TEST
	help
	  Say Yes if you have NR_CPUS set to an actual number of possible
	  CPUs in your system, not to a default value. This forces the core
	  code to rely on compile-time value and optimize kernel routines
	  better.
	def_bool !SMP

config CPU_RMAP
	bool

mm/filemap.c

@ -1834,7 +1834,7 @@ repeat:
	if (!folio || xa_is_value(folio))
		goto out;

	if (!folio_try_get_rcu(folio))
	if (!folio_try_get(folio))
		goto repeat;

	if (unlikely(folio != xas_reload(&xas))) {

@ -1990,7 +1990,7 @@ retry:
	if (!folio || xa_is_value(folio))
		return folio;

	if (!folio_try_get_rcu(folio))
	if (!folio_try_get(folio))
		goto reset;

	if (unlikely(folio != xas_reload(xas))) {

@ -2208,7 +2208,7 @@ unsigned filemap_get_folios_contig(struct address_space *mapping,
		if (xa_is_value(folio))
			goto update_start;

		if (!folio_try_get_rcu(folio))
		if (!folio_try_get(folio))
			goto retry;

		if (unlikely(folio != xas_reload(&xas)))

@ -2343,7 +2343,7 @@ static void filemap_get_read_batch(struct address_space *mapping,
			break;
		if (xa_is_sibling(folio))
			break;
		if (!folio_try_get_rcu(folio))
		if (!folio_try_get(folio))
			goto retry;

		if (unlikely(folio != xas_reload(&xas)))

@ -3455,7 +3455,7 @@ static struct folio *next_uptodate_folio(struct xa_state *xas,
			continue;
		if (folio_test_locked(folio))
			continue;
		if (!folio_try_get_rcu(folio))
		if (!folio_try_get(folio))
			continue;
		/* Has the page moved or been split? */
		if (unlikely(folio != xas_reload(xas)))

mm/gup.c

@ -76,7 +76,7 @@ retry:
	folio = page_folio(page);
	if (WARN_ON_ONCE(folio_ref_count(folio) < 0))
		return NULL;
	if (unlikely(!folio_ref_try_add_rcu(folio, refs)))
	if (unlikely(!folio_ref_try_add(folio, refs)))
		return NULL;

	/*

@ -63,50 +63,6 @@ DEFINE_MUTEX(hci_cb_list_lock);
/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	return 0;
}

static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
	return 0;
}

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
	return 0;
}

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
	return 0;
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)

@ -728,6 +684,7 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	__le16 policy;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))

@ -754,8 +711,8 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE,
					    1, &dr.dev_opt, HCI_CMD_TIMEOUT);
		break;

	case HCISETENCRYPT:

@ -766,19 +723,23 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT, NULL);
			err = __hci_cmd_sync_status(hdev,
						    HCI_OP_WRITE_AUTH_ENABLE,
						    1, &dr.dev_opt,
						    HCI_CMD_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_ENCRYPT_MODE,
					    1, &dr.dev_opt,
					    HCI_CMD_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE,
					    1, &dr.dev_opt,
					    HCI_CMD_TIMEOUT);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.

@ -788,8 +749,11 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		policy = cpu_to_le16(dr.dev_opt);

		err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
					    2, &policy,
					    HCI_CMD_TIMEOUT);
		break;

	case HCISETLINKMODE:

@ -2704,7 +2668,11 @@ void hci_unregister_dev(struct hci_dev *hdev)
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	cancel_work_sync(&hdev->rx_work);
	cancel_work_sync(&hdev->cmd_work);
	cancel_work_sync(&hdev->tx_work);
	cancel_work_sync(&hdev->power_on);
	cancel_work_sync(&hdev->error_reset);

	hci_cmd_sync_clear(hdev);

@ -280,6 +280,19 @@ int __hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
}
EXPORT_SYMBOL(__hci_cmd_sync_status);

int hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
			const void *param, u32 timeout)
{
	int err;

	hci_req_sync_lock(hdev);
	err = __hci_cmd_sync_status(hdev, opcode, plen, param, timeout);
	hci_req_sync_unlock(hdev);

	return err;
}
EXPORT_SYMBOL(hci_cmd_sync_status);

static void hci_cmd_sync_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_work);

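hci_cmd_sync_status() is the usual locked-wrapper pattern: the double-underscore variant assumes the caller already holds the request-sync lock, and the new export takes and drops it around a single call. A generic sketch of the pattern with pthreads (names illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t req_sync_lock = PTHREAD_MUTEX_INITIALIZER;

static int __cmd_sync_status(int opcode)	/* caller holds req_sync_lock */
{
	return opcode ? 0 : -1;			/* stand-in for the real work */
}

static int cmd_sync_status(int opcode)		/* takes the lock itself */
{
	int err;

	pthread_mutex_lock(&req_sync_lock);
	err = __cmd_sync_status(opcode);
	pthread_mutex_unlock(&req_sync_lock);
	return err;
}

int main(void)
{
	printf("err=%d\n", cmd_sync_status(1));
	return 0;
}
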
Some files were not shown because too many files have changed in this diff.