Merge linux 6.6.58

Conflicts:
	fs/xfs/libxfs/xfs_inode_buf.c

Signed-off-by: Jianping Liu <frankjpliu@tencent.com>
Jianping Liu 2024-10-28 10:55:21 +08:00
commit 754fb4ce83
118 changed files with 1102 additions and 570 deletions


@ -8,7 +8,7 @@ else
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 6
SUBLEVEL = 57
SUBLEVEL = 58
EXTRAVERSION =
NAME = Pinguïn Aangedreven


@ -10,11 +10,9 @@
#include <asm/insn.h>
#include <asm/probes.h>
#define MAX_UINSN_BYTES AARCH64_INSN_SIZE
#define UPROBE_SWBP_INSN cpu_to_le32(BRK64_OPCODE_UPROBES)
#define UPROBE_SWBP_INSN_SIZE AARCH64_INSN_SIZE
#define UPROBE_XOL_SLOT_BYTES MAX_UINSN_BYTES
#define UPROBE_XOL_SLOT_BYTES AARCH64_INSN_SIZE
typedef __le32 uprobe_opcode_t;
@ -23,8 +21,8 @@ struct arch_uprobe_task {
struct arch_uprobe {
union {
u8 insn[MAX_UINSN_BYTES];
u8 ixol[MAX_UINSN_BYTES];
__le32 insn;
__le32 ixol;
};
struct arch_probe_insn api;
bool simulate;


@ -99,10 +99,6 @@ arm_probe_decode_insn(probe_opcode_t insn, struct arch_probe_insn *api)
aarch64_insn_is_blr(insn) ||
aarch64_insn_is_ret(insn)) {
api->handler = simulate_br_blr_ret;
} else if (aarch64_insn_is_ldr_lit(insn)) {
api->handler = simulate_ldr_literal;
} else if (aarch64_insn_is_ldrsw_lit(insn)) {
api->handler = simulate_ldrsw_literal;
} else {
/*
* Instruction cannot be stepped out-of-line and we don't
@ -140,6 +136,17 @@ arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi)
probe_opcode_t insn = le32_to_cpu(*addr);
probe_opcode_t *scan_end = NULL;
unsigned long size = 0, offset = 0;
struct arch_probe_insn *api = &asi->api;
if (aarch64_insn_is_ldr_lit(insn)) {
api->handler = simulate_ldr_literal;
decoded = INSN_GOOD_NO_SLOT;
} else if (aarch64_insn_is_ldrsw_lit(insn)) {
api->handler = simulate_ldrsw_literal;
decoded = INSN_GOOD_NO_SLOT;
} else {
decoded = arm_probe_decode_insn(insn, &asi->api);
}
/*
* If there's a symbol defined in front of and near enough to
@ -157,7 +164,6 @@ arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi)
else
scan_end = addr - MAX_ATOMIC_CONTEXT_SIZE;
}
decoded = arm_probe_decode_insn(insn, &asi->api);
if (decoded != INSN_REJECTED && scan_end)
if (is_probed_address_atomic(addr - 1, scan_end))


@ -171,17 +171,15 @@ simulate_tbz_tbnz(u32 opcode, long addr, struct pt_regs *regs)
void __kprobes
simulate_ldr_literal(u32 opcode, long addr, struct pt_regs *regs)
{
u64 *load_addr;
unsigned long load_addr;
int xn = opcode & 0x1f;
int disp;
disp = ldr_displacement(opcode);
load_addr = (u64 *) (addr + disp);
load_addr = addr + ldr_displacement(opcode);
if (opcode & (1 << 30)) /* x0-x30 */
set_x_reg(regs, xn, *load_addr);
set_x_reg(regs, xn, READ_ONCE(*(u64 *)load_addr));
else /* w0-w30 */
set_w_reg(regs, xn, *load_addr);
set_w_reg(regs, xn, READ_ONCE(*(u32 *)load_addr));
instruction_pointer_set(regs, instruction_pointer(regs) + 4);
}
@ -189,14 +187,12 @@ simulate_ldr_literal(u32 opcode, long addr, struct pt_regs *regs)
void __kprobes
simulate_ldrsw_literal(u32 opcode, long addr, struct pt_regs *regs)
{
s32 *load_addr;
unsigned long load_addr;
int xn = opcode & 0x1f;
int disp;
disp = ldr_displacement(opcode);
load_addr = (s32 *) (addr + disp);
load_addr = addr + ldr_displacement(opcode);
set_x_reg(regs, xn, *load_addr);
set_x_reg(regs, xn, READ_ONCE(*(s32 *)load_addr));
instruction_pointer_set(regs, instruction_pointer(regs) + 4);
}


@ -42,7 +42,7 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
else if (!IS_ALIGNED(addr, AARCH64_INSN_SIZE))
return -EINVAL;
insn = *(probe_opcode_t *)(&auprobe->insn[0]);
insn = le32_to_cpu(auprobe->insn);
switch (arm_probe_decode_insn(insn, &auprobe->api)) {
case INSN_REJECTED:
@ -108,7 +108,7 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
if (!auprobe->simulate)
return false;
insn = *(probe_opcode_t *)(&auprobe->insn[0]);
insn = le32_to_cpu(auprobe->insn);
addr = instruction_pointer(regs);
if (auprobe->api.handler)


@ -77,7 +77,7 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
vcpu->stat.instruction_diagnose_258++;
if (vcpu->run->s.regs.gprs[rx] & 7)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], rx, &parm, sizeof(parm));
rc = read_guest_real(vcpu, vcpu->run->s.regs.gprs[rx], &parm, sizeof(parm));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
if (parm.parm_version != 2 || parm.parm_len < 5 || parm.code != 0x258)


@ -1001,6 +1001,8 @@ static int access_guest_page(struct kvm *kvm, enum gacc_mode mode, gpa_t gpa,
const gfn_t gfn = gpa_to_gfn(gpa);
int rc;
if (!gfn_to_memslot(kvm, gfn))
return PGM_ADDRESSING;
if (mode == GACC_STORE)
rc = kvm_write_guest_page(kvm, gfn, data, offset, len);
else
@ -1158,6 +1160,8 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
gra += fragment_len;
data += fragment_len;
}
if (rc > 0)
vcpu->arch.pgm.code = rc;
return rc;
}


@ -405,11 +405,12 @@ int read_guest_abs(struct kvm_vcpu *vcpu, unsigned long gpa, void *data,
* @len: number of bytes to copy
*
* Copy @len bytes from @data (kernel space) to @gra (guest real address).
* It is up to the caller to ensure that the entire guest memory range is
* valid memory before calling this function.
* Guest low address and key protection are not checked.
*
* Returns zero on success or -EFAULT on error.
* Returns zero on success, -EFAULT when copying from @data failed, or
* PGM_ADDRESSING in case @gra is outside a memslot. In this case, pgm check info
* is also stored to allow injecting into the guest (if applicable) using
* kvm_s390_inject_prog_cond().
*
* If an error occurs data may have been copied partially to guest memory.
*/
@ -428,11 +429,12 @@ int write_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
* @len: number of bytes to copy
*
* Copy @len bytes from @gra (guest real address) to @data (kernel space).
* It is up to the caller to ensure that the entire guest memory range is
* valid memory before calling this function.
* Guest key protection is not checked.
*
* Returns zero on success or -EFAULT on error.
* Returns zero on success, -EFAULT when copying to @data failed, or
* PGM_ADDRESSING in case @gra is outside a memslot. In this case, pgm check info
* is also stored to allow injecting into the guest (if applicable) using
* kvm_s390_inject_prog_cond().
*
* If an error occurs data may have been copied partially to kernel space.
*/


@ -9,6 +9,8 @@
#include <asm/unwind_hints.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/cpufeatures.h>
#include <asm/nospec-branch.h>
.pushsection .noinstr.text, "ax"
@ -17,6 +19,9 @@ SYM_FUNC_START(entry_ibpb)
movl $PRED_CMD_IBPB, %eax
xorl %edx, %edx
wrmsr
/* Make sure IBPB clears return stack predictions too. */
FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_BUG_IBPB_NO_RET
RET
SYM_FUNC_END(entry_ibpb)
/* For KVM */


@ -875,6 +875,8 @@ SYM_FUNC_START(entry_SYSENTER_32)
/* Now ready to switch the cr3 */
SWITCH_TO_USER_CR3 scratch_reg=%eax
/* Clobbers ZF */
CLEAR_CPU_BUFFERS
/*
* Restore all flags except IF. (We restore IF separately because
@ -885,7 +887,6 @@ SYM_FUNC_START(entry_SYSENTER_32)
BUG_IF_WRONG_CR3 no_user_check=1
popfl
popl %eax
CLEAR_CPU_BUFFERS
/*
* Return back to the vDSO, which will pop ecx and edx.
@ -1148,7 +1149,6 @@ SYM_CODE_START(asm_exc_nmi)
/* Not on SYSENTER stack. */
call exc_nmi
CLEAR_CPU_BUFFERS
jmp .Lnmi_return
.Lnmi_from_sysenter_stack:
@ -1169,6 +1169,7 @@ SYM_CODE_START(asm_exc_nmi)
CHECK_AND_APPLY_ESPFIX
RESTORE_ALL_NMI cr3_reg=%edi pop=4
CLEAR_CPU_BUFFERS
jmp .Lirq_return
#ifdef CONFIG_X86_ESPFIX32
@ -1210,6 +1211,7 @@ SYM_CODE_START(asm_exc_nmi)
* 1 - orig_ax
*/
lss (1+5+6)*4(%esp), %esp # back to espfix stack
CLEAR_CPU_BUFFERS
jmp .Lirq_return
#endif
SYM_CODE_END(asm_exc_nmi)


@ -235,7 +235,7 @@
#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */
#define X86_FEATURE_LS_CFG_SSBD ( 7*32+24) /* "" AMD SSBD implementation via LS_CFG MSR */
#define X86_FEATURE_IBRS ( 7*32+25) /* Indirect Branch Restricted Speculation */
#define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */
#define X86_FEATURE_IBPB ( 7*32+26) /* "ibpb" Indirect Branch Prediction Barrier without a guaranteed RSB flush */
#define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */
#define X86_FEATURE_ZEN ( 7*32+28) /* "" Generic flag for all Zen and newer */
#define X86_FEATURE_L1TF_PTEINV ( 7*32+29) /* "" L1TF workaround PTE inversion */
@ -367,6 +367,7 @@
#define X86_FEATURE_CPPC (13*32+27) /* Collaborative Processor Performance Control */
#define X86_FEATURE_AMD_PSFD (13*32+28) /* "" Predictive Store Forwarding Disable */
#define X86_FEATURE_BTC_NO (13*32+29) /* "" Not vulnerable to Branch Type Confusion */
#define X86_FEATURE_AMD_IBPB_RET (13*32+30) /* "" IBPB clears return address predictor */
#define X86_FEATURE_BRS (13*32+31) /* Branch Sampling available */
/* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
@ -546,4 +547,5 @@
#define X86_BUG_DIV0 X86_BUG(1*32 + 1) /* AMD DIV0 speculation bug */
#define X86_BUG_RFDS X86_BUG(1*32 + 2) /* CPU is vulnerable to Register File Data Sampling */
#define X86_BUG_BHI X86_BUG(1*32 + 3) /* CPU is affected by Branch History Injection */
#define X86_BUG_IBPB_NO_RET X86_BUG(1*32 + 4) /* "ibpb_no_ret" IBPB omits return target predictions */
#endif /* _ASM_X86_CPUFEATURES_H */


@ -332,7 +332,16 @@
* Note: Only the memory operand variant of VERW clears the CPU buffers.
*/
.macro CLEAR_CPU_BUFFERS
ALTERNATIVE "", __stringify(verw _ASM_RIP(mds_verw_sel)), X86_FEATURE_CLEAR_CPU_BUF
#ifdef CONFIG_X86_64
ALTERNATIVE "", "verw mds_verw_sel(%rip)", X86_FEATURE_CLEAR_CPU_BUF
#else
/*
* In 32bit mode, the memory operand must be a %cs reference. The data
* segments may not be usable (vm86 mode), and the stack segment may not
* be flat (ESPFIX32).
*/
ALTERNATIVE "", "verw %cs:mds_verw_sel", X86_FEATURE_CLEAR_CPU_BUF
#endif
.endm
#ifdef CONFIG_X86_64


@ -473,7 +473,19 @@ static int lapic_timer_shutdown(struct clock_event_device *evt)
v = apic_read(APIC_LVTT);
v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
apic_write(APIC_LVTT, v);
apic_write(APIC_TMICT, 0);
/*
* Setting APIC_LVT_MASKED (above) should be enough to tell
* the hardware that this timer will never fire. But AMD
* erratum 411 and some Intel CPU behavior circa 2024 say
* otherwise. Time for belt and suspenders programming: mask
* the timer _and_ zero the counter registers:
*/
if (v & APIC_LVT_TIMER_TSCDEADLINE)
wrmsrl(MSR_IA32_TSC_DEADLINE, 0);
else
apic_write(APIC_TMICT, 0);
return 0;
}


@ -1338,7 +1338,8 @@ void amd_check_microcode(void)
if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
return;
on_each_cpu(zenbleed_check_cpu, NULL, 1);
if (cpu_feature_enabled(X86_FEATURE_ZEN2))
on_each_cpu(zenbleed_check_cpu, NULL, 1);
}
/*


@ -1113,8 +1113,25 @@ do_cmd_auto:
case RETBLEED_MITIGATION_IBPB:
setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
/*
* IBPB on entry already obviates the need for
* software-based untraining so clear those in case some
* other mitigation like SRSO has selected them.
*/
setup_clear_cpu_cap(X86_FEATURE_UNRET);
setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
mitigate_smt = true;
/*
* There is no need for RSB filling: entry_ibpb() ensures
* all predictions, including the RSB, are invalidated,
* regardless of IBPB implementation.
*/
setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
break;
case RETBLEED_MITIGATION_STUFF:
@ -2610,6 +2627,14 @@ static void __init srso_select_mitigation(void)
if (has_microcode) {
setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
srso_mitigation = SRSO_MITIGATION_IBPB;
/*
* IBPB on entry already obviates the need for
* software-based untraining so clear those in case some
* other mitigation like Retbleed has selected them.
*/
setup_clear_cpu_cap(X86_FEATURE_UNRET);
setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
}
} else {
pr_err("WARNING: kernel not compiled with CPU_IBPB_ENTRY.\n");
@ -2622,6 +2647,13 @@ static void __init srso_select_mitigation(void)
if (!boot_cpu_has(X86_FEATURE_ENTRY_IBPB) && has_microcode) {
setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
/*
* There is no need for RSB filling: entry_ibpb() ensures
* all predictions, including the RSB, are invalidated,
* regardless of IBPB implementation.
*/
setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
}
} else {
pr_err("WARNING: kernel not compiled with CPU_SRSO.\n");


@ -1484,6 +1484,9 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
boot_cpu_has(X86_FEATURE_HYPERVISOR)))
setup_force_cpu_bug(X86_BUG_BHI);
if (cpu_has(c, X86_FEATURE_AMD_IBPB) && !cpu_has(c, X86_FEATURE_AMD_IBPB_RET))
setup_force_cpu_bug(X86_BUG_IBPB_NO_RET);
if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
return;


@ -194,7 +194,7 @@ static inline bool rdt_get_mb_table(struct rdt_resource *r)
return false;
}
static bool __get_mem_config_intel(struct rdt_resource *r)
static __init bool __get_mem_config_intel(struct rdt_resource *r)
{
struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
union cpuid_0x10_3_eax eax;
@ -228,7 +228,7 @@ static bool __get_mem_config_intel(struct rdt_resource *r)
return true;
}
static bool __rdt_get_mem_config_amd(struct rdt_resource *r)
static __init bool __rdt_get_mem_config_amd(struct rdt_resource *r)
{
struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
u32 eax, ebx, ecx, edx, subleaf;


@ -219,8 +219,8 @@ static int rq_qos_wake_function(struct wait_queue_entry *curr,
data->got_token = true;
smp_wmb();
list_del_init(&curr->entry);
wake_up_process(data->task);
list_del_init_careful(&curr->entry);
return 1;
}


@ -2327,10 +2327,19 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
* TODO: provide forward progress for RECOVERY handler, so that
* unprivileged device can benefit from it
*/
if (info.flags & UBLK_F_UNPRIVILEGED_DEV)
if (info.flags & UBLK_F_UNPRIVILEGED_DEV) {
info.flags &= ~(UBLK_F_USER_RECOVERY_REISSUE |
UBLK_F_USER_RECOVERY);
/*
* For USER_COPY, we depend on userspace to fill the request
* buffer by pwrite() to the ublk char device, which can't be
* used for an unprivileged device
*/
if (info.flags & UBLK_F_USER_COPY)
return -EINVAL;
}
/* the created device is always owned by current user */
ublk_store_owner_uid_gid(&info.owner_uid, &info.owner_gid);


@ -1354,10 +1354,15 @@ static int btusb_submit_intr_urb(struct hci_dev *hdev, gfp_t mem_flags)
if (!urb)
return -ENOMEM;
/* Use maximum HCI Event size so the USB stack handles
* ZPL/short-transfer automatically.
*/
size = HCI_MAX_EVENT_SIZE;
if (le16_to_cpu(data->udev->descriptor.idVendor) == 0x0a12 &&
le16_to_cpu(data->udev->descriptor.idProduct) == 0x0001)
/* Fake CSR devices don't seem to support short-transfer */
size = le16_to_cpu(data->intr_ep->wMaxPacketSize);
else
/* Use maximum HCI Event size so the USB stack handles
* ZPL/short-transfer automatically.
*/
size = HCI_MAX_EVENT_SIZE;
buf = kmalloc(size, mem_flags);
if (!buf) {


@ -265,7 +265,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
/* Only a single BO list is allowed to simplify handling. */
if (p->bo_list)
ret = -EINVAL;
goto free_partial_kdata;
ret = amdgpu_cs_p1_bo_handles(p, p->chunks[i].kdata);
if (ret)


@ -1843,7 +1843,7 @@ static int smu_bump_power_profile_mode(struct smu_context *smu,
static int smu_adjust_power_state_dynamic(struct smu_context *smu,
enum amd_dpm_forced_level level,
bool skip_display_settings,
bool force_update)
bool init)
{
int ret = 0;
int index = 0;
@ -1872,7 +1872,7 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
}
}
if (force_update || smu_dpm_ctx->dpm_level != level) {
if (smu_dpm_ctx->dpm_level != level) {
ret = smu_asic_set_performance_level(smu, level);
if (ret) {
dev_err(smu->adev->dev, "Failed to set performance level!");
@ -1889,7 +1889,7 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
workload[0] = smu->workload_setting[index];
if (force_update || smu->power_profile_mode != workload[0])
if (init || smu->power_profile_mode != workload[0])
smu_bump_power_profile_mode(smu, workload, 0);
}


@ -42,7 +42,7 @@ static uint32_t radeon_encoder_clones(struct drm_encoder *encoder)
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_encoder *clone_encoder;
uint32_t index_mask = 0;
uint32_t index_mask = drm_encoder_mask(encoder);
int count;
/* DIG routing gets problematic */


@ -1659,6 +1659,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
DRM_ERROR("Surface size cannot exceed %dx%d\n",
dev_priv->texture_max_width,
dev_priv->texture_max_height);
ret = -EINVAL;
goto err_out;
}


@ -415,6 +415,8 @@ config IIO_ST_ACCEL_SPI_3AXIS
config IIO_KX022A
tristate
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
config IIO_KX022A_SPI
tristate "Kionix KX022A tri-axis digital accelerometer SPI interface"


@ -1286,6 +1286,8 @@ config TI_ADS8344
config TI_ADS8688
tristate "Texas Instruments ADS8688"
depends on SPI
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
If you say yes here you get support for Texas Instruments ADS8684
and ADS8688 ADC chips
@ -1296,6 +1298,8 @@ config TI_ADS8688
config TI_ADS124S08
tristate "Texas Instruments ADS124S08"
depends on SPI
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
If you say yes here you get support for Texas Instruments ADS124S08
and ADS124S06 ADC chips
@ -1330,6 +1334,7 @@ config TI_AM335X_ADC
config TI_LMP92064
tristate "Texas Instruments LMP92064 ADC driver"
depends on SPI
select REGMAP_SPI
help
Say yes here to build support for the LMP92064 Precision Current and Voltage
sensor.


@ -27,6 +27,7 @@ config AD8366
config ADA4250
tristate "Analog Devices ADA4250 Instrumentation Amplifier"
depends on SPI
select REGMAP_SPI
help
Say yes here to build support for Analog Devices ADA4250
SPI Amplifier's support. The driver provides direct access via


@ -32,7 +32,7 @@ static ssize_t _hid_sensor_set_report_latency(struct device *dev,
latency = integer * 1000 + fract / 1000;
ret = hid_sensor_set_report_latency(attrb, latency);
if (ret < 0)
return len;
return ret;
attrb->latency_ms = hid_sensor_get_report_latency(attrb);


@ -9,6 +9,8 @@ menu "Digital to analog converters"
config AD3552R
tristate "Analog Devices AD3552R DAC driver"
depends on SPI_MASTER
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
Say yes here to build support for Analog Devices AD3552R
Digital to Analog Converter.
@ -214,6 +216,8 @@ config AD5764
config AD5766
tristate "Analog Devices AD5766/AD5767 DAC driver"
depends on SPI_MASTER
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
Say yes here to build support for Analog Devices AD5766, AD5767
Digital to Analog Converter.
@ -224,6 +228,7 @@ config AD5766
config AD5770R
tristate "Analog Devices AD5770R IDAC driver"
depends on SPI_MASTER
select REGMAP_SPI
help
Say yes here to build support for Analog Devices AD5770R Digital to
Analog Converter.
@ -315,6 +320,7 @@ config LPC18XX_DAC
config LTC1660
tristate "Linear Technology LTC1660/LTC1665 DAC SPI driver"
depends on SPI
select REGMAP_SPI
help
Say yes here to build support for Linear Technology
LTC1660 and LTC1665 Digital to Analog Converters.
@ -424,6 +430,7 @@ config STM32_DAC
config STM32_DAC_CORE
tristate
select REGMAP_MMIO
config TI_DAC082S085
tristate "Texas Instruments 8/10/12-bit 2/4-channel DAC driver"


@ -53,6 +53,7 @@ config ADF4371
config ADF4377
tristate "Analog Devices ADF4377 Microwave Wideband Synthesizer"
depends on SPI && COMMON_CLK
select REGMAP_SPI
help
Say yes here to build support for Analog Devices ADF4377 Microwave
Wideband Synthesizer.


@ -294,6 +294,8 @@ config ROHM_BU27008
depends on I2C
select REGMAP_I2C
select IIO_GTS_HELPER
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
Enable support for the ROHM BU27008 color sensor.
The ROHM BU27008 is a sensor with 5 photodiodes (red, green,


@ -138,6 +138,10 @@ static const struct opt3001_scale opt3001_scales[] = {
.val = 20966,
.val2 = 400000,
},
{
.val = 41932,
.val2 = 800000,
},
{
.val = 83865,
.val2 = 600000,


@ -99,9 +99,8 @@ static const char * const period_values[] = {
static ssize_t in_illuminance_period_available_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct veml6030_data *data = iio_priv(dev_to_iio_dev(dev));
int ret, reg, x;
struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
struct veml6030_data *data = iio_priv(indio_dev);
ret = regmap_read(data->regmap, VEML6030_REG_ALS_CONF, &reg);
if (ret) {
@ -780,7 +779,7 @@ static int veml6030_hw_init(struct iio_dev *indio_dev)
/* Cache currently active measurement parameters */
data->cur_gain = 3;
data->cur_resolution = 4608;
data->cur_resolution = 5376;
data->cur_integration_time = 3;
return ret;


@ -72,6 +72,8 @@ config LIDAR_LITE_V2
config MB1232
tristate "MaxSonar I2CXL family ultrasonic sensors"
depends on I2C
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
Say Y to build a driver for the ultrasonic sensors I2CXL of
MaxBotix which have an i2c interface. It can be used to measure


@ -217,6 +217,7 @@ static const struct xpad_device {
{ 0x0c12, 0x8810, "Zeroplus Xbox Controller", 0, XTYPE_XBOX },
{ 0x0c12, 0x9902, "HAMA VibraX - *FAULTY HARDWARE*", 0, XTYPE_XBOX },
{ 0x0d2f, 0x0002, "Andamiro Pump It Up pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
{ 0x0db0, 0x1901, "Micro Star International Xbox360 Controller for Windows", 0, XTYPE_XBOX360 },
{ 0x0e4c, 0x1097, "Radica Gamester Controller", 0, XTYPE_XBOX },
{ 0x0e4c, 0x1103, "Radica Gamester Reflex", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX },
{ 0x0e4c, 0x2390, "Radica Games Jtech Controller", 0, XTYPE_XBOX },
@ -486,6 +487,7 @@ static const struct usb_device_id xpad_table[] = {
XPAD_XBOX360_VENDOR(0x07ff), /* Mad Catz Gamepad */
XPAD_XBOXONE_VENDOR(0x0b05), /* ASUS controllers */
XPAD_XBOX360_VENDOR(0x0c12), /* Zeroplus X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x0db0), /* Micro Star International X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x0e6f), /* 0x0e6f Xbox 360 controllers */
XPAD_XBOXONE_VENDOR(0x0e6f), /* 0x0e6f Xbox One controllers */
XPAD_XBOX360_VENDOR(0x0f0d), /* Hori controllers */


@ -3985,8 +3985,10 @@ static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *op
*/
static void domain_context_clear(struct device_domain_info *info)
{
if (!dev_is_pci(info->dev))
if (!dev_is_pci(info->dev)) {
domain_context_clear_one(info, info->bus, info->devfn);
return;
}
pci_for_each_dma_alias(to_pci_dev(info->dev),
&domain_context_clear_one_cb, info);


@ -790,6 +790,7 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
struct its_vpe *vpe = valid_vpe(its, desc->its_vmapp_cmd.vpe);
unsigned long vpt_addr, vconf_addr;
u64 target;
bool alloc;
@ -799,9 +800,14 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
if (!desc->its_vmapp_cmd.valid) {
alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
if (is_v4_1(its)) {
alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
its_encode_alloc(cmd, alloc);
/*
* Unmapping a VPE is self-synchronizing on GICv4.1,
* no need to issue a VSYNC.
*/
vpe = NULL;
}
goto out;
@ -814,13 +820,13 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
its_encode_vpt_addr(cmd, vpt_addr);
its_encode_vpt_size(cmd, LPI_NRBITS - 1);
alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count);
if (!is_v4_1(its))
goto out;
vconf_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->its_vm->vprop_page));
alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count);
its_encode_alloc(cmd, alloc);
/*
@ -836,7 +842,7 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
out:
its_fixup_cmd(cmd);
return valid_vpe(its, desc->its_vmapp_cmd.vpe);
return vpe;
}
static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
@ -3823,6 +3829,13 @@ static int its_vpe_set_affinity(struct irq_data *d,
unsigned long flags;
int from, cpu;
/*
* Check if we're racing against a VPE being destroyed, for
* which we don't want to allow a VMOVP.
*/
if (!atomic_read(&vpe->vmapp_count))
return -EINVAL;
/*
* Changing affinity is mega expensive, so let's be as lazy as
* we can and only do it if we really have to. Also, if mapped
@ -4463,9 +4476,8 @@ static int its_vpe_init(struct its_vpe *vpe)
raw_spin_lock_init(&vpe->vpe_lock);
vpe->vpe_id = vpe_id;
vpe->vpt_page = vpt_page;
if (gic_rdists->has_rvpeid)
atomic_set(&vpe->vmapp_count, 0);
else
atomic_set(&vpe->vmapp_count, 0);
if (!gic_rdists->has_rvpeid)
vpe->vpe_proxy_event = -1;
return 0;


@ -120,16 +120,6 @@ static inline void plic_irq_toggle(const struct cpumask *mask,
}
}
static void plic_irq_enable(struct irq_data *d)
{
plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 1);
}
static void plic_irq_disable(struct irq_data *d)
{
plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 0);
}
static void plic_irq_unmask(struct irq_data *d)
{
struct plic_priv *priv = irq_data_get_irq_chip_data(d);
@ -144,6 +134,17 @@ static void plic_irq_mask(struct irq_data *d)
writel(0, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
}
static void plic_irq_enable(struct irq_data *d)
{
plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 1);
plic_irq_unmask(d);
}
static void plic_irq_disable(struct irq_data *d)
{
plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 0);
}
static void plic_irq_eoi(struct irq_data *d)
{
struct plic_handler *handler = this_cpu_ptr(&plic_handlers);


@ -364,6 +364,7 @@ static int pci1xxxx_otp_eeprom_probe(struct auxiliary_device *aux_dev,
if (is_eeprom_responsive(priv)) {
priv->nvmem_config_eeprom.type = NVMEM_TYPE_EEPROM;
priv->nvmem_config_eeprom.name = EEPROM_NAME;
priv->nvmem_config_eeprom.id = NVMEM_DEVID_AUTO;
priv->nvmem_config_eeprom.dev = &aux_dev->dev;
priv->nvmem_config_eeprom.owner = THIS_MODULE;
priv->nvmem_config_eeprom.reg_read = pci1xxxx_eeprom_read;
@ -383,6 +384,7 @@ static int pci1xxxx_otp_eeprom_probe(struct auxiliary_device *aux_dev,
priv->nvmem_config_otp.type = NVMEM_TYPE_OTP;
priv->nvmem_config_otp.name = OTP_NAME;
priv->nvmem_config_otp.id = NVMEM_DEVID_AUTO;
priv->nvmem_config_otp.dev = &aux_dev->dev;
priv->nvmem_config_otp.owner = THIS_MODULE;
priv->nvmem_config_otp.reg_read = pci1xxxx_otp_read;


@ -930,9 +930,6 @@ static int macb_mdiobus_register(struct macb *bp)
return ret;
}
if (of_phy_is_fixed_link(np))
return mdiobus_register(bp->mii_bus);
/* Only create the PHY from the device tree if at least one PHY is
* described. Otherwise scan the entire MDIO bus. We do this to support
* old device tree that did not follow the best practices and did not
@ -953,8 +950,19 @@ static int macb_mdiobus_register(struct macb *bp)
static int macb_mii_init(struct macb *bp)
{
struct device_node *child, *np = bp->pdev->dev.of_node;
int err = -ENXIO;
/* With fixed-link, we don't need to register the MDIO bus,
* except if we have a child named "mdio" in the device tree.
* In that case, some devices may be attached to the MACB's MDIO bus.
*/
child = of_get_child_by_name(np, "mdio");
if (child)
of_node_put(child);
else if (of_phy_is_fixed_link(np))
return macb_mii_probe(bp->dev);
/* Enable management port */
macb_writel(bp, NCR, MACB_BIT(MPE));


@ -902,6 +902,7 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
if (unlikely(tx_frm_cnt && netif_carrier_ok(ndev) &&
__netif_subqueue_stopped(ndev, tx_ring->index) &&
!test_bit(ENETC_TX_DOWN, &priv->flags) &&
(enetc_bd_unused(tx_ring) >= ENETC_TXBDS_MAX_NEEDED))) {
netif_wake_subqueue(ndev, tx_ring->index);
}
@ -1380,6 +1381,9 @@ int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
int xdp_tx_bd_cnt, i, k;
int xdp_tx_frm_cnt = 0;
if (unlikely(test_bit(ENETC_TX_DOWN, &priv->flags)))
return -ENETDOWN;
enetc_lock_mdio();
tx_ring = priv->xdp_tx_ring[smp_processor_id()];
@ -1524,7 +1528,6 @@ static void enetc_xdp_drop(struct enetc_bdr *rx_ring, int rx_ring_first,
&rx_ring->rx_swbd[rx_ring_first]);
enetc_bdr_idx_inc(rx_ring, &rx_ring_first);
}
rx_ring->stats.xdp_drops++;
}
static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
@ -1589,6 +1592,7 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
fallthrough;
case XDP_DROP:
enetc_xdp_drop(rx_ring, orig_i, i);
rx_ring->stats.xdp_drops++;
break;
case XDP_PASS:
rxbd = orig_rxbd;
@ -1605,6 +1609,12 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
break;
case XDP_TX:
tx_ring = priv->xdp_tx_ring[rx_ring->index];
if (unlikely(test_bit(ENETC_TX_DOWN, &priv->flags))) {
enetc_xdp_drop(rx_ring, orig_i, i);
tx_ring->stats.xdp_tx_drops++;
break;
}
xdp_tx_bd_cnt = enetc_rx_swbd_to_xdp_tx_swbd(xdp_tx_arr,
rx_ring,
orig_i, i);
@ -2226,16 +2236,22 @@ static void enetc_enable_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
}
static void enetc_enable_bdrs(struct enetc_ndev_priv *priv)
static void enetc_enable_rx_bdrs(struct enetc_ndev_priv *priv)
{
struct enetc_hw *hw = &priv->si->hw;
int i;
for (i = 0; i < priv->num_rx_rings; i++)
enetc_enable_rxbdr(hw, priv->rx_ring[i]);
}
static void enetc_enable_tx_bdrs(struct enetc_ndev_priv *priv)
{
struct enetc_hw *hw = &priv->si->hw;
int i;
for (i = 0; i < priv->num_tx_rings; i++)
enetc_enable_txbdr(hw, priv->tx_ring[i]);
for (i = 0; i < priv->num_rx_rings; i++)
enetc_enable_rxbdr(hw, priv->rx_ring[i]);
}
static void enetc_disable_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
@ -2254,16 +2270,22 @@ static void enetc_disable_txbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
enetc_txbdr_wr(hw, idx, ENETC_TBMR, 0);
}
static void enetc_disable_bdrs(struct enetc_ndev_priv *priv)
static void enetc_disable_rx_bdrs(struct enetc_ndev_priv *priv)
{
struct enetc_hw *hw = &priv->si->hw;
int i;
for (i = 0; i < priv->num_rx_rings; i++)
enetc_disable_rxbdr(hw, priv->rx_ring[i]);
}
static void enetc_disable_tx_bdrs(struct enetc_ndev_priv *priv)
{
struct enetc_hw *hw = &priv->si->hw;
int i;
for (i = 0; i < priv->num_tx_rings; i++)
enetc_disable_txbdr(hw, priv->tx_ring[i]);
for (i = 0; i < priv->num_rx_rings; i++)
enetc_disable_rxbdr(hw, priv->rx_ring[i]);
}
static void enetc_wait_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
@ -2463,9 +2485,13 @@ void enetc_start(struct net_device *ndev)
enable_irq(irq);
}
enetc_enable_bdrs(priv);
enetc_enable_tx_bdrs(priv);
enetc_enable_rx_bdrs(priv);
netif_tx_start_all_queues(ndev);
clear_bit(ENETC_TX_DOWN, &priv->flags);
}
EXPORT_SYMBOL_GPL(enetc_start);
@ -2523,9 +2549,15 @@ void enetc_stop(struct net_device *ndev)
struct enetc_ndev_priv *priv = netdev_priv(ndev);
int i;
set_bit(ENETC_TX_DOWN, &priv->flags);
netif_tx_stop_all_queues(ndev);
enetc_disable_bdrs(priv);
enetc_disable_rx_bdrs(priv);
enetc_wait_bdrs(priv);
enetc_disable_tx_bdrs(priv);
for (i = 0; i < priv->bdr_int_num; i++) {
int irq = pci_irq_vector(priv->si->pdev,
@ -2536,8 +2568,6 @@ void enetc_stop(struct net_device *ndev)
napi_disable(&priv->int_vector[i]->napi);
}
enetc_wait_bdrs(priv);
enetc_clear_interrupts(priv);
}
EXPORT_SYMBOL_GPL(enetc_stop);


@ -328,6 +328,7 @@ enum enetc_active_offloads {
enum enetc_flags_bit {
ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS = 0,
ENETC_TX_DOWN,
};
/* interrupt coalescing modes */


@ -90,6 +90,30 @@
#define FEC_PTP_MAX_NSEC_PERIOD 4000000000ULL
#define FEC_PTP_MAX_NSEC_COUNTER 0x80000000ULL
/**
* fec_ptp_read - read raw cycle counter (to be used by time counter)
* @cc: the cyclecounter structure
*
* this function reads the cyclecounter registers and is called by the
* cyclecounter structure used to construct a ns counter from the
* arbitrary fixed point registers
*/
static u64 fec_ptp_read(const struct cyclecounter *cc)
{
struct fec_enet_private *fep =
container_of(cc, struct fec_enet_private, cc);
u32 tempval;
tempval = readl(fep->hwp + FEC_ATIME_CTRL);
tempval |= FEC_T_CTRL_CAPTURE;
writel(tempval, fep->hwp + FEC_ATIME_CTRL);
if (fep->quirks & FEC_QUIRK_BUG_CAPTURE)
udelay(1);
return readl(fep->hwp + FEC_ATIME);
}
/**
* fec_ptp_enable_pps
* @fep: the fec_enet_private structure handle
@ -136,7 +160,7 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
* NSEC_PER_SEC - ts.tv_nsec. Add the remaining nanoseconds
* to current timer would be next second.
*/
tempval = fep->cc.read(&fep->cc);
tempval = fec_ptp_read(&fep->cc);
/* Convert the ptp local counter to 1588 timestamp */
ns = timecounter_cyc2time(&fep->tc, tempval);
ts = ns_to_timespec64(ns);
@ -211,13 +235,7 @@ static int fec_ptp_pps_perout(struct fec_enet_private *fep)
timecounter_read(&fep->tc);
/* Get the current ptp hardware time counter */
temp_val = readl(fep->hwp + FEC_ATIME_CTRL);
temp_val |= FEC_T_CTRL_CAPTURE;
writel(temp_val, fep->hwp + FEC_ATIME_CTRL);
if (fep->quirks & FEC_QUIRK_BUG_CAPTURE)
udelay(1);
ptp_hc = readl(fep->hwp + FEC_ATIME);
ptp_hc = fec_ptp_read(&fep->cc);
/* Convert the ptp local counter to 1588 timestamp */
curr_time = timecounter_cyc2time(&fep->tc, ptp_hc);
@ -271,30 +289,6 @@ static enum hrtimer_restart fec_ptp_pps_perout_handler(struct hrtimer *timer)
return HRTIMER_NORESTART;
}
/**
* fec_ptp_read - read raw cycle counter (to be used by time counter)
* @cc: the cyclecounter structure
*
* this function reads the cyclecounter registers and is called by the
* cyclecounter structure used to construct a ns counter from the
* arbitrary fixed point registers
*/
static u64 fec_ptp_read(const struct cyclecounter *cc)
{
struct fec_enet_private *fep =
container_of(cc, struct fec_enet_private, cc);
u32 tempval;
tempval = readl(fep->hwp + FEC_ATIME_CTRL);
tempval |= FEC_T_CTRL_CAPTURE;
writel(tempval, fep->hwp + FEC_ATIME_CTRL);
if (fep->quirks & FEC_QUIRK_BUG_CAPTURE)
udelay(1);
return readl(fep->hwp + FEC_ATIME);
}
/**
* fec_ptp_start_cyclecounter - create the cycle counter from hw
* @ndev: network device


@ -1444,6 +1444,8 @@ static void vcap_api_encode_rule_test(struct kunit *test)
ret = vcap_del_rule(&test_vctrl, &test_netdev, id);
KUNIT_EXPECT_EQ(test, 0, ret);
vcap_free_rule(rule);
}
static void vcap_api_set_rule_counter_test(struct kunit *test)


@ -58,12 +58,12 @@ static int do_active_device(struct ctl_table *table, int write,
for (dev = port->devices; dev ; dev = dev->next) {
if(dev == port->cad) {
len += snprintf(buffer, sizeof(buffer), "%s\n", dev->name);
len += scnprintf(buffer, sizeof(buffer), "%s\n", dev->name);
}
}
if(!len) {
len += snprintf(buffer, sizeof(buffer), "%s\n", "none");
len += scnprintf(buffer, sizeof(buffer), "%s\n", "none");
}
if (len > *lenp)
@ -94,19 +94,19 @@ static int do_autoprobe(struct ctl_table *table, int write,
}
if ((str = info->class_name) != NULL)
len += snprintf (buffer + len, sizeof(buffer) - len, "CLASS:%s;\n", str);
len += scnprintf (buffer + len, sizeof(buffer) - len, "CLASS:%s;\n", str);
if ((str = info->model) != NULL)
len += snprintf (buffer + len, sizeof(buffer) - len, "MODEL:%s;\n", str);
len += scnprintf (buffer + len, sizeof(buffer) - len, "MODEL:%s;\n", str);
if ((str = info->mfr) != NULL)
len += snprintf (buffer + len, sizeof(buffer) - len, "MANUFACTURER:%s;\n", str);
len += scnprintf (buffer + len, sizeof(buffer) - len, "MANUFACTURER:%s;\n", str);
if ((str = info->description) != NULL)
len += snprintf (buffer + len, sizeof(buffer) - len, "DESCRIPTION:%s;\n", str);
len += scnprintf (buffer + len, sizeof(buffer) - len, "DESCRIPTION:%s;\n", str);
if ((str = info->cmdset) != NULL)
len += snprintf (buffer + len, sizeof(buffer) - len, "COMMAND SET:%s;\n", str);
len += scnprintf (buffer + len, sizeof(buffer) - len, "COMMAND SET:%s;\n", str);
if (len > *lenp)
len = *lenp;
@ -135,7 +135,7 @@ static int do_hardware_base_addr(struct ctl_table *table, int write,
if (write) /* permissions prevent this anyway */
return -EACCES;
len += snprintf (buffer, sizeof(buffer), "%lu\t%lu\n", port->base, port->base_hi);
len += scnprintf (buffer, sizeof(buffer), "%lu\t%lu\n", port->base, port->base_hi);
if (len > *lenp)
len = *lenp;
@ -162,7 +162,7 @@ static int do_hardware_irq(struct ctl_table *table, int write,
if (write) /* permissions prevent this anyway */
return -EACCES;
len += snprintf (buffer, sizeof(buffer), "%d\n", port->irq);
len += scnprintf (buffer, sizeof(buffer), "%d\n", port->irq);
if (len > *lenp)
len = *lenp;
@ -189,7 +189,7 @@ static int do_hardware_dma(struct ctl_table *table, int write,
if (write) /* permissions prevent this anyway */
return -EACCES;
len += snprintf (buffer, sizeof(buffer), "%d\n", port->dma);
len += scnprintf (buffer, sizeof(buffer), "%d\n", port->dma);
if (len > *lenp)
len = *lenp;
@ -220,7 +220,7 @@ static int do_hardware_modes(struct ctl_table *table, int write,
#define printmode(x) \
do { \
if (port->modes & PARPORT_MODE_##x) \
len += snprintf(buffer + len, sizeof(buffer) - len, "%s%s", f++ ? "," : "", #x); \
len += scnprintf(buffer + len, sizeof(buffer) - len, "%s%s", f++ ? "," : "", #x); \
} while (0)
int f = 0;
printmode(PCSPP);


@ -474,6 +474,9 @@ static int apple_gpio_pinctrl_probe(struct platform_device *pdev)
for (i = 0; i < npins; i++) {
pins[i].number = i;
pins[i].name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "PIN%u", i);
if (!pins[i].name)
return -ENOMEM;
pins[i].drv_data = pctl;
pin_names[i] = pins[i].name;
pin_nums[i] = i;


@ -1962,21 +1962,21 @@ static void ocelot_irq_handler(struct irq_desc *desc)
unsigned int reg = 0, irq, i;
unsigned long irqs;
chained_irq_enter(parent_chip, desc);
for (i = 0; i < info->stride; i++) {
regmap_read(info->map, id_reg + 4 * i, &reg);
if (!reg)
continue;
chained_irq_enter(parent_chip, desc);
irqs = reg;
for_each_set_bit(irq, &irqs,
min(32U, info->desc->npins - 32 * i))
generic_handle_domain_irq(chip->irq.domain, irq + 32 * i);
chained_irq_exit(parent_chip, desc);
}
chained_irq_exit(parent_chip, desc);
}
static int ocelot_gpiochip_register(struct platform_device *pdev,


@ -1387,10 +1387,15 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl, struct fwnode
for (i = 0; i < npins; i++) {
stm32_pin = stm32_pctrl_get_desc_pin_from_gpio(pctl, bank, i);
if (stm32_pin && stm32_pin->pin.name)
if (stm32_pin && stm32_pin->pin.name) {
names[i] = devm_kasprintf(dev, GFP_KERNEL, "%s", stm32_pin->pin.name);
else
if (!names[i]) {
err = -ENOMEM;
goto err_clk;
}
} else {
names[i] = NULL;
}
}
bank->gpio_chip.names = (const char * const *)names;


@ -1195,7 +1195,8 @@ sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
}
static struct notifier_block sclp_reboot_notifier = {
.notifier_call = sclp_reboot_event
.notifier_call = sclp_reboot_event,
.priority = INT_MIN,
};
static ssize_t con_pages_show(struct device_driver *dev, char *buf)


@ -319,7 +319,7 @@ sclp_vt220_add_msg(struct sclp_vt220_request *request,
buffer = (void *) ((addr_t) sccb + sccb->header.length);
if (convertlf) {
/* Perform Linefeed conversion (0x0a -> 0x0a 0x0d)*/
/* Perform Linefeed conversion (0x0a -> 0x0d 0x0a)*/
for (from=0, to=0;
(from < count) && (to < sclp_vt220_space_left(request));
from++) {
@ -328,8 +328,8 @@ sclp_vt220_add_msg(struct sclp_vt220_request *request,
/* Perform conversion */
if (c == 0x0a) {
if (to + 1 < sclp_vt220_space_left(request)) {
((unsigned char *) buffer)[to++] = c;
((unsigned char *) buffer)[to++] = 0x0d;
((unsigned char *) buffer)[to++] = c;
} else
break;


@ -3156,6 +3156,8 @@ static void gsm_cleanup_mux(struct gsm_mux *gsm, bool disc)
mutex_unlock(&gsm->mutex);
/* Now wipe the queues */
tty_ldisc_flush(gsm->tty);
guard(spinlock_irqsave)(&gsm->tx_lock);
list_for_each_entry_safe(txq, ntxq, &gsm->tx_ctrl_list, list)
kfree(txq);
INIT_LIST_HEAD(&gsm->tx_ctrl_list);


@ -771,6 +771,21 @@ static irqreturn_t __imx_uart_rtsint(int irq, void *dev_id)
imx_uart_writel(sport, USR1_RTSD, USR1);
usr1 = imx_uart_readl(sport, USR1) & USR1_RTSS;
/*
* Update sport->old_status here, so any follow-up calls to
* imx_uart_mctrl_check() will be able to recognize that RTS
* state changed since last imx_uart_mctrl_check() call.
*
* In case RTS has been detected as asserted here and later on
* deasserted by the time imx_uart_mctrl_check() was called,
* imx_uart_mctrl_check() can detect the RTS state change and
* trigger uart_handle_cts_change() to unblock the port for
* further TX transfers.
*/
if (usr1 & USR1_RTSS)
sport->old_status |= TIOCM_CTS;
else
sport->old_status &= ~TIOCM_CTS;
uart_handle_cts_change(&sport->port, usr1);
wake_up_interruptible(&sport->port.state->port.delta_msr_wait);


@ -144,6 +144,8 @@ static const struct uart_ops qcom_geni_uart_pops;
static struct uart_driver qcom_geni_console_driver;
static struct uart_driver qcom_geni_uart_driver;
static int qcom_geni_serial_port_setup(struct uart_port *uport);
static inline struct qcom_geni_serial_port *to_dev_port(struct uart_port *uport)
{
return container_of(uport, struct qcom_geni_serial_port, uport);
@ -385,6 +387,23 @@ static void qcom_geni_serial_poll_put_char(struct uart_port *uport,
writel(M_TX_FIFO_WATERMARK_EN, uport->membase + SE_GENI_M_IRQ_CLEAR);
qcom_geni_serial_poll_tx_done(uport);
}
static int qcom_geni_serial_poll_init(struct uart_port *uport)
{
struct qcom_geni_serial_port *port = to_dev_port(uport);
int ret;
if (!port->setup) {
ret = qcom_geni_serial_port_setup(uport);
if (ret)
return ret;
}
if (!qcom_geni_serial_secondary_active(uport))
geni_se_setup_s_cmd(&port->se, UART_START_READ, 0);
return 0;
}
#endif
#ifdef CONFIG_SERIAL_QCOM_GENI_CONSOLE
@ -756,17 +775,27 @@ static void qcom_geni_serial_start_rx_fifo(struct uart_port *uport)
static void qcom_geni_serial_stop_rx_dma(struct uart_port *uport)
{
struct qcom_geni_serial_port *port = to_dev_port(uport);
bool done;
if (!qcom_geni_serial_secondary_active(uport))
return;
geni_se_cancel_s_cmd(&port->se);
qcom_geni_serial_poll_bit(uport, SE_GENI_S_IRQ_STATUS,
S_CMD_CANCEL_EN, true);
if (qcom_geni_serial_secondary_active(uport))
done = qcom_geni_serial_poll_bit(uport, SE_DMA_RX_IRQ_STAT,
RX_EOT, true);
if (done) {
writel(RX_EOT | RX_DMA_DONE,
uport->membase + SE_DMA_RX_IRQ_CLR);
} else {
qcom_geni_serial_abort_rx(uport);
writel(1, uport->membase + SE_DMA_RX_FSM_RST);
qcom_geni_serial_poll_bit(uport, SE_DMA_RX_IRQ_STAT,
RX_RESET_DONE, true);
writel(RX_RESET_DONE | RX_DMA_DONE,
uport->membase + SE_DMA_RX_IRQ_CLR);
}
if (port->rx_dma_addr) {
geni_se_rx_dma_unprep(&port->se, port->rx_dma_addr,
DMA_RX_BUF_SIZE);
@ -1115,7 +1144,6 @@ static int qcom_geni_serial_port_setup(struct uart_port *uport)
false, true, true);
geni_se_init(&port->se, UART_RX_WM, port->rx_fifo_depth - 2);
geni_se_select_mode(&port->se, port->dev_data->mode);
qcom_geni_serial_start_rx(uport);
port->setup = true;
return 0;
@ -1131,6 +1159,11 @@ static int qcom_geni_serial_startup(struct uart_port *uport)
if (ret)
return ret;
}
uart_port_lock_irq(uport);
qcom_geni_serial_start_rx(uport);
uart_port_unlock_irq(uport);
enable_irq(uport->irq);
return 0;
@ -1216,7 +1249,6 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport,
unsigned int avg_bw_core;
unsigned long timeout;
qcom_geni_serial_stop_rx(uport);
/* baud rate */
baud = uart_get_baud_rate(uport, termios, old, 300, 4000000);
@ -1232,7 +1264,7 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport,
dev_err(port->se.dev,
"Couldn't find suitable clock rate for %u\n",
baud * sampling_rate);
goto out_restart_rx;
return;
}
dev_dbg(port->se.dev, "desired_rate = %u, clk_rate = %lu, clk_div = %u\n",
@ -1323,8 +1355,6 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport,
writel(stop_bit_len, uport->membase + SE_UART_TX_STOP_BIT_LEN);
writel(ser_clk_cfg, uport->membase + GENI_SER_M_CLK_CFG);
writel(ser_clk_cfg, uport->membase + GENI_SER_S_CLK_CFG);
out_restart_rx:
qcom_geni_serial_start_rx(uport);
}
#ifdef CONFIG_SERIAL_QCOM_GENI_CONSOLE
@ -1544,7 +1574,7 @@ static const struct uart_ops qcom_geni_console_pops = {
#ifdef CONFIG_CONSOLE_POLL
.poll_get_char = qcom_geni_serial_get_char,
.poll_put_char = qcom_geni_serial_poll_put_char,
.poll_init = qcom_geni_serial_port_setup,
.poll_init = qcom_geni_serial_poll_init,
#endif
.pm = qcom_geni_serial_pm,
};
@ -1745,38 +1775,6 @@ static int qcom_geni_serial_sys_resume(struct device *dev)
return ret;
}
static int qcom_geni_serial_sys_hib_resume(struct device *dev)
{
int ret = 0;
struct uart_port *uport;
struct qcom_geni_private_data *private_data;
struct qcom_geni_serial_port *port = dev_get_drvdata(dev);
uport = &port->uport;
private_data = uport->private_data;
if (uart_console(uport)) {
geni_icc_set_tag(&port->se, QCOM_ICC_TAG_ALWAYS);
geni_icc_set_bw(&port->se);
ret = uart_resume_port(private_data->drv, uport);
/*
* For hibernation usecase clients for
* console UART won't call port setup during restore,
* hence call port setup for console uart.
*/
qcom_geni_serial_port_setup(uport);
} else {
/*
* Peripheral register settings are lost during hibernation.
* Update setup flag such that port setup happens again
* during next session. Clients of HS-UART will close and
* open the port during hibernation.
*/
port->setup = false;
}
return ret;
}
static const struct qcom_geni_device_data qcom_geni_console_data = {
.console = true,
.mode = GENI_SE_FIFO,
@ -1788,12 +1786,8 @@ static const struct qcom_geni_device_data qcom_geni_uart_data = {
};
static const struct dev_pm_ops qcom_geni_serial_pm_ops = {
.suspend = pm_sleep_ptr(qcom_geni_serial_sys_suspend),
.resume = pm_sleep_ptr(qcom_geni_serial_sys_resume),
.freeze = pm_sleep_ptr(qcom_geni_serial_sys_suspend),
.poweroff = pm_sleep_ptr(qcom_geni_serial_sys_suspend),
.restore = pm_sleep_ptr(qcom_geni_serial_sys_hib_resume),
.thaw = pm_sleep_ptr(qcom_geni_serial_sys_hib_resume),
SYSTEM_SLEEP_PM_OPS(qcom_geni_serial_sys_suspend,
qcom_geni_serial_sys_resume)
};
static const struct of_device_id qcom_geni_serial_match_table[] = {


@ -4550,7 +4550,7 @@ static int con_font_get(struct vc_data *vc, struct console_font_op *op)
return -EINVAL;
if (op->data) {
font.data = kvmalloc(max_font_size, GFP_KERNEL);
font.data = kvzalloc(max_font_size, GFP_KERNEL);
if (!font.data)
return -ENOMEM;
} else


@ -498,7 +498,7 @@ int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag)
struct scsi_cmnd *cmd = lrbp->cmd;
struct ufs_hw_queue *hwq;
void __iomem *reg, *opr_sqd_base;
u32 nexus, id, val;
u32 nexus, id, val, rtc;
int err;
if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC)
@ -528,17 +528,18 @@ int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag)
opr_sqd_base = mcq_opr_base(hba, OPR_SQD, id);
writel(nexus, opr_sqd_base + REG_SQCTI);
/* SQRTCy.ICU = 1 */
writel(SQ_ICU, opr_sqd_base + REG_SQRTC);
/* Initiate Cleanup */
writel(readl(opr_sqd_base + REG_SQRTC) | SQ_ICU,
opr_sqd_base + REG_SQRTC);
/* Poll SQRTSy.CUS = 1. Return result from SQRTSy.RTC */
reg = opr_sqd_base + REG_SQRTS;
err = read_poll_timeout(readl, val, val & SQ_CUS, 20,
MCQ_POLL_US, false, reg);
if (err)
dev_err(hba->dev, "%s: failed. hwq=%d, tag=%d err=%ld\n",
__func__, id, task_tag,
FIELD_GET(SQ_ICU_ERR_CODE_MASK, readl(reg)));
rtc = FIELD_GET(SQ_ICU_ERR_CODE_MASK, readl(reg));
if (err || rtc)
dev_err(hba->dev, "%s: failed. hwq=%d, tag=%d err=%d RTC=%d\n",
__func__, id, task_tag, err, rtc);
if (ufshcd_mcq_sq_start(hba, hwq))
err = -ETIMEDOUT;


@ -10100,7 +10100,9 @@ static void ufshcd_wl_shutdown(struct device *dev)
shost_for_each_device(sdev, hba->host) {
if (sdev == hba->ufs_device_wlun)
continue;
scsi_device_quiesce(sdev);
mutex_lock(&sdev->state_mutex);
scsi_device_set_state(sdev, SDEV_OFFLINE);
mutex_unlock(&sdev->state_mutex);
}
__ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);


@ -438,6 +438,10 @@ skip_status:
dwc3_gadget_ep_get_transfer_index(dep);
}
if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_ENDTRANSFER &&
!(cmd & DWC3_DEPCMD_CMDIOC))
mdelay(1);
if (saved_config) {
reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
reg |= saved_config;
@ -1734,12 +1738,10 @@ static int __dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool int
WARN_ON_ONCE(ret);
dep->resource_index = 0;
if (!interrupt) {
mdelay(1);
if (!interrupt)
dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
} else if (!ret) {
else if (!ret)
dep->flags |= DWC3_EP_END_TRANSFER_PENDING;
}
dep->flags &= ~DWC3_EP_DELAY_STOP;
return ret;


@ -1046,7 +1046,7 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
td_to_noop(xhci, ring, cached_td, false);
cached_td->cancel_status = TD_CLEARED;
}
td_to_noop(xhci, ring, td, false);
td->cancel_status = TD_CLEARING_CACHE;
cached_td = td;
break;


@ -2183,7 +2183,7 @@ static int tegra_xusb_enter_elpg(struct tegra_xusb *tegra, bool runtime)
goto out;
}
for (i = 0; i < tegra->num_usb_phys; i++) {
for (i = 0; i < xhci->usb2_rhub.num_ports; i++) {
if (!xhci->usb2_rhub.ports[i])
continue;
portsc = readl(xhci->usb2_rhub.ports[i]->addr);


@ -1286,7 +1286,7 @@ enum xhci_setup_dev {
/* Set TR Dequeue Pointer command TRB fields, 6.4.3.9 */
#define TRB_TO_STREAM_ID(p) ((((p) & (0xffff << 16)) >> 16))
#define STREAM_ID_FOR_TRB(p) ((((p)) & 0xffff) << 16)
#define SCT_FOR_TRB(p) (((p) << 1) & 0x7)
#define SCT_FOR_TRB(p) (((p) & 0x7) << 1)
/* Link TRB specific fields */
#define TRB_TC (1<<1)


@ -279,6 +279,7 @@ static void option_instat_callback(struct urb *urb);
#define QUECTEL_PRODUCT_EG912Y 0x6001
#define QUECTEL_PRODUCT_EC200S_CN 0x6002
#define QUECTEL_PRODUCT_EC200A 0x6005
#define QUECTEL_PRODUCT_EG916Q 0x6007
#define QUECTEL_PRODUCT_EM061K_LWW 0x6008
#define QUECTEL_PRODUCT_EM061K_LCN 0x6009
#define QUECTEL_PRODUCT_EC200T 0x6026
@ -1270,6 +1271,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG912Y, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG916Q, 0xff, 0x00, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500K, 0xff, 0x00, 0x00) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
@ -1380,10 +1382,16 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a0, 0xff), /* Telit FN20C04 (rmnet) */
.driver_info = RSVD(0) | NCTRL(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a2, 0xff), /* Telit FN920C04 (MBIM) */
.driver_info = NCTRL(4) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a4, 0xff), /* Telit FN20C04 (rmnet) */
.driver_info = RSVD(0) | NCTRL(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a7, 0xff), /* Telit FN920C04 (MBIM) */
.driver_info = NCTRL(4) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a9, 0xff), /* Telit FN20C04 (rmnet) */
.driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10aa, 0xff), /* Telit FN920C04 (MBIM) */
.driver_info = NCTRL(3) | RSVD(4) | RSVD(5) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),


@ -252,7 +252,6 @@ int qcom_pmic_typec_port_get_cc(struct pmic_typec_port *pmic_typec_port,
val = TYPEC_CC_RP_DEF;
break;
}
val = TYPEC_CC_RP_DEF;
}
if (misc & CC_ORIENTATION)


@ -1374,7 +1374,7 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
struct inode *inode = NULL;
unsigned long ref_ptr;
unsigned long ref_end;
struct fscrypt_str name;
struct fscrypt_str name = { 0 };
int ret;
int log_ref_ver = 0;
u64 parent_objectid;
@ -1846,7 +1846,7 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
struct btrfs_dir_item *di,
struct btrfs_key *key)
{
struct fscrypt_str name;
struct fscrypt_str name = { 0 };
struct btrfs_dir_item *dir_dst_di;
struct btrfs_dir_item *index_dst_di;
bool dir_dst_matches = false;
@ -2126,7 +2126,7 @@ static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
struct extent_buffer *eb;
int slot;
struct btrfs_dir_item *di;
struct fscrypt_str name;
struct fscrypt_str name = { 0 };
struct inode *inode = NULL;
struct btrfs_key location;


@ -1037,7 +1037,7 @@ error_inode:
if (corrupt < 0) {
fat_fs_error(new_dir->i_sb,
"%s: Filesystem corrupted (i_pos %lld)",
__func__, sinfo.i_pos);
__func__, new_i_pos);
}
goto out;
}


@ -331,6 +331,8 @@ static int nilfs_readdir(struct file *file, struct dir_context *ctx)
* returns the page in which the entry was found, and the entry itself
* (as a parameter - res_dir). Page is returned mapped and unlocked.
* Entry is guaranteed to be valid.
*
* On failure, returns an error pointer and the caller should ignore res_page.
*/
struct nilfs_dir_entry *
nilfs_find_entry(struct inode *dir, const struct qstr *qstr,
@ -358,22 +360,24 @@ nilfs_find_entry(struct inode *dir, const struct qstr *qstr,
do {
char *kaddr = nilfs_get_page(dir, n, &page);
if (!IS_ERR(kaddr)) {
de = (struct nilfs_dir_entry *)kaddr;
kaddr += nilfs_last_byte(dir, n) - reclen;
while ((char *) de <= kaddr) {
if (de->rec_len == 0) {
nilfs_error(dir->i_sb,
"zero-length directory entry");
nilfs_put_page(page);
goto out;
}
if (nilfs_match(namelen, name, de))
goto found;
de = nilfs_next_entry(de);
if (IS_ERR(kaddr))
return ERR_CAST(kaddr);
de = (struct nilfs_dir_entry *)kaddr;
kaddr += nilfs_last_byte(dir, n) - reclen;
while ((char *)de <= kaddr) {
if (de->rec_len == 0) {
nilfs_error(dir->i_sb,
"zero-length directory entry");
nilfs_put_page(page);
goto out;
}
nilfs_put_page(page);
if (nilfs_match(namelen, name, de))
goto found;
de = nilfs_next_entry(de);
}
nilfs_put_page(page);
if (++n >= npages)
n = 0;
/* next page is past the blocks we've got */
@ -386,7 +390,7 @@ nilfs_find_entry(struct inode *dir, const struct qstr *qstr,
}
} while (n != start);
out:
return NULL;
return ERR_PTR(-ENOENT);
found:
*res_page = page;
@ -431,19 +435,19 @@ fail:
return NULL;
}
ino_t nilfs_inode_by_name(struct inode *dir, const struct qstr *qstr)
int nilfs_inode_by_name(struct inode *dir, const struct qstr *qstr, ino_t *ino)
{
ino_t res = 0;
struct nilfs_dir_entry *de;
struct page *page;
de = nilfs_find_entry(dir, qstr, &page);
if (de) {
res = le64_to_cpu(de->inode);
kunmap(page);
put_page(page);
}
return res;
if (IS_ERR(de))
return PTR_ERR(de);
*ino = le64_to_cpu(de->inode);
kunmap(page);
put_page(page);
return 0;
}
/* Releases the page */


@ -55,12 +55,20 @@ nilfs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
struct inode *inode;
ino_t ino;
int res;
if (dentry->d_name.len > NILFS_NAME_LEN)
return ERR_PTR(-ENAMETOOLONG);
ino = nilfs_inode_by_name(dir, &dentry->d_name);
inode = ino ? nilfs_iget(dir->i_sb, NILFS_I(dir)->i_root, ino) : NULL;
res = nilfs_inode_by_name(dir, &dentry->d_name, &ino);
if (res) {
if (res != -ENOENT)
return ERR_PTR(res);
inode = NULL;
} else {
inode = nilfs_iget(dir->i_sb, NILFS_I(dir)->i_root, ino);
}
return d_splice_alias(inode, dentry);
}
@ -263,10 +271,11 @@ static int nilfs_do_unlink(struct inode *dir, struct dentry *dentry)
struct page *page;
int err;
err = -ENOENT;
de = nilfs_find_entry(dir, &dentry->d_name, &page);
if (!de)
if (IS_ERR(de)) {
err = PTR_ERR(de);
goto out;
}
inode = d_inode(dentry);
err = -EIO;
@ -361,10 +370,11 @@ static int nilfs_rename(struct mnt_idmap *idmap,
if (unlikely(err))
return err;
err = -ENOENT;
old_de = nilfs_find_entry(old_dir, &old_dentry->d_name, &old_page);
if (!old_de)
if (IS_ERR(old_de)) {
err = PTR_ERR(old_de);
goto out;
}
if (S_ISDIR(old_inode->i_mode)) {
err = -EIO;
@ -381,10 +391,12 @@ static int nilfs_rename(struct mnt_idmap *idmap,
if (dir_de && !nilfs_empty_dir(new_inode))
goto out_dir;
err = -ENOENT;
new_de = nilfs_find_entry(new_dir, &new_dentry->d_name, &new_page);
if (!new_de)
new_de = nilfs_find_entry(new_dir, &new_dentry->d_name,
&new_page);
if (IS_ERR(new_de)) {
err = PTR_ERR(new_de);
goto out_dir;
}
nilfs_set_link(new_dir, new_de, new_page, old_inode);
nilfs_mark_inode_dirty(new_dir);
inode_set_ctime_current(new_inode);
@ -438,13 +450,14 @@ out:
*/
static struct dentry *nilfs_get_parent(struct dentry *child)
{
unsigned long ino;
ino_t ino;
int res;
struct inode *inode;
struct nilfs_root *root;
ino = nilfs_inode_by_name(d_inode(child), &dotdot_name);
if (!ino)
return ERR_PTR(-ENOENT);
res = nilfs_inode_by_name(d_inode(child), &dotdot_name, &ino);
if (res)
return ERR_PTR(res);
root = NILFS_I(d_inode(child))->i_root;

View File

@ -233,7 +233,7 @@ static inline __u32 nilfs_mask_flags(umode_t mode, __u32 flags)
/* dir.c */
extern int nilfs_add_link(struct dentry *, struct inode *);
extern ino_t nilfs_inode_by_name(struct inode *, const struct qstr *);
int nilfs_inode_by_name(struct inode *dir, const struct qstr *qstr, ino_t *ino);
extern int nilfs_make_empty(struct inode *, struct inode *);
extern struct nilfs_dir_entry *
nilfs_find_entry(struct inode *, const struct qstr *, struct page **);

View File

@ -176,9 +176,10 @@ static void ksmbd_expire_session(struct ksmbd_conn *conn)
down_write(&conn->session_lock);
xa_for_each(&conn->sessions, id, sess) {
if (sess->state != SMB2_SESSION_VALID ||
time_after(jiffies,
sess->last_active + SMB2_SESSION_TIMEOUT)) {
if (atomic_read(&sess->refcnt) == 0 &&
(sess->state != SMB2_SESSION_VALID ||
time_after(jiffies,
sess->last_active + SMB2_SESSION_TIMEOUT))) {
xa_erase(&conn->sessions, sess->id);
hash_del(&sess->hlist);
ksmbd_session_destroy(sess);
@ -268,8 +269,6 @@ struct ksmbd_session *ksmbd_session_lookup_slowpath(unsigned long long id)
down_read(&sessions_table_lock);
sess = __session_lookup(id);
if (sess)
sess->last_active = jiffies;
up_read(&sessions_table_lock);
return sess;
@ -288,6 +287,22 @@ struct ksmbd_session *ksmbd_session_lookup_all(struct ksmbd_conn *conn,
return sess;
}
void ksmbd_user_session_get(struct ksmbd_session *sess)
{
atomic_inc(&sess->refcnt);
}
void ksmbd_user_session_put(struct ksmbd_session *sess)
{
if (!sess)
return;
if (atomic_read(&sess->refcnt) <= 0)
WARN_ON(1);
else
atomic_dec(&sess->refcnt);
}
struct preauth_session *ksmbd_preauth_session_alloc(struct ksmbd_conn *conn,
u64 sess_id)
{
@ -390,6 +405,7 @@ static struct ksmbd_session *__session_create(int protocol)
xa_init(&sess->rpc_handle_list);
sess->sequence_number = 1;
rwlock_init(&sess->tree_conns_lock);
atomic_set(&sess->refcnt, 1);
ret = __init_smb2_session(sess);
if (ret)

View File

@ -61,6 +61,8 @@ struct ksmbd_session {
struct ksmbd_file_table file_table;
unsigned long last_active;
rwlock_t tree_conns_lock;
atomic_t refcnt;
};
static inline int test_session_flag(struct ksmbd_session *sess, int bit)
@ -104,4 +106,6 @@ void ksmbd_release_tree_conn_id(struct ksmbd_session *sess, int id);
int ksmbd_session_rpc_open(struct ksmbd_session *sess, char *rpc_name);
void ksmbd_session_rpc_close(struct ksmbd_session *sess, int id);
int ksmbd_session_rpc_method(struct ksmbd_session *sess, int id);
void ksmbd_user_session_get(struct ksmbd_session *sess);
void ksmbd_user_session_put(struct ksmbd_session *sess);
#endif /* __USER_SESSION_MANAGEMENT_H__ */

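The refcnt field and the ksmbd_user_session_get()/ksmbd_user_session_put() helpers added above pin a session for as long as a request is using it, and the expiry scan only tears a session down once its count has dropped back to zero. The following is a hedged userspace analogue of that discipline, using C11 atomics instead of the kernel's atomic_t; all names and the exact destroy condition are illustrative, not ksmbd internals.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct session_example {
    atomic_int refcnt;   /* 1 when created; lookups take extra references */
    bool expired;
};

static void session_get(struct session_example *s) { atomic_fetch_add(&s->refcnt, 1); }
static void session_put(struct session_example *s) { atomic_fetch_sub(&s->refcnt, 1); }

/* The expiry scan may only destroy a session nobody is still using. */
static bool can_destroy(struct session_example *s)
{
    return atomic_load(&s->refcnt) == 0 && s->expired;
}

int main(void)
{
    struct session_example s = { .refcnt = 1, .expired = true };

    session_get(&s);                            /* request handler pins it */
    printf("destroy? %d\n", can_destroy(&s));   /* 0: still in use */
    session_put(&s);                            /* request completes */
    session_put(&s);                            /* owner drops the initial reference */
    printf("destroy? %d\n", can_destroy(&s));   /* 1: safe to free */
    return 0;
}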
View File

@ -238,6 +238,8 @@ static void __handle_ksmbd_work(struct ksmbd_work *work,
} while (is_chained == true);
send:
if (work->sess)
ksmbd_user_session_put(work->sess);
if (work->tcon)
ksmbd_tree_connect_put(work->tcon);
smb3_preauth_hash_rsp(work);

View File

@ -605,8 +605,10 @@ int smb2_check_user_session(struct ksmbd_work *work)
/* Check for validity of user session */
work->sess = ksmbd_session_lookup_all(conn, sess_id);
if (work->sess)
if (work->sess) {
ksmbd_user_session_get(work->sess);
return 1;
}
ksmbd_debug(SMB, "Invalid user session, Uid %llu\n", sess_id);
return -ENOENT;
}
@ -1743,6 +1745,7 @@ int smb2_sess_setup(struct ksmbd_work *work)
}
conn->binding = true;
ksmbd_user_session_get(sess);
} else if ((conn->dialect < SMB30_PROT_ID ||
server_conf.flags & KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL) &&
(req->Flags & SMB2_SESSION_REQ_FLAG_BINDING)) {
@ -1769,6 +1772,7 @@ int smb2_sess_setup(struct ksmbd_work *work)
}
conn->binding = false;
ksmbd_user_session_get(sess);
}
work->sess = sess;
@ -2229,7 +2233,9 @@ int smb2_session_logoff(struct ksmbd_work *work)
}
ksmbd_destroy_file_table(&sess->file_table);
down_write(&conn->session_lock);
sess->state = SMB2_SESSION_EXPIRED;
up_write(&conn->session_lock);
ksmbd_free_user(sess->user);
sess->user = NULL;

View File

@ -1565,12 +1565,23 @@ out_release:
return error;
}
/* Enforce that there is at most one namespace bit per attr. */
inline bool xfs_attr_check_namespace(unsigned int attr_flags)
{
return hweight32(attr_flags & XFS_ATTR_NSP_ONDISK_MASK) < 2;
}
/* Returns true if the attribute entry name is valid. */
bool
xfs_attr_namecheck(
unsigned int attr_flags,
const void *name,
size_t length)
{
/* Only one namespace bit allowed. */
if (!xfs_attr_check_namespace(attr_flags))
return false;
/*
* MAXNAMELEN includes the trailing null, but (name/length) leave it
* out, so use >= for the length check.

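xfs_attr_check_namespace() above simply counts the namespace bits in the attr flags and requires that at most one is set. A small userspace sketch of the same rule follows; the flag values are made up for illustration and __builtin_popcount() stands in for the kernel's hweight32().

#include <stdbool.h>
#include <stdio.h>

#define ATTR_ROOT_EX     (1u << 1)   /* illustrative, not the on-disk values */
#define ATTR_SECURE_EX   (1u << 2)
#define ATTR_NSP_MASK_EX (ATTR_ROOT_EX | ATTR_SECURE_EX)

static bool check_namespace_example(unsigned int flags)
{
    /* at most one namespace bit may be set */
    return __builtin_popcount(flags & ATTR_NSP_MASK_EX) < 2;
}

int main(void)
{
    printf("%d\n", check_namespace_example(0));                              /* 1: user namespace */
    printf("%d\n", check_namespace_example(ATTR_ROOT_EX));                   /* 1 */
    printf("%d\n", check_namespace_example(ATTR_ROOT_EX | ATTR_SECURE_EX));  /* 0: corrupt */
    return 0;
}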
View File

@ -547,7 +547,9 @@ int xfs_attr_get(struct xfs_da_args *args);
int xfs_attr_set(struct xfs_da_args *args);
int xfs_attr_set_iter(struct xfs_attr_intent *attr);
int xfs_attr_remove_iter(struct xfs_attr_intent *attr);
bool xfs_attr_namecheck(const void *name, size_t length);
bool xfs_attr_check_namespace(unsigned int attr_flags);
bool xfs_attr_namecheck(unsigned int attr_flags, const void *name,
size_t length);
int xfs_attr_calc_size(struct xfs_da_args *args, int *local);
void xfs_init_attr_trans(struct xfs_da_args *args, struct xfs_trans_res *tres,
unsigned int *total);

View File

@ -984,6 +984,10 @@ xfs_attr_shortform_to_leaf(
nargs.hashval = xfs_da_hashname(sfe->nameval,
sfe->namelen);
nargs.attr_filter = sfe->flags & XFS_ATTR_NSP_ONDISK_MASK;
if (!xfs_attr_check_namespace(sfe->flags)) {
error = -EFSCORRUPTED;
goto out;
}
error = xfs_attr3_leaf_lookup_int(bp, &nargs); /* set a->index */
ASSERT(error == -ENOATTR);
error = xfs_attr3_leaf_add(bp, &nargs);
@ -1105,7 +1109,7 @@ xfs_attr_shortform_verify(
* one namespace flag per xattr, so we can just count the
* bits (i.e. hweight) here.
*/
if (hweight8(sfep->flags & XFS_ATTR_NSP_ONDISK_MASK) > 1)
if (!xfs_attr_check_namespace(sfep->flags))
return __this_address;
sfep = next_sfep;

View File

@ -619,7 +619,6 @@ xfs_attr_rmtval_set_blk(
if (error)
return error;
ASSERT(nmap == 1);
ASSERT((map->br_startblock != DELAYSTARTBLOCK) &&
(map->br_startblock != HOLESTARTBLOCK));

View File

@ -1549,6 +1549,7 @@ xfs_bmap_add_extent_delay_real(
if (error)
goto done;
}
ASSERT(da_new <= da_old);
break;
case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
@ -1578,6 +1579,7 @@ xfs_bmap_add_extent_delay_real(
if (error)
goto done;
}
ASSERT(da_new <= da_old);
break;
case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
@ -1611,6 +1613,7 @@ xfs_bmap_add_extent_delay_real(
if (error)
goto done;
}
ASSERT(da_new <= da_old);
break;
case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
@ -1643,6 +1646,7 @@ xfs_bmap_add_extent_delay_real(
goto done;
}
}
ASSERT(da_new <= da_old);
break;
case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
@ -1680,6 +1684,7 @@ xfs_bmap_add_extent_delay_real(
if (error)
goto done;
}
ASSERT(da_new <= da_old);
break;
case BMAP_LEFT_FILLING:
@ -1767,6 +1772,7 @@ xfs_bmap_add_extent_delay_real(
xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
xfs_iext_next(ifp, &bma->icur);
xfs_iext_update_extent(bma->ip, state, &bma->icur, &RIGHT);
ASSERT(da_new <= da_old);
break;
case BMAP_RIGHT_FILLING:
@ -1814,6 +1820,7 @@ xfs_bmap_add_extent_delay_real(
PREV.br_blockcount = temp;
xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
xfs_iext_next(ifp, &bma->icur);
ASSERT(da_new <= da_old);
break;
case 0:
@ -1934,11 +1941,9 @@ xfs_bmap_add_extent_delay_real(
}
/* adjust for changes in reserved delayed indirect blocks */
if (da_new != da_old) {
ASSERT(state == 0 || da_new < da_old);
if (da_new != da_old)
error = xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new),
false);
}
true);
xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
done:
@ -3969,20 +3974,32 @@ xfs_bmapi_reserve_delalloc(
xfs_extlen_t alen;
xfs_extlen_t indlen;
int error;
xfs_fileoff_t aoff = off;
xfs_fileoff_t aoff;
bool use_cowextszhint =
whichfork == XFS_COW_FORK && !prealloc;
retry:
/*
* Cap the alloc length. Keep track of prealloc so we know whether to
* tag the inode before we return.
*/
aoff = off;
alen = XFS_FILBLKS_MIN(len + prealloc, XFS_MAX_BMBT_EXTLEN);
if (!eof)
alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff);
if (prealloc && alen >= len)
prealloc = alen - len;
/* Figure out the extent size, adjust alen */
if (whichfork == XFS_COW_FORK) {
/*
* If we're targeting the COW fork but aren't creating a speculative
* posteof preallocation, try to expand the reservation to align with
* the COW extent size hint if there's sufficient free space.
*
* Unlike the data fork, the CoW cancellation functions will free all
* the reservations at inactivation, so we don't require that every
* delalloc reservation have a dirty pagecache.
*/
if (use_cowextszhint) {
struct xfs_bmbt_irec prev;
xfs_extlen_t extsz = xfs_get_cowextsz_hint(ip);
@ -4001,7 +4018,7 @@ xfs_bmapi_reserve_delalloc(
*/
error = xfs_quota_reserve_blkres(ip, alen);
if (error)
return error;
goto out;
/*
* Split changing sb for alen and indlen since they could be coming
@ -4046,6 +4063,17 @@ out_unreserve_blocks:
out_unreserve_quota:
if (XFS_IS_QUOTA_ON(mp))
xfs_quota_unreserve_blkres(ip, alen);
out:
if (error == -ENOSPC || error == -EDQUOT) {
trace_xfs_delalloc_enospc(ip, off, len);
if (prealloc || use_cowextszhint) {
/* retry without any preallocation */
use_cowextszhint = false;
prealloc = 0;
goto retry;
}
}
return error;
}
@ -4128,8 +4156,10 @@ xfs_bmapi_allocate(
} else {
error = xfs_bmap_alloc_userdata(bma);
}
if (error || bma->blkno == NULLFSBLOCK)
if (error)
return error;
if (bma->blkno == NULLFSBLOCK)
return -ENOSPC;
if (bma->flags & XFS_BMAPI_ZERO) {
error = xfs_zero_extent(bma->ip, bma->blkno, bma->length);
@ -4309,6 +4339,15 @@ xfs_bmapi_finish(
* extent state if necessary. Detailed behaviour is controlled by the flags
* parameter. Only allocates blocks from a single allocation group, to avoid
* locking problems.
*
* Returns 0 on success and places the extent mappings in mval. nmaps is used
* as an input/output parameter where the caller specifies the maximum number
* of mappings that may be returned and xfs_bmapi_write passes back the number
* of mappings (including existing mappings) it found.
*
* Returns a negative error code on failure, including -ENOSPC when it could not
* allocate any blocks and -ENOSR when it did allocate blocks to convert a
* delalloc range, but those blocks were before the passed in range.
*/
int
xfs_bmapi_write(
@ -4436,10 +4475,16 @@ xfs_bmapi_write(
ASSERT(len > 0);
ASSERT(bma.length > 0);
error = xfs_bmapi_allocate(&bma);
if (error)
if (error) {
/*
* If we already allocated space in a previous
* iteration, return what we got so far when
* running out of space.
*/
if (error == -ENOSPC && bma.nallocs)
break;
goto error0;
if (bma.blkno == NULLFSBLOCK)
break;
}
/*
* If this is a CoW allocation, record the data in
@ -4477,7 +4522,6 @@ xfs_bmapi_write(
if (!xfs_iext_next_extent(ifp, &bma.icur, &bma.got))
eof = true;
}
*nmap = n;
error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
whichfork);
@ -4488,7 +4532,22 @@ xfs_bmapi_write(
ifp->if_nextents > XFS_IFORK_MAXEXT(ip, whichfork));
xfs_bmapi_finish(&bma, whichfork, 0);
xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
orig_nmap, *nmap);
orig_nmap, n);
/*
* When converting delayed allocations, xfs_bmapi_allocate ignores
* the passed in bno and always converts from the start of the found
* delalloc extent.
*
* To avoid a successful return with *nmap set to 0, return the magic
* -ENOSR error code for this particular case so that the caller can
* handle it.
*/
if (!n) {
ASSERT(bma.nallocs >= *nmap);
return -ENOSR;
}
*nmap = n;
return 0;
error0:
xfs_bmapi_finish(&bma, whichfork, error);
@ -4501,8 +4560,8 @@ error0:
* invocations to allocate the target offset if a large enough physical extent
* is not available.
*/
int
xfs_bmapi_convert_delalloc(
static int
xfs_bmapi_convert_one_delalloc(
struct xfs_inode *ip,
int whichfork,
xfs_off_t offset,
@ -4559,7 +4618,8 @@ xfs_bmapi_convert_delalloc(
if (!isnullstartblock(bma.got.br_startblock)) {
xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags,
xfs_iomap_inode_sequence(ip, flags));
*seq = READ_ONCE(ifp->if_seq);
if (seq)
*seq = READ_ONCE(ifp->if_seq);
goto out_trans_cancel;
}
@ -4595,9 +4655,6 @@ xfs_bmapi_convert_delalloc(
if (error)
goto out_finish;
error = -ENOSPC;
if (WARN_ON_ONCE(bma.blkno == NULLFSBLOCK))
goto out_finish;
error = -EFSCORRUPTED;
if (WARN_ON_ONCE(!xfs_valid_startblock(ip, bma.got.br_startblock)))
goto out_finish;
@ -4608,7 +4665,8 @@ xfs_bmapi_convert_delalloc(
ASSERT(!isnullstartblock(bma.got.br_startblock));
xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags,
xfs_iomap_inode_sequence(ip, flags));
*seq = READ_ONCE(ifp->if_seq);
if (seq)
*seq = READ_ONCE(ifp->if_seq);
if (whichfork == XFS_COW_FORK)
xfs_refcount_alloc_cow_extent(tp, bma.blkno, bma.length);
@ -4631,6 +4689,36 @@ out_trans_cancel:
return error;
}
/*
* Pass in a delalloc extent and convert it to real extents, return the real
* extent that maps offset_fsb in iomap.
*/
int
xfs_bmapi_convert_delalloc(
struct xfs_inode *ip,
int whichfork,
loff_t offset,
struct iomap *iomap,
unsigned int *seq)
{
int error;
/*
* Attempt to allocate whatever delalloc extent currently backs offset
* and put the result into iomap. Allocate in a loop because it may
* take several attempts to allocate real blocks for a contiguous
* delalloc extent if free space is sufficiently fragmented.
*/
do {
error = xfs_bmapi_convert_one_delalloc(ip, whichfork, offset,
iomap, seq);
if (error)
return error;
} while (iomap->offset + iomap->length <= offset);
return 0;
}
int
xfs_bmapi_remap(
struct xfs_trans *tp,

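The comment added to xfs_bmapi_write() above documents the new failure contract (-ENOSPC when nothing could be allocated, -ENOSR when blocks were converted but only before the requested range), and xfs_bmapi_convert_delalloc() now loops until the returned mapping covers the requested offset. The toy userspace sketch below mimics only that caller-side loop; the allocator is invented and simply converts 4-block chunks from the start of the range, so it is in no way XFS code.

#include <errno.h>
#include <stdio.h>

struct map_example { long off; long len; };

/* Invented allocator: each call converts the next 4-block chunk starting
 * from block 0, returning -ENOSR while the converted chunk still ends
 * before the offset the caller asked for, and 0 once it covers it. */
static int alloc_example(long want_off, struct map_example *m)
{
    static long next;

    m->off = next;
    m->len = 4;
    next += 4;
    return (m->off + m->len > want_off) ? 0 : -ENOSR;
}

int main(void)
{
    struct map_example m;
    int err;

    /* Caller-side loop: retry with the same offset until it is covered. */
    do {
        err = alloc_example(8, &m);
        if (err && err != -ENOSR)
            return 1;
    } while (err == -ENOSR);

    printf("mapped [%ld, %ld)\n", m.off, m.off + m.len);
    return 0;
}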
View File

@ -2158,8 +2158,8 @@ xfs_da_grow_inode_int(
struct xfs_inode *dp = args->dp;
int w = args->whichfork;
xfs_rfsblock_t nblks = dp->i_nblocks;
struct xfs_bmbt_irec map, *mapp;
int nmap, error, got, i, mapi;
struct xfs_bmbt_irec map, *mapp = &map;
int nmap, error, got, i, mapi = 1;
/*
* Find a spot in the file space to put the new block.
@ -2175,14 +2175,7 @@ xfs_da_grow_inode_int(
error = xfs_bmapi_write(tp, dp, *bno, count,
xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG,
args->total, &map, &nmap);
if (error)
return error;
ASSERT(nmap <= 1);
if (nmap == 1) {
mapp = &map;
mapi = 1;
} else if (nmap == 0 && count > 1) {
if (error == -ENOSPC && count > 1) {
xfs_fileoff_t b;
int c;
@ -2199,16 +2192,13 @@ xfs_da_grow_inode_int(
args->total, &mapp[mapi], &nmap);
if (error)
goto out_free_map;
if (nmap < 1)
break;
mapi += nmap;
b = mapp[mapi - 1].br_startoff +
mapp[mapi - 1].br_blockcount;
}
} else {
mapi = 0;
mapp = NULL;
}
if (error)
goto out_free_map;
/*
* Count the blocks we got, make sure it matches the total.

View File

@ -703,8 +703,13 @@ struct xfs_attr3_leafblock {
#define XFS_ATTR_ROOT (1u << XFS_ATTR_ROOT_BIT)
#define XFS_ATTR_SECURE (1u << XFS_ATTR_SECURE_BIT)
#define XFS_ATTR_INCOMPLETE (1u << XFS_ATTR_INCOMPLETE_BIT)
#define XFS_ATTR_NSP_ONDISK_MASK (XFS_ATTR_ROOT | XFS_ATTR_SECURE)
#define XFS_ATTR_ONDISK_MASK (XFS_ATTR_NSP_ONDISK_MASK | \
XFS_ATTR_LOCAL | \
XFS_ATTR_INCOMPLETE)
/*
* Alignment for namelist and valuelist entries (since they are mixed
* there can be only one alignment value)

View File

@ -371,10 +371,13 @@ xfs_dinode_verify_fork(
/*
* A directory small enough to fit in the inode must be stored
* in local format. The directory sf <-> extents conversion
* code updates the directory size accordingly.
* code updates the directory size accordingly. Directories
* being truncated have zero size and are not subject to this
* check.
*/
if (S_ISDIR(mode)) {
if (be64_to_cpu(dip->di_size) <= fork_size &&
if (dip->di_size &&
be64_to_cpu(dip->di_size) <= fork_size &&
fork_format != XFS_DINODE_FMT_LOCAL)
return __this_address;
}
@ -512,9 +515,19 @@ xfs_dinode_verify(
if (mode && xfs_mode_to_ftype(mode) == XFS_DIR3_FT_UNKNOWN)
return __this_address;
/* No zero-length symlinks/dirs. */
if ((S_ISLNK(mode) || S_ISDIR(mode)) && di_size == 0)
return __this_address;
/*
* No zero-length symlinks/dirs unless they're unlinked and hence being
* inactivated.
*/
if ((S_ISLNK(mode) || S_ISDIR(mode)) && di_size == 0) {
if (dip->di_version > 1) {
if (dip->di_nlink)
return __this_address;
} else {
if (dip->di_onlink)
return __this_address;
}
}
fa = xfs_dinode_verify_nrext64(mp, dip);
if (fa)

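The verifier change above permits a zero-length directory or symlink only when the inode's link count is also zero, i.e. the inode is unlinked and being inactivated. A plain userspace sketch of that relaxed rule follows; the structure and field names are illustrative and ignore the v1 di_onlink distinction handled in the real code.

#include <stdbool.h>
#include <stdio.h>

struct dinode_example {
    unsigned long long size;
    unsigned int nlink;
    bool dir_or_symlink;
};

static bool size_check_example(const struct dinode_example *d)
{
    /* zero length is only acceptable on an unlinked inode */
    if (d->dir_or_symlink && d->size == 0 && d->nlink != 0)
        return false;
    return true;
}

int main(void)
{
    struct dinode_example unlinked = { .size = 0, .nlink = 0, .dir_or_symlink = true };
    struct dinode_example corrupt  = { .size = 0, .nlink = 2, .dir_or_symlink = true };

    printf("%d %d\n", size_check_example(&unlinked), size_check_example(&corrupt));
    return 0;
}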
View File

@ -1031,11 +1031,12 @@ xfs_log_sb(
* and hence we don't need have to update it here.
*/
if (xfs_has_lazysbcount(mp)) {
mp->m_sb.sb_icount = percpu_counter_sum(&mp->m_icount);
mp->m_sb.sb_icount = percpu_counter_sum_positive(&mp->m_icount);
mp->m_sb.sb_ifree = min_t(uint64_t,
percpu_counter_sum(&mp->m_ifree),
percpu_counter_sum_positive(&mp->m_ifree),
mp->m_sb.sb_icount);
mp->m_sb.sb_fdblocks = percpu_counter_sum(&mp->m_fdblocks);
mp->m_sb.sb_fdblocks =
percpu_counter_sum_positive(&mp->m_fdblocks);
}
xfs_sb_to_disk(bp->b_addr, &mp->m_sb);

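xfs_log_sb() above switches to percpu_counter_sum_positive() because a per-cpu counter summed while updates are still in flight on other CPUs can transiently read slightly negative, and that raw value must never be written into the on-disk superblock. A minimal sketch of the clamp, with a plain array standing in for the per-cpu deltas:

#include <stdint.h>
#include <stdio.h>

static int64_t sum_example(const int64_t *percpu, int n)
{
    int64_t s = 0;

    for (int i = 0; i < n; i++)
        s += percpu[i];
    return s;
}

/* clamp negative transients to zero, like percpu_counter_sum_positive() */
static uint64_t sum_positive_example(const int64_t *percpu, int n)
{
    int64_t s = sum_example(percpu, n);

    return s < 0 ? 0 : (uint64_t)s;
}

int main(void)
{
    /* The deltas visible at this instant sum to -3 even though the logical
     * counter value never went negative; writing the raw sum to disk would
     * corrupt the superblock field. */
    int64_t deltas[2] = { -103, 100 };

    printf("raw sum     = %lld\n", (long long)sum_example(deltas, 2));
    printf("clamped sum = %llu\n", (unsigned long long)sum_positive_example(deltas, 2));
    return 0;
}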
View File

@ -182,32 +182,23 @@ xchk_xattr_listent(
return;
}
if (flags & ~XFS_ATTR_ONDISK_MASK) {
xchk_fblock_set_corrupt(sx->sc, XFS_ATTR_FORK, args.blkno);
goto fail_xref;
}
if (flags & XFS_ATTR_INCOMPLETE) {
/* Incomplete attr key, just mark the inode for preening. */
xchk_ino_set_preen(sx->sc, context->dp->i_ino);
return;
}
/* Only one namespace bit allowed. */
if (hweight32(flags & XFS_ATTR_NSP_ONDISK_MASK) > 1) {
xchk_fblock_set_corrupt(sx->sc, XFS_ATTR_FORK, args.blkno);
goto fail_xref;
}
/* Does this name make sense? */
if (!xfs_attr_namecheck(name, namelen)) {
if (!xfs_attr_namecheck(flags, name, namelen)) {
xchk_fblock_set_corrupt(sx->sc, XFS_ATTR_FORK, args.blkno);
goto fail_xref;
}
/*
* Local xattr values are stored in the attr leaf block, so we don't
* need to retrieve the value from a remote block to detect corruption
* problems.
*/
if (flags & XFS_ATTR_LOCAL)
goto fail_xref;
/*
* Try to allocate enough memory to extract the attr value. If that
* doesn't work, we overload the seen_enough variable to convey
@ -223,6 +214,11 @@ xchk_xattr_listent(
args.value = ab->value;
/*
* Get the attr value to ensure that lookup can find this attribute
* through the dabtree indexing and that remote value retrieval also
* works correctly.
*/
error = xfs_attr_get_ilocked(&args);
/* ENODATA means the hash lookup failed and the attr is bad */
if (error == -ENODATA)
@ -463,7 +459,6 @@ xchk_xattr_rec(
xfs_dahash_t hash;
int nameidx;
int hdrsize;
unsigned int badflags;
int error;
ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC);
@ -493,10 +488,15 @@ xchk_xattr_rec(
/* Retrieve the entry and check it. */
hash = be32_to_cpu(ent->hashval);
badflags = ~(XFS_ATTR_LOCAL | XFS_ATTR_ROOT | XFS_ATTR_SECURE |
XFS_ATTR_INCOMPLETE);
if ((ent->flags & badflags) != 0)
if (ent->flags & ~XFS_ATTR_ONDISK_MASK) {
xchk_da_set_corrupt(ds, level);
return 0;
}
if (!xfs_attr_check_namespace(ent->flags)) {
xchk_da_set_corrupt(ds, level);
return 0;
}
if (ent->flags & XFS_ATTR_LOCAL) {
lentry = (struct xfs_attr_leaf_name_local *)
(((char *)bp->b_addr) + nameidx);
@ -561,6 +561,15 @@ xchk_xattr_check_sf(
break;
}
/*
* Shortform entries do not set LOCAL or INCOMPLETE, so the
* only valid flag bits here are for namespaces.
*/
if (sfe->flags & ~XFS_ATTR_NSP_ONDISK_MASK) {
xchk_fblock_set_corrupt(sc, XFS_ATTR_FORK, 0);
break;
}
if (!xchk_xattr_set_map(sc, ab->usedmap,
(char *)sfe - (char *)sf,
sizeof(struct xfs_attr_sf_entry))) {

View File

@ -735,7 +735,7 @@ xchk_iget(
{
ASSERT(sc->tp != NULL);
return xfs_iget(sc->mp, sc->tp, inum, XFS_IGET_UNTRUSTED, 0, ipp);
return xfs_iget(sc->mp, sc->tp, inum, XCHK_IGET_FLAGS, 0, ipp);
}
/*
@ -786,8 +786,8 @@ again:
if (error)
return error;
error = xfs_iget(mp, tp, inum,
XFS_IGET_NORETRY | XFS_IGET_UNTRUSTED, 0, ipp);
error = xfs_iget(mp, tp, inum, XFS_IGET_NORETRY | XCHK_IGET_FLAGS, 0,
ipp);
if (error == -EAGAIN) {
/*
* The inode may be in core but temporarily unavailable and may
@ -994,12 +994,6 @@ xchk_irele(
spin_lock(&VFS_I(ip)->i_lock);
VFS_I(ip)->i_state &= ~I_DONTCACHE;
spin_unlock(&VFS_I(ip)->i_lock);
} else if (atomic_read(&VFS_I(ip)->i_count) == 1) {
/*
* If this is the last reference to the inode and the caller
* permits it, set DONTCACHE to avoid thrashing.
*/
d_mark_dontcache(VFS_I(ip));
}
xfs_irele(ip);

View File

@ -17,6 +17,13 @@ struct xfs_scrub;
#define XCHK_GFP_FLAGS ((__force gfp_t)(GFP_KERNEL | __GFP_NOWARN | \
__GFP_RETRY_MAYFAIL))
/*
* For opening files by handle for fsck operations, we don't trust the inumber
* or the allocation state; therefore, perform an untrusted lookup. We don't
* want these inodes to pollute the cache, so mark them for immediate removal.
*/
#define XCHK_IGET_FLAGS (XFS_IGET_UNTRUSTED | XFS_IGET_DONTCACHE)
/* Type info and names for the scrub types. */
enum xchk_type {
ST_NONE = 1, /* disabled */

View File

@ -233,45 +233,6 @@ xfs_imap_valid(
return true;
}
/*
* Pass in a delalloc extent and convert it to real extents, return the real
* extent that maps offset_fsb in wpc->iomap.
*
* The current page is held locked so nothing could have removed the block
* backing offset_fsb, although it could have moved from the COW to the data
* fork by another thread.
*/
static int
xfs_convert_blocks(
struct iomap_writepage_ctx *wpc,
struct xfs_inode *ip,
int whichfork,
loff_t offset)
{
int error;
unsigned *seq;
if (whichfork == XFS_COW_FORK)
seq = &XFS_WPC(wpc)->cow_seq;
else
seq = &XFS_WPC(wpc)->data_seq;
/*
* Attempt to allocate whatever delalloc extent currently backs offset
* and put the result into wpc->iomap. Allocate in a loop because it
* may take several attempts to allocate real blocks for a contiguous
* delalloc extent if free space is sufficiently fragmented.
*/
do {
error = xfs_bmapi_convert_delalloc(ip, whichfork, offset,
&wpc->iomap, seq);
if (error)
return error;
} while (wpc->iomap.offset + wpc->iomap.length <= offset);
return 0;
}
static int
xfs_map_blocks(
struct iomap_writepage_ctx *wpc,
@ -289,6 +250,7 @@ xfs_map_blocks(
struct xfs_iext_cursor icur;
int retries = 0;
int error = 0;
unsigned int *seq;
if (xfs_is_shutdown(mp))
return -EIO;
@ -386,7 +348,19 @@ retry:
trace_xfs_map_blocks_found(ip, offset, count, whichfork, &imap);
return 0;
allocate_blocks:
error = xfs_convert_blocks(wpc, ip, whichfork, offset);
/*
* Convert a delalloc extent to a real one. The current page is held
* locked so nothing could have removed the block backing offset_fsb,
* although it could have moved from the COW to the data fork by another
* thread.
*/
if (whichfork == XFS_COW_FORK)
seq = &XFS_WPC(wpc)->cow_seq;
else
seq = &XFS_WPC(wpc)->data_seq;
error = xfs_bmapi_convert_delalloc(ip, whichfork, offset,
&wpc->iomap, seq);
if (error) {
/*
* If we failed to find the extent in the COW fork we might have

View File

@ -510,6 +510,9 @@ xfs_attri_validate(
unsigned int op = attrp->alfi_op_flags &
XFS_ATTRI_OP_FLAGS_TYPE_MASK;
if (!xfs_sb_version_haslogxattrs(&mp->m_sb))
return false;
if (attrp->__pad != 0)
return false;
@ -519,6 +522,10 @@ xfs_attri_validate(
if (attrp->alfi_attr_filter & ~XFS_ATTRI_FILTER_MASK)
return false;
if (!xfs_attr_check_namespace(attrp->alfi_attr_filter &
XFS_ATTR_NSP_ONDISK_MASK))
return false;
/* alfi_op_flags should be either a set or remove */
switch (op) {
case XFS_ATTRI_OP_FLAGS_SET:
@ -569,7 +576,8 @@ xfs_attri_item_recover(
*/
attrp = &attrip->attri_format;
if (!xfs_attri_validate(mp, attrp) ||
!xfs_attr_namecheck(nv->name.i_addr, nv->name.i_len))
!xfs_attr_namecheck(attrp->alfi_attr_filter, nv->name.i_addr,
nv->name.i_len))
return -EFSCORRUPTED;
error = xlog_recover_iget(mp, attrp->alfi_ino, &ip);
@ -602,8 +610,6 @@ xfs_attri_item_recover(
args->op_flags = XFS_DA_OP_RECOVERY | XFS_DA_OP_OKNOENT |
XFS_DA_OP_LOGGED;
ASSERT(xfs_sb_version_haslogxattrs(&mp->m_sb));
switch (attr->xattri_op_flags) {
case XFS_ATTRI_OP_FLAGS_SET:
case XFS_ATTRI_OP_FLAGS_REPLACE:
@ -717,48 +723,112 @@ xlog_recover_attri_commit_pass2(
const void *attr_value = NULL;
const void *attr_name;
size_t len;
attri_formatp = item->ri_buf[0].i_addr;
attr_name = item->ri_buf[1].i_addr;
unsigned int op, i = 0;
/* Validate xfs_attri_log_format before the large memory allocation */
len = sizeof(struct xfs_attri_log_format);
if (item->ri_buf[0].i_len != len) {
if (item->ri_buf[i].i_len != len) {
XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
return -EFSCORRUPTED;
}
attri_formatp = item->ri_buf[i].i_addr;
if (!xfs_attri_validate(mp, attri_formatp)) {
XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
attri_formatp, len);
return -EFSCORRUPTED;
}
/* Check the number of log iovecs makes sense for the op code. */
op = attri_formatp->alfi_op_flags & XFS_ATTRI_OP_FLAGS_TYPE_MASK;
switch (op) {
case XFS_ATTRI_OP_FLAGS_SET:
case XFS_ATTRI_OP_FLAGS_REPLACE:
/* Log item, attr name, attr value */
if (item->ri_total != 3) {
XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
attri_formatp, len);
return -EFSCORRUPTED;
}
break;
case XFS_ATTRI_OP_FLAGS_REMOVE:
/* Log item, attr name */
if (item->ri_total != 2) {
XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
attri_formatp, len);
return -EFSCORRUPTED;
}
break;
default:
XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
attri_formatp, len);
return -EFSCORRUPTED;
}
i++;
/* Validate the attr name */
if (item->ri_buf[1].i_len !=
if (item->ri_buf[i].i_len !=
xlog_calc_iovec_len(attri_formatp->alfi_name_len)) {
XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
attri_formatp, len);
return -EFSCORRUPTED;
}
if (!xfs_attr_namecheck(attr_name, attri_formatp->alfi_name_len)) {
attr_name = item->ri_buf[i].i_addr;
if (!xfs_attr_namecheck(attri_formatp->alfi_attr_filter, attr_name,
attri_formatp->alfi_name_len)) {
XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
item->ri_buf[1].i_addr, item->ri_buf[1].i_len);
attri_formatp, len);
return -EFSCORRUPTED;
}
i++;
/* Validate the attr value, if present */
if (attri_formatp->alfi_value_len != 0) {
if (item->ri_buf[2].i_len != xlog_calc_iovec_len(attri_formatp->alfi_value_len)) {
if (item->ri_buf[i].i_len != xlog_calc_iovec_len(attri_formatp->alfi_value_len)) {
XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
item->ri_buf[0].i_addr,
item->ri_buf[0].i_len);
return -EFSCORRUPTED;
}
attr_value = item->ri_buf[2].i_addr;
attr_value = item->ri_buf[i].i_addr;
i++;
}
/*
* Make sure we got the correct number of buffers for the operation
* that we just loaded.
*/
if (i != item->ri_total) {
XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
attri_formatp, len);
return -EFSCORRUPTED;
}
switch (op) {
case XFS_ATTRI_OP_FLAGS_REMOVE:
/* Regular remove operations operate only on names. */
if (attr_value != NULL || attri_formatp->alfi_value_len != 0) {
XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
attri_formatp, len);
return -EFSCORRUPTED;
}
fallthrough;
case XFS_ATTRI_OP_FLAGS_SET:
case XFS_ATTRI_OP_FLAGS_REPLACE:
/*
* Regular xattr set/remove/replace operations require a name
* and do not take a newname. Values are optional for set and
* replace.
*/
if (attr_name == NULL || attri_formatp->alfi_name_len == 0) {
XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
attri_formatp, len);
return -EFSCORRUPTED;
}
break;
}
/*

View File

@ -82,7 +82,8 @@ xfs_attr_shortform_list(
(dp->i_af.if_bytes + sf->hdr.count * 16) < context->bufsize)) {
for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) {
if (XFS_IS_CORRUPT(context->dp->i_mount,
!xfs_attr_namecheck(sfe->nameval,
!xfs_attr_namecheck(sfe->flags,
sfe->nameval,
sfe->namelen)))
return -EFSCORRUPTED;
context->put_listent(context,
@ -120,7 +121,8 @@ xfs_attr_shortform_list(
for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) {
if (unlikely(
((char *)sfe < (char *)sf) ||
((char *)sfe >= ((char *)sf + dp->i_af.if_bytes)))) {
((char *)sfe >= ((char *)sf + dp->i_af.if_bytes)) ||
!xfs_attr_check_namespace(sfe->flags))) {
XFS_CORRUPTION_ERROR("xfs_attr_shortform_list",
XFS_ERRLEVEL_LOW,
context->dp->i_mount, sfe,
@ -174,7 +176,7 @@ xfs_attr_shortform_list(
cursor->offset = 0;
}
if (XFS_IS_CORRUPT(context->dp->i_mount,
!xfs_attr_namecheck(sbp->name,
!xfs_attr_namecheck(sbp->flags, sbp->name,
sbp->namelen))) {
error = -EFSCORRUPTED;
goto out;
@ -465,7 +467,8 @@ xfs_attr3_leaf_list_int(
}
if (XFS_IS_CORRUPT(context->dp->i_mount,
!xfs_attr_namecheck(name, namelen)))
!xfs_attr_namecheck(entry->flags, name,
namelen)))
return -EFSCORRUPTED;
context->put_listent(context, entry->flags,
name, namelen, valuelen);

View File

@ -636,13 +636,11 @@ out_unlock:
/*
* Test whether it is appropriate to check an inode for and free post EOF
* blocks. The 'force' parameter determines whether we should also consider
* regular files that are marked preallocated or append-only.
* blocks.
*/
bool
xfs_can_free_eofblocks(
struct xfs_inode *ip,
bool force)
struct xfs_inode *ip)
{
struct xfs_bmbt_irec imap;
struct xfs_mount *mp = ip->i_mount;
@ -676,11 +674,11 @@ xfs_can_free_eofblocks(
return false;
/*
* Do not free real preallocated or append-only files unless the file
* has delalloc blocks and we are forced to remove them.
* Only free real extents for inodes with persistent preallocations or
* the append-only flag.
*/
if (ip->i_diflags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
if (!force || ip->i_delayed_blks == 0)
if (ip->i_delayed_blks == 0)
return false;
/*
@ -734,6 +732,22 @@ xfs_free_eofblocks(
/* Wait on dio to ensure i_size has settled. */
inode_dio_wait(VFS_I(ip));
/*
* For preallocated files only free delayed allocations.
*
* Note that this means we also leave speculative preallocations in
* place for preallocated files.
*/
if (ip->i_diflags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)) {
if (ip->i_delayed_blks) {
xfs_bmap_punch_delalloc_range(ip,
round_up(XFS_ISIZE(ip), mp->m_sb.sb_blocksize),
LLONG_MAX);
}
xfs_inode_clear_eofblocks_tag(ip);
return 0;
}
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
if (error) {
ASSERT(xfs_is_shutdown(mp));
@ -868,33 +882,32 @@ xfs_alloc_file_space(
if (error)
goto error;
/*
* If the allocator cannot find a single free extent large
* enough to cover the start block of the requested range,
* xfs_bmapi_write will return -ENOSR.
*
* In that case we simply need to keep looping with the same
* startoffset_fsb so that one of the following allocations
* will eventually reach the requested range.
*/
error = xfs_bmapi_write(tp, ip, startoffset_fsb,
allocatesize_fsb, XFS_BMAPI_PREALLOC, 0, imapp,
&nimaps);
if (error)
goto error;
if (error) {
if (error != -ENOSR)
goto error;
error = 0;
} else {
startoffset_fsb += imapp->br_blockcount;
allocatesize_fsb -= imapp->br_blockcount;
}
ip->i_diflags |= XFS_DIFLAG_PREALLOC;
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
error = xfs_trans_commit(tp);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
if (error)
break;
/*
* If the allocator cannot find a single free extent large
* enough to cover the start block of the requested range,
* xfs_bmapi_write will return 0 but leave *nimaps set to 0.
*
* In that case we simply need to keep looping with the same
* startoffset_fsb so that one of the following allocations
* will eventually reach the requested range.
*/
if (nimaps) {
startoffset_fsb += imapp->br_blockcount;
allocatesize_fsb -= imapp->br_blockcount;
}
}
return error;
@ -1049,7 +1062,7 @@ xfs_prepare_shift(
* Trim eofblocks to avoid shifting uninitialized post-eof preallocation
* into the accessible region of the file.
*/
if (xfs_can_free_eofblocks(ip, true)) {
if (xfs_can_free_eofblocks(ip)) {
error = xfs_free_eofblocks(ip);
if (error)
return error;

View File

@ -63,7 +63,7 @@ int xfs_insert_file_space(struct xfs_inode *, xfs_off_t offset,
xfs_off_t len);
/* EOF block manipulation functions */
bool xfs_can_free_eofblocks(struct xfs_inode *ip, bool force);
bool xfs_can_free_eofblocks(struct xfs_inode *ip);
int xfs_free_eofblocks(struct xfs_inode *ip);
int xfs_swap_extents(struct xfs_inode *ip, struct xfs_inode *tip,

View File

@ -333,7 +333,6 @@ xfs_dquot_disk_alloc(
goto err_cancel;
ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
ASSERT(nmaps == 1);
ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
(map.br_startblock != HOLESTARTBLOCK));

View File

@ -1149,7 +1149,7 @@ xfs_inode_free_eofblocks(
}
*lockflags |= XFS_IOLOCK_EXCL;
if (xfs_can_free_eofblocks(ip, false))
if (xfs_can_free_eofblocks(ip))
return xfs_free_eofblocks(ip);
/* inode could be preallocated or append-only */

View File

@ -1469,7 +1469,7 @@ xfs_release(
if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL))
return 0;
if (xfs_can_free_eofblocks(ip, false)) {
if (xfs_can_free_eofblocks(ip)) {
/*
* Check if the inode is being opened, written and closed
* frequently and we have delayed allocation blocks outstanding
@ -1685,15 +1685,13 @@ xfs_inode_needs_inactive(
/*
* This file isn't being freed, so check if there are post-eof blocks
* to free. @force is true because we are evicting an inode from the
* cache. Post-eof blocks must be freed, lest we end up with broken
* free space accounting.
* to free.
*
* Note: don't bother with iolock here since lockdep complains about
* acquiring it in reclaim context. We have the only reference to the
* inode at this point anyways.
*/
return xfs_can_free_eofblocks(ip, true);
return xfs_can_free_eofblocks(ip);
}
/*
@ -1741,15 +1739,11 @@ xfs_inactive(
if (VFS_I(ip)->i_nlink != 0) {
/*
* force is true because we are evicting an inode from the
* cache. Post-eof blocks must be freed, lest we end up with
* broken free space accounting.
*
* Note: don't bother with iolock here since lockdep complains
* about acquiring it in reclaim context. We have the only
* reference to the inode at this point anyways.
*/
if (xfs_can_free_eofblocks(ip, true))
if (xfs_can_free_eofblocks(ip))
error = xfs_free_eofblocks(ip);
goto out;
@ -2329,11 +2323,26 @@ xfs_ifree_cluster(
* This buffer may not have been correctly initialised as we
* didn't read it from disk. That's not important because we are
* only using it to mark the buffer as stale in the log, and to
* attach stale cached inodes on it. That means it will never be
* dispatched for IO. If it is, we want to know about it, and we
* want it to fail. We can achieve this by adding a write
* verifier to the buffer.
* attach stale cached inodes on it.
*
* For the inode that triggered the cluster freeing, this
* attachment may occur in xfs_inode_item_precommit() after we
* have marked this buffer stale. If this buffer was not in
* memory before xfs_ifree_cluster() started, it will not be
* marked XBF_DONE and this will cause problems later in
* xfs_inode_item_precommit() when we trip over a (stale, !done)
* buffer attached to the transaction.
*
* Hence we have to mark the buffer as XBF_DONE here. This is
* safe because we are also marking the buffer as XBF_STALE and
* XFS_BLI_STALE. That means it will never be dispatched for
* IO and it won't be unlocked until the cluster freeing has
* been committed to the journal and the buffer unpinned. If it
* is written, we want to know about it, and we want it to
* fail. We can achieve this by adding a write verifier to the
* buffer.
*/
bp->b_flags |= XBF_DONE;
bp->b_ops = &xfs_inode_buf_ops;
/*

View File

@ -317,14 +317,6 @@ xfs_iomap_write_direct(
if (error)
goto out_unlock;
/*
* Copy any maps to caller's array and return any error.
*/
if (nimaps == 0) {
error = -ENOSPC;
goto out_unlock;
}
if (unlikely(!xfs_valid_startblock(ip, imap->br_startblock)))
error = xfs_alert_fsblock_zero(ip, imap);
@ -1013,6 +1005,24 @@ xfs_buffered_write_iomap_begin(
goto out_unlock;
}
/*
* For zeroing, trim a delalloc extent that extends beyond the EOF
* block. If it starts beyond the EOF block, convert it to an
* unwritten extent.
*/
if ((flags & IOMAP_ZERO) && imap.br_startoff <= offset_fsb &&
isnullstartblock(imap.br_startblock)) {
xfs_fileoff_t eof_fsb = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
if (offset_fsb >= eof_fsb)
goto convert_delay;
if (end_fsb > eof_fsb) {
end_fsb = eof_fsb;
xfs_trim_extent(&imap, offset_fsb,
end_fsb - offset_fsb);
}
}
/*
* Search the COW fork extent list even if we did not find a data fork
* extent. This serves two purposes: first this implements the
@ -1117,47 +1127,48 @@ xfs_buffered_write_iomap_begin(
}
}
retry:
error = xfs_bmapi_reserve_delalloc(ip, allocfork, offset_fsb,
end_fsb - offset_fsb, prealloc_blocks,
allocfork == XFS_DATA_FORK ? &imap : &cmap,
allocfork == XFS_DATA_FORK ? &icur : &ccur,
allocfork == XFS_DATA_FORK ? eof : cow_eof);
switch (error) {
case 0:
break;
case -ENOSPC:
case -EDQUOT:
/* retry without any preallocation */
trace_xfs_delalloc_enospc(ip, offset, count);
if (prealloc_blocks) {
prealloc_blocks = 0;
goto retry;
}
fallthrough;
default:
goto out_unlock;
}
if (allocfork == XFS_COW_FORK) {
error = xfs_bmapi_reserve_delalloc(ip, allocfork, offset_fsb,
end_fsb - offset_fsb, prealloc_blocks, &cmap,
&ccur, cow_eof);
if (error)
goto out_unlock;
trace_xfs_iomap_alloc(ip, offset, count, allocfork, &cmap);
goto found_cow;
}
error = xfs_bmapi_reserve_delalloc(ip, allocfork, offset_fsb,
end_fsb - offset_fsb, prealloc_blocks, &imap, &icur,
eof);
if (error)
goto out_unlock;
/*
* Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch
* them out if the write happens to fail.
*/
seq = xfs_iomap_inode_sequence(ip, IOMAP_F_NEW);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
xfs_iunlock(ip, lockmode);
trace_xfs_iomap_alloc(ip, offset, count, allocfork, &imap);
return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, IOMAP_F_NEW, seq);
found_imap:
seq = xfs_iomap_inode_sequence(ip, 0);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
xfs_iunlock(ip, lockmode);
return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, 0, seq);
convert_delay:
xfs_iunlock(ip, lockmode);
truncate_pagecache(inode, offset);
error = xfs_bmapi_convert_delalloc(ip, XFS_DATA_FORK, offset,
iomap, NULL);
if (error)
return error;
trace_xfs_iomap_alloc(ip, offset, count, XFS_DATA_FORK, &imap);
return 0;
found_cow:
seq = xfs_iomap_inode_sequence(ip, 0);
if (imap.br_startoff <= offset_fsb) {
@ -1165,17 +1176,17 @@ found_cow:
if (error)
goto out_unlock;
seq = xfs_iomap_inode_sequence(ip, IOMAP_F_SHARED);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
xfs_iunlock(ip, lockmode);
return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags,
IOMAP_F_SHARED, seq);
}
xfs_trim_extent(&cmap, offset_fsb, imap.br_startoff - offset_fsb);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
xfs_iunlock(ip, lockmode);
return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags, 0, seq);
out_unlock:
xfs_iunlock(ip, XFS_ILOCK_EXCL);
xfs_iunlock(ip, lockmode);
return error;
}

View File

@ -429,13 +429,6 @@ xfs_reflink_fill_cow_hole(
if (error)
return error;
/*
* Allocation succeeded but the requested range was not even partially
* satisfied? Bail out!
*/
if (nimaps == 0)
return -ENOSPC;
convert:
return xfs_reflink_convert_unwritten(ip, imap, cmap, convert_now);
@ -498,13 +491,6 @@ xfs_reflink_fill_delalloc(
error = xfs_trans_commit(tp);
if (error)
return error;
/*
* Allocation succeeded but the requested range was not even
* partially satisfied? Bail out!
*/
if (nimaps == 0)
return -ENOSPC;
} while (cmap->br_startoff + cmap->br_blockcount <= imap->br_startoff);
return xfs_reflink_convert_unwritten(ip, imap, cmap, convert_now);
@ -730,12 +716,6 @@ xfs_reflink_end_cow_extent(
int nmaps;
int error;
/* No COW extents? That's easy! */
if (ifp->if_bytes == 0) {
*offset_fsb = end_fsb;
return 0;
}
resblks = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0,
XFS_TRANS_RESERVE, &tp);

View File

@ -840,8 +840,6 @@ xfs_growfs_rt_alloc(
nmap = 1;
error = xfs_bmapi_write(tp, ip, oblocks, nblocks - oblocks,
XFS_BMAPI_METADATA, 0, &map, &nmap);
if (!error && nmap < 1)
error = -ENOSPC;
if (error)
goto out_trans_cancel;
/*

View File

@ -59,7 +59,8 @@ static inline int enetc_mdio_read_c45(struct mii_bus *bus, int phy_id,
static inline int enetc_mdio_write_c45(struct mii_bus *bus, int phy_id,
int devad, int regnum, u16 value)
{ return -EINVAL; }
struct enetc_hw *enetc_hw_alloc(struct device *dev, void __iomem *port_regs)
static inline struct enetc_hw *enetc_hw_alloc(struct device *dev,
void __iomem *port_regs)
{ return ERR_PTR(-EINVAL); }
#endif

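The enetc fix above adds the missing static inline to the stub used when the MDIO support is compiled out; without it, every translation unit including the header emits an external definition of the function. A self-contained illustration of that header-stub pattern (FEATURE_EXAMPLE and do_thing_example() are invented names):

#include <stdio.h>

#ifdef FEATURE_EXAMPLE
int do_thing_example(int x);               /* real implementation lives in a .c file */
#else
static inline int do_thing_example(int x)  /* compiled-out stub, safe in a header */
{
    (void)x;
    return -22;                            /* -EINVAL-style failure */
}
#endif

int main(void)
{
    printf("%d\n", do_thing_example(1));
    return 0;
}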
View File

@ -58,10 +58,12 @@ struct its_vpe {
bool enabled;
bool group;
} sgi_config[16];
atomic_t vmapp_count;
};
};
/* Track the VPE being mapped */
atomic_t vmapp_count;
/*
* Ensures mutual exclusion between affinity setting of the
* vPE and vLPI operations using vpe->col_idx.

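The its_vpe change above moves vmapp_count out of the GICv4.1-specific part of the union so that it can be tracked for both GICv4.0 and GICv4.1. A hedged sketch of why that matters follows: a counter kept inside the union shares storage with the other variant's fields, so updating it from common code corrupts them. The two layouts below are purely illustrative, not the real structure.

#include <stdio.h>

struct vpe_in_union_example {
    union {
        struct { unsigned long vpe_addr; } v40;
        struct { int vmapp_count; } v41;       /* aliases v40.vpe_addr */
    };
};

struct vpe_split_example {
    union {
        struct { unsigned long vpe_addr; } v40;
        struct { char sgi_group[16]; } v41;
    };
    int vmapp_count;                           /* valid for both variants */
};

int main(void)
{
    struct vpe_in_union_example a;
    struct vpe_split_example b;

    printf("in-union:  counter shares storage with v4.0 field: %d\n",
           (void *)&a.v41.vmapp_count == (void *)&a.v40.vpe_addr);
    printf("split out: counter has its own storage:            %d\n",
           (void *)&b.vmapp_count != (void *)&b.v40.vpe_addr);
    return 0;
}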
View File

@ -173,7 +173,13 @@
/* use ioctl encoding for uring command */
#define UBLK_F_CMD_IOCTL_ENCODE (1UL << 6)
/* Copy between request and user buffer by pread()/pwrite() */
/*
* Copy between request and user buffer by pread()/pwrite()
*
* Not available for UBLK_F_UNPRIVILEGED_DEV, otherwise userspace may
* deceive us by not filling the request buffer, and uninitialized
* kernel data may then be leaked.
*/
#define UBLK_F_USER_COPY (1UL << 7)
/*

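The expanded UBLK_F_USER_COPY comment above explains why the flag cannot be offered to unprivileged devices: userspace could leave the request buffer unfilled and then read back stale kernel memory. A sketch of a compatibility check along those lines follows; the UBLK_F_UNPRIVILEGED_DEV bit value is assumed here, and the real validation lives in the ublk driver rather than in userspace.

#include <errno.h>
#include <stdio.h>

#define UBLK_F_UNPRIVILEGED_DEV_EX  (1UL << 5)  /* assumed value, see the uapi header */
#define UBLK_F_USER_COPY_EX         (1UL << 7)

static int validate_flags_example(unsigned long flags)
{
    /* an unprivileged device must not be allowed to use user copy */
    if ((flags & UBLK_F_UNPRIVILEGED_DEV_EX) && (flags & UBLK_F_USER_COPY_EX))
        return -EINVAL;
    return 0;
}

int main(void)
{
    printf("%d\n", validate_flags_example(UBLK_F_USER_COPY_EX));
    printf("%d\n", validate_flags_example(UBLK_F_UNPRIVILEGED_DEV_EX | UBLK_F_USER_COPY_EX));
    return 0;
}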
View File

@ -262,7 +262,14 @@ static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
struct io_rings *r = ctx->rings;
return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
/*
* SQPOLL must use the actual sqring head, as using the cached_sq_head
* is race prone if the SQPOLL thread has grabbed entries but not yet
* committed them to the ring. For !SQPOLL, this doesn't matter, but
* since this helper is just used for SQPOLL sqring waits (or POLLOUT),
* just read the actual sqring head unconditionally.
*/
return READ_ONCE(r->sq.tail) - READ_ONCE(r->sq.head) == ctx->sq_entries;
}
static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)

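The io_sqring_full() change above compares the tail against the shared sq.head rather than the kernel's cached head, because the cached value can run ahead of what has actually been committed back to the ring. The userspace sketch below only models that stale-view problem; the ring layout and numbers are invented.

#include <stdbool.h>
#include <stdio.h>

struct ring_example {
    unsigned shared_head;  /* head value visible in the shared ring */
    unsigned cached_head;  /* private snapshot advanced as entries are grabbed */
    unsigned tail;
    unsigned entries;
};

static bool full_vs_cached(const struct ring_example *r)
{
    return r->tail - r->cached_head == r->entries;
}

static bool full_vs_shared(const struct ring_example *r)
{
    return r->tail - r->shared_head == r->entries;
}

int main(void)
{
    /* 8-entry ring: 4 entries were grabbed (cached head advanced) but not
     * yet committed back, so the shared head is still 0. */
    struct ring_example r = {
        .shared_head = 0, .cached_head = 4, .tail = 8, .entries = 8,
    };

    printf("cached view says full: %d\n", full_vs_cached(&r));  /* 0 - stale answer */
    printf("shared view says full: %d\n", full_vs_shared(&r));  /* 1 */
    return 0;
}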
View File

@ -299,6 +299,9 @@ static int pc_clock_settime(clockid_t id, const struct timespec64 *ts)
goto out;
}
if (!timespec64_valid_strict(ts))
return -EINVAL;
if (cd.clk->ops.clock_settime)
err = cd.clk->ops.clock_settime(cd.clk, ts);
else

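The pc_clock_settime() fix above validates the timespec before it can reach a driver's clock_settime hook. Below is a plain userspace analogue of that guard; timespec64_valid_strict() additionally bounds tv_sec against KTIME_SEC_MAX, which this sketch leaves out, and clock_settime_example() is an invented stand-in.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool timespec_valid_example(const struct timespec *ts)
{
    if (ts->tv_sec < 0)
        return false;
    if (ts->tv_nsec < 0 || ts->tv_nsec >= 1000000000L)
        return false;
    return true;
}

static int clock_settime_example(const struct timespec *ts)
{
    if (!timespec_valid_example(ts))
        return -EINVAL;   /* never forward garbage to the device hook */
    /* ... call the device's settime hook here ... */
    return 0;
}

int main(void)
{
    struct timespec bad  = { .tv_sec = 1, .tv_nsec = 2000000000L };
    struct timespec good = { .tv_sec = 1, .tv_nsec = 1 };

    printf("%d %d\n", clock_settime_example(&bad), clock_settime_example(&good));
    return 0;
}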
View File

@ -2228,6 +2228,8 @@ static inline struct maple_enode *mte_node_or_none(struct maple_enode *enode)
/*
* mas_wr_node_walk() - Find the correct offset for the index in the @mas.
* If @mas->index cannot be found within the containing
* node, we traverse to the last entry in the node.
* @wr_mas: The maple write state
*
* Uses mas_slot_locked() and does not need to worry about dead nodes.
@ -3643,7 +3645,7 @@ static bool mas_wr_walk(struct ma_wr_state *wr_mas)
return true;
}
static bool mas_wr_walk_index(struct ma_wr_state *wr_mas)
static void mas_wr_walk_index(struct ma_wr_state *wr_mas)
{
struct ma_state *mas = wr_mas->mas;
@ -3652,11 +3654,9 @@ static bool mas_wr_walk_index(struct ma_wr_state *wr_mas)
wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
mas->offset);
if (ma_is_leaf(wr_mas->type))
return true;
return;
mas_wr_walk_traverse(wr_mas);
}
return true;
}
/*
* mas_extend_spanning_null() - Extend a store of a %NULL to include surrounding %NULLs.
@ -3892,8 +3892,8 @@ static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas)
memset(&b_node, 0, sizeof(struct maple_big_node));
/* Copy l_mas and store the value in b_node. */
mas_store_b_node(&l_wr_mas, &b_node, l_wr_mas.node_end);
/* Copy r_mas into b_node. */
if (r_mas.offset <= r_wr_mas.node_end)
/* Copy r_mas into b_node if there is anything to copy. */
if (r_mas.max > r_mas.last)
mas_mab_cp(&r_mas, r_mas.offset, r_wr_mas.node_end,
&b_node, b_node.b_end + 1);
else

Some files were not shown because too many files have changed in this diff.