Merge linux 6.6.37

Conflicts:
	drivers/irqchip/irq-loongson-eiointc.c
	drivers/irqchip/irq-loongson-liointc.c
Jianping Liu 2024-07-09 16:00:26 +08:00
commit 11251a6ce3
193 changed files with 1472 additions and 908 deletions

View File

@ -128,7 +128,7 @@ executed to make module versioning work.
modules_install
Install the external module(s). The default location is
/lib/modules/<kernel_release>/extra/, but a prefix may
/lib/modules/<kernel_release>/updates/, but a prefix may
be added with INSTALL_MOD_PATH (discussed in section 5).
clean
@ -417,7 +417,7 @@ directory:
And external modules are installed in:
/lib/modules/$(KERNELRELEASE)/extra/
/lib/modules/$(KERNELRELEASE)/updates/
5.1 INSTALL_MOD_PATH
--------------------
@ -438,10 +438,10 @@ And external modules are installed in:
-------------------
External modules are by default installed to a directory under
/lib/modules/$(KERNELRELEASE)/extra/, but you may wish to
/lib/modules/$(KERNELRELEASE)/updates/, but you may wish to
locate modules for a specific functionality in a separate
directory. For this purpose, use INSTALL_MOD_DIR to specify an
alternative name to "extra."::
alternative name to "updates."::
$ make INSTALL_MOD_DIR=gandalf -C $KDIR \
M=$PWD modules_install

View File

@ -8,7 +8,7 @@ else
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 6
SUBLEVEL = 36
SUBLEVEL = 37
EXTRAVERSION =
NAME = Hurr durr I'ma ninja sloth

View File

@ -123,6 +123,7 @@
pinctrl-0 = <&hdmii2c_xfer>, <&hdmi_hpd>;
power-domains = <&power RK3066_PD_VIO>;
rockchip,grf = <&grf>;
#sound-dai-cells = <0>;
status = "disabled";
ports {

View File

@ -1982,28 +1982,21 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
/* If building the body of the JITed code fails somehow,
* we fall back to the interpretation.
*/
if (build_body(&ctx) < 0) {
image_ptr = NULL;
bpf_jit_binary_free(header);
prog = orig_prog;
goto out_imms;
}
if (build_body(&ctx) < 0)
goto out_free;
build_epilogue(&ctx);
/* 3.) Extra pass to validate JITed Code */
if (validate_code(&ctx)) {
image_ptr = NULL;
bpf_jit_binary_free(header);
prog = orig_prog;
goto out_imms;
}
if (validate_code(&ctx))
goto out_free;
flush_icache_range((u32)header, (u32)(ctx.target + ctx.idx));
if (bpf_jit_enable > 1)
/* there are 2 passes here */
bpf_jit_dump(prog->len, image_size, 2, ctx.target);
bpf_jit_binary_lock_ro(header);
if (bpf_jit_binary_lock_ro(header))
goto out_free;
prog->bpf_func = (void *)ctx.target;
prog->jited = 1;
prog->jited_len = image_size;
@ -2020,5 +2013,11 @@ out:
bpf_jit_prog_release_other(prog, prog == orig_prog ?
tmp : orig_prog);
return prog;
out_free:
image_ptr = NULL;
bpf_jit_binary_free(header);
prog = orig_prog;
goto out_imms;
}
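
Note on the hunk above: it folds the duplicated failure paths into a single out_free: label and stops ignoring the return value of bpf_jit_binary_lock_ro(), which can now fail. A minimal user-space sketch of the same goto-unwind shape; every helper name below is made up purely for illustration:

        #include <stdio.h>
        #include <stdlib.h>

        /* Illustrative stand-ins, not the kernel API. */
        static void *alloc_image(size_t n)     { return malloc(n); }
        static int   build_body(char *img)     { return img ? 0 : -1; }
        static int   lock_read_only(char *img) { (void)img; return 0; /* may fail */ }

        static int jit_compile(size_t len)
        {
                char *image = alloc_image(len);

                if (!image)
                        return -1;

                if (build_body(image))
                        goto out_free;          /* one label for every failure */
                if (lock_read_only(image))
                        goto out_free;          /* lock errors are no longer ignored */

                printf("jitted %zu bytes\n", len);
                return 0;

        out_free:
                free(image);                    /* shared cleanup, no duplication */
                return -1;
        }

        int main(void)
        {
                return jit_compile(64) ? EXIT_FAILURE : EXIT_SUCCESS;
        }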

View File

@ -5,6 +5,8 @@
*/
/dts-v1/;
#include <dt-bindings/leds/common.h>
#include "rk3308.dtsi"
/ {
@ -24,17 +26,21 @@
leds {
compatible = "gpio-leds";
pinctrl-names = "default";
pinctrl-0 = <&green_led_gio>, <&heartbeat_led_gpio>;
pinctrl-0 = <&green_led>, <&heartbeat_led>;
green-led {
color = <LED_COLOR_ID_GREEN>;
default-state = "on";
function = LED_FUNCTION_POWER;
gpios = <&gpio0 RK_PA6 GPIO_ACTIVE_HIGH>;
label = "rockpis:green:power";
linux,default-trigger = "default-on";
};
blue-led {
color = <LED_COLOR_ID_BLUE>;
default-state = "on";
function = LED_FUNCTION_HEARTBEAT;
gpios = <&gpio0 RK_PA5 GPIO_ACTIVE_HIGH>;
label = "rockpis:blue:user";
linux,default-trigger = "heartbeat";
@ -126,10 +132,12 @@
};
&emmc {
bus-width = <4>;
cap-mmc-highspeed;
mmc-hs200-1_8v;
cap-sd-highspeed;
no-sdio;
non-removable;
pinctrl-names = "default";
pinctrl-0 = <&emmc_bus8 &emmc_clk &emmc_cmd>;
vmmc-supply = <&vcc_io>;
status = "okay";
};
@ -152,11 +160,11 @@
pinctrl-0 = <&rtc_32k>;
leds {
green_led_gio: green-led-gpio {
green_led: green-led {
rockchip,pins = <0 RK_PA6 RK_FUNC_GPIO &pcfg_pull_none>;
};
heartbeat_led_gpio: heartbeat-led-gpio {
heartbeat_led: heartbeat-led {
rockchip,pins = <0 RK_PA5 RK_FUNC_GPIO &pcfg_pull_none>;
};
};

View File

@ -186,8 +186,8 @@
rk805: pmic@18 {
compatible = "rockchip,rk805";
reg = <0x18>;
interrupt-parent = <&gpio2>;
interrupts = <6 IRQ_TYPE_LEVEL_LOW>;
interrupt-parent = <&gpio0>;
interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
#clock-cells = <1>;
clock-output-names = "xin32k", "rk805-clkout2";
gpio-controller;

View File

@ -790,6 +790,7 @@
dma-names = "tx";
pinctrl-names = "default";
pinctrl-0 = <&spdif_tx>;
#sound-dai-cells = <0>;
status = "disabled";
};
@ -801,6 +802,7 @@
clocks = <&cru SCLK_I2S_2CH>, <&cru HCLK_I2S_2CH>;
dmas = <&dmac_bus 6>, <&dmac_bus 7>;
dma-names = "tx", "rx";
#sound-dai-cells = <0>;
status = "disabled";
};
@ -814,6 +816,7 @@
dma-names = "tx", "rx";
pinctrl-names = "default";
pinctrl-0 = <&i2s_8ch_bus>;
#sound-dai-cells = <0>;
status = "disabled";
};

View File

@ -450,7 +450,7 @@ ap_i2c_audio: &i2c8 {
dlg,btn-cfg = <50>;
dlg,mic-det-thr = <500>;
dlg,jack-ins-deb = <20>;
dlg,jack-det-rate = "32ms_64ms";
dlg,jack-det-rate = "32_64";
dlg,jack-rem-deb = <1>;
dlg,a-d-btn-thr = <0xa>;

View File

@ -390,6 +390,7 @@
pinctrl-0 = <&pmic_pins>, <&rk806_dvs1_null>,
<&rk806_dvs2_null>, <&rk806_dvs3_null>;
spi-max-frequency = <1000000>;
system-power-controller;
vcc1-supply = <&vcc5v0_sys>;
vcc2-supply = <&vcc5v0_sys>;

View File

@ -840,7 +840,7 @@ __SYSCALL(__NR_pselect6_time64, compat_sys_pselect6_time64)
#define __NR_ppoll_time64 414
__SYSCALL(__NR_ppoll_time64, compat_sys_ppoll_time64)
#define __NR_io_pgetevents_time64 416
__SYSCALL(__NR_io_pgetevents_time64, sys_io_pgetevents)
__SYSCALL(__NR_io_pgetevents_time64, compat_sys_io_pgetevents_time64)
#define __NR_recvmmsg_time64 417
__SYSCALL(__NR_recvmmsg_time64, compat_sys_recvmmsg_time64)
#define __NR_mq_timedsend_time64 418

View File

@ -56,17 +56,15 @@ static void invoke_syscall(struct pt_regs *regs, unsigned int scno,
syscall_set_return_value(current, regs, 0, ret);
/*
* Ultimately, this value will get limited by KSTACK_OFFSET_MAX(),
* but not enough for arm64 stack utilization comfort. To keep
* reasonable stack head room, reduce the maximum offset to 9 bits.
* This value will get limited by KSTACK_OFFSET_MAX(), which is 10
* bits. The actual entropy will be further reduced by the compiler
* when applying stack alignment constraints: the AAPCS mandates a
* 16-byte aligned SP at function boundaries, which will remove the
* 4 low bits from any entropy chosen here.
*
* The actual entropy will be further reduced by the compiler when
* applying stack alignment constraints: the AAPCS mandates a
* 16-byte (i.e. 4-bit) aligned SP at function boundaries.
*
* The resulting 5 bits of entropy is seen in SP[8:4].
* The resulting 6 bits of entropy is seen in SP[9:4].
*/
choose_random_kstack_offset(get_random_u16() & 0x1FF);
choose_random_kstack_offset(get_random_u16());
}
static inline bool has_syscall_work(unsigned long flags)
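
For the arithmetic in the rewritten comment: KSTACK_OFFSET_MAX() keeps 10 bits of the random value, and the AAPCS 16-byte stack alignment then discards the 4 low bits, leaving 6 bits visible in SP[9:4]; the s390 and x86 hunks later in this commit drop their local masks for the same reason. A stand-alone sketch of that masking (only the 0x3FF mask mirrors the kernel macro, the rest is illustrative):

        #include <stdio.h>

        #define KSTACK_OFFSET_MAX(x)  ((x) & 0x3FF)   /* keep 10 bits, as in the kernel */

        int main(void)
        {
                unsigned int raw = 0xBEEF;                  /* e.g. a get_random_u16() value */
                unsigned int off = KSTACK_OFFSET_MAX(raw);  /* 10 bits survive: 0x2EF */
                unsigned int sp_delta = off & ~0xFu;        /* 16-byte alignment drops bits [3:0] */

                /* 10 - 4 = 6 usable bits, visible in SP[9:4]. */
                printf("raw=0x%x off=0x%x sp_delta=0x%x distinct_offsets=%u\n",
                       raw, off, sp_delta, 1u << 6);
                return 0;
        }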

View File

@ -6,6 +6,7 @@
#define __ARCH_WANT_SYS_CLONE3
#define __ARCH_WANT_SET_GET_RLIMIT
#define __ARCH_WANT_TIME32_SYSCALLS
#define __ARCH_WANT_SYNC_FILE_RANGE2
#include <asm-generic/unistd.h>
#define __NR_set_thread_area (__NR_arch_specific_syscall + 0)

View File

@ -0,0 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <asm-generic/syscalls.h>
asmlinkage long sys_hexagon_fadvise64_64(int fd, int advice,
u32 a2, u32 a3, u32 a4, u32 a5);

View File

@ -36,5 +36,6 @@
#define __ARCH_WANT_SYS_VFORK
#define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_TIME32_SYSCALLS
#define __ARCH_WANT_SYNC_FILE_RANGE2
#include <asm-generic/unistd.h>

View File

@ -14,6 +14,13 @@
#undef __SYSCALL
#define __SYSCALL(nr, call) [nr] = (call),
SYSCALL_DEFINE6(hexagon_fadvise64_64, int, fd, int, advice,
SC_ARG64(offset), SC_ARG64(len))
{
return ksys_fadvise64_64(fd, SC_VAL64(loff_t, offset), SC_VAL64(loff_t, len), advice);
}
#define sys_fadvise64_64 sys_hexagon_fadvise64_64
void *sys_call_table[__NR_syscalls] = {
#include <asm/unistd.h>
};
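
The new wrapper uses SC_ARG64()/SC_VAL64() so each 64-bit argument reaches the 32-bit ABI as a pair of 32-bit words and is reassembled inside the handler. A user-space illustration of the reassembly only, assuming little-endian ordering and made-up names:

        #include <stdio.h>
        #include <stdint.h>

        /* Simplified, little-endian flavour of the kernel's SC_VAL64() idea. */
        #define VAL64(hi, lo)  (((uint64_t)(hi) << 32) | (uint32_t)(lo))

        static long fadvise64_64_demo(int fd, int advice,
                                      uint32_t off_lo, uint32_t off_hi,
                                      uint32_t len_lo, uint32_t len_hi)
        {
                uint64_t offset = VAL64(off_hi, off_lo);
                uint64_t len    = VAL64(len_hi, len_lo);

                printf("fd=%d advice=%d offset=%#llx len=%#llx\n", fd, advice,
                       (unsigned long long)offset, (unsigned long long)len);
                return 0;
        }

        int main(void)
        {
                /* offset = 0x1_0000_0200 and len = 0x10, split into 32-bit halves */
                return (int)fadvise64_64_demo(3, 0, 0x00000200u, 0x1u, 0x10u, 0x0u);
        }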

View File

@ -12,7 +12,6 @@
#include <linux/nodemask.h>
#define NODE_ADDRSPACE_SHIFT 44
#define NODES_PER_FLATMODE_NODE 4
#define pa_to_nid(addr) (((addr) & 0xf00000000000) >> NODE_ADDRSPACE_SHIFT)
#define nid_to_addrbase(nid) (_ULCAST_(nid) << NODE_ADDRSPACE_SHIFT)

View File

@ -1206,16 +1206,19 @@ skip_init_ctx:
flush_icache_range((unsigned long)header, (unsigned long)(ctx.image + ctx.idx));
if (!prog->is_func || extra_pass) {
int err;
if (extra_pass && ctx.idx != jit_data->ctx.idx) {
pr_err_once("multi-func JIT bug %d != %d\n",
ctx.idx, jit_data->ctx.idx);
bpf_jit_binary_free(header);
prog->bpf_func = NULL;
prog->jited = 0;
prog->jited_len = 0;
goto out_offset;
goto out_free;
}
err = bpf_jit_binary_lock_ro(header);
if (err) {
pr_err_once("bpf_jit_binary_lock_ro() returned %d\n",
err);
goto out_free;
}
bpf_jit_binary_lock_ro(header);
} else {
jit_data->ctx = ctx;
jit_data->image = image_ptr;
@ -1246,6 +1249,13 @@ out:
out_offset = -1;
return prog;
out_free:
bpf_jit_binary_free(header);
prog->bpf_func = NULL;
prog->jited = 0;
prog->jited_len = 0;
goto out_offset;
}
/* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */

View File

@ -354,7 +354,7 @@
412 n32 utimensat_time64 sys_utimensat
413 n32 pselect6_time64 compat_sys_pselect6_time64
414 n32 ppoll_time64 compat_sys_ppoll_time64
416 n32 io_pgetevents_time64 sys_io_pgetevents
416 n32 io_pgetevents_time64 compat_sys_io_pgetevents_time64
417 n32 recvmmsg_time64 compat_sys_recvmmsg_time64
418 n32 mq_timedsend_time64 sys_mq_timedsend
419 n32 mq_timedreceive_time64 sys_mq_timedreceive

View File

@ -403,7 +403,7 @@
412 o32 utimensat_time64 sys_utimensat sys_utimensat
413 o32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64
414 o32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64
416 o32 io_pgetevents_time64 sys_io_pgetevents sys_io_pgetevents
416 o32 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64
417 o32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64
418 o32 mq_timedsend_time64 sys_mq_timedsend sys_mq_timedsend
419 o32 mq_timedreceive_time64 sys_mq_timedreceive sys_mq_timedreceive

View File

@ -1012,7 +1012,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
bpf_prog_fill_jited_linfo(prog, &ctx.descriptors[1]);
/* Set as read-only exec and flush instruction cache */
bpf_jit_binary_lock_ro(header);
if (bpf_jit_binary_lock_ro(header))
goto out_err;
flush_icache_range((unsigned long)header,
(unsigned long)&ctx.target[ctx.jit_index]);

View File

@ -14,6 +14,7 @@ config PARISC
select ARCH_HAS_UBSAN_SANITIZE_ALL
select ARCH_HAS_PTE_SPECIAL
select ARCH_NO_SG_CHAIN
select ARCH_SPLIT_ARG64 if !64BIT
select ARCH_SUPPORTS_HUGETLBFS if PA20
select ARCH_SUPPORTS_MEMORY_FAILURE
select ARCH_STACKWALK

View File

@ -23,12 +23,3 @@ asmlinkage long sys32_unimplemented(int r26, int r25, int r24, int r23,
current->comm, current->pid, r20);
return -ENOSYS;
}
asmlinkage long sys32_fanotify_mark(compat_int_t fanotify_fd, compat_uint_t flags,
compat_uint_t mask0, compat_uint_t mask1, compat_int_t dfd,
const char __user * pathname)
{
return sys_fanotify_mark(fanotify_fd, flags,
((__u64)mask1 << 32) | mask0,
dfd, pathname);
}

View File

@ -108,7 +108,7 @@
95 common fchown sys_fchown
96 common getpriority sys_getpriority
97 common setpriority sys_setpriority
98 common recv sys_recv
98 common recv sys_recv compat_sys_recv
99 common statfs sys_statfs compat_sys_statfs
100 common fstatfs sys_fstatfs compat_sys_fstatfs
101 common stat64 sys_stat64
@ -135,7 +135,7 @@
120 common clone sys_clone_wrapper
121 common setdomainname sys_setdomainname
122 common sendfile sys_sendfile compat_sys_sendfile
123 common recvfrom sys_recvfrom
123 common recvfrom sys_recvfrom compat_sys_recvfrom
124 32 adjtimex sys_adjtimex_time32
124 64 adjtimex sys_adjtimex
125 common mprotect sys_mprotect
@ -364,7 +364,7 @@
320 common accept4 sys_accept4
321 common prlimit64 sys_prlimit64
322 common fanotify_init sys_fanotify_init
323 common fanotify_mark sys_fanotify_mark sys32_fanotify_mark
323 common fanotify_mark sys_fanotify_mark compat_sys_fanotify_mark
324 32 clock_adjtime sys_clock_adjtime32
324 64 clock_adjtime sys_clock_adjtime
325 common name_to_handle_at sys_name_to_handle_at

View File

@ -167,7 +167,13 @@ skip_init_ctx:
bpf_flush_icache(jit_data->header, ctx->insns + ctx->ninsns);
if (!prog->is_func || extra_pass) {
bpf_jit_binary_lock_ro(jit_data->header);
if (bpf_jit_binary_lock_ro(jit_data->header)) {
bpf_jit_binary_free(jit_data->header);
prog->bpf_func = NULL;
prog->jited = 0;
prog->jited_len = 0;
goto out_offset;
}
prologue_len = ctx->epilogue_offset - ctx->body_len;
for (i = 0; i < prog->len; i++)
ctx->offset[i] += prologue_len;

View File

@ -230,8 +230,10 @@
178 nospu rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend
179 32 pread64 sys_ppc_pread64 compat_sys_ppc_pread64
179 64 pread64 sys_pread64
179 spu pread64 sys_pread64
180 32 pwrite64 sys_ppc_pwrite64 compat_sys_ppc_pwrite64
180 64 pwrite64 sys_pwrite64
180 spu pwrite64 sys_pwrite64
181 common chown sys_chown
182 common getcwd sys_getcwd
183 common capget sys_capget
@ -246,6 +248,7 @@
190 common ugetrlimit sys_getrlimit compat_sys_getrlimit
191 32 readahead sys_ppc_readahead compat_sys_ppc_readahead
191 64 readahead sys_readahead
191 spu readahead sys_readahead
192 32 mmap2 sys_mmap2 compat_sys_mmap2
193 32 truncate64 sys_ppc_truncate64 compat_sys_ppc_truncate64
194 32 ftruncate64 sys_ppc_ftruncate64 compat_sys_ppc_ftruncate64
@ -293,6 +296,7 @@
232 nospu set_tid_address sys_set_tid_address
233 32 fadvise64 sys_ppc32_fadvise64 compat_sys_ppc32_fadvise64
233 64 fadvise64 sys_fadvise64
233 spu fadvise64 sys_fadvise64
234 nospu exit_group sys_exit_group
235 nospu lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie
236 common epoll_create sys_epoll_create
@ -502,7 +506,7 @@
412 32 utimensat_time64 sys_utimensat sys_utimensat
413 32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64
414 32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64
416 32 io_pgetevents_time64 sys_io_pgetevents sys_io_pgetevents
416 32 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64
417 32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64
418 32 mq_timedsend_time64 sys_mq_timedsend sys_mq_timedsend
419 32 mq_timedreceive_time64 sys_mq_timedreceive sys_mq_timedreceive

View File

@ -36,9 +36,6 @@
EMIT(PPC_RAW_BRANCH(offset)); \
} while (0)
/* bl (unconditional 'branch' with link) */
#define PPC_BL(dest) EMIT(PPC_RAW_BL((dest) - (unsigned long)(image + ctx->idx)))
/* "cond" here covers BO:BI fields. */
#define PPC_BCC_SHORT(cond, dest) \
do { \
@ -147,12 +144,6 @@ struct codegen_context {
#define BPF_FIXUP_LEN 2 /* Two instructions => 8 bytes */
#endif
static inline void bpf_flush_icache(void *start, void *end)
{
smp_wmb(); /* smp write barrier */
flush_icache_range((unsigned long)start, (unsigned long)end);
}
static inline bool bpf_is_seen_register(struct codegen_context *ctx, int i)
{
return ctx->seen & (1 << (31 - i));
@ -169,16 +160,17 @@ static inline void bpf_clear_seen_register(struct codegen_context *ctx, int i)
}
void bpf_jit_init_reg_mapping(struct codegen_context *ctx);
int bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func);
int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
int bpf_jit_emit_func_call_rel(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func);
int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct codegen_context *ctx,
u32 *addrs, int pass, bool extra_pass);
void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx);
void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx);
void bpf_jit_realloc_regs(struct codegen_context *ctx);
int bpf_jit_emit_exit_insn(u32 *image, struct codegen_context *ctx, int tmp_reg, long exit_addr);
int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, int pass, struct codegen_context *ctx,
int insn_idx, int jmp_off, int dst_reg);
int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, u32 *fimage, int pass,
struct codegen_context *ctx, int insn_idx,
int jmp_off, int dst_reg);
#endif

View File

@ -39,10 +39,13 @@ int bpf_jit_emit_exit_insn(u32 *image, struct codegen_context *ctx, int tmp_reg,
return 0;
}
struct powerpc64_jit_data {
struct bpf_binary_header *header;
struct powerpc_jit_data {
/* address of rw header */
struct bpf_binary_header *hdr;
/* address of ro final header */
struct bpf_binary_header *fhdr;
u32 *addrs;
u8 *image;
u8 *fimage;
u32 proglen;
struct codegen_context ctx;
};
@ -59,15 +62,18 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
u8 *image = NULL;
u32 *code_base;
u32 *addrs;
struct powerpc64_jit_data *jit_data;
struct powerpc_jit_data *jit_data;
struct codegen_context cgctx;
int pass;
int flen;
struct bpf_binary_header *bpf_hdr;
struct bpf_binary_header *fhdr = NULL;
struct bpf_binary_header *hdr = NULL;
struct bpf_prog *org_fp = fp;
struct bpf_prog *tmp_fp;
bool bpf_blinded = false;
bool extra_pass = false;
u8 *fimage = NULL;
u32 *fcode_base;
u32 extable_len;
u32 fixup_len;
@ -97,9 +103,16 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
addrs = jit_data->addrs;
if (addrs) {
cgctx = jit_data->ctx;
image = jit_data->image;
bpf_hdr = jit_data->header;
/*
* JIT compiled to a writable location (image/code_base) first.
* It is then moved to the readonly final location (fimage/fcode_base)
* using instruction patching.
*/
fimage = jit_data->fimage;
fhdr = jit_data->fhdr;
proglen = jit_data->proglen;
hdr = jit_data->hdr;
image = (void *)hdr + ((void *)fimage - (void *)fhdr);
extra_pass = true;
/* During extra pass, ensure index is reset before repopulating extable entries */
cgctx.exentry_idx = 0;
@ -119,7 +132,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
cgctx.stack_size = round_up(fp->aux->stack_depth, 16);
/* Scouting faux-generate pass 0 */
if (bpf_jit_build_body(fp, 0, &cgctx, addrs, 0, false)) {
if (bpf_jit_build_body(fp, NULL, NULL, &cgctx, addrs, 0, false)) {
/* We hit something illegal or unsupported. */
fp = org_fp;
goto out_addrs;
@ -134,7 +147,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
*/
if (cgctx.seen & SEEN_TAILCALL || !is_offset_in_branch_range((long)cgctx.idx * 4)) {
cgctx.idx = 0;
if (bpf_jit_build_body(fp, 0, &cgctx, addrs, 0, false)) {
if (bpf_jit_build_body(fp, NULL, NULL, &cgctx, addrs, 0, false)) {
fp = org_fp;
goto out_addrs;
}
@ -156,17 +169,19 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
proglen = cgctx.idx * 4;
alloclen = proglen + FUNCTION_DESCR_SIZE + fixup_len + extable_len;
bpf_hdr = bpf_jit_binary_alloc(alloclen, &image, 4, bpf_jit_fill_ill_insns);
if (!bpf_hdr) {
fhdr = bpf_jit_binary_pack_alloc(alloclen, &fimage, 4, &hdr, &image,
bpf_jit_fill_ill_insns);
if (!fhdr) {
fp = org_fp;
goto out_addrs;
}
if (extable_len)
fp->aux->extable = (void *)image + FUNCTION_DESCR_SIZE + proglen + fixup_len;
fp->aux->extable = (void *)fimage + FUNCTION_DESCR_SIZE + proglen + fixup_len;
skip_init_ctx:
code_base = (u32 *)(image + FUNCTION_DESCR_SIZE);
fcode_base = (u32 *)(fimage + FUNCTION_DESCR_SIZE);
/* Code generation passes 1-2 */
for (pass = 1; pass < 3; pass++) {
@ -174,8 +189,10 @@ skip_init_ctx:
cgctx.idx = 0;
cgctx.alt_exit_addr = 0;
bpf_jit_build_prologue(code_base, &cgctx);
if (bpf_jit_build_body(fp, code_base, &cgctx, addrs, pass, extra_pass)) {
bpf_jit_binary_free(bpf_hdr);
if (bpf_jit_build_body(fp, code_base, fcode_base, &cgctx, addrs, pass,
extra_pass)) {
bpf_arch_text_copy(&fhdr->size, &hdr->size, sizeof(hdr->size));
bpf_jit_binary_pack_free(fhdr, hdr);
fp = org_fp;
goto out_addrs;
}
@ -195,17 +212,19 @@ skip_init_ctx:
#ifdef CONFIG_PPC64_ELF_ABI_V1
/* Function descriptor nastiness: Address + TOC */
((u64 *)image)[0] = (u64)code_base;
((u64 *)image)[0] = (u64)fcode_base;
((u64 *)image)[1] = local_paca->kernel_toc;
#endif
fp->bpf_func = (void *)image;
fp->bpf_func = (void *)fimage;
fp->jited = 1;
fp->jited_len = proglen + FUNCTION_DESCR_SIZE;
bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + bpf_hdr->size);
if (!fp->is_func || extra_pass) {
bpf_jit_binary_lock_ro(bpf_hdr);
if (bpf_jit_binary_pack_finalize(fp, fhdr, hdr)) {
fp = org_fp;
goto out_addrs;
}
bpf_prog_fill_jited_linfo(fp, addrs);
out_addrs:
kfree(addrs);
@ -215,8 +234,9 @@ out_addrs:
jit_data->addrs = addrs;
jit_data->ctx = cgctx;
jit_data->proglen = proglen;
jit_data->image = image;
jit_data->header = bpf_hdr;
jit_data->fimage = fimage;
jit_data->fhdr = fhdr;
jit_data->hdr = hdr;
}
out:
@ -230,12 +250,13 @@ out:
* The caller should check for (BPF_MODE(code) == BPF_PROBE_MEM) before calling
* this function, as this only applies to BPF_PROBE_MEM, for now.
*/
int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, int pass, struct codegen_context *ctx,
int insn_idx, int jmp_off, int dst_reg)
int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, u32 *fimage, int pass,
struct codegen_context *ctx, int insn_idx, int jmp_off,
int dst_reg)
{
off_t offset;
unsigned long pc;
struct exception_table_entry *ex;
struct exception_table_entry *ex, *ex_entry;
u32 *fixup;
/* Populate extable entries only in the last pass */
@ -246,9 +267,16 @@ int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, int pass, struct code
WARN_ON_ONCE(ctx->exentry_idx >= fp->aux->num_exentries))
return -EINVAL;
/*
* Program is first written to image before copying to the
* final location (fimage). Accordingly, update in the image first.
* As all offsets used are relative, copying as is to the
* final location should be alright.
*/
pc = (unsigned long)&image[insn_idx];
ex = (void *)fp->aux->extable - (void *)fimage + (void *)image;
fixup = (void *)fp->aux->extable -
fixup = (void *)ex -
(fp->aux->num_exentries * BPF_FIXUP_LEN * 4) +
(ctx->exentry_idx * BPF_FIXUP_LEN * 4);
@ -259,18 +287,42 @@ int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, int pass, struct code
fixup[BPF_FIXUP_LEN - 1] =
PPC_RAW_BRANCH((long)(pc + jmp_off) - (long)&fixup[BPF_FIXUP_LEN - 1]);
ex = &fp->aux->extable[ctx->exentry_idx];
ex_entry = &ex[ctx->exentry_idx];
offset = pc - (long)&ex->insn;
offset = pc - (long)&ex_entry->insn;
if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
return -ERANGE;
ex->insn = offset;
ex_entry->insn = offset;
offset = (long)fixup - (long)&ex->fixup;
offset = (long)fixup - (long)&ex_entry->fixup;
if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
return -ERANGE;
ex->fixup = offset;
ex_entry->fixup = offset;
ctx->exentry_idx++;
return 0;
}
void bpf_jit_free(struct bpf_prog *fp)
{
if (fp->jited) {
struct powerpc_jit_data *jit_data = fp->aux->jit_data;
struct bpf_binary_header *hdr;
/*
* If we fail the final pass of JIT (from jit_subprogs),
* the program may not be finalized yet. Call finalize here
* before freeing it.
*/
if (jit_data) {
bpf_jit_binary_pack_finalize(fp, jit_data->fhdr, jit_data->hdr);
kvfree(jit_data->addrs);
kfree(jit_data);
}
hdr = bpf_jit_binary_pack_hdr(fp);
bpf_jit_binary_pack_free(hdr, NULL);
WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
}
bpf_prog_unlock_free(fp);
}
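
This powerpc change moves the BPF JIT to the pack allocator: instructions are emitted into a writable image, while call targets are resolved against the address of the read-only final image (fimage) the bytes will be copied to. A rough user-space sketch of that two-buffer flow; none of the helpers below are the real kernel API (the kernel uses bpf_jit_binary_pack_alloc(), bpf_arch_text_copy() and bpf_jit_binary_pack_finalize()):

        #include <stdio.h>
        #include <stdlib.h>
        #include <string.h>

        struct jit_bufs {
                unsigned char *image;   /* writable scratch buffer code is emitted into */
                unsigned char *fimage;  /* final location the CPU will execute from     */
                size_t len;
        };

        static int pack_alloc(struct jit_bufs *b, size_t len)
        {
                b->image  = malloc(len);
                b->fimage = malloc(len);        /* stands in for the RO allocation */
                b->len    = len;
                if (!b->image || !b->fimage) {
                        free(b->image);
                        free(b->fimage);
                        return -1;
                }
                return 0;
        }

        static void emit_code(struct jit_bufs *b)
        {
                /* Bytes go into the rw copy, but relative offsets would be
                 * computed against the fimage address they will end up at. */
                memset(b->image, 0xAA, b->len);
        }

        static void pack_finalize(struct jit_bufs *b)
        {
                memcpy(b->fimage, b->image, b->len);    /* publish to the final image */
                free(b->image);
                b->image = NULL;
        }

        int main(void)
        {
                struct jit_bufs b;

                if (pack_alloc(&b, 16))
                        return EXIT_FAILURE;
                emit_code(&b);
                pack_finalize(&b);
                printf("published %zu bytes, first byte 0x%02x\n", b.len, b.fimage[0]);
                free(b.fimage);
                return 0;
        }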

View File

@ -200,12 +200,13 @@ void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
EMIT(PPC_RAW_BLR());
}
int bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func)
/* Relative offset needs to be calculated based on final image location */
int bpf_jit_emit_func_call_rel(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func)
{
s32 rel = (s32)func - (s32)(image + ctx->idx);
s32 rel = (s32)func - (s32)(fimage + ctx->idx);
if (image && rel < 0x2000000 && rel >= -0x2000000) {
PPC_BL(func);
EMIT(PPC_RAW_BL(rel));
} else {
/* Load function address into r0 */
EMIT(PPC_RAW_LIS(_R0, IMM_H(func)));
@ -278,7 +279,7 @@ static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 o
}
/* Assemble the body code between the prologue & epilogue */
int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct codegen_context *ctx,
u32 *addrs, int pass, bool extra_pass)
{
const struct bpf_insn *insn = fp->insnsi;
@ -1009,7 +1010,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
jmp_off += 4;
}
ret = bpf_add_extable_entry(fp, image, pass, ctx, insn_idx,
ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx, insn_idx,
jmp_off, dst_reg);
if (ret)
return ret;
@ -1065,7 +1066,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
EMIT(PPC_RAW_STW(bpf_to_ppc(BPF_REG_5), _R1, 12));
}
ret = bpf_jit_emit_func_call_rel(image, ctx, func_addr);
ret = bpf_jit_emit_func_call_rel(image, fimage, ctx, func_addr);
if (ret)
return ret;

View File

@ -240,7 +240,7 @@ static int bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx, u
return 0;
}
int bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func)
int bpf_jit_emit_func_call_rel(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func)
{
unsigned int i, ctx_idx = ctx->idx;
@ -361,7 +361,7 @@ asm (
);
/* Assemble the body code between the prologue & epilogue */
int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct codegen_context *ctx,
u32 *addrs, int pass, bool extra_pass)
{
enum stf_barrier_type stf_barrier = stf_barrier_type_get();
@ -952,8 +952,8 @@ emit_clear:
addrs[++i] = ctx->idx * 4;
if (BPF_MODE(code) == BPF_PROBE_MEM) {
ret = bpf_add_extable_entry(fp, image, pass, ctx, ctx->idx - 1,
4, dst_reg);
ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx,
ctx->idx - 1, 4, dst_reg);
if (ret)
return ret;
}
@ -1007,7 +1007,7 @@ emit_clear:
if (func_addr_fixed)
ret = bpf_jit_emit_func_call_hlp(image, ctx, func_addr);
else
ret = bpf_jit_emit_func_call_rel(image, ctx, func_addr);
ret = bpf_jit_emit_func_call_rel(image, fimage, ctx, func_addr);
if (ret)
return ret;

View File

@ -145,7 +145,7 @@
/* parts of opcode for RVF, RVD and RVQ */
#define RVFDQ_FL_FS_WIDTH_OFF 12
#define RVFDQ_FL_FS_WIDTH_MASK GENMASK(3, 0)
#define RVFDQ_FL_FS_WIDTH_MASK GENMASK(2, 0)
#define RVFDQ_FL_FS_WIDTH_W 2
#define RVFDQ_FL_FS_WIDTH_D 3
#define RVFDQ_LS_FS_WIDTH_Q 4
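
The width of an FP load/store sits in funct3, bits [14:12] of the instruction, so once shifted down by RVFDQ_FL_FS_WIDTH_OFF the field is only 3 bits wide and GENMASK(2, 0) (0x7) is the correct mask. A quick stand-alone check of the extraction; the fld encoding below was worked out by hand, so treat it as illustrative:

        #include <stdio.h>
        #include <stdint.h>

        /* Values taken from the hunk above; GENMASK(2, 0) == 0x7. */
        #define RVFDQ_FL_FS_WIDTH_OFF   12
        #define RVFDQ_FL_FS_WIDTH_MASK  0x7u
        #define RVFDQ_FL_FS_WIDTH_D     3

        static unsigned int fp_ls_width(uint32_t insn)
        {
                return (insn >> RVFDQ_FL_FS_WIDTH_OFF) & RVFDQ_FL_FS_WIDTH_MASK;
        }

        int main(void)
        {
                uint32_t fld = 0x0005b407;      /* fld fs0, 0(a1): funct3 (bits 14:12) = 3 */

                printf("width field = %u (%s)\n", fp_ls_width(fld),
                       fp_ls_width(fld) == RVFDQ_FL_FS_WIDTH_D ? "double" : "other");
                return 0;
        }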

View File

@ -156,7 +156,7 @@ unsigned long __get_wchan(struct task_struct *task)
return pc;
}
noinline void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
struct task_struct *task, struct pt_regs *regs)
{
walk_stackframe(task, regs, consume_entry, cookie);

View File

@ -55,7 +55,7 @@ static __always_inline void arch_exit_to_user_mode(void)
static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
unsigned long ti_work)
{
choose_random_kstack_offset(get_tod_clock_fast() & 0xff);
choose_random_kstack_offset(get_tod_clock_fast());
}
#define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare

View File

@ -418,7 +418,7 @@
412 32 utimensat_time64 - sys_utimensat
413 32 pselect6_time64 - compat_sys_pselect6_time64
414 32 ppoll_time64 - compat_sys_ppoll_time64
416 32 io_pgetevents_time64 - sys_io_pgetevents
416 32 io_pgetevents_time64 - compat_sys_io_pgetevents_time64
417 32 recvmmsg_time64 - compat_sys_recvmmsg_time64
418 32 mq_timedsend_time64 - sys_mq_timedsend
419 32 mq_timedreceive_time64 - sys_mq_timedreceive

View File

@ -1973,7 +1973,11 @@ skip_init_ctx:
print_fn_code(jit.prg_buf, jit.size_prg);
}
if (!fp->is_func || extra_pass) {
bpf_jit_binary_lock_ro(header);
if (bpf_jit_binary_lock_ro(header)) {
bpf_jit_binary_free(header);
fp = orig_fp;
goto free_addrs;
}
} else {
jit_data->header = header;
jit_data->ctx = jit;

View File

@ -410,7 +410,7 @@ static void __init cpu_enable_directed_irq(void *unused)
union zpci_sic_iib iib = {{0}};
union zpci_sic_iib ziib = {{0}};
iib.cdiib.dibv_addr = (u64) zpci_ibv[smp_processor_id()]->vector;
iib.cdiib.dibv_addr = virt_to_phys(zpci_ibv[smp_processor_id()]->vector);
zpci_set_irq_ctrl(SIC_IRQ_MODE_SET_CPU, 0, &iib);
zpci_set_irq_ctrl(SIC_IRQ_MODE_D_SINGLE, PCI_ISC, &ziib);

View File

@ -59,3 +59,14 @@ asmlinkage int sys_fadvise64_64_wrapper(int fd, u32 offset0, u32 offset1,
(u64)len0 << 32 | len1, advice);
#endif
}
/*
* swap the arguments the way that libc wants them instead of
* moving flags ahead of the 64-bit nbytes argument
*/
SYSCALL_DEFINE6(sh_sync_file_range6, int, fd, SC_ARG64(offset),
SC_ARG64(nbytes), unsigned int, flags)
{
return ksys_sync_file_range(fd, SC_VAL64(loff_t, offset),
SC_VAL64(loff_t, nbytes), flags);
}

View File

@ -321,7 +321,7 @@
311 common set_robust_list sys_set_robust_list
312 common get_robust_list sys_get_robust_list
313 common splice sys_splice
314 common sync_file_range sys_sync_file_range
314 common sync_file_range sys_sh_sync_file_range6
315 common tee sys_tee
316 common vmsplice sys_vmsplice
317 common move_pages sys_move_pages
@ -395,6 +395,7 @@
385 common pkey_alloc sys_pkey_alloc
386 common pkey_free sys_pkey_free
387 common rseq sys_rseq
388 common sync_file_range2 sys_sync_file_range2
# room for arch specific syscalls
393 common semget sys_semget
394 common semctl sys_semctl

View File

@ -18,224 +18,3 @@ sys32_mmap2:
sethi %hi(sys_mmap), %g1
jmpl %g1 + %lo(sys_mmap), %g0
sllx %o5, 12, %o5
.align 32
.globl sys32_socketcall
sys32_socketcall: /* %o0=call, %o1=args */
cmp %o0, 1
bl,pn %xcc, do_einval
cmp %o0, 18
bg,pn %xcc, do_einval
sub %o0, 1, %o0
sllx %o0, 5, %o0
sethi %hi(__socketcall_table_begin), %g2
or %g2, %lo(__socketcall_table_begin), %g2
jmpl %g2 + %o0, %g0
nop
do_einval:
retl
mov -EINVAL, %o0
.align 32
__socketcall_table_begin:
/* Each entry is exactly 32 bytes. */
do_sys_socket: /* sys_socket(int, int, int) */
1: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_socket), %g1
2: ldswa [%o1 + 0x8] %asi, %o2
jmpl %g1 + %lo(sys_socket), %g0
3: ldswa [%o1 + 0x4] %asi, %o1
nop
nop
nop
do_sys_bind: /* sys_bind(int fd, struct sockaddr *, int) */
4: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_bind), %g1
5: ldswa [%o1 + 0x8] %asi, %o2
jmpl %g1 + %lo(sys_bind), %g0
6: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
nop
do_sys_connect: /* sys_connect(int, struct sockaddr *, int) */
7: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_connect), %g1
8: ldswa [%o1 + 0x8] %asi, %o2
jmpl %g1 + %lo(sys_connect), %g0
9: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
nop
do_sys_listen: /* sys_listen(int, int) */
10: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_listen), %g1
jmpl %g1 + %lo(sys_listen), %g0
11: ldswa [%o1 + 0x4] %asi, %o1
nop
nop
nop
nop
do_sys_accept: /* sys_accept(int, struct sockaddr *, int *) */
12: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_accept), %g1
13: lduwa [%o1 + 0x8] %asi, %o2
jmpl %g1 + %lo(sys_accept), %g0
14: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
nop
do_sys_getsockname: /* sys_getsockname(int, struct sockaddr *, int *) */
15: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_getsockname), %g1
16: lduwa [%o1 + 0x8] %asi, %o2
jmpl %g1 + %lo(sys_getsockname), %g0
17: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
nop
do_sys_getpeername: /* sys_getpeername(int, struct sockaddr *, int *) */
18: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_getpeername), %g1
19: lduwa [%o1 + 0x8] %asi, %o2
jmpl %g1 + %lo(sys_getpeername), %g0
20: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
nop
do_sys_socketpair: /* sys_socketpair(int, int, int, int *) */
21: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_socketpair), %g1
22: ldswa [%o1 + 0x8] %asi, %o2
23: lduwa [%o1 + 0xc] %asi, %o3
jmpl %g1 + %lo(sys_socketpair), %g0
24: ldswa [%o1 + 0x4] %asi, %o1
nop
nop
do_sys_send: /* sys_send(int, void *, size_t, unsigned int) */
25: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_send), %g1
26: lduwa [%o1 + 0x8] %asi, %o2
27: lduwa [%o1 + 0xc] %asi, %o3
jmpl %g1 + %lo(sys_send), %g0
28: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
do_sys_recv: /* sys_recv(int, void *, size_t, unsigned int) */
29: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_recv), %g1
30: lduwa [%o1 + 0x8] %asi, %o2
31: lduwa [%o1 + 0xc] %asi, %o3
jmpl %g1 + %lo(sys_recv), %g0
32: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
do_sys_sendto: /* sys_sendto(int, u32, compat_size_t, unsigned int, u32, int) */
33: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_sendto), %g1
34: lduwa [%o1 + 0x8] %asi, %o2
35: lduwa [%o1 + 0xc] %asi, %o3
36: lduwa [%o1 + 0x10] %asi, %o4
37: ldswa [%o1 + 0x14] %asi, %o5
jmpl %g1 + %lo(sys_sendto), %g0
38: lduwa [%o1 + 0x4] %asi, %o1
do_sys_recvfrom: /* sys_recvfrom(int, u32, compat_size_t, unsigned int, u32, u32) */
39: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_recvfrom), %g1
40: lduwa [%o1 + 0x8] %asi, %o2
41: lduwa [%o1 + 0xc] %asi, %o3
42: lduwa [%o1 + 0x10] %asi, %o4
43: lduwa [%o1 + 0x14] %asi, %o5
jmpl %g1 + %lo(sys_recvfrom), %g0
44: lduwa [%o1 + 0x4] %asi, %o1
do_sys_shutdown: /* sys_shutdown(int, int) */
45: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_shutdown), %g1
jmpl %g1 + %lo(sys_shutdown), %g0
46: ldswa [%o1 + 0x4] %asi, %o1
nop
nop
nop
nop
do_sys_setsockopt: /* sys_setsockopt(int, int, int, char *, int) */
47: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_setsockopt), %g1
48: ldswa [%o1 + 0x8] %asi, %o2
49: lduwa [%o1 + 0xc] %asi, %o3
50: ldswa [%o1 + 0x10] %asi, %o4
jmpl %g1 + %lo(sys_setsockopt), %g0
51: ldswa [%o1 + 0x4] %asi, %o1
nop
do_sys_getsockopt: /* sys_getsockopt(int, int, int, u32, u32) */
52: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_getsockopt), %g1
53: ldswa [%o1 + 0x8] %asi, %o2
54: lduwa [%o1 + 0xc] %asi, %o3
55: lduwa [%o1 + 0x10] %asi, %o4
jmpl %g1 + %lo(sys_getsockopt), %g0
56: ldswa [%o1 + 0x4] %asi, %o1
nop
do_sys_sendmsg: /* compat_sys_sendmsg(int, struct compat_msghdr *, unsigned int) */
57: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(compat_sys_sendmsg), %g1
58: lduwa [%o1 + 0x8] %asi, %o2
jmpl %g1 + %lo(compat_sys_sendmsg), %g0
59: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
nop
do_sys_recvmsg: /* compat_sys_recvmsg(int, struct compat_msghdr *, unsigned int) */
60: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(compat_sys_recvmsg), %g1
61: lduwa [%o1 + 0x8] %asi, %o2
jmpl %g1 + %lo(compat_sys_recvmsg), %g0
62: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
nop
do_sys_accept4: /* sys_accept4(int, struct sockaddr *, int *, int) */
63: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_accept4), %g1
64: lduwa [%o1 + 0x8] %asi, %o2
65: ldswa [%o1 + 0xc] %asi, %o3
jmpl %g1 + %lo(sys_accept4), %g0
66: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
.section __ex_table,"a"
.align 4
.word 1b, __retl_efault, 2b, __retl_efault
.word 3b, __retl_efault, 4b, __retl_efault
.word 5b, __retl_efault, 6b, __retl_efault
.word 7b, __retl_efault, 8b, __retl_efault
.word 9b, __retl_efault, 10b, __retl_efault
.word 11b, __retl_efault, 12b, __retl_efault
.word 13b, __retl_efault, 14b, __retl_efault
.word 15b, __retl_efault, 16b, __retl_efault
.word 17b, __retl_efault, 18b, __retl_efault
.word 19b, __retl_efault, 20b, __retl_efault
.word 21b, __retl_efault, 22b, __retl_efault
.word 23b, __retl_efault, 24b, __retl_efault
.word 25b, __retl_efault, 26b, __retl_efault
.word 27b, __retl_efault, 28b, __retl_efault
.word 29b, __retl_efault, 30b, __retl_efault
.word 31b, __retl_efault, 32b, __retl_efault
.word 33b, __retl_efault, 34b, __retl_efault
.word 35b, __retl_efault, 36b, __retl_efault
.word 37b, __retl_efault, 38b, __retl_efault
.word 39b, __retl_efault, 40b, __retl_efault
.word 41b, __retl_efault, 42b, __retl_efault
.word 43b, __retl_efault, 44b, __retl_efault
.word 45b, __retl_efault, 46b, __retl_efault
.word 47b, __retl_efault, 48b, __retl_efault
.word 49b, __retl_efault, 50b, __retl_efault
.word 51b, __retl_efault, 52b, __retl_efault
.word 53b, __retl_efault, 54b, __retl_efault
.word 55b, __retl_efault, 56b, __retl_efault
.word 57b, __retl_efault, 58b, __retl_efault
.word 59b, __retl_efault, 60b, __retl_efault
.word 61b, __retl_efault, 62b, __retl_efault
.word 63b, __retl_efault, 64b, __retl_efault
.word 65b, __retl_efault, 66b, __retl_efault
.previous

View File

@ -117,7 +117,7 @@
90 common dup2 sys_dup2
91 32 setfsuid32 sys_setfsuid
92 common fcntl sys_fcntl compat_sys_fcntl
93 common select sys_select
93 common select sys_select compat_sys_select
94 32 setfsgid32 sys_setfsgid
95 common fsync sys_fsync
96 common setpriority sys_setpriority
@ -155,7 +155,7 @@
123 32 fchown sys_fchown16
123 64 fchown sys_fchown
124 common fchmod sys_fchmod
125 common recvfrom sys_recvfrom
125 common recvfrom sys_recvfrom compat_sys_recvfrom
126 32 setreuid sys_setreuid16
126 64 setreuid sys_setreuid
127 32 setregid sys_setregid16
@ -247,7 +247,7 @@
204 32 readdir sys_old_readdir compat_sys_old_readdir
204 64 readdir sys_nis_syscall
205 common readahead sys_readahead compat_sys_readahead
206 common socketcall sys_socketcall sys32_socketcall
206 common socketcall sys_socketcall compat_sys_socketcall
207 common syslog sys_syslog
208 common lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie
209 common fadvise64 sys_fadvise64 compat_sys_fadvise64
@ -461,7 +461,7 @@
412 32 utimensat_time64 sys_utimensat sys_utimensat
413 32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64
414 32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64
416 32 io_pgetevents_time64 sys_io_pgetevents sys_io_pgetevents
416 32 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64
417 32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64
418 32 mq_timedsend_time64 sys_mq_timedsend sys_mq_timedsend
419 32 mq_timedreceive_time64 sys_mq_timedreceive sys_mq_timedreceive

View File

@ -1602,7 +1602,11 @@ skip_init_ctx:
bpf_flush_icache(header, (u8 *)header + header->size);
if (!prog->is_func || extra_pass) {
bpf_jit_binary_lock_ro(header);
if (bpf_jit_binary_lock_ro(header)) {
bpf_jit_binary_free(header);
prog = orig_prog;
goto out_off;
}
} else {
jit_data->ctx = ctx;
jit_data->image = image_ptr;

View File

@ -420,7 +420,7 @@
412 i386 utimensat_time64 sys_utimensat
413 i386 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64
414 i386 ppoll_time64 sys_ppoll compat_sys_ppoll_time64
416 i386 io_pgetevents_time64 sys_io_pgetevents
416 i386 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64
417 i386 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64
418 i386 mq_timedsend_time64 sys_mq_timedsend
419 i386 mq_timedreceive_time64 sys_mq_timedreceive

View File

@ -73,19 +73,16 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
#endif
/*
* Ultimately, this value will get limited by KSTACK_OFFSET_MAX(),
* but not enough for x86 stack utilization comfort. To keep
* reasonable stack head room, reduce the maximum offset to 8 bits.
*
* The actual entropy will be further reduced by the compiler when
* applying stack alignment constraints (see cc_stack_align4/8 in
* This value will get limited by KSTACK_OFFSET_MAX(), which is 10
* bits. The actual entropy will be further reduced by the compiler
* when applying stack alignment constraints (see cc_stack_align4/8 in
* arch/x86/Makefile), which will remove the 3 (x86_64) or 2 (ia32)
* low bits from any entropy chosen here.
*
* Therefore, final stack offset entropy will be 5 (x86_64) or
* 6 (ia32) bits.
* Therefore, final stack offset entropy will be 7 (x86_64) or
* 8 (ia32) bits.
*/
choose_random_kstack_offset(rdtsc() & 0xFF);
choose_random_kstack_offset(rdtsc());
}
#define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare

View File

@ -145,8 +145,8 @@ void restore_fpregs_from_fpstate(struct fpstate *fpstate, u64 mask)
asm volatile(
"fnclex\n\t"
"emms\n\t"
"fildl %P[addr]" /* set F?P to defined value */
: : [addr] "m" (fpstate));
"fildl %[addr]" /* set F?P to defined value */
: : [addr] "m" (*fpstate));
}
if (use_xsave()) {
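
The fix above hands the "m" constraint the pointed-to object (*fpstate) rather than the pointer variable, so the operand names the fpstate memory itself and no %P modifier is needed. A small x86-only, user-space example of the same rule, with made-up function names:

        #include <stdio.h>

        /* Passing *ptr (the object) rather than ptr (the pointer variable) is
         * what makes %[src] expand to a proper memory reference to the data. */
        static int load_via_mem_operand(const int *ptr)
        {
                int out;

                asm volatile("movl %[src], %[dst]"
                             : [dst] "=r" (out)
                             : [src] "m" (*ptr));       /* note the dereference */
                return out;
        }

        int main(void)
        {
                int x = 42;

                printf("%d\n", load_via_mem_operand(&x));
                return 0;
        }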

View File

@ -27,25 +27,7 @@
unsigned long profile_pc(struct pt_regs *regs)
{
unsigned long pc = instruction_pointer(regs);
if (!user_mode(regs) && in_lock_functions(pc)) {
#ifdef CONFIG_FRAME_POINTER
return *(unsigned long *)(regs->bp + sizeof(long));
#else
unsigned long *sp = (unsigned long *)regs->sp;
/*
* Return address is either directly at stack pointer
* or above a saved flags. Eflags has bits 22-31 zero,
* kernel addresses don't.
*/
if (sp[0] >> 22)
return sp[0];
if (sp[1] >> 22)
return sp[1];
#endif
}
return pc;
return instruction_pointer(regs);
}
EXPORT_SYMBOL(profile_pc);

View File

@ -2600,8 +2600,7 @@ out_image:
if (bpf_jit_enable > 1)
bpf_jit_dump(prog->len, proglen, pass + 1, image);
if (image) {
bpf_jit_binary_lock_ro(header);
if (image && !bpf_jit_binary_lock_ro(header)) {
prog->bpf_func = (void *)image;
prog->jited = 1;
prog->jited_len = proglen;

View File

@ -33,6 +33,8 @@ static int ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
params.key_size > sizeof(u64) * ctx->ndigits)
return -EINVAL;
memset(ctx->private_key, 0, sizeof(ctx->private_key));
if (!params.key || !params.key_size)
return ecc_gen_privkey(ctx->curve_id, ctx->ndigits,
ctx->private_key);

View File

@ -1890,8 +1890,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
if (!host)
return -ENOMEM;
if (!host) {
rc = -ENOMEM;
goto err_rm_sysfs_file;
}
host->private_data = hpriv;
if (ahci_init_msi(pdev, n_ports, hpriv) < 0) {
@ -1944,11 +1946,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* initialize adapter */
rc = ahci_configure_dma_masks(pdev, hpriv);
if (rc)
return rc;
goto err_rm_sysfs_file;
rc = ahci_pci_reset_controller(host);
if (rc)
return rc;
goto err_rm_sysfs_file;
ahci_pci_init_controller(host);
ahci_pci_print_info(host);
@ -1957,10 +1959,15 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = ahci_host_activate(host, &ahci_sht);
if (rc)
return rc;
goto err_rm_sysfs_file;
pm_runtime_put_noidle(&pdev->dev);
return 0;
err_rm_sysfs_file:
sysfs_remove_file_from_group(&pdev->dev.kobj,
&dev_attr_remapped_nvme.attr, NULL);
return rc;
}
static void ahci_shutdown_one(struct pci_dev *pdev)

View File

@ -5499,6 +5499,18 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
return ap;
}
void ata_port_free(struct ata_port *ap)
{
if (!ap)
return;
kfree(ap->pmp_link);
kfree(ap->slave_link);
kfree(ap->ncq_sense_buf);
kfree(ap);
}
EXPORT_SYMBOL_GPL(ata_port_free);
static void ata_devres_release(struct device *gendev, void *res)
{
struct ata_host *host = dev_get_drvdata(gendev);
@ -5525,12 +5537,7 @@ static void ata_host_release(struct kref *kref)
int i;
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
kfree(ap->pmp_link);
kfree(ap->slave_link);
kfree(ap->ncq_sense_buf);
kfree(ap);
ata_port_free(host->ports[i]);
host->ports[i] = NULL;
}
kfree(host);
@ -5580,8 +5587,10 @@ struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
if (!host)
return NULL;
if (!devres_open_group(dev, NULL, GFP_KERNEL))
goto err_free;
if (!devres_open_group(dev, NULL, GFP_KERNEL)) {
kfree(host);
return NULL;
}
dr = devres_alloc(ata_devres_release, 0, GFP_KERNEL);
if (!dr)
@ -5613,8 +5622,6 @@ struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
err_out:
devres_release_group(dev, NULL);
err_free:
kfree(host);
return NULL;
}
EXPORT_SYMBOL_GPL(ata_host_alloc);
@ -5913,7 +5920,7 @@ int ata_host_register(struct ata_host *host, const struct scsi_host_template *sh
* allocation time.
*/
for (i = host->n_ports; host->ports[i]; i++)
kfree(host->ports[i]);
ata_port_free(host->ports[i]);
/* give ports names and add SCSI hosts */
for (i = 0; i < host->n_ports; i++) {

View File

@ -6,6 +6,7 @@
*/
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/counter.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
@ -376,6 +377,7 @@ static int ti_eqep_probe(struct platform_device *pdev)
struct counter_device *counter;
struct ti_eqep_cnt *priv;
void __iomem *base;
struct clk *clk;
int err;
counter = devm_counter_alloc(dev, sizeof(*priv));
@ -415,6 +417,10 @@ static int ti_eqep_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(clk))
return dev_err_probe(dev, PTR_ERR(clk), "failed to enable clock\n");
err = counter_add(counter);
if (err < 0) {
pm_runtime_put_sync(dev);
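
devm_clk_get_enabled(), used in the hunk above, both acquires and enables the clock and undoes that automatically on driver detach, while dev_err_probe() folds the message and return code together (and stays quiet for -EPROBE_DEFER). A hedged sketch of that idiom in a made-up probe; only those two helpers and the platform-driver boilerplate are real kernel API:

        #include <linux/clk.h>
        #include <linux/device.h>
        #include <linux/err.h>
        #include <linux/module.h>
        #include <linux/platform_device.h>

        static int demo_probe(struct platform_device *pdev)
        {
                struct device *dev = &pdev->dev;
                struct clk *clk;

                /* Gets and enables the clock; it is disabled and put
                 * automatically when the driver detaches. */
                clk = devm_clk_get_enabled(dev, NULL);
                if (IS_ERR(clk))
                        return dev_err_probe(dev, PTR_ERR(clk),
                                             "failed to enable clock\n");

                return 0;
        }

        static struct platform_driver demo_driver = {
                .probe = demo_probe,
                .driver = { .name = "demo-clk-consumer" },
        };
        module_platform_driver(demo_driver);
        MODULE_LICENSE("GPL");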

View File

@ -675,7 +675,7 @@ static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
if (state)
policy->cpuinfo.max_freq = cpudata->max_freq;
else
policy->cpuinfo.max_freq = cpudata->nominal_freq * 1000;
policy->cpuinfo.max_freq = cpudata->nominal_freq;
policy->max = policy->cpuinfo.max_freq;

View File

@ -356,15 +356,14 @@ static void intel_pstate_set_itmt_prio(int cpu)
int ret;
ret = cppc_get_perf_caps(cpu, &cppc_perf);
if (ret)
return;
/*
* On some systems with overclocking enabled, CPPC.highest_perf is hardcoded to 0xff.
* In this case we can't use CPPC.highest_perf to enable ITMT.
* In this case we can look at MSR_HWP_CAPABILITIES bits [8:0] to decide.
* If CPPC is not available, fall back to MSR_HWP_CAPABILITIES bits [8:0].
*
* Also, on some systems with overclocking enabled, CPPC.highest_perf is
* hardcoded to 0xff, so CPPC.highest_perf cannot be used to enable ITMT.
* Fall back to MSR_HWP_CAPABILITIES then too.
*/
if (cppc_perf.highest_perf == CPPC_MAX_PERF)
if (ret || cppc_perf.highest_perf == CPPC_MAX_PERF)
cppc_perf.highest_perf = HWP_HIGHEST_PERF(READ_ONCE(all_cpu_data[cpu]->hwp_cap_cached));
/*

View File

@ -27,7 +27,14 @@ void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled);
int cxl_region_init(void);
void cxl_region_exit(void);
int cxl_get_poison_by_endpoint(struct cxl_port *port);
struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa);
#else
static inline
struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa)
{
return NULL;
}
static inline int cxl_get_poison_by_endpoint(struct cxl_port *port)
{
return 0;

View File

@ -52,6 +52,14 @@ int devm_cxl_add_passthrough_decoder(struct cxl_port *port)
struct cxl_dport *dport = NULL;
int single_port_map[1];
unsigned long index;
struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
/*
* Capability checks are moot for passthrough decoders, support
* any and all possibilities.
*/
cxlhdm->interleave_mask = ~0U;
cxlhdm->iw_cap_mask = ~0UL;
cxlsd = cxl_switch_decoder_alloc(port, 1);
if (IS_ERR(cxlsd))
@ -79,6 +87,11 @@ static void parse_hdm_decoder_caps(struct cxl_hdm *cxlhdm)
cxlhdm->interleave_mask |= GENMASK(11, 8);
if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_14_12, hdm_cap))
cxlhdm->interleave_mask |= GENMASK(14, 12);
cxlhdm->iw_cap_mask = BIT(1) | BIT(2) | BIT(4) | BIT(8);
if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_3_6_12_WAY, hdm_cap))
cxlhdm->iw_cap_mask |= BIT(3) | BIT(6) | BIT(12);
if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_16_WAY, hdm_cap))
cxlhdm->iw_cap_mask |= BIT(16);
}
static int map_hdm_decoder_regs(struct cxl_port *port, void __iomem *crb,

View File

@ -251,50 +251,6 @@ int cxl_trigger_poison_list(struct cxl_memdev *cxlmd)
}
EXPORT_SYMBOL_NS_GPL(cxl_trigger_poison_list, CXL);
struct cxl_dpa_to_region_context {
struct cxl_region *cxlr;
u64 dpa;
};
static int __cxl_dpa_to_region(struct device *dev, void *arg)
{
struct cxl_dpa_to_region_context *ctx = arg;
struct cxl_endpoint_decoder *cxled;
u64 dpa = ctx->dpa;
if (!is_endpoint_decoder(dev))
return 0;
cxled = to_cxl_endpoint_decoder(dev);
if (!cxled->dpa_res || !resource_size(cxled->dpa_res))
return 0;
if (dpa > cxled->dpa_res->end || dpa < cxled->dpa_res->start)
return 0;
dev_dbg(dev, "dpa:0x%llx mapped in region:%s\n", dpa,
dev_name(&cxled->cxld.region->dev));
ctx->cxlr = cxled->cxld.region;
return 1;
}
static struct cxl_region *cxl_dpa_to_region(struct cxl_memdev *cxlmd, u64 dpa)
{
struct cxl_dpa_to_region_context ctx;
struct cxl_port *port;
ctx = (struct cxl_dpa_to_region_context) {
.dpa = dpa,
};
port = cxlmd->endpoint;
if (port && is_cxl_endpoint(port) && cxl_num_decoders_committed(port))
device_for_each_child(&port->dev, &ctx, __cxl_dpa_to_region);
return ctx.cxlr;
}
static int cxl_validate_poison_dpa(struct cxl_memdev *cxlmd, u64 dpa)
{
struct cxl_dev_state *cxlds = cxlmd->cxlds;

View File

@ -997,6 +997,26 @@ static int cxl_port_attach_region(struct cxl_port *port,
}
cxld = cxl_rr->decoder;
/*
* the number of targets should not exceed the target_count
* of the decoder
*/
if (is_switch_decoder(&cxld->dev)) {
struct cxl_switch_decoder *cxlsd;
cxlsd = to_cxl_switch_decoder(&cxld->dev);
if (cxl_rr->nr_targets > cxlsd->nr_targets) {
dev_dbg(&cxlr->dev,
"%s:%s %s add: %s:%s @ %d overflows targets: %d\n",
dev_name(port->uport_dev), dev_name(&port->dev),
dev_name(&cxld->dev), dev_name(&cxlmd->dev),
dev_name(&cxled->cxld.dev), pos,
cxlsd->nr_targets);
rc = -ENXIO;
goto out_erase;
}
}
rc = cxl_rr_ep_add(cxl_rr, cxled);
if (rc) {
dev_dbg(&cxlr->dev,
@ -1106,6 +1126,50 @@ static int check_last_peer(struct cxl_endpoint_decoder *cxled,
return 0;
}
static int check_interleave_cap(struct cxl_decoder *cxld, int iw, int ig)
{
struct cxl_port *port = to_cxl_port(cxld->dev.parent);
struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
unsigned int interleave_mask;
u8 eiw;
u16 eig;
int high_pos, low_pos;
if (!test_bit(iw, &cxlhdm->iw_cap_mask))
return -ENXIO;
/*
* Per CXL specification r3.1(8.2.4.20.13 Decoder Protection),
* if eiw < 8:
* DPAOFFSET[51: eig + 8] = HPAOFFSET[51: eig + 8 + eiw]
* DPAOFFSET[eig + 7: 0] = HPAOFFSET[eig + 7: 0]
*
* when the eiw is 0, all the bits of HPAOFFSET[51: 0] are used, the
* interleave bits are none.
*
* if eiw >= 8:
* DPAOFFSET[51: eig + 8] = HPAOFFSET[51: eig + eiw] / 3
* DPAOFFSET[eig + 7: 0] = HPAOFFSET[eig + 7: 0]
*
* when the eiw is 8, all the bits of HPAOFFSET[51: 0] are used, the
* interleave bits are none.
*/
ways_to_eiw(iw, &eiw);
if (eiw == 0 || eiw == 8)
return 0;
granularity_to_eig(ig, &eig);
if (eiw > 8)
high_pos = eiw + eig - 1;
else
high_pos = eiw + eig + 7;
low_pos = eig + 8;
interleave_mask = GENMASK(high_pos, low_pos);
if (interleave_mask & ~cxlhdm->interleave_mask)
return -ENXIO;
return 0;
}
static int cxl_port_setup_targets(struct cxl_port *port,
struct cxl_region *cxlr,
struct cxl_endpoint_decoder *cxled)
@ -1256,6 +1320,15 @@ static int cxl_port_setup_targets(struct cxl_port *port,
return -ENXIO;
}
} else {
rc = check_interleave_cap(cxld, iw, ig);
if (rc) {
dev_dbg(&cxlr->dev,
"%s:%s iw: %d ig: %d is not supported\n",
dev_name(port->uport_dev),
dev_name(&port->dev), iw, ig);
return rc;
}
cxld->interleave_ways = iw;
cxld->interleave_granularity = ig;
cxld->hpa_range = (struct range) {
@ -1692,6 +1765,15 @@ static int cxl_region_attach(struct cxl_region *cxlr,
struct cxl_dport *dport;
int rc = -ENXIO;
rc = check_interleave_cap(&cxled->cxld, p->interleave_ways,
p->interleave_granularity);
if (rc) {
dev_dbg(&cxlr->dev, "%s iw: %d ig: %d is not supported\n",
dev_name(&cxled->cxld.dev), p->interleave_ways,
p->interleave_granularity);
return rc;
}
if (cxled->mode != cxlr->mode) {
dev_dbg(&cxlr->dev, "%s region mode: %d mismatch: %d\n",
dev_name(&cxled->cxld.dev), cxlr->mode, cxled->mode);
@ -2509,6 +2591,61 @@ int cxl_get_poison_by_endpoint(struct cxl_port *port)
return rc;
}
struct cxl_dpa_to_region_context {
struct cxl_region *cxlr;
u64 dpa;
};
static int __cxl_dpa_to_region(struct device *dev, void *arg)
{
struct cxl_dpa_to_region_context *ctx = arg;
struct cxl_endpoint_decoder *cxled;
struct cxl_region *cxlr;
u64 dpa = ctx->dpa;
if (!is_endpoint_decoder(dev))
return 0;
cxled = to_cxl_endpoint_decoder(dev);
if (!cxled || !cxled->dpa_res || !resource_size(cxled->dpa_res))
return 0;
if (dpa > cxled->dpa_res->end || dpa < cxled->dpa_res->start)
return 0;
/*
* Stop the region search (return 1) when an endpoint mapping is
* found. The region may not be fully constructed so offering
* the cxlr in the context structure is not guaranteed.
*/
cxlr = cxled->cxld.region;
if (cxlr)
dev_dbg(dev, "dpa:0x%llx mapped in region:%s\n", dpa,
dev_name(&cxlr->dev));
else
dev_dbg(dev, "dpa:0x%llx mapped in endpoint:%s\n", dpa,
dev_name(dev));
ctx->cxlr = cxlr;
return 1;
}
struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa)
{
struct cxl_dpa_to_region_context ctx;
struct cxl_port *port;
ctx = (struct cxl_dpa_to_region_context) {
.dpa = dpa,
};
port = cxlmd->endpoint;
if (port && is_cxl_endpoint(port) && cxl_num_decoders_committed(port))
device_for_each_child(&port->dev, &ctx, __cxl_dpa_to_region);
return ctx.cxlr;
}
static struct lock_class_key cxl_pmem_region_key;
static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr)
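
A worked example for the check_interleave_cap() helper added above, assuming the usual CXL encodings (eiw = log2(ways) for power-of-2 ways, granularity = 256 << eig bytes): 4-way interleave at 256-byte granularity gives eiw = 2 and eig = 0, so the host-physical address bits that pick the target are [9:8], and those bits must fall inside the decoder's advertised interleave mask. A stand-alone computation mirroring that logic:

        #include <stdio.h>

        int main(void)
        {
                unsigned int eiw = 2;   /* 4-way interleave        */
                unsigned int eig = 0;   /* 256-byte granularity    */
                unsigned int high_pos, low_pos;
                unsigned long mask;

                high_pos = (eiw > 8) ? eiw + eig - 1 : eiw + eig + 7;
                low_pos  = eig + 8;
                /* Equivalent of GENMASK(high_pos, low_pos): here GENMASK(9, 8) = 0x300 */
                mask = ((1UL << (high_pos - low_pos + 1)) - 1) << low_pos;

                /* The check requires (mask & ~cxlhdm->interleave_mask) == 0. */
                printf("interleave bits: [%u:%u], mask = 0x%lx\n", high_pos, low_pos, mask);
                return 0;
        }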

View File

@ -43,6 +43,8 @@
#define CXL_HDM_DECODER_TARGET_COUNT_MASK GENMASK(7, 4)
#define CXL_HDM_DECODER_INTERLEAVE_11_8 BIT(8)
#define CXL_HDM_DECODER_INTERLEAVE_14_12 BIT(9)
#define CXL_HDM_DECODER_INTERLEAVE_3_6_12_WAY BIT(11)
#define CXL_HDM_DECODER_INTERLEAVE_16_WAY BIT(12)
#define CXL_HDM_DECODER_CTRL_OFFSET 0x4
#define CXL_HDM_DECODER_ENABLE BIT(1)
#define CXL_HDM_DECODER0_BASE_LOW_OFFSET(i) (0x20 * (i) + 0x10)

View File

@ -888,11 +888,21 @@ static inline void cxl_mem_active_dec(void)
int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd);
/**
* struct cxl_hdm - HDM Decoder registers and cached / decoded capabilities
* @regs: mapped registers, see devm_cxl_setup_hdm()
* @decoder_count: number of decoders for this port
* @target_count: for switch decoders, max downstream port targets
* @interleave_mask: interleave granularity capability, see check_interleave_cap()
* @iw_cap_mask: bitmask of supported interleave ways, see check_interleave_cap()
* @port: mapped cxl_port, see devm_cxl_setup_hdm()
*/
struct cxl_hdm {
struct cxl_component_regs regs;
unsigned int decoder_count;
unsigned int target_count;
unsigned int interleave_mask;
unsigned long iw_cap_mask;
struct cxl_port *port;
};

View File

@ -225,6 +225,11 @@ static int davinci_gpio_probe(struct platform_device *pdev)
else
nirq = DIV_ROUND_UP(ngpio, 16);
if (nirq > MAX_INT_PER_BANK) {
dev_err(dev, "Too many IRQs!\n");
return -EINVAL;
}
chips = devm_kzalloc(dev, sizeof(*chips), GFP_KERNEL);
if (!chips)
return -ENOMEM;

View File

@ -132,6 +132,10 @@ struct linehandle_state {
GPIOHANDLE_REQUEST_OPEN_DRAIN | \
GPIOHANDLE_REQUEST_OPEN_SOURCE)
#define GPIOHANDLE_REQUEST_DIRECTION_FLAGS \
(GPIOHANDLE_REQUEST_INPUT | \
GPIOHANDLE_REQUEST_OUTPUT)
static int linehandle_validate_flags(u32 flags)
{
/* Return an error if an unknown flag is set */
@ -212,21 +216,21 @@ static long linehandle_set_config(struct linehandle_state *lh,
if (ret)
return ret;
/* Lines must be reconfigured explicitly as input or output. */
if (!(lflags & GPIOHANDLE_REQUEST_DIRECTION_FLAGS))
return -EINVAL;
for (i = 0; i < lh->num_descs; i++) {
desc = lh->descs[i];
linehandle_flags_to_desc_flags(gcnf.flags, &desc->flags);
linehandle_flags_to_desc_flags(lflags, &desc->flags);
/*
* Lines have to be requested explicitly for input
* or output, else the line will be treated "as is".
*/
if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
int val = !!gcnf.default_values[i];
ret = gpiod_direction_output(desc, val);
if (ret)
return ret;
} else if (lflags & GPIOHANDLE_REQUEST_INPUT) {
} else {
ret = gpiod_direction_input(desc);
if (ret)
return ret;

View File

@ -399,7 +399,7 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
mem_channel_number = vram_info->v30.channel_num;
mem_channel_width = vram_info->v30.channel_width;
if (vram_width)
*vram_width = mem_channel_number * (1 << mem_channel_width);
*vram_width = mem_channel_number * 16;
break;
default:
return -EINVAL;

View File

@ -4685,11 +4685,14 @@ int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
dev_info(adev->dev, "GPU mode1 reset\n");
/* Cache the state before bus master disable. The saved config space
* values are used in other cases like restore after mode-2 reset.
*/
amdgpu_device_cache_pci_state(adev->pdev);
/* disable BM */
pci_clear_master(adev->pdev);
amdgpu_device_cache_pci_state(adev->pdev);
if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
dev_info(adev->dev, "GPU smu mode1 reset\n");
ret = amdgpu_dpm_mode1_reset(adev);

View File

@ -2,6 +2,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_vblank.h>
#include "amdgpu.h"
@ -313,7 +314,13 @@ static int amdgpu_vkms_prepare_fb(struct drm_plane *plane,
return 0;
}
afb = to_amdgpu_framebuffer(new_state->fb);
obj = new_state->fb->obj[0];
obj = drm_gem_fb_get_obj(new_state->fb, 0);
if (!obj) {
DRM_ERROR("Failed to get obj from framebuffer\n");
return -EINVAL;
}
rbo = gem_to_amdgpu_bo(obj);
adev = amdgpu_ttm_adev(rbo->tbo.bdev);
@ -367,12 +374,19 @@ static void amdgpu_vkms_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct amdgpu_bo *rbo;
struct drm_gem_object *obj;
int r;
if (!old_state->fb)
return;
rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
obj = drm_gem_fb_get_obj(old_state->fb, 0);
if (!obj) {
DRM_ERROR("Failed to get obj from framebuffer\n");
return;
}
rbo = gem_to_amdgpu_bo(obj);
r = amdgpu_bo_reserve(rbo, false);
if (unlikely(r)) {
DRM_ERROR("failed to reserve rbo before unpin\n");

View File

@ -1584,9 +1584,17 @@ static bool retrieve_link_cap(struct dc_link *link)
return false;
}
if (dp_is_lttpr_present(link))
if (dp_is_lttpr_present(link)) {
configure_lttpr_mode_transparent(link);
// Echo TOTAL_LTTPR_CNT back downstream
core_link_write_dpcd(
link,
DP_TOTAL_LTTPR_CNT,
&link->dpcd_caps.lttpr_caps.phy_repeater_cnt,
sizeof(link->dpcd_caps.lttpr_caps.phy_repeater_cnt));
}
/* Read DP tunneling information. */
status = dpcd_get_tunneling_device_data(link);

View File

@ -177,4 +177,9 @@ enum dpcd_psr_sink_states {
#define DP_SINK_PR_PIXEL_DEVIATION_PER_LINE 0x379
#define DP_SINK_PR_MAX_NUMBER_OF_DEVIATION_LINE 0x37A
/* Remove once drm_dp_helper.h is updated upstream */
#ifndef DP_TOTAL_LTTPR_CNT
#define DP_TOTAL_LTTPR_CNT 0xF000A /* 2.1 */
#endif
#endif /* __DAL_DPCD_DEFS_H__ */

View File

@ -524,6 +524,9 @@ struct fb_info *drm_fb_helper_alloc_info(struct drm_fb_helper *fb_helper)
if (!info)
return ERR_PTR(-ENOMEM);
if (!drm_leak_fbdev_smem)
info->flags |= FBINFO_HIDE_SMEM_START;
ret = fb_alloc_cmap(&info->cmap, 256, 0);
if (ret)
goto err_release;
@ -1860,9 +1863,6 @@ __drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper)
info = fb_helper->info;
info->var.pixclock = 0;
if (!drm_leak_fbdev_smem)
info->flags |= FBINFO_HIDE_SMEM_START;
/* Need to drop locks to avoid recursive deadlock in
* register_framebuffer. This is ok because the only thing left to do is
* register the fbdev emulation instance in kernel_fb_helper_list. */

View File

@ -130,7 +130,10 @@ static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
info->flags |= FBINFO_READS_FAST; /* signal caching */
info->screen_size = sizes->surface_height * fb->pitches[0];
info->screen_buffer = map.vaddr;
info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
if (!(info->flags & FBINFO_HIDE_SMEM_START)) {
if (!drm_WARN_ON(dev, is_vmalloc_addr(info->screen_buffer)))
info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
}
info->fix.smem_len = info->screen_size;
return 0;

View File

@ -529,14 +529,12 @@ void drm_file_update_pid(struct drm_file *filp)
dev = filp->minor->dev;
mutex_lock(&dev->filelist_mutex);
get_pid(pid);
old = rcu_replace_pointer(filp->pid, pid, 1);
mutex_unlock(&dev->filelist_mutex);
if (pid != old) {
get_pid(pid);
synchronize_rcu();
put_pid(old);
}
synchronize_rcu();
put_pid(old);
}
/**

View File

@ -298,6 +298,7 @@ void i915_vma_revoke_fence(struct i915_vma *vma)
return;
GEM_BUG_ON(fence->vma != vma);
i915_active_wait(&fence->active);
GEM_BUG_ON(!i915_active_is_idle(&fence->active));
GEM_BUG_ON(atomic_read(&fence->pin_count));

View File

@ -209,6 +209,8 @@ static int nv17_tv_get_ld_modes(struct drm_encoder *encoder,
struct drm_display_mode *mode;
mode = drm_mode_duplicate(encoder->dev, tv_mode);
if (!mode)
continue;
mode->clock = tv_norm->tv_enc_mode.vrefresh *
mode->htotal / 1000 *
@ -258,6 +260,8 @@ static int nv17_tv_get_hd_modes(struct drm_encoder *encoder,
if (modes[i].hdisplay == output_mode->hdisplay &&
modes[i].vdisplay == output_mode->vdisplay) {
mode = drm_mode_duplicate(encoder->dev, output_mode);
if (!mode)
continue;
mode->type |= DRM_MODE_TYPE_PREFERRED;
} else {
@ -265,6 +269,8 @@ static int nv17_tv_get_hd_modes(struct drm_encoder *encoder,
modes[i].vdisplay, 60, false,
(output_mode->flags &
DRM_MODE_FLAG_INTERLACE), false);
if (!mode)
continue;
}
/* CVT modes are sometimes unsuitable... */

View File

@ -883,10 +883,10 @@ static int ili9881c_prepare(struct drm_panel *panel)
msleep(5);
/* And reset it */
gpiod_set_value(ctx->reset, 1);
gpiod_set_value_cansleep(ctx->reset, 1);
msleep(20);
gpiod_set_value(ctx->reset, 0);
gpiod_set_value_cansleep(ctx->reset, 0);
msleep(20);
for (i = 0; i < ctx->desc->init_length; i++) {
@ -941,7 +941,7 @@ static int ili9881c_unprepare(struct drm_panel *panel)
mipi_dsi_dcs_enter_sleep_mode(ctx->dsi);
regulator_disable(ctx->power);
gpiod_set_value(ctx->reset, 1);
gpiod_set_value_cansleep(ctx->reset, 1);
return 0;
}

View File

@ -2523,6 +2523,7 @@ static const struct display_timing koe_tx26d202vm0bwa_timing = {
.vfront_porch = { 3, 5, 10 },
.vback_porch = { 2, 5, 10 },
.vsync_len = { 5, 5, 5 },
.flags = DISPLAY_FLAGS_DE_HIGH,
};
static const struct panel_desc koe_tx26d202vm0bwa = {

View File

@ -132,7 +132,6 @@ extern int radeon_cik_support;
/* RADEON_IB_POOL_SIZE must be a power of 2 */
#define RADEON_IB_POOL_SIZE 16
#define RADEON_DEBUGFS_MAX_COMPONENTS 32
#define RADEONFB_CONN_LIMIT 4
#define RADEON_BIOS_NUM_SCRATCH 8
/* internal ring indices */

View File

@ -683,7 +683,7 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
struct radeon_device *rdev = dev->dev_private;
struct radeon_crtc *radeon_crtc;
radeon_crtc = kzalloc(sizeof(struct radeon_crtc) + (RADEONFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
radeon_crtc = kzalloc(sizeof(*radeon_crtc), GFP_KERNEL);
if (radeon_crtc == NULL)
return;
@ -709,12 +709,6 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
dev->mode_config.cursor_width = radeon_crtc->max_cursor_width;
dev->mode_config.cursor_height = radeon_crtc->max_cursor_height;
#if 0
radeon_crtc->mode_set.crtc = &radeon_crtc->base;
radeon_crtc->mode_set.connectors = (struct drm_connector **)(radeon_crtc + 1);
radeon_crtc->mode_set.num_connectors = 0;
#endif
if (rdev->is_atom_bios && (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom))
radeon_atombios_init_crtc(dev, radeon_crtc);
else

View File

@ -118,9 +118,12 @@ static int i2c_slave_testunit_slave_cb(struct i2c_client *client,
queue_delayed_work(system_long_wq, &tu->worker,
msecs_to_jiffies(10 * tu->regs[TU_REG_DELAY]));
}
fallthrough;
break;
case I2C_SLAVE_WRITE_REQUESTED:
if (test_bit(TU_FLAG_IN_PROCESS, &tu->flags))
return -EBUSY;
memset(tu->regs, 0, TU_NUM_REGS);
tu->reg_idx = 0;
break;

View File

@ -325,6 +325,8 @@ config DMARD10
config FXLS8962AF
tristate
depends on I2C || !I2C # cannot be built-in for modular I2C
select IIO_BUFFER
select IIO_KFIFO_BUF
config FXLS8962AF_I2C
tristate "NXP FXLS8962AF/FXLS8964AF Accelerometer I2C Driver"

View File

@ -157,6 +157,8 @@ static int ad7266_read_raw(struct iio_dev *indio_dev,
ret = ad7266_read_single(st, val, chan->address);
iio_device_release_direct_mode(indio_dev);
if (ret < 0)
return ret;
*val = (*val >> 2) & 0xfff;
if (chan->scan_type.sign == 's')
*val = sign_extend32(*val,

View File

@ -414,8 +414,12 @@ static void ams_enable_channel_sequence(struct iio_dev *indio_dev)
/* Run calibration of PS & PL as part of the sequence */
scan_mask = BIT(0) | BIT(AMS_PS_SEQ_MAX);
for (i = 0; i < indio_dev->num_channels; i++)
scan_mask |= BIT_ULL(indio_dev->channels[i].scan_index);
for (i = 0; i < indio_dev->num_channels; i++) {
const struct iio_chan_spec *chan = &indio_dev->channels[i];
if (chan->scan_index < AMS_CTRL_SEQ_BASE)
scan_mask |= BIT_ULL(chan->scan_index);
}
if (ams->ps_base) {
/* put sysmon in a soft reset to change the sequence */

View File

@ -54,7 +54,9 @@
#define BME680_NB_CONV_MASK GENMASK(3, 0)
#define BME680_REG_MEAS_STAT_0 0x1D
#define BME680_NEW_DATA_BIT BIT(7)
#define BME680_GAS_MEAS_BIT BIT(6)
#define BME680_MEAS_BIT BIT(5)
/* Calibration Parameters */
#define BME680_T2_LSB_REG 0x8A

View File

@ -10,6 +10,7 @@
*/
#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/log2.h>
@ -38,7 +39,7 @@ struct bme680_calib {
s8 par_h3;
s8 par_h4;
s8 par_h5;
s8 par_h6;
u8 par_h6;
s8 par_h7;
s8 par_gh1;
s16 par_gh2;
@ -342,10 +343,10 @@ static s16 bme680_compensate_temp(struct bme680_data *data,
if (!calib->par_t2)
bme680_read_calib(data, calib);
var1 = (adc_temp >> 3) - (calib->par_t1 << 1);
var1 = (adc_temp >> 3) - ((s32)calib->par_t1 << 1);
var2 = (var1 * calib->par_t2) >> 11;
var3 = ((var1 >> 1) * (var1 >> 1)) >> 12;
var3 = (var3 * (calib->par_t3 << 4)) >> 14;
var3 = (var3 * ((s32)calib->par_t3 << 4)) >> 14;
data->t_fine = var2 + var3;
calc_temp = (data->t_fine * 5 + 128) >> 8;
@ -368,9 +369,9 @@ static u32 bme680_compensate_press(struct bme680_data *data,
var1 = (data->t_fine >> 1) - 64000;
var2 = ((((var1 >> 2) * (var1 >> 2)) >> 11) * calib->par_p6) >> 2;
var2 = var2 + (var1 * calib->par_p5 << 1);
var2 = (var2 >> 2) + (calib->par_p4 << 16);
var2 = (var2 >> 2) + ((s32)calib->par_p4 << 16);
var1 = (((((var1 >> 2) * (var1 >> 2)) >> 13) *
(calib->par_p3 << 5)) >> 3) +
((s32)calib->par_p3 << 5)) >> 3) +
((calib->par_p2 * var1) >> 1);
var1 = var1 >> 18;
var1 = ((32768 + var1) * calib->par_p1) >> 15;
@ -388,7 +389,7 @@ static u32 bme680_compensate_press(struct bme680_data *data,
var3 = ((press_comp >> 8) * (press_comp >> 8) *
(press_comp >> 8) * calib->par_p10) >> 17;
press_comp += (var1 + var2 + var3 + (calib->par_p7 << 7)) >> 4;
press_comp += (var1 + var2 + var3 + ((s32)calib->par_p7 << 7)) >> 4;
return press_comp;
}
@ -414,7 +415,7 @@ static u32 bme680_compensate_humid(struct bme680_data *data,
(((temp_scaled * ((temp_scaled * calib->par_h5) / 100))
>> 6) / 100) + (1 << 14))) >> 10;
var3 = var1 * var2;
var4 = calib->par_h6 << 7;
var4 = (s32)calib->par_h6 << 7;
var4 = (var4 + ((temp_scaled * calib->par_h7) / 100)) >> 4;
var5 = ((var3 >> 14) * (var3 >> 14)) >> 10;
var6 = (var4 * var5) >> 1;
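
The calibration byte par_h6 becomes unsigned here and the intermediate terms are widened to s32 before shifting. A standalone sketch of the difference for one hypothetical calibration byte (0x9C), showing how the old signed read skewed var4:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint8_t reg = 0x9C;                              /* hypothetical calibration byte */
    int32_t old_var4 = (int32_t)(int8_t)reg << 7;    /* s8 par_h6: -100 << 7 = -12800 */
    int32_t new_var4 = (int32_t)reg << 7;            /* u8 par_h6:  156 << 7 =  19968 */

    printf("old=%d new=%d\n", old_var4, new_var4);
    return 0;
}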
@ -532,6 +533,43 @@ static u8 bme680_oversampling_to_reg(u8 val)
return ilog2(val) + 1;
}
/*
* Taken from Bosch BME680 API:
* https://github.com/boschsensortec/BME68x_SensorAPI/blob/v4.4.8/bme68x.c#L490
*/
static int bme680_wait_for_eoc(struct bme680_data *data)
{
struct device *dev = regmap_get_device(data->regmap);
unsigned int check;
int ret;
/*
* (Sum of oversampling ratios * time per oversampling) +
* TPH measurement + gas measurement + wait transition from forced mode
* + heater duration
*/
int wait_eoc_us = ((data->oversampling_temp + data->oversampling_press +
data->oversampling_humid) * 1936) + (477 * 4) +
(477 * 5) + 1000 + (data->heater_dur * 1000);
usleep_range(wait_eoc_us, wait_eoc_us + 100);
ret = regmap_read(data->regmap, BME680_REG_MEAS_STAT_0, &check);
if (ret) {
dev_err(dev, "failed to read measurement status register.\n");
return ret;
}
if (check & BME680_MEAS_BIT) {
dev_err(dev, "Device measurement cycle incomplete.\n");
return -EBUSY;
}
if (!(check & BME680_NEW_DATA_BIT)) {
dev_err(dev, "No new data available from the device.\n");
return -ENODATA;
}
return 0;
}
static int bme680_chip_config(struct bme680_data *data)
{
struct device *dev = regmap_get_device(data->regmap);
@ -622,6 +660,10 @@ static int bme680_read_temp(struct bme680_data *data, int *val)
if (ret < 0)
return ret;
ret = bme680_wait_for_eoc(data);
if (ret)
return ret;
ret = regmap_bulk_read(data->regmap, BME680_REG_TEMP_MSB,
&tmp, 3);
if (ret < 0) {
@ -678,7 +720,7 @@ static int bme680_read_press(struct bme680_data *data,
}
*val = bme680_compensate_press(data, adc_press);
*val2 = 100;
*val2 = 1000;
return IIO_VAL_FRACTIONAL;
}
@ -738,6 +780,10 @@ static int bme680_read_gas(struct bme680_data *data,
if (ret < 0)
return ret;
ret = bme680_wait_for_eoc(data);
if (ret)
return ret;
ret = regmap_read(data->regmap, BME680_REG_MEAS_STAT_0, &check);
if (check & BME680_GAS_MEAS_BIT) {
dev_err(dev, "gas measurement incomplete\n");

View File

@ -920,7 +920,7 @@ static int bmp380_cmd(struct bmp280_data *data, u8 cmd)
}
/*
* Returns temperature in Celsius dregrees, resolution is 0.01º C. Output value of
* Returns temperature in Celsius degrees, resolution is 0.01º C. Output value of
* "5123" equals 51.2º C. t_fine carries fine temperature as global value.
*
* Taken from datasheet, Section Appendix 9, "Compensation formula" and repo
@ -1385,12 +1385,12 @@ static int bmp580_read_temp(struct bmp280_data *data, int *val, int *val2)
/*
* Temperature is returned in Celsius degrees in fractional
* form down 2^16. We reescale by x1000 to return milli Celsius
* to respect IIO ABI.
* form down 2^16. We rescale by x1000 to return millidegrees
* Celsius to respect IIO ABI.
*/
*val = raw_temp * 1000;
*val2 = 16;
return IIO_VAL_FRACTIONAL_LOG2;
raw_temp = sign_extend32(raw_temp, 23);
*val = ((s64)raw_temp * 1000) / (1 << 16);
return IIO_VAL_INT;
}
static int bmp580_read_press(struct bmp280_data *data, int *val, int *val2)
@ -1412,7 +1412,7 @@ static int bmp580_read_press(struct bmp280_data *data, int *val, int *val2)
}
/*
* Pressure is returned in Pascals in fractional form down 2^16.
* We reescale /1000 to convert to kilopascal to respect IIO ABI.
* We rescale /1000 to convert to kilopascal to respect IIO ABI.
*/
*val = raw_press;
*val2 = 64000; /* 2^6 * 1000 */
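
The new BMP580 temperature path sign-extends the 24-bit sample and scales it by 1000/2^16 in integer math instead of reporting a fractional value. A standalone sketch of that conversion for a hypothetical register value (0x19A000, i.e. 25.625 degrees Celsius); print formatting assumes a non-negative result:

#include <stdio.h>
#include <stdint.h>

static int32_t sign_extend24(uint32_t raw)
{
    return (int32_t)(raw << 8) >> 8;          /* replicate bit 23 upward */
}

int main(void)
{
    uint32_t raw_temp = 0x19A000;             /* hypothetical 24-bit sample */
    int32_t temp = sign_extend24(raw_temp);
    int32_t millideg = (int32_t)(((int64_t)temp * 1000) / (1 << 16));

    printf("%d.%03d degC\n", millideg / 1000, millideg % 1000);   /* 25.625 degC */
    return 0;
}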

View File

@ -410,7 +410,7 @@ struct bmp280_data {
__le16 bmp280_cal_buf[BMP280_CONTIGUOUS_CALIB_REGS / 2];
__be16 bmp180_cal_buf[BMP180_REG_CALIB_COUNT / 2];
u8 bmp380_cal_buf[BMP380_CALIB_REG_COUNT];
/* Miscellaneous, endianess-aware data buffers */
/* Miscellaneous, endianness-aware data buffers */
__le16 le16;
__be16 be16;
} __aligned(IIO_DMA_MINALIGN);

View File

@ -37,22 +37,6 @@ int rdma_restrack_init(struct ib_device *dev)
return 0;
}
static const char *type2str(enum rdma_restrack_type type)
{
static const char * const names[RDMA_RESTRACK_MAX] = {
[RDMA_RESTRACK_PD] = "PD",
[RDMA_RESTRACK_CQ] = "CQ",
[RDMA_RESTRACK_QP] = "QP",
[RDMA_RESTRACK_CM_ID] = "CM_ID",
[RDMA_RESTRACK_MR] = "MR",
[RDMA_RESTRACK_CTX] = "CTX",
[RDMA_RESTRACK_COUNTER] = "COUNTER",
[RDMA_RESTRACK_SRQ] = "SRQ",
};
return names[type];
};
/**
* rdma_restrack_clean() - clean resource tracking
* @dev: IB device
@ -60,47 +44,14 @@ static const char *type2str(enum rdma_restrack_type type)
void rdma_restrack_clean(struct ib_device *dev)
{
struct rdma_restrack_root *rt = dev->res;
struct rdma_restrack_entry *e;
char buf[TASK_COMM_LEN];
bool found = false;
const char *owner;
int i;
for (i = 0 ; i < RDMA_RESTRACK_MAX; i++) {
struct xarray *xa = &dev->res[i].xa;
if (!xa_empty(xa)) {
unsigned long index;
if (!found) {
pr_err("restrack: %s", CUT_HERE);
dev_err(&dev->dev, "BUG: RESTRACK detected leak of resources\n");
}
xa_for_each(xa, index, e) {
if (rdma_is_kernel_res(e)) {
owner = e->kern_name;
} else {
/*
* There is no need to call get_task_struct here,
* because we can be here only if there are more
* get_task_struct() call than put_task_struct().
*/
get_task_comm(buf, e->task);
owner = buf;
}
pr_err("restrack: %s %s object allocated by %s is not freed\n",
rdma_is_kernel_res(e) ? "Kernel" :
"User",
type2str(e->type), owner);
}
found = true;
}
WARN_ON(!xa_empty(xa));
xa_destroy(xa);
}
if (found)
pr_err("restrack: %s", CUT_HERE);
kfree(rt);
}

View File

@ -261,8 +261,8 @@ static int ili251x_read_touch_data(struct i2c_client *client, u8 *data)
if (!error && data[0] == 2) {
error = i2c_master_recv(client, data + ILI251X_DATA_SIZE1,
ILI251X_DATA_SIZE2);
if (error >= 0 && error != ILI251X_DATA_SIZE2)
error = -EIO;
if (error >= 0)
error = error == ILI251X_DATA_SIZE2 ? 0 : -EIO;
}
return error;

View File

@ -565,7 +565,7 @@ config IRQ_LOONGARCH_CPU
bool
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
select GENERIC_IRQ_EFFECTIVE_AFF_MASK
select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
select LOONGSON_HTVEC
select LOONGSON_LIOINTC
select LOONGSON_EIOINTC

View File

@ -15,6 +15,7 @@
#include <linux/irqchip/chained_irq.h>
#include <linux/kernel.h>
#include <linux/syscore_ops.h>
#include <asm/numa.h>
#define EIOINTC_REG_NODEMAP 0x14a0
#define EIOINTC_REG_IPMAP 0x14c0
@ -375,7 +376,7 @@ int __init pch_msi_parse_madt(union acpi_subtable_headers *header,
int node;
if (cpu_has_flatmode)
node = eiointc_priv[nr_pics - 1]->node / NODES_PER_FLATMODE_NODE;
node = early_cpu_to_node(eiointc_priv[nr_pics - 1]->node * CORES_PER_EIO_NODE);
else
node = eiointc_priv[nr_pics - 1]->node;
@ -478,7 +479,7 @@ int __init eiointc_acpi_init(struct irq_domain *parent,
goto out_free_handle;
if (cpu_has_flatmode)
node = acpi_eiointc->node / NODES_PER_FLATMODE_NODE;
node = early_cpu_to_node(acpi_eiointc->node * CORES_PER_EIO_NODE);
else
node = acpi_eiointc->node;
acpi_set_vec_parent(node, priv->eiointc_domain, pch_group);

View File

@ -28,7 +28,7 @@
#define LIOINTC_INTC_CHIP_START 0x20
#define LIOINTC_REG_INTC_STATUS(cpuid) (LIOINTC_INTC_CHIP_START + 0x20 + (cpuid) * 8)
#define LIOINTC_REG_INTC_STATUS(core) (LIOINTC_INTC_CHIP_START + 0x20 + (core) * 8)
#define LIOINTC_REG_INTC_EN_STATUS (LIOINTC_INTC_CHIP_START + 0x04)
#define LIOINTC_REG_INTC_ENABLE (LIOINTC_INTC_CHIP_START + 0x08)
#define LIOINTC_REG_INTC_DISABLE (LIOINTC_INTC_CHIP_START + 0x0c)

View File

@ -956,7 +956,7 @@ int dvb_usercopy(struct file *file,
int (*func)(struct file *file,
unsigned int cmd, void *arg))
{
char sbuf[128];
char sbuf[128] = {};
void *mbuf = NULL;
void *parg = NULL;
int err = -EINVAL;

View File

@ -23,6 +23,7 @@
#define BRCMSTB_MATCH_FLAGS_NO_64BIT BIT(0)
#define BRCMSTB_MATCH_FLAGS_BROKEN_TIMEOUT BIT(1)
#define BRCMSTB_MATCH_FLAGS_HAS_CLOCK_GATE BIT(2)
#define BRCMSTB_MATCH_FLAGS_USE_CARD_BUSY BIT(4)
#define BRCMSTB_PRIV_FLAGS_HAS_CQE BIT(0)
#define BRCMSTB_PRIV_FLAGS_GATE_CLOCK BIT(1)
@ -325,6 +326,9 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev)
if (match_priv->flags & BRCMSTB_MATCH_FLAGS_BROKEN_TIMEOUT)
host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
if (!(match_priv->flags & BRCMSTB_MATCH_FLAGS_USE_CARD_BUSY))
host->mmc_host_ops.card_busy = NULL;
/* Change the base clock frequency if the DT property exists */
if (device_property_read_u32(&pdev->dev, "clock-frequency",
&priv->base_freq_hz) != 0)

View File

@ -1325,7 +1325,7 @@ static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
ret = pci_read_config_byte(chip->pdev, 0xAE, &scratch);
if (ret)
return ret;
goto fail;
/*
* Turn PMOS on [bit 0], set over current detection to 2.4 V
@ -1336,7 +1336,10 @@ static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
else
scratch &= ~0x47;
return pci_write_config_byte(chip->pdev, 0xAE, scratch);
ret = pci_write_config_byte(chip->pdev, 0xAE, scratch);
fail:
return pcibios_err_to_errno(ret);
}
static int jmicron_probe(struct sdhci_pci_chip *chip)
@ -2201,7 +2204,7 @@ static int sdhci_pci_probe(struct pci_dev *pdev,
ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots);
if (ret)
return ret;
return pcibios_err_to_errno(ret);
slots = PCI_SLOT_INFO_SLOTS(slots) + 1;
dev_dbg(&pdev->dev, "found %d slot(s)\n", slots);
@ -2210,7 +2213,7 @@ static int sdhci_pci_probe(struct pci_dev *pdev,
ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &first_bar);
if (ret)
return ret;
return pcibios_err_to_errno(ret);
first_bar &= PCI_SLOT_INFO_FIRST_BAR_MASK;

View File

@ -823,7 +823,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_byte(chip->pdev,
O2_SD_LOCK_WP, &scratch);
if (ret)
return ret;
goto read_fail;
scratch &= 0x7f;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
@ -834,7 +834,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_byte(chip->pdev,
O2_SD_CLKREQ, &scratch);
if (ret)
return ret;
goto read_fail;
scratch |= 0x20;
pci_write_config_byte(chip->pdev, O2_SD_CLKREQ, scratch);
@ -843,7 +843,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
*/
ret = pci_read_config_byte(chip->pdev, O2_SD_CAPS, &scratch);
if (ret)
return ret;
goto read_fail;
scratch |= 0x01;
pci_write_config_byte(chip->pdev, O2_SD_CAPS, scratch);
pci_write_config_byte(chip->pdev, O2_SD_CAPS, 0x73);
@ -856,7 +856,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_byte(chip->pdev,
O2_SD_INF_MOD, &scratch);
if (ret)
return ret;
goto read_fail;
scratch |= 0x08;
pci_write_config_byte(chip->pdev, O2_SD_INF_MOD, scratch);
@ -864,7 +864,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_byte(chip->pdev,
O2_SD_LOCK_WP, &scratch);
if (ret)
return ret;
goto read_fail;
scratch |= 0x80;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
break;
@ -875,7 +875,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_byte(chip->pdev,
O2_SD_LOCK_WP, &scratch);
if (ret)
return ret;
goto read_fail;
scratch &= 0x7f;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
@ -886,7 +886,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
O2_SD_FUNC_REG0,
&scratch_32);
if (ret)
return ret;
goto read_fail;
scratch_32 = ((scratch_32 & 0xFF000000) >> 24);
/* Check Whether subId is 0x11 or 0x12 */
@ -898,7 +898,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
O2_SD_FUNC_REG4,
&scratch_32);
if (ret)
return ret;
goto read_fail;
/* Enable Base Clk setting change */
scratch_32 |= O2_SD_FREG4_ENABLE_CLK_SET;
@ -921,7 +921,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_dword(chip->pdev,
O2_SD_CLK_SETTING, &scratch_32);
if (ret)
return ret;
goto read_fail;
scratch_32 &= ~(0xFF00);
scratch_32 |= 0x07E0C800;
@ -931,14 +931,14 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_dword(chip->pdev,
O2_SD_CLKREQ, &scratch_32);
if (ret)
return ret;
goto read_fail;
scratch_32 |= 0x3;
pci_write_config_dword(chip->pdev, O2_SD_CLKREQ, scratch_32);
ret = pci_read_config_dword(chip->pdev,
O2_SD_PLL_SETTING, &scratch_32);
if (ret)
return ret;
goto read_fail;
scratch_32 &= ~(0x1F3F070E);
scratch_32 |= 0x18270106;
@ -949,7 +949,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_dword(chip->pdev,
O2_SD_CAP_REG2, &scratch_32);
if (ret)
return ret;
goto read_fail;
scratch_32 &= ~(0xE0);
pci_write_config_dword(chip->pdev,
O2_SD_CAP_REG2, scratch_32);
@ -961,7 +961,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_byte(chip->pdev,
O2_SD_LOCK_WP, &scratch);
if (ret)
return ret;
goto read_fail;
scratch |= 0x80;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
break;
@ -971,7 +971,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_byte(chip->pdev,
O2_SD_LOCK_WP, &scratch);
if (ret)
return ret;
goto read_fail;
scratch &= 0x7f;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
@ -979,7 +979,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_dword(chip->pdev,
O2_SD_PLL_SETTING, &scratch_32);
if (ret)
return ret;
goto read_fail;
if ((scratch_32 & 0xff000000) == 0x01000000) {
scratch_32 &= 0x0000FFFF;
@ -998,7 +998,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
O2_SD_FUNC_REG4,
&scratch_32);
if (ret)
return ret;
goto read_fail;
scratch_32 |= (1 << 22);
pci_write_config_dword(chip->pdev,
O2_SD_FUNC_REG4, scratch_32);
@ -1017,7 +1017,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
ret = pci_read_config_byte(chip->pdev,
O2_SD_LOCK_WP, &scratch);
if (ret)
return ret;
goto read_fail;
scratch |= 0x80;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
break;
@ -1028,7 +1028,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
/* UnLock WP */
ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch);
if (ret)
return ret;
goto read_fail;
scratch &= 0x7f;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
@ -1057,13 +1057,16 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
/* Lock WP */
ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch);
if (ret)
return ret;
goto read_fail;
scratch |= 0x80;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
break;
}
return 0;
read_fail:
return pcibios_err_to_errno(ret);
}
#ifdef CONFIG_PM_SLEEP

View File

@ -2515,26 +2515,29 @@ EXPORT_SYMBOL_GPL(sdhci_get_cd_nogpio);
static int sdhci_check_ro(struct sdhci_host *host)
{
unsigned long flags;
bool allow_invert = false;
int is_readonly;
spin_lock_irqsave(&host->lock, flags);
if (host->flags & SDHCI_DEVICE_DEAD)
if (host->flags & SDHCI_DEVICE_DEAD) {
is_readonly = 0;
else if (host->ops->get_ro)
} else if (host->ops->get_ro) {
is_readonly = host->ops->get_ro(host);
else if (mmc_can_gpio_ro(host->mmc))
} else if (mmc_can_gpio_ro(host->mmc)) {
is_readonly = mmc_gpio_get_ro(host->mmc);
else
/* Do not invert twice */
allow_invert = !(host->mmc->caps2 & MMC_CAP2_RO_ACTIVE_HIGH);
} else {
is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
& SDHCI_WRITE_PROTECT);
allow_invert = true;
}
spin_unlock_irqrestore(&host->lock, flags);
if (is_readonly >= 0 &&
allow_invert &&
(host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT))
is_readonly = !is_readonly;
/* This quirk needs to be replaced by a callback-function later */
return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
!is_readonly : is_readonly;
return is_readonly;
}
#define SAMPLE_COUNT 5

View File

@ -102,7 +102,7 @@ nogood:
offset -= master->erasesize;
}
} else {
offset = directory * master->erasesize;
offset = (unsigned long) directory * master->erasesize;
while (mtd_block_isbad(master, offset)) {
offset += master->erasesize;
if (offset == master->size)
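
The (unsigned long) cast makes the directory-to-offset multiplication happen in a wider type, so a large directory index on a big flash no longer wraps in 32-bit arithmetic. A standalone sketch on an LP64 system with a hypothetical 128 KiB erase size and directory index 40000:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    int directory = 40000;                    /* hypothetical directory index */
    uint32_t erasesize = 128 * 1024;          /* 128 KiB erase block */

    uint32_t wrapped = directory * erasesize;                  /* 32-bit product wraps */
    uint64_t widened = (unsigned long)directory * erasesize;   /* matches the fix (LP64) */

    printf("wrapped=%u widened=%llu\n", wrapped, (unsigned long long)widened);
    return 0;
}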

View File

@ -1618,11 +1618,20 @@ static int mcp251xfd_open(struct net_device *ndev)
clear_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
can_rx_offload_enable(&priv->offload);
priv->wq = alloc_ordered_workqueue("%s-mcp251xfd_wq",
WQ_FREEZABLE | WQ_MEM_RECLAIM,
dev_name(&spi->dev));
if (!priv->wq) {
err = -ENOMEM;
goto out_can_rx_offload_disable;
}
INIT_WORK(&priv->tx_work, mcp251xfd_tx_obj_write_sync);
err = request_threaded_irq(spi->irq, NULL, mcp251xfd_irq,
IRQF_SHARED | IRQF_ONESHOT,
dev_name(&spi->dev), priv);
if (err)
goto out_can_rx_offload_disable;
goto out_destroy_workqueue;
err = mcp251xfd_chip_interrupts_enable(priv);
if (err)
@ -1634,6 +1643,8 @@ static int mcp251xfd_open(struct net_device *ndev)
out_free_irq:
free_irq(spi->irq, priv);
out_destroy_workqueue:
destroy_workqueue(priv->wq);
out_can_rx_offload_disable:
can_rx_offload_disable(&priv->offload);
set_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
@ -1661,6 +1672,7 @@ static int mcp251xfd_stop(struct net_device *ndev)
hrtimer_cancel(&priv->tx_irq_timer);
mcp251xfd_chip_interrupts_disable(priv);
free_irq(ndev->irq, priv);
destroy_workqueue(priv->wq);
can_rx_offload_disable(&priv->offload);
mcp251xfd_timestamp_stop(priv);
mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);

View File

@ -131,6 +131,39 @@ mcp251xfd_tx_obj_from_skb(const struct mcp251xfd_priv *priv,
tx_obj->xfer[0].len = len;
}
static void mcp251xfd_tx_failure_drop(const struct mcp251xfd_priv *priv,
struct mcp251xfd_tx_ring *tx_ring,
int err)
{
struct net_device *ndev = priv->ndev;
struct net_device_stats *stats = &ndev->stats;
unsigned int frame_len = 0;
u8 tx_head;
tx_ring->head--;
stats->tx_dropped++;
tx_head = mcp251xfd_get_tx_head(tx_ring);
can_free_echo_skb(ndev, tx_head, &frame_len);
netdev_completed_queue(ndev, 1, frame_len);
netif_wake_queue(ndev);
if (net_ratelimit())
netdev_err(priv->ndev, "ERROR in %s: %d\n", __func__, err);
}
void mcp251xfd_tx_obj_write_sync(struct work_struct *work)
{
struct mcp251xfd_priv *priv = container_of(work, struct mcp251xfd_priv,
tx_work);
struct mcp251xfd_tx_obj *tx_obj = priv->tx_work_obj;
struct mcp251xfd_tx_ring *tx_ring = priv->tx;
int err;
err = spi_sync(priv->spi, &tx_obj->msg);
if (err)
mcp251xfd_tx_failure_drop(priv, tx_ring, err);
}
static int mcp251xfd_tx_obj_write(const struct mcp251xfd_priv *priv,
struct mcp251xfd_tx_obj *tx_obj)
{
@ -162,6 +195,11 @@ static bool mcp251xfd_tx_busy(const struct mcp251xfd_priv *priv,
return false;
}
static bool mcp251xfd_work_busy(struct work_struct *work)
{
return work_busy(work);
}
netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
struct net_device *ndev)
{
@ -175,7 +213,8 @@ netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
if (can_dev_dropped_skb(ndev, skb))
return NETDEV_TX_OK;
if (mcp251xfd_tx_busy(priv, tx_ring))
if (mcp251xfd_tx_busy(priv, tx_ring) ||
mcp251xfd_work_busy(&priv->tx_work))
return NETDEV_TX_BUSY;
tx_obj = mcp251xfd_get_tx_obj_next(tx_ring);
@ -193,13 +232,13 @@ netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
netdev_sent_queue(priv->ndev, frame_len);
err = mcp251xfd_tx_obj_write(priv, tx_obj);
if (err)
goto out_err;
return NETDEV_TX_OK;
out_err:
netdev_err(priv->ndev, "ERROR in %s: %d\n", __func__, err);
if (err == -EBUSY) {
netif_stop_queue(ndev);
priv->tx_work_obj = tx_obj;
queue_work(priv->wq, &priv->tx_work);
} else if (err) {
mcp251xfd_tx_failure_drop(priv, tx_ring, err);
}
return NETDEV_TX_OK;
}

View File

@ -633,6 +633,10 @@ struct mcp251xfd_priv {
struct mcp251xfd_rx_ring *rx[MCP251XFD_FIFO_RX_NUM];
struct mcp251xfd_tx_ring tx[MCP251XFD_FIFO_TX_NUM];
struct workqueue_struct *wq;
struct work_struct tx_work;
struct mcp251xfd_tx_obj *tx_work_obj;
DECLARE_BITMAP(flags, __MCP251XFD_FLAGS_SIZE__);
u8 rx_ring_num;
@ -952,6 +956,7 @@ void mcp251xfd_skb_set_timestamp(const struct mcp251xfd_priv *priv,
void mcp251xfd_timestamp_init(struct mcp251xfd_priv *priv);
void mcp251xfd_timestamp_stop(struct mcp251xfd_priv *priv);
void mcp251xfd_tx_obj_write_sync(struct work_struct *work);
netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
struct net_device *ndev);

View File

@ -174,10 +174,8 @@ int ksz9477_reset_switch(struct ksz_device *dev)
SPI_AUTO_EDGE_DETECTION, 0);
/* default configuration */
ksz_read8(dev, REG_SW_LUE_CTRL_1, &data8);
data8 = SW_AGING_ENABLE | SW_LINK_AUTO_AGING |
SW_SRC_ADDR_FILTER | SW_FLUSH_STP_TABLE | SW_FLUSH_MSTP_TABLE;
ksz_write8(dev, REG_SW_LUE_CTRL_1, data8);
ksz_write8(dev, REG_SW_LUE_CTRL_1,
SW_AGING_ENABLE | SW_LINK_AUTO_AGING | SW_SRC_ADDR_FILTER);
/* disable interrupts */
ksz_write32(dev, REG_SW_INT_MASK__4, SWITCH_INT_MASK);
@ -1114,6 +1112,10 @@ int ksz9477_setup(struct dsa_switch *ds)
/* Enable REG_SW_MTU__2 reg by setting SW_JUMBO_PACKET */
ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_JUMBO_PACKET, true);
/* Use collision based back pressure mode. */
ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_BACK_PRESSURE,
SW_BACK_PRESSURE_COLLISION);
/* Now we can configure default MTU value */
ret = regmap_update_bits(ksz_regmap_16(dev), REG_SW_MTU__2, REG_SW_MTU_MASK,
VLAN_ETH_FRAME_LEN + ETH_FCS_LEN);

View File

@ -267,6 +267,7 @@
#define REG_SW_MAC_CTRL_1 0x0331
#define SW_BACK_PRESSURE BIT(5)
#define SW_BACK_PRESSURE_COLLISION 0
#define FAIR_FLOW_CTRL BIT(4)
#define NO_EXC_COLLISION_DROP BIT(3)
#define SW_JUMBO_PACKET BIT(2)

View File

@ -1973,7 +1973,7 @@ static void ksz_irq_bus_sync_unlock(struct irq_data *d)
struct ksz_device *dev = kirq->dev;
int ret;
ret = ksz_write32(dev, kirq->reg_mask, kirq->masked);
ret = ksz_write8(dev, kirq->reg_mask, kirq->masked);
if (ret)
dev_err(dev->dev, "failed to change IRQ mask\n");

View File

@ -2896,11 +2896,14 @@ static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
static int update_xps(struct dpaa2_eth_priv *priv)
{
struct net_device *net_dev = priv->net_dev;
struct cpumask xps_mask;
struct dpaa2_eth_fq *fq;
int i, num_queues, netdev_queues;
struct dpaa2_eth_fq *fq;
cpumask_var_t xps_mask;
int err = 0;
if (!alloc_cpumask_var(&xps_mask, GFP_KERNEL))
return -ENOMEM;
num_queues = dpaa2_eth_queue_count(priv);
netdev_queues = (net_dev->num_tc ? : 1) * num_queues;
@ -2910,16 +2913,17 @@ static int update_xps(struct dpaa2_eth_priv *priv)
for (i = 0; i < netdev_queues; i++) {
fq = &priv->fq[i % num_queues];
cpumask_clear(&xps_mask);
cpumask_set_cpu(fq->target_cpu, &xps_mask);
cpumask_clear(xps_mask);
cpumask_set_cpu(fq->target_cpu, xps_mask);
err = netif_set_xps_queue(net_dev, &xps_mask, i);
err = netif_set_xps_queue(net_dev, xps_mask, i);
if (err) {
netdev_warn_once(net_dev, "Error setting XPS queue\n");
break;
}
}
free_cpumask_var(xps_mask);
return err;
}

View File

@ -4057,6 +4057,12 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
adapter->num_active_tx_scrqs = 0;
}
/* Clean any remaining outstanding SKBs
* we freed the irq so we won't be hearing
* from them
*/
clean_tx_pools(adapter);
if (adapter->rx_scrq) {
for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
if (!adapter->rx_scrq[i])

Some files were not shown because too many files have changed in this diff.