Merge remote-tracking branch 'tip/perf/urgent' into perf/core

To pick up fixes that are affecting tests of new 'perf diff' features in perf/core.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>

This commit is contained in: e06094ab67

@@ -10195,7 +10195,6 @@ F:	drivers/media/tuners/qt1010*
 QUALCOMM ATHEROS ATH9K WIRELESS DRIVER
 M:	QCA ath9k Development <ath9k-devel@qca.qualcomm.com>
 L:	linux-wireless@vger.kernel.org
-L:	ath9k-devel@lists.ath9k.org
 W:	http://wireless.kernel.org/en/users/Drivers/ath9k
 S:	Supported
 F:	drivers/net/wireless/ath/ath9k/

@@ -193,15 +193,16 @@ AES_ENTRY(aes_cbc_encrypt)
 	cbz	w6, .Lcbcencloop
 
 	ld1	{v0.16b}, [x5]			/* get iv */
-	enc_prepare	w3, x2, x5
+	enc_prepare	w3, x2, x6
 
 .Lcbcencloop:
 	ld1	{v1.16b}, [x1], #16		/* get next pt block */
 	eor	v0.16b, v0.16b, v1.16b		/* ..and xor with iv */
-	encrypt_block	v0, w3, x2, x5, w6
+	encrypt_block	v0, w3, x2, x6, w7
 	st1	{v0.16b}, [x0], #16
 	subs	w4, w4, #1
 	bne	.Lcbcencloop
+	st1	{v0.16b}, [x5]			/* return iv */
 	ret
 AES_ENDPROC(aes_cbc_encrypt)
 

@@ -211,7 +212,7 @@ AES_ENTRY(aes_cbc_decrypt)
 	cbz	w6, .LcbcdecloopNx
 
 	ld1	{v7.16b}, [x5]			/* get iv */
-	dec_prepare	w3, x2, x5
+	dec_prepare	w3, x2, x6
 
 .LcbcdecloopNx:
 #if INTERLEAVE >= 2

@@ -248,7 +249,7 @@ AES_ENTRY(aes_cbc_decrypt)
 .Lcbcdecloop:
 	ld1	{v1.16b}, [x1], #16		/* get next ct block */
 	mov	v0.16b, v1.16b			/* ...and copy to v0 */
-	decrypt_block	v0, w3, x2, x5, w6
+	decrypt_block	v0, w3, x2, x6, w7
 	eor	v0.16b, v0.16b, v7.16b		/* xor with iv => pt */
 	mov	v7.16b, v1.16b			/* ct is next iv */
 	st1	{v0.16b}, [x0], #16

@@ -256,6 +257,7 @@ AES_ENTRY(aes_cbc_decrypt)
 	bne	.Lcbcdecloop
 .Lcbcdecout:
 	FRAME_POP
+	st1	{v7.16b}, [x5]			/* return iv */
 	ret
 AES_ENDPROC(aes_cbc_decrypt)
 

@@ -267,24 +269,15 @@ AES_ENDPROC(aes_cbc_decrypt)
 
 AES_ENTRY(aes_ctr_encrypt)
 	FRAME_PUSH
-	cbnz	w6, .Lctrfirst		/* 1st time around? */
-	umov	x5, v4.d[1]		/* keep swabbed ctr in reg */
-	rev	x5, x5
-#if INTERLEAVE >= 2
-	cmn	w5, w4			/* 32 bit overflow? */
-	bcs	.Lctrinc
-	add	x5, x5, #1		/* increment BE ctr */
-	b	.LctrincNx
-#else
-	b	.Lctrinc
-#endif
-.Lctrfirst:
+	cbz	w6, .Lctrnotfirst	/* 1st time around? */
 	enc_prepare	w3, x2, x6
 	ld1	{v4.16b}, [x5]
-	umov	x5, v4.d[1]		/* keep swabbed ctr in reg */
-	rev	x5, x5
+
+.Lctrnotfirst:
+	umov	x8, v4.d[1]		/* keep swabbed ctr in reg */
+	rev	x8, x8
 #if INTERLEAVE >= 2
-	cmn	w5, w4			/* 32 bit overflow? */
+	cmn	w8, w4			/* 32 bit overflow? */
 	bcs	.Lctrloop
 .LctrloopNx:
 	subs	w4, w4, #INTERLEAVE

@@ -292,11 +285,11 @@ AES_ENTRY(aes_ctr_encrypt)
 #if INTERLEAVE == 2
 	mov	v0.8b, v4.8b
 	mov	v1.8b, v4.8b
-	rev	x7, x5
-	add	x5, x5, #1
+	rev	x7, x8
+	add	x8, x8, #1
 	ins	v0.d[1], x7
-	rev	x7, x5
-	add	x5, x5, #1
+	rev	x7, x8
+	add	x8, x8, #1
 	ins	v1.d[1], x7
 	ld1	{v2.16b-v3.16b}, [x1], #32	/* get 2 input blocks */
 	do_encrypt_block2x

@@ -305,7 +298,7 @@ AES_ENTRY(aes_ctr_encrypt)
 	st1	{v0.16b-v1.16b}, [x0], #32
 #else
 	ldr	q8, =0x30000000200000001	/* addends 1,2,3[,0] */
-	dup	v7.4s, w5
+	dup	v7.4s, w8
 	mov	v0.16b, v4.16b
 	add	v7.4s, v7.4s, v8.4s
 	mov	v1.16b, v4.16b

@@ -323,18 +316,12 @@ AES_ENTRY(aes_ctr_encrypt)
 	eor	v2.16b, v7.16b, v2.16b
 	eor	v3.16b, v5.16b, v3.16b
 	st1	{v0.16b-v3.16b}, [x0], #64
-	add	x5, x5, #INTERLEAVE
+	add	x8, x8, #INTERLEAVE
 #endif
-	cbz	w4, .LctroutNx
-.LctrincNx:
-	rev	x7, x5
+	rev	x7, x8
 	ins	v4.d[1], x7
+	cbz	w4, .Lctrout
 	b	.LctrloopNx
-.LctroutNx:
-	sub	x5, x5, #1
-	rev	x7, x5
-	ins	v4.d[1], x7
-	b	.Lctrout
 .Lctr1x:
 	adds	w4, w4, #INTERLEAVE
 	beq	.Lctrout

@@ -342,30 +329,39 @@ AES_ENTRY(aes_ctr_encrypt)
 .Lctrloop:
 	mov	v0.16b, v4.16b
 	encrypt_block	v0, w3, x2, x6, w7
+
+	adds	x8, x8, #1		/* increment BE ctr */
+	rev	x7, x8
+	ins	v4.d[1], x7
+	bcs	.Lctrcarry		/* overflow? */
+
+.Lctrcarrydone:
 	subs	w4, w4, #1
 	bmi	.Lctrhalfblock		/* blocks < 0 means 1/2 block */
 	ld1	{v3.16b}, [x1], #16
 	eor	v3.16b, v0.16b, v3.16b
 	st1	{v3.16b}, [x0], #16
-	beq	.Lctrout
-.Lctrinc:
-	adds	x5, x5, #1		/* increment BE ctr */
-	rev	x7, x5
-	ins	v4.d[1], x7
-	bcc	.Lctrloop		/* no overflow? */
-	umov	x7, v4.d[0]		/* load upper word of ctr */
-	rev	x7, x7			/* ... to handle the carry */
-	add	x7, x7, #1
-	rev	x7, x7
-	ins	v4.d[0], x7
-	b	.Lctrloop
-.Lctrhalfblock:
-	ld1	{v3.8b}, [x1]
-	eor	v3.8b, v0.8b, v3.8b
-	st1	{v3.8b}, [x0]
-.Lctrout:
+	bne	.Lctrloop
+
+.Lctrout:
+	st1	{v4.16b}, [x5]		/* return next CTR value */
 	FRAME_POP
 	ret
+
+.Lctrhalfblock:
+	ld1	{v3.8b}, [x1]
+	eor	v3.8b, v0.8b, v3.8b
+	st1	{v3.8b}, [x0]
+	FRAME_POP
+	ret
+
+.Lctrcarry:
+	umov	x7, v4.d[0]		/* load upper word of ctr */
+	rev	x7, x7			/* ... to handle the carry */
+	add	x7, x7, #1
+	rev	x7, x7
+	ins	v4.d[0], x7
+	b	.Lctrcarrydone
 AES_ENDPROC(aes_ctr_encrypt)
 	.ltorg
 

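The rewritten CTR path keeps the swabbed 64-bit counter in x8 and handles low-word wraparound explicitly: when the adds sets the carry flag, .Lctrcarry byte-swaps the high word of the IV, increments it, and swaps it back. The same 128-bit big-endian increment in portable C, as a rough sketch (names and helpers are illustrative, not from the kernel):

    #include <stdint.h>
    #include <stdio.h>

    /* Increment a 128-bit big-endian counter stored as two BE 64-bit words. */
    static void ctr_inc_be128(uint64_t be[2])
    {
        uint64_t lo = __builtin_bswap64(be[1]) + 1;  /* swabbed low word + 1 */

        be[1] = __builtin_bswap64(lo);
        if (lo == 0)                                 /* carry into the high word */
            be[0] = __builtin_bswap64(__builtin_bswap64(be[0]) + 1);
    }

    int main(void)
    {
        uint64_t ctr[2] = { 0, ~(uint64_t)0 };       /* low word is all ones */

        ctr_inc_be128(ctr);
        /* Expect high word ...0001 and low word ...0000 after the carry. */
        printf("%016llx %016llx\n",
               (unsigned long long)__builtin_bswap64(ctr[0]),
               (unsigned long long)__builtin_bswap64(ctr[1]));
        return 0;
    }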
@@ -35,15 +35,15 @@ void __tsb_context_switch(unsigned long pgd_pa,
 static inline void tsb_context_switch(struct mm_struct *mm)
 {
 	__tsb_context_switch(__pa(mm->pgd),
-			     &mm->context.tsb_block[0],
+			     &mm->context.tsb_block[MM_TSB_BASE],
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-			     (mm->context.tsb_block[1].tsb ?
-			      &mm->context.tsb_block[1] :
+			     (mm->context.tsb_block[MM_TSB_HUGE].tsb ?
+			      &mm->context.tsb_block[MM_TSB_HUGE] :
 			      NULL)
 #else
 			     NULL
 #endif
-			     , __pa(&mm->context.tsb_descr[0]));
+			     , __pa(&mm->context.tsb_descr[MM_TSB_BASE]));
 }
 
 void tsb_grow(struct mm_struct *mm,

@@ -1021,7 +1021,7 @@ static void __init alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask)
 	unsigned long order = get_order(size);
 	unsigned long p;
 
-	p = __get_free_pages(GFP_KERNEL, order);
+	p = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
 	if (!p) {
 		prom_printf("SUN4V: Error, cannot allocate queue.\n");
 		prom_halt();

@@ -43,8 +43,8 @@ static const char poweroff_msg[32] __attribute__((aligned(32))) =
 	"Linux powering off";
 static const char rebooting_msg[32] __attribute__((aligned(32))) =
 	"Linux rebooting";
-static const char panicing_msg[32] __attribute__((aligned(32))) =
-	"Linux panicing";
+static const char panicking_msg[32] __attribute__((aligned(32))) =
+	"Linux panicking";
 
 static int sstate_reboot_call(struct notifier_block *np, unsigned long type, void *_unused)
 {

@@ -76,7 +76,7 @@ static struct notifier_block sstate_reboot_notifier = {
 
 static int sstate_panic_event(struct notifier_block *n, unsigned long event, void *ptr)
 {
-	do_set_sstate(HV_SOFT_STATE_TRANSITION, panicing_msg);
+	do_set_sstate(HV_SOFT_STATE_TRANSITION, panicking_msg);
 
 	return NOTIFY_DONE;
 }

@@ -2051,6 +2051,73 @@ void sun4v_resum_overflow(struct pt_regs *regs)
 	atomic_inc(&sun4v_resum_oflow_cnt);
 }
 
+/* Given a set of registers, get the virtual addressi that was being accessed
+ * by the faulting instructions at tpc.
+ */
+static unsigned long sun4v_get_vaddr(struct pt_regs *regs)
+{
+	unsigned int insn;
+
+	if (!copy_from_user(&insn, (void __user *)regs->tpc, 4)) {
+		return compute_effective_address(regs, insn,
+						 (insn >> 25) & 0x1f);
+	}
+	return 0;
+}
+
+/* Attempt to handle non-resumable errors generated from userspace.
+ * Returns true if the signal was handled, false otherwise.
+ */
+bool sun4v_nonresum_error_user_handled(struct pt_regs *regs,
+				       struct sun4v_error_entry *ent) {
+
+	unsigned int attrs = ent->err_attrs;
+
+	if (attrs & SUN4V_ERR_ATTRS_MEMORY) {
+		unsigned long addr = ent->err_raddr;
+		siginfo_t info;
+
+		if (addr == ~(u64)0) {
+			/* This seems highly unlikely to ever occur */
+			pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory error detected in unknown location!\n");
+		} else {
+			unsigned long page_cnt = DIV_ROUND_UP(ent->err_size,
+							      PAGE_SIZE);
+
+			/* Break the unfortunate news. */
+			pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory failed at %016lX\n",
+				 addr);
+			pr_emerg("SUN4V NON-RECOVERABLE ERROR: Claiming %lu ages.\n",
+				 page_cnt);
+
+			while (page_cnt-- > 0) {
+				if (pfn_valid(addr >> PAGE_SHIFT))
+					get_page(pfn_to_page(addr >> PAGE_SHIFT));
+				addr += PAGE_SIZE;
+			}
+		}
+		info.si_signo = SIGKILL;
+		info.si_errno = 0;
+		info.si_trapno = 0;
+		force_sig_info(info.si_signo, &info, current);
+
+		return true;
+	}
+	if (attrs & SUN4V_ERR_ATTRS_PIO) {
+		siginfo_t info;
+
+		info.si_signo = SIGBUS;
+		info.si_code = BUS_ADRERR;
+		info.si_addr = (void __user *)sun4v_get_vaddr(regs);
+		force_sig_info(info.si_signo, &info, current);
+
+		return true;
+	}
+
+	/* Default to doing nothing */
+	return false;
+}
+
 /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
  * Log the event, clear the first word of the entry, and die.
  */

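sun4v_get_vaddr() works by fetching the instruction word at the trap PC and handing compute_effective_address() the register index from bits 29:25, which on SPARC format-3 loads and stores is the rd field. A toy decoder for just that field (the example word is arbitrary; only the bit arithmetic matters):

    #include <stdint.h>
    #include <stdio.h>

    /* Bits 29:25 of a SPARC format-3 instruction hold the rd register field. */
    static unsigned int sparc_rd_field(uint32_t insn)
    {
        return (insn >> 25) & 0x1f;
    }

    int main(void)
    {
        uint32_t insn = 0xd0062000;        /* arbitrary word; rd decodes to 8 */

        printf("rd = %u\n", sparc_rd_field(insn));
        return 0;
    }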
@@ -2075,6 +2142,12 @@ void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
 
 	put_cpu();
 
+	if (!(regs->tstate & TSTATE_PRIV) &&
+	    sun4v_nonresum_error_user_handled(regs, &local_copy)) {
+		/* DON'T PANIC: This userspace error was handled. */
+		return;
+	}
+
 #ifdef CONFIG_PCI
 	/* Check for the special PCI poke sequence. */
 	if (pci_poke_in_progress && pci_poke_cpu == cpu) {

@@ -161,7 +161,13 @@ static u64 rapl_timer_ms;
 
 static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu)
 {
-	return rapl_pmus->pmus[topology_logical_package_id(cpu)];
+	unsigned int pkgid = topology_logical_package_id(cpu);
+
+	/*
+	 * The unsigned check also catches the '-1' return value for non
+	 * existent mappings in the topology map.
+	 */
+	return pkgid < rapl_pmus->maxpkg ? rapl_pmus->pmus[pkgid] : NULL;
 }
 
 static inline u64 rapl_read_counter(struct perf_event *event)

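The bounds check works because pkgid is unsigned: topology_logical_package_id() returns -1 for CPUs with no mapping, and that -1 converts to UINT_MAX, which fails the pkgid < maxpkg comparison along with any genuinely out-of-range id. A standalone sketch of the idiom (hypothetical names, not the kernel's API):

    #include <stdio.h>

    #define MAX_PKGS 8u

    /* Mimics topology_logical_package_id(): -1 means "no mapping". */
    static int lookup_pkg_id(int cpu)
    {
        return cpu < 16 ? cpu / 8 : -1;
    }

    int main(void)
    {
        void *pmus[MAX_PKGS] = { 0 };
        int cpu;

        for (cpu = 0; cpu < 24; cpu += 8) {
            /* -1 becomes UINT_MAX here, so one unsigned compare rejects
             * both out-of-range ids and missing mappings. */
            unsigned int pkgid = lookup_pkg_id(cpu);
            void *pmu = pkgid < MAX_PKGS ? pmus[pkgid] : NULL;

            printf("cpu %d -> pkgid %u -> pmu %p\n", cpu, pkgid, pmu);
        }
        return 0;
    }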
@@ -402,6 +408,8 @@ static int rapl_pmu_event_init(struct perf_event *event)
 
 	/* must be done before validate_group */
 	pmu = cpu_to_rapl_pmu(event->cpu);
+	if (!pmu)
+		return -EINVAL;
 	event->cpu = pmu->cpu;
 	event->pmu_private = pmu;
 	event->hw.event_base = msr;

@@ -585,6 +593,20 @@ static int rapl_cpu_online(unsigned int cpu)
 	struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
 	int target;
 
+	if (!pmu) {
+		pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
+		if (!pmu)
+			return -ENOMEM;
+
+		raw_spin_lock_init(&pmu->lock);
+		INIT_LIST_HEAD(&pmu->active_list);
+		pmu->pmu = &rapl_pmus->pmu;
+		pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
+		rapl_hrtimer_init(pmu);
+
+		rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu;
+	}
+
 	/*
 	 * Check if there is an online cpu in the package which collects rapl
 	 * events already.

@@ -598,27 +620,6 @@ static int rapl_cpu_online(unsigned int cpu)
 	return 0;
 }
 
-static int rapl_cpu_prepare(unsigned int cpu)
-{
-	struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
-
-	if (pmu)
-		return 0;
-
-	pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
-	if (!pmu)
-		return -ENOMEM;
-
-	raw_spin_lock_init(&pmu->lock);
-	INIT_LIST_HEAD(&pmu->active_list);
-	pmu->pmu = &rapl_pmus->pmu;
-	pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
-	pmu->cpu = -1;
-	rapl_hrtimer_init(pmu);
-	rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu;
-	return 0;
-}
-
 static int rapl_check_hw_unit(bool apply_quirk)
 {
 	u64 msr_rapl_power_unit_bits;

@@ -803,29 +804,21 @@ static int __init rapl_pmu_init(void)
 	/*
 	 * Install callbacks. Core will call them for each online cpu.
 	 */
-
-	ret = cpuhp_setup_state(CPUHP_PERF_X86_RAPL_PREP, "perf/x86/rapl:prepare",
-				rapl_cpu_prepare, NULL);
-	if (ret)
-		goto out;
-
 	ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_RAPL_ONLINE,
 				"perf/x86/rapl:online",
 				rapl_cpu_online, rapl_cpu_offline);
 	if (ret)
-		goto out1;
+		goto out;
 
 	ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1);
 	if (ret)
-		goto out2;
+		goto out1;
 
 	rapl_advertise();
 	return 0;
 
-out2:
-	cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
 out1:
-	cpuhp_remove_state(CPUHP_PERF_X86_RAPL_PREP);
+	cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
 out:
 	pr_warn("Initialization failed (%d), disabled\n", ret);
 	cleanup_rapl_pmus();

@@ -836,7 +829,6 @@ module_init(rapl_pmu_init);
 static void __exit intel_rapl_exit(void)
 {
 	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_RAPL_ONLINE);
-	cpuhp_remove_state_nocalls(CPUHP_PERF_X86_RAPL_PREP);
 	perf_pmu_unregister(&rapl_pmus->pmu);
 	cleanup_rapl_pmus();
 }

@@ -100,7 +100,13 @@ ssize_t uncore_event_show(struct kobject *kobj,
 
 struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
 {
-	return pmu->boxes[topology_logical_package_id(cpu)];
+	unsigned int pkgid = topology_logical_package_id(cpu);
+
+	/*
+	 * The unsigned check also catches the '-1' return value for non
+	 * existent mappings in the topology map.
+	 */
+	return pkgid < max_packages ? pmu->boxes[pkgid] : NULL;
 }
 
 u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)

@@ -764,30 +770,6 @@ static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
 	pmu->registered = false;
 }
 
-static void __uncore_exit_boxes(struct intel_uncore_type *type, int cpu)
-{
-	struct intel_uncore_pmu *pmu = type->pmus;
-	struct intel_uncore_box *box;
-	int i, pkg;
-
-	if (pmu) {
-		pkg = topology_physical_package_id(cpu);
-		for (i = 0; i < type->num_boxes; i++, pmu++) {
-			box = pmu->boxes[pkg];
-			if (box)
-				uncore_box_exit(box);
-		}
-	}
-}
-
-static void uncore_exit_boxes(void *dummy)
-{
-	struct intel_uncore_type **types;
-
-	for (types = uncore_msr_uncores; *types; types++)
-		__uncore_exit_boxes(*types++, smp_processor_id());
-}
-
 static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
 {
 	int pkg;

@@ -1058,86 +1040,6 @@ static void uncore_pci_exit(void)
 	}
 }
 
-static int uncore_cpu_dying(unsigned int cpu)
-{
-	struct intel_uncore_type *type, **types = uncore_msr_uncores;
-	struct intel_uncore_pmu *pmu;
-	struct intel_uncore_box *box;
-	int i, pkg;
-
-	pkg = topology_logical_package_id(cpu);
-	for (; *types; types++) {
-		type = *types;
-		pmu = type->pmus;
-		for (i = 0; i < type->num_boxes; i++, pmu++) {
-			box = pmu->boxes[pkg];
-			if (box && atomic_dec_return(&box->refcnt) == 0)
-				uncore_box_exit(box);
-		}
-	}
-	return 0;
-}
-
-static int first_init;
-
-static int uncore_cpu_starting(unsigned int cpu)
-{
-	struct intel_uncore_type *type, **types = uncore_msr_uncores;
-	struct intel_uncore_pmu *pmu;
-	struct intel_uncore_box *box;
-	int i, pkg, ncpus = 1;
-
-	if (first_init) {
-		/*
-		 * On init we get the number of online cpus in the package
-		 * and set refcount for all of them.
-		 */
-		ncpus = cpumask_weight(topology_core_cpumask(cpu));
-	}
-
-	pkg = topology_logical_package_id(cpu);
-	for (; *types; types++) {
-		type = *types;
-		pmu = type->pmus;
-		for (i = 0; i < type->num_boxes; i++, pmu++) {
-			box = pmu->boxes[pkg];
-			if (!box)
-				continue;
-			/* The first cpu on a package activates the box */
-			if (atomic_add_return(ncpus, &box->refcnt) == ncpus)
-				uncore_box_init(box);
-		}
-	}
-
-	return 0;
-}
-
-static int uncore_cpu_prepare(unsigned int cpu)
-{
-	struct intel_uncore_type *type, **types = uncore_msr_uncores;
-	struct intel_uncore_pmu *pmu;
-	struct intel_uncore_box *box;
-	int i, pkg;
-
-	pkg = topology_logical_package_id(cpu);
-	for (; *types; types++) {
-		type = *types;
-		pmu = type->pmus;
-		for (i = 0; i < type->num_boxes; i++, pmu++) {
-			if (pmu->boxes[pkg])
-				continue;
-			/* First cpu of a package allocates the box */
-			box = uncore_alloc_box(type, cpu_to_node(cpu));
-			if (!box)
-				return -ENOMEM;
-			box->pmu = pmu;
-			box->pkgid = pkg;
-			pmu->boxes[pkg] = box;
-		}
-	}
-	return 0;
-}
-
 static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
 				   int new_cpu)
 {

@@ -1177,12 +1079,14 @@ static void uncore_change_context(struct intel_uncore_type **uncores,
 
 static int uncore_event_cpu_offline(unsigned int cpu)
 {
-	int target;
+	struct intel_uncore_type *type, **types = uncore_msr_uncores;
+	struct intel_uncore_pmu *pmu;
+	struct intel_uncore_box *box;
+	int i, pkg, target;
 
 	/* Check if exiting cpu is used for collecting uncore events */
 	if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
-		return 0;
-
+		goto unref;
 	/* Find a new cpu to collect uncore events */
 	target = cpumask_any_but(topology_core_cpumask(cpu), cpu);
 

@@ -1194,12 +1098,82 @@ static int uncore_event_cpu_offline(unsigned int cpu)
 
 	uncore_change_context(uncore_msr_uncores, cpu, target);
 	uncore_change_context(uncore_pci_uncores, cpu, target);
+
+unref:
+	/* Clear the references */
+	pkg = topology_logical_package_id(cpu);
+	for (; *types; types++) {
+		type = *types;
+		pmu = type->pmus;
+		for (i = 0; i < type->num_boxes; i++, pmu++) {
+			box = pmu->boxes[pkg];
+			if (box && atomic_dec_return(&box->refcnt) == 0)
+				uncore_box_exit(box);
+		}
+	}
 	return 0;
 }
 
+static int allocate_boxes(struct intel_uncore_type **types,
+			  unsigned int pkg, unsigned int cpu)
+{
+	struct intel_uncore_box *box, *tmp;
+	struct intel_uncore_type *type;
+	struct intel_uncore_pmu *pmu;
+	LIST_HEAD(allocated);
+	int i;
+
+	/* Try to allocate all required boxes */
+	for (; *types; types++) {
+		type = *types;
+		pmu = type->pmus;
+		for (i = 0; i < type->num_boxes; i++, pmu++) {
+			if (pmu->boxes[pkg])
+				continue;
+			box = uncore_alloc_box(type, cpu_to_node(cpu));
+			if (!box)
+				goto cleanup;
+			box->pmu = pmu;
+			box->pkgid = pkg;
+			list_add(&box->active_list, &allocated);
+		}
+	}
+	/* Install them in the pmus */
+	list_for_each_entry_safe(box, tmp, &allocated, active_list) {
+		list_del_init(&box->active_list);
+		box->pmu->boxes[pkg] = box;
+	}
+	return 0;
+
+cleanup:
+	list_for_each_entry_safe(box, tmp, &allocated, active_list) {
+		list_del_init(&box->active_list);
+		kfree(box);
+	}
+	return -ENOMEM;
+}
+
 static int uncore_event_cpu_online(unsigned int cpu)
 {
-	int target;
+	struct intel_uncore_type *type, **types = uncore_msr_uncores;
+	struct intel_uncore_pmu *pmu;
+	struct intel_uncore_box *box;
+	int i, ret, pkg, target;
 
+	pkg = topology_logical_package_id(cpu);
+	ret = allocate_boxes(types, pkg, cpu);
+	if (ret)
+		return ret;
+
+	for (; *types; types++) {
+		type = *types;
+		pmu = type->pmus;
+		for (i = 0; i < type->num_boxes; i++, pmu++) {
+			box = pmu->boxes[pkg];
+			if (!box && atomic_inc_return(&box->refcnt) == 1)
+				uncore_box_init(box);
+		}
+	}
+
 	/*
 	 * Check if there is an online cpu in the package

@@ -1389,38 +1363,16 @@ static int __init intel_uncore_init(void)
 	if (cret && pret)
 		return -ENODEV;
 
-	/*
-	 * Install callbacks. Core will call them for each online cpu.
-	 *
-	 * The first online cpu of each package allocates and takes
-	 * the refcounts for all other online cpus in that package.
-	 * If msrs are not enabled no allocation is required and
-	 * uncore_cpu_prepare() is not called for each online cpu.
-	 */
-	if (!cret) {
-		ret = cpuhp_setup_state(CPUHP_PERF_X86_UNCORE_PREP,
-					"perf/x86/intel/uncore:prepare",
-					uncore_cpu_prepare, NULL);
-		if (ret)
-			goto err;
-	} else {
-		cpuhp_setup_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP,
-					  "perf/x86/intel/uncore:prepare",
-					  uncore_cpu_prepare, NULL);
-	}
-	first_init = 1;
-	cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_STARTING,
-			  "perf/x86/uncore:starting",
-			  uncore_cpu_starting, uncore_cpu_dying);
-	first_init = 0;
-	cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
-			  "perf/x86/uncore:online",
-			  uncore_event_cpu_online, uncore_event_cpu_offline);
+	/* Install hotplug callbacks to setup the targets for each package */
+	ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
+				"perf/x86/intel/uncore:online",
+				uncore_event_cpu_online,
+				uncore_event_cpu_offline);
+	if (ret)
+		goto err;
 	return 0;
 
 err:
-	/* Undo box->init_box() */
-	on_each_cpu_mask(&uncore_cpu_mask, uncore_exit_boxes, NULL, 1);
 	uncore_types_exit(uncore_msr_uncores);
 	uncore_pci_exit();
 	return ret;

@@ -1429,9 +1381,7 @@ module_init(intel_uncore_init);
 
 static void __exit intel_uncore_exit(void)
 {
-	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
-	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_STARTING);
-	cpuhp_remove_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP);
+	cpuhp_remove_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
 	uncore_types_exit(uncore_msr_uncores);
 	uncore_pci_exit();
 }

@@ -140,6 +140,7 @@ extern void __init load_ucode_bsp(void);
 extern void load_ucode_ap(void);
 void reload_early_microcode(void);
 extern bool get_builtin_firmware(struct cpio_data *cd, const char *name);
+extern bool initrd_gone;
 #else
 static inline int __init microcode_init(void)			{ return 0; };
 static inline void __init load_ucode_bsp(void)			{ }

@@ -1373,20 +1373,15 @@ static unsigned long mce_adjust_timer_default(unsigned long interval)
 
 static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;
 
-static void __restart_timer(struct timer_list *t, unsigned long interval)
+static void __start_timer(struct timer_list *t, unsigned long interval)
 {
 	unsigned long when = jiffies + interval;
 	unsigned long flags;
 
 	local_irq_save(flags);
 
-	if (timer_pending(t)) {
-		if (time_before(when, t->expires))
-			mod_timer(t, when);
-	} else {
-		t->expires = round_jiffies(when);
-		add_timer_on(t, smp_processor_id());
-	}
+	if (!timer_pending(t) || time_before(when, t->expires))
+		mod_timer(t, round_jiffies(when));
 
 	local_irq_restore(flags);
 }

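The collapsed condition leans on two facts: mod_timer() works whether or not the timer is already pending, and time_before() compares jiffies safely across wraparound by casting the difference to a signed type. The comparison idiom in user-space C, assuming 32-bit counters (a sketch, not the kernel's implementation):

    #include <stdbool.h>
    #include <stdio.h>

    /* Wraparound-safe "a is before b", the same trick as the kernel's
     * time_before(): the subtraction wraps, the signed cast restores order. */
    static bool time_before_u32(unsigned int a, unsigned int b)
    {
        return (int)(a - b) < 0;
    }

    int main(void)
    {
        unsigned int near_wrap = 0xfffffff0u;

        printf("%d\n", time_before_u32(near_wrap, near_wrap + 0x20)); /* 1 */
        printf("%d\n", time_before_u32(near_wrap + 0x20, near_wrap)); /* 0 */
        return 0;
    }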
@@ -1421,7 +1416,7 @@ static void mce_timer_fn(unsigned long data)
 
 done:
 	__this_cpu_write(mce_next_interval, iv);
-	__restart_timer(t, iv);
+	__start_timer(t, iv);
 }
 
 /*

@@ -1432,7 +1427,7 @@ void mce_timer_kick(unsigned long interval)
 	struct timer_list *t = this_cpu_ptr(&mce_timer);
 	unsigned long iv = __this_cpu_read(mce_next_interval);
 
-	__restart_timer(t, interval);
+	__start_timer(t, interval);
 
 	if (interval < iv)
 		__this_cpu_write(mce_next_interval, interval);

@@ -1779,17 +1774,15 @@ static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c)
 	}
 }
 
-static void mce_start_timer(unsigned int cpu, struct timer_list *t)
+static void mce_start_timer(struct timer_list *t)
 {
 	unsigned long iv = check_interval * HZ;
 
 	if (mca_cfg.ignore_ce || !iv)
 		return;
 
-	per_cpu(mce_next_interval, cpu) = iv;
-
-	t->expires = round_jiffies(jiffies + iv);
-	add_timer_on(t, cpu);
+	this_cpu_write(mce_next_interval, iv);
+	__start_timer(t, iv);
 }
 
 static void __mcheck_cpu_setup_timer(void)

@@ -1806,7 +1799,7 @@ static void __mcheck_cpu_init_timer(void)
 	unsigned int cpu = smp_processor_id();
 
 	setup_pinned_timer(t, mce_timer_fn, cpu);
-	mce_start_timer(cpu, t);
+	mce_start_timer(t);
 }
 
 /* Handle unconfigured int18 (should never happen) */

@@ -2566,7 +2559,7 @@ static int mce_cpu_dead(unsigned int cpu)
 
 static int mce_cpu_online(unsigned int cpu)
 {
-	struct timer_list *t = &per_cpu(mce_timer, cpu);
+	struct timer_list *t = this_cpu_ptr(&mce_timer);
 	int ret;
 
 	mce_device_create(cpu);

@@ -2577,13 +2570,13 @@ static int mce_cpu_online(unsigned int cpu)
 		return ret;
 	}
 	mce_reenable_cpu();
-	mce_start_timer(cpu, t);
+	mce_start_timer(t);
 	return 0;
 }
 
 static int mce_cpu_pre_down(unsigned int cpu)
 {
-	struct timer_list *t = &per_cpu(mce_timer, cpu);
+	struct timer_list *t = this_cpu_ptr(&mce_timer);
 
 	mce_disable_cpu();
 	del_timer_sync(t);

@@ -384,8 +384,9 @@ void load_ucode_amd_ap(unsigned int family)
 reget:
 	if (!get_builtin_microcode(&cp, family)) {
 #ifdef CONFIG_BLK_DEV_INITRD
-		cp = find_cpio_data(ucode_path, (void *)initrd_start,
-				    initrd_end - initrd_start, NULL);
+		if (!initrd_gone)
+			cp = find_cpio_data(ucode_path, (void *)initrd_start,
+					    initrd_end - initrd_start, NULL);
 #endif
 		if (!(cp.data && cp.size)) {
 			/*

@@ -46,6 +46,8 @@
 static struct microcode_ops	*microcode_ops;
 static bool dis_ucode_ldr = true;
 
+bool initrd_gone;
+
 LIST_HEAD(microcode_cache);
 
 /*

@@ -190,21 +192,24 @@ void load_ucode_ap(void)
 static int __init save_microcode_in_initrd(void)
 {
 	struct cpuinfo_x86 *c = &boot_cpu_data;
+	int ret = -EINVAL;
 
 	switch (c->x86_vendor) {
 	case X86_VENDOR_INTEL:
 		if (c->x86 >= 6)
-			return save_microcode_in_initrd_intel();
+			ret = save_microcode_in_initrd_intel();
 		break;
 	case X86_VENDOR_AMD:
 		if (c->x86 >= 0x10)
-			return save_microcode_in_initrd_amd(c->x86);
+			ret = save_microcode_in_initrd_amd(c->x86);
 		break;
 	default:
 		break;
 	}
 
-	return -EINVAL;
+	initrd_gone = true;
+
+	return ret;
 }
 
 struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)

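Switching from early returns to a ret variable lets every vendor path fall through to the common tail, so initrd_gone is set exactly once no matter which branch ran; later find_microcode_in_initrd() calls can then refuse to touch a freed initrd. The control-flow shape in miniature (hypothetical names):

    #include <stdio.h>

    static int initrd_gone_flag;

    static int save_intel(void) { return 0; }
    static int save_amd(void)   { return 0; }

    static int save_in_initrd(int vendor)
    {
        int ret = -1;

        switch (vendor) {
        case 0: ret = save_intel(); break;
        case 1: ret = save_amd();   break;
        default: break;
        }

        initrd_gone_flag = 1;  /* runs on every path, unlike an early return */
        return ret;
    }

    int main(void)
    {
        int ret = save_in_initrd(2);

        printf("ret=%d gone=%d\n", ret, initrd_gone_flag);
        return 0;
    }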
@@ -247,9 +252,16 @@ struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)
 	 * has the virtual address of the beginning of the initrd. It also
 	 * possibly relocates the ramdisk. In either case, initrd_start contains
 	 * the updated address so use that instead.
+	 *
+	 * initrd_gone is for the hotplug case where we've thrown out initrd
+	 * already.
 	 */
-	if (!use_pa && initrd_start)
-		start = initrd_start;
+	if (!use_pa) {
+		if (initrd_gone)
+			return (struct cpio_data){ NULL, 0, "" };
+		if (initrd_start)
+			start = initrd_start;
+	}
 
 	return find_cpio_data(path, (void *)start, size, NULL);
 #else /* !CONFIG_BLK_DEV_INITRD */

@@ -41,7 +41,7 @@
 
 static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin";
 
-/* Current microcode patch used in early patching */
+/* Current microcode patch used in early patching on the APs. */
 struct microcode_intel *intel_ucode_patch;
 
 static inline bool cpu_signatures_match(unsigned int s1, unsigned int p1,

@@ -607,12 +607,6 @@ int __init save_microcode_in_initrd_intel(void)
 	struct ucode_cpu_info uci;
 	struct cpio_data cp;
 
-	/*
-	 * AP loading didn't find any microcode patch, no need to save anything.
-	 */
-	if (!intel_ucode_patch || IS_ERR(intel_ucode_patch))
-		return 0;
-
 	if (!load_builtin_intel_microcode(&cp))
 		cp = find_microcode_in_initrd(ucode_path, false);
 

@@ -628,7 +622,6 @@ int __init save_microcode_in_initrd_intel(void)
 	return 0;
 }
 
-
 /*
  * @res_patch, output: a pointer to the patch we found.
  */

@@ -9,6 +9,7 @@
 #include <asm/fpu/regset.h>
 #include <asm/fpu/signal.h>
 #include <asm/fpu/types.h>
+#include <asm/fpu/xstate.h>
 #include <asm/traps.h>
 
 #include <linux/hardirq.h>

@@ -183,7 +184,8 @@ void fpstate_init(union fpregs_state *state)
 	 * it will #GP. Make sure it is replaced after the memset().
 	 */
 	if (static_cpu_has(X86_FEATURE_XSAVES))
-		state->xsave.header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT;
+		state->xsave.header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT |
+					       xfeatures_mask;
 
 	if (static_cpu_has(X86_FEATURE_FXSR))
 		fpstate_init_fxstate(&state->fxsave);

@@ -268,6 +268,22 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
 
 	efi_scratch.use_pgd = true;
 
+	/*
+	 * Certain firmware versions are way too sentimential and still believe
+	 * they are exclusive and unquestionable owners of the first physical page,
+	 * even though they explicitly mark it as EFI_CONVENTIONAL_MEMORY
+	 * (but then write-access it later during SetVirtualAddressMap()).
+	 *
+	 * Create a 1:1 mapping for this page, to avoid triple faults during early
+	 * boot with such firmware. We are free to hand this page to the BIOS,
+	 * as trim_bios_range() will reserve the first page and isolate it away
+	 * from memory allocators anyway.
+	 */
+	if (kernel_map_pages_in_pgd(pgd, 0x0, 0x0, 1, _PAGE_RW)) {
+		pr_err("Failed to create 1:1 mapping for the first page!\n");
+		return 1;
+	}
+
 	/*
 	 * When making calls to the firmware everything needs to be 1:1
 	 * mapped and addressable with 32-bit pointers. Map the kernel

@@ -419,7 +419,7 @@ subsys_initcall(topology_init);
 
 void cpu_reset(void)
 {
-#if XCHAL_HAVE_PTP_MMU
+#if XCHAL_HAVE_PTP_MMU && IS_ENABLED(CONFIG_MMU)
 	local_irq_disable();
 	/*
 	 * We have full MMU: all autoload ways, ways 7, 8 and 9 of DTLB must

@@ -356,6 +356,7 @@ int crypto_register_alg(struct crypto_alg *alg)
 	struct crypto_larval *larval;
 	int err;
 
+	alg->cra_flags &= ~CRYPTO_ALG_DEAD;
 	err = crypto_check_alg(alg);
 	if (err)
 		return err;

@@ -1702,6 +1702,8 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
 
 		if (qc->err_mask & ~AC_ERR_OTHER)
 			qc->err_mask &= ~AC_ERR_OTHER;
+	} else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) {
+		qc->result_tf.command |= ATA_SENSE;
 	}
 
 	/* finish up */

@@ -4356,10 +4358,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 	{ "ST380013AS",		"3.20",		ATA_HORKAGE_MAX_SEC_1024 },
 
 	/*
-	 * Device times out with higher max sects.
+	 * These devices time out with higher max sects.
 	 * https://bugzilla.kernel.org/show_bug.cgi?id=121671
 	 */
-	{ "LITEON CX1-JB256-HP", NULL,		ATA_HORKAGE_MAX_SEC_1024 },
+	{ "LITEON CX1-JB*-HP",	NULL,		ATA_HORKAGE_MAX_SEC_1024 },
 
 	/* Devices we expect to fail diagnostics */
 

@@ -4132,6 +4132,9 @@ static int mv_platform_probe(struct platform_device *pdev)
 	host->iomap = NULL;
 	hpriv->base = devm_ioremap(&pdev->dev, res->start,
 				   resource_size(res));
+	if (!hpriv->base)
+		return -ENOMEM;
+
 	hpriv->base -= SATAHC0_REG_BASE;
 
 	hpriv->clk = clk_get(&pdev->dev, NULL);

@@ -45,6 +45,9 @@ int bcma_sprom_get(struct bcma_bus *bus);
 void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc);
 void bcma_core_chipcommon_init(struct bcma_drv_cc *cc);
 void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable);
+#ifdef CONFIG_BCMA_DRIVER_MIPS
+void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
+#endif /* CONFIG_BCMA_DRIVER_MIPS */
 
 /* driver_chipcommon_b.c */
 int bcma_core_chipcommon_b_init(struct bcma_drv_cc_b *ccb);

@@ -15,8 +15,6 @@
 #include <linux/platform_device.h>
 #include <linux/bcma/bcma.h>
 
-static void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
-
 static inline u32 bcma_cc_write32_masked(struct bcma_drv_cc *cc, u16 offset,
 					 u32 mask, u32 value)
 {

@@ -186,9 +184,6 @@ void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc)
 	if (cc->capabilities & BCMA_CC_CAP_PMU)
 		bcma_pmu_early_init(cc);
 
-	if (IS_BUILTIN(CONFIG_BCM47XX) && bus->hosttype == BCMA_HOSTTYPE_SOC)
-		bcma_chipco_serial_init(cc);
-
 	if (bus->hosttype == BCMA_HOSTTYPE_SOC)
 		bcma_core_chipcommon_flash_detect(cc);
 

@@ -378,9 +373,9 @@ u32 bcma_chipco_gpio_pulldown(struct bcma_drv_cc *cc, u32 mask, u32 value)
 	return res;
 }
 
-static void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
+#ifdef CONFIG_BCMA_DRIVER_MIPS
+void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
 {
-#if IS_BUILTIN(CONFIG_BCM47XX)
 	unsigned int irq;
 	u32 baud_base;
 	u32 i;

@@ -422,5 +417,5 @@ static void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
 		ports[i].baud_base = baud_base;
 		ports[i].reg_shift = 0;
 	}
-#endif /* CONFIG_BCM47XX */
 }
+#endif /* CONFIG_BCMA_DRIVER_MIPS */

@@ -278,9 +278,12 @@ static void bcma_core_mips_nvram_init(struct bcma_drv_mips *mcore)
 
 void bcma_core_mips_early_init(struct bcma_drv_mips *mcore)
 {
+	struct bcma_bus *bus = mcore->core->bus;
+
 	if (mcore->early_setup_done)
 		return;
 
+	bcma_chipco_serial_init(&bus->drv_cc);
 	bcma_core_mips_nvram_init(mcore);
 
 	mcore->early_setup_done = true;

@@ -153,6 +153,8 @@ struct cppi41_dd {
 
 	/* context for suspend/resume */
 	unsigned int dma_tdfdq;
+
+	bool is_suspended;
 };
 
 #define FIST_COMPLETION_QUEUE	93

@@ -257,6 +259,10 @@ static struct cppi41_channel *desc_to_chan(struct cppi41_dd *cdd, u32 desc)
 	BUG_ON(desc_num >= ALLOC_DECS_NUM);
 	c = cdd->chan_busy[desc_num];
 	cdd->chan_busy[desc_num] = NULL;
+
+	/* Usecount for chan_busy[], paired with push_desc_queue() */
+	pm_runtime_put(cdd->ddev.dev);
+
 	return c;
 }
 

@@ -317,12 +323,12 @@ static irqreturn_t cppi41_irq(int irq, void *data)
 
 	while (val) {
 		u32 desc, len;
-		int error;
 
-		error = pm_runtime_get(cdd->ddev.dev);
-		if (error < 0)
-			dev_err(cdd->ddev.dev, "%s pm runtime get: %i\n",
-				__func__, error);
+		/*
+		 * This should never trigger, see the comments in
+		 * push_desc_queue()
+		 */
+		WARN_ON(cdd->is_suspended);
 
 		q_num = __fls(val);
 		val &= ~(1 << q_num);

@@ -343,9 +349,6 @@ static irqreturn_t cppi41_irq(int irq, void *data)
 			c->residue = pd_trans_len(c->desc->pd6) - len;
 			dma_cookie_complete(&c->txd);
 			dmaengine_desc_get_callback_invoke(&c->txd, NULL);
-
-			pm_runtime_mark_last_busy(cdd->ddev.dev);
-			pm_runtime_put_autosuspend(cdd->ddev.dev);
 		}
 	}
 	return IRQ_HANDLED;

@@ -447,6 +450,15 @@ static void push_desc_queue(struct cppi41_channel *c)
 	 */
 	__iowmb();
 
+	/*
+	 * DMA transfers can take at least 200ms to complete with USB mass
+	 * storage connected. To prevent autosuspend timeouts, we must use
+	 * pm_runtime_get/put() when chan_busy[] is modified. This will get
+	 * cleared in desc_to_chan() or cppi41_stop_chan() depending on the
+	 * outcome of the transfer.
+	 */
+	pm_runtime_get(cdd->ddev.dev);
+
 	desc_phys = lower_32_bits(c->desc_phys);
 	desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
 	WARN_ON(cdd->chan_busy[desc_num]);

@@ -457,20 +469,26 @@ static void push_desc_queue(struct cppi41_channel *c)
 	cppi_writel(reg, cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num));
 }
 
-static void pending_desc(struct cppi41_channel *c)
+/*
+ * Caller must hold cdd->lock to prevent push_desc_queue()
+ * getting called out of order. We have both cppi41_dma_issue_pending()
+ * and cppi41_runtime_resume() call this function.
+ */
+static void cppi41_run_queue(struct cppi41_dd *cdd)
 {
-	struct cppi41_dd *cdd = c->cdd;
-	unsigned long flags;
+	struct cppi41_channel *c, *_c;
 
-	spin_lock_irqsave(&cdd->lock, flags);
-	list_add_tail(&c->node, &cdd->pending);
-	spin_unlock_irqrestore(&cdd->lock, flags);
+	list_for_each_entry_safe(c, _c, &cdd->pending, node) {
+		push_desc_queue(c);
+		list_del(&c->node);
+	}
 }
 
 static void cppi41_dma_issue_pending(struct dma_chan *chan)
 {
 	struct cppi41_channel *c = to_cpp41_chan(chan);
+	struct cppi41_dd *cdd = c->cdd;
+	unsigned long flags;
 	int error;
 
 	error = pm_runtime_get(cdd->ddev.dev);

@@ -482,10 +500,11 @@ static void cppi41_dma_issue_pending(struct dma_chan *chan)
 		return;
 	}
 
-	if (likely(pm_runtime_active(cdd->ddev.dev)))
-		push_desc_queue(c);
-	else
-		pending_desc(c);
+	spin_lock_irqsave(&cdd->lock, flags);
+	list_add_tail(&c->node, &cdd->pending);
+	if (!cdd->is_suspended)
+		cppi41_run_queue(cdd);
+	spin_unlock_irqrestore(&cdd->lock, flags);
 
 	pm_runtime_mark_last_busy(cdd->ddev.dev);
 	pm_runtime_put_autosuspend(cdd->ddev.dev);

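The reworked issue path is a standard deferred-work queue: producers always add to the pending list under the lock and only run the queue when the device is not suspended; cppi41_runtime_resume() then drains whatever accumulated while suspended. The pattern reduced to its skeleton with pthreads (illustrative names, not the driver's API):

    #include <pthread.h>
    #include <stdbool.h>

    struct workqueue {
        pthread_mutex_t lock;
        bool is_suspended;
        int pending[64];
        int n_pending;
    };

    /* Caller must hold q->lock, mirroring cppi41_run_queue()'s contract. */
    static void run_queue(struct workqueue *q)
    {
        while (q->n_pending > 0)
            q->pending[--q->n_pending] = 0;  /* stand-in for pushing a descriptor */
    }

    static void issue_pending(struct workqueue *q, int item)
    {
        pthread_mutex_lock(&q->lock);
        q->pending[q->n_pending++] = item;   /* always queue first */
        if (!q->is_suspended)                /* run now unless suspended */
            run_queue(q);
        pthread_mutex_unlock(&q->lock);
    }

    static void runtime_resume(struct workqueue *q)
    {
        pthread_mutex_lock(&q->lock);
        q->is_suspended = false;
        run_queue(q);                        /* drain work queued while suspended */
        pthread_mutex_unlock(&q->lock);
    }

    int main(void)
    {
        struct workqueue q = { PTHREAD_MUTEX_INITIALIZER, true, { 0 }, 0 };

        issue_pending(&q, 1);   /* queued only: device suspended */
        runtime_resume(&q);     /* drains the queued item */
        return 0;
    }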
@@ -705,6 +724,9 @@ static int cppi41_stop_chan(struct dma_chan *chan)
 	WARN_ON(!cdd->chan_busy[desc_num]);
 	cdd->chan_busy[desc_num] = NULL;
 
+	/* Usecount for chan_busy[], paired with push_desc_queue() */
+	pm_runtime_put(cdd->ddev.dev);
+
 	return 0;
 }
 

@@ -1150,8 +1172,12 @@ static int __maybe_unused cppi41_resume(struct device *dev)
 static int __maybe_unused cppi41_runtime_suspend(struct device *dev)
 {
 	struct cppi41_dd *cdd = dev_get_drvdata(dev);
+	unsigned long flags;
 
+	spin_lock_irqsave(&cdd->lock, flags);
+	cdd->is_suspended = true;
 	WARN_ON(!list_empty(&cdd->pending));
+	spin_unlock_irqrestore(&cdd->lock, flags);
 
 	return 0;
 }

@@ -1159,14 +1185,11 @@ static int __maybe_unused cppi41_runtime_suspend(struct device *dev)
 static int __maybe_unused cppi41_runtime_resume(struct device *dev)
 {
 	struct cppi41_dd *cdd = dev_get_drvdata(dev);
-	struct cppi41_channel *c, *_c;
 	unsigned long flags;
 
 	spin_lock_irqsave(&cdd->lock, flags);
-	list_for_each_entry_safe(c, _c, &cdd->pending, node) {
-		push_desc_queue(c);
-		list_del(&c->node);
-	}
+	cdd->is_suspended = false;
+	cppi41_run_queue(cdd);
 	spin_unlock_irqrestore(&cdd->lock, flags);
 
 	return 0;

@@ -1699,7 +1699,6 @@ static bool _chan_ns(const struct pl330_dmac *pl330, int i)
 static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
 {
 	struct pl330_thread *thrd = NULL;
-	unsigned long flags;
 	int chans, i;
 
 	if (pl330->state == DYING)

@@ -1707,8 +1706,6 @@ static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
 
 	chans = pl330->pcfg.num_chan;
 
-	spin_lock_irqsave(&pl330->lock, flags);
-
 	for (i = 0; i < chans; i++) {
 		thrd = &pl330->channels[i];
 		if ((thrd->free) && (!_manager_ns(thrd) ||

@@ -1726,8 +1723,6 @@ static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
 			thrd = NULL;
 		}
 
-	spin_unlock_irqrestore(&pl330->lock, flags);
-
 	return thrd;
 }
 

@@ -1745,7 +1740,6 @@ static inline void _free_event(struct pl330_thread *thrd, int ev)
 static void pl330_release_channel(struct pl330_thread *thrd)
 {
 	struct pl330_dmac *pl330;
-	unsigned long flags;
 
 	if (!thrd || thrd->free)
 		return;

@@ -1757,10 +1751,8 @@ static void pl330_release_channel(struct pl330_thread *thrd)
 
 	pl330 = thrd->dmac;
 
-	spin_lock_irqsave(&pl330->lock, flags);
 	_free_event(thrd, thrd->ev);
 	thrd->free = true;
-	spin_unlock_irqrestore(&pl330->lock, flags);
 }
 
 /* Initialize the structure for PL330 configuration, that can be used

@@ -2122,20 +2114,20 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
 	struct pl330_dmac *pl330 = pch->dmac;
 	unsigned long flags;
 
-	spin_lock_irqsave(&pch->lock, flags);
+	spin_lock_irqsave(&pl330->lock, flags);
 
 	dma_cookie_init(chan);
 	pch->cyclic = false;
 
 	pch->thread = pl330_request_channel(pl330);
 	if (!pch->thread) {
-		spin_unlock_irqrestore(&pch->lock, flags);
+		spin_unlock_irqrestore(&pl330->lock, flags);
 		return -ENOMEM;
 	}
 
 	tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);
 
-	spin_unlock_irqrestore(&pch->lock, flags);
+	spin_unlock_irqrestore(&pl330->lock, flags);
 
 	return 1;
 }

@@ -2238,12 +2230,13 @@ static int pl330_pause(struct dma_chan *chan)
 static void pl330_free_chan_resources(struct dma_chan *chan)
 {
 	struct dma_pl330_chan *pch = to_pchan(chan);
+	struct pl330_dmac *pl330 = pch->dmac;
 	unsigned long flags;
 
 	tasklet_kill(&pch->task);
 
 	pm_runtime_get_sync(pch->dmac->ddma.dev);
-	spin_lock_irqsave(&pch->lock, flags);
+	spin_lock_irqsave(&pl330->lock, flags);
 
 	pl330_release_channel(pch->thread);
 	pch->thread = NULL;

@@ -2251,7 +2244,7 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
 	if (pch->cyclic)
 		list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);
 
-	spin_unlock_irqrestore(&pch->lock, flags);
+	spin_unlock_irqrestore(&pl330->lock, flags);
 	pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
 	pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
 }

@@ -187,6 +187,7 @@ static efi_status_t update_fdt_memmap(void *fdt, struct efi_boot_memmap *map)
 struct exit_boot_struct {
 	efi_memory_desc_t	*runtime_map;
 	int			*runtime_entry_count;
+	void			*new_fdt_addr;
 };
 
 static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,

@@ -202,7 +203,7 @@ static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
 	efi_get_virtmap(*map->map, *map->map_size, *map->desc_size,
 			p->runtime_map, p->runtime_entry_count);
 
-	return EFI_SUCCESS;
+	return update_fdt_memmap(p->new_fdt_addr, map);
 }
 
 /*

@@ -300,22 +301,13 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
 
 	priv.runtime_map = runtime_map;
 	priv.runtime_entry_count = &runtime_entry_count;
+	priv.new_fdt_addr = (void *)*new_fdt_addr;
 	status = efi_exit_boot_services(sys_table, handle, &map, &priv,
 					exit_boot_func);
 
 	if (status == EFI_SUCCESS) {
 		efi_set_virtual_address_map_t *svam;
 
-		status = update_fdt_memmap((void *)*new_fdt_addr, &map);
-		if (status != EFI_SUCCESS) {
-			/*
-			 * The kernel won't get far without the memory map, but
-			 * may still be able to print something meaningful so
-			 * return success here.
-			 */
-			return EFI_SUCCESS;
-		}
-
 		/* Install the new virtual address map */
 		svam = sys_table->runtime->set_virtual_address_map;
 		status = svam(runtime_entry_count * desc_size, desc_size,

@@ -168,7 +168,7 @@ struct cp2112_device {
 	atomic_t xfer_avail;
 	struct gpio_chip gc;
 	u8 *in_out_buffer;
-	spinlock_t lock;
+	struct mutex lock;
 
 	struct gpio_desc *desc[8];
 	bool gpio_poll;

@@ -186,10 +186,9 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
 	struct cp2112_device *dev = gpiochip_get_data(chip);
 	struct hid_device *hdev = dev->hdev;
 	u8 *buf = dev->in_out_buffer;
-	unsigned long flags;
 	int ret;
 
-	spin_lock_irqsave(&dev->lock, flags);
+	mutex_lock(&dev->lock);
 
 	ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
 				 CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,

@@ -213,8 +212,8 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
 	ret = 0;
 
 exit:
-	spin_unlock_irqrestore(&dev->lock, flags);
-	return ret <= 0 ? ret : -EIO;
+	mutex_unlock(&dev->lock);
+	return ret < 0 ? ret : -EIO;
 }
 
 static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)

@@ -222,10 +221,9 @@ static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
 	struct cp2112_device *dev = gpiochip_get_data(chip);
 	struct hid_device *hdev = dev->hdev;
 	u8 *buf = dev->in_out_buffer;
-	unsigned long flags;
 	int ret;
 
-	spin_lock_irqsave(&dev->lock, flags);
+	mutex_lock(&dev->lock);
 
 	buf[0] = CP2112_GPIO_SET;
 	buf[1] = value ? 0xff : 0;

@@ -237,7 +235,7 @@ static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
 	if (ret < 0)
 		hid_err(hdev, "error setting GPIO values: %d\n", ret);
 
-	spin_unlock_irqrestore(&dev->lock, flags);
+	mutex_unlock(&dev->lock);
 }
 
 static int cp2112_gpio_get_all(struct gpio_chip *chip)

@@ -245,10 +243,9 @@ static int cp2112_gpio_get_all(struct gpio_chip *chip)
 	struct cp2112_device *dev = gpiochip_get_data(chip);
 	struct hid_device *hdev = dev->hdev;
 	u8 *buf = dev->in_out_buffer;
-	unsigned long flags;
 	int ret;
 
-	spin_lock_irqsave(&dev->lock, flags);
+	mutex_lock(&dev->lock);
 
 	ret = hid_hw_raw_request(hdev, CP2112_GPIO_GET, buf,
 				 CP2112_GPIO_GET_LENGTH, HID_FEATURE_REPORT,

@@ -262,7 +259,7 @@ static int cp2112_gpio_get_all(struct gpio_chip *chip)
 	ret = buf[1];
 
 exit:
-	spin_unlock_irqrestore(&dev->lock, flags);
+	mutex_unlock(&dev->lock);
 
 	return ret;
 }

@@ -284,10 +281,9 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
 	struct cp2112_device *dev = gpiochip_get_data(chip);
 	struct hid_device *hdev = dev->hdev;
 	u8 *buf = dev->in_out_buffer;
-	unsigned long flags;
 	int ret;
 
-	spin_lock_irqsave(&dev->lock, flags);
+	mutex_lock(&dev->lock);
 
 	ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
 				 CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,

@@ -308,7 +304,7 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
 		goto fail;
 	}
 
-	spin_unlock_irqrestore(&dev->lock, flags);
+	mutex_unlock(&dev->lock);
 
 	/*
 	 * Set gpio value when output direction is already set,

@@ -319,7 +315,7 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
 	return 0;
 
 fail:
-	spin_unlock_irqrestore(&dev->lock, flags);
+	mutex_unlock(&dev->lock);
 	return ret < 0 ? ret : -EIO;
 }
 

@@ -1235,7 +1231,7 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
 	if (!dev->in_out_buffer)
 		return -ENOMEM;
 
-	spin_lock_init(&dev->lock);
+	mutex_init(&dev->lock);
 
 	ret = hid_parse(hdev);
 	if (ret) {

|
|||
#define USB_VENDOR_ID_ALPS_JP 0x044E
|
||||
#define HID_DEVICE_ID_ALPS_U1_DUAL 0x120B
|
||||
|
||||
#define USB_VENDOR_ID_AMI 0x046b
|
||||
#define USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE 0xff10
|
||||
|
||||
#define USB_VENDOR_ID_ANTON 0x1130
|
||||
#define USB_DEVICE_ID_ANTON_TOUCH_PAD 0x3101
|
||||
|
||||
|
|
|
@ -872,7 +872,7 @@ static const struct hid_device_id lg_devices[] = {
|
|||
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG),
|
||||
.driver_data = LG_NOGET | LG_FF4 },
|
||||
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2),
|
||||
.driver_data = LG_FF2 },
|
||||
.driver_data = LG_NOGET | LG_FF2 },
|
||||
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940),
|
||||
.driver_data = LG_FF3 },
|
||||
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR),
|
||||
|
|
|
@@ -57,6 +57,7 @@ static const struct hid_blacklist {
 	{ USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2, HID_QUIRK_NO_INIT_REPORTS },
 	{ USB_VENDOR_ID_AKAI_09E8, USB_DEVICE_ID_AKAI_09E8_MIDIMIX, HID_QUIRK_NO_INIT_REPORTS },
+	{ USB_VENDOR_ID_AMI, USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE, HID_QUIRK_ALWAYS_POLL },
 	{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET },

@@ -166,19 +166,21 @@ static int wacom_pl_irq(struct wacom_wac *wacom)
 		wacom->id[0] = STYLUS_DEVICE_ID;
 	}
 
-	pressure = (signed char)((data[7] << 1) | ((data[4] >> 2) & 1));
-	if (features->pressure_max > 255)
-		pressure = (pressure << 1) | ((data[4] >> 6) & 1);
-	pressure += (features->pressure_max + 1) / 2;
-
-	input_report_abs(input, ABS_X, data[3] | (data[2] << 7) | ((data[1] & 0x03) << 14));
-	input_report_abs(input, ABS_Y, data[6] | (data[5] << 7) | ((data[4] & 0x03) << 14));
-	input_report_abs(input, ABS_PRESSURE, pressure);
-
-	input_report_key(input, BTN_TOUCH, data[4] & 0x08);
-	input_report_key(input, BTN_STYLUS, data[4] & 0x10);
-	/* Only allow the stylus2 button to be reported for the pen tool. */
-	input_report_key(input, BTN_STYLUS2, (wacom->tool[0] == BTN_TOOL_PEN) && (data[4] & 0x20));
+	if (prox) {
+		pressure = (signed char)((data[7] << 1) | ((data[4] >> 2) & 1));
+		if (features->pressure_max > 255)
+			pressure = (pressure << 1) | ((data[4] >> 6) & 1);
+		pressure += (features->pressure_max + 1) / 2;
+
+		input_report_abs(input, ABS_X, data[3] | (data[2] << 7) | ((data[1] & 0x03) << 14));
+		input_report_abs(input, ABS_Y, data[6] | (data[5] << 7) | ((data[4] & 0x03) << 14));
+		input_report_abs(input, ABS_PRESSURE, pressure);
+
+		input_report_key(input, BTN_TOUCH, data[4] & 0x08);
+		input_report_key(input, BTN_STYLUS, data[4] & 0x10);
+		/* Only allow the stylus2 button to be reported for the pen tool. */
+		input_report_key(input, BTN_STYLUS2, (wacom->tool[0] == BTN_TOOL_PEN) && (data[4] & 0x20));
+	}
 
 	if (!prox)
 		wacom->id[0] = 0;

@@ -901,7 +901,7 @@ void rmi_enable_irq(struct rmi_device *rmi_dev, bool clear_wake)
 	data->enabled = true;
 	if (clear_wake && device_may_wakeup(rmi_dev->xport->dev)) {
 		retval = disable_irq_wake(irq);
-		if (!retval)
+		if (retval)
 			dev_warn(&rmi_dev->dev,
 				 "Failed to disable irq for wake: %d\n",
 				 retval);

@@ -936,7 +936,7 @@ void rmi_disable_irq(struct rmi_device *rmi_dev, bool enable_wake)
 	disable_irq(irq);
 	if (enable_wake && device_may_wakeup(rmi_dev->xport->dev)) {
 		retval = enable_irq_wake(irq);
-		if (!retval)
+		if (retval)
 			dev_warn(&rmi_dev->dev,
 				 "Failed to enable irq for wake: %d\n",
 				 retval);

@@ -682,7 +682,7 @@ static int wm97xx_probe(struct device *dev)
 	}
 	platform_set_drvdata(wm->battery_dev, wm);
 	wm->battery_dev->dev.parent = dev;
-	wm->battery_dev->dev.platform_data = pdata->batt_pdata;
+	wm->battery_dev->dev.platform_data = pdata ? pdata->batt_pdata : NULL;
 	ret = platform_device_add(wm->battery_dev);
 	if (ret < 0)
 		goto batt_reg_err;

@@ -1152,6 +1152,12 @@ static void init_ring(struct net_device *dev)
 		if (skb == NULL)
 			break;
 		np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+		if (pci_dma_mapping_error(np->pci_dev,
+					  np->rx_info[i].mapping)) {
+			dev_kfree_skb(skb);
+			np->rx_info[i].skb = NULL;
+			break;
+		}
 		/* Grrr, we cannot offset to correctly align the IP header. */
 		np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
 	}

@@ -1182,8 +1188,9 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
 {
 	struct netdev_private *np = netdev_priv(dev);
 	unsigned int entry;
+	unsigned int prev_tx;
 	u32 status;
-	int i;
+	int i, j;
 
 	/*
 	 * be cautious here, wrapping the queue has weird semantics

@@ -1201,6 +1208,7 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
 	}
 #endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */
 
+	prev_tx = np->cur_tx;
 	entry = np->cur_tx % TX_RING_SIZE;
 	for (i = 0; i < skb_num_frags(skb); i++) {
 		int wrap_ring = 0;

@@ -1234,6 +1242,11 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
 					       skb_frag_size(this_frag),
 					       PCI_DMA_TODEVICE);
 		}
+		if (pci_dma_mapping_error(np->pci_dev,
+					  np->tx_info[entry].mapping)) {
+			dev->stats.tx_dropped++;
+			goto err_out;
+		}
 
 		np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping);
 		np->tx_ring[entry].status = cpu_to_le32(status);

@@ -1268,8 +1281,30 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
 		netif_stop_queue(dev);
 
 	return NETDEV_TX_OK;
-}
 
+err_out:
+	entry = prev_tx % TX_RING_SIZE;
+	np->tx_info[entry].skb = NULL;
+	if (i > 0) {
+		pci_unmap_single(np->pci_dev,
+				 np->tx_info[entry].mapping,
+				 skb_first_frag_len(skb),
+				 PCI_DMA_TODEVICE);
+		np->tx_info[entry].mapping = 0;
+		entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
+		for (j = 1; j < i; j++) {
+			pci_unmap_single(np->pci_dev,
+					 np->tx_info[entry].mapping,
+					 skb_frag_size(
+						&skb_shinfo(skb)->frags[j-1]),
+					 PCI_DMA_TODEVICE);
+			entry++;
+		}
+	}
+	dev_kfree_skb_any(skb);
+	np->cur_tx = prev_tx;
+	return NETDEV_TX_OK;
+}
 
 /* The interrupt handler does all of the Rx thread work and cleans up
    after the Tx thread. */

@ -1569,6 +1604,12 @@ static void refill_rx_ring(struct net_device *dev)
|
|||
break; /* Better luck next round. */
|
||||
np->rx_info[entry].mapping =
|
||||
pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
|
||||
if (pci_dma_mapping_error(np->pci_dev,
|
||||
np->rx_info[entry].mapping)) {
|
||||
dev_kfree_skb(skb);
|
||||
np->rx_info[entry].skb = NULL;
|
||||
break;
|
||||
}
|
||||
np->rx_ring[entry].rxaddr =
|
||||
cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
|
||||
}
|
||||
|
|
|
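The hunks above all follow one pattern: every DMA mapping the driver creates is now checked with pci_dma_mapping_error(), and on a mid-packet failure the transmit path unwinds whatever fragments it had already mapped before dropping the packet. A minimal standalone sketch of that check-and-unwind shape, using hypothetical map_frag()/unmap_frag() stand-ins rather than the real PCI DMA API:

#include <stdio.h>

#define NFRAGS 4
#define MAP_FAIL (-1)

/* Hypothetical stand-ins for pci_map_single()/pci_unmap_single(). */
static int map_frag(int i)
{
	return (i == 2) ? MAP_FAIL : 100 + i;   /* simulate failure on frag 2 */
}

static void unmap_frag(int handle)
{
	printf("unmapped %d\n", handle);
}

int main(void)
{
	int handles[NFRAGS];
	int i;

	for (i = 0; i < NFRAGS; i++) {
		handles[i] = map_frag(i);
		if (handles[i] == MAP_FAIL)
			goto err_out;
	}
	printf("all fragments mapped\n");
	return 0;

err_out:
	/* Unwind only the fragments that were successfully mapped. */
	while (--i >= 0)
		unmap_frag(handles[i]);
	printf("dropped packet after mapping failure\n");
	return 1;
}
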
@@ -43,13 +43,13 @@
 #define DEFAULT_RX_RING_SIZE	512 /* must be power of 2 */
 #define MIN_RX_RING_SIZE	64
 #define MAX_RX_RING_SIZE	8192
-#define RX_RING_BYTES(bp)	(sizeof(struct macb_dma_desc)	\
+#define RX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
 				 * (bp)->rx_ring_size)

 #define DEFAULT_TX_RING_SIZE	512 /* must be power of 2 */
 #define MIN_TX_RING_SIZE	64
 #define MAX_TX_RING_SIZE	4096
-#define TX_RING_BYTES(bp)	(sizeof(struct macb_dma_desc)	\
+#define TX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
 				 * (bp)->tx_ring_size)

 /* level of occupied TX descriptors under which we wake up TX process */

@@ -78,6 +78,37 @@
  */
 #define MACB_HALT_TIMEOUT	1230

+/* DMA buffer descriptor might be different size
+ * depends on hardware configuration.
+ */
+static unsigned int macb_dma_desc_get_size(struct macb *bp)
+{
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+	if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+		return sizeof(struct macb_dma_desc) + sizeof(struct macb_dma_desc_64);
+#endif
+	return sizeof(struct macb_dma_desc);
+}
+
+static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int idx)
+{
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+	/* Dma buffer descriptor is 4 words length (instead of 2 words)
+	 * for 64b GEM.
+	 */
+	if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+		idx <<= 1;
+#endif
+	return idx;
+}
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc)
+{
+	return (struct macb_dma_desc_64 *)((void *)desc + sizeof(struct macb_dma_desc));
+}
+#endif
+
 /* Ring buffer accessors */
 static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
 {

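The two helpers introduced above make every macb ring access go through a size and an index translation: on hardware with the 64-bit DMA capability each descriptor slot is twice as wide, so byte sizes come from macb_dma_desc_get_size() and logical ring indices are doubled by macb_adj_dma_desc_idx() before indexing the descriptor array. A standalone model of that translation (userspace C with invented types, not the driver's real structs):

#include <stdio.h>
#include <stddef.h>

struct desc32 { unsigned int addr, ctrl; };   /* base 2-word descriptor */
struct desc64 { unsigned int addrh, resvd; }; /* trailing half on 64-bit HW */

static int hw_64bit = 1;                      /* assumed capability flag */

static size_t desc_size(void)
{
	return hw_64bit ? sizeof(struct desc32) + sizeof(struct desc64)
			: sizeof(struct desc32);
}

static unsigned int adj_idx(unsigned int idx)
{
	return hw_64bit ? idx << 1 : idx;     /* 4-word slots: double the index */
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < 4; i++)
		printf("ring[%u] -> array index %u, byte offset %zu\n",
		       i, adj_idx(i), (size_t)i * desc_size());
	return 0;
}
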
@@ -87,7 +118,9 @@ static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
 static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
 					  unsigned int index)
 {
-	return &queue->tx_ring[macb_tx_ring_wrap(queue->bp, index)];
+	index = macb_tx_ring_wrap(queue->bp, index);
+	index = macb_adj_dma_desc_idx(queue->bp, index);
+	return &queue->tx_ring[index];
 }

 static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,

@@ -101,7 +134,7 @@ static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
 	dma_addr_t offset;

 	offset = macb_tx_ring_wrap(queue->bp, index) *
-		 sizeof(struct macb_dma_desc);
+		 macb_dma_desc_get_size(queue->bp);

 	return queue->tx_ring_dma + offset;
 }

@@ -113,7 +146,9 @@ static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
 static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
 {
-	return &bp->rx_ring[macb_rx_ring_wrap(bp, index)];
+	index = macb_rx_ring_wrap(bp, index);
+	index = macb_adj_dma_desc_idx(bp, index);
+	return &bp->rx_ring[index];
 }

 static void *macb_rx_buffer(struct macb *bp, unsigned int index)

@@ -560,12 +595,32 @@ static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
 	}
 }

-static inline void macb_set_addr(struct macb_dma_desc *desc, dma_addr_t addr)
+static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr)
 {
-	desc->addr = (u32)addr;
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-	desc->addrh = (u32)(addr >> 32);
+	struct macb_dma_desc_64 *desc_64;
+
+	if (bp->hw_dma_cap == HW_DMA_CAP_64B) {
+		desc_64 = macb_64b_desc(bp, desc);
+		desc_64->addrh = upper_32_bits(addr);
+	}
 #endif
+	desc->addr = lower_32_bits(addr);
+}
+
+static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
+{
+	dma_addr_t addr = 0;
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+	struct macb_dma_desc_64 *desc_64;
+
+	if (bp->hw_dma_cap == HW_DMA_CAP_64B) {
+		desc_64 = macb_64b_desc(bp, desc);
+		addr = ((u64)(desc_64->addrh) << 32);
+	}
+#endif
+	addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
+	return addr;
 }

 static void macb_tx_error_task(struct work_struct *work)

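macb_set_addr()/macb_get_addr() above split a dma_addr_t across the 32-bit addr word and, when the 64-bit capability is present, the addrh word of the trailing macb_dma_desc_64. The same split can be modeled with plain shifts, analogous to the kernel's upper_32_bits()/lower_32_bits() helpers (standalone sketch, not the driver code):

#include <stdio.h>
#include <stdint.h>

static uint32_t lower_32(uint64_t v) { return (uint32_t)v; }
static uint32_t upper_32(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
	uint64_t dma = 0x00000ABCD0001000ULL;   /* example 44-bit bus address */
	uint32_t addr  = lower_32(dma);         /* goes in desc->addr */
	uint32_t addrh = upper_32(dma);         /* goes in desc_64->addrh on 64-bit HW */

	printf("addr=0x%08x addrh=0x%08x\n", addr, addrh);
	printf("recombined=0x%llx\n",
	       (unsigned long long)(((uint64_t)addrh << 32) | addr));
	return 0;
}
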
@@ -649,16 +704,17 @@ static void macb_tx_error_task(struct work_struct *work)
 	/* Set end of TX queue */
 	desc = macb_tx_desc(queue, 0);
-	macb_set_addr(desc, 0);
+	macb_set_addr(bp, desc, 0);
 	desc->ctrl = MACB_BIT(TX_USED);

 	/* Make descriptor updates visible to hardware */
 	wmb();

 	/* Reinitialize the TX desc queue */
-	queue_writel(queue, TBQP, (u32)(queue->tx_ring_dma));
+	queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-	queue_writel(queue, TBQPH, (u32)(queue->tx_ring_dma >> 32));
+	if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+		queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
 #endif
 	/* Make TX ring reflect state of hardware */
 	queue->tx_head = 0;

@@ -750,6 +806,7 @@ static void gem_rx_refill(struct macb *bp)
 	unsigned int entry;
 	struct sk_buff *skb;
 	dma_addr_t paddr;
+	struct macb_dma_desc *desc;

 	while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail,
 			  bp->rx_ring_size) > 0) {

@@ -759,6 +816,7 @@ static void gem_rx_refill(struct macb *bp)
 		rmb();

 		bp->rx_prepared_head++;
+		desc = macb_rx_desc(bp, entry);

 		if (!bp->rx_skbuff[entry]) {
 			/* allocate sk_buff for this free entry in ring */

@@ -782,14 +840,14 @@ static void gem_rx_refill(struct macb *bp)
 			if (entry == bp->rx_ring_size - 1)
 				paddr |= MACB_BIT(RX_WRAP);
-			macb_set_addr(&(bp->rx_ring[entry]), paddr);
-			bp->rx_ring[entry].ctrl = 0;
+			macb_set_addr(bp, desc, paddr);
+			desc->ctrl = 0;

 			/* properly align Ethernet header */
 			skb_reserve(skb, NET_IP_ALIGN);
 		} else {
-			bp->rx_ring[entry].addr &= ~MACB_BIT(RX_USED);
-			bp->rx_ring[entry].ctrl = 0;
+			desc->addr &= ~MACB_BIT(RX_USED);
+			desc->ctrl = 0;
 		}
 	}

@@ -835,16 +893,13 @@ static int gem_rx(struct macb *bp, int budget)
 		bool rxused;

 		entry = macb_rx_ring_wrap(bp, bp->rx_tail);
-		desc = &bp->rx_ring[entry];
+		desc = macb_rx_desc(bp, entry);

 		/* Make hw descriptor updates visible to CPU */
 		rmb();

 		rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
-		addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-		addr |= ((u64)(desc->addrh) << 32);
-#endif
+		addr = macb_get_addr(bp, desc);
 		ctrl = desc->ctrl;

 		if (!rxused)

@@ -987,15 +1042,17 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
 static inline void macb_init_rx_ring(struct macb *bp)
 {
 	dma_addr_t addr;
+	struct macb_dma_desc *desc = NULL;
 	int i;

 	addr = bp->rx_buffers_dma;
 	for (i = 0; i < bp->rx_ring_size; i++) {
-		bp->rx_ring[i].addr = addr;
-		bp->rx_ring[i].ctrl = 0;
+		desc = macb_rx_desc(bp, i);
+		macb_set_addr(bp, desc, addr);
+		desc->ctrl = 0;
 		addr += bp->rx_buffer_size;
 	}
-	bp->rx_ring[bp->rx_ring_size - 1].addr |= MACB_BIT(RX_WRAP);
+	desc->addr |= MACB_BIT(RX_WRAP);
 	bp->rx_tail = 0;
 }

@@ -1008,15 +1065,14 @@ static int macb_rx(struct macb *bp, int budget)
 	for (tail = bp->rx_tail; budget > 0; tail++) {
 		struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
-		u32 addr, ctrl;
+		u32 ctrl;

 		/* Make hw descriptor updates visible to CPU */
 		rmb();

-		addr = desc->addr;
 		ctrl = desc->ctrl;

-		if (!(addr & MACB_BIT(RX_USED)))
+		if (!(desc->addr & MACB_BIT(RX_USED)))
 			break;

 		if (ctrl & MACB_BIT(RX_SOF)) {

@@ -1336,7 +1392,7 @@ static unsigned int macb_tx_map(struct macb *bp,
 	i = tx_head;
 	entry = macb_tx_ring_wrap(bp, i);
 	ctrl = MACB_BIT(TX_USED);
-	desc = &queue->tx_ring[entry];
+	desc = macb_tx_desc(queue, entry);
 	desc->ctrl = ctrl;

 	if (lso_ctrl) {

@@ -1358,7 +1414,7 @@ static unsigned int macb_tx_map(struct macb *bp,
 		i--;
 		entry = macb_tx_ring_wrap(bp, i);
 		tx_skb = &queue->tx_skb[entry];
-		desc = &queue->tx_ring[entry];
+		desc = macb_tx_desc(queue, entry);

 		ctrl = (u32)tx_skb->size;
 		if (eof) {

@@ -1379,7 +1435,7 @@ static unsigned int macb_tx_map(struct macb *bp,
 			ctrl |= MACB_BF(MSS_MFS, mss_mfs);

 		/* Set TX buffer descriptor */
-		macb_set_addr(desc, tx_skb->mapping);
+		macb_set_addr(bp, desc, tx_skb->mapping);
 		/* desc->addr must be visible to hardware before clearing
 		 * 'TX_USED' bit in desc->ctrl.
 		 */

@@ -1586,11 +1642,9 @@ static void gem_free_rx_buffers(struct macb *bp)
 		if (!skb)
 			continue;

-		desc = &bp->rx_ring[i];
-		addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-		addr |= ((u64)(desc->addrh) << 32);
-#endif
+		desc = macb_rx_desc(bp, i);
+		addr = macb_get_addr(bp, desc);

 		dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
 				 DMA_FROM_DEVICE);
 		dev_kfree_skb_any(skb);

@@ -1711,15 +1765,17 @@ out_err:
 static void gem_init_rings(struct macb *bp)
 {
 	struct macb_queue *queue;
+	struct macb_dma_desc *desc = NULL;
 	unsigned int q;
 	int i;

 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
 		for (i = 0; i < bp->tx_ring_size; i++) {
-			queue->tx_ring[i].addr = 0;
-			queue->tx_ring[i].ctrl = MACB_BIT(TX_USED);
+			desc = macb_tx_desc(queue, i);
+			macb_set_addr(bp, desc, 0);
+			desc->ctrl = MACB_BIT(TX_USED);
 		}
-		queue->tx_ring[bp->tx_ring_size - 1].ctrl |= MACB_BIT(TX_WRAP);
+		desc->ctrl |= MACB_BIT(TX_WRAP);
 		queue->tx_head = 0;
 		queue->tx_tail = 0;
 	}

@@ -1733,16 +1789,18 @@
 static void macb_init_rings(struct macb *bp)
 {
 	int i;
+	struct macb_dma_desc *desc = NULL;

 	macb_init_rx_ring(bp);

 	for (i = 0; i < bp->tx_ring_size; i++) {
-		bp->queues[0].tx_ring[i].addr = 0;
-		bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED);
+		desc = macb_tx_desc(&bp->queues[0], i);
+		macb_set_addr(bp, desc, 0);
+		desc->ctrl = MACB_BIT(TX_USED);
 	}
 	bp->queues[0].tx_head = 0;
 	bp->queues[0].tx_tail = 0;
-	bp->queues[0].tx_ring[bp->tx_ring_size - 1].ctrl |= MACB_BIT(TX_WRAP);
+	desc->ctrl |= MACB_BIT(TX_WRAP);
 }

 static void macb_reset_hw(struct macb *bp)

@@ -1863,7 +1921,8 @@ static void macb_configure_dma(struct macb *bp)
 			dmacfg &= ~GEM_BIT(TXCOEN);

 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-		dmacfg |= GEM_BIT(ADDR64);
+		if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+			dmacfg |= GEM_BIT(ADDR64);
 #endif
 		netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
 			   dmacfg);

@@ -1910,14 +1969,16 @@ static void macb_init_hw(struct macb *bp)
 	macb_configure_dma(bp);

 	/* Initialize TX and RX buffers */
-	macb_writel(bp, RBQP, (u32)(bp->rx_ring_dma));
+	macb_writel(bp, RBQP, lower_32_bits(bp->rx_ring_dma));
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-	macb_writel(bp, RBQPH, (u32)(bp->rx_ring_dma >> 32));
+	if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+		macb_writel(bp, RBQPH, upper_32_bits(bp->rx_ring_dma));
 #endif
 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
-		queue_writel(queue, TBQP, (u32)(queue->tx_ring_dma));
+		queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-		queue_writel(queue, TBQPH, (u32)(queue->tx_ring_dma >> 32));
+		if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+			queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
 #endif

 		/* Enable interrupts */

@@ -2627,7 +2688,8 @@ static int macb_init(struct platform_device *pdev)
 			queue->IMR  = GEM_IMR(hw_q - 1);
 			queue->TBQP = GEM_TBQP(hw_q - 1);
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-			queue->TBQPH = GEM_TBQPH(hw_q -1);
+			if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+				queue->TBQPH = GEM_TBQPH(hw_q - 1);
 #endif
 		} else {
 			/* queue0 uses legacy registers */

@@ -2637,7 +2699,8 @@ static int macb_init(struct platform_device *pdev)
 			queue->IMR  = MACB_IMR;
 			queue->TBQP = MACB_TBQP;
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-			queue->TBQPH = MACB_TBQPH;
+			if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+				queue->TBQPH = MACB_TBQPH;
 #endif
 		}

@@ -2730,13 +2793,14 @@
 static int at91ether_start(struct net_device *dev)
 {
 	struct macb *lp = netdev_priv(dev);
+	struct macb_dma_desc *desc;
 	dma_addr_t addr;
 	u32 ctl;
 	int i;

 	lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
 					 (AT91ETHER_MAX_RX_DESCR *
-					  sizeof(struct macb_dma_desc)),
+					  macb_dma_desc_get_size(lp)),
 					 &lp->rx_ring_dma, GFP_KERNEL);
 	if (!lp->rx_ring)
 		return -ENOMEM;

@@ -2748,7 +2812,7 @@ static int at91ether_start(struct net_device *dev)
 	if (!lp->rx_buffers) {
 		dma_free_coherent(&lp->pdev->dev,
 				  AT91ETHER_MAX_RX_DESCR *
-				  sizeof(struct macb_dma_desc),
+				  macb_dma_desc_get_size(lp),
 				  lp->rx_ring, lp->rx_ring_dma);
 		lp->rx_ring = NULL;
 		return -ENOMEM;

@@ -2756,13 +2820,14 @@ static int at91ether_start(struct net_device *dev)
 	addr = lp->rx_buffers_dma;
 	for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
-		lp->rx_ring[i].addr = addr;
-		lp->rx_ring[i].ctrl = 0;
+		desc = macb_rx_desc(lp, i);
+		macb_set_addr(lp, desc, addr);
+		desc->ctrl = 0;
 		addr += AT91ETHER_MAX_RBUFF_SZ;
 	}

 	/* Set the Wrap bit on the last descriptor */
-	lp->rx_ring[AT91ETHER_MAX_RX_DESCR - 1].addr |= MACB_BIT(RX_WRAP);
+	desc->addr |= MACB_BIT(RX_WRAP);

 	/* Reset buffer index */
 	lp->rx_tail = 0;

@@ -2834,7 +2899,7 @@ static int at91ether_close(struct net_device *dev)
 	dma_free_coherent(&lp->pdev->dev,
 			  AT91ETHER_MAX_RX_DESCR *
-			  sizeof(struct macb_dma_desc),
+			  macb_dma_desc_get_size(lp),
 			  lp->rx_ring, lp->rx_ring_dma);
 	lp->rx_ring = NULL;

@@ -2885,13 +2950,15 @@ static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
 static void at91ether_rx(struct net_device *dev)
 {
 	struct macb *lp = netdev_priv(dev);
+	struct macb_dma_desc *desc;
 	unsigned char *p_recv;
 	struct sk_buff *skb;
 	unsigned int pktlen;

-	while (lp->rx_ring[lp->rx_tail].addr & MACB_BIT(RX_USED)) {
+	desc = macb_rx_desc(lp, lp->rx_tail);
+	while (desc->addr & MACB_BIT(RX_USED)) {
 		p_recv = lp->rx_buffers + lp->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
-		pktlen = MACB_BF(RX_FRMLEN, lp->rx_ring[lp->rx_tail].ctrl);
+		pktlen = MACB_BF(RX_FRMLEN, desc->ctrl);
 		skb = netdev_alloc_skb(dev, pktlen + 2);
 		if (skb) {
 			skb_reserve(skb, 2);

@@ -2905,17 +2972,19 @@
 			lp->stats.rx_dropped++;
 		}

-		if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH))
+		if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH))
 			lp->stats.multicast++;

 		/* reset ownership bit */
-		lp->rx_ring[lp->rx_tail].addr &= ~MACB_BIT(RX_USED);
+		desc->addr &= ~MACB_BIT(RX_USED);

 		/* wrap after last buffer */
 		if (lp->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
 			lp->rx_tail = 0;
 		else
 			lp->rx_tail++;
+
+		desc = macb_rx_desc(lp, lp->rx_tail);
 	}
 }

@@ -3211,8 +3280,11 @@ static int macb_probe(struct platform_device *pdev)
 	device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);

 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-	if (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1)) > GEM_DBW32)
+	if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
 		dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
+		bp->hw_dma_cap = HW_DMA_CAP_64B;
+	} else
+		bp->hw_dma_cap = HW_DMA_CAP_32B;
 #endif

 	spin_lock_init(&bp->lock);

@@ -385,6 +385,8 @@
 /* Bitfields in DCFG6. */
 #define GEM_PBUF_LSO_OFFSET			27
 #define GEM_PBUF_LSO_SIZE			1
+#define GEM_DAW64_OFFSET			23
+#define GEM_DAW64_SIZE				1

 /* Constants for CLK */
 #define MACB_CLK_DIV8				0

@@ -487,12 +489,20 @@
 struct macb_dma_desc {
 	u32	addr;
 	u32	ctrl;
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-	u32	addrh;
-	u32	resvd;
-#endif
 };

+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+enum macb_hw_dma_cap {
+	HW_DMA_CAP_32B,
+	HW_DMA_CAP_64B,
+};
+
+struct macb_dma_desc_64 {
+	u32 addrh;
+	u32 resvd;
+};
+#endif
+
 /* DMA descriptor bitfields */
 #define MACB_RX_USED_OFFSET			0
 #define MACB_RX_USED_SIZE			1

@@ -874,6 +884,10 @@ struct macb {
 	unsigned int		jumbo_max_len;

 	u32			wol;
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+	enum macb_hw_dma_cap hw_dma_cap;
+#endif
 };

 static inline bool macb_is_gem(struct macb *bp)

@@ -116,8 +116,7 @@ void xcv_setup_link(bool link_up, int link_speed)
 	int speed = 2;

 	if (!xcv) {
-		dev_err(&xcv->pdev->dev,
-			"XCV init not done, probe may have failed\n");
+		pr_err("XCV init not done, probe may have failed\n");
 		return;
 	}

@@ -362,8 +362,10 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
 		status = -EPERM;
 		goto err;
 	}
-done:
+
+	/* Remember currently programmed MAC */
 	ether_addr_copy(adapter->dev_mac, addr->sa_data);
+done:
 	ether_addr_copy(netdev->dev_addr, addr->sa_data);
 	dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
 	return 0;

@@ -3618,8 +3620,10 @@ static void be_disable_if_filters(struct be_adapter *adapter)
 {
 	/* Don't delete MAC on BE3 VFs without FILTMGMT privilege */
 	if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
-	    check_privilege(adapter, BE_PRIV_FILTMGMT))
+	    check_privilege(adapter, BE_PRIV_FILTMGMT)) {
 		be_dev_mac_del(adapter, adapter->pmac_id[0]);
+		eth_zero_addr(adapter->dev_mac);
+	}

 	be_clear_uc_list(adapter);
 	be_clear_mc_list(adapter);

@@ -3773,12 +3777,27 @@ static int be_enable_if_filters(struct be_adapter *adapter)
 	if (status)
 		return status;

-	/* Don't add MAC on BE3 VFs without FILTMGMT privilege */
-	if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
-	    check_privilege(adapter, BE_PRIV_FILTMGMT)) {
+	/* Normally this condition usually true as the ->dev_mac is zeroed.
+	 * But on BE3 VFs the initial MAC is pre-programmed by PF and
+	 * subsequent be_dev_mac_add() can fail (after fresh boot)
+	 */
+	if (!ether_addr_equal(adapter->dev_mac, adapter->netdev->dev_addr)) {
+		int old_pmac_id = -1;
+
+		/* Remember old programmed MAC if any - can happen on BE3 VF */
+		if (!is_zero_ether_addr(adapter->dev_mac))
+			old_pmac_id = adapter->pmac_id[0];
+
 		status = be_dev_mac_add(adapter, adapter->netdev->dev_addr);
 		if (status)
 			return status;
+
+		/* Delete the old programmed MAC as we successfully programmed
+		 * a new MAC
+		 */
+		if (old_pmac_id >= 0 && old_pmac_id != adapter->pmac_id[0])
+			be_dev_mac_del(adapter, old_pmac_id);
+
+		ether_addr_copy(adapter->dev_mac, adapter->netdev->dev_addr);
 	}

@@ -4552,6 +4571,10 @@ static int be_mac_setup(struct be_adapter *adapter)
 		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
 		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
+
+		/* Initial MAC for BE3 VFs is already programmed by PF */
+		if (BEx_chip(adapter) && be_virtfn(adapter))
+			memcpy(adapter->dev_mac, mac, ETH_ALEN);
 	}

 	return 0;

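The be_enable_if_filters() change above is effectively a MAC swap: if an entry is already programmed (possible on BE3 VFs, where the PF pre-programs one), its id is remembered, the new MAC is added first, and only then is the old entry deleted. A standalone model of that add-then-delete ordering (C, with an invented filter-table API, not the be2net firmware interface):

#include <stdio.h>
#include <string.h>

static char table[4][18];                    /* hypothetical MAC filter table */

static int mac_add(const char *mac)          /* returns slot id, -1 on failure */
{
	for (int i = 0; i < 4; i++)
		if (!table[i][0]) { strcpy(table[i], mac); return i; }
	return -1;
}

static void mac_del(int id) { table[id][0] = '\0'; }

int main(void)
{
	int old_id = mac_add("02:00:00:00:00:01");   /* pre-programmed MAC */
	int new_id = mac_add("02:00:00:00:00:02");   /* program the new MAC first */

	if (new_id < 0)
		return 1;                            /* keep the old one on failure */
	if (old_id >= 0 && old_id != new_id)
		mac_del(old_id);                     /* only now drop the old entry */
	printf("active slot: %d\n", new_id);
	return 0;
}
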
@@ -2010,8 +2010,8 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
 		if (!rxb->page)
 			continue;

-		dma_unmap_single(rx_queue->dev, rxb->dma,
-				 PAGE_SIZE, DMA_FROM_DEVICE);
+		dma_unmap_page(rx_queue->dev, rxb->dma,
+			       PAGE_SIZE, DMA_FROM_DEVICE);
 		__free_page(rxb->page);

 		rxb->page = NULL;

@@ -158,7 +158,7 @@ static int mlx4_reset_slave(struct mlx4_dev *dev)
 	return -ETIMEDOUT;
 }

-static int mlx4_comm_internal_err(u32 slave_read)
+int mlx4_comm_internal_err(u32 slave_read)
 {
 	return (u32)COMM_CHAN_EVENT_INTERNAL_ERR ==
 		(slave_read & (u32)COMM_CHAN_EVENT_INTERNAL_ERR) ? 1 : 0;

@@ -222,6 +222,18 @@ void mlx4_unregister_device(struct mlx4_dev *dev)
 		return;

 	mlx4_stop_catas_poll(dev);
+	if (dev->persist->interface_state & MLX4_INTERFACE_STATE_DELETION &&
+	    mlx4_is_slave(dev)) {
+		/* In mlx4_remove_one on a VF */
+		u32 slave_read =
+			swab32(readl(&mlx4_priv(dev)->mfunc.comm->slave_read));
+
+		if (mlx4_comm_internal_err(slave_read)) {
+			mlx4_dbg(dev, "%s: comm channel is down, entering error state.\n",
+				 __func__);
+			mlx4_enter_error_state(dev->persist);
+		}
+	}
 	mutex_lock(&intf_mutex);

 	list_for_each_entry(intf, &intf_list, list)

@@ -1220,6 +1220,7 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type);
 void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);

 void mlx4_enter_error_state(struct mlx4_dev_persistent *persist);
+int mlx4_comm_internal_err(u32 slave_read);

 int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
 		    enum mlx4_port_type *type);

@@ -1728,7 +1728,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
 	if (cmd->cmdif_rev > CMD_IF_REV) {
 		dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n",
 			CMD_IF_REV, cmd->cmdif_rev);
-		err = -ENOTSUPP;
+		err = -EOPNOTSUPP;
 		goto err_free_page;
 	}

@@ -791,7 +791,8 @@ void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
 int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd);

 int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix);
-void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv);
+void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc,
+				    enum mlx5e_traffic_types tt);

 int mlx5e_open_locked(struct net_device *netdev);
 int mlx5e_close_locked(struct net_device *netdev);

@@ -863,12 +864,12 @@ static inline void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) {}

 static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv)
 {
-	return -ENOTSUPP;
+	return -EOPNOTSUPP;
 }

 static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv)
 {
-	return -ENOTSUPP;
+	return -EOPNOTSUPP;
 }
 #else
 int mlx5e_arfs_create_tables(struct mlx5e_priv *priv);

@@ -89,7 +89,7 @@ static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
 	int i;

 	if (!MLX5_CAP_GEN(priv->mdev, ets))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;

 	ets->ets_cap = mlx5_max_tc(priv->mdev) + 1;
 	for (i = 0; i < ets->ets_cap; i++) {

@@ -236,7 +236,7 @@ static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
 	int err;

 	if (!MLX5_CAP_GEN(priv->mdev, ets))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;

 	err = mlx5e_dbcnl_validate_ets(netdev, ets);
 	if (err)

@@ -402,7 +402,7 @@ static u8 mlx5e_dcbnl_setall(struct net_device *netdev)
 	struct mlx5_core_dev *mdev = priv->mdev;
 	struct ieee_ets ets;
 	struct ieee_pfc pfc;
-	int err = -ENOTSUPP;
+	int err = -EOPNOTSUPP;
 	int i;

 	if (!MLX5_CAP_GEN(mdev, ets))

@@ -511,6 +511,11 @@ static void mlx5e_dcbnl_getpgtccfgtx(struct net_device *netdev,
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 	struct mlx5_core_dev *mdev = priv->mdev;

+	if (!MLX5_CAP_GEN(priv->mdev, ets)) {
+		netdev_err(netdev, "%s, ets is not supported\n", __func__);
+		return;
+	}
+
 	if (priority >= CEE_DCBX_MAX_PRIO) {
 		netdev_err(netdev,
 			   "%s, priority is out of range\n", __func__);

@@ -595,7 +595,7 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
 	struct mlx5e_priv *priv = netdev_priv(netdev);

 	if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;

 	coal->rx_coalesce_usecs = priv->params.rx_cq_moderation.usec;
 	coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation.pkts;

@@ -620,7 +620,7 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
 	int i;

 	if (!MLX5_CAP_GEN(mdev, cq_moderation))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;

 	mutex_lock(&priv->state_lock);

@@ -980,15 +980,18 @@ static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,

 static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
 {
-	struct mlx5_core_dev *mdev = priv->mdev;
 	void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
-	int i;
+	struct mlx5_core_dev *mdev = priv->mdev;
+	int ctxlen = MLX5_ST_SZ_BYTES(tirc);
+	int tt;

 	MLX5_SET(modify_tir_in, in, bitmask.hash, 1);
-	mlx5e_build_tir_ctx_hash(tirc, priv);

-	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
-		mlx5_core_modify_tir(mdev, priv->indir_tir[i].tirn, in, inlen);
+	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+		memset(tirc, 0, ctxlen);
+		mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt);
+		mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen);
+	}
 }

 static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,

@@ -996,6 +999,7 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
 	int inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
+	bool hash_changed = false;
 	void *in;

 	if ((hfunc != ETH_RSS_HASH_NO_CHANGE) &&

@@ -1017,14 +1021,21 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
 		mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
 	}

-	if (key)
+	if (hfunc != ETH_RSS_HASH_NO_CHANGE &&
+	    hfunc != priv->params.rss_hfunc) {
+		priv->params.rss_hfunc = hfunc;
+		hash_changed = true;
+	}
+
+	if (key) {
 		memcpy(priv->params.toeplitz_hash_key, key,
 		       sizeof(priv->params.toeplitz_hash_key));
+		hash_changed = hash_changed ||
+			       priv->params.rss_hfunc == ETH_RSS_HASH_TOP;
+	}

-	if (hfunc != ETH_RSS_HASH_NO_CHANGE)
-		priv->params.rss_hfunc = hfunc;
-
-	mlx5e_modify_tirs_hash(priv, in, inlen);
+	if (hash_changed)
+		mlx5e_modify_tirs_hash(priv, in, inlen);

 	mutex_unlock(&priv->state_lock);

@@ -1296,7 +1307,7 @@ static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 	u32 mlx5_wol_mode;

 	if (!wol_supported)
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;

 	if (wol->wolopts & ~wol_supported)
 		return -EINVAL;

@@ -1426,7 +1437,7 @@ static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
 	if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE &&
 	    !MLX5_CAP_GEN(mdev, cq_period_start_from_cqe))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;

 	if (!rx_mode_changed)
 		return 0;

@@ -1452,7 +1463,7 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
 	bool reset;

 	if (!MLX5_CAP_GEN(mdev, cqe_compression))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;

 	if (enable && priv->tstamp.hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) {
 		netdev_err(netdev, "Can't enable cqe compression while timestamping is enabled.\n");

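The mlx5e_set_rxfh() rework above only reprograms the hardware when something that actually affects the RSS hash changed: switching the hash function always counts, while a new key only matters when the Toeplitz function is (or becomes) the selected one. A small standalone model of that decision, with invented names rather than the driver's:

#include <stdio.h>
#include <stdbool.h>

enum hfunc { HASH_NO_CHANGE, HASH_XOR, HASH_TOP /* Toeplitz */ };

static bool needs_reprogram(enum hfunc cur, enum hfunc req, bool new_key)
{
	bool changed = false;

	if (req != HASH_NO_CHANGE && req != cur) {
		cur = req;                   /* function switch: always reprogram */
		changed = true;
	}
	if (new_key)                         /* a key only matters for Toeplitz */
		changed = changed || cur == HASH_TOP;
	return changed;
}

int main(void)
{
	printf("%d\n", needs_reprogram(HASH_TOP, HASH_NO_CHANGE, true));  /* 1 */
	printf("%d\n", needs_reprogram(HASH_XOR, HASH_NO_CHANGE, true));  /* 0 */
	printf("%d\n", needs_reprogram(HASH_XOR, HASH_TOP, false));       /* 1 */
	return 0;
}
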
@@ -1089,7 +1089,7 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
 					       MLX5_FLOW_NAMESPACE_KERNEL);

 	if (!priv->fs.ns)
-		return -EINVAL;
+		return -EOPNOTSUPP;

 	err = mlx5e_arfs_create_tables(priv);
 	if (err) {

@@ -92,7 +92,7 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
 	ns = mlx5_get_flow_namespace(priv->mdev,
 				     MLX5_FLOW_NAMESPACE_ETHTOOL);
 	if (!ns)
-		return ERR_PTR(-ENOTSUPP);
+		return ERR_PTR(-EOPNOTSUPP);

 	table_size = min_t(u32, BIT(MLX5_CAP_FLOWTABLE(priv->mdev,
 						       flow_table_properties_nic_receive.log_max_ft_size)),

@@ -2022,8 +2022,23 @@ static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
 	MLX5_SET(tirc, tirc, lro_timeout_period_usecs, priv->params.lro_timeout);
 }

-void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
+void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc,
+				    enum mlx5e_traffic_types tt)
 {
+	void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+
+#define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+				 MLX5_HASH_FIELD_SEL_DST_IP)
+
+#define MLX5_HASH_IP_L4PORTS    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+				 MLX5_HASH_FIELD_SEL_DST_IP   |\
+				 MLX5_HASH_FIELD_SEL_L4_SPORT |\
+				 MLX5_HASH_FIELD_SEL_L4_DPORT)
+
+#define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+				 MLX5_HASH_FIELD_SEL_DST_IP   |\
+				 MLX5_HASH_FIELD_SEL_IPSEC_SPI)
+
 	MLX5_SET(tirc, tirc, rx_hash_fn,
 		 mlx5e_rx_hash_fn(priv->params.rss_hfunc));
 	if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {

@@ -2035,6 +2050,88 @@ void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
 		MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
 		memcpy(rss_key, priv->params.toeplitz_hash_key, len);
 	}
+
+	switch (tt) {
+	case MLX5E_TT_IPV4_TCP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+			 MLX5_L4_PROT_TYPE_TCP);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_L4PORTS);
+		break;
+
+	case MLX5E_TT_IPV6_TCP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+			 MLX5_L4_PROT_TYPE_TCP);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_L4PORTS);
+		break;
+
+	case MLX5E_TT_IPV4_UDP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+			 MLX5_L4_PROT_TYPE_UDP);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_L4PORTS);
+		break;
+
+	case MLX5E_TT_IPV6_UDP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+			 MLX5_L4_PROT_TYPE_UDP);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_L4PORTS);
+		break;
+
+	case MLX5E_TT_IPV4_IPSEC_AH:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_IPSEC_SPI);
+		break;
+
+	case MLX5E_TT_IPV6_IPSEC_AH:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_IPSEC_SPI);
+		break;
+
+	case MLX5E_TT_IPV4_IPSEC_ESP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_IPSEC_SPI);
+		break;
+
+	case MLX5E_TT_IPV6_IPSEC_ESP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_IPSEC_SPI);
+		break;
+
+	case MLX5E_TT_IPV4:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP);
+		break;
+
+	case MLX5E_TT_IPV6:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP);
+		break;
+	default:
+		WARN_ONCE(true, "%s: bad traffic type!\n", __func__);
+	}
 }

 static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)

@@ -2404,110 +2501,13 @@ void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
 static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
 				      enum mlx5e_traffic_types tt)
 {
-	void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
-
 	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
-
-#define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
-				 MLX5_HASH_FIELD_SEL_DST_IP)
-
-#define MLX5_HASH_IP_L4PORTS    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
-				 MLX5_HASH_FIELD_SEL_DST_IP   |\
-				 MLX5_HASH_FIELD_SEL_L4_SPORT |\
-				 MLX5_HASH_FIELD_SEL_L4_DPORT)
-
-#define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP   |\
-				 MLX5_HASH_FIELD_SEL_DST_IP   |\
-				 MLX5_HASH_FIELD_SEL_IPSEC_SPI)
-
 	mlx5e_build_tir_ctx_lro(tirc, priv);

 	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
 	MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
-	mlx5e_build_tir_ctx_hash(tirc, priv);
-
-	switch (tt) {
-	case MLX5E_TT_IPV4_TCP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV4);
-		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-			 MLX5_L4_PROT_TYPE_TCP);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_L4PORTS);
-		break;
-
-	case MLX5E_TT_IPV6_TCP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV6);
-		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-			 MLX5_L4_PROT_TYPE_TCP);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_L4PORTS);
-		break;
-
-	case MLX5E_TT_IPV4_UDP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV4);
-		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-			 MLX5_L4_PROT_TYPE_UDP);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_L4PORTS);
-		break;
-
-	case MLX5E_TT_IPV6_UDP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV6);
-		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-			 MLX5_L4_PROT_TYPE_UDP);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_L4PORTS);
-		break;
-
-	case MLX5E_TT_IPV4_IPSEC_AH:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV4);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_IPSEC_SPI);
-		break;
-
-	case MLX5E_TT_IPV6_IPSEC_AH:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV6);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_IPSEC_SPI);
-		break;
-
-	case MLX5E_TT_IPV4_IPSEC_ESP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV4);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_IPSEC_SPI);
-		break;
-
-	case MLX5E_TT_IPV6_IPSEC_ESP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV6);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_IPSEC_SPI);
-		break;
-
-	case MLX5E_TT_IPV4:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV4);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP);
-		break;
-
-	case MLX5E_TT_IPV6:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV6);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP);
-		break;
-	default:
-		WARN_ONCE(true,
-			  "mlx5e_build_indir_tir_ctx: bad traffic type!\n");
-	}
+	mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt);
 }

 static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,

@@ -3331,7 +3331,7 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
 static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
 {
 	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 	if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
 	    !MLX5_CAP_GEN(mdev, nic_flow_table) ||
 	    !MLX5_CAP_ETH(mdev, csum_cap) ||

@@ -3343,7 +3343,7 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
 	    < 3) {
 		mlx5_core_warn(mdev,
 			       "Not creating net device, some required device capabilities are missing\n");
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 	}
 	if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
 		mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");

@@ -663,6 +663,7 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
 				   __be32 *saddr,
 				   int *out_ttl)
 {
+	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
 	struct rtable *rt;
 	struct neighbour *n = NULL;
 	int ttl;

@@ -677,12 +678,11 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
 #else
 	return -EOPNOTSUPP;
 #endif
-
-	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev)) {
-		pr_warn("%s: can't offload, devices not on same HW e-switch\n", __func__);
-		ip_rt_put(rt);
-		return -EOPNOTSUPP;
-	}
+	/* if the egress device isn't on the same HW e-switch, we use the uplink */
+	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
+		*out_dev = mlx5_eswitch_get_uplink_netdev(esw);
+	else
+		*out_dev = rt->dst.dev;

 	ttl = ip4_dst_hoplimit(&rt->dst);
 	n = dst_neigh_lookup(&rt->dst, &fl4->daddr);

@@ -693,7 +693,6 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
 	*out_n = n;
 	*saddr = fl4->saddr;
 	*out_ttl = ttl;
-	*out_dev = rt->dst.dev;

 	return 0;
 }

@@ -133,7 +133,7 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
 	if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
 	    !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;

 	esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%x\n",
 		  vport, vlan, qos, set_flags);

@@ -353,7 +353,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports)
 	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
 	if (!root_ns) {
 		esw_warn(dev, "Failed to get FDB flow namespace\n");
-		return -ENOMEM;
+		return -EOPNOTSUPP;
 	}

 	flow_group_in = mlx5_vzalloc(inlen);

@@ -962,7 +962,7 @@ static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
 	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
 	if (!root_ns) {
 		esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
-		return -EIO;
+		return -EOPNOTSUPP;
 	}

 	flow_group_in = mlx5_vzalloc(inlen);

@@ -1079,7 +1079,7 @@ static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
 	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
 	if (!root_ns) {
 		esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
-		return -EIO;
+		return -EOPNOTSUPP;
 	}

 	flow_group_in = mlx5_vzalloc(inlen);

@@ -1630,7 +1630,7 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
 	if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) ||
 	    !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
 		esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 	}

 	if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))

@@ -166,7 +166,7 @@ static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
 	return 0;

 out_notsupp:
-	return -ENOTSUPP;
+	return -EOPNOTSUPP;
 }

 int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,

@@ -424,6 +424,7 @@ static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
 	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
 	if (!root_ns) {
 		esw_warn(dev, "Failed to get FDB flow namespace\n");
+		err = -EOPNOTSUPP;
 		goto ns_err;
 	}

@@ -535,7 +536,7 @@ static int esw_create_offloads_table(struct mlx5_eswitch *esw)
 	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
 	if (!ns) {
 		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
-		return -ENOMEM;
+		return -EOPNOTSUPP;
 	}

 	ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0, 0);

@@ -655,7 +656,7 @@ static int esw_offloads_start(struct mlx5_eswitch *esw)
 		esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
 		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
 		if (err1)
-			esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err);
+			esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1);
 	}
 	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
 		if (mlx5_eswitch_inline_mode_get(esw,

@@ -674,9 +675,14 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
 	int vport;
 	int err;

+	/* disable PF RoCE so missed packets don't go through RoCE steering */
+	mlx5_dev_list_lock();
+	mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+	mlx5_dev_list_unlock();
+
 	err = esw_create_offloads_fdb_table(esw, nvports);
 	if (err)
-		return err;
+		goto create_fdb_err;

 	err = esw_create_offloads_table(esw);
 	if (err)

@@ -696,11 +702,6 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
 		goto err_reps;
 	}

-	/* disable PF RoCE so missed packets don't go through RoCE steering */
-	mlx5_dev_list_lock();
-	mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
-	mlx5_dev_list_unlock();
-
 	return 0;

 err_reps:

@@ -717,6 +718,13 @@ create_fg_err:

 create_ft_err:
 	esw_destroy_offloads_fdb_table(esw);

+create_fdb_err:
+	/* enable back PF RoCE */
+	mlx5_dev_list_lock();
+	mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+	mlx5_dev_list_unlock();
+
 	return err;
 }

@@ -724,11 +732,6 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
 {
 	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

-	/* enable back PF RoCE */
-	mlx5_dev_list_lock();
-	mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
-	mlx5_dev_list_unlock();
-
 	mlx5_eswitch_disable_sriov(esw);
 	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
 	if (err) {

@@ -738,6 +741,11 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
 		esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err);
 	}

+	/* enable back PF RoCE */
+	mlx5_dev_list_lock();
+	mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+	mlx5_dev_list_unlock();
+
 	return err;
 }

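The esw_offloads_init()/esw_offloads_stop() hunks above move the "disable PF RoCE" step to before any offload tables are created and re-enable it on every exit path, including the new create_fdb_err unwind label. That is the usual goto-based unwind: every step taken before a failure is undone in reverse order. A minimal standalone sketch with made-up step names:

#include <stdio.h>

static int step_a(void)  { puts("A on");  return 0; }
static int step_b(void)  { puts("B on");  return -1; /* simulate failure */ }
static void undo_a(void) { puts("A off"); }

int main(void)
{
	int err;

	err = step_a();             /* e.g. "disable PF RoCE", done up front */
	if (err)
		return err;

	err = step_b();             /* e.g. create the offloads FDB table */
	if (err)
		goto b_err;

	return 0;

b_err:
	undo_a();                   /* e.g. "enable back PF RoCE" on failure */
	return err;
}
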
@@ -322,7 +322,7 @@ int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
 					flow_table_properties_nic_receive.
 					flow_modify_en);
 	if (!atomic_mod_cap)
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 	opmod = 1;

 	return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte);

@@ -1822,7 +1822,7 @@ static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
 	struct mlx5_flow_table *ft;

 	ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
-	if (!ns)
+	if (WARN_ON(!ns))
 		return -EINVAL;
 	ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE, ANCHOR_LEVEL, 0);
 	if (IS_ERR(ft)) {

@@ -807,7 +807,7 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
 		return 0;
 	}

-	return -ENOTSUPP;
+	return -EOPNOTSUPP;
 }

@@ -620,7 +620,7 @@ static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
 	u32 out[MLX5_ST_SZ_DW(qtct_reg)];

 	if (!MLX5_CAP_GEN(mdev, ets))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;

 	return mlx5_core_access_reg(mdev, in, inlen, out, sizeof(out),
 				    MLX5_REG_QETCR, 0, 1);

@@ -632,7 +632,7 @@ static int mlx5_query_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *out,
 	u32 in[MLX5_ST_SZ_DW(qtct_reg)];

 	if (!MLX5_CAP_GEN(mdev, ets))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;

 	memset(in, 0, sizeof(in));
 	return mlx5_core_access_reg(mdev, in, sizeof(in), out, outlen,

@@ -532,7 +532,7 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
 	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
 		return -EACCES;
 	if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;

 	in = mlx5_vzalloc(inlen);
 	if (!in)

@@ -305,8 +305,12 @@ static int dwmac1000_irq_status(struct mac_device_info *hw,
 {
 	void __iomem *ioaddr = hw->pcsr;
 	u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
+	u32 intr_mask = readl(ioaddr + GMAC_INT_MASK);
 	int ret = 0;

+	/* Discard masked bits */
+	intr_status &= ~intr_mask;
+
 	/* Not used events (e.g. MMC interrupts) are not handled. */
 	if ((intr_status & GMAC_INT_STATUS_MMCTIS))
 		x->mmc_tx_irq_n++;

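The dwmac1000 fix above reads the interrupt mask register and discards masked status bits before acting on them, so events the driver deliberately masked can no longer be mistaken for live interrupts. The masking itself is a single AND-NOT; a trivial standalone illustration with invented register values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t status = 0x00000203;   /* pretend GMAC_INT_STATUS readout */
	uint32_t mask   = 0x00000201;   /* pretend GMAC_INT_MASK readout */

	status &= ~mask;                /* discard masked bits, as in the fix */
	printf("effective status: 0x%08x\n", status);   /* 0x00000002 */
	return 0;
}
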
@@ -1012,7 +1012,7 @@ static struct phy_driver ksphy_driver[] = {
 	.phy_id		= PHY_ID_KSZ8795,
 	.phy_id_mask	= MICREL_PHY_ID_MASK,
 	.name		= "Micrel KSZ8795",
-	.features	= (SUPPORTED_Pause | SUPPORTED_Asym_Pause),
+	.features	= PHY_BASIC_FEATURES,
 	.flags		= PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
 	.config_init	= kszphy_config_init,
 	.config_aneg	= ksz8873mll_config_aneg,

@@ -91,7 +91,7 @@
 #define IWL8000_FW_PRE "iwlwifi-8000C-"
 #define IWL8000_MODULE_FIRMWARE(api) \
-	IWL8000_FW_PRE "-" __stringify(api) ".ucode"
+	IWL8000_FW_PRE __stringify(api) ".ucode"

 #define IWL8265_FW_PRE "iwlwifi-8265-"
 #define IWL8265_MODULE_FIRMWARE(api) \

@@ -1164,9 +1164,10 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
 		.frame_limit = IWL_FRAME_LIMIT,
 	};

-	/* Make sure reserved queue is still marked as such (or allocated) */
-	mvm->queue_info[mvm_sta->reserved_queue].status =
-		IWL_MVM_QUEUE_RESERVED;
+	/* Make sure reserved queue is still marked as such (if allocated) */
+	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
+		mvm->queue_info[mvm_sta->reserved_queue].status =
+			IWL_MVM_QUEUE_RESERVED;

 	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
 		struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];

@@ -843,8 +843,10 @@ static void iwl_mvm_thermal_zone_unregister(struct iwl_mvm *mvm)
 		return;

 	IWL_DEBUG_TEMP(mvm, "Thermal zone device unregister\n");
-	thermal_zone_device_unregister(mvm->tz_device.tzone);
-	mvm->tz_device.tzone = NULL;
+	if (mvm->tz_device.tzone) {
+		thermal_zone_device_unregister(mvm->tz_device.tzone);
+		mvm->tz_device.tzone = NULL;
+	}
 }

 static void iwl_mvm_cooling_device_unregister(struct iwl_mvm *mvm)

@@ -853,8 +855,10 @@ static void iwl_mvm_cooling_device_unregister(struct iwl_mvm *mvm)
 		return;

 	IWL_DEBUG_TEMP(mvm, "Cooling device unregister\n");
-	thermal_cooling_device_unregister(mvm->cooling_dev.cdev);
-	mvm->cooling_dev.cdev = NULL;
+	if (mvm->cooling_dev.cdev) {
+		thermal_cooling_device_unregister(mvm->cooling_dev.cdev);
+		mvm->cooling_dev.cdev = NULL;
+	}
 }
 #endif /* CONFIG_THERMAL */

@@ -532,25 +532,32 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
 	link = kzalloc(sizeof(*link), GFP_KERNEL);
 	if (!link)
 		return NULL;

 	INIT_LIST_HEAD(&link->sibling);
 	INIT_LIST_HEAD(&link->children);
 	INIT_LIST_HEAD(&link->link);
 	link->pdev = pdev;
-	if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT) {
+
+	/*
+	 * Root Ports and PCI/PCI-X to PCIe Bridges are roots of PCIe
+	 * hierarchies.
+	 */
+	if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
+	    pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE) {
+		link->root = link;
+	} else {
 		struct pcie_link_state *parent;

 		parent = pdev->bus->parent->self->link_state;
 		if (!parent) {
 			kfree(link);
 			return NULL;
 		}

 		link->parent = parent;
+		link->root = link->parent->root;
 		list_add(&link->link, &parent->children);
 	}
-	/* Setup a pointer to the root port link */
-	if (!link->parent)
-		link->root = link;
-	else
-		link->root = link->parent->root;

 	list_add(&link->sibling, &link_list);
 	pdev->link_state = link;

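After the ASPM change, both Root Ports and PCI/PCI-X-to-PCIe bridges count as roots of a PCIe hierarchy, and link->root is assigned while walking down rather than patched up afterwards. The bookkeeping reduces to one invariant: a root points at itself, everything else inherits the root of its parent. A standalone sketch of that invariant on a simplified tree:

#include <stdio.h>
#include <stddef.h>

struct link_state {
	struct link_state *parent;
	struct link_state *root;
};

static void init_link(struct link_state *l, struct link_state *parent)
{
	l->parent = parent;
	l->root = parent ? parent->root : l;   /* roots point at themselves */
}

int main(void)
{
	struct link_state root, child, grandchild;

	init_link(&root, NULL);
	init_link(&child, &root);
	init_link(&grandchild, &child);
	printf("grandchild root == root? %d\n", grandchild.root == &root);
	return 0;
}
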
@@ -217,7 +217,7 @@ static const struct berlin_desc_group berlin4ct_soc_pinctrl_groups[] = {
 	BERLIN_PINCTRL_GROUP("SCRD0_CRD_PRES", 0xc, 0x3, 0x15,
 			BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO20 */
 			BERLIN_PINCTRL_FUNCTION(0x1, "scrd0"), /* crd pres */
-			BERLIN_PINCTRL_FUNCTION(0x1, "sd1a")), /* DAT3 */
+			BERLIN_PINCTRL_FUNCTION(0x3, "sd1a")), /* DAT3 */
 	BERLIN_PINCTRL_GROUP("SPI1_SS0n", 0xc, 0x3, 0x18,
 			BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SS0n */
 			BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO37 */

@@ -731,16 +731,23 @@ static void __iomem *byt_gpio_reg(struct byt_gpio *vg, unsigned int offset,
 					int reg)
 {
 	struct byt_community *comm = byt_get_community(vg, offset);
-	u32 reg_offset = 0;
+	u32 reg_offset;

 	if (!comm)
 		return NULL;

 	offset -= comm->pin_base;
-	if (reg == BYT_INT_STAT_REG)
+	switch (reg) {
+	case BYT_INT_STAT_REG:
 		reg_offset = (offset / 32) * 4;
-	else
+		break;
+	case BYT_DEBOUNCE_REG:
+		reg_offset = 0;
+		break;
+	default:
 		reg_offset = comm->pad_map[offset] * 16;
+		break;
+	}

 	return comm->reg_base + reg_offset + reg;
 }

@@ -1243,10 +1250,12 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
 		debounce = readl(db_reg);
 		debounce &= ~BYT_DEBOUNCE_PULSE_MASK;

-		if (arg)
-			conf |= BYT_DEBOUNCE_EN;
-		else
-			conf &= ~BYT_DEBOUNCE_EN;
-
 		switch (arg) {
+		case 0:
+			conf &= BYT_DEBOUNCE_EN;
+			break;
 		case 375:
 			debounce |= BYT_DEBOUNCE_PULSE_375US;
 			break;

@@ -1269,7 +1278,9 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
 			debounce |= BYT_DEBOUNCE_PULSE_24MS;
 			break;
 		default:
-			ret = -EINVAL;
+			if (arg)
+				ret = -EINVAL;
 			break;
 		}

 		if (!ret)

@@ -1612,7 +1623,9 @@ static void byt_gpio_irq_handler(struct irq_desc *desc)
 			continue;
 		}

+		raw_spin_lock(&vg->lock);
 		pending = readl(reg);
+		raw_spin_unlock(&vg->lock);
 		for_each_set_bit(pin, &pending, 32) {
 			virq = irq_find_mapping(vg->chip.irqdomain, base + pin);
 			generic_handle_irq(virq);

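The byt_pin_config_set() debounce hunks fold the enable/disable decision into one switch over the requested period: 0 disables debounce, the supported periods select a pulse value, and anything else is rejected only when non-zero. A standalone model of that mapping, with invented return codes rather than the real register encoding:

#include <stdio.h>

/* Map a requested debounce period (us) to a hypothetical pulse code.
 * Returns -1 for unsupported non-zero periods; 0 means "disable".
 */
static int debounce_code(unsigned int usec)
{
	switch (usec) {
	case 0:     return 0;       /* disable debounce */
	case 375:   return 1;
	case 24000: return 7;       /* the 24 ms pulse from the diff */
	default:    return -1;      /* reject unknown non-zero periods */
	}
}

int main(void)
{
	printf("%d %d %d\n",
	       debounce_code(0), debounce_code(375), debounce_code(500));
	return 0;
}
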
@@ -794,6 +794,9 @@ static int mrfld_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
 	unsigned int i;
 	int ret;

+	if (!mrfld_buf_available(mp, pin))
+		return -ENOTSUPP;
+
 	for (i = 0; i < nconfigs; i++) {
 		switch (pinconf_to_config_param(configs[i])) {
 		case PIN_CONFIG_BIAS_DISABLE:

@@ -564,8 +564,7 @@ static int sunxi_pconf_group_set(struct pinctrl_dev *pctldev,
 			val = arg / 10 - 1;
 			break;
 		case PIN_CONFIG_BIAS_DISABLE:
-			val = 0;
-			break;
+			continue;
 		case PIN_CONFIG_BIAS_PULL_UP:
 			if (arg == 0)
 				return -EINVAL;

@@ -1551,12 +1551,15 @@ config RTC_DRV_MPC5121
 	  will be called rtc-mpc5121.

 config RTC_DRV_JZ4740
-	bool "Ingenic JZ4740 SoC"
+	tristate "Ingenic JZ4740 SoC"
 	depends on MACH_INGENIC || COMPILE_TEST
 	help
 	  If you say yes here you get support for the Ingenic JZ47xx SoCs RTC
 	  controllers.

+	  This driver can also be built as a module. If so, the module
+	  will be called rtc-jz4740.
+
 config RTC_DRV_LPC24XX
 	tristate "NXP RTC for LPC178x/18xx/408x/43xx"
 	depends on ARCH_LPC18XX || COMPILE_TEST

@@ -17,6 +17,7 @@
 #include <linux/clk.h>
 #include <linux/io.h>
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/reboot.h>

@@ -294,7 +295,7 @@ static void jz4740_rtc_power_off(void)
 			     JZ_REG_RTC_RESET_COUNTER, reset_counter_ticks);
 
 	jz4740_rtc_poweroff(dev_for_power_off);
-	machine_halt();
+	kernel_halt();
 }
 
 static const struct of_device_id jz4740_rtc_of_match[] = {

@@ -302,6 +303,7 @@ static const struct of_device_id jz4740_rtc_of_match[] = {
 	{ .compatible = "ingenic,jz4780-rtc", .data = (void *)ID_JZ4780 },
 	{},
 };
+MODULE_DEVICE_TABLE(of, jz4740_rtc_of_match);
 
 static int jz4740_rtc_probe(struct platform_device *pdev)
 {

@@ -429,6 +431,7 @@ static const struct platform_device_id jz4740_rtc_ids[] = {
 	{ "jz4780-rtc", ID_JZ4780 },
 	{}
 };
+MODULE_DEVICE_TABLE(platform, jz4740_rtc_ids);
 
 static struct platform_driver jz4740_rtc_driver = {
 	.probe	 = jz4740_rtc_probe,

@@ -440,4 +443,9 @@ static struct platform_driver jz4740_rtc_driver = {
 	.id_table = jz4740_rtc_ids,
 };
 
-builtin_platform_driver(jz4740_rtc_driver);
+module_platform_driver(jz4740_rtc_driver);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("RTC driver for the JZ4740 SoC\n");
+MODULE_ALIAS("platform:jz4740-rtc");

@@ -285,6 +285,7 @@ initiate_cifs_search(const unsigned int xid, struct file *file)
 		rc = -ENOMEM;
 		goto error_exit;
 	}
+	spin_lock_init(&cifsFile->file_info_lock);
 	file->private_data = cifsFile;
 	cifsFile->tlink = cifs_get_tlink(tlink);
 	tcon = tlink_tcon(tlink);

@@ -542,6 +542,7 @@ void __fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate)
 		hlist_for_each_entry(object, &cookie->backing_objects, cookie_link) {
 			if (invalidate)
 				set_bit(FSCACHE_OBJECT_RETIRED, &object->flags);
+			clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
 			fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL);
 		}
 	} else {

@@ -560,6 +561,10 @@ void __fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate)
 		wait_on_atomic_t(&cookie->n_active, fscache_wait_atomic_t,
 				 TASK_UNINTERRUPTIBLE);
 
+		/* Make sure any pending writes are cancelled. */
+		if (cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX)
+			fscache_invalidate_writes(cookie);
+
 		/* Reset the cookie state if it wasn't relinquished */
 		if (!test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags)) {
 			atomic_inc(&cookie->n_active);

@@ -48,6 +48,7 @@ int __fscache_register_netfs(struct fscache_netfs *netfs)
 	cookie->flags		= 1 << FSCACHE_COOKIE_ENABLED;
 
 	spin_lock_init(&cookie->lock);
+	spin_lock_init(&cookie->stores_lock);
 	INIT_HLIST_HEAD(&cookie->backing_objects);
 
 	/* check the netfs type is not already present */

@@ -30,6 +30,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
 static const struct fscache_state *fscache_object_available(struct fscache_object *, int);
 static const struct fscache_state *fscache_parent_ready(struct fscache_object *, int);
 static const struct fscache_state *fscache_update_object(struct fscache_object *, int);
+static const struct fscache_state *fscache_object_dead(struct fscache_object *, int);
 
 #define __STATE_NAME(n) fscache_osm_##n
 #define STATE(n) (&__STATE_NAME(n))

@@ -91,7 +92,7 @@ static WORK_STATE(LOOKUP_FAILURE, "LCFL", fscache_lookup_failure);
 static WORK_STATE(KILL_OBJECT, "KILL", fscache_kill_object);
 static WORK_STATE(KILL_DEPENDENTS, "KDEP", fscache_kill_dependents);
 static WORK_STATE(DROP_OBJECT, "DROP", fscache_drop_object);
-static WORK_STATE(OBJECT_DEAD, "DEAD", (void*)2UL);
+static WORK_STATE(OBJECT_DEAD, "DEAD", fscache_object_dead);
 
 static WAIT_STATE(WAIT_FOR_INIT, "?INI",
 		  TRANSIT_TO(INIT_OBJECT, 1 << FSCACHE_OBJECT_EV_NEW_CHILD));

@@ -229,6 +230,10 @@ execute_work_state:
 	event = -1;
 	if (new_state == NO_TRANSIT) {
 		_debug("{OBJ%x} %s notrans", object->debug_id, state->name);
+		if (unlikely(state == STATE(OBJECT_DEAD))) {
+			_leave(" [dead]");
+			return;
+		}
 		fscache_enqueue_object(object);
 		event_mask = object->oob_event_mask;
 		goto unmask_events;

@@ -239,7 +244,7 @@ execute_work_state:
 	object->state = state = new_state;
 
 	if (state->work) {
-		if (unlikely(state->work == ((void *)2UL))) {
+		if (unlikely(state == STATE(OBJECT_DEAD))) {
 			_leave(" [dead]");
 			return;
 		}

@@ -645,6 +650,12 @@ static const struct fscache_state *fscache_kill_object(struct fscache_object *ob
 	fscache_mark_object_dead(object);
 	object->oob_event_mask = 0;
 
+	if (test_bit(FSCACHE_OBJECT_RETIRED, &object->flags)) {
+		/* Reject any new read/write ops and abort any that are pending. */
+		clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
+		fscache_cancel_all_ops(object);
+	}
+
 	if (list_empty(&object->dependents) &&
 	    object->n_ops == 0 &&
 	    object->n_children == 0)

@@ -1077,3 +1088,20 @@ void fscache_object_mark_killed(struct fscache_object *object,
 	}
 }
 EXPORT_SYMBOL(fscache_object_mark_killed);
+
+/*
+ * The object is dead.  We can get here if an object gets queued by an event
+ * that would lead to its death (such as EV_KILL) when the dispatcher is
+ * already running (and so can be requeued) but hasn't yet cleared the event
+ * mask.
+ */
+static const struct fscache_state *fscache_object_dead(struct fscache_object *object,
+						       int event)
+{
+	if (!test_and_set_bit(FSCACHE_OBJECT_RUN_AFTER_DEAD,
+			      &object->flags))
+		return NO_TRANSIT;
+
+	WARN(true, "FS-Cache object redispatched after death");
+	return NO_TRANSIT;
+}

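The new OBJECT_DEAD handler replaces the old `(void *)2UL` sentinel: a first redispatch after death is absorbed silently via an atomic test-and-set on FSCACHE_OBJECT_RUN_AFTER_DEAD, and only a repeat triggers the warning. A minimal standalone sketch of that one-shot pattern, using C11 atomics instead of the kernel's bitops (names here are illustrative, not from the kernel):

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag run_after_dead = ATOMIC_FLAG_INIT;

/* First call after "death" is tolerated; later calls warn, mirroring
 * the test_and_set_bit() logic in fscache_object_dead(). */
static void dispatch_dead_object(void)
{
	if (!atomic_flag_test_and_set(&run_after_dead))
		return;	/* first requeue after death: swallow it quietly */

	fprintf(stderr, "object redispatched after death\n");
}

int main(void)
{
	dispatch_dead_object();	/* silent */
	dispatch_dead_object();	/* warns */
	return 0;
}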
@@ -223,10 +223,11 @@ nfsd4_alloc_layout_stateid(struct nfsd4_compound_state *cstate,
 	struct nfs4_layout_stateid *ls;
 	struct nfs4_stid *stp;
 
-	stp = nfs4_alloc_stid(cstate->clp, nfs4_layout_stateid_cache);
+	stp = nfs4_alloc_stid(cstate->clp, nfs4_layout_stateid_cache,
+					nfsd4_free_layout_stateid);
 	if (!stp)
 		return NULL;
-	stp->sc_free = nfsd4_free_layout_stateid;
+
 	get_nfs4_file(fp);
 	stp->sc_file = fp;
 

@@ -633,8 +633,8 @@ out:
 	return co;
 }
 
-struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
-				  struct kmem_cache *slab)
+struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
+				  void (*sc_free)(struct nfs4_stid *))
 {
 	struct nfs4_stid *stid;
 	int new_id;

@@ -650,6 +650,8 @@ struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
 	idr_preload_end();
 	if (new_id < 0)
 		goto out_free;
+
+	stid->sc_free = sc_free;
 	stid->sc_client = cl;
 	stid->sc_stateid.si_opaque.so_id = new_id;
 	stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;

@@ -675,15 +677,12 @@ out_free:
 static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
 {
 	struct nfs4_stid *stid;
-	struct nfs4_ol_stateid *stp;
 
-	stid = nfs4_alloc_stid(clp, stateid_slab);
+	stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
 	if (!stid)
 		return NULL;
 
-	stp = openlockstateid(stid);
-	stp->st_stid.sc_free = nfs4_free_ol_stateid;
-	return stp;
+	return openlockstateid(stid);
 }
 
 static void nfs4_free_deleg(struct nfs4_stid *stid)

@@ -781,11 +780,10 @@ alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh,
 		goto out_dec;
 	if (delegation_blocked(&current_fh->fh_handle))
 		goto out_dec;
-	dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab));
+	dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg));
 	if (dp == NULL)
 		goto out_dec;
 
-	dp->dl_stid.sc_free = nfs4_free_deleg;
 	/*
 	 * delegation seqid's are never incremented.  The 4.1 special
 	 * meaning of seqid 0 isn't meaningful, really, but let's avoid

@@ -5580,7 +5578,6 @@ init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
 	stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
 	get_nfs4_file(fp);
 	stp->st_stid.sc_file = fp;
-	stp->st_stid.sc_free = nfs4_free_lock_stateid;
 	stp->st_access_bmap = 0;
 	stp->st_deny_bmap = open_stp->st_deny_bmap;
 	stp->st_openstp = open_stp;

@@ -5623,7 +5620,7 @@ find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
 	lst = find_lock_stateid(lo, fi);
 	if (lst == NULL) {
 		spin_unlock(&clp->cl_lock);
-		ns = nfs4_alloc_stid(clp, stateid_slab);
+		ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
 		if (ns == NULL)
 			return NULL;
 

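These hunks close a window in which a stateid was reachable through the IDR before its sc_free callback had been assigned, so a concurrent put could chase an unset function pointer. Passing the destructor into the allocator makes the object complete at the moment it is published. A standalone sketch of the same constructor-injected-destructor pattern (hypothetical names, not the nfsd API):

#include <stdio.h>
#include <stdlib.h>

struct stid {
	int id;
	void (*sc_free)(struct stid *);	/* always valid once allocated */
};

static void free_stid(struct stid *s)
{
	printf("freeing stid %d\n", s->id);
	free(s);
}

/* The destructor is a constructor argument, so there is no window in
 * which the object exists without a usable sc_free. */
static struct stid *alloc_stid(int id, void (*sc_free)(struct stid *))
{
	struct stid *s = malloc(sizeof(*s));

	if (!s)
		return NULL;
	s->sc_free = sc_free;
	s->id = id;
	return s;
}

int main(void)
{
	struct stid *s = alloc_stid(42, free_stid);

	if (s)
		s->sc_free(s);
	return 0;
}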
@@ -603,8 +603,8 @@ extern __be32 nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
 __be32 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
 		     stateid_t *stateid, unsigned char typemask,
 		     struct nfs4_stid **s, struct nfsd_net *nn);
-struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
-		struct kmem_cache *slab);
+struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
+		void (*sc_free)(struct nfs4_stid *));
 void nfs4_unhash_stid(struct nfs4_stid *s);
 void nfs4_put_stid(struct nfs4_stid *s);
 void nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid);

@@ -332,37 +332,6 @@ nfsd_sanitize_attrs(struct inode *inode, struct iattr *iap)
 	}
 }
 
-static __be32
-nfsd_get_write_access(struct svc_rqst *rqstp, struct svc_fh *fhp,
-		struct iattr *iap)
-{
-	struct inode *inode = d_inode(fhp->fh_dentry);
-	int host_err;
-
-	if (iap->ia_size < inode->i_size) {
-		__be32 err;
-
-		err = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
-				NFSD_MAY_TRUNC | NFSD_MAY_OWNER_OVERRIDE);
-		if (err)
-			return err;
-	}
-
-	host_err = get_write_access(inode);
-	if (host_err)
-		goto out_nfserrno;
-
-	host_err = locks_verify_truncate(inode, NULL, iap->ia_size);
-	if (host_err)
-		goto out_put_write_access;
-	return 0;
-
-out_put_write_access:
-	put_write_access(inode);
-out_nfserrno:
-	return nfserrno(host_err);
-}
-
 /*
  * Set various file attributes. After this call fhp needs an fh_put.
  */

@@ -377,7 +346,6 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
 	__be32		err;
 	int		host_err;
 	bool		get_write_count;
-	int		size_change = 0;
 
 	if (iap->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_SIZE))
 		accmode |= NFSD_MAY_WRITE|NFSD_MAY_OWNER_OVERRIDE;

@@ -390,11 +358,11 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
 	/* Get inode */
 	err = fh_verify(rqstp, fhp, ftype, accmode);
 	if (err)
-		goto out;
+		return err;
 	if (get_write_count) {
 		host_err = fh_want_write(fhp);
 		if (host_err)
-			return nfserrno(host_err);
+			goto out_host_err;
 	}
 
 	dentry = fhp->fh_dentry;

@@ -405,50 +373,59 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
 		iap->ia_valid &= ~ATTR_MODE;
 
 	if (!iap->ia_valid)
-		goto out;
+		return 0;
 
 	nfsd_sanitize_attrs(inode, iap);
 
+	if (check_guard && guardtime != inode->i_ctime.tv_sec)
+		return nfserr_notsync;
+
 	/*
 	 * The size case is special, it changes the file in addition to the
-	 * attributes.
+	 * attributes, and file systems don't expect it to be mixed with
+	 * "random" attribute changes.  We thus split out the size change
+	 * into a separate call for vfs_truncate, and do the rest as a
+	 * separate setattr call.
 	 */
 	if (iap->ia_valid & ATTR_SIZE) {
-		err = nfsd_get_write_access(rqstp, fhp, iap);
-		if (err)
-			goto out;
-		size_change = 1;
+		struct path path = {
+			.mnt	= fhp->fh_export->ex_path.mnt,
+			.dentry	= dentry,
+		};
+		bool implicit_mtime = false;
 
 		/*
-		 * RFC5661, Section 18.30.4:
-		 *   Changing the size of a file with SETATTR indirectly
-		 *   changes the time_modify and change attributes.
-		 *
-		 * (and similar for the older RFCs)
+		 * vfs_truncate implicitly updates the mtime IFF the file size
+		 * actually changes.  Avoid the additional setattr call below if
+		 * the only other attribute that the client sends is the mtime.
 		 */
-		if (iap->ia_size != i_size_read(inode))
-			iap->ia_valid |= ATTR_MTIME;
+		if (iap->ia_size != i_size_read(inode) &&
+		    ((iap->ia_valid & ~(ATTR_SIZE | ATTR_MTIME)) == 0))
+			implicit_mtime = true;
+
+		host_err = vfs_truncate(&path, iap->ia_size);
+		if (host_err)
+			goto out_host_err;
+
+		iap->ia_valid &= ~ATTR_SIZE;
+		if (implicit_mtime)
+			iap->ia_valid &= ~ATTR_MTIME;
+		if (!iap->ia_valid)
+			goto done;
 	}
 
 	iap->ia_valid |= ATTR_CTIME;
 
-	if (check_guard && guardtime != inode->i_ctime.tv_sec) {
-		err = nfserr_notsync;
-		goto out_put_write_access;
-	}
-
 	fh_lock(fhp);
 	host_err = notify_change(dentry, iap, NULL);
 	fh_unlock(fhp);
-	err = nfserrno(host_err);
+	if (host_err)
+		goto out_host_err;
 
-out_put_write_access:
-	if (size_change)
-		put_write_access(inode);
-	if (!err)
-		err = nfserrno(commit_metadata(fhp));
-out:
-	return err;
+done:
+	host_err = commit_metadata(fhp);
+out_host_err:
+	return nfserrno(host_err);
 }
 
 #if defined(CONFIG_NFSD_V4)

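The rewrite splits the truncate out into vfs_truncate() and then decides whether a follow-up notify_change() is still needed. The "implicit mtime" test can be read as a pure predicate over the attribute mask: the extra setattr may be skipped only when the size really changes (so the truncate updates mtime itself) and nothing besides size and mtime was requested. A hedged sketch of that predicate with simplified stand-in constants, not the kernel's:

#include <stdbool.h>
#include <stdio.h>

#define ATTR_SIZE	(1 << 0)
#define ATTR_MTIME	(1 << 1)
#define ATTR_MODE	(1 << 2)	/* stand-in for "any other attribute" */

/* Mirrors the condition added to nfsd_setattr(): mtime is implicit only
 * if the size actually changes and size/mtime are the whole request. */
static bool mtime_is_implicit(unsigned valid, long new_size, long cur_size)
{
	return new_size != cur_size &&
	       (valid & ~(ATTR_SIZE | ATTR_MTIME)) == 0;
}

int main(void)
{
	printf("%d\n", mtime_is_implicit(ATTR_SIZE | ATTR_MTIME, 10, 20)); /* 1 */
	printf("%d\n", mtime_is_implicit(ATTR_SIZE | ATTR_MTIME, 10, 10)); /* 0: size unchanged */
	printf("%d\n", mtime_is_implicit(ATTR_SIZE | ATTR_MODE, 10, 20));  /* 0: extra attribute */
	return 0;
}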
@@ -45,10 +45,9 @@ struct can_proto {
 extern int  can_proto_register(const struct can_proto *cp);
 extern void can_proto_unregister(const struct can_proto *cp);
 
-extern int  can_rx_register(struct net_device *dev, canid_t can_id,
-			    canid_t mask,
-			    void (*func)(struct sk_buff *, void *),
-			    void *data, char *ident);
+int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
+		    void (*func)(struct sk_buff *, void *),
+		    void *data, char *ident, struct sock *sk);
 
 extern void can_rx_unregister(struct net_device *dev, canid_t can_id,
 			      canid_t mask,

@@ -8,9 +8,7 @@ enum cpuhp_state {
 	CPUHP_CREATE_THREADS,
 	CPUHP_PERF_PREPARE,
 	CPUHP_PERF_X86_PREPARE,
-	CPUHP_PERF_X86_UNCORE_PREP,
 	CPUHP_PERF_X86_AMD_UNCORE_PREP,
-	CPUHP_PERF_X86_RAPL_PREP,
 	CPUHP_PERF_BFIN,
 	CPUHP_PERF_POWER,
 	CPUHP_PERF_SUPERH,

@@ -86,7 +84,6 @@ enum cpuhp_state {
 	CPUHP_AP_IRQ_ARMADA_XP_STARTING,
 	CPUHP_AP_IRQ_BCM2836_STARTING,
 	CPUHP_AP_ARM_MVEBU_COHERENCY,
-	CPUHP_AP_PERF_X86_UNCORE_STARTING,
 	CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
 	CPUHP_AP_PERF_X86_STARTING,
 	CPUHP_AP_PERF_X86_AMD_IBS_STARTING,

@@ -360,6 +360,7 @@ struct fscache_object {
 #define FSCACHE_OBJECT_IS_AVAILABLE	5	/* T if object has become active */
 #define FSCACHE_OBJECT_RETIRED		6	/* T if object was retired on relinquishment */
 #define FSCACHE_OBJECT_KILLED_BY_CACHE	7	/* T if object was killed by the cache */
+#define FSCACHE_OBJECT_RUN_AFTER_DEAD	8	/* T if object has been dispatched after death */
 
 	struct list_head	cache_link;	/* link in cache->object_list */
 	struct hlist_node	cookie_link;	/* link in cookie->backing_objects */

@@ -866,11 +866,15 @@ struct netdev_xdp {
  *	of useless work if you return NETDEV_TX_BUSY.
  *	Required; cannot be NULL.
  *
- * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
- *		netdev_features_t features);
- *	Adjusts the requested feature flags according to device-specific
- *	constraints, and returns the resulting flags. Must not modify
- *	the device state.
+ * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
+ *					   struct net_device *dev
+ *					   netdev_features_t features);
+ *	Called by core transmit path to determine if device is capable of
+ *	performing offload operations on a given packet. This is to give
+ *	the device an opportunity to implement any restrictions that cannot
+ *	be otherwise expressed by feature flags. The check is called with
+ *	the set of features that the stack has calculated and it returns
+ *	those the driver believes to be appropriate.
  *
  * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
  *                         void *accel_priv, select_queue_fallback_t fallback);

@@ -1028,6 +1032,12 @@ struct netdev_xdp {
  *	Called to release previously enslaved netdev.
  *
  * Feature/offload setting functions.
+ * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
+ *		netdev_features_t features);
+ *	Adjusts the requested feature flags according to device-specific
+ *	constraints, and returns the resulting flags. Must not modify
+ *	the device state.
+ *
  * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
  *	Called to update device configuration to new features. Passed
  *	feature set might be less than what was returned by ndo_fix_features()).

@@ -1100,15 +1110,6 @@ struct netdev_xdp {
  *	Callback to use for xmit over the accelerated station. This
  *	is used in place of ndo_start_xmit on accelerated net
  *	devices.
- * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
- *					   struct net_device *dev
- *					   netdev_features_t features);
- *	Called by core transmit path to determine if device is capable of
- *	performing offload operations on a given packet. This is to give
- *	the device an opportunity to implement any restrictions that cannot
- *	be otherwise expressed by feature flags. The check is called with
- *	the set of features that the stack has calculated and it returns
- *	those the driver believes to be appropriate.
  * int (*ndo_set_tx_maxrate)(struct net_device *dev,
  *			      int queue_index, u32 maxrate);
  *	Called when a user wants to set a max-rate limitation of specific

@@ -204,7 +204,7 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
 static inline bool percpu_ref_tryget(struct percpu_ref *ref)
 {
 	unsigned long __percpu *percpu_count;
-	int ret;
+	bool ret;
 
 	rcu_read_lock_sched();
 

@@ -238,7 +238,7 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
 static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
 {
 	unsigned long __percpu *percpu_count;
-	int ret = false;
+	bool ret = false;
 
 	rcu_read_lock_sched();
 

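The int-to-bool change matters because `ret` is assigned from expressions whose natural width is unsigned long (the atomic refcount path); narrowing a long to int can truncate away exactly the bits that made the value nonzero, reporting a failed tryget while the reference was in fact taken. A small demonstration of the hazard (illustrative only; the truncation behaves this way on LP64 targets):

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	long count = 0x100000000L;	/* nonzero, but only in the high bits */

	int  as_int  = count;		/* truncates to 0 on LP64: looks "failed" */
	bool as_bool = count;		/* any nonzero long stays true */

	printf("int: %d  bool: %d\n", as_int, as_bool);
	return 0;
}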
@@ -776,6 +776,11 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
 {
 	u32 hash;
 
+	/* @flowlabel may include more than a flow label, eg, the traffic class.
+	 * Here we want only the flow label value.
+	 */
+	flowlabel &= IPV6_FLOWLABEL_MASK;
+
 	if (flowlabel ||
 	    net->ipv6.sysctl.auto_flowlabels == IP6_AUTO_FLOW_LABEL_OFF ||
 	    (!autolabel &&

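The incoming @flowlabel is taken from the flow-info word, whose low 20 bits are the flow label proper while bits 20-27 carry the traffic class; without masking, a non-zero tclass makes a zero flow label look non-zero and suppresses auto-labelling. A quick illustration of the masking, using a host-order constant for simplicity (the kernel's IPV6_FLOWLABEL_MASK is the big-endian form):

#include <stdio.h>

#define FLOWLABEL_MASK 0x000FFFFFu	/* low 20 bits: the flow label itself */

int main(void)
{
	unsigned flowinfo = 0x04200000u;	/* tclass 0x42, flow label 0 */

	printf("unmasked: %#x (looks set)\n", flowinfo);
	printf("masked:   %#x (label is really 0)\n", flowinfo & FLOWLABEL_MASK);
	return 0;
}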
@@ -1384,6 +1384,8 @@ enum ethtool_link_mode_bit_indices {
 	ETHTOOL_LINK_MODE_10000baseLR_Full_BIT	= 44,
 	ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT	= 45,
 	ETHTOOL_LINK_MODE_10000baseER_Full_BIT	= 46,
+	ETHTOOL_LINK_MODE_2500baseT_Full_BIT	= 47,
+	ETHTOOL_LINK_MODE_5000baseT_Full_BIT	= 48,
 
 
 	/* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit

@@ -1393,7 +1395,7 @@ enum ethtool_link_mode_bit_indices {
 	 */
 
 	__ETHTOOL_LINK_MODE_LAST
-	  = ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
+	  = ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
 };
 
 #define __ETHTOOL_LINK_MODE_LEGACY_MASK(base_name)	\

@@ -5221,6 +5221,11 @@ err_free_css:
 	return ERR_PTR(err);
 }
 
+/*
+ * The returned cgroup is fully initialized including its control mask, but
+ * it isn't associated with its kernfs_node and doesn't have the control
+ * mask applied.
+ */
 static struct cgroup *cgroup_create(struct cgroup *parent)
 {
 	struct cgroup_root *root = parent->root;

@@ -5288,11 +5293,6 @@ static struct cgroup *cgroup_create(struct cgroup *parent)
 
 	cgroup_propagate_control(cgrp);
 
-	/* @cgrp doesn't have dir yet so the following will only create csses */
-	ret = cgroup_apply_control_enable(cgrp);
-	if (ret)
-		goto out_destroy;
-
 	return cgrp;
 
 out_cancel_ref:

@@ -5300,9 +5300,6 @@ out_cancel_ref:
 out_free_cgrp:
 	kfree(cgrp);
 	return ERR_PTR(ret);
-out_destroy:
-	cgroup_destroy_locked(cgrp);
-	return ERR_PTR(ret);
 }
 
 static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,

@@ -266,7 +266,7 @@ out:
 static struct cpumask save_cpumask;
 static bool disable_migrate;
 
-static void move_to_next_cpu(void)
+static void move_to_next_cpu(bool initmask)
 {
 	static struct cpumask *current_mask;
 	int next_cpu;

@@ -275,7 +275,7 @@ static void move_to_next_cpu(void)
 		return;
 
 	/* Just pick the first CPU on first iteration */
-	if (!current_mask) {
+	if (initmask) {
 		current_mask = &save_cpumask;
 		get_online_cpus();
 		cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);

@@ -330,10 +330,12 @@ static void move_to_next_cpu(void)
 static int kthread_fn(void *data)
 {
 	u64 interval;
+	bool initmask = true;
 
 	while (!kthread_should_stop()) {
 
-		move_to_next_cpu();
+		move_to_next_cpu(initmask);
+		initmask = false;
 
 		local_irq_disable();
 		get_sample();

@@ -445,6 +445,7 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
  * @func: callback function on filter match
  * @data: returned parameter for callback function
  * @ident: string for calling module identification
+ * @sk: socket pointer (might be NULL)
  *
  * Description:
  *  Invokes the callback function with the received sk_buff and the given

@@ -468,7 +469,7 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
  */
 int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
 		    void (*func)(struct sk_buff *, void *), void *data,
-		    char *ident)
+		    char *ident, struct sock *sk)
 {
 	struct receiver *r;
 	struct hlist_head *rl;

@@ -496,6 +497,7 @@ int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
 		r->func = func;
 		r->data = data;
 		r->ident = ident;
+		r->sk = sk;
 
 		hlist_add_head_rcu(&r->list, rl);
 		d->entries++;

@@ -520,8 +522,11 @@ EXPORT_SYMBOL(can_rx_register);
 static void can_rx_delete_receiver(struct rcu_head *rp)
 {
 	struct receiver *r = container_of(rp, struct receiver, rcu);
+	struct sock *sk = r->sk;
 
 	kmem_cache_free(rcv_cache, r);
+	if (sk)
+		sock_put(sk);
 }
 
 /**

@@ -596,8 +601,11 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
 	spin_unlock(&can_rcvlists_lock);
 
 	/* schedule the receiver item for deletion */
-	if (r)
+	if (r) {
+		if (r->sk)
+			sock_hold(r->sk);
 		call_rcu(&r->rcu, can_rx_delete_receiver);
+	}
 }
 EXPORT_SYMBOL(can_rx_unregister);
 

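The fix pins the socket for as long as a registered receiver can still call back into it: can_rx_unregister() takes a reference before scheduling the RCU deletion, and the RCU callback drops it only after the receiver itself is freed. A single-threaded sketch of that hold-across-deferred-free discipline, with a plain refcount standing in for sock_hold/sock_put and an ordinary call standing in for call_rcu():

#include <stdio.h>
#include <stdlib.h>

struct sock_like {
	int refs;
};

static void get_ref(struct sock_like *s) { s->refs++; }

static void put_ref(struct sock_like *s)
{
	if (--s->refs == 0) {
		printf("socket freed\n");
		free(s);
	}
}

struct receiver {
	struct sock_like *sk;
};

/* Stands in for can_rx_delete_receiver(): free the receiver first,
 * then drop the reference that unregister took on its behalf. */
static void delete_receiver(struct receiver *r)
{
	struct sock_like *sk = r->sk;

	free(r);
	if (sk)
		put_ref(sk);
}

int main(void)
{
	struct sock_like *sk = malloc(sizeof(*sk));
	struct receiver *r = malloc(sizeof(*r));

	sk->refs = 1;		/* owner's reference */
	r->sk = sk;

	get_ref(r->sk);		/* what can_rx_unregister() now does */
	put_ref(sk);		/* owner goes away early... */
	delete_receiver(r);	/* ...yet the deferred path is still safe */
	return 0;
}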
@@ -50,13 +50,14 @@
 
 struct receiver {
 	struct hlist_node list;
-	struct rcu_head rcu;
 	canid_t can_id;
 	canid_t mask;
 	unsigned long matches;
 	void (*func)(struct sk_buff *, void *);
 	void *data;
 	char *ident;
+	struct sock *sk;
+	struct rcu_head rcu;
 };
 
 #define CAN_SFF_RCV_ARRAY_SZ (1 << CAN_SFF_ID_BITS)

@@ -734,14 +734,23 @@ static struct bcm_op *bcm_find_op(struct list_head *ops,
 
 static void bcm_remove_op(struct bcm_op *op)
 {
-	hrtimer_cancel(&op->timer);
-	hrtimer_cancel(&op->thrtimer);
+	if (op->tsklet.func) {
+		while (test_bit(TASKLET_STATE_SCHED, &op->tsklet.state) ||
+		       test_bit(TASKLET_STATE_RUN, &op->tsklet.state) ||
+		       hrtimer_active(&op->timer)) {
+			hrtimer_cancel(&op->timer);
+			tasklet_kill(&op->tsklet);
+		}
+	}
 
-	if (op->tsklet.func)
-		tasklet_kill(&op->tsklet);
-
-	if (op->thrtsklet.func)
-		tasklet_kill(&op->thrtsklet);
+	if (op->thrtsklet.func) {
+		while (test_bit(TASKLET_STATE_SCHED, &op->thrtsklet.state) ||
+		       test_bit(TASKLET_STATE_RUN, &op->thrtsklet.state) ||
+		       hrtimer_active(&op->thrtimer)) {
+			hrtimer_cancel(&op->thrtimer);
+			tasklet_kill(&op->thrtsklet);
+		}
+	}
 
 	if ((op->frames) && (op->frames != &op->sframe))
 		kfree(op->frames);

@@ -1216,7 +1225,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
 			err = can_rx_register(dev, op->can_id,
 					      REGMASK(op->can_id),
 					      bcm_rx_handler, op,
-					      "bcm");
+					      "bcm", sk);
 
 			op->rx_reg_dev = dev;
 			dev_put(dev);

@@ -1225,7 +1234,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
 		} else
 			err = can_rx_register(NULL, op->can_id,
 					      REGMASK(op->can_id),
-					      bcm_rx_handler, op, "bcm");
+					      bcm_rx_handler, op, "bcm", sk);
 		if (err) {
 			/* this bcm rx op is broken -> remove it */
 			list_del(&op->list);

@@ -442,7 +442,7 @@ static inline int cgw_register_filter(struct cgw_job *gwj)
 {
 	return can_rx_register(gwj->src.dev, gwj->ccgw.filter.can_id,
 			       gwj->ccgw.filter.can_mask, can_can_gw_rcv,
-			       gwj, "gw");
+			       gwj, "gw", NULL);
 }
 
 static inline void cgw_unregister_filter(struct cgw_job *gwj)

@@ -190,7 +190,7 @@ static int raw_enable_filters(struct net_device *dev, struct sock *sk,
 	for (i = 0; i < count; i++) {
 		err = can_rx_register(dev, filter[i].can_id,
 				      filter[i].can_mask,
-				      raw_rcv, sk, "raw");
+				      raw_rcv, sk, "raw", sk);
 		if (err) {
 			/* clean up successfully registered filters */
 			while (--i >= 0)

@@ -211,7 +211,7 @@ static int raw_enable_errfilter(struct net_device *dev, struct sock *sk,
 
 	if (err_mask)
 		err = can_rx_register(dev, 0, err_mask | CAN_ERR_FLAG,
-				      raw_rcv, sk, "raw");
+				      raw_rcv, sk, "raw", sk);
 
 	return err;
 }

@@ -2518,9 +2518,11 @@ u32 __tcp_select_window(struct sock *sk)
 	int full_space = min_t(int, tp->window_clamp, allowed_space);
 	int window;
 
-	if (mss > full_space)
+	if (unlikely(mss > full_space)) {
 		mss = full_space;
-
+		if (mss <= 0)
+			return 0;
+	}
 	if (free_space < (full_space >> 1)) {
 		icsk->icsk_ack.quick = 0;
 

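Clamping mss to full_space can drive it to zero when the advertised window is degenerate, and mss is used as a divisor later in this function, so the early return avoids a divide-by-zero. The guard in isolation, as a hedged sketch of the shape of the fix rather than the kernel function itself:

#include <stdio.h>

/* The rounding below divides by mss, hence the early return when the
 * clamped mss drops to zero or below. */
static int select_window(int free_space, int full_space, int mss)
{
	if (mss > full_space) {
		mss = full_space;
		if (mss <= 0)
			return 0;	/* without this: division by zero below */
	}
	return (free_space / mss) * mss;	/* round down to a multiple of mss */
}

int main(void)
{
	printf("%d\n", select_window(4000, 3000, 1460));	/* 2920 */
	printf("%d\n", select_window(4000, 0, 1460));		/* 0, no crash */
	return 0;
}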
@@ -1344,7 +1344,7 @@ emsgsize:
 	 */
 	if (transhdrlen && sk->sk_protocol == IPPROTO_UDP &&
 	    headersize == sizeof(struct ipv6hdr) &&
-	    length < mtu - headersize &&
+	    length <= mtu - headersize &&
 	    !(flags & MSG_MORE) &&
 	    rt->dst.dev->features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
 		csummode = CHECKSUM_PARTIAL;

@@ -441,7 +441,7 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
 		if (i + sizeof(*tel) > optlen)
 			break;
 
-		tel = (struct ipv6_tlv_tnl_enc_lim *) skb->data + off + i;
+		tel = (struct ipv6_tlv_tnl_enc_lim *)(skb->data + off + i);
 		/* return index of option if found and valid */
 		if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
 		    tel->length == 1)

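The bug is pure operator precedence: a cast binds tighter than `+`, so `(struct ipv6_tlv_tnl_enc_lim *) skb->data + off + i` advances the pointer in units of sizeof(struct) rather than bytes. A tiny demonstration with a stand-in struct:

#include <stdio.h>

struct tlv {		/* stand-in for ipv6_tlv_tnl_enc_lim */
	unsigned char type, length, value;
};

int main(void)
{
	unsigned char data[64];
	int off = 4, i = 2;

	struct tlv *wrong = (struct tlv *) data + off + i;   /* data + 6 * sizeof(struct tlv) */
	struct tlv *right = (struct tlv *)(data + off + i);  /* data + 6 bytes */

	printf("wrong advances %td bytes, right advances %td bytes\n",
	       (unsigned char *)wrong - data, (unsigned char *)right - data);
	return 0;
}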
@@ -568,9 +568,9 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
 			       &mask->icmp.type,
 			       TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
 			       sizeof(key->icmp.type));
-		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
+		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
 			       &mask->icmp.code,
-			       TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
+			       TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
 			       sizeof(key->icmp.code));
 	}
 

@@ -16,16 +16,11 @@
 #include <net/sch_generic.h>
 #include <net/pkt_cls.h>
 
-struct cls_mall_filter {
+struct cls_mall_head {
 	struct tcf_exts exts;
 	struct tcf_result res;
 	u32 handle;
-	struct rcu_head	rcu;
 	u32 flags;
-};
-
-struct cls_mall_head {
-	struct cls_mall_filter *filter;
 	struct rcu_head	rcu;
 };
 

@@ -33,38 +28,29 @@ static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 			 struct tcf_result *res)
 {
 	struct cls_mall_head *head = rcu_dereference_bh(tp->root);
-	struct cls_mall_filter *f = head->filter;
 
-	if (tc_skip_sw(f->flags))
+	if (tc_skip_sw(head->flags))
 		return -1;
 
-	return tcf_exts_exec(skb, &f->exts, res);
+	return tcf_exts_exec(skb, &head->exts, res);
 }
 
 static int mall_init(struct tcf_proto *tp)
 {
-	struct cls_mall_head *head;
-
-	head = kzalloc(sizeof(*head), GFP_KERNEL);
-	if (!head)
-		return -ENOBUFS;
-
-	rcu_assign_pointer(tp->root, head);
-
 	return 0;
 }
 
-static void mall_destroy_filter(struct rcu_head *head)
+static void mall_destroy_rcu(struct rcu_head *rcu)
 {
-	struct cls_mall_filter *f = container_of(head, struct cls_mall_filter, rcu);
+	struct cls_mall_head *head = container_of(rcu, struct cls_mall_head,
+						  rcu);
 
-	tcf_exts_destroy(&f->exts);
-
-	kfree(f);
+	tcf_exts_destroy(&head->exts);
+	kfree(head);
 }
 
 static int mall_replace_hw_filter(struct tcf_proto *tp,
-				  struct cls_mall_filter *f,
+				  struct cls_mall_head *head,
 				  unsigned long cookie)
 {
 	struct net_device *dev = tp->q->dev_queue->dev;

@@ -74,7 +60,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
 	offload.type = TC_SETUP_MATCHALL;
 	offload.cls_mall = &mall_offload;
 	offload.cls_mall->command = TC_CLSMATCHALL_REPLACE;
-	offload.cls_mall->exts = &f->exts;
+	offload.cls_mall->exts = &head->exts;
 	offload.cls_mall->cookie = cookie;
 
 	return dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,

@@ -82,7 +68,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
 }
 
 static void mall_destroy_hw_filter(struct tcf_proto *tp,
-				   struct cls_mall_filter *f,
+				   struct cls_mall_head *head,
 				   unsigned long cookie)
 {
 	struct net_device *dev = tp->q->dev_queue->dev;

@@ -103,29 +89,20 @@ static bool mall_destroy(struct tcf_proto *tp, bool force)
 {
 	struct cls_mall_head *head = rtnl_dereference(tp->root);
 	struct net_device *dev = tp->q->dev_queue->dev;
-	struct cls_mall_filter *f = head->filter;
 
-	if (!force && f)
-		return false;
+	if (!head)
+		return true;
 
-	if (f) {
-		if (tc_should_offload(dev, tp, f->flags))
-			mall_destroy_hw_filter(tp, f, (unsigned long) f);
+	if (tc_should_offload(dev, tp, head->flags))
+		mall_destroy_hw_filter(tp, head, (unsigned long) head);
 
-		call_rcu(&f->rcu, mall_destroy_filter);
-	}
-	kfree_rcu(head, rcu);
+	call_rcu(&head->rcu, mall_destroy_rcu);
 	return true;
 }
 
 static unsigned long mall_get(struct tcf_proto *tp, u32 handle)
 {
-	struct cls_mall_head *head = rtnl_dereference(tp->root);
-	struct cls_mall_filter *f = head->filter;
-
-	if (f && f->handle == handle)
-		return (unsigned long) f;
-	return 0;
+	return 0UL;
 }
 
 static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {

@@ -134,7 +111,7 @@ static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
 };
 
 static int mall_set_parms(struct net *net, struct tcf_proto *tp,
-			  struct cls_mall_filter *f,
+			  struct cls_mall_head *head,
 			  unsigned long base, struct nlattr **tb,
 			  struct nlattr *est, bool ovr)
 {

@@ -147,11 +124,11 @@ static int mall_set_parms(struct net *net, struct tcf_proto *tp,
 		return err;
 
 	if (tb[TCA_MATCHALL_CLASSID]) {
-		f->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
-		tcf_bind_filter(tp, &f->res, base);
+		head->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
+		tcf_bind_filter(tp, &head->res, base);
 	}
 
-	tcf_exts_change(tp, &f->exts, &e);
+	tcf_exts_change(tp, &head->exts, &e);
 
 	return 0;
 }

@@ -162,21 +139,17 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
 		       unsigned long *arg, bool ovr)
 {
 	struct cls_mall_head *head = rtnl_dereference(tp->root);
-	struct cls_mall_filter *fold = (struct cls_mall_filter *) *arg;
 	struct net_device *dev = tp->q->dev_queue->dev;
-	struct cls_mall_filter *f;
 	struct nlattr *tb[TCA_MATCHALL_MAX + 1];
+	struct cls_mall_head *new;
 	u32 flags = 0;
 	int err;
 
 	if (!tca[TCA_OPTIONS])
 		return -EINVAL;
 
-	if (head->filter)
-		return -EBUSY;
-
-	if (fold)
-		return -EINVAL;
+	if (head)
+		return -EEXIST;
 
 	err = nla_parse_nested(tb, TCA_MATCHALL_MAX,
 			       tca[TCA_OPTIONS], mall_policy);

@@ -189,23 +162,23 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
 		return -EINVAL;
 	}
 
-	f = kzalloc(sizeof(*f), GFP_KERNEL);
-	if (!f)
+	new = kzalloc(sizeof(*new), GFP_KERNEL);
+	if (!new)
 		return -ENOBUFS;
 
-	tcf_exts_init(&f->exts, TCA_MATCHALL_ACT, 0);
+	tcf_exts_init(&new->exts, TCA_MATCHALL_ACT, 0);
 
 	if (!handle)
 		handle = 1;
-	f->handle = handle;
-	f->flags = flags;
+	new->handle = handle;
+	new->flags = flags;
 
-	err = mall_set_parms(net, tp, f, base, tb, tca[TCA_RATE], ovr);
+	err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE], ovr);
 	if (err)
 		goto errout;
 
 	if (tc_should_offload(dev, tp, flags)) {
-		err = mall_replace_hw_filter(tp, f, (unsigned long) f);
+		err = mall_replace_hw_filter(tp, new, (unsigned long) new);
 		if (err) {
 			if (tc_skip_sw(flags))
 				goto errout;

@@ -214,39 +187,29 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
 		}
 	}
 
-	*arg = (unsigned long) f;
-	rcu_assign_pointer(head->filter, f);
-
+	*arg = (unsigned long) head;
+	rcu_assign_pointer(tp->root, new);
+	if (head)
+		call_rcu(&head->rcu, mall_destroy_rcu);
 	return 0;
 
 errout:
-	kfree(f);
+	kfree(new);
 	return err;
 }
 
 static int mall_delete(struct tcf_proto *tp, unsigned long arg)
 {
-	struct cls_mall_head *head = rtnl_dereference(tp->root);
-	struct cls_mall_filter *f = (struct cls_mall_filter *) arg;
-	struct net_device *dev = tp->q->dev_queue->dev;
-
-	if (tc_should_offload(dev, tp, f->flags))
-		mall_destroy_hw_filter(tp, f, (unsigned long) f);
-
-	RCU_INIT_POINTER(head->filter, NULL);
-	tcf_unbind_filter(tp, &f->res);
-	call_rcu(&f->rcu, mall_destroy_filter);
-	return 0;
+	return -EOPNOTSUPP;
 }
 
 static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg)
 {
 	struct cls_mall_head *head = rtnl_dereference(tp->root);
-	struct cls_mall_filter *f = head->filter;
 
 	if (arg->count < arg->skip)
 		goto skip;
-	if (arg->fn(tp, (unsigned long) f, arg) < 0)
+	if (arg->fn(tp, (unsigned long) head, arg) < 0)
 		arg->stop = 1;
 skip:
 	arg->count++;

@@ -255,28 +218,28 @@ skip:
 static int mall_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
 		     struct sk_buff *skb, struct tcmsg *t)
 {
-	struct cls_mall_filter *f = (struct cls_mall_filter *) fh;
+	struct cls_mall_head *head = (struct cls_mall_head *) fh;
 	struct nlattr *nest;
 
-	if (!f)
+	if (!head)
 		return skb->len;
 
-	t->tcm_handle = f->handle;
+	t->tcm_handle = head->handle;
 
 	nest = nla_nest_start(skb, TCA_OPTIONS);
 	if (!nest)
 		goto nla_put_failure;
 
-	if (f->res.classid &&
-	    nla_put_u32(skb, TCA_MATCHALL_CLASSID, f->res.classid))
+	if (head->res.classid &&
+	    nla_put_u32(skb, TCA_MATCHALL_CLASSID, head->res.classid))
 		goto nla_put_failure;
 
-	if (tcf_exts_dump(skb, &f->exts))
+	if (tcf_exts_dump(skb, &head->exts))
 		goto nla_put_failure;
 
 	nla_nest_end(skb, nest);
 
-	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
+	if (tcf_exts_dump_stats(skb, &head->exts) < 0)
 		goto nla_put_failure;
 
 	return skb->len;

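Since matchall can only ever hold one filter, this change collapses the separate head/filter pair into a single head published through tp->root; replacing the filter becomes an atomic pointer swap, with the old head freed after an RCU grace period. A compressed sketch of publish-then-defer-free, with C11 atomics standing in for rcu_assign_pointer() and an immediate free standing in for call_rcu() (valid here only because the sketch has no concurrent readers):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct head { int classid; };

static _Atomic(struct head *) root;

/* Readers would dereference 'root' under RCU; the writer swaps it in
 * one shot and defers freeing the old head. */
static void replace(struct head *new)
{
	struct head *old = atomic_exchange(&root, new);

	if (old)
		free(old);	/* kernel: call_rcu(&old->rcu, mall_destroy_rcu) */
}

int main(void)
{
	struct head *h = malloc(sizeof(*h));
	h->classid = 1;
	replace(h);

	struct head *h2 = malloc(sizeof(*h2));
	h2->classid = 2;
	replace(h2);

	printf("classid=%d\n", atomic_load(&root)->classid);
	replace(NULL);
	return 0;
}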
@@ -260,7 +260,7 @@ static int gssx_dec_option_array(struct xdr_stream *xdr,
 	if (!oa->data)
 		return -ENOMEM;
 
-	creds = kmalloc(sizeof(struct svc_cred), GFP_KERNEL);
+	creds = kzalloc(sizeof(struct svc_cred), GFP_KERNEL);
 	if (!creds) {
 		kfree(oa->data);
 		return -ENOMEM;

@@ -150,9 +150,9 @@ int arch_decode_instruction(struct elf *elf, struct section *sec,
 		*type = INSN_RETURN;
 		break;
 
-	case 0xc5: /* iret */
 	case 0xca: /* retf */
 	case 0xcb: /* retf */
+	case 0xcf: /* iret */
 		*type = INSN_CONTEXT_SWITCH;
 		break;
 

@@ -1199,7 +1199,7 @@ static int ui_init(void)
 		BUG_ON(1);
 	}
 
-	perf_hpp__register_sort_field(fmt);
+	perf_hpp__prepend_sort_field(fmt);
 	return 0;
 }
 

@@ -521,6 +521,12 @@ void perf_hpp_list__register_sort_field(struct perf_hpp_list *list,
 	list_add_tail(&format->sort_list, &list->sorts);
 }
 
+void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
+				       struct perf_hpp_fmt *format)
+{
+	list_add(&format->sort_list, &list->sorts);
+}
+
 void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
 {
 	list_del(&format->list);

@@ -560,6 +566,10 @@ void perf_hpp__setup_output_field(struct perf_hpp_list *list)
 	perf_hpp_list__for_each_sort_list(list, fmt) {
 		struct perf_hpp_fmt *pos;
 
+		/* skip sort-only fields ("sort_compute" in perf diff) */
+		if (!fmt->entry && !fmt->color)
+			continue;
+
 		perf_hpp_list__for_each_format(list, pos) {
 			if (fmt_equal(fmt, pos))
 				goto next;

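Prepend and register differ only in list_add() versus list_add_tail(): prepending puts the diff-specific sort key ahead of the default keys, so it wins when -o/--order is given. A minimal circular doubly-linked list showing the two insertions, shaped like the kernel's list.h but trimmed down (names and entries here are illustrative):

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void __list_add(struct list_head *n, struct list_head *prev,
		       struct list_head *next)
{
	next->prev = n; n->next = next; n->prev = prev; prev->next = n;
}

/* list_add(): insert right after the head -> becomes first */
static void list_add(struct list_head *n, struct list_head *h)
{ __list_add(n, h, h->next); }

/* list_add_tail(): insert right before the head -> becomes last */
static void list_add_tail(struct list_head *n, struct list_head *h)
{ __list_add(n, h->prev, h); }

struct fmt { const char *name; struct list_head node; };

int main(void)
{
	struct list_head sorts = { &sorts, &sorts };
	struct fmt a = { "overhead" }, b = { "symbol" }, c = { "delta" };

	list_add_tail(&a.node, &sorts);	/* register: appended */
	list_add_tail(&b.node, &sorts);
	list_add(&c.node, &sorts);	/* prepend: sorted first */

	for (struct list_head *p = sorts.next; p != &sorts; p = p->next) {
		struct fmt *f = (struct fmt *)((char *)p - offsetof(struct fmt, node));
		printf("%s\n", f->name);	/* delta, overhead, symbol */
	}
	return 0;
}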
@@ -283,6 +283,8 @@ void perf_hpp_list__column_register(struct perf_hpp_list *list,
 				    struct perf_hpp_fmt *format);
 void perf_hpp_list__register_sort_field(struct perf_hpp_list *list,
 					struct perf_hpp_fmt *format);
+void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
+				       struct perf_hpp_fmt *format);
 
 static inline void perf_hpp__column_register(struct perf_hpp_fmt *format)
 {

@@ -294,6 +296,11 @@ static inline void perf_hpp__register_sort_field(struct perf_hpp_fmt *format)
 	perf_hpp_list__register_sort_field(&perf_hpp_list, format);
 }
 
+static inline void perf_hpp__prepend_sort_field(struct perf_hpp_fmt *format)
+{
+	perf_hpp_list__prepend_sort_field(&perf_hpp_list, format);
+}
+
 #define perf_hpp_list__for_each_format(_list, format) \
 	list_for_each_entry(format, &(_list)->fields, list)