soc/fsl/qbman: Add an argument to signal if NAPI processing is required.

dpaa_eth_napi_schedule() and caam_qi_napi_schedule() schedule NAPI if
invoked from:

 - Hard interrupt context
 - Any context which is not serving soft interrupts

Any context which is not serving soft interrupts includes hard interrupts
so the in_irq() check is redundant. caam_qi_napi_schedule() has a comment
about this:

        /*
         * In case of threaded ISR, for RT kernels in_irq() does not return
         * appropriate value, so use in_serving_softirq to distinguish between
         * softirq and irq contexts.
         */
         if (in_irq() || !in_serving_softirq())

This has nothing to do with RT. Even on a non RT kernel force threaded
interrupts run obviously in thread context and therefore in_irq() returns
false when invoked from the handler.

The extension of the in_irq() check with !in_serving_softirq() was there
when the drivers were added, but in the out of tree FSL BSP the original
condition was in_irq() which got extended due to failures on RT.

The usage of in_xxx() in drivers is phased out and Linus clearly requested
that code which changes behaviour depending on context should either be
separated or the context be conveyed in an argument passed by the caller,
which usually knows the context. Right he is, the above construct is
clearly showing why.

The following callchains have been analyzed to end up in
dpaa_eth_napi_schedule():

qman_p_poll_dqrr()
  __poll_portal_fast()
    fq->cb.dqrr()
       dpaa_eth_napi_schedule()

portal_isr()
  __poll_portal_fast()
    fq->cb.dqrr()
       dpaa_eth_napi_schedule()

Both need to schedule NAPI.
The crypto part has another code path leading up to this:
  kill_fq()
     empty_retired_fq()
       qman_p_poll_dqrr()
         __poll_portal_fast()
            fq->cb.dqrr()
               dpaa_eth_napi_schedule()

kill_fq() is called from task context and ends up scheduling NAPI, but
that's pointless and an unintended side effect of the !in_serving_softirq()
check.

The code path:
  caam_qi_poll() -> qman_p_poll_dqrr()

is invoked from NAPI and I *assume* from crypto's NAPI device and not
from qbman's NAPI device. I *guess* it is okay to skip scheduling NAPI
(because this is what happens now) but could be changed if it is wrong
due to `budget' handling.

Add an argument to __poll_portal_fast() which is true if NAPI needs to be
scheduled. This requires propagating the value to the caller including
`qman_cb_dqrr' typedef which is used by the dpaa and the crypto driver.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Aymen Sghaier <aymen.sghaier@nxp.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Li Yang <leoyang.li@nxp.com>
Reviewed-by: Horia Geantă <horia.geanta@nxp.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Reviewed-by: Madalin Bucur <madalin.bucur@oss.nxp.com>
Tested-by: Camelia Groza <camelia.groza@nxp.com>
This commit is contained in:
Sebastian Andrzej Siewior 2020-11-02 00:22:55 +01:00 committed by Jakub Kicinski
parent e9e13b6adc
commit f84754dbc5
6 changed files with 26 additions and 16 deletions

drivers/crypto/caam/qi.c

@@ -564,7 +564,8 @@ static int caam_qi_napi_schedule(struct qman_portal *p, struct caam_napi *np)
 
 static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
 						    struct qman_fq *rsp_fq,
-						    const struct qm_dqrr_entry *dqrr)
+						    const struct qm_dqrr_entry *dqrr,
+						    bool sched_napi)
 {
 	struct caam_napi *caam_napi = raw_cpu_ptr(&pcpu_qipriv.caam_napi);
 	struct caam_drv_req *drv_req;

drivers/net/ethernet/freescale/dpaa/dpaa_eth.c

@@ -2316,7 +2316,8 @@ static inline int dpaa_eth_napi_schedule(struct dpaa_percpu_priv *percpu_priv,
 
 static enum qman_cb_dqrr_result rx_error_dqrr(struct qman_portal *portal,
 					      struct qman_fq *fq,
-					      const struct qm_dqrr_entry *dq)
+					      const struct qm_dqrr_entry *dq,
+					      bool sched_napi)
 {
 	struct dpaa_fq *dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);
 	struct dpaa_percpu_priv *percpu_priv;
@@ -2343,7 +2344,8 @@ static enum qman_cb_dqrr_result rx_error_dqrr(struct qman_portal *portal,
 
 static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
 						struct qman_fq *fq,
-						const struct qm_dqrr_entry *dq)
+						const struct qm_dqrr_entry *dq,
+						bool sched_napi)
 {
 	struct skb_shared_hwtstamps *shhwtstamps;
 	struct rtnl_link_stats64 *percpu_stats;
@@ -2460,7 +2462,8 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
 
 static enum qman_cb_dqrr_result conf_error_dqrr(struct qman_portal *portal,
 						struct qman_fq *fq,
-						const struct qm_dqrr_entry *dq)
+						const struct qm_dqrr_entry *dq,
+						bool sched_napi)
 {
 	struct dpaa_percpu_priv *percpu_priv;
 	struct net_device *net_dev;
@@ -2481,7 +2484,8 @@ static enum qman_cb_dqrr_result conf_error_dqrr(struct qman_portal *portal,
 
 static enum qman_cb_dqrr_result conf_dflt_dqrr(struct qman_portal *portal,
 					       struct qman_fq *fq,
-					       const struct qm_dqrr_entry *dq)
+					       const struct qm_dqrr_entry *dq,
+					       bool sched_napi)
 {
 	struct dpaa_percpu_priv *percpu_priv;
 	struct net_device *net_dev;

drivers/soc/fsl/qbman/qman.c

@@ -1159,7 +1159,7 @@ static u32 fq_to_tag(struct qman_fq *fq)
 
 static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
 static inline unsigned int __poll_portal_fast(struct qman_portal *p,
-					unsigned int poll_limit);
+					unsigned int poll_limit, bool sched_napi);
 static void qm_congestion_task(struct work_struct *work);
 static void qm_mr_process_task(struct work_struct *work);
@@ -1174,7 +1174,7 @@ static irqreturn_t portal_isr(int irq, void *ptr)
 
 	/* DQRR-handling if it's interrupt-driven */
 	if (is & QM_PIRQ_DQRI) {
-		__poll_portal_fast(p, QMAN_POLL_LIMIT);
+		__poll_portal_fast(p, QMAN_POLL_LIMIT, true);
 		clear = QM_DQAVAIL_MASK | QM_PIRQ_DQRI;
 	}
 	/* Handling of anything else that's interrupt-driven */
@@ -1602,7 +1602,7 @@ static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
  * user callbacks to call into any QMan API.
  */
 static inline unsigned int __poll_portal_fast(struct qman_portal *p,
-					unsigned int poll_limit)
+					unsigned int poll_limit, bool sched_napi)
 {
 	const struct qm_dqrr_entry *dq;
 	struct qman_fq *fq;
@@ -1636,7 +1636,7 @@ static inline unsigned int __poll_portal_fast(struct qman_portal *p,
 			 * and we don't want multiple if()s in the critical
 			 * path (SDQCR).
 			 */
-			res = fq->cb.dqrr(p, fq, dq);
+			res = fq->cb.dqrr(p, fq, dq, sched_napi);
 			if (res == qman_cb_dqrr_stop)
 				break;
 			/* Check for VDQCR completion */
@@ -1646,7 +1646,7 @@ static inline unsigned int __poll_portal_fast(struct qman_portal *p,
 			/* SDQCR: context_b points to the FQ */
 			fq = tag_to_fq(be32_to_cpu(dq->context_b));
 			/* Now let the callback do its stuff */
-			res = fq->cb.dqrr(p, fq, dq);
+			res = fq->cb.dqrr(p, fq, dq, sched_napi);
 			/*
 			 * The callback can request that we exit without
 			 * consuming this entry nor advancing;
@@ -1753,7 +1753,7 @@ EXPORT_SYMBOL(qman_start_using_portal);
 
 int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit)
 {
-	return __poll_portal_fast(p, limit);
+	return __poll_portal_fast(p, limit, false);
 }
 EXPORT_SYMBOL(qman_p_poll_dqrr);

drivers/soc/fsl/qbman/qman_test_api.c

@@ -45,7 +45,8 @@
 
 static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *,
 					struct qman_fq *,
-					const struct qm_dqrr_entry *);
+					const struct qm_dqrr_entry *,
+					bool sched_napi);
 static void cb_ern(struct qman_portal *, struct qman_fq *,
 		   const union qm_mr_entry *);
 static void cb_fqs(struct qman_portal *, struct qman_fq *,
@@ -208,7 +209,8 @@ failed:
 
 static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *p,
 					struct qman_fq *fq,
-					const struct qm_dqrr_entry *dq)
+					const struct qm_dqrr_entry *dq,
+					bool sched_napi)
 {
 	if (WARN_ON(fd_neq(&fd_dq, &dq->fd))) {
 		pr_err("BADNESS: dequeued frame doesn't match;\n");

drivers/soc/fsl/qbman/qman_test_stash.c

@@ -275,7 +275,8 @@ static inline int process_frame_data(struct hp_handler *handler,
 
 static enum qman_cb_dqrr_result normal_dqrr(struct qman_portal *portal,
 					    struct qman_fq *fq,
-					    const struct qm_dqrr_entry *dqrr)
+					    const struct qm_dqrr_entry *dqrr,
+					    bool sched_napi)
 {
 	struct hp_handler *handler = (struct hp_handler *)fq;
@@ -293,7 +294,8 @@ skip:
 
 static enum qman_cb_dqrr_result special_dqrr(struct qman_portal *portal,
 					     struct qman_fq *fq,
-					     const struct qm_dqrr_entry *dqrr)
+					     const struct qm_dqrr_entry *dqrr,
+					     bool sched_napi)
 {
 	struct hp_handler *handler = (struct hp_handler *)fq;

include/soc/fsl/qman.h

@@ -689,7 +689,8 @@ enum qman_cb_dqrr_result {
 };
 
 typedef enum qman_cb_dqrr_result (*qman_cb_dqrr)(struct qman_portal *qm,
 					struct qman_fq *fq,
-					const struct qm_dqrr_entry *dqrr);
+					const struct qm_dqrr_entry *dqrr,
+					bool sched_napi);
 
 /*
  * This callback type is used when handling ERNs, FQRNs and FQRLs via MR. They