crypto: inside-secure - irq balance

Balance the IRQs of the inside-secure driver over all
available CPUs. Currently all ring interrupts are handled
by the first CPU.

From my testing with IPsec AES-GCM-256
on my MCbin with 4 cores I get about a 50% speed increase:

Before the patch: 99.73 Kpps
With the patch: 151.25 Kpps
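
The change boils down to picking one CPU per ring with cpumask_local_spread()
and publishing it as that ring IRQ's affinity hint, then dropping the hint
again on remove. A minimal standalone sketch of the pattern follows
(spread_ring_irqs(), clear_ring_irq_hints(), ring_irqs[] and nr_rings are
placeholder names for illustration, not driver symbols):

#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/numa.h>

/* Spread nr_rings ring interrupts round-robin over the online CPUs. */
static void spread_ring_irqs(const int *ring_irqs, int nr_rings)
{
        int i, cpu;

        for (i = 0; i < nr_rings; i++) {
                /* i-th online CPU, wrapping around when rings outnumber CPUs */
                cpu = cpumask_local_spread(i, NUMA_NO_NODE);
                irq_set_affinity_hint(ring_irqs[i], get_cpu_mask(cpu));
        }
}

/* Undo the hints before the interrupts go away, as in safexcel_remove(). */
static void clear_ring_irq_hints(const int *ring_irqs, int nr_rings)
{
        int i;

        for (i = 0; i < nr_rings; i++)
                irq_set_affinity_hint(ring_irqs[i], NULL);
}

Note that irq_set_affinity_hint() does not only export the hint to userspace;
with a non-NULL mask it also applies the affinity, so the ring interrupts move
to their CPUs without relying on irqbalance.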

Signed-off-by: Sven Auhagen <sven.auhagen@voleatech.de>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2 changed files with 14 additions and 2 deletions

--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -1135,11 +1135,12 @@ static irqreturn_t safexcel_irq_ring_thread(int irq, void *data)
 
 static int safexcel_request_ring_irq(void *pdev, int irqid,
                                      int is_pci_dev,
+                                     int ring_id,
                                      irq_handler_t handler,
                                      irq_handler_t threaded_handler,
                                      struct safexcel_ring_irq_data *ring_irq_priv)
 {
-        int ret, irq;
+        int ret, irq, cpu;
         struct device *dev;
 
         if (IS_ENABLED(CONFIG_PCI) && is_pci_dev) {
@@ -1177,6 +1178,10 @@ static int safexcel_request_ring_irq(void *pdev, int irqid,
                 return ret;
         }
 
+        /* Set affinity */
+        cpu = cpumask_local_spread(ring_id, NUMA_NO_NODE);
+        irq_set_affinity_hint(irq, get_cpu_mask(cpu));
+
         return irq;
 }
 
@@ -1611,6 +1616,7 @@ static int safexcel_probe_generic(void *pdev,
                 irq = safexcel_request_ring_irq(pdev,
                                                 EIP197_IRQ_NUMBER(i, is_pci_dev),
                                                 is_pci_dev,
+                                                i,
                                                 safexcel_irq_ring,
                                                 safexcel_irq_ring_thread,
                                                 ring_irq);
@@ -1619,6 +1625,7 @@ static int safexcel_probe_generic(void *pdev,
                         return irq;
                 }
 
+                priv->ring[i].irq = irq;
                 priv->ring[i].work_data.priv = priv;
                 priv->ring[i].work_data.ring = i;
                 INIT_WORK(&priv->ring[i].work_data.work,
@@ -1756,8 +1763,10 @@ static int safexcel_remove(struct platform_device *pdev)
         clk_disable_unprepare(priv->reg_clk);
         clk_disable_unprepare(priv->clk);
 
-        for (i = 0; i < priv->config.rings; i++)
+        for (i = 0; i < priv->config.rings; i++) {
+                irq_set_affinity_hint(priv->ring[i].irq, NULL);
                 destroy_workqueue(priv->ring[i].workqueue);
+        }
 
         return 0;
 }

--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -707,6 +707,9 @@ struct safexcel_ring {
          */
         struct crypto_async_request *req;
         struct crypto_async_request *backlog;
+
+        /* irq of this ring */
+        int irq;
 };
 
 /* EIP integration context flags */