/*
 * ARM64 CPU idle arch support
 *
 * Copyright (C) 2014 ARM Ltd.
 * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/acpi.h>
#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <asm/cpuidle.h>
#include <asm/cpu_ops.h>

int arm_cpuidle_init(unsigned int cpu)
{
	int ret = -EOPNOTSUPP;

	/*
	 * Validate the cpu_ops and cpu_suspend hooks here, at init time,
	 * rather than on every idle-state entry: if either is missing,
	 * this function returns -EOPNOTSUPP and arm_cpuidle_suspend()
	 * will never be invoked, so the hot idle-entry path does not need
	 * to re-check them. On a Marvell BG4CT STB board, dropping those
	 * checks from the idle path reduced the time from
	 * arm_cpuidle_suspend() entry to the back-end suspend call by up
	 * to ~2.5% under load.
	 */
	if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_suspend &&
	    cpu_ops[cpu]->cpu_init_idle)
		ret = cpu_ops[cpu]->cpu_init_idle(cpu);

	return ret;
}

/**
 * arm_cpuidle_suspend() - function to enter a low-power idle state
 * @index: argument to pass to CPU suspend operations
 *
 * Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU
 * operations back-end error code otherwise.
 */
int arm_cpuidle_suspend(int index)
{
	int cpu = smp_processor_id();

	return cpu_ops[cpu]->cpu_suspend(index);
}
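
/*
 * Usage sketch (illustrative, not part of this file): a generic ARM cpuidle
 * driver, e.g. drivers/cpuidle/cpuidle-arm.c, typically calls
 * arm_cpuidle_init() once per CPU while probing its idle states and then
 * wraps arm_cpuidle_suspend() with CPU_PM_CPU_IDLE_ENTER() from its
 * ->enter() callback, so that CPU PM notifiers fire around the low-power
 * entry. The function name below is hypothetical:
 *
 *	static int example_enter_idle_state(struct cpuidle_device *dev,
 *					    struct cpuidle_driver *drv, int idx)
 *	{
 *		return CPU_PM_CPU_IDLE_ENTER(arm_cpuidle_suspend, idx);
 *	}
 */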

#ifdef CONFIG_ACPI

#include <acpi/processor.h>

#define ARM64_LPI_IS_RETENTION_STATE(arch_flags)	(!(arch_flags))

int acpi_processor_ffh_lpi_probe(unsigned int cpu)
{
	return arm_cpuidle_init(cpu);
}

int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi)
{
	/*
	 * LPI states with zero arch_flags are retention states: CPU context
	 * is preserved, so enter them without issuing CPU PM notifications.
	 * Power-down states go through CPU_PM_CPU_IDLE_ENTER() so that CPU
	 * PM notifiers can save and restore per-CPU context.
	 */
	if (ARM64_LPI_IS_RETENTION_STATE(lpi->arch_flags))
		return CPU_PM_CPU_IDLE_ENTER_RETENTION(arm_cpuidle_suspend,
						lpi->index);
	else
		return CPU_PM_CPU_IDLE_ENTER(arm_cpuidle_suspend, lpi->index);
}
#endif