#ifndef _SPARC64_MDESC_H
#define _SPARC64_MDESC_H

#include <linux/types.h>
#include <linux/cpumask.h>
#include <asm/prom.h>

struct mdesc_handle;

/* Machine description operations are to be surrounded by grab and
 * release calls. The mdesc_handle returned from the grab is
 * the first argument to all of the operational calls that work
 * on mdescs.
 */
extern struct mdesc_handle *mdesc_grab(void);
extern void mdesc_release(struct mdesc_handle *);
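
/* A minimal usage sketch of the grab/release bracketing described
 * above (error handling elided):
 *
 *	struct mdesc_handle *hp = mdesc_grab();
 *
 *	... call the mdesc_*() operations declared below with hp ...
 *
 *	mdesc_release(hp);
 */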

#define MDESC_NODE_NULL		(~(u64)0)

extern u64 mdesc_node_by_name(struct mdesc_handle *handle,
			      u64 from_node, const char *name);
#define mdesc_for_each_node_by_name(__hdl, __node, __name) \
	for (__node = mdesc_node_by_name(__hdl, MDESC_NODE_NULL, __name); \
	     (__node) != MDESC_NODE_NULL; \
	     __node = mdesc_node_by_name(__hdl, __node, __name))

/* Access to property values returned from mdesc_get_property() is
 * only valid inside of a mdesc_grab()/mdesc_release() sequence.
 * Once mdesc_release() is called, the memory backing these
 * pointers may have been freed.
 *
 * Therefore callers must make copies of any property values
 * they need.
 *
 * These same rules apply to mdesc_node_name().
 */
extern const void *mdesc_get_property(struct mdesc_handle *handle,
				      u64 node, const char *name, int *lenp);
extern const char *mdesc_node_name(struct mdesc_handle *hp, u64 node);
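
/* An illustrative sketch of the copy rule above; the "cpu" node name
 * and "id" property are examples only and error handling is elided:
 *
 *	struct mdesc_handle *hp = mdesc_grab();
 *	u64 node = mdesc_node_by_name(hp, MDESC_NODE_NULL, "cpu");
 *	const u64 *val;
 *	u64 copy = 0;
 *	int len;
 *
 *	val = mdesc_get_property(hp, node, "id", &len);
 *	if (val)
 *		copy = *val;
 *	mdesc_release(hp);
 *
 * After mdesc_release() only 'copy' may be used; 'val' may now point
 * at freed memory.
 */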

/* MD arc iteration, the standard sequence is:
 *
 *	unsigned long arc;
 *	mdesc_for_each_arc(arc, handle, node, MDESC_ARC_TYPE_{FWD,BACK}) {
 *		unsigned long target = mdesc_arc_target(handle, arc);
 *		...
 *	}
 */

#define MDESC_ARC_TYPE_FWD	"fwd"
#define MDESC_ARC_TYPE_BACK	"back"

extern u64 mdesc_next_arc(struct mdesc_handle *handle, u64 from,
			  const char *arc_type);
#define mdesc_for_each_arc(__arc, __hdl, __node, __type) \
	for (__arc = mdesc_next_arc(__hdl, __node, __type); \
	     (__arc) != MDESC_NODE_NULL; \
	     __arc = mdesc_next_arc(__hdl, __arc, __type))

extern u64 mdesc_arc_target(struct mdesc_handle *hp, u64 arc);
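
/* An illustrative sketch that resolves each forward arc of 'node' to
 * its target and name, following the iteration comment above (a
 * surrounding grab/release pair on 'hp' is assumed):
 *
 *	u64 arc;
 *
 *	mdesc_for_each_arc(arc, hp, node, MDESC_ARC_TYPE_FWD) {
 *		u64 target = mdesc_arc_target(hp, arc);
 *		const char *name = mdesc_node_name(hp, target);
 *		...
 *	}
 */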

extern void mdesc_update(void);

extern void mdesc_fill_in_cpu_data(cpumask_t mask);

extern void sun4v_mdesc_init(void);

#endif