Slurm service migration

tzwang 2022-10-25 02:15:25 -07:00
parent cc9eaf9cae
commit 1eb2a03f45
5 changed files with 958 additions and 0 deletions

View File

@@ -0,0 +1,16 @@
package slurm
/*
#cgo LDFLAGS: -lslurm
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <slurm/slurm.h>
#include <slurm/slurm_errno.h>
*/
import "C"
func GetErrorString(errno uint32) string {
msg := C.GoString(C.slurm_strerror(C.int(errno)))
return msg
}
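
Usage note (not part of this commit): a minimal sketch of how GetErrorString would typically be called, assuming it is compiled inside this slurm package with libslurm available; ReportError is a hypothetical helper name.
package slurm

import "fmt"

// ReportError prints a readable message for a non-zero Slurm error code,
// e.g. the Error_code field of a Submit_response_msg.
func ReportError(errno uint32) {
	if errno != 0 {
		fmt.Printf("slurm error %d: %s\n", errno, GetErrorString(errno))
	}
}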

View File

@@ -0,0 +1,227 @@
package slurm
/*
#cgo LDFLAGS: -lslurm
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <slurm/slurm.h>
#include <slurm/slurm_errno.h>
/* Dereference helpers used by the struct converters below: each returns the
 * pointed-to value, or -1 when the pointer is NULL (for the unsigned
 * variants -1 wraps to the type's maximum value). */
static inline uint8_t uint8_ptr(uint8_t* pointer) {
if (NULL == pointer)
return -1;
return *pointer;
}
static inline int8_t int8_ptr(int8_t* pointer) {
if (NULL == pointer)
return -1;
return *pointer;
}
static inline uint16_t uint16_ptr(uint16_t* pointer) {
if (NULL == pointer)
return -1;
return *pointer;
}
static inline int16_t int16_ptr(int16_t* pointer) {
if (NULL == pointer)
return -1;
return *pointer;
}
static inline uint32_t uint32_ptr(uint32_t* pointer) {
if (NULL == pointer)
return -1;
return *pointer;
}
static inline int32_t int32_ptr(int32_t* pointer) {
if (NULL == pointer)
return -1;
return *pointer;
}
static inline uint64_t uint64_ptr(uint64_t* pointer) {
if (NULL == pointer)
return -1;
return *pointer;
}
static inline int64_t int64_ptr(int64_t* pointer) {
if (NULL == pointer)
return -1;
return *pointer;
}
*/
import "C"
func Job_info_convert_c_to_go(c_struct *C.struct_job_info) Job_info {
var go_struct Job_info
go_struct.account = C.GoString(c_struct.account)
go_struct.alloc_node = C.GoString(c_struct.alloc_node)
go_struct.alloc_sid = uint32(c_struct.alloc_sid)
go_struct.array_job_id = uint32(c_struct.array_job_id)
go_struct.array_task_id = uint16(c_struct.array_task_id)
go_struct.assoc_id = uint32(c_struct.assoc_id)
go_struct.batch_flag = uint16(c_struct.batch_flag)
go_struct.batch_host = C.GoString(c_struct.batch_host)
go_struct.boards_per_node = uint16(c_struct.boards_per_node)
go_struct.batch_script = C.GoString(c_struct.batch_script)
go_struct.command = C.GoString(c_struct.command)
go_struct.comment = C.GoString(c_struct.comment)
go_struct.contiguous = uint16(c_struct.contiguous)
go_struct.cores_per_socket = uint16(c_struct.cores_per_socket)
go_struct.cpus_per_task = uint16(c_struct.cpus_per_task)
go_struct.dependency = C.GoString(c_struct.dependency)
go_struct.derived_ec = uint32(c_struct.derived_ec)
go_struct.eligible_time = int64(c_struct.eligible_time)
go_struct.end_time = int64(c_struct.end_time)
go_struct.exc_nodes = C.GoString(c_struct.exc_nodes)
go_struct.exc_node_inx = int32(C.int32_ptr(c_struct.exc_node_inx))
go_struct.exit_code = uint32(c_struct.exit_code)
go_struct.features = C.GoString(c_struct.features)
go_struct.group_id = uint32(c_struct.group_id)
go_struct.gres = C.GoString(c_struct.gres)
go_struct.job_id = uint32(c_struct.job_id)
go_struct.job_state = uint16(c_struct.job_state)
go_struct.licenses = C.GoString(c_struct.licenses)
go_struct.max_cpus = uint32(c_struct.max_cpus)
go_struct.max_nodes = uint32(c_struct.max_nodes)
go_struct.name = C.GoString(c_struct.name)
go_struct.network = C.GoString(c_struct.network)
go_struct.nodes = C.GoString(c_struct.nodes)
go_struct.nice = uint16(c_struct.nice)
go_struct.node_inx = int32(C.int32_ptr(c_struct.node_inx))
go_struct.ntasks_per_core = uint16(c_struct.ntasks_per_core)
go_struct.ntasks_per_node = uint16(c_struct.ntasks_per_node)
go_struct.ntasks_per_socket = uint16(c_struct.ntasks_per_socket)
go_struct.ntasks_per_board = uint16(c_struct.ntasks_per_board)
go_struct.num_cpus = uint32(c_struct.num_cpus)
go_struct.num_nodes = uint32(c_struct.num_nodes)
go_struct.partition = C.GoString(c_struct.partition)
go_struct.pn_min_memory = uint32(c_struct.pn_min_memory)
go_struct.pn_min_cpus = uint16(c_struct.pn_min_cpus)
go_struct.pn_min_tmp_disk = uint32(c_struct.pn_min_tmp_disk)
go_struct.preempt_time = int64(c_struct.preempt_time)
go_struct.pre_sus_time = int64(c_struct.pre_sus_time)
go_struct.priority = uint32(c_struct.priority)
go_struct.profile = uint32(c_struct.profile)
go_struct.qos = C.GoString(c_struct.qos)
go_struct.req_nodes = C.GoString(c_struct.req_nodes)
go_struct.req_node_inx = int32(C.int32_ptr(c_struct.req_node_inx))
go_struct.req_switch = uint32(c_struct.req_switch)
go_struct.requeue = uint16(c_struct.requeue)
go_struct.resize_time = int64(c_struct.resize_time)
go_struct.restart_cnt = uint16(c_struct.restart_cnt)
go_struct.resv_name = C.GoString(c_struct.resv_name)
go_struct.shared = uint16(c_struct.shared)
go_struct.show_flags = uint16(c_struct.show_flags)
go_struct.sockets_per_board = uint16(c_struct.sockets_per_board)
go_struct.sockets_per_node = uint16(c_struct.sockets_per_node)
go_struct.start_time = int64(c_struct.start_time)
go_struct.state_desc = C.GoString(c_struct.state_desc)
go_struct.state_reason = uint16(c_struct.state_reason)
go_struct.submit_time = int64(c_struct.submit_time)
go_struct.suspend_time = int64(c_struct.suspend_time)
go_struct.time_limit = uint32(c_struct.time_limit)
go_struct.time_min = uint32(c_struct.time_min)
go_struct.threads_per_core = uint16(c_struct.threads_per_core)
go_struct.user_id = uint32(c_struct.user_id)
go_struct.wait4switch = uint32(c_struct.wait4switch)
go_struct.wckey = C.GoString(c_struct.wckey)
go_struct.work_dir = C.GoString(c_struct.work_dir)
return go_struct
}
func Job_descriptor_convert_c_to_go(c_struct *C.struct_job_descriptor) Job_descriptor {
var go_struct Job_descriptor
go_struct.Account = C.GoString(c_struct.account)
go_struct.Acctg_freq = C.GoString(c_struct.acctg_freq)
go_struct.Alloc_node = C.GoString(c_struct.alloc_node)
go_struct.Alloc_resp_port = uint16(c_struct.alloc_resp_port)
go_struct.Alloc_sid = uint32(c_struct.alloc_sid)
go_struct.Argc = uint32(c_struct.argc)
go_struct.Array_inx = C.GoString(c_struct.array_inx)
go_struct.Begin_time = int64(c_struct.begin_time)
go_struct.Ckpt_interval = uint16(c_struct.ckpt_interval)
go_struct.Ckpt_dir = C.GoString(c_struct.ckpt_dir)
go_struct.Comment = C.GoString(c_struct.comment)
go_struct.Contiguous = uint16(c_struct.contiguous)
go_struct.Cpu_bind = C.GoString(c_struct.cpu_bind)
go_struct.Cpu_bind_type = uint16(c_struct.cpu_bind_type)
go_struct.Dependency = C.GoString(c_struct.dependency)
go_struct.End_time = int64(c_struct.end_time)
go_struct.Env_size = uint32(c_struct.env_size)
go_struct.Exc_nodes = C.GoString(c_struct.exc_nodes)
go_struct.Features = C.GoString(c_struct.features)
go_struct.Group_id = uint32(c_struct.group_id)
go_struct.Immediate = uint16(c_struct.immediate)
go_struct.Job_id = uint32(c_struct.job_id)
go_struct.Kill_on_node_fail = uint16(c_struct.kill_on_node_fail)
go_struct.Licenses = C.GoString(c_struct.licenses)
go_struct.Mail_type = uint16(c_struct.mail_type)
go_struct.Mail_user = C.GoString(c_struct.mail_user)
go_struct.Mem_bind = C.GoString(c_struct.mem_bind)
go_struct.Mem_bind_type = uint16(c_struct.mem_bind_type)
go_struct.Name = C.GoString(c_struct.name)
go_struct.Network = C.GoString(c_struct.network)
go_struct.Nice = uint16(c_struct.nice)
go_struct.Num_tasks = uint32(c_struct.num_tasks)
go_struct.Open_mode = uint8(c_struct.open_mode)
go_struct.Other_port = uint16(c_struct.other_port)
go_struct.Overcommit = uint8(c_struct.overcommit)
go_struct.Partition = C.GoString(c_struct.partition)
go_struct.Plane_size = uint16(c_struct.plane_size)
go_struct.Priority = uint32(c_struct.priority)
go_struct.Profile = uint32(c_struct.profile)
go_struct.Qos = C.GoString(c_struct.qos)
go_struct.Reboot = uint16(c_struct.reboot)
go_struct.Resp_host = C.GoString(c_struct.resp_host)
go_struct.Req_nodes = C.GoString(c_struct.req_nodes)
go_struct.Requeue = uint16(c_struct.requeue)
go_struct.Reservation = C.GoString(c_struct.reservation)
go_struct.Script = C.GoString(c_struct.script)
go_struct.Shared = uint16(c_struct.shared)
go_struct.Spank_job_env_size = uint32(c_struct.spank_job_env_size)
go_struct.Task_dist = uint16(c_struct.task_dist)
go_struct.Time_limit = uint32(c_struct.time_limit)
go_struct.Time_min = uint32(c_struct.time_min)
go_struct.User_id = uint32(c_struct.user_id)
go_struct.Wait_all_nodes = uint16(c_struct.wait_all_nodes)
go_struct.Warn_signal = uint16(c_struct.warn_signal)
go_struct.Warn_time = uint16(c_struct.warn_time)
go_struct.Work_dir = C.GoString(c_struct.work_dir)
go_struct.Cpus_per_task = uint16(c_struct.cpus_per_task)
go_struct.Min_cpus = uint32(c_struct.min_cpus)
go_struct.Max_cpus = uint32(c_struct.max_cpus)
go_struct.Min_nodes = uint32(c_struct.min_nodes)
go_struct.Max_nodes = uint32(c_struct.max_nodes)
go_struct.Boards_per_node = uint16(c_struct.boards_per_node)
go_struct.Sockets_per_board = uint16(c_struct.sockets_per_board)
go_struct.Sockets_per_node = uint16(c_struct.sockets_per_node)
go_struct.Cores_per_socket = uint16(c_struct.cores_per_socket)
go_struct.Threads_per_core = uint16(c_struct.threads_per_core)
go_struct.Ntasks_per_node = uint16(c_struct.ntasks_per_node)
go_struct.Ntasks_per_socket = uint16(c_struct.ntasks_per_socket)
go_struct.Ntasks_per_core = uint16(c_struct.ntasks_per_core)
go_struct.Ntasks_per_board = uint16(c_struct.ntasks_per_board)
go_struct.Pn_min_cpus = uint16(c_struct.pn_min_cpus)
go_struct.Pn_min_memory = uint32(c_struct.pn_min_memory)
go_struct.Pn_min_tmp_disk = uint32(c_struct.pn_min_tmp_disk)
go_struct.Req_switch = uint32(c_struct.req_switch)
go_struct.Std_err = C.GoString(c_struct.std_err)
go_struct.Std_in = C.GoString(c_struct.std_in)
go_struct.Std_out = C.GoString(c_struct.std_out)
go_struct.Wait4switch = uint32(c_struct.wait4switch)
go_struct.Wckey = C.GoString(c_struct.wckey)
return go_struct
}
func submit_response_msg_convert_c_to_go(c_struct *C.struct_submit_response_msg) Submit_response_msg {
var go_struct Submit_response_msg
go_struct.Job_id = uint32(c_struct.job_id)
go_struct.Step_id = uint32(c_struct.step_id)
go_struct.Error_code = uint32(c_struct.error_code)
return go_struct
}

View File

@@ -0,0 +1,94 @@
package slurm
/*
#cgo LDFLAGS: -lslurm
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <slurm/slurm.h>
#include <slurm/slurm_errno.h>
struct job_info_msg *get_job_info(){
struct job_info_msg* job_buffer;
if(slurm_load_jobs ((time_t) NULL,
&job_buffer, SHOW_ALL)) {
return NULL;
}
return job_buffer;
}
struct job_info* job_from_list(struct job_info_msg *list, int i){
return &list->job_array[i];
}
struct job_info_msg *get_single_job_info(uint32_t id){
struct job_info_msg* job_buffer;
if( slurm_load_job (&job_buffer, id, SHOW_DETAIL)) {
return NULL;
}
return job_buffer;
}
//static time_t last_update_time = (time_t) NULL;
//int error_code;
//job_info_msg_t * job_info_msg_ptr = NULL;
//
//error_code = slurm_load_jobs (last_update_time, &job_info_msg_ptr, 1);
//if (error_code) {
// slurm_perror ("slurm_load_jobs");
// return (error_code);
//}
//
//slurm_print_job_info_msg ( stdout, job_info_msg_ptr, 1 ) ;
//
//slurm_free_job_info_msg ( job_info_msg_ptr ) ;
*/
import "C"
func Get_all_jobs() Job_info_msg {
var go_job_buffer Job_info_msg
c_job_buffer := C.get_job_info()
if c_job_buffer == nil {
go_job_buffer.Last_update = int64(0)
go_job_buffer.Record_count = uint32(0)
go_job_buffer.Job_list = nil
return go_job_buffer
}
go_job_buffer.Last_update = int64(c_job_buffer.last_update)
go_job_buffer.Record_count = uint32(c_job_buffer.record_count)
go_job_buffer.Job_list = make([]Job_info, c_job_buffer.record_count, c_job_buffer.record_count)
for i := uint32(0); i < go_job_buffer.Record_count; i++ {
job := C.job_from_list(c_job_buffer, C.int(i))
go_job := Job_info_convert_c_to_go(job)
go_job_buffer.Job_list[i] = go_job
}
C.slurm_free_job_info_msg(c_job_buffer)
return go_job_buffer
}
func Get_job(id uint32) Job_info_msg {
var go_job_buffer Job_info_msg
c_job_buffer := C.get_single_job_info(C.uint32_t(id))
if c_job_buffer == nil {
go_job_buffer.Last_update = int64(0)
go_job_buffer.Record_count = uint32(0)
go_job_buffer.Job_list = nil
return go_job_buffer
}
go_job_buffer.Last_update = int64(c_job_buffer.last_update)
go_job_buffer.Record_count = uint32(c_job_buffer.record_count)
go_job_buffer.Job_list = make([]Job_info, c_job_buffer.record_count, c_job_buffer.record_count)
for i := uint32(0); i < go_job_buffer.Record_count; i++ {
job := C.job_from_list(c_job_buffer, C.int(i))
go_job := Job_info_convert_c_to_go(job)
go_job_buffer.Job_list[i] = go_job
}
C.slurm_free_job_info_msg(c_job_buffer)
return go_job_buffer
}
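
Usage note (not part of this commit): a minimal sketch of consuming Get_all_jobs. Because the Job_info fields are unexported, the helper is assumed to live inside this slurm package; PrintQueue is a hypothetical name.
package slurm

import "fmt"

// PrintQueue lists every job returned by the controller. An empty Job_list
// means either an empty queue or a failed slurm_load_jobs call, since
// Get_all_jobs does not distinguish the two cases.
func PrintQueue() {
	jobs := Get_all_jobs()
	fmt.Printf("last_update=%d record_count=%d\n", jobs.Last_update, jobs.Record_count)
	for _, job := range jobs.Job_list {
		fmt.Printf("job %d (%s) state=%d nodes=%s\n", job.job_id, job.name, job.job_state, job.nodes)
	}
}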

View File

@@ -0,0 +1,367 @@
package slurm
/*
#cgo LDFLAGS: -lslurm
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <slurm/slurm.h>
#include <slurm/slurm_errno.h>
struct submit_response_msg *submit_job(struct job_descriptor *desc)
{
struct submit_response_msg *resp_msg;
if (slurm_submit_batch_job(desc,
&resp_msg)) {
return NULL;
}
return resp_msg;
}
void free_submit_response_msg(struct submit_response_msg *msg)
{
slurm_free_submit_response_response_msg(msg);
}
*/
import "C"
import "unsafe"
func Submit_job(go_struct *Job_descriptor) Submit_response_msg {
var c_struct C.struct_job_descriptor
C.slurm_init_job_desc_msg(&c_struct)
if go_struct.Account != "" {
account_s := C.CString(go_struct.Account)
defer C.free(unsafe.Pointer(account_s))
c_struct.account = account_s
}
if go_struct.Acctg_freq != "" {
acctg_freq_s := C.CString(go_struct.Acctg_freq)
defer C.free(unsafe.Pointer(acctg_freq_s))
c_struct.acctg_freq = acctg_freq_s
}
if go_struct.Alloc_node != "" {
alloc_node_s := C.CString(go_struct.Alloc_node)
defer C.free(unsafe.Pointer(alloc_node_s))
c_struct.alloc_node = alloc_node_s
}
if go_struct.Alloc_resp_port != 0 {
c_struct.alloc_resp_port = C.uint16_t(go_struct.Alloc_resp_port)
}
if go_struct.Alloc_sid != 0 {
c_struct.alloc_sid = C.uint32_t(go_struct.Alloc_sid)
}
if len(go_struct.Argv) > 0 {
c_struct.argc = C.uint32_t(len(go_struct.Argv))
cArray := C.malloc(C.size_t(C.size_t(len(go_struct.Argv)) * C.size_t(unsafe.Sizeof(uintptr(0)))))
a := (*[1<<30 - 1]*C.char)(cArray)
for i := 0; i < len(go_struct.Argv); i++ {
a[i] = C.CString(go_struct.Argv[i])
defer C.free(unsafe.Pointer(a[i]))
}
c_struct.argv = (**C.char)(cArray)
}
if go_struct.Array_inx != "" {
array_inx_s := C.CString(go_struct.Array_inx)
defer C.free(unsafe.Pointer(array_inx_s))
c_struct.array_inx = array_inx_s
}
if go_struct.Begin_time != 0 {
c_struct.begin_time = C.int64_t(go_struct.Begin_time)
}
if go_struct.Ckpt_interval != 0 {
c_struct.ckpt_interval = C.uint16_t(go_struct.Ckpt_interval)
}
if go_struct.Ckpt_dir != "" {
ckpt_dir_s := C.CString(go_struct.Ckpt_dir)
defer C.free(unsafe.Pointer(ckpt_dir_s))
c_struct.ckpt_dir = ckpt_dir_s
}
if go_struct.Comment != "" {
comment_s := C.CString(go_struct.Comment)
defer C.free(unsafe.Pointer(comment_s))
c_struct.comment = comment_s
}
if go_struct.Contiguous != 0 {
c_struct.contiguous = C.uint16_t(go_struct.Contiguous)
}
if go_struct.Cpu_bind != "" {
cpu_bind_s := C.CString(go_struct.Cpu_bind)
defer C.free(unsafe.Pointer(cpu_bind_s))
c_struct.cpu_bind = cpu_bind_s
}
if go_struct.Cpu_bind_type != 0 {
c_struct.cpu_bind_type = C.uint16_t(go_struct.Cpu_bind_type)
}
if go_struct.Dependency != "" {
dependency_s := C.CString(go_struct.Dependency)
defer C.free(unsafe.Pointer(dependency_s))
c_struct.dependency = dependency_s
}
if go_struct.End_time != 0 {
c_struct.end_time = C.int64_t(go_struct.End_time)
}
if len(go_struct.Environment) > 0 {
c_struct.env_size = C.uint32_t(len(go_struct.Environment))
cArray := C.malloc(C.size_t(C.size_t(len(go_struct.Environment)) * C.size_t(unsafe.Sizeof(uintptr(0)))))
a := (*[1<<30 - 1]*C.char)(cArray)
for i := 0; i < len(go_struct.Environment); i++ {
a[i] = C.CString(go_struct.Environment[i])
defer C.free(unsafe.Pointer(a[i]))
}
c_struct.environment = (**C.char)(cArray)
} else {
c_struct.env_size = 1
cArray := C.malloc(C.size_t(C.size_t(1) * C.size_t(unsafe.Sizeof(uintptr(0)))))
a := (*[1<<30 - 1]*C.char)(cArray)
a[0] = C.CString("SLURM_GO_JOB=TRUE")
defer C.free(unsafe.Pointer(a[0]))
c_struct.environment = (**C.char)(cArray)
}
if go_struct.Exc_nodes != "" {
exc_nodes_s := C.CString(go_struct.Exc_nodes)
defer C.free(unsafe.Pointer(exc_nodes_s))
c_struct.exc_nodes = exc_nodes_s
}
if go_struct.Features != "" {
features_s := C.CString(go_struct.Features)
defer C.free(unsafe.Pointer(features_s))
c_struct.features = features_s
}
if go_struct.Group_id != 0 {
c_struct.group_id = C.uint32_t(go_struct.Group_id)
}
if go_struct.Immediate != 0 {
c_struct.immediate = C.uint16_t(go_struct.Immediate)
}
if go_struct.Job_id != 0 {
c_struct.job_id = C.uint32_t(go_struct.Job_id)
}
if go_struct.Kill_on_node_fail != 0 {
c_struct.kill_on_node_fail = C.uint16_t(go_struct.Kill_on_node_fail)
}
if go_struct.Licenses != "" {
licenses_s := C.CString(go_struct.Licenses)
defer C.free(unsafe.Pointer(licenses_s))
c_struct.licenses = licenses_s
}
if go_struct.Mail_type != 0 {
c_struct.mail_type = C.uint16_t(go_struct.Mail_type)
}
if go_struct.Mail_user != "" {
mail_user_s := C.CString(go_struct.Mail_user)
defer C.free(unsafe.Pointer(mail_user_s))
c_struct.mail_user = mail_user_s
}
if go_struct.Mem_bind != "" {
mem_bind_s := C.CString(go_struct.Mem_bind)
defer C.free(unsafe.Pointer(mem_bind_s))
c_struct.mem_bind = mem_bind_s
}
if go_struct.Mem_bind_type != 0 {
c_struct.mem_bind_type = C.uint16_t(go_struct.Mem_bind_type)
}
if go_struct.Name != "" {
name_s := C.CString(go_struct.Name)
defer C.free(unsafe.Pointer(name_s))
c_struct.name = name_s
}
if go_struct.Network != "" {
network_s := C.CString(go_struct.Network)
defer C.free(unsafe.Pointer(network_s))
c_struct.network = network_s
}
if go_struct.Nice != 0 {
c_struct.nice = C.uint16_t(go_struct.Nice)
}
if go_struct.Num_tasks != 0 {
c_struct.num_tasks = C.uint32_t(go_struct.Num_tasks)
}
if go_struct.Open_mode != 0 {
c_struct.open_mode = C.uint8_t(go_struct.Open_mode)
}
if go_struct.Other_port != 0 {
c_struct.other_port = C.uint16_t(go_struct.Other_port)
}
if go_struct.Overcommit != 0 {
c_struct.overcommit = C.uint8_t(go_struct.Overcommit)
}
if go_struct.Partition != "" {
partition_s := C.CString(go_struct.Partition)
defer C.free(unsafe.Pointer(partition_s))
c_struct.partition = partition_s
}
if go_struct.Plane_size != 0 {
c_struct.plane_size = C.uint16_t(go_struct.Plane_size)
}
if go_struct.Priority != 0 {
c_struct.priority = C.uint32_t(go_struct.Priority)
}
if go_struct.Profile != 0 {
c_struct.profile = C.uint32_t(go_struct.Profile)
}
if go_struct.Qos != "" {
qos_s := C.CString(go_struct.Qos)
defer C.free(unsafe.Pointer(qos_s))
c_struct.qos = qos_s
}
if go_struct.Reboot != 0 {
c_struct.reboot = C.uint16_t(go_struct.Reboot)
}
if go_struct.Resp_host != "" {
resp_host_s := C.CString(go_struct.Resp_host)
defer C.free(unsafe.Pointer(resp_host_s))
c_struct.resp_host = resp_host_s
}
if go_struct.Req_nodes != "" {
req_nodes_s := C.CString(go_struct.Req_nodes)
defer C.free(unsafe.Pointer(req_nodes_s))
c_struct.req_nodes = req_nodes_s
}
if go_struct.Requeue != 0 {
c_struct.requeue = C.uint16_t(go_struct.Requeue)
}
if go_struct.Reservation != "" {
reservation_s := C.CString(go_struct.Reservation)
defer C.free(unsafe.Pointer(reservation_s))
c_struct.reservation = reservation_s
}
if go_struct.Script != "" {
script_s := C.CString(go_struct.Script)
defer C.free(unsafe.Pointer(script_s))
c_struct.script = script_s
}
if go_struct.Shared != 0 {
c_struct.shared = C.uint16_t(go_struct.Shared)
}
if go_struct.Spank_job_env_size != 0 {
c_struct.spank_job_env_size = C.uint32_t(go_struct.Spank_job_env_size)
}
if go_struct.Task_dist != 0 {
c_struct.task_dist = C.uint16_t(go_struct.Task_dist)
}
if go_struct.Time_limit != 0 {
c_struct.time_limit = C.uint32_t(go_struct.Time_limit)
}
if go_struct.Time_min != 0 {
c_struct.time_min = C.uint32_t(go_struct.Time_min)
}
//if go_struct.User_id != 0 {
// c_struct.user_id = C.uint32_t(go_struct.User_id)
//}
c_struct.user_id = C.uint32_t(go_struct.User_id)
if go_struct.Wait_all_nodes != 0 {
c_struct.wait_all_nodes = C.uint16_t(go_struct.Wait_all_nodes)
}
if go_struct.Warn_signal != 0 {
c_struct.warn_signal = C.uint16_t(go_struct.Warn_signal)
}
if go_struct.Warn_time != 0 {
c_struct.warn_time = C.uint16_t(go_struct.Warn_time)
}
if go_struct.Work_dir != "" {
work_dir_s := C.CString(go_struct.Work_dir)
defer C.free(unsafe.Pointer(work_dir_s))
c_struct.work_dir = work_dir_s
}
if go_struct.Cpus_per_task != 0 {
c_struct.cpus_per_task = C.uint16_t(go_struct.Cpus_per_task)
}
if go_struct.Min_cpus != 0 {
c_struct.min_cpus = C.uint32_t(go_struct.Min_cpus)
}
if go_struct.Max_cpus != 0 {
c_struct.max_cpus = C.uint32_t(go_struct.Max_cpus)
}
if go_struct.Min_nodes != 0 {
c_struct.min_nodes = C.uint32_t(go_struct.Min_nodes)
}
if go_struct.Max_nodes != 0 {
c_struct.max_nodes = C.uint32_t(go_struct.Max_nodes)
}
if go_struct.Boards_per_node != 0 {
c_struct.boards_per_node = C.uint16_t(go_struct.Boards_per_node)
}
if go_struct.Sockets_per_board != 0 {
c_struct.sockets_per_board = C.uint16_t(go_struct.Sockets_per_board)
}
if go_struct.Sockets_per_node != 0 {
c_struct.sockets_per_node = C.uint16_t(go_struct.Sockets_per_node)
}
if go_struct.Cores_per_socket != 0 {
c_struct.cores_per_socket = C.uint16_t(go_struct.Cores_per_socket)
}
if go_struct.Threads_per_core != 0 {
c_struct.threads_per_core = C.uint16_t(go_struct.Threads_per_core)
}
if go_struct.Ntasks_per_node != 0 {
c_struct.ntasks_per_node = C.uint16_t(go_struct.Ntasks_per_node)
}
if go_struct.Ntasks_per_socket != 0 {
c_struct.ntasks_per_socket = C.uint16_t(go_struct.Ntasks_per_socket)
}
if go_struct.Ntasks_per_core != 0 {
c_struct.ntasks_per_core = C.uint16_t(go_struct.Ntasks_per_core)
}
if go_struct.Ntasks_per_board != 0 {
c_struct.ntasks_per_board = C.uint16_t(go_struct.Ntasks_per_board)
}
if go_struct.Pn_min_cpus != 0 {
c_struct.pn_min_cpus = C.uint16_t(go_struct.Pn_min_cpus)
}
if go_struct.Pn_min_memory != 0 {
c_struct.pn_min_memory = C.uint32_t(go_struct.Pn_min_memory)
}
if go_struct.Pn_min_tmp_disk != 0 {
c_struct.pn_min_tmp_disk = C.uint32_t(go_struct.Pn_min_tmp_disk)
}
if go_struct.Req_switch != 0 {
c_struct.req_switch = C.uint32_t(go_struct.Req_switch)
}
if go_struct.Std_err != "" {
std_err_s := C.CString(go_struct.Std_err)
defer C.free(unsafe.Pointer(std_err_s))
c_struct.std_err = std_err_s
}
if go_struct.Std_in != "" {
std_in_s := C.CString(go_struct.Std_in)
defer C.free(unsafe.Pointer(std_in_s))
c_struct.std_in = std_in_s
}
if go_struct.Std_out != "" {
std_out_s := C.CString(go_struct.Std_out)
defer C.free(unsafe.Pointer(std_out_s))
c_struct.std_out = std_out_s
}
if go_struct.Wait4switch != 0 {
c_struct.wait4switch = C.uint32_t(go_struct.Wait4switch)
}
if go_struct.Wckey != "" {
wckey_s := C.CString(go_struct.Wckey)
defer C.free(unsafe.Pointer(wckey_s))
c_struct.wckey = wckey_s
}
c_msg := C.submit_job(&c_struct)
if c_msg == nil {
go_msg := Submit_response_msg{}
go_msg.Job_id = 1<<31 - 1
go_msg.Error_code = uint32(C.slurm_get_errno())
return go_msg
}
defer C.free_submit_response_msg(c_msg)
go_msg := submit_response_msg_convert_c_to_go(c_msg)
return go_msg
}
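
Usage note (not part of this commit): a minimal sketch of a batch submission through Submit_job. It assumes the submitting UID is acceptable to slurmctld and that the cluster does not require an explicit Group_id; SubmitHello and its script are illustrative only.
package slurm

import (
	"fmt"
	"os"
)

// SubmitHello fills the fields a batch job usually needs: a script with a
// shebang, a working directory, the submitting user's UID and a node count.
func SubmitHello() {
	desc := Job_descriptor{
		Name:       "hello",
		Script:     "#!/bin/bash\nsrun hostname\n",
		Work_dir:   "/tmp",
		User_id:    uint32(os.Getuid()),
		Min_nodes:  1,
		Time_limit: 5, // minutes
	}
	resp := Submit_job(&desc)
	if resp.Error_code != 0 {
		fmt.Printf("submit failed: %s\n", GetErrorString(resp.Error_code))
		return
	}
	fmt.Printf("submitted job %d\n", resp.Job_id)
}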

View File

@@ -0,0 +1,254 @@
package slurm
type Job_descriptor struct {
Account string /* charge to specified account */
Acctg_freq string /* accounting polling intervals (seconds) */
Alloc_node string /* node making resource allocation request
* NOTE: Normally set by slurm_submit* or
* slurm_allocate* function */
Alloc_resp_port uint16 /* port to send allocation confirmation to */
Alloc_sid uint32 /* local sid making resource allocation request
* NOTE: Normally set by slurm_submit* or
* slurm_allocate* function
* NOTE: Also used for update flags, see
* ALLOC_SID_* flags */
Argc uint32 /* number of arguments to the script */
Argv []string /* arguments to the script */
Array_inx string /* job array index values */
//void *array_bitmap; /* NOTE: Set by slurmctld */
Begin_time int64 /* delay initiation until this time */
Ckpt_interval uint16 /* periodically checkpoint this job */
Ckpt_dir string /* directory to store checkpoint images */
Comment string /* arbitrary comment (used by Moab scheduler) */
Contiguous uint16 /* 1 if job requires contiguous nodes,
* 0 otherwise,default=0 */
Cpu_bind string /* binding map for map/mask_cpu */
Cpu_bind_type uint16 /* see cpu_bind_type_t */
Dependency string /* synchronize job execution with other jobs */
End_time int64 /* time by which job must complete, used for
* job update only now, possible deadline
* scheduling in the future */
Environment []string /* environment variables to set for job,
* name=value pairs, one per line */
Env_size uint32 /* element count in environment */
Exc_nodes string /* comma separated list of nodes excluded
* from job's allocation, default NONE */
Features string /* comma separated list of required features,
* default NONE */
Gres string /* comma separated list of required generic
* resources, default NONE */
Group_id uint32 /* group to assume, if run as root. */
Immediate uint16 /* 1 if allocate to run or fail immediately,
* 0 if to be queued awaiting resources */
Job_id uint32 /* job ID, default set by SLURM */
Kill_on_node_fail uint16 /* 1 if node failure to kill job,
* 0 otherwise,default=1 */
Licenses string /* licenses required by the job */
Mail_type uint16 /* see MAIL_JOB_ definitions above */
Mail_user string /* user to receive notification */
Mem_bind string /* binding map for map/mask_cpu */
Mem_bind_type uint16 /* see mem_bind_type_t */
Name string /* name of the job, default "" */
Network string /* network use spec */
Nice uint16 /* requested priority change,
* NICE_OFFSET == no change */
Num_tasks uint32 /* number of tasks to be started,
* for batch only */
Open_mode uint8 /* out/err open mode truncate or append,
* see OPEN_MODE_* */
Other_port uint16 /* port to send various notification msg to */
Overcommit uint8 /* over subscribe resources, for batch only */
Partition string /* name of requested partition,
* default in SLURM config */
Plane_size uint16 /* plane size when task_dist =
SLURM_DIST_PLANE */
Priority uint32 /* relative priority of the job,
* explicitly set only for user root,
* 0 == held (don't initiate) */
Profile uint32 /* Level of acct_gather_profile {all | none} */
Qos string /* Quality of Service */
Resp_host string /* NOTE: Set by slurmctld */
Req_nodes string /* comma separated list of required nodes
* default NONE */
Requeue uint16 /* enable or disable job requeue option */
Reservation string /* name of reservation to use */
Script string /* the actual job script, default NONE */
Shared uint16 /* 1 if job can share nodes with other jobs,
* 0 if job needs exclusive access to the node,
* or NO_VAL to accept the system default.
* SHARED_FORCE to eliminate user control. */
//char **spank_job_env; environment variables for job prolog/epilog
// * scripts as set by SPANK plugins
Spank_job_env_size uint32 /* element count in spank_env */
Task_dist uint16 /* see enum task_dist_state */
Time_limit uint32 /* maximum run time in minutes, default is
* partition limit */
Time_min uint32 /* minimum run time in minutes, default is
* time_limit */
User_id uint32 /* set only if different from current UID,
* can only be explicitly set by user root */
Wait_all_nodes uint16 /* 0 to start job immediately after allocation
* 1 to start job after all nodes booted
* or NO_VAL to use system default */
Warn_signal uint16 /* signal to send when approaching end time */
Warn_time uint16 /* time before end to send signal (seconds) */
Work_dir string /* pathname of working directory */
/* job constraints: */
Cpus_per_task uint16 /* number of processors required for
* each task */
Min_cpus uint32 /* minimum number of processors required,
* default=0 */
Max_cpus uint32 /* maximum number of processors required,
* default=0 */
Min_nodes uint32 /* minimum number of nodes required by job,
* default=0 */
Max_nodes uint32 /* maximum number of nodes usable by job,
* default=0 */
Boards_per_node uint16 /* boards per node required by job */
Sockets_per_board uint16 /* sockets per board required by job */
Sockets_per_node uint16 /* sockets per node required by job */
Cores_per_socket uint16 /* cores per socket required by job */
Threads_per_core uint16 /* threads per core required by job */
Ntasks_per_node uint16 /* number of tasks to invoke on each node */
Ntasks_per_socket uint16 /* number of tasks to invoke on
* each socket */
Ntasks_per_core uint16 /* number of tasks to invoke on each core */
Ntasks_per_board uint16 /* number of tasks to invoke on each board */
Pn_min_cpus uint16 /* minimum # CPUs per node, default=0 */
Pn_min_memory uint32 /* minimum real memory per node OR
* real memory per CPU | MEM_PER_CPU,
* default=0 (no limit) */
Pn_min_tmp_disk uint32 /* minimum tmp disk per node,
* default=0 */
/*
* The following parameters are only meaningful on a Blue Gene
* system at present. Some will be of value on other systems. Don't remove these
* they are needed for LCRM and others that can't talk to the opaque data type
* select_jobinfo.
*/
//uint16_t geometry[HIGHEST_DIMENSIONS]; node count in various
// * dimensions, e.g. X, Y, and Z
//uint16_t conn_type[HIGHEST_DIMENSIONS]; see enum connection_type
Reboot uint16 /* force node reboot before startup */
Rotate uint16 /* permit geometry rotation if set */
//char *blrtsimage; /* BlrtsImage for block */
//char *linuximage; /* LinuxImage for block */
//char *mloaderimage; /* MloaderImage for block */
//char *ramdiskimage; /* RamDiskImage for block */
/* End of Blue Gene specific values */
Req_switch uint32 /* Minimum number of switches */
//dynamic_plugin_data_t *select_jobinfo; /* opaque data type,
// * SLURM internal use only */
Std_err string /* pathname of stderr */
Std_in string /* pathname of stdin */
Std_out string /* pathname of stdout */
Wait4switch uint32 /* Maximum time to wait for minimum switches */
Wckey string /* wckey for job */
}
type Submit_response_msg struct {
Job_id uint32
Step_id uint32
Error_code uint32
}
type Job_info struct {
account string /* charge to specified account */
alloc_node string /* local node making resource alloc */
alloc_sid uint32 /* local sid making resource alloc */
array_job_id uint32 /* job_id of a job array or 0 if N/A */
array_task_id uint16 /* task_id of a job array */
assoc_id uint32 /* association id for job */
batch_flag uint16 /* 1 if batch: queued job with script */
batch_host string /* name of host running batch script */
batch_script string /* contents of batch script */
command string /* command to be executed, built from submitted
* job's argv and NULL for salloc command */
comment string /* arbitrary comment (used by Moab scheduler) */
contiguous uint16 /* 1 if job requires contiguous nodes */
cpus_per_task uint16 /* number of processors required for
* each task */
dependency string /* synchronize job execution with other jobs */
derived_ec uint32 /* highest exit code of all job steps */
eligible_time int64 /* time job is eligible for running */
end_time int64 /* time of termination, actual or expected */
exc_nodes string /* comma separated list of excluded nodes */
exc_node_inx int32 /* excluded list index pairs into node_table:
* start_range_1, end_range_1,
* start_range_2, .., -1 */
exit_code uint32 /* exit code for job (status from wait call) */
features string /* comma separated list of required features */
gres string /* comma separated list of generic resources */
group_id uint32 /* group job submitted as */
job_id uint32 /* job ID */
job_state uint16 /* state of the job, see enum job_states */
licenses string /* licenses required by the job */
max_cpus uint32 /* maximum number of cpus usable by job */
max_nodes uint32 /* maximum number of nodes usable by job */
boards_per_node uint16 /* boards per node required by job */
sockets_per_board uint16 /* sockets per board required by job */
sockets_per_node uint16 /* sockets per node required by job */
cores_per_socket uint16 /* cores per socket required by job */
threads_per_core uint16 /* threads per core required by job */
name string /* name of the job */
network string /* network specification */
nodes string /* list of nodes allocated to job */
nice uint16 /* requested priority change */
node_inx int32 /* list index pairs into node_table for *nodes:
* start_range_1, end_range_1,
* start_range_2, .., -1 */
ntasks_per_core uint16 /* number of tasks to invoke on each core */
ntasks_per_node uint16 /* number of tasks to invoke on each node */
ntasks_per_socket uint16 /* number of tasks to invoke on each socket*/
ntasks_per_board uint16 /* number of tasks to invoke on each board */
num_nodes uint32 /* minimum number of nodes required by job */
num_cpus uint32 /* minimum number of cpus required by job */
partition string /* name of assigned partition */
pn_min_memory uint32 /* minimum real memory per node, default=0 */
pn_min_cpus uint16 /* minimum # CPUs per node, default=0 */
pn_min_tmp_disk uint32 /* minimum tmp disk per node, default=0 */
pre_sus_time int64 /* time job ran prior to last suspend */
priority uint32 /* relative priority of the job,
* 0=held, 1=required nodes DOWN/DRAINED */
profile uint32 /* Level of acct_gather_profile {all | none} */
qos string /* Quality of Service */
req_nodes string /* comma separated list of required nodes */
req_node_inx int32 /* required list index pairs into node_table:
* start_range_1, end_range_1,
* start_range_2, .., -1 */
req_switch uint32 /* Minimum number of switches */
requeue uint16 /* enable or disable job requeue option */
resize_time int64 /* time of latest size change */
restart_cnt uint16 /* count of job restarts */
resv_name string /* reservation name */
/*dynamic_plugin_data_t *select_jobinfo;*/ /* opaque data type,
* process using
* slurm_get_select_jobinfo()
*/
/*job_resources_t *job_resrcs;*/ /* opaque data type, job resources */
shared uint16 /* 1 if job can share nodes with other jobs */
show_flags uint16 /* conveys level of details requested */
start_time int64 /* time execution begins, actual or expected */
state_desc string /* optional details for state_reason */
state_reason uint16 /* reason job still pending or failed, see
* slurm.h:enum job_state_reason */
submit_time int64 /* time of job submission */
suspend_time int64 /* time job last suspended or resumed */
time_limit uint32 /* maximum run time in minutes or INFINITE */
time_min uint32 /* minimum run time in minutes or INFINITE */
user_id uint32 /* user the job runs as */
preempt_time int64 /* preemption signal time */
wait4switch uint32 /* Maximum time to wait for minimum switches */
wckey string /* wckey for job */
work_dir string /* pathname of working directory */
}
type Job_info_msg struct {
Last_update int64
Record_count uint32
Job_list []Job_info
}
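
Usage note (not part of this commit): a minimal sketch built only on the exported types above and the Get_job wrapper; JobExists is a hypothetical helper name.
package slurm

// JobExists reports whether the controller knows the given job id.
// Get_job leaves Record_count at zero both for an unknown id and for a
// failed slurm_load_job call, so a non-zero count is the only positive
// signal available here.
func JobExists(id uint32) bool {
	return Get_job(id).Record_count > 0
}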