Merge remote-tracking branch 'origin/2.0' into 2.0

qiwang 2023-06-12 20:32:26 +08:00
commit ce44f191c5
5 changed files with 106 additions and 29 deletions

View File

@@ -65,6 +65,7 @@ func submitJob(infoList *pcmcoreclient.InfoListResp, submitJobLogic *CreateTrain
for index := range infoList.AiInfoList {
if infoList.AiInfoList[index].Status == "Saved" {
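// build a ModelArts training-job request for tasks that are still in "Saved" state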
submitReq := modelarts.CreateTrainingJobReq{
ModelArtsType: "cn-north-4.myhuawei",
Kind: "job",
ProjectId: "0a62ffb0d48026c12fbfc011b8d23f0b",
Metadata: &modelarts.MetadataS{

View File

@@ -4,7 +4,7 @@ NacosConfig:
ServerConfigs:
# - IpAddr: 127.0.0.1
# Port: 8848
- IpAddr: nacos.jcce.dev
- IpAddr: 10.101.15.7
Port: 8848
ClientConfig:
NamespaceId: test

View File

@@ -3,10 +3,15 @@ package logic
import (
"PCM/adaptor/PCM-HPC/PCM-AC/rpc/hpcAC"
"PCM/adaptor/PCM-HPC/PCM-AC/rpc/internal/svc"
"PCM/adaptor/PCM-HPC/PCM-AC/rpc/internal/util"
"context"
"github.com/bitly/go-simplejson"
"github.com/zeromicro/go-zero/core/logx"
"io"
"log"
"net/http"
"net/url"
"strconv"
"time"
)
type ListHistoryJobLogic struct {
@@ -26,31 +31,99 @@ func NewListHistoryJobLogic(ctx context.Context, svcCtx *svc.ServiceContext) *Li
// ListHistoryJob list all history jobs
func (l *ListHistoryJobLogic) ListHistoryJob(in *hpcAC.ListHistoryJobReq) (*hpcAC.ListHistoryJobResp, error) {
var resp hpcAC.ListHistoryJobResp
historyJobUrl := "hpc/openapi/v2/historyjobs?"
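// fetch an AC OpenAPI token first; it is sent in the "token" header of the history-jobs request below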
getTokenLogic := NewGetACTokenLogic(l.ctx, l.svcCtx)
tokenResp, _ := getTokenLogic.GetACToken(&hpcAC.ACTokenReq{})
token := tokenResp.GetData().Token
url := "hpc/openapi/v2/historyjobs"
resp := hpcAC.ListHistoryJobResp{}
params := map[string]string{
"strClusterNameList": in.StrClusterNameList,
"startTime": in.StartTime,
"endTime": in.EndTime,
"timeType": in.TimeType,
"queue": in.Queue,
"appType": in.AppType,
"sort": in.Sort,
"orderBy": in.OrderBy,
"jobId": in.JobId,
"jobState": in.JobState,
"hostName": in.HostName,
"strUser": in.StrUser,
"jobName": in.JobName,
"start": strconv.Itoa(int(in.Start)),
"limit": strconv.Itoa(int(in.Limit)),
"isQueryByQueueTime": in.IsQueryByQueueTime,
getACClusterIdLogic := NewGetACClusterIdLogic(l.ctx, l.svcCtx)
clusterIdResp, _ := getACClusterIdLogic.GetACClusterId(&hpcAC.ACClusterReq{Token: token})
clusterId := clusterIdResp.GetData().Id
c := http.Client{Timeout: time.Duration(3) * time.Second}
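// build the query string with url.Values so the parameters are properly escaped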
params := url.Values{}
params.Add("strClusterIDList", strconv.FormatInt(clusterId, 10))
params.Add("startTime", in.StartTime)
params.Add("endTime", in.EndTime)
params.Add("timeType", in.TimeType)
params.Add("start", string(in.Start))
params.Add("limit", string(in.Limit))
params.Add("isQueryByQueueTime", in.IsQueryByQueueTime)
reqUrl, err := http.NewRequest("GET", "https://api01.hpccube.com:65106/"+historyJobUrl+params.Encode(), nil)
if err != nil {
log.Println(err)
return nil, err
}
_, _ = util.Get(token, l.svcCtx.Config.ClusterUrl, url, nil, &params, &resp)
reqUrl.Header.Add("token", token)
respUrl, err := c.Do(reqUrl)
if err != nil {
log.Println(err)
return nil, err
}
defer respUrl.Body.Close()
body, err := io.ReadAll(respUrl.Body)
jsonResult, err := simplejson.NewJson(body)
if err != nil {
return nil, err
}
jsonData := jsonResult.Get("data")
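// no matching history jobs: return an empty, successful response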
if jsonData.Get("total").MustInt() == 0 {
resp.Code = "200"
resp.Msg = "success"
return &resp, nil
}
historyJobList := jsonResult.Get("data").Get("list")
rows, err := historyJobList.Array()
if err != nil {
log.Println(err)
return nil, err
}
var Jobs []*hpcAC.HistoryJobList
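// map each record returned by the AC API onto the gRPC HistoryJobList message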
for index := range rows {
jobShuguang := historyJobList.GetIndex(index)
var job hpcAC.HistoryJobList
job.AppType = jobShuguang.Get("appType").MustString()
job.JobId = jobShuguang.Get("jobId").MustString()
job.JobName = jobShuguang.Get("jobName").MustString()
job.JobStartTime = jobShuguang.Get("jobStartTime").MustString()
job.Queue = jobShuguang.Get("queue").MustString()
job.JobState = jobShuguang.Get("jobState").MustString()
job.JobEndTime = jobShuguang.Get("jobEndTime").MustString()
job.JobExecHost = jobShuguang.Get("jobExecHost").MustString()
job.JobWalltimeUsed = jobShuguang.Get("jobWalltimeUsed").MustString()
job.UserName = jobShuguang.Get("userName").MustString()
job.JobExitStatus = int32(jobShuguang.Get("jobExitStatus").MustInt())
job.AcctTime = jobShuguang.Get("acctTime").MustString()
job.JobProcNum = int32(jobShuguang.Get("jobProcNum").MustInt())
job.Nodect = int32(jobShuguang.Get("nodect").MustInt())
job.Workdir = jobShuguang.Get("workdir").MustString()
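// normalize jobStartTime using the layout configured for the Shuguang (AC) adaptor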
startTime, err := time.Parse(l.svcCtx.Config.ShuguangConf.Layout, jobShuguang.Get("jobStartTime").MustString())
if err == nil {
job.JobStartTime = startTime.String()
}
Jobs = append(Jobs, &job)
}
if jsonResult.Get("code").MustInt() == 0 {
resp.Code = "200"
}
resp.Msg = jsonResult.Get("msg").MustString()
resp.Data.List = Jobs
return &resp, nil
}

View File

@@ -68,8 +68,10 @@ func submitJob(infoList *pcmcoreclient.InfoListResp, submitJobLogic *SubmitJobLo
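// build the hpcTH job submission request from the stored HPC task info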
submitReq := hpcTH.SubmitJobReq{
Account: infoList.HpcInfoList[index].Account,
Name: infoList.HpcInfoList[index].Name,
WorkDir: "/root",
Script: infoList.HpcInfoList[index].CmdScript,
UserId: 123,
UserId: 0,
MinNodes: 1,
}
jobResult, _ := submitJobLogic.SubmitJob(&submitReq)
// the job was submitted successfully

View File

@@ -13,4 +13,5 @@ spec:
protocol: TCP
port: 2001
targetPort: 2001
type: ClusterIP
nodePort: 31617
type: NodePort