diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..e570d62e --- /dev/null +++ b/Makefile @@ -0,0 +1,38 @@ +.PHONY: start build + +NOW = $(shell date -u '+%Y%m%d%I%M%S') + +RELEASE_VERSION = 5.1.0 + +APP = n9e +SERVER_BIN = ${APP} +# RELEASE_ROOT = release +# RELEASE_SERVER = release/${APP} +# GIT_COUNT = $(shell git rev-list --all --count) +# GIT_HASH = $(shell git rev-parse --short HEAD) +# RELEASE_TAG = $(RELEASE_VERSION).$(GIT_COUNT).$(GIT_HASH) + +all: build + +build: + @go build -ldflags "-w -s -X main.VERSION=$(RELEASE_VERSION)" -o $(SERVER_BIN) ./src + +# start: +# @go run -ldflags "-X main.VERSION=$(RELEASE_TAG)" ./cmd/${APP}/main.go web -c ./configs/config.toml -m ./configs/model.conf --menu ./configs/menu.yaml + +# swagger: +# @swag init --parseDependency --generalInfo ./cmd/${APP}/main.go --output ./internal/app/swagger + +# wire: +# @wire gen ./internal/app + +# test: +# cd ./internal/app/test && go test -v + +# clean: +# rm -rf data release $(SERVER_BIN) internal/app/test/data cmd/${APP}/data + +# pack: build +# rm -rf $(RELEASE_ROOT) && mkdir -p $(RELEASE_SERVER) +# cp -r $(SERVER_BIN) configs $(RELEASE_SERVER) +# cd $(RELEASE_ROOT) && tar -cvf $(APP).tar ${APP} && rm -rf ${APP} diff --git a/README.md b/README.md index 9a7f568d..de5ad770 100644 --- a/README.md +++ b/README.md @@ -1,10 +1,18 @@ -## 基本信息 - -- 官网:[n9e.didiyun.com](https://n9e.didiyun.com/) 右上角切换版本 -- 招聘:前后端都要,base北京,薪资open,可将简历发至邮箱 `echo cWlueWVuaW5nQGRpZGlnbG9iYWwuY29t | base64 -d` 一起来做开源 - ## 大本营 -微信公众号:`__n9e__`(夜莺监控) +微信公号:`__n9e__`(夜莺监控) 知识星球:夜莺开源社区 +钉钉交流群: + + + +# todo + +- [x] deploy nightingale in docker +- [x] export /metrics endpoint +- [ ] notify.py support feishu +- [ ] notify.py support sms +- [ ] notify.py support voice + + diff --git a/alert/alert.go b/alert/alert.go deleted file mode 100644 index 838835aa..00000000 --- a/alert/alert.go +++ /dev/null @@ -1,9 +0,0 @@ -package alert - -import ( - "context" -) - -func Start(ctx context.Context) { - go popEvent() -} diff --git a/alert/consume.go b/alert/consume.go deleted file mode 100644 index 1c10707e..00000000 --- a/alert/consume.go +++ /dev/null @@ -1,325 +0,0 @@ -package alert - -import ( - "bytes" - "encoding/json" - "fmt" - "os/exec" - "sort" - "strconv" - "strings" - "time" - - "github.com/didi/nightingale/v5/cache" - "github.com/didi/nightingale/v5/config" - "github.com/didi/nightingale/v5/judge" - "github.com/didi/nightingale/v5/models" - - "github.com/toolkits/pkg/concurrent/semaphore" - "github.com/toolkits/pkg/logger" - "github.com/toolkits/pkg/net/httplib" - "github.com/toolkits/pkg/sys" -) - -func popEvent() { - sema := semaphore.NewSemaphore(config.Config.Alert.NotifyScriptConcurrency) - duration := time.Duration(100) * time.Millisecond - for { - events := judge.EventQueue.PopBackBy(200) - if len(events) < 1 { - time.Sleep(duration) - continue - } - consume(events, sema) - } -} - -func consume(events []interface{}, sema *semaphore.Semaphore) { - for i := range events { - if events[i] == nil { - continue - } - - event := events[i].(*models.AlertEvent) - - alertRule, exists := cache.AlertRules.Get(event.RuleId) - if !exists { - logger.Errorf("event_consume: alert rule not found, event:%+v", event) - continue - } - logger.Debugf("[event_consume_success][type:%v][event:%+v]", event.IsPromePull, event) - if isNoneffective(event, alertRule) { - // 告警规则非生效时段 - continue - } - - event.RuleName = alertRule.Name - event.RuleNote = alertRule.Note - event.NotifyChannels = alertRule.NotifyChannels - classpaths := 
cache.ResClasspath.GetValues(event.ResIdent) - sort.Strings(classpaths) - event.ResClasspaths = strings.Join(classpaths, " ") - enrichTag(event, alertRule) - - if isEventMute(event) && event.IsAlert() { - // 被屏蔽的事件 - event.MarkMuted() - - if config.Config.Alert.MutedAlertPersist { - persist(event) - } - - continue - } - - // 操作数据库 - persist(event) - - // 不管是告警还是恢复,都触发回调,接收端自己处理 - if alertRule.Callbacks != "" { - go callback(event, alertRule) - } - - uids := genNotifyUserIDs(alertRule) - if len(uids) == 0 { - logger.Warningf("event_consume: notify users not found, event_hash_id: %s, rule_id: %d, rule_name: %s, res_ident: %s", event.HashId, event.RuleId, event.RuleName, event.ResIdent) - continue - } - - users := cache.UserCache.GetByIds(uids) - if len(users) == 0 { - logger.Warningf("event_consume: notify users not found, event_hash_id: %s, rule_id: %d, rule_name: %s, res_ident: %s", event.HashId, event.RuleId, event.RuleName, event.ResIdent) - continue - } - - alertMsg := AlertMsg{ - Event: event, - Rule: alertRule, - Users: users, - } - - logger.Infof("event_consume: notify alert:%+v", alertMsg) - - sema.Acquire() - go func(alertMsg AlertMsg) { - defer sema.Release() - notify(alertMsg) - }(alertMsg) - } -} - -func genNotifyUserIDs(alertRule *models.AlertRule) []int64 { - uidMap := make(map[int64]struct{}) - - groupIds := strings.Fields(alertRule.NotifyGroups) - for _, groupId := range groupIds { - gid, err := strconv.ParseInt(groupId, 10, 64) - if err != nil { - logger.Warningf("event_consume: strconv groupid(%s) fail: %v", groupId, err) - continue - } - - um, exists := cache.UserGroupMember.Get(gid) - if !exists { - continue - } - - for uid := range um { - uidMap[uid] = struct{}{} - } - } - - userIds := strings.Fields(alertRule.NotifyUsers) - for _, userId := range userIds { - uid, err := strconv.ParseInt(userId, 10, 64) - if err != nil { - logger.Warningf("event_consume: strconv userid(%s) fail: %v", userId, err) - continue - } - - uidMap[uid] = struct{}{} - } - - uids := make([]int64, 0, len(uidMap)) - for uid := range uidMap { - uids = append(uids, uid) - } - - return uids -} - -// 如果是告警,就存库,如果是恢复,就从未恢复的告警表里删除 -func persist(event *models.AlertEvent) { - if event.IsRecov() { - logger.Debugf("[event.Recovery.db.DelByHashId]: delete recovery event:%+v", event) - err := event.DelByHashId() - if err != nil { - logger.Warningf("event_consume: delete recovery event err:%v, event:%+v", err, event) - } - } else { - err := event.Add() - if err != nil { - logger.Warningf("event_consume: insert alert event err:%v, event:%+v", err, event) - } - } - obj := ToHistoryAlertEvent(event) - err := obj.Add() - if err != nil { - logger.Warningf("event_consume: insert history alert event err:%v, event:%+v", err, event) - } -} - -type AlertMsg struct { - Event *models.AlertEvent `json:"event"` - Rule *models.AlertRule `json:"rule"` - Users []*models.User `json:"users"` -} - -func notify(alertMsg AlertMsg) { - //增加并发控制 - bs, err := json.Marshal(alertMsg) - if err != nil { - logger.Errorf("notify: marshal alert %+v err:%v", alertMsg, err) - } - - fpath := config.Config.Alert.NotifyScriptPath - cmd := exec.Command(fpath) - cmd.Stdin = bytes.NewReader(bs) - - // combine stdout and stderr - var buf bytes.Buffer - cmd.Stdout = &buf - cmd.Stderr = &buf - - err = cmd.Start() - if err != nil { - logger.Errorf("notify: run cmd err:%v", err) - return - } - - err, isTimeout := sys.WrapTimeout(cmd, time.Duration(10)*time.Second) - - if isTimeout { - if err == nil { - logger.Errorf("notify: timeout and killed process %s", 
fpath) - } - - if err != nil { - logger.Errorf("notify: kill process %s occur error %v", fpath, err) - } - - return - } - - if err != nil { - logger.Errorf("notify: exec script %s occur error: %v, output: %s", fpath, err, buf.String()) - return - } - - logger.Infof("notify: exec %s output: %s", fpath, buf.String()) -} - -func callback(event *models.AlertEvent, alertRule *models.AlertRule) { - urls := strings.Fields(alertRule.Callbacks) - for _, url := range urls { - if url == "" { - continue - } - - if !(strings.HasPrefix(url, "http://") || strings.HasPrefix(url, "https://")) { - url = "http://" + url - } - - resp, code, err := httplib.PostJSON(url, 5*time.Second, event, map[string]string{}) - if err != nil { - logger.Errorf("callback[%s] fail, callback content: %+v, resp: %s, err: %v, code:%d", url, event, string(resp), err, code) - } else { - logger.Infof("callback[%s] succ, callback content: %+v, resp: %s, code:%d", url, event, string(resp), code) - } - } -} - -func isNoneffective(event *models.AlertEvent, alertRule *models.AlertRule) bool { - // 生效时间过滤 - if alertRule.Status == models.ALERT_RULE_DISABLED { - logger.Debugf("event:%+v alert rule:%+v disable", event, alertRule) - return true - } - - tm := time.Unix(event.TriggerTime, 0) - triggerTime := tm.Format("15:04") - triggerWeek := strconv.Itoa(int(tm.Weekday())) - - if alertRule.EnableStime <= alertRule.EnableEtime { - if triggerTime < alertRule.EnableStime || triggerTime > alertRule.EnableEtime { - logger.Debugf("event:%+v alert rule:%+v triggerTime Noneffective", event, alertRule) - return true - } - } else { - if triggerTime < alertRule.EnableStime && triggerTime > alertRule.EnableEtime { - logger.Debugf("event:%+v alert rule:%+v triggerTime Noneffective", event, alertRule) - return true - } - } - - alertRule.EnableDaysOfWeek = strings.Replace(alertRule.EnableDaysOfWeek, "7", "0", 1) - - if !strings.Contains(alertRule.EnableDaysOfWeek, triggerWeek) { - logger.Debugf("event:%+v alert rule:%+v triggerWeek Noneffective", event, alertRule) - return true - } - - return false -} - -// 事件的tags有多种tags组成:ident作为一个tag,数据本身的tags(前期已经把res的tags也附到数据tags里了)、规则的tags -func enrichTag(event *models.AlertEvent, alertRule *models.AlertRule) { - if event.ResIdent != "" { - event.TagMap["ident"] = event.ResIdent - } - - if alertRule.AppendTags != "" { - appendTags := strings.Fields(alertRule.AppendTags) - for _, tag := range appendTags { - arr := strings.Split(tag, "=") - if len(arr) != 2 { - logger.Warningf("alertRule AppendTags:%+v illagel", alertRule.AppendTags) - continue - } - event.TagMap[arr[0]] = arr[1] - } - } - - var tagList []string - for key, value := range event.TagMap { - tagList = append(tagList, fmt.Sprintf("%s=%s", key, value)) - } - sort.Strings(tagList) - event.Tags = strings.Join(tagList, " ") -} - -func ToHistoryAlertEvent(ae *models.AlertEvent) *models.HistoryAlertEvent { - var obj models.HistoryAlertEvent - obj.RuleId = ae.RuleId - obj.RuleName = ae.RuleName - obj.RuleNote = ae.RuleNote - obj.HashId = ae.HashId - obj.IsPromePull = ae.IsPromePull - obj.ResClasspaths = ae.ResClasspaths - obj.ResIdent = ae.ResIdent - obj.Priority = ae.Priority - obj.Status = ae.Status - obj.IsRecovery = ae.IsRecovery - obj.HistoryPoints = ae.HistoryPoints - obj.TriggerTime = ae.TriggerTime - obj.Values = ae.Values - obj.NotifyChannels = ae.NotifyChannels - obj.NotifyGroups = ae.NotifyGroups - obj.NotifyUsers = ae.NotifyUsers - obj.RunbookUrl = ae.RunbookUrl - obj.ReadableExpression = ae.ReadableExpression - obj.Tags = ae.Tags - obj.NotifyGroupObjs 
= ae.NotifyGroupObjs - obj.NotifyUserObjs = ae.NotifyUserObjs - return &obj -} diff --git a/alert/mute.go b/alert/mute.go deleted file mode 100644 index 380940ce..00000000 --- a/alert/mute.go +++ /dev/null @@ -1,89 +0,0 @@ -package alert - -import ( - "strings" - - "github.com/didi/nightingale/v5/cache" - "github.com/didi/nightingale/v5/models" - "github.com/toolkits/pkg/logger" -) - -func isEventMute(event *models.AlertEvent) bool { - historyPoints, err := event.GetHistoryPoints() - if err != nil { - logger.Errorf("get event HistoryPoints:%+v failed, err: %v", event.HistoryPoints, err) - return false - } - - // 先去匹配一下metric为空的mute - if matchMute("", event.ResIdent, event.TagMap, event.ResClasspaths) { - return true - } - - // 如果是与条件,就会有多个metric,任一个匹配了屏蔽规则都算被屏蔽 - for i := 0; i < len(historyPoints); i++ { - if matchMute(historyPoints[i].Metric, event.ResIdent, event.TagMap, event.ResClasspaths) { - return true - } - } - - resAndTags, exists := cache.ResTags.Get(event.ResIdent) - if exists { - if event.TriggerTime > resAndTags.Resource.MuteBtime && event.TriggerTime < resAndTags.Resource.MuteEtime { - return true - } - } - - return false -} - -func matchMute(metric, ident string, tags map[string]string, classpaths string) bool { - filters, exists := cache.AlertMute.GetByKey(metric) - if !exists { - // 没有屏蔽规则跟这个事件相关 - return false - } - - // 只要有一个屏蔽规则命中,那这个事件就是被屏蔽了 - for _, filter := range filters { - if matchMuteOnce(filter, ident, tags, classpaths) { - return true - } - } - - return false -} - -func matchMuteOnce(filter cache.Filter, ident string, tags map[string]string, classpaths string) bool { - if len(filter.ClasspathPrefix) > 0 && !strings.HasPrefix(classpaths, filter.ClasspathPrefix) && !strings.Contains(classpaths, " "+filter.ClasspathPrefix) { - // 没配置分组屏蔽就不做后续比较 - // 比如事件的资源calsspath为“n9e.mon n9e.rdb ccp.web”,配置屏蔽为n9e.rdb - // 只要字符串前缀为n9e.rdb或者字符串包含“ n9e.rdb”即可判断所有alsspath中是否有前缀为n9e.rdb的 - // 只要有任一点不满足,那这个屏蔽规则也没有继续验证下去的必要 - return false - } - - if filter.ResReg != nil && !filter.ResReg.MatchString(ident) { - // 比如屏蔽规则配置的是:c3-ceph.* - // 当前事件的资源标识是:c4-ceph01.bj - // 只要有任一点不满足,那这个屏蔽规则也没有继续验证下去的必要 - return false - } - - // 每个mute中的tags都得出现在event.tags,否则就是不匹配 - return mapContains(tags, filter.TagsMap) -} - -func mapContains(big, small map[string]string) bool { - for tagk, tagv := range small { - val, exists := big[tagk] - if !exists { - return false - } - - if val != tagv { - return false - } - } - return true -} diff --git a/backend/datasource.go b/backend/datasource.go deleted file mode 100644 index 106c2fa0..00000000 --- a/backend/datasource.go +++ /dev/null @@ -1,89 +0,0 @@ -package backend - -import ( - "fmt" - - "github.com/prometheus/prometheus/promql" - - "github.com/didi/nightingale/v5/vos" - "github.com/toolkits/pkg/container/list" - - pp "github.com/didi/nightingale/v5/backend/prome" -) - -type BackendSection struct { - DataSource string `yaml:"datasource"` - Prometheus pp.PromeSection `yaml:"prometheus"` -} - -type DataSource interface { - PushEndpoint - - QueryData(inputs vos.DataQueryParam) []*vos.DataQueryResp // 查询一段时间 - QueryDataInstant(ql string) []*vos.DataQueryInstanceResp // 查询一个时间点数据 等同于prometheus instant_query - QueryTagKeys(recv vos.CommonTagQueryParam) *vos.TagKeyQueryResp // 获取标签的names - QueryTagValues(recv vos.CommonTagQueryParam) *vos.TagValueQueryResp // 根据一个label_name获取 values - QueryTagPairs(recv vos.CommonTagQueryParam) *vos.TagPairQueryResp // 根据匹配拿到所有 series 上面三个使用统一的结构体 - QueryMetrics(recv vos.MetricQueryParam) *vos.MetricQueryResp // 根据标签查 
metric_names - QueryVector(ql string) promql.Vector // prometheus pull alert 所用,其他数据源留空即可 - CleanUp() // 数据源退出时需要做的清理工作 -} - -type PushEndpoint interface { - Push2Queue(items []*vos.MetricPoint) -} - -var ( - defaultDataSource string - registryDataSources = make(map[string]DataSource) - registryPushEndpoints = make(map[string]PushEndpoint) -) - -func Init(cfg BackendSection) { - defaultDataSource = cfg.DataSource - - // init prometheus - if cfg.Prometheus.Enable { - promeDs := &pp.PromeDataSource{ - Section: cfg.Prometheus, - PushQueue: list.NewSafeListLimited(10240000), - } - promeDs.Init() - RegisterDataSource(cfg.Prometheus.Name, promeDs) - } -} - -// get backend datasource -// (pluginId == "" for default datasource) -func GetDataSourceFor(pluginId string) (DataSource, error) { - if pluginId == "" { - pluginId = defaultDataSource - } - if source, exists := registryDataSources[pluginId]; exists { - return source, nil - } - return nil, fmt.Errorf("could not find datasource for plugin: %s", pluginId) -} - -func DatasourceCleanUp() { - for _, ds := range registryDataSources { - ds.CleanUp() - } -} - -// get all push endpoints -func GetPushEndpoints() ([]PushEndpoint, error) { - if len(registryPushEndpoints) > 0 { - items := make([]PushEndpoint, 0, len(registryPushEndpoints)) - for _, value := range registryPushEndpoints { - items = append(items, value) - } - return items, nil - } - return nil, fmt.Errorf("could not find any pushendpoint") -} - -func RegisterDataSource(pluginId string, datasource DataSource) { - registryDataSources[pluginId] = datasource - registryPushEndpoints[pluginId] = datasource -} diff --git a/backend/prome/convert.go b/backend/prome/convert.go deleted file mode 100644 index 089d4938..00000000 --- a/backend/prome/convert.go +++ /dev/null @@ -1,183 +0,0 @@ -package backend - -import ( - "bufio" - "bytes" - "context" - - "io" - "io/ioutil" - "net/http" - "regexp" - "time" - - "github.com/gogo/protobuf/proto" - "github.com/golang/snappy" - "github.com/opentracing-contrib/go-stdlib/nethttp" - "github.com/opentracing/opentracing-go" - "github.com/pkg/errors" - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/prompb" - "github.com/toolkits/pkg/logger" - - "github.com/didi/nightingale/v5/vos" -) - -var MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`) - -type sample struct { - labels labels.Labels - t int64 - v float64 -} - -func labelsToLabelsProto(labels labels.Labels, buf []prompb.Label) []prompb.Label { - result := buf[:0] - if cap(buf) < len(labels) { - result = make([]prompb.Label, 0, len(labels)) - } - for _, l := range labels { - result = append(result, prompb.Label{ - Name: l.Name, - Value: l.Value, - }) - } - return result -} - -func (pd *PromeDataSource) convertOne(item *vos.MetricPoint) (prompb.TimeSeries, error) { - pt := prompb.TimeSeries{} - pt.Samples = []prompb.Sample{{}} - s := sample{} - s.t = item.Time - s.v = item.Value - // name - if !MetricNameRE.MatchString(item.Metric) { - return pt, errors.New("invalid metrics name") - } - nameLs := labels.Label{ - Name: LABEL_NAME, - Value: item.Metric, - } - s.labels = append(s.labels, nameLs) - if item.Ident != "" { - identLs := labels.Label{ - Name: LABEL_IDENT, - Value: item.Ident, - } - s.labels = append(s.labels, identLs) - } - - for k, v := range item.TagsMap { - if model.LabelNameRE.MatchString(k) { - ls := labels.Label{ - Name: k, - Value: v, - } - s.labels = append(s.labels, ls) - - } - - } - - pt.Labels = 
labelsToLabelsProto(s.labels, pt.Labels) - // 时间赋值问题,使用毫秒时间戳 - tsMs := time.Unix(s.t, 0).UnixNano() / 1e6 - pt.Samples[0].Timestamp = tsMs - pt.Samples[0].Value = s.v - return pt, nil -} - -type RecoverableError struct { - error -} - -func remoteWritePost(c *HttpClient, req []byte) error { - httpReq, err := http.NewRequest("POST", c.url.String(), bytes.NewReader(req)) - if err != nil { - // Errors from NewRequest are from unparsable URLs, so are not - // recoverable. - return err - } - - httpReq.Header.Add("Content-Encoding", "snappy") - httpReq.Header.Set("Content-Type", "application/x-protobuf") - httpReq.Header.Set("User-Agent", "n9e-v5") - httpReq.Header.Set("X-Prometheus-Remote-Write-Version", "0.1.0") - ctx, cancel := context.WithTimeout(context.Background(), c.timeout) - defer cancel() - - httpReq = httpReq.WithContext(ctx) - - if parentSpan := opentracing.SpanFromContext(ctx); parentSpan != nil { - var ht *nethttp.Tracer - httpReq, ht = nethttp.TraceRequest( - parentSpan.Tracer(), - httpReq, - nethttp.OperationName("Remote Store"), - nethttp.ClientTrace(false), - ) - defer ht.Finish() - } - - httpResp, err := c.Client.Do(httpReq) - if err != nil { - // Errors from Client.Do are from (for example) network errors, so are - // recoverable. - return RecoverableError{err} - } - defer func() { - io.Copy(ioutil.Discard, httpResp.Body) - httpResp.Body.Close() - }() - - if httpResp.StatusCode/100 != 2 { - scanner := bufio.NewScanner(io.LimitReader(httpResp.Body, 512)) - line := "" - if scanner.Scan() { - line = scanner.Text() - } - - if httpResp.StatusCode == 400 { - //400的错误是客户端的问题,不返回给上层,输出到debug日志中 - logger.Debugf("server returned HTTP status %s: %s req:%v", httpResp.Status, line, getSamples(req)) - } else { - err = errors.Errorf("server returned HTTP status %s: %s", httpResp.Status, line) - } - } - - if httpResp.StatusCode/100 == 5 { - return RecoverableError{err} - } - return err -} - -func (pd *PromeDataSource) buildWriteRequest(samples []prompb.TimeSeries) ([]byte, error) { - - req := &prompb.WriteRequest{ - Timeseries: samples, - Metadata: nil, - } - - data, err := proto.Marshal(req) - if err != nil { - return nil, err - } - - compressed := snappy.Encode(nil, data) - return compressed, nil -} - -func getSamples(compressed []byte) []prompb.TimeSeries { - var samples []prompb.TimeSeries - req := &prompb.WriteRequest{ - Timeseries: samples, - Metadata: nil, - } - - d, _ := snappy.Decode(nil, compressed) - proto.Unmarshal(d, req) - - return req.Timeseries -} diff --git a/backend/prome/prome.go b/backend/prome/prome.go deleted file mode 100644 index 54700b8e..00000000 --- a/backend/prome/prome.go +++ /dev/null @@ -1,257 +0,0 @@ -package backend - -import ( - "io/ioutil" - "net/http" - "net/url" - "os" - "time" - - "github.com/go-kit/kit/log" - "github.com/prometheus/client_golang/prometheus" - config_util "github.com/prometheus/common/config" - "github.com/prometheus/common/model" - "github.com/prometheus/common/promlog" - pc "github.com/prometheus/prometheus/config" - "github.com/prometheus/prometheus/prompb" - "github.com/prometheus/prometheus/promql" - "github.com/prometheus/prometheus/storage" - "github.com/prometheus/prometheus/storage/remote" - "github.com/toolkits/pkg/container/list" - "github.com/toolkits/pkg/logger" - "go.uber.org/atomic" - - "github.com/didi/nightingale/v5/vos" -) - -const ( - DefaultPopNum = 1000 -) - -type PromeSection struct { - Enable bool `yaml:"enable"` - Name string `yaml:"name"` - Batch int `yaml:"batch"` - MaxRetry int `yaml:"maxRetry"` - 
LookbackDeltaMinute int `yaml:"lookbackDeltaMinute"` - MaxConcurrentQuery int `yaml:"maxConcurrentQuery"` - MaxSamples int `yaml:"maxSamples"` - MaxFetchAllSeriesLimitMinute int64 `yaml:"maxFetchAllSeriesLimitMinute"` - SlowLogRecordSecond float64 `yaml:"slowLogRecordSecond"` - DefaultFetchSeriesQl string `yaml:"defaultFetchSeriesQl"` - RemoteWrite []RemoteConfig `yaml:"remoteWrite"` - RemoteRead []RemoteConfig `yaml:"remoteRead"` -} - -type RemoteConfig struct { - Name string `yaml:"name"` - Url string `yaml:"url"` - RemoteTimeoutSecond int `yaml:"remoteTimeoutSecond"` -} - -type PromeDataSource struct { - Section PromeSection - LocalTmpDir string - // 除了promql的查询,需要后端存储 - Queryable storage.SampleAndChunkQueryable - // promql相关查询 - QueryEngine *promql.Engine - PushQueue *list.SafeListLimited - WriteTargets []*HttpClient -} -type safePromQLNoStepSubqueryInterval struct { - value atomic.Int64 -} - -type HttpClient struct { - remoteName string // Used to differentiate clients in metrics. - url *url.URL - Client *http.Client - timeout time.Duration -} - -func durationToInt64Millis(d time.Duration) int64 { - return int64(d / time.Millisecond) -} -func (i *safePromQLNoStepSubqueryInterval) Set(ev model.Duration) { - i.value.Store(durationToInt64Millis(time.Duration(ev))) -} -func (i *safePromQLNoStepSubqueryInterval) Get(int64) int64 { - return i.value.Load() -} -func (pd *PromeDataSource) CleanUp() { - err := os.RemoveAll(pd.LocalTmpDir) - logger.Infof("[remove_prome_tmp_dir_err][dir:%+v][err: %v]", pd.LocalTmpDir, err) - -} -func (pd *PromeDataSource) Init() { - // 模拟创建本地存储目录 - dbDir, err := ioutil.TempDir("", "tsdb-api-ready") - if err != nil { - logger.Errorf("[error_create_local_tsdb_dir][err: %v]", err) - return - } - pd.LocalTmpDir = dbDir - - promlogConfig := promlog.Config{} - // 使用本地目录创建remote-storage - remoteS := remote.NewStorage(promlog.New(&promlogConfig), prometheus.DefaultRegisterer, func() (int64, error) { - return 0, nil - }, dbDir, 1*time.Minute, nil) - - // ApplyConfig 加载queryables - remoteReadC := make([]*pc.RemoteReadConfig, 0) - for _, u := range pd.Section.RemoteRead { - - ur, err := url.Parse(u.Url) - if err != nil { - logger.Errorf("[prome_ds_init_error][parse_url_error][url:%+v][err:%+v]", u.Url, err) - continue - } - - remoteReadC = append(remoteReadC, - &pc.RemoteReadConfig{ - URL: &config_util.URL{URL: ur}, - RemoteTimeout: model.Duration(time.Duration(u.RemoteTimeoutSecond) * time.Second), - ReadRecent: true, - }, - ) - } - if len(remoteReadC) == 0 { - logger.Errorf("[prome_ds_error_got_zero_remote_read_storage]") - return - } - err = remoteS.ApplyConfig(&pc.Config{RemoteReadConfigs: remoteReadC}) - if err != nil { - logger.Errorf("[error_load_remote_read_config][err: %v]", err) - return - } - pLogger := log.NewNopLogger() - - noStepSubqueryInterval := &safePromQLNoStepSubqueryInterval{} - - queryQueueDir, err := ioutil.TempDir(dbDir, "prom_query_concurrency") - opts := promql.EngineOpts{ - Logger: log.With(pLogger, "component", "query engine"), - Reg: prometheus.DefaultRegisterer, - MaxSamples: pd.Section.MaxSamples, - Timeout: 30 * time.Second, - ActiveQueryTracker: promql.NewActiveQueryTracker(queryQueueDir, pd.Section.MaxConcurrentQuery, log.With(pLogger, "component", "activeQueryTracker")), - LookbackDelta: time.Duration(pd.Section.LookbackDeltaMinute) * time.Minute, - NoStepSubqueryIntervalFn: noStepSubqueryInterval.Get, - EnableAtModifier: true, - } - - queryEngine := promql.NewEngine(opts) - pd.QueryEngine = queryEngine - pd.Queryable = remoteS - - // 
初始化writeClients - if len(pd.Section.RemoteWrite) == 0 { - logger.Warningf("[prome_ds_init_with_zero_RemoteWrite_target]") - logger.Infof("[successfully_init_prometheus_datasource][remote_read_num:%+v][remote_write_num:%+v]", - len(pd.Section.RemoteRead), - len(pd.Section.RemoteWrite), - ) - return - } - writeTs := make([]*HttpClient, 0) - for _, u := range pd.Section.RemoteWrite { - ur, err := url.Parse(u.Url) - if err != nil { - logger.Errorf("[prome_ds_init_error][parse_url_error][url:%+v][err:%+v]", u.Url, err) - continue - } - writeTs = append(writeTs, - &HttpClient{ - remoteName: u.Name, - url: ur, - Client: &http.Client{}, - timeout: time.Duration(u.RemoteTimeoutSecond) * time.Second, - }) - } - pd.WriteTargets = writeTs - // 开启prometheus 队列消费协程 - go pd.remoteWrite() - logger.Infof("[successfully_init_prometheus_datasource][remote_read_num:%+v][remote_write_num:%+v]", - len(remoteReadC), - len(writeTs), - ) -} - -func (pd *PromeDataSource) Push2Queue(points []*vos.MetricPoint) { - for _, point := range points { - pt, err := pd.convertOne(point) - if err != nil { - logger.Errorf("[prome_convertOne_error][point: %+v][err:%s]", point, err) - continue - } - ok := pd.PushQueue.PushFront(pt) - if !ok { - logger.Errorf("[prome_push_queue_error][point: %+v] ", point) - } - } -} - -func (pd *PromeDataSource) remoteWrite() { - batch := pd.Section.Batch // 一次发送,最多batch条数据 - if batch <= 0 { - batch = DefaultPopNum - } - for { - items := pd.PushQueue.PopBackBy(batch) - count := len(items) - if count == 0 { - time.Sleep(time.Millisecond * 100) - continue - } - - pbItems := make([]prompb.TimeSeries, count) - for i := 0; i < count; i++ { - pbItems[i] = items[i].(prompb.TimeSeries) - } - payload, err := pd.buildWriteRequest(pbItems) - if err != nil { - logger.Errorf("[prome_remote_write_error][pb_marshal_error][items: %+v][pb.err: %v]: ", items, err) - continue - } - pd.processWrite(payload) - - } -} - -func (pd *PromeDataSource) processWrite(payload []byte) { - - retry := pd.Section.MaxRetry - - for _, c := range pd.WriteTargets { - newC := c - go func(cc *HttpClient, payload []byte) { - sendOk := false - var rec bool - var finalErr error - for i := 0; i < retry; i++ { - err := remoteWritePost(cc, payload) - if err == nil { - sendOk = true - break - } - - _, rec = err.(RecoverableError) - - if !rec { - finalErr = err - break - } - logger.Warningf("[send prome fail recoverableError][retry: %d/%d][err:%v]", i+1, retry, err) - time.Sleep(time.Millisecond * 100) - } - if !sendOk { - logger.Errorf("send prome finally fail: %v", finalErr) - } else { - logger.Debugf("send to prome %s ok", cc.url.String()) - } - }(newC, payload) - } - -} diff --git a/backend/prome/query.go b/backend/prome/query.go deleted file mode 100644 index b27871e9..00000000 --- a/backend/prome/query.go +++ /dev/null @@ -1,754 +0,0 @@ -package backend - -import ( - "context" - "errors" - "fmt" - "math" - "sort" - "strings" - "time" - - "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/promql" - "github.com/prometheus/prometheus/promql/parser" - "github.com/prometheus/prometheus/storage" - "github.com/toolkits/pkg/logger" - - "github.com/didi/nightingale/v5/cache" - "github.com/didi/nightingale/v5/models" - "github.com/didi/nightingale/v5/vos" -) - -const ( - LABEL_IDENT = "ident" - LABEL_NAME = "__name__" - DEFAULT_STEP = 15 -) - -type commonQueryObj struct { - Idents []string - TagPairs []*vos.TagPair - Metric string - Start int64 - End int64 - MetricNameExact bool // metric_name精确匹配,在查询看图的时候为true - 
From string // 调用的来源 -} - -// 为查询索引或标签相关的转换,大部分都是正则匹配 -func convertToPromql(recv *commonQueryObj) string { - - qlStr := "" - qlStrFinal := "" - metricName := "" - labelIdent := "" - labelStrSlice := make([]string, 0) - // 匹配metric_name __name__=~"xx.*" - if recv.Metric != "" { - if recv.MetricNameExact { - metricName = fmt.Sprintf(`__name__="%s"`, recv.Metric) - } else { - metricName = fmt.Sprintf(`__name__=~".*%s.*"`, recv.Metric) - } - - labelStrSlice = append(labelStrSlice, metricName) - - } - // 匹配ident=~"k1|k2" - labelIdent = strings.Join(recv.Idents, "|") - if labelIdent != "" { - labelStrSlice = append(labelStrSlice, fmt.Sprintf(`ident=~"%s"`, labelIdent)) - } - // 匹配标签 - labelM := make(map[string]string) - for _, i := range recv.TagPairs { - if i.Key == "" { - continue - } - lastStr, _ := labelM[i.Key] - - lastStr += fmt.Sprintf(`.*%s.*|`, i.Value) - labelM[i.Key] = lastStr - } - for k, v := range labelM { - thisLabel := strings.TrimRight(v, "|") - labelStrSlice = append(labelStrSlice, fmt.Sprintf(`%s=~"%s"`, k, thisLabel)) - - } - - qlStr = strings.Join(labelStrSlice, ",") - qlStrFinal = fmt.Sprintf(`{%s}`, qlStr) - logger.Debugf("[convertToPromql][type=queryLabel][recv:%+v][qlStrFinal:%s]", recv, qlStrFinal) - - return qlStrFinal -} - -// 查询数据的转换,metrics_name和标签都是精确匹配 -func convertToPromqlForQueryData(recv *commonQueryObj) string { - - qlStr := "" - qlStrFinal := "" - metricName := "" - labelIdent := "" - labelStrSlice := make([]string, 0) - // 匹配metric_name __name__=~"xx.*" - if recv.Metric != "" { - metricName = fmt.Sprintf(`__name__="%s"`, recv.Metric) - - labelStrSlice = append(labelStrSlice, metricName) - - } - // 匹配ident=~"k1|k2" - labelIdent = strings.Join(recv.Idents, "|") - if labelIdent != "" { - labelStrSlice = append(labelStrSlice, fmt.Sprintf(`ident=~"%s"`, labelIdent)) - } - // 匹配标签 - labelM := make(map[string]string) - for _, i := range recv.TagPairs { - if i.Key == "" { - continue - } - lastStr, _ := labelM[i.Key] - - lastStr += fmt.Sprintf(`%s|`, i.Value) - labelM[i.Key] = lastStr - } - for k, v := range labelM { - thisLabel := strings.TrimRight(v, "|") - labelStrSlice = append(labelStrSlice, fmt.Sprintf(`%s=~"%s"`, k, thisLabel)) - - } - - qlStr = strings.Join(labelStrSlice, ",") - qlStrFinal = fmt.Sprintf(`{%s}`, qlStr) - logger.Debugf("[convertToPromql][type=queryData][recv:%+v][qlStrFinal:%s]", recv, qlStrFinal) - - return qlStrFinal -} - -func parseMatchersParam(matchers []string) ([][]*labels.Matcher, error) { - var matcherSets [][]*labels.Matcher - for _, s := range matchers { - matchers, err := parser.ParseMetricSelector(s) - if err != nil { - return nil, err - } - matcherSets = append(matcherSets, matchers) - } - -OUTER: - for _, ms := range matcherSets { - for _, lm := range ms { - if lm != nil && !lm.Matches("") { - continue OUTER - } - } - return nil, errors.New("match[] must contain at least one non-empty matcher") - } - return matcherSets, nil -} - -func (pd *PromeDataSource) QueryData(inputs vos.DataQueryParam) []*vos.DataQueryResp { - - respD := make([]*vos.DataQueryResp, 0) - for _, input := range inputs.Params { - var qlStrFinal string - - if input.PromeQl != "" { - qlStrFinal = input.PromeQl - } else { - if len(input.Idents) == 0 { - for i := range input.TagPairs { - if input.TagPairs[i].Key == "ident" { - input.Idents = append(input.Idents, input.TagPairs[i].Value) - } - } - } - - if len(input.Idents) == 0 && input.ClasspathId != 0 { - if input.ClasspathPrefix == 0 { - classpathAndRes, exists := cache.ClasspathRes.Get(input.ClasspathId) - if 
exists { - input.Idents = classpathAndRes.Res - } - } else { - classpath, err := models.ClasspathGet("id=?", input.ClasspathId) - if err != nil { - continue - } - cps, _ := models.ClasspathGetsByPrefix(classpath.Path) - for _, classpath := range cps { - classpathAndRes, exists := cache.ClasspathRes.Get(classpath.Id) - if exists { - idents := classpathAndRes.Res - input.Idents = append(input.Idents, idents...) - } - } - } - } - - cj := &commonQueryObj{ - Idents: input.Idents, - TagPairs: input.TagPairs, - Metric: input.Metric, - Start: inputs.Start, - End: inputs.End, - MetricNameExact: true, - } - qlStrFinal = convertToPromqlForQueryData(cj) - - } - - logger.Debugf("[input:%+v][qlStrFinal:%s]\n", input, qlStrFinal) - // 转化为utc时间 - startT := tsToUtcTs(inputs.Start) - endT := tsToUtcTs(inputs.End) - - resolution := time.Second * time.Duration(inputs.Step) - if inputs.Step == 0 { - // step==0 说明要自己算 grafana和prometheus ui都是前端传入 - delta := (inputs.End - inputs.Start) / 3600 - if delta <= 0 { - delta = 1 - } - resolution = time.Second * time.Duration(delta*DEFAULT_STEP) - } - q, err := pd.QueryEngine.NewRangeQuery(pd.Queryable, qlStrFinal, startT, endT, resolution) - if err != nil { - logger.Errorf("[prome_query_error][QueryData_error_may_be_parse_ql_error][args:%+v][err:%+v]", input, err) - continue - } - ctx, _ := context.WithTimeout(context.Background(), time.Second*30) - res := q.Exec(ctx) - if res.Err != nil { - logger.Errorf("[prome_query_error][rangeQuery_exec_error][args:%+v][err:%+v]", input, res.Err) - q.Close() - continue - } - mat, ok := res.Value.(promql.Matrix) - if !ok { - logger.Errorf("[promql.Engine.exec: invalid expression type %q]", res.Value.Type()) - q.Close() - continue - } - if res.Err != nil { - logger.Errorf("[prome_query_error][res.Matrix_error][args:%+v][err:%+v]", input, res.Err) - q.Close() - continue - } - for index, m := range mat { - if inputs.Limit > 0 && index+1 > inputs.Limit { - continue - } - tagStr := "" - oneResp := &vos.DataQueryResp{} - - ident := m.Metric.Get(LABEL_IDENT) - name := m.Metric.Get(LABEL_NAME) - oneResp.Metric = name - oneResp.Ident = ident - pNum := len(m.Points) - interval := int64(resolution / time.Second) - pNumExpect := int((inputs.End - inputs.Start) / interval) - - remotePIndex := 0 - for i := 0; i <= pNumExpect; i++ { - - // 先准备好null的point - tsLocal := inputs.Start + interval*int64(i) - tmpP := &vos.Point{ - Timestamp: tsLocal, - Value: vos.JsonFloat(math.NaN()), - } - //说明points数组还没越界 - //去m.Points获取一个 - if remotePIndex < pNum { - pointOne := m.Points[remotePIndex] - tsRemote := pointOne.T / 1e3 - // 判断时间戳 ,前后相差1秒认为时间戳对齐了 - if math.Abs(float64(tsRemote-tsLocal)) <= 1 { - tmpP.Timestamp = tsRemote - tmpP.Value = vos.JsonFloat(pointOne.V) - // 说明远端的这个索引的值已经被pop了,移动索引 - remotePIndex++ - } - - } - - oneResp.Values = append(oneResp.Values, tmpP) - - } - - for _, x := range m.Metric { - if x.Name == LABEL_NAME { - continue - } - tagStr += fmt.Sprintf("%s=%s,", x.Name, x.Value) - } - tagStr = strings.TrimRight(tagStr, ",") - oneResp.Tags = tagStr - oneResp.Resolution = interval - oneResp.PNum = pNum - respD = append(respD, oneResp) - - } - q.Close() - - } - return respD -} - -func tsToUtcTs(s int64) time.Time { - return time.Unix(s, 0).UTC() -} -func timeParse(ts int64) time.Time { - t := float64(ts) - s, ns := math.Modf(t) - ns = math.Round(ns*1000) / 1000 - return time.Unix(int64(s), int64(ns*float64(time.Second))).UTC() -} - -func millisecondTs(t time.Time) int64 { - return t.Unix()*1000 + int64(t.Nanosecond())/int64(time.Millisecond) 
-} -func tsToStr(timestamp int64) string { - timeNow := time.Unix(timestamp, 0) - return timeNow.Format("2006-01-02 15:04:05") -} - -func (pd *PromeDataSource) CommonQuerySeries(cj *commonQueryObj) storage.SeriesSet { - qlStrFinal := convertToPromql(cj) - - if qlStrFinal == "{}" { - qlStrFinal = pd.Section.DefaultFetchSeriesQl - reqMinute := (cj.End - cj.Start) / 60 - // 如果前端啥都没传,要限制下查询series的时间范围,防止高基础查询 - if reqMinute > pd.Section.MaxFetchAllSeriesLimitMinute { - // 时间超长,用配置文件中的限制一下 - now := time.Now().Unix() - cj.End = now - cj.Start = now - pd.Section.MaxFetchAllSeriesLimitMinute*60 - logger.Debugf("[CommonQuerySeries.FetchAllSeries.LimitQueryTimeRange][start:%v][end:%v]", cj.Start, cj.End) - } - } - - matcherSets, err := parseMatchersParam([]string{qlStrFinal}) - if err != nil { - logger.Errorf("[prome_query_error][parse_label_match_error][err:%+v]", err) - return nil - } - now := time.Now().Unix() - if cj.Start == 0 { - cj.Start = now - 60*pd.Section.MaxFetchAllSeriesLimitMinute - } - if cj.End == 0 { - cj.End = now - } - - startT := millisecondTs(timeParse(cj.Start)) - endT := millisecondTs(timeParse(cj.End)) - - ctx, _ := context.WithTimeout(context.Background(), time.Second*30) - q, err := pd.Queryable.Querier(ctx, startT, endT) - if err != nil { - - logger.Errorf("[prome_query_error][get_querier_errro]") - return nil - } - logger.Debugf("[CommonQuerySeries.Result][from:%s][cj.start_ts:%+v cj.start_str:%+v SelectHints.startT:%+v][cj.end_ts:%+v cj.end_str:%+v SelectHints.endT:%+v][qlStrFinal:%s][cj:%+v]", - cj.From, - cj.Start, - tsToStr(cj.Start), - startT, - cj.End, - tsToStr(cj.End), - endT, - qlStrFinal, - cj, - ) - - defer q.Close() - - hints := &storage.SelectHints{ - Start: startT, - End: endT, - Func: "series", // There is no series function, this token is used for lookups that don't need samples. - } - - // Get all series which match matchers. - startTs := time.Now() - s := q.Select(true, hints, matcherSets[0]...) 
- timeTookSecond := time.Since(startTs).Seconds() - if timeTookSecond > pd.Section.SlowLogRecordSecond { - logger.Warningf("[prome_remote_read_show_slow_log_CommonQuerySeries_select][threshold:%v][timeTookSecond:%v][from:%v][args:%+v][promql:%v]", - pd.Section.SlowLogRecordSecond, - timeTookSecond, - cj.From, - cj, - qlStrFinal, - ) - } - - return s - -} - -// 全部转化为 {__name__="a",label_a!="b",label_b=~"d|c",label_c!~"d"} -// 对应prometheus 中的 /api/v1/labels -// TODO 等待prometheus官方对 remote_read label_values 的支持 -// Implement: https://github.com/prometheus/prometheus/issues/3351 -func (pd *PromeDataSource) QueryTagKeys(recv vos.CommonTagQueryParam) *vos.TagKeyQueryResp { - startTs := time.Now() - respD := &vos.TagKeyQueryResp{ - Keys: make([]string, 0), - } - - labelNamesSet := make(map[string]struct{}) - if len(recv.Params) == 0 { - recv.Params = append(recv.Params, vos.TagPairQueryParamOne{ - Idents: []string{}, - Metric: "", - }) - } - resultSeries := "" - for _, x := range recv.Params { - cj := &commonQueryObj{ - Idents: x.Idents, - TagPairs: recv.TagPairs, - Metric: x.Metric, - Start: recv.Start, - End: recv.End, - From: "QueryTagKeys", - } - - s := pd.CommonQuerySeries(cj) - if s.Warnings() != nil { - logger.Warningf("[prome_query_error][series_set_iter_error][warning:%+v]", s.Warnings()) - - } - - if err := s.Err(); err != nil { - logger.Errorf("[prome_query_error][series_set_iter_error][err:%+v]", err) - continue - } - thisSeriesNum := 0 - for s.Next() { - series := s.At() - thisSeriesNum++ - for _, lb := range series.Labels() { - if lb.Name == LABEL_NAME { - continue - - } - if recv.TagKey != "" { - if !strings.Contains(lb.Name, recv.TagKey) { - continue - } - } - labelNamesSet[lb.Name] = struct{}{} - } - } - resultSeries += fmt.Sprintf(" %d ", thisSeriesNum) - - } - names := make([]string, len(labelNamesSet)) - i := 0 - for key := range labelNamesSet { - names[i] = key - i++ - } - sort.Strings(names) - // 因为map中的key是无序的,必须这样才能稳定输出 - if recv.Limit > 0 && len(names) > recv.Limit { - names = names[:recv.Limit] - } - - respD.Keys = names - timeTookSecond := time.Since(startTs).Seconds() - if timeTookSecond > pd.Section.SlowLogRecordSecond { - logger.Warningf("[prome_remote_read_show_slow_log][threshold:%v][timeTookSecond:%v][func:QueryTagKeys][args:%+v][resultSeries:%v]", pd.Section.SlowLogRecordSecond, timeTookSecond, recv, resultSeries) - } - return respD - -} - -// 对应prometheus 中的 /api/v1/label//values -func (pd *PromeDataSource) QueryTagValues(recv vos.CommonTagQueryParam) *vos.TagValueQueryResp { - startTs := time.Now() - labelValuesSet := make(map[string]struct{}) - - if len(recv.Params) == 0 { - recv.Params = append(recv.Params, vos.TagPairQueryParamOne{ - Idents: []string{}, - Metric: "", - }) - } - resultSeries := "" - for _, x := range recv.Params { - cj := &commonQueryObj{ - Idents: x.Idents, - Metric: x.Metric, - TagPairs: recv.TagPairs, - Start: recv.Start, - End: recv.End, - From: "QueryTagValues", - } - - s := pd.CommonQuerySeries(cj) - if s.Warnings() != nil { - logger.Warningf("[prome_query_error][series_set_iter_error][warning:%+v]", s.Warnings()) - - } - - if err := s.Err(); err != nil { - logger.Errorf("[prome_query_error][series_set_iter_error][err:%+v]", err) - continue - } - thisSeriesNum := 0 - for s.Next() { - series := s.At() - thisSeriesNum++ - for _, lb := range series.Labels() { - if lb.Name == recv.TagKey { - if recv.TagValue != "" { - if !strings.Contains(lb.Value, recv.TagValue) { - continue - } - } - - labelValuesSet[lb.Value] = struct{}{} - } - } - } - 
resultSeries += fmt.Sprintf(" %d ", thisSeriesNum) - } - vals := make([]string, len(labelValuesSet)) - i := 0 - for val := range labelValuesSet { - vals[i] = val - i++ - } - sort.Strings(vals) - if recv.Limit > 0 && len(vals) > recv.Limit { - vals = vals[:recv.Limit] - } - respD := &vos.TagValueQueryResp{} - respD.Values = vals - timeTookSecond := time.Since(startTs).Seconds() - if timeTookSecond > pd.Section.SlowLogRecordSecond { - logger.Warningf("[prome_remote_read_show_slow_log][threshold:%v][timeTookSecond:%v][func:QueryTagValues][args:%+v][resultSeries:%v]", pd.Section.SlowLogRecordSecond, timeTookSecond, recv, resultSeries) - } - return respD - -} - -// 对应prometheus 中的 /api/v1/label//values label_name == __name__ -func (pd *PromeDataSource) QueryMetrics(recv vos.MetricQueryParam) *vos.MetricQueryResp { - startTs := time.Now() - cj := &commonQueryObj{ - Idents: recv.Idents, - Metric: recv.Metric, - TagPairs: recv.TagPairs, - Start: recv.Start, - End: recv.End, - From: "QueryMetrics", - } - - respD := &vos.MetricQueryResp{} - respD.Metrics = make([]string, 0) - s := pd.CommonQuerySeries(cj) - if s.Warnings() != nil { - logger.Warningf("[prome_query_error][series_set_iter_error][warning:%+v]", s.Warnings()) - - } - - if err := s.Err(); err != nil { - logger.Errorf("[prome_query_error][series_set_iter_error][err:%+v]", err) - return respD - } - - var sets []storage.SeriesSet - sets = append(sets, s) - set := storage.NewMergeSeriesSet(sets, storage.ChainedSeriesMerge) - labelValuesSet := make(map[string]struct{}) - resultSeries := "" - thisSeriesNum := 0 - for set.Next() { - series := set.At() - thisSeriesNum++ - for _, lb := range series.Labels() { - if lb.Name == LABEL_NAME { - labelValuesSet[lb.Value] = struct{}{} - } - } - } - resultSeries += fmt.Sprintf(" %d ", thisSeriesNum) - vals := make([]string, len(labelValuesSet)) - i := 0 - for val := range labelValuesSet { - vals[i] = val - i++ - } - - sort.Strings(vals) - - if recv.Limit > 0 && len(vals) > recv.Limit { - vals = vals[:recv.Limit] - } - respD.Metrics = vals - timeTookSecond := time.Since(startTs).Seconds() - if timeTookSecond > pd.Section.SlowLogRecordSecond { - logger.Warningf("[prome_remote_read_show_slow_log][threshold:%v][timeTookSecond:%v][func:QueryMetrics][args:%+v][resultSeries:%v]", pd.Section.SlowLogRecordSecond, timeTookSecond, recv, resultSeries) - } - return respD -} - -// 对应prometheus 中的 /api/v1/series -func (pd *PromeDataSource) QueryTagPairs(recv vos.CommonTagQueryParam) *vos.TagPairQueryResp { - startTs := time.Now() - respD := &vos.TagPairQueryResp{ - TagPairs: make([]string, 0), - Idents: make([]string, 0), - } - tps := make(map[string]struct{}) - if len(recv.Params) == 0 { - recv.Params = append(recv.Params, vos.TagPairQueryParamOne{ - Idents: []string{}, - Metric: "", - }) - } - resultSeries := "" - for _, x := range recv.Params { - cj := &commonQueryObj{ - Idents: x.Idents, - TagPairs: recv.TagPairs, - Metric: x.Metric, - Start: recv.Start, - End: recv.End, - From: "QueryTagPairs", - } - - s := pd.CommonQuerySeries(cj) - if s.Warnings() != nil { - logger.Warningf("[prome_query_error][series_set_iter_error][warning:%+v]", s.Warnings()) - - } - - if err := s.Err(); err != nil { - logger.Errorf("[prome_query_error][series_set_iter_error][err:%+v]", err) - continue - } - - var sets []storage.SeriesSet - sets = append(sets, s) - set := storage.NewMergeSeriesSet(sets, storage.ChainedSeriesMerge) - - labelIdents := make([]string, 0) - thisSeriesNum := 0 - for set.Next() { - series := s.At() - thisSeriesNum++ - 
labelsS := series.Labels() - for _, i := range labelsS { - - if i.Name == LABEL_NAME { - continue - } - if i.Name == LABEL_IDENT { - labelIdents = append(labelIdents, i.Value) - } - if recv.Search != "" { - // 如果配置了搜索字符串,则key value中任意匹配到即可 - if strings.Contains(i.Name, recv.Search) || strings.Contains(i.Value, recv.Search) { - tps[fmt.Sprintf("%s=%s", i.Name, i.Value)] = struct{}{} - } - } else { - tps[fmt.Sprintf("%s=%s", i.Name, i.Value)] = struct{}{} - } - } - - } - resultSeries += fmt.Sprintf(" %d ", thisSeriesNum) - - } - - newTags := make([]string, len(tps)) - i := 0 - for k := range tps { - newTags[i] = k - i++ - } - - sort.Strings(newTags) - if recv.Limit > 0 && len(newTags) > recv.Limit { - newTags = newTags[:recv.Limit] - } - - respD.TagPairs = newTags - timeTookSecond := time.Since(startTs).Seconds() - if timeTookSecond > pd.Section.SlowLogRecordSecond { - logger.Warningf("[prome_remote_read_show_slow_log][threshold:%v][timeTookSecond:%v][func:QueryTagPairs][args:%+v][resultSeries:%v]", pd.Section.SlowLogRecordSecond, timeTookSecond, recv, resultSeries) - } - return respD -} - -func (pd *PromeDataSource) QueryDataInstant(ql string) []*vos.DataQueryInstanceResp { - respD := make([]*vos.DataQueryInstanceResp, 0) - pv := pd.QueryVector(ql) - if pv == nil { - - return respD - } - - for _, s := range pv { - metricOne := make(map[string]interface{}) - valueOne := make([]float64, 0) - - for _, l := range s.Metric { - if l.Name == LABEL_NAME { - continue - } - metricOne[l.Name] = l.Value - } - // 毫秒时间时间戳转 秒时间戳 - valueOne = append(valueOne, float64(s.Point.T)/1e3) - valueOne = append(valueOne, s.Point.V) - respD = append(respD, &vos.DataQueryInstanceResp{ - Metric: metricOne, - Value: valueOne, - }) - - } - return respD -} - -func (pd *PromeDataSource) QueryVector(ql string) promql.Vector { - t := time.Now() - q, err := pd.QueryEngine.NewInstantQuery(pd.Queryable, ql, t) - if err != nil { - logger.Errorf("[prome_query_error][new_insQuery_error][err:%+v][ql:%+v]", err, ql) - return nil - } - ctx := context.Background() - res := q.Exec(ctx) - if res.Err != nil { - logger.Errorf("[prome_query_error][insQuery_exec_error][err:%+v][ql:%+v]", err, ql) - return nil - } - defer q.Close() - switch v := res.Value.(type) { - case promql.Vector: - return v - case promql.Scalar: - return promql.Vector{promql.Sample{ - Point: promql.Point(v), - Metric: labels.Labels{}, - }} - default: - logger.Errorf("[prome_query_error][insQuery_res_error rule result is not a vector or scalar][err:%+v][ql:%+v]", err, ql) - return nil - } - -} diff --git a/build.sh b/build.sh deleted file mode 100755 index 3da6198a..00000000 --- a/build.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash - -# release version -version=5.0.0-rc7-1 - -#export GO111MODULE=on -#export GOPROXY=https://goproxy.cn -go build -ldflags "-X github.com/didi/nightingale/v5/config.Version=${version}" -o n9e-server main.go - diff --git a/cache/alert_mute.go b/cache/alert_mute.go deleted file mode 100644 index cfbfe0ad..00000000 --- a/cache/alert_mute.go +++ /dev/null @@ -1,33 +0,0 @@ -package cache - -import ( - "regexp" - "sync" -) - -type AlertMuteMap struct { - sync.RWMutex - Data map[string][]Filter -} -type Filter struct { - ClasspathPrefix string - ResReg *regexp.Regexp - TagsMap map[string]string -} - -var AlertMute = &AlertMuteMap{Data: make(map[string][]Filter)} - -func (a *AlertMuteMap) SetAll(m map[string][]Filter) { - a.Lock() - defer a.Unlock() - a.Data = m -} - -func (a *AlertMuteMap) GetByKey(key string) ([]Filter, bool) { - a.RLock() - defer 
a.RUnlock() - - value, exists := a.Data[key] - - return value, exists -} diff --git a/cache/alert_rule.go b/cache/alert_rule.go deleted file mode 100644 index 45fb013a..00000000 --- a/cache/alert_rule.go +++ /dev/null @@ -1,75 +0,0 @@ -package cache - -import ( - "sync" - - "github.com/didi/nightingale/v5/models" -) - -type AlertRulesByMetricCache struct { - sync.RWMutex - Data map[string][]*models.AlertRule // key是metric,便于后续检索 - MaxUpdateTs int64 // 从数据库拿到的最大update_at - RuleNum int64 // 从数据库中统计到的行数 - LastSync int64 // 保存上次全量同步时间 -} - -var ( - AlertRulesByMetric = &AlertRulesByMetricCache{Data: make(map[string][]*models.AlertRule)} -) - -func (a *AlertRulesByMetricCache) GetBy(instance string) []*models.AlertRule { - a.RLock() - defer a.RUnlock() - - return a.Data[instance] -} - -func (a *AlertRulesByMetricCache) SetAll(alertRulesMap map[string][]*models.AlertRule, lastUpdateTs, ruleNum, lastSync int64) { - a.Lock() - defer a.Unlock() - - a.Data = alertRulesMap - a.MaxUpdateTs = lastUpdateTs - a.RuleNum = ruleNum - a.LastSync = lastSync -} - -type AlertRulesTotalCache struct { - sync.RWMutex - Data map[int64]*models.AlertRule -} - -var AlertRules = &AlertRulesTotalCache{Data: make(map[int64]*models.AlertRule)} - -func (a *AlertRulesTotalCache) Get(id int64) (*models.AlertRule, bool) { - a.RLock() - defer a.RUnlock() - - alertRule, exists := a.Data[id] - return alertRule, exists -} - -func (a *AlertRulesTotalCache) SetAll(alertRulesMap map[int64]*models.AlertRule) { - a.Lock() - defer a.Unlock() - - a.Data = alertRulesMap -} - -// 获取所有PULL型规则的列表 -func (a *AlertRulesTotalCache) Pulls() []*models.AlertRule { - a.RLock() - defer a.RUnlock() - - cnt := len(a.Data) - ret := make([]*models.AlertRule, 0, cnt) - - for _, rule := range a.Data { - if rule.Type == models.PULL { - ret = append(ret, rule) - } - } - - return ret -} diff --git a/cache/cache.go b/cache/cache.go deleted file mode 100644 index 14ae80ed..00000000 --- a/cache/cache.go +++ /dev/null @@ -1,7 +0,0 @@ -package cache - -import ( - cmap "github.com/orcaman/concurrent-map" -) - -var MetricDescMapper = cmap.New() diff --git a/cache/classpath_prefix.go b/cache/classpath_prefix.go deleted file mode 100644 index 0f9afa2e..00000000 --- a/cache/classpath_prefix.go +++ /dev/null @@ -1,27 +0,0 @@ -package cache - -import ( - "sync" -) - -type ClasspathPrefixMap struct { - sync.RWMutex - Data map[int64][]int64 -} - -var ClasspathPrefix = &ClasspathPrefixMap{Data: make(map[int64][]int64)} - -func (c *ClasspathPrefixMap) Get(id int64) ([]int64, bool) { - c.RLock() - defer c.RUnlock() - ids, exists := c.Data[id] - return ids, exists -} - -func (c *ClasspathPrefixMap) SetAll(data map[int64][]int64) { - c.Lock() - defer c.Unlock() - - c.Data = data - return -} diff --git a/cache/classpath_res.go b/cache/classpath_res.go deleted file mode 100644 index 6339f951..00000000 --- a/cache/classpath_res.go +++ /dev/null @@ -1,33 +0,0 @@ -package cache - -import ( - "sync" - - "github.com/didi/nightingale/v5/models" -) - -type ClasspathResMap struct { - sync.RWMutex - Data map[int64]*ClasspathAndRes -} - -type ClasspathAndRes struct { - Res []string - Classpath *models.Classpath -} - -// classpath_id -> classpath & res_idents -var ClasspathRes = &ClasspathResMap{Data: make(map[int64]*ClasspathAndRes)} - -func (c *ClasspathResMap) Get(id int64) (*ClasspathAndRes, bool) { - c.RLock() - defer c.RUnlock() - resources, exists := c.Data[id] - return resources, exists -} - -func (c *ClasspathResMap) SetAll(collectRulesMap map[int64]*ClasspathAndRes) { - c.Lock() 
- defer c.Unlock() - c.Data = collectRulesMap -} diff --git a/cache/collect_rule.go b/cache/collect_rule.go deleted file mode 100644 index 0b64ae37..00000000 --- a/cache/collect_rule.go +++ /dev/null @@ -1,32 +0,0 @@ -package cache - -import ( - "sync" - - "github.com/didi/nightingale/v5/models" -) - -type CollectRuleOfIdentMap struct { - sync.RWMutex - Data map[string][]*models.CollectRule -} - -var CollectRulesOfIdent = &CollectRuleOfIdentMap{Data: make(map[string][]*models.CollectRule)} - -func (c *CollectRuleOfIdentMap) GetBy(ident string) []*models.CollectRule { - c.RLock() - defer c.RUnlock() - return c.Data[ident] -} - -func (c *CollectRuleOfIdentMap) Set(node string, collectRules []*models.CollectRule) { - c.Lock() - defer c.Unlock() - c.Data[node] = collectRules -} - -func (c *CollectRuleOfIdentMap) SetAll(collectRulesMap map[string][]*models.CollectRule) { - c.Lock() - defer c.Unlock() - c.Data = collectRulesMap -} diff --git a/cache/res_classpath.go b/cache/res_classpath.go deleted file mode 100644 index 94b07ca3..00000000 --- a/cache/res_classpath.go +++ /dev/null @@ -1,76 +0,0 @@ -package cache - -import ( - "sync" -) - -type SafeDoubleMap struct { - sync.RWMutex - M map[string]map[string]struct{} -} - -// res_ident -> classpath_path -> struct{}{} -var ResClasspath = &SafeDoubleMap{M: make(map[string]map[string]struct{})} - -func (s *SafeDoubleMap) GetKeys() []string { - s.RLock() - defer s.RUnlock() - - keys := make([]string, 0, len(s.M)) - for key := range s.M { - keys = append(keys, key) - } - - return keys -} - -func (s *SafeDoubleMap) GetValues(key string) []string { - s.RLock() - defer s.RUnlock() - - valueMap, exists := s.M[key] - if !exists { - return []string{} - } - - values := make([]string, 0, len(valueMap)) - - for value := range valueMap { - values = append(values, value) - } - - return values -} - -func (s *SafeDoubleMap) Exists(key string, value string) bool { - s.RLock() - defer s.RUnlock() - - if _, exists := s.M[key]; !exists { - return false - } - - if _, exists := s.M[key][value]; !exists { - return false - } - - return true -} - -func (s *SafeDoubleMap) Set(key string, value string) { - s.Lock() - defer s.Unlock() - - if _, exists := s.M[key]; !exists { - s.M[key] = make(map[string]struct{}) - } - - s.M[key][value] = struct{}{} -} - -func (s *SafeDoubleMap) SetAll(data map[string]map[string]struct{}) { - s.Lock() - defer s.Unlock() - - s.M = data -} diff --git a/cache/res_tags.go b/cache/res_tags.go deleted file mode 100644 index 79330411..00000000 --- a/cache/res_tags.go +++ /dev/null @@ -1,36 +0,0 @@ -package cache - -import ( - "sync" - - "github.com/didi/nightingale/v5/models" -) - -// resource_ident -> tags_map -// 监控数据上报的时候,要把资源的tags附到指标数据上 -type ResTagsMap struct { - sync.RWMutex - Data map[string]ResourceAndTags -} - -type ResourceAndTags struct { - Tags map[string]string - Resource models.Resource -} - -var ResTags = &ResTagsMap{Data: make(map[string]ResourceAndTags)} - -func (r *ResTagsMap) SetAll(m map[string]ResourceAndTags) { - r.Lock() - defer r.Unlock() - r.Data = m -} - -func (r *ResTagsMap) Get(key string) (ResourceAndTags, bool) { - r.RLock() - defer r.RUnlock() - - value, exists := r.Data[key] - - return value, exists -} diff --git a/cache/user.go b/cache/user.go deleted file mode 100644 index 3be2ab14..00000000 --- a/cache/user.go +++ /dev/null @@ -1,48 +0,0 @@ -package cache - -import ( - "sync" - - "github.com/didi/nightingale/v5/models" -) - -type UserMap struct { - sync.RWMutex - Data map[int64]*models.User -} - -var UserCache = 
&UserMap{Data: make(map[int64]*models.User)} - -func (s *UserMap) GetBy(id int64) *models.User { - s.RLock() - defer s.RUnlock() - - return s.Data[id] -} - -func (s *UserMap) GetByIds(ids []int64) []*models.User { - s.RLock() - defer s.RUnlock() - var users []*models.User - for _, id := range ids { - if s.Data[id] == nil { - continue - } - users = append(users, s.Data[id]) - } - - return users -} - -func (s *UserMap) GetById(id int64) *models.User { - s.RLock() - defer s.RUnlock() - - return s.Data[id] -} - -func (s *UserMap) SetAll(users map[int64]*models.User) { - s.Lock() - defer s.Unlock() - s.Data = users -} diff --git a/cache/user_group.go b/cache/user_group.go deleted file mode 100644 index 56e751f0..00000000 --- a/cache/user_group.go +++ /dev/null @@ -1,41 +0,0 @@ -package cache - -import ( - "sync" - - "github.com/didi/nightingale/v5/models" -) - -type UserGroupMap struct { - sync.RWMutex - Data map[int64]*models.UserGroup -} - -var UserGroupCache = &UserGroupMap{Data: make(map[int64]*models.UserGroup)} - -func (s *UserGroupMap) GetBy(id int64) *models.UserGroup { - s.RLock() - defer s.RUnlock() - - return s.Data[id] -} - -func (s *UserGroupMap) GetByIds(ids []int64) []*models.UserGroup { - s.RLock() - defer s.RUnlock() - var userGroups []*models.UserGroup - for _, id := range ids { - if s.Data[id] == nil { - continue - } - userGroups = append(userGroups, s.Data[id]) - } - - return userGroups -} - -func (s *UserGroupMap) SetAll(userGroups map[int64]*models.UserGroup) { - s.Lock() - defer s.Unlock() - s.Data = userGroups -} diff --git a/cache/user_group_member.go b/cache/user_group_member.go deleted file mode 100644 index 69024c78..00000000 --- a/cache/user_group_member.go +++ /dev/null @@ -1,38 +0,0 @@ -package cache - -import ( - "sync" -) - -type UserGroupMemberMap struct { - sync.RWMutex - Data map[int64]map[int64]struct{} -} - -// groupid -> userid -var UserGroupMember = &UserGroupMemberMap{Data: make(map[int64]map[int64]struct{})} - -func (m *UserGroupMemberMap) Get(id int64) (map[int64]struct{}, bool) { - m.RLock() - defer m.RUnlock() - ids, exists := m.Data[id] - return ids, exists -} - -func (m *UserGroupMemberMap) Exists(gid, uid int64) bool { - m.RLock() - defer m.RUnlock() - uidMap, exists := m.Data[gid] - if !exists { - return false - } - - _, exists = uidMap[uid] - return exists -} - -func (m *UserGroupMemberMap) SetAll(data map[int64]map[int64]struct{}) { - m.Lock() - defer m.Unlock() - m.Data = data -} diff --git a/changelog b/changelog deleted file mode 100644 index 54addade..00000000 --- a/changelog +++ /dev/null @@ -1,186 +0,0 @@ -3.1.1 -影响模块:n9e-job -更新内容:job模块之前给监控用的callback地址method误设置为了get,是不对的,改成了post - -3.1.2 -影响模块:n9e-rdb -更新内容:子节点修改的时候,不允许修改为租户节点 - -3.1.3 -影响模块:n9e-monapi -更新内容:对于P2、P3的告警会发送重复的两条 - -3.1.4 -影响模块:n9e-index n9e-judge n9e-monapi n9e-rdb n9e-transfer n9e-tsdb -更新内容:把hbs的逻辑从monapi挪到rdb,拆分监控的权限点 - -3.1.5 -影响模块:n9e-monapi -更新内容:清理策略的时候会空指针,node删除了,策略还在,此时会复现 - -3.1.6 -影响模块:n9e-ams etc/gop.yml -更新内容:主机设备增加了扩展字段的管理,用于维护一些位置信息、过保信息,增加了新的sql:sql/n9e_ams_3.1.6.sql - -3.2.0 -影响模块:n9e-agent etc/agent.yml -更新内容:agent支持metrics指标采集能力,这个版本是为商业版本服务的,开源用户无需更新 - -3.3.0 -影响模块:n9e-rdb n9e-transfer n9e-judge n9e-ams n9e-monapi sql/n9e_rdb_3.3.0.sql etc/*.tpl -更新内容:增强安全性:密码复杂度、cookie处理优化等;支持M3DB作为存储后端(如果要尝试M3需要修改transfer、monapi配置文件);修复告警引擎与条件串数的问题;为主机设备增加自定义字段的能力 - -3.3.1 -影响模块:n9e-job n9e-rdb n9e-agent n9e-ams n9e-judge -更新内容:修复job模块的一个调度bug;rdb支持根据org搜索user;agent在fields变化时及时感知,fields和host扩展字段联动;解决上个版本引入的judge处理nodata的问题 - -3.4.0 -升级内容: - - 
增强了安全性,引入了session机制,写入cookie的内容从user.uuid变更为随机session.id - - 修复部分sql注入漏洞 - - 告警引擎函数优化,all、c_avg_rate_abs等 - - 告警消息内容优化,可以展示设备名称和设备备注,感谢冯骐的PR - - 增加了大盘导入导出功能 -升级方法: - - 除了agent、tsdb、index的二进制不用升级,其他所有模块的二进制都要升级 - - job ams monapi rdb 四个模块的配置文件中的cookieName全部换成ecmc-sid - - rdb的配置文件发生了较大变化,需要对照升级 - - sql目录下有几个3.4.0的sql,需要导入 - -3.4.1 -升级内容: - - 修复日志监控采集策略配置了tag但是无法编辑的问题 -升级方法: - - 更新monapi的二进制即可 - -3.5.0 -升级内容: - - 引入了组件监控模块prober,内置了mysql、redis、mongo监控采集能力 - - 引入了内置监控大盘和内置告警策略,可以在任意节点一键导入内置大盘和策略 -升级方法: - - n9e-monapi n9e-rdb n9e-transfer n9e-ams n9e-job 的二进制要升级 - - n9e-agent也可以升级,解决了进程监控的性能问题,如果不在意可以不升级 - - n9e-prober 模块需要新部署 - - sql目录下有个3.5.0的sql patch文件,需要导入 - - etc目录下新增了screen、alert两个目录,需要拷贝到生产环境 - - etc目录下新增了plugins目录,需要随着prober模块走 - - etc/address.yml里增加prober的配置 - -3.5.1 -升级内容: - - monapi里的alarmEnabled默认值设置为true - - agent进程采集,忽略EOF日志 - - agent增加一个接口获取endpoint - - agent日志监控支持一种新的日志时间格式 - - 修复组件监控调整采集频率不生效的问题 -升级方法: - - 替换n9e-monapi n9e-prober n9e-agent二进制,升级pub下的前端资源文件 - -3.5.2 -升级内容: - - prober模板支持匿名结构体,结构体嵌套 - - prober插件添加了对TLS的支持 - - 修复prober上报没有port的问题 -升级方法: - - 替换n9e-prober n9e-monapi二进制,升级pub下的前端资源文件 - -3.6.0 -升级内容: - - prober模块支持nginx、elasticsearch、prometheus的监控采集,prometheus转换时姑且干掉了 Histogram 和 Summary - - 告警消息中节点挂载关系做了去重处理 -升级方法: - - 替换n9e-prober n9e-monapi二进制 - -3.7.0 -升级内容: - - 调整session清理频率 - - 新增zookeeper、tengine、rabbitmq、haproxy、ping、telnet相关采集工具 - - bugfix:集群部署的时候,多个redis实例,judge只能识别最后一个实例的问题 -升级方法: - - sql/n9e_rdb-v3.7.0.sql 有个新的表结构,需要导入一下 - - 替换n9e-rdb n9e-prober n9e-judge n9e-monapi二进制,前端没有升级 - - 将etc/plugins里zookeeper.yml,tengine.yml等新增的yml文件复制到配置文件里 - -3.7.1 -升级内容: - - prober采集增加dryrun测试方法,可以测试是否真的能采集到数据 - - 增加dns_query插件,对dns做监控 - - 内置大盘,增加n9e内置模块大盘 - - 如果存储使用m3,支持在transfer配置一次查询每条线最多返回的原始点数 - - 日志监控,可以把最后一条日志放到extra字段,报警的时候可以展示,需要升级n9e-agent n9e-monapi - - 修复agent对进程监控采集的bug,进程cpu使用采集的不准确 - - 修改告警策略配置多个团队的时候不生效的问题 - - monapi支持一个新的timestamp格式 -升级方法: - - sql/n9e_mon-v3.7.1.sql变更了表结构,需要执行一下 - - 将etc/plugins里的dns_query.yml放到生产环境的etc/plugins目录下 - - 将etc/screen/n9e_modules放到生产环境的etc/screen目录下 - - 替换n9e-rdb n9e-prober n9e-monapi n9e-transfer n9e-agent二进制 - -3.8.0 -升级内容: - - monapi优化告警策略中用户信息补全逻辑 - - rdb新增接口,查询项目下用户拥有的资源权限点 - - transfer查询索引接口支持指定时间范围 - - prober去掉组件采集默认的白名单设置 -升级方法: - - 替换n9e-rdb n9e-prober n9e-monapi n9e-transfer二进制 - - 将etc/password-changed-email.tpl放到生产环境的etc目录下 - -4.0.0 -升级内容: - - 服务端模块合并为一个模块 - - agentd和server的调用全部走rpc - -重新安装:见 https://n9e.didiyun.com/v4/docs/install/ - -升级方法: - - 使用新的etc替换掉原来的etc - - 使用etc/nginx.conf替换原来的nginx.conf - - n9e-prober替换旧的n9e-prober - - n9e-agentd替换n9e-agent - - n9e-server替换n9e-rdb、n9e-ams、n9e-job、n9e-monapi、n9e-transfer、n9e-judge - -4.0.1 -升级内容: - - 修复消息通知的问题 - -重新安装:见 https://n9e.didiyun.com/v4/docs/install/ - -升级方法: - - 将 *.tpl 文件放到 etc/tpl 下 - - 替换etc/server.yml - - 替换n9e-server - -4.0.2 -升级内容: - - 优化告警接收人补全逻辑 - - 增加pospostgresql监控插件 - -重新安装:见 https://n9e.didiyun.com/v4/docs/install/ - -升级方法: - - 替换n9e-server n9e-prober - -4.0.3 -升级内容: - - 修复nodata恢复告警重复问题 - -升级方法: - - 替换n9e-server - -5.0.0-rc1 -升级内容: - - 发布v5预览版 - -部署方式: - - 见文档 https://n9e.didiyun.com/docs/install/ - -5.0.0-rc2 -升级内容: - - 修复若干问题 - - 新增告警策略,监控大盘导入、导出和内置模板功能 - - 新增概览页面 - -部署方式: - - 见文档 https://n9e.didiyun.com/docs/install/ \ No newline at end of file diff --git a/config/config.go b/config/config.go deleted file mode 100644 index 8e39e47f..00000000 --- a/config/config.go +++ /dev/null @@ -1,176 +0,0 @@ -package config - -import ( - "bytes" - "fmt" - "net" - "os" - "strings" - - "github.com/spf13/viper" - "github.com/toolkits/pkg/file" - - 
"github.com/didi/nightingale/v5/backend" - "github.com/didi/nightingale/v5/models" - "github.com/didi/nightingale/v5/pkg/i18n" - "github.com/didi/nightingale/v5/pkg/iconf" - "github.com/didi/nightingale/v5/pkg/ilog" -) - -type ConfigStruct struct { - Logger ilog.Config `yaml:"logger"` - HTTP httpSection `yaml:"http"` - RPC rpcSection `yaml:"rpc"` - LDAP models.LdapSection `yaml:"ldap"` - MySQL models.MysqlSection `yaml:"mysql"` - Heartbeat heartbeatSection `yaml:"heartbeat"` - I18N i18n.Config `yaml:"i18n"` - Judge judgeSection `yaml:"judge"` - Alert alertSection `yaml:"alert"` - Trans transSection `yaml:"trans"` - ContactKeys []contactKey `yaml:"contactKeys"` - NotifyChannels []string `yaml:"notifyChannels"` - Tpl tplSection `yaml:"tpl"` -} - -type tplSection struct { - AlertRulePath string `yaml:"alertRulePath"` - DashboardPath string `yaml:"dashboardPath"` -} - -type alertSection struct { - NotifyScriptPath string `yaml:"notifyScriptPath"` - NotifyScriptConcurrency int `yaml:"notifyScriptConcurrency"` - MutedAlertPersist bool `yaml:"mutedAlertPersist"` -} - -type transSection struct { - Enable bool `yaml:"enable"` - Backend backend.BackendSection `yaml:"backend"` -} - -type judgeSection struct { - ReadBatch int `yaml:"readBatch"` - ConnTimeout int `yaml:"connTimeout"` - CallTimeout int `yaml:"callTimeout"` - WriterNum int `yaml:"writerNum"` - ConnMax int `yaml:"connMax"` - ConnIdle int `yaml:"connIdle"` -} - -type heartbeatSection struct { - IP string `yaml:"ip"` - LocalAddr string `yaml:"-"` - Interval int64 `yaml:"interval"` -} - -type httpSection struct { - Mode string `yaml:"mode"` - Access bool `yaml:"access"` - Listen string `yaml:"listen"` - Pprof bool `yaml:"pprof"` - CookieName string `yaml:"cookieName"` - CookieDomain string `yaml:"cookieDomain"` - CookieSecure bool `yaml:"cookieSecure"` - CookieHttpOnly bool `yaml:"cookieHttpOnly"` - CookieMaxAge int `yaml:"cookieMaxAge"` - CookieSecret string `yaml:"cookieSecret"` - CsrfSecret string `yaml:"csrfSecret"` -} - -type rpcSection struct { - Listen string `yaml:"listen"` -} - -type contactKey struct { - Label string `yaml:"label" json:"label"` - Key string `yaml:"key" json:"key"` -} - -var Config *ConfigStruct - -func Parse() error { - ymlFile := iconf.GetYmlFile("server") - if ymlFile == "" { - return fmt.Errorf("configuration file of server not found") - } - - bs, err := file.ReadBytes(ymlFile) - if err != nil { - return fmt.Errorf("cannot read yml[%s]: %v", ymlFile, err) - } - - viper.SetConfigType("yaml") - err = viper.ReadConfig(bytes.NewBuffer(bs)) - if err != nil { - return fmt.Errorf("cannot read yml[%s]: %v", ymlFile, err) - } - - // default value settings - viper.SetDefault("i18n.lang", "zh") - viper.SetDefault("heartbeat.interval", 1000) - viper.SetDefault("judge.readBatch", 2000) - viper.SetDefault("judge.connTimeout", 2000) - viper.SetDefault("judge.callTimeout", 5000) - viper.SetDefault("judge.writerNum", 256) - viper.SetDefault("judge.connMax", 2560) - viper.SetDefault("judge.connIdle", 256) - viper.SetDefault("alert.notifyScriptPath", "./etc/script/notify.py") - viper.SetDefault("alert.notifyScriptConcurrency", 200) - viper.SetDefault("alert.mutedAlertPersist", true) - viper.SetDefault("trans.backend.prometheus.lookbackDeltaMinute", 2) - viper.SetDefault("trans.backend.prometheus.maxConcurrentQuery", 30) - viper.SetDefault("trans.backend.prometheus.maxSamples", 50000000) - viper.SetDefault("trans.backend.prometheus.maxFetchAllSeriesLimitMinute", 5) - viper.SetDefault("trans.backend.prometheus.slowLogRecordSecond", 
3) - viper.SetDefault("trans.backend.prometheus.defaultFetchSeriesQl", `{__name__=~"system.*"}`) - viper.SetDefault("tpl.alertRulePath", "./etc/alert_rule") - viper.SetDefault("tpl.dashboardPath", "./etc/dashboard") - - err = viper.Unmarshal(&Config) - if err != nil { - return fmt.Errorf("cannot read yml[%s]: %v", ymlFile, err) - } - - fmt.Println("config.file:", ymlFile) - - if Config.Heartbeat.IP == "" { - // auto detect - Config.Heartbeat.IP = fmt.Sprint(GetOutboundIP()) - - if Config.Heartbeat.IP == "" { - fmt.Println("heartbeat ip auto got is blank") - os.Exit(1) - } - - } - // 用户在配置文件中指定了heartbeat.ip ,用于本机没有网络,下面的报错,那么需要将Config.Heartbeat.LocalAddr设置一下 - // auto get outbound ip fail: dial udp 8.8.8.8:80: connect: network is unreachable - - port := strings.Split(Config.RPC.Listen, ":")[1] - Config.Heartbeat.LocalAddr = Config.Heartbeat.IP + ":" + port - - // 正常情况肯定不是127.0.0.1,但是,如果就是单机部署,并且这个机器没有网络,比如本地调试并且本机没网的时候 - // if Config.Heartbeat.IP == "127.0.0.1" { - // fmt.Println("heartbeat ip is 127.0.0.1 and it is useless, so, exit") - // os.Exit(1) - // } - - fmt.Println("heartbeat.ip:", Config.Heartbeat.IP) - fmt.Printf("heartbeat.interval: %dms\n", Config.Heartbeat.Interval) - return nil -} - -// Get preferred outbound ip of this machine -func GetOutboundIP() net.IP { - conn, err := net.Dial("udp", "8.8.8.8:80") - if err != nil { - fmt.Println("auto get outbound ip fail:", err) - os.Exit(1) - } - defer conn.Close() - - localAddr := conn.LocalAddr().(*net.UDPAddr) - - return localAddr.IP -} diff --git a/config/const.go b/config/const.go deleted file mode 100644 index b452bf26..00000000 --- a/config/const.go +++ /dev/null @@ -1,6 +0,0 @@ -package config - -// Server周期性去数据库心跳,给自己起的名字 -const EndpointName = "server_rpc" - -var Version = "not specified" diff --git a/config/i18n.go b/config/i18n.go deleted file mode 100644 index c50047f9..00000000 --- a/config/i18n.go +++ /dev/null @@ -1,71 +0,0 @@ -package config - -import "github.com/didi/nightingale/v5/pkg/i18n" - -var ( - dict = map[string]string{ - "Login fail, check your username and password": "登录失败,请检查您的用户名和密码", - "Internal server error, try again later please": "系统内部错误,请稍后再试", - "Each user has at most two tokens": "每个用户至多创建两个密钥", - "No such token": "密钥不存在", - "Username is blank": "用户名不能为空", - "Username has invalid characters": "用户名含有非法字符", - "Nickname has invalid characters": "用户昵称含有非法字符", - "Phone invalid": "手机号格式有误", - "Email invalid": "邮箱格式有误", - "Incorrect old password": "旧密码错误", - "Username %s already exists": "用户名(%s)已存在", - "No such user": "用户不存在", - "UserGroup %s already exists": "用户组(%s)已存在", - "Group name has invalid characters": "分组名称含有非法字符", - "Group note has invalid characters": "分组备注含有非法字符", - "No such user group": "用户组不存在", - "Classpath path has invalid characters": "机器分组路径含有非法字符", - "Classpath note has invalid characters": "机器分组路径备注含有非法字符", - "There are still resources under the classpath": "机器分组路径下仍然挂有资源", - "There are still collect rules under the classpath": "机器分组路径下仍然存在采集策略", - "No such classpath": "机器分组路径不存在", - "Classpath %s already exists": "机器分组路径(%s)已存在", - "Preset classpath %s cannot delete": "内置机器分组(%s)不允许删除", - "No such mute config": "此屏蔽配置不存在", - "DashboardGroup name has invalid characters": "大盘分组名称含有非法字符", - "DashboardGroup name is blank": "大盘分组名称为空", - "DashboardGroup %s already exists": "大盘分组(%s)已存在", - "No such dashboard group": "大盘分组不存在", - "Dashboard name has invalid characters": "大盘名称含有非法字符", - "Dashboard %s already exists": "监控大盘(%s)已存在", - "ChartGroup name has invalid characters": "图表分组名称含有非法字符", 
- "No such dashboard": "监控大盘不存在", - "No such chart group": "图表分组不存在", - "No such chart": "图表不存在", - "There are still dashboards under the group": "分组下面仍然存在监控大盘,请先从组内移出", - "AlertRuleGroup name has invalid characters": "告警规则分组含有非法字符", - "AlertRuleGroup %s already exists": "告警规则分组(%s)已存在", - "There are still alert rules under the group": "分组下面仍然存在告警规则", - "AlertRule name has invalid characters": "告警规则含有非法字符", - "No such alert rule": "告警规则不存在", - "No such alert rule group": "告警规则分组不存在", - "No such alert event": "告警事件不存在", - "Alert rule %s already exists": "告警规则(%s)已存在", - "No such collect rule": "采集规则不存在", - "Decoded metric description empty": "导入的指标释义列表为空", - "User disabled": "用户已被禁用", - "Tags(%s) invalid": "标签(%s)格式不合法", - "Resource filter(Func:%s)'s param invalid": "资源过滤条件(函数:%s)参数不合法(为空或包含空格都不合法)", - "Tags filter(Func:%s)'s param invalid": "标签过滤条件(函数:%s)参数不合法(为空或包含空格都不合法)", - "Regexp: %s cannot be compiled": "正则表达式(%s)不合法,无法编译", - "AppendTags(%s) invalid": "附件标签(%s)格式不合法", - "Regexp %s matching failed": "正则表达式 %s 匹配失败", - "Regexp %s matched, but cannot get substring()": "主正则 %s 匹配成功,但无法匹配到子串", - "TagKey or TagValue contains illegal characters[:,/=\r\n\t]": "标签KEY或者标签值包含非法字符串[:,/=\r\n\t]", - "Resource cannot delete in preset classpath": "预置分组不能删除资源", - "No such resource %s": "不存在该资源(%s)", - } - langDict = map[string]map[string]string{ - "zh": dict, - } -) - -func init() { - i18n.DictRegister(langDict) -} diff --git a/docker/.dockerignore b/docker/.dockerignore new file mode 100644 index 00000000..128291fa --- /dev/null +++ b/docker/.dockerignore @@ -0,0 +1,7 @@ +ibexetc +initsql +mysqletc +n9eetc +prometc +build.sh +docker-compose.yaml diff --git a/docker/Dockerfile b/docker/Dockerfile new file mode 100644 index 00000000..c9667d24 --- /dev/null +++ b/docker/Dockerfile @@ -0,0 +1,10 @@ +FROM ubuntu:21.04 + +WORKDIR /app +ADD n9e /app +RUN chmod +x n9e + +EXPOSE 19000 +EXPOSE 18000 + +CMD ["/app/n9e", "-h"] \ No newline at end of file diff --git a/docker/build.sh b/docker/build.sh new file mode 100755 index 00000000..88ca29d4 --- /dev/null +++ b/docker/build.sh @@ -0,0 +1,15 @@ +#!/bin/sh + +if [ $# -ne 1 ]; then + echo "$0 " + exit 0 +fi + +tag=$1 + +echo "tag: ${tag}" + +rm -rf n9e && cp ../n9e . && docker build -t nightingale:${tag} . 
+ +docker tag nightingale:${tag} ulric2019/nightingale:${tag} +docker push ulric2019/nightingale:${tag} diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml new file mode 100644 index 00000000..47eaff05 --- /dev/null +++ b/docker/docker-compose.yaml @@ -0,0 +1,175 @@ +version: "3.7" + +networks: + nightingale: + driver: bridge + +services: + mysql: + image: "mysql:5.7" + container_name: mysql + hostname: mysql + restart: always + ports: + - "3306:3306" + environment: + TZ: Asia/Shanghai + MYSQL_ROOT_PASSWORD: 1234 + volumes: + - ./mysqldata:/var/lib/mysql/ + - ./initsql:/docker-entrypoint-initdb.d/ + - ./mysqletc/my.cnf:/etc/my.cnf + networks: + - nightingale + + redis: + image: "redis:6.2" + container_name: redis + hostname: redis + restart: always + ports: + - "6379:6379" + environment: + TZ: Asia/Shanghai + networks: + - nightingale + + prometheus: + image: prom/prometheus + container_name: prometheus + hostname: prometheus + restart: always + environment: + TZ: Asia/Shanghai + volumes: + - ./prometc:/etc/prometheus + ports: + - "9090:9090" + networks: + - nightingale + command: + - "--config.file=/etc/prometheus/prometheus.yml" + - "--storage.tsdb.path=/prometheus" + - "--web.console.libraries=/usr/share/prometheus/console_libraries" + - "--web.console.templates=/usr/share/prometheus/consoles" + - "--enable-feature=remote-write-receiver" + - "--query.lookback-delta=2m" + + ibex: + image: ulric2019/ibex:0.2 + container_name: ibex + hostname: ibex + restart: always + environment: + GIN_MODE: release + TZ: Asia/Shanghai + ports: + - "10090:10090" + - "20090:20090" + volumes: + - ./ibexetc:/app/etc + networks: + - nightingale + depends_on: + - mysql + links: + - mysql:mysql + command: + - "/app/ibex" + - "server" + + nwebapi: + image: ulric2019/nightingale:0.4 + container_name: nwebapi + hostname: nwebapi + restart: always + environment: + GIN_MODE: release + TZ: Asia/Shanghai + volumes: + - ./n9eetc:/app/etc + ports: + - "18000:18000" + networks: + - nightingale + depends_on: + - mysql + - redis + - prometheus + - ibex + links: + - mysql:mysql + - redis:redis + - prometheus:prometheus + - ibex:ibex + command: + - "/app/n9e" + - "webapi" + + nserver: + image: ulric2019/nightingale:0.4 + container_name: nserver + hostname: nserver + restart: always + environment: + GIN_MODE: release + TZ: Asia/Shanghai + volumes: + - ./n9eetc:/app/etc + ports: + - "19000:19000" + networks: + - nightingale + depends_on: + - mysql + - redis + - prometheus + - ibex + links: + - mysql:mysql + - redis:redis + - prometheus:prometheus + - ibex:ibex + command: + - "/app/n9e" + - "server" + + telegraf: + image: "telegraf:1.20.3" + container_name: "telegraf" + hostname: "telegraf01" + restart: always + environment: + TZ: Asia/Shanghai + volumes: + - ./telegrafetc/telegraf.conf:/etc/telegraf/telegraf.conf + ports: + - "8125:8125/udp" + - "8092:8092/udp" + - "8094:8094/tcp" + networks: + - nightingale + depends_on: + - nserver + links: + - nserver:nserver + + agentd: + image: ulric2019/ibex:0.2 + container_name: agentd + hostname: agentd + restart: always + environment: + GIN_MODE: release + TZ: Asia/Shanghai + volumes: + - ./ibexetc:/app/etc + networks: + - nightingale + depends_on: + - ibex + links: + - ibex:ibex + command: + - "/app/ibex" + - "agentd" diff --git a/docker/ibexetc/agentd.conf b/docker/ibexetc/agentd.conf new file mode 100644 index 00000000..2789d02d --- /dev/null +++ b/docker/ibexetc/agentd.conf @@ -0,0 +1,38 @@ +# debug, release +RunMode = "release" + +# task meta storage dir +MetaDir 
= "./meta" + +[HTTP] +Enable = true +# http listening address +Host = "0.0.0.0" +# http listening port +Port = 2090 +# https cert file path +CertFile = "" +# https key file path +KeyFile = "" +# whether print access log +PrintAccessLog = true +# whether enable pprof +PProf = false +# http graceful shutdown timeout, unit: s +ShutdownTimeout = 30 +# max content length: 64M +MaxContentLength = 67108864 +# http server read timeout, unit: s +ReadTimeout = 20 +# http server write timeout, unit: s +WriteTimeout = 40 +# http server idle timeout, unit: s +IdleTimeout = 120 + +[Heartbeat] +# unit: ms +Interval = 1000 +# rpc servers +Servers = ["ibex:20090"] +# $ip or $hostname or specified string +Host = "telegraf01" \ No newline at end of file diff --git a/docker/ibexetc/server.conf b/docker/ibexetc/server.conf new file mode 100644 index 00000000..f8b10af9 --- /dev/null +++ b/docker/ibexetc/server.conf @@ -0,0 +1,97 @@ +# debug, release +RunMode = "release" + +[Log] +# log write dir +Dir = "logs-server" +# log level: DEBUG INFO WARNING ERROR +Level = "DEBUG" +# stdout, stderr, file +Output = "stdout" +# # rotate by time +# KeepHours: 4 +# # rotate by size +# RotateNum = 3 +# # unit: MB +# RotateSize = 256 + +[HTTP] +Enable = true +# http listening address +Host = "0.0.0.0" +# http listening port +Port = 10090 +# https cert file path +CertFile = "" +# https key file path +KeyFile = "" +# whether print access log +PrintAccessLog = true +# whether enable pprof +PProf = false +# http graceful shutdown timeout, unit: s +ShutdownTimeout = 30 +# max content length: 64M +MaxContentLength = 67108864 +# http server read timeout, unit: s +ReadTimeout = 20 +# http server write timeout, unit: s +WriteTimeout = 40 +# http server idle timeout, unit: s +IdleTimeout = 120 + +[BasicAuth] +# using when call apis +ibex = "ibex" + +[RPC] +Listen = "0.0.0.0:20090" + +[Heartbeat] +# auto detect if blank +IP = "" +# unit: ms +Interval = 1000 + +[Output] +# database | remote +ComeFrom = "database" +AgtdPort = 2090 + +[Gorm] +# enable debug mode or not +Debug = false +# mysql postgres +DBType = "mysql" +# unit: s +MaxLifetime = 7200 +# max open connections +MaxOpenConns = 150 +# max idle connections +MaxIdleConns = 50 +# table prefix +TablePrefix = "" + +[MySQL] +# mysql address host:port +Address = "mysql:3306" +# mysql username +User = "root" +# mysql password +Password = "1234" +# database name +DBName = "ibex" +# connection params +Parameters = "charset=utf8mb4&parseTime=True&loc=Local&allowNativePasswords=true" + +[Postgres] +# pg address host:port +Address = "postgres:5432" +# pg user +User = "root" +# pg password +Password = "1234" +# database name +DBName = "ibex" +# ssl mode +SSLMode = "disable" diff --git a/docker/initsql/ibex.sql b/docker/initsql/ibex.sql new file mode 100644 index 00000000..220ee7c1 --- /dev/null +++ b/docker/initsql/ibex.sql @@ -0,0 +1,1362 @@ +set names utf8mb4; + +drop database if exists ibex; +create database ibex; +use ibex; + +CREATE TABLE `task_meta` +( + `id` bigint unsigned NOT NULL AUTO_INCREMENT, + `title` varchar(255) not null default '', + `account` varchar(64) not null, + `batch` int unsigned not null default 0, + `tolerance` int unsigned not null default 0, + `timeout` int unsigned not null default 0, + `pause` varchar(255) not null default '', + `script` text not null, + `args` varchar(512) not null default '', + `creator` varchar(64) not null default '', + `created` timestamp not null default CURRENT_TIMESTAMP, + PRIMARY KEY (`id`), + KEY (`creator`), + KEY (`created`) +) ENGINE 
= InnoDB + DEFAULT CHARSET = utf8mb4; + +/* start|cancel|kill|pause */ +CREATE TABLE `task_action` +( + `id` bigint unsigned not null, + `action` varchar(32) not null, + `clock` bigint not null default 0, + PRIMARY KEY (`id`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE `task_scheduler` +( + `id` bigint unsigned not null, + `scheduler` varchar(128) not null default '', + KEY (`id`, `scheduler`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE `task_scheduler_health` +( + `scheduler` varchar(128) not null, + `clock` bigint not null, + UNIQUE KEY (`scheduler`), + KEY (`clock`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE `task_host_doing` +( + `id` bigint unsigned not null, + `host` varchar(128) not null, + `clock` bigint not null default 0, + `action` varchar(16) not null, + KEY (`id`), + KEY (`host`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_0 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_1 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_2 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_3 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_4 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_5 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_6 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_7 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_8 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) 
not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_9 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_10 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_11 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_12 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_13 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_14 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_15 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_16 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_17 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_18 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_19 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + 
+CREATE TABLE task_host_20 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_21 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_22 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_23 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_24 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_25 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_26 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_27 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_28 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_29 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_30 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_31 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + 
`status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_32 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_33 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_34 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_35 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_36 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_37 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_38 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_39 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_40 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_41 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_42 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT 
CHARSET = utf8mb4; + +CREATE TABLE task_host_43 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_44 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_45 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_46 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_47 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_48 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_49 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_50 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_51 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_52 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_53 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_54 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` 
varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_55 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_56 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_57 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_58 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_59 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_60 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_61 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_62 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_63 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_64 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_65 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) 
ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_66 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_67 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_68 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_69 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_70 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_71 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_72 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_73 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_74 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_75 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_76 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_77 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint 
unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_78 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_79 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_80 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_81 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_82 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_83 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_84 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_85 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_86 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_87 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_88 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, 
`host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_89 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_90 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_91 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_92 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_93 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_94 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_95 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_96 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_97 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_98 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; + +CREATE TABLE task_host_99 +( + `ii` bigint unsigned NOT NULL AUTO_INCREMENT, + `id` bigint unsigned not null, + `host` varchar(128) not null, + `status` varchar(32) not null, + `stdout` text, + `stderr` text, + UNIQUE KEY (`id`, `host`), + PRIMARY KEY (`ii`) +) ENGINE = InnoDB + DEFAULT CHARSET = utf8mb4; diff --git a/docker/initsql/init.sql b/docker/initsql/init.sql new 
file mode 100644 index 00000000..f5f69b8a --- /dev/null +++ b/docker/initsql/init.sql @@ -0,0 +1,3 @@ +GRANT ALL ON *.* TO 'root'@'127.0.0.1' IDENTIFIED BY '1234'; +GRANT ALL ON *.* TO 'root'@'localhost' IDENTIFIED BY '1234'; +GRANT ALL ON *.* TO 'root'@'%' IDENTIFIED BY '1234'; \ No newline at end of file diff --git a/docker/initsql/n9e.sql b/docker/initsql/n9e.sql new file mode 100644 index 00000000..098e6dae --- /dev/null +++ b/docker/initsql/n9e.sql @@ -0,0 +1,372 @@ +set names utf8mb4; + +drop database if exists n9e_v5; +create database n9e_v5; +use n9e_v5; + +CREATE TABLE `user` ( + `id` bigint unsigned not null auto_increment, + `username` varchar(64) not null comment 'login name, cannot rename', + `nickname` varchar(64) not null comment 'display name, chinese name', + `password` varchar(128) not null default '', + `phone` varchar(16) not null default '', + `email` varchar(64) not null default '', + `portrait` varchar(255) not null default '' comment 'portrait image url', + `roles` varchar(255) not null comment 'Admin | Standard | Guest, split by space', + `contacts` varchar(1024) comment 'json e.g. {wecom:xx, dingtalk_robot_token:yy}', + `create_at` bigint not null default 0, + `create_by` varchar(64) not null default '', + `update_at` bigint not null default 0, + `update_by` varchar(64) not null default '', + PRIMARY KEY (`id`), + UNIQUE KEY (`username`) +) ENGINE = InnoDB DEFAULT CHARSET = utf8mb4; + +insert into `user`(id, username, nickname, password, roles, create_at, create_by, update_at, update_by) values(1, 'root', '超管', 'root.2020', 'Admin', unix_timestamp(now()), 'system', unix_timestamp(now()), 'system'); + +CREATE TABLE `user_group` ( + `id` bigint unsigned not null auto_increment, + `name` varchar(128) not null default '', + `note` varchar(255) not null default '', + `create_at` bigint not null default 0, + `create_by` varchar(64) not null default '', + `update_at` bigint not null default 0, + `update_by` varchar(64) not null default '', + PRIMARY KEY (`id`), + KEY (`create_by`), + KEY (`update_at`) +) ENGINE = InnoDB DEFAULT CHARSET = utf8mb4; + +insert into user_group(id, name, create_at, create_by, update_at, update_by) values(1, 'demo-root-group', unix_timestamp(now()), 'root', unix_timestamp(now()), 'root'); + +CREATE TABLE `user_group_member` ( + `group_id` bigint unsigned not null, + `user_id` bigint unsigned not null, + KEY (`group_id`), + KEY (`user_id`) +) ENGINE = InnoDB DEFAULT CHARSET = utf8mb4; + +insert into user_group_member(group_id, user_id) values(1, 1); + +CREATE TABLE `configs` ( + `id` bigint unsigned not null auto_increment, + `ckey` varchar(191) not null, + `cval` varchar(1024) not null default '', + PRIMARY KEY (`id`), + UNIQUE KEY (`ckey`) +) ENGINE = InnoDB DEFAULT CHARSET = utf8mb4; + +CREATE TABLE `role` ( + `id` bigint unsigned not null auto_increment, + `name` varchar(191) not null default '', + `note` varchar(255) not null default '', + PRIMARY KEY (`id`), + UNIQUE KEY (`name`) +) ENGINE = InnoDB DEFAULT CHARSET = utf8mb4; + +insert into `role`(name, note) values('Admin', 'Administrator role'); +insert into `role`(name, note) values('Standard', 'Ordinary user role'); +insert into `role`(name, note) values('Guest', 'Readonly user role'); + +CREATE TABLE `role_operation`( + `role_name` varchar(128) not null, + `operation` varchar(191) not null, + KEY (`role_name`), + KEY (`operation`) +) ENGINE = InnoDB DEFAULT CHARSET = utf8mb4; + +-- Admin is special, who has no concrete operation but can do anything. 
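+-- Annotation (not part of the original dump): the rows below grant Guest the read-only
+-- dashboard menus and Standard the full menu/alert operation set; Admin intentionally gets
+-- no rows here, since per the comment above that role is assumed to bypass operation checks.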
+insert into `role_operation`(role_name, operation) values('Guest', 'menu_prom_dash'); +insert into `role_operation`(role_name, operation) values('Guest', 'menu_target_dash'); +insert into `role_operation`(role_name, operation) values('Guest', 'menu_dashboard'); +insert into `role_operation`(role_name, operation) values('Standard', 'menu_prom_dash'); +insert into `role_operation`(role_name, operation) values('Standard', 'menu_target_dash'); +insert into `role_operation`(role_name, operation) values('Standard', 'menu_dashboard'); +insert into `role_operation`(role_name, operation) values('Standard', 'menu_user'); +insert into `role_operation`(role_name, operation) values('Standard', 'menu_user_group'); +insert into `role_operation`(role_name, operation) values('Standard', 'menu_busi_group'); +insert into `role_operation`(role_name, operation) values('Standard', 'menu_target'); +insert into `role_operation`(role_name, operation) values('Standard', 'menu_alert_rule'); +insert into `role_operation`(role_name, operation) values('Standard', 'menu_alert_mute'); +insert into `role_operation`(role_name, operation) values('Standard', 'menu_alert_subscribe'); +insert into `role_operation`(role_name, operation) values('Standard', 'menu_alert_cur_event'); +insert into `role_operation`(role_name, operation) values('Standard', 'menu_alert_his_event'); + +-- for alert_rule | collect_rule | mute | dashboard grouping +CREATE TABLE `busi_group` ( + `id` bigint unsigned not null auto_increment, + `name` varchar(191) not null, + `create_at` bigint not null default 0, + `create_by` varchar(64) not null default '', + `update_at` bigint not null default 0, + `update_by` varchar(64) not null default '', + PRIMARY KEY (`id`), + UNIQUE KEY (`name`) +) ENGINE = InnoDB DEFAULT CHARSET = utf8mb4; + +insert into busi_group(id, name, create_at, create_by, update_at, update_by) values(1, 'Default Business Group', unix_timestamp(now()), 'root', unix_timestamp(now()), 'root'); + +CREATE TABLE `busi_group_member` ( + `id` bigint unsigned not null auto_increment, + `busi_group_id` bigint not null comment 'busi group id', + `user_group_id` bigint not null comment 'user group id', + `perm_flag` char(2) not null comment 'ro | rw', + PRIMARY KEY (`id`), + KEY (`busi_group_id`), + KEY (`user_group_id`) +) ENGINE = InnoDB DEFAULT CHARSET = utf8mb4; + +insert into busi_group_member(busi_group_id, user_group_id, perm_flag) values(1, 1, "rw"); + +CREATE TABLE `dashboard` ( + `id` bigint unsigned not null auto_increment, + `group_id` bigint not null default 0 comment 'busi group id', + `name` varchar(191) not null, + `tags` varchar(255) not null comment 'split by space', + `configs` varchar(4096) comment 'dashboard variables', + `create_at` bigint not null default 0, + `create_by` varchar(64) not null default '', + `update_at` bigint not null default 0, + `update_by` varchar(64) not null default '', + PRIMARY KEY (`id`), + UNIQUE KEY (`group_id`, `name`) +) ENGINE = InnoDB DEFAULT CHARSET = utf8mb4; + +-- auto create the first subclass 'Default chart group' of dashboard +CREATE TABLE `chart_group` ( + `id` bigint unsigned not null auto_increment, + `dashboard_id` bigint unsigned not null, + `name` varchar(255) not null, + `weight` int not null default 0, + PRIMARY KEY (`id`), + KEY (`dashboard_id`) +) ENGINE = InnoDB DEFAULT CHARSET = utf8mb4; + +CREATE TABLE `chart` ( + `id` bigint unsigned not null auto_increment, + `group_id` bigint unsigned not null comment 'chart group id', + `configs` varchar(8192), + `weight` int not null 
default 0, + PRIMARY KEY (`id`), + KEY (`group_id`) +) ENGINE = InnoDB DEFAULT CHARSET = utf8mb4; + +CREATE TABLE `chart_share` ( + `id` bigint unsigned not null auto_increment, + `cluster` varchar(128) not null, + `configs` varchar(8192), + `create_at` bigint not null default 0, + `create_by` varchar(64) not null default '', + primary key (`id`), + key (`create_at`) +) ENGINE = InnoDB DEFAULT CHARSET = utf8mb4; + +CREATE TABLE `alert_rule` ( + `id` bigint unsigned not null auto_increment, + `group_id` bigint not null default 0 comment 'busi group id', + `cluster` varchar(128) not null, + `name` varchar(255) not null, + `note` varchar(255) not null, + `severity` tinyint(1) not null comment '0:Emergency 1:Warning 2:Notice', + `disabled` tinyint(1) not null comment '0:enabled 1:disabled', + `prom_for_duration` int not null comment 'prometheus for, unit:s', + `prom_ql` varchar(4096) not null comment 'promql', + `prom_eval_interval` int not null comment 'evaluate interval', + `enable_stime` char(5) not null default '00:00', + `enable_etime` char(5) not null default '23:59', + `enable_days_of_week` varchar(32) not null default '' comment 'split by space: 0 1 2 3 4 5 6', + `notify_recovered` tinyint(1) not null comment 'whether notify when recovery', + `notify_channels` varchar(255) not null default '' comment 'split by space: sms voice email dingtalk wecom', + `notify_groups` varchar(255) not null default '' comment 'split by space: 233 43', + `notify_repeat_step` int not null default 0 comment 'unit: min', + `callbacks` varchar(255) not null default '' comment 'split by space: http://a.com/api/x http://a.com/api/y', + `runbook_url` varchar(255), + `append_tags` varchar(255) not null default '' comment 'split by space: service=n9e mod=api', + `create_at` bigint not null default 0, + `create_by` varchar(64) not null default '', + `update_at` bigint not null default 0, + `update_by` varchar(64) not null default '', + PRIMARY KEY (`id`), + KEY (`group_id`), + KEY (`update_at`) +) ENGINE=InnoDB DEFAULT CHARSET = utf8mb4; + +CREATE TABLE `alert_mute` ( + `id` bigint unsigned not null auto_increment, + `group_id` bigint not null default 0 comment 'busi group id', + `cluster` varchar(128) not null, + `tags` varchar(2048) not null default '' comment 'json,map,tagkey->regexp|value', + `cause` varchar(255) not null default '', + `btime` bigint not null default 0 comment 'begin time', + `etime` bigint not null default 0 comment 'end time', + `create_at` bigint not null default 0, + `create_by` varchar(64) not null default '', + PRIMARY KEY (`id`), + KEY (`create_at`), + KEY (`group_id`) +) ENGINE = InnoDB DEFAULT CHARSET = utf8mb4; + +CREATE TABLE `alert_subscribe` ( + `id` bigint unsigned not null auto_increment, + `group_id` bigint not null default 0 comment 'busi group id', + `cluster` varchar(128) not null, + `rule_id` bigint not null default 0, + `tags` varchar(2048) not null default '' comment 'json,map,tagkey->regexp|value', + `redefine_severity` tinyint(1) default 0 comment 'is redefine severity?', + `new_severity` tinyint(1) not null comment '0:Emergency 1:Warning 2:Notice', + `redefine_channels` tinyint(1) default 0 comment 'is redefine channels?', + `new_channels` varchar(255) not null default '' comment 'split by space: sms voice email dingtalk wecom', + `user_group_ids` varchar(250) not null comment 'split by space 1 34 5, notify cc to user_group_ids', + `create_at` bigint not null default 0, + `create_by` varchar(64) not null default '', + `update_at` bigint not null default 0, + `update_by` 
varchar(64) not null default '', + PRIMARY KEY (`id`), + KEY (`update_at`), + KEY (`group_id`) +) ENGINE = InnoDB DEFAULT CHARSET = utf8mb4; + +CREATE TABLE `target` ( + `id` bigint unsigned not null auto_increment, + `group_id` bigint not null default 0 comment 'busi group id', + `cluster` varchar(128) not null comment 'append to alert event as field', + `ident` varchar(191) not null comment 'target id', + `note` varchar(255) not null default '' comment 'append to alert event as field', + `tags` varchar(512) not null default '' comment 'append to series data as tags, split by space, append external space at suffix', + `update_at` bigint not null default 0, + PRIMARY KEY (`id`), + UNIQUE KEY (`ident`), + KEY (`group_id`) +) ENGINE = InnoDB DEFAULT CHARSET = utf8mb4; + +-- case1: target_idents; case2: target_tags +-- CREATE TABLE `collect_rule` ( +-- `id` bigint unsigned not null auto_increment, +-- `group_id` bigint not null default 0 comment 'busi group id', +-- `cluster` varchar(128) not null, +-- `target_idents` varchar(512) not null default '' comment 'ident list, split by space', +-- `target_tags` varchar(512) not null default '' comment 'filter targets by tags, split by space', +-- `name` varchar(191) not null default '', +-- `note` varchar(255) not null default '', +-- `step` int not null, +-- `type` varchar(64) not null comment 'e.g. port proc log plugin', +-- `data` text not null, +-- `append_tags` varchar(255) not null default '' comment 'split by space: e.g. mod=n9e dept=cloud', +-- `create_at` bigint not null default 0, +-- `create_by` varchar(64) not null default '', +-- `update_at` bigint not null default 0, +-- `update_by` varchar(64) not null default '', +-- PRIMARY KEY (`id`), +-- KEY (`group_id`, `type`, `name`) +-- ) ENGINE=InnoDB DEFAULT CHARSET = utf8mb4; + +CREATE TABLE `alert_cur_event` ( + `id` bigint unsigned not null comment 'use alert_his_event.id', + `cluster` varchar(128) not null, + `group_id` bigint unsigned not null comment 'busi group id of rule', + `hash` varchar(64) not null comment 'rule_id + vector_pk', + `rule_id` bigint unsigned not null, + `rule_name` varchar(255) not null, + `rule_note` varchar(512) not null default 'alert rule note', + `severity` tinyint(1) not null comment '0:Emergency 1:Warning 2:Notice', + `prom_for_duration` int not null comment 'prometheus for, unit:s', + `prom_ql` varchar(4096) not null comment 'promql', + `prom_eval_interval` int not null comment 'evaluate interval', + `callbacks` varchar(255) not null default '' comment 'split by space: http://a.com/api/x http://a.com/api/y', + `runbook_url` varchar(255), + `notify_recovered` tinyint(1) not null comment 'whether notify when recovery', + `notify_channels` varchar(255) not null default '' comment 'split by space: sms voice email dingtalk wecom', + `notify_groups` varchar(255) not null default '' comment 'split by space: 233 43', + `notify_repeat_next` bigint not null default 0 comment 'next timestamp to notify, get repeat settings from rule', + `target_ident` varchar(191) not null default '' comment 'target ident, also in tags', + `target_note` varchar(191) not null default '' comment 'target note', + `trigger_time` bigint not null, + `trigger_value` varchar(255) not null, + `tags` varchar(1024) not null default '' comment 'merge data_tags rule_tags, split by ,,', + PRIMARY KEY (`id`), + KEY (`hash`), + KEY (`rule_id`), + KEY (`trigger_time`, `group_id`), + KEY (`notify_repeat_next`) +) ENGINE=InnoDB DEFAULT CHARSET = utf8mb4; + +CREATE TABLE `alert_his_event` ( + `id` bigint 
unsigned not null AUTO_INCREMENT, + `is_recovered` tinyint(1) not null, + `cluster` varchar(128) not null, + `group_id` bigint unsigned not null comment 'busi group id of rule', + `hash` varchar(64) not null comment 'rule_id + vector_pk', + `rule_id` bigint unsigned not null, + `rule_name` varchar(255) not null, + `rule_note` varchar(512) not null default 'alert rule note', + `severity` tinyint(1) not null comment '0:Emergency 1:Warning 2:Notice', + `prom_for_duration` int not null comment 'prometheus for, unit:s', + `prom_ql` varchar(4096) not null comment 'promql', + `prom_eval_interval` int not null comment 'evaluate interval', + `callbacks` varchar(255) not null default '' comment 'split by space: http://a.com/api/x http://a.com/api/y', + `runbook_url` varchar(255), + `notify_recovered` tinyint(1) not null comment 'whether notify when recovery', + `notify_channels` varchar(255) not null default '' comment 'split by space: sms voice email dingtalk wecom', + `notify_groups` varchar(255) not null default '' comment 'split by space: 233 43', + `target_ident` varchar(191) not null default '' comment 'target ident, also in tags', + `target_note` varchar(191) not null default '' comment 'target note', + `trigger_time` bigint not null, + `trigger_value` varchar(255) not null, + `tags` varchar(1024) not null default '' comment 'merge data_tags rule_tags, split by ,,', + PRIMARY KEY (`id`), + KEY (`hash`), + KEY (`rule_id`), + KEY (`trigger_time`, `group_id`) +) ENGINE=InnoDB DEFAULT CHARSET = utf8mb4; + +CREATE TABLE `task_tpl` +( + `id` int unsigned NOT NULL AUTO_INCREMENT, + `group_id` int unsigned not null comment 'busi group id', + `title` varchar(255) not null default '', + `account` varchar(64) not null, + `batch` int unsigned not null default 0, + `tolerance` int unsigned not null default 0, + `timeout` int unsigned not null default 0, + `pause` varchar(255) not null default '', + `script` text not null, + `args` varchar(512) not null default '', + `tags` varchar(255) not null default '' comment 'split by space', + `create_at` bigint not null default 0, + `create_by` varchar(64) not null default '', + `update_at` bigint not null default 0, + `update_by` varchar(64) not null default '', + PRIMARY KEY (`id`), + KEY (`group_id`) +) ENGINE = InnoDB DEFAULT CHARSET = utf8mb4; + +CREATE TABLE `task_tpl_host` +( + `ii` int unsigned NOT NULL AUTO_INCREMENT, + `id` int unsigned not null comment 'task tpl id', + `host` varchar(128) not null comment 'ip or hostname', + PRIMARY KEY (`ii`), + KEY (`id`, `host`) +) ENGINE = InnoDB DEFAULT CHARSET = utf8mb4; + +CREATE TABLE `task_record` +( + `id` bigint unsigned not null comment 'ibex task id', + `group_id` bigint not null comment 'busi group id', + `ibex_address` varchar(128) not null, + `ibex_auth_user` varchar(128) not null default '', + `ibex_auth_pass` varchar(128) not null default '', + `title` varchar(255) not null default '', + `account` varchar(64) not null, + `batch` int unsigned not null default 0, + `tolerance` int unsigned not null default 0, + `timeout` int unsigned not null default 0, + `pause` varchar(255) not null default '', + `script` text not null, + `args` varchar(512) not null default '', + `create_at` bigint not null default 0, + `create_by` varchar(64) not null default '', + PRIMARY KEY (`id`), + KEY (`create_at`, `group_id`), + KEY (`create_by`) +) ENGINE = InnoDB DEFAULT CHARSET = utf8mb4; diff --git a/docker/mysqletc/my.cnf b/docker/mysqletc/my.cnf new file mode 100644 index 00000000..0ac96821 --- /dev/null +++ 
b/docker/mysqletc/my.cnf @@ -0,0 +1,5 @@ +[mysqld] +pid-file = /var/run/mysqld/mysqld.pid +socket = /var/run/mysqld/mysqld.sock +datadir = /var/lib/mysql +bind-address = 0.0.0.0 \ No newline at end of file diff --git a/docker/n9e b/docker/n9e new file mode 100755 index 00000000..990fa26d Binary files /dev/null and b/docker/n9e differ diff --git a/docker/n9eetc/metrics.yaml b/docker/n9eetc/metrics.yaml new file mode 100644 index 00000000..59887bcc --- /dev/null +++ b/docker/n9eetc/metrics.yaml @@ -0,0 +1 @@ +cpu_usage_idle: CPU空闲率(单位:%) \ No newline at end of file diff --git a/docker/n9eetc/script/notify.py b/docker/n9eetc/script/notify.py new file mode 100755 index 00000000..f6d2115a --- /dev/null +++ b/docker/n9eetc/script/notify.py @@ -0,0 +1,162 @@ +#!/usr/bin/python +# -*- coding: UTF-8 -*- +import sys +import json +import urllib2 +import smtplib +from email.mime.text import MIMEText + +notify_channel_funcs = { + "email":"email", + "sms":"sms", + "voice":"voice", + "dingtalk":"dingtalk", + "wecom":"wecom" +} + +mail_host = "smtp.163.com" +mail_port = 994 +mail_user = "ulricqin" +mail_pass = "password" +mail_from = "ulricqin@163.com" + +class Sender(object): + @classmethod + def send_email(cls, payload): + users = payload.get('event').get("notify_users_obj") + + emails = {} + for u in users: + if u.get("email"): + emails[u.get("email")] = 1 + + if not emails: + return + + recipients = emails.keys() + mail_body = payload.get('tpls').get("mailbody.tpl", "mailbody.tpl not found") + message = MIMEText(mail_body, 'html', 'utf-8') + message['From'] = mail_from + message['To'] = ", ".join(recipients) + message["Subject"] = payload.get('tpls').get("subject.tpl", "subject.tpl not found") + + try: + smtp = smtplib.SMTP_SSL(mail_host, mail_port) + smtp.login(mail_user, mail_pass) + smtp.sendmail(mail_from, recipients, message.as_string()) + smtp.close() + except smtplib.SMTPException, error: + print(error) + + @classmethod + def send_wecom(cls, payload): + users = payload.get('event').get("notify_users_obj") + + tokens = {} + + for u in users: + contacts = u.get("contacts") + if contacts.get("wecom_robot_token", ""): + tokens[contacts.get("wecom_robot_token", "")] = 1 + + opener = urllib2.build_opener(urllib2.HTTPHandler()) + method = "POST" + + for t in tokens: + url = "https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key={}".format(t) + body = { + "msgtype": "markdown", + "markdown": { + "content": payload.get('tpls').get("wecom.tpl", "wecom.tpl not found") + } + } + request = urllib2.Request(url, data=json.dumps(body)) + request.add_header("Content-Type",'application/json;charset=utf-8') + request.get_method = lambda: method + try: + connection = opener.open(request) + print(connection.read()) + except urllib2.HTTPError, error: + print(error) + + @classmethod + def send_dingtalk(cls, payload): + users = payload.get('event').get("notify_users_obj") + + tokens = {} + phones = {} + + for u in users: + if u.get("phone"): + phones[u.get("phone")] = 1 + + contacts = u.get("contacts") + if contacts.get("dingtalk_robot_token", ""): + tokens[contacts.get("dingtalk_robot_token", "")] = 1 + + opener = urllib2.build_opener(urllib2.HTTPHandler()) + method = "POST" + + for t in tokens: + url = "https://oapi.dingtalk.com/robot/send?access_token={}".format(t) + body = { + "msgtype": "text", + "text": { + "content": payload.get('tpls').get("dingtalk.tpl", "dingtalk.tpl not found") + }, + "at": { + "atMobiles": phones.keys(), + "isAtAll": False + } + } + request = urllib2.Request(url, data=json.dumps(body)) + 
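            # the JSON body built above sends the rendered dingtalk.tpl content as plain text
            # and @-mentions the collected phone numbers via at.atMobiles (isAtAll stays false)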
request.add_header("Content-Type",'application/json;charset=utf-8') + request.get_method = lambda: method + try: + connection = opener.open(request) + print(connection.read()) + except urllib2.HTTPError, error: + print(error) + + @classmethod + def send_sms(cls, payload): + users = payload.get('event').get("notify_users_obj") + phones = {} + for u in users: + if u.get("phone"): + phones[u.get("phone")] = 1 + if phones: + print("send_sms not implemented, phones: {}".format(phones.keys())) + + @classmethod + def send_voice(cls, payload): + users = payload.get('event').get("notify_users_obj") + phones = {} + for u in users: + if u.get("phone"): + phones[u.get("phone")] = 1 + if phones: + print("send_voice not implemented, phones: {}".format(phones.keys())) + +def main(): + payload = json.load(sys.stdin) + with open(".payload", 'w') as f: + f.write(json.dumps(payload, indent=4)) + for ch in payload.get('event').get('notify_channels'): + send_func_name = "send_{}".format(notify_channel_funcs.get(ch.strip())) + if not hasattr(Sender, send_func_name): + print("function: {} not found", send_func_name) + continue + send_func = getattr(Sender, send_func_name) + send_func(payload) + +def hello(): + print("hello nightingale") + +if __name__ == "__main__": + if len(sys.argv) == 1: + main() + elif sys.argv[1] == "hello": + hello() + else: + print("I am confused") \ No newline at end of file diff --git a/docker/n9eetc/server.conf b/docker/n9eetc/server.conf new file mode 100644 index 00000000..40ccac96 --- /dev/null +++ b/docker/n9eetc/server.conf @@ -0,0 +1,188 @@ +# debug, release +RunMode = "release" + +# my cluster name +ClusterName = "Default" + +[Log] +# log write dir +Dir = "logs" +# log level: DEBUG INFO WARNING ERROR +Level = "INFO" +# stdout, stderr, file +Output = "stdout" +# # rotate by time +# KeepHours: 4 +# # rotate by size +# RotateNum = 3 +# # unit: MB +# RotateSize = 256 + +[HTTP] +# http listening address +Host = "0.0.0.0" +# http listening port +Port = 19000 +# https cert file path +CertFile = "" +# https key file path +KeyFile = "" +# whether print access log +PrintAccessLog = false +# whether enable pprof +PProf = false +# http graceful shutdown timeout, unit: s +ShutdownTimeout = 30 +# max content length: 64M +MaxContentLength = 67108864 +# http server read timeout, unit: s +ReadTimeout = 20 +# http server write timeout, unit: s +WriteTimeout = 40 +# http server idle timeout, unit: s +IdleTimeout = 120 + +# [BasicAuth] +# user002 = "ccc26da7b9aba533cbb263a36c07dcc9" + +[Heartbeat] +# auto detect if blank +IP = "" +# unit ms +Interval = 1000 + +[Alerting] +NotifyScriptPath = "./etc/script/notify.py" +NotifyConcurrency = 100 + +[Alerting.RedisPub] +Enable = false +# complete redis key: ${ChannelPrefix} + ${Cluster} +ChannelPrefix = "/alerts/" + +[NoData] +Metric = "target_up" +# unit: second +Interval = 15 + +[Ibex] +# callback: ${ibex}/${tplid}/${host} +Address = "ibex:10090" +# basic auth +BasicAuthUser = "ibex" +BasicAuthPass = "ibex" +# unit: ms +Timeout = 3000 + +[Redis] +# address, ip:port +Address = "redis:6379" +# requirepass +Password = "" +# # db +# DB = 0 + +[Gorm] +# enable debug mode or not +Debug = false +# mysql postgres +DBType = "mysql" +# unit: s +MaxLifetime = 7200 +# max open connections +MaxOpenConns = 150 +# max idle connections +MaxIdleConns = 50 +# table prefix +TablePrefix = "" +# enable auto migrate or not +EnableAutoMigrate = false + +[MySQL] +# mysql address host:port +Address = "mysql:3306" +# mysql username +User = "root" +# mysql password +Password = 
"1234" +# database name +DBName = "n9e_v5" +# connection params +Parameters = "charset=utf8mb4&parseTime=True&loc=Local&allowNativePasswords=true" + +[Postgres] +# pg address host:port +Address = "postgres:5432" +# pg user +User = "root" +# pg password +Password = "1234" +# database name +DBName = "n9e_v5" +# ssl mode +SSLMode = "disable" + +[Reader] +# prometheus base url +Url = "http://prometheus:9090" +# Basic auth username +BasicAuthUser = "" +# Basic auth password +BasicAuthPass = "" +# timeout settings, unit: ms +Timeout = 30000 +DialTimeout = 10000 +TLSHandshakeTimeout = 30000 +ExpectContinueTimeout = 1000 +IdleConnTimeout = 90000 +# time duration, unit: ms +KeepAlive = 30000 +MaxConnsPerHost = 0 +MaxIdleConns = 100 +MaxIdleConnsPerHost = 10 + +[WriterOpt] +# queue max size +QueueMaxSize = 10000000 +# once pop samples number from queue +QueuePopSize = 2000 +# unit: ms +SleepInterval = 50 + +[[Writers]] +Name = "prom" +Url = "http://prometheus:9090/api/v1/write" +# Basic auth username +BasicAuthUser = "" +# Basic auth password +BasicAuthPass = "" +# timeout settings, unit: ms +Timeout = 30000 +DialTimeout = 10000 +TLSHandshakeTimeout = 30000 +ExpectContinueTimeout = 1000 +IdleConnTimeout = 90000 +# time duration, unit: ms +KeepAlive = 30000 +MaxConnsPerHost = 0 +MaxIdleConns = 100 +MaxIdleConnsPerHost = 100 + +# [[Writers]] +# Name = "m3db" +# Url = "http://m3db:7201/api/v1/prom/remote/write" +# # Basic auth username +# BasicAuthUser = "" +# # Basic auth password +# BasicAuthPass = "" +# timeout settings, unit: ms +# Timeout = 30000 +# DialTimeout = 10000 +# TLSHandshakeTimeout = 30000 +# ExpectContinueTimeout = 1000 +# IdleConnTimeout = 90000 +# # time duration, unit: ms +# KeepAlive = 30000 +# MaxConnsPerHost = 0 +# MaxIdleConns = 100 +# MaxIdleConnsPerHost = 100 \ No newline at end of file diff --git a/docker/n9eetc/template/dingtalk.tpl b/docker/n9eetc/template/dingtalk.tpl new file mode 100644 index 00000000..95019eba --- /dev/null +++ b/docker/n9eetc/template/dingtalk.tpl @@ -0,0 +1,6 @@ +级别状态: S{{.Severity}} {{if .IsRecovered}}Recovered{{else}}Triggered{{end}} +规则名称: {{.RuleName}}{{if .RuleNote}} +规则备注: {{.RuleNote}}{{end}} +监控指标: {{.TagsJSON}} +触发时间: {{timeformat .TriggerTime}} +触发时值: {{.TriggerValue}} \ No newline at end of file diff --git a/etc/script/tpl/mail.tpl b/docker/n9eetc/template/mailbody.tpl similarity index 75% rename from etc/script/tpl/mail.tpl rename to docker/n9eetc/template/mailbody.tpl index bb734301..a45e35b6 100644 --- a/etc/script/tpl/mail.tpl +++ b/docker/n9eetc/template/mailbody.tpl @@ -129,7 +129,7 @@
(mailbody.tpl hunk content; the surrounding HTML table markup did not survive extraction, so only the changed template rows are reconstructed below.)

-        {{Sname}}
+        {{.RuleName}}

@@ -138,61 +138,45 @@
The old `% if IsAlert` / `% if IsMachineDep` blocks become Go template conditionals, and the rows change as follows:

-级别状态: {{Status}}
+{{if .IsRecovered}}级别状态: S{{.Severity}} Recovered{{else}}级别状态: S{{.Severity}} Triggered{{end}}
-告警设备: {{Ident}}
+策略备注: {{.RuleNote}}
-所属分组: {{Classpath}}
+设备备注: {{.TargetNote}}
-监控指标: {{Metric}}
+监控指标: {{.TagsJSON}}
-tags: {{Tags}}
-当前值: {{Value}}
-报警说明: {{ReadableExpression}}
+触发时值: {{.TriggerValue}}
-触发时间: {{TriggerTime}}
+触发时间: {{timeformat .TriggerTime}}
-报警详情: {{Elink}}
-报警策略: {{Slink}}
+PromQL: {{.PromQl}}

@@ -200,11 +184,6 @@
@@ -213,4 +192,4 @@
(markup-only adjustments; their content is not recoverable from this extract)
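The four files under docker/n9eetc/template (dingtalk.tpl, mailbody.tpl, subject.tpl, wecom.tpl) are Go text/template files fed with the alert event. A minimal rendering sketch, assuming a `timeformat` helper that formats unix seconds and event fields mirroring the alert_cur_event columns above (the exact FuncMap and struct the server registers are not shown in this diff):

```go
package main

import (
	"os"
	"text/template"
	"time"
)

// event carries only the fields used by the templates in this sketch.
type event struct {
	RuleName     string
	Severity     int
	IsRecovered  bool
	TagsJSON     string
	TriggerTime  int64
	TriggerValue string
}

func main() {
	funcs := template.FuncMap{
		// assumed helper: unix seconds -> human-readable time
		"timeformat": func(ts int64) string {
			return time.Unix(ts, 0).Format("2006-01-02 15:04:05")
		},
	}

	// same placeholders as docker/n9eetc/template/wecom.tpl
	const tpl = `**级别状态**: {{if .IsRecovered}}S{{.Severity}} Recovered{{else}}S{{.Severity}} Triggered{{end}}
**规则标题**: {{.RuleName}}
**监控指标**: {{.TagsJSON}}
**触发时间**: {{timeformat .TriggerTime}}
**触发时值**: {{.TriggerValue}}`

	t := template.Must(template.New("wecom").Funcs(funcs).Parse(tpl))
	_ = t.Execute(os.Stdout, event{
		RuleName:     "cpu_usage_idle low",
		Severity:     1,
		IsRecovered:  false,
		TagsJSON:     `{"ident":"host01"}`,
		TriggerTime:  time.Now().Unix(),
		TriggerValue: "3.14",
	})
}
```

wecom.tpl and dingtalk.tpl carry the same fields and differ only in the markdown markup they emit, so one data model drives both.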
- + \ No newline at end of file diff --git a/docker/n9eetc/template/subject.tpl b/docker/n9eetc/template/subject.tpl new file mode 100644 index 00000000..ec241bc3 --- /dev/null +++ b/docker/n9eetc/template/subject.tpl @@ -0,0 +1 @@ +{{if .IsRecovered}}Recovered{{else}}Triggered{{end}}: {{.RuleName}} {{.TagsJSON}} \ No newline at end of file diff --git a/docker/n9eetc/template/wecom.tpl b/docker/n9eetc/template/wecom.tpl new file mode 100644 index 00000000..1513cbd5 --- /dev/null +++ b/docker/n9eetc/template/wecom.tpl @@ -0,0 +1,6 @@ +**级别状态**: {{if .IsRecovered}}S{{.Severity}} Recovered{{else}}S{{.Severity}} Triggered{{end}} +**规则标题**: {{.RuleName}}{{if .RuleNote}} +**规则备注**: {{.RuleNote}}{{end}} +**监控指标**: {{.TagsJSON}} +**触发时间**: {{timeformat .TriggerTime}} +**触发时值**: {{.TriggerValue}} \ No newline at end of file diff --git a/docker/n9eetc/webapi.conf b/docker/n9eetc/webapi.conf new file mode 100644 index 00000000..9223bca2 --- /dev/null +++ b/docker/n9eetc/webapi.conf @@ -0,0 +1,166 @@ +# debug, release +RunMode = "release" + +# # custom i18n dict config +# I18N = "./etc/i18n.json" + +# do not change +AdminRole = "Admin" + +# Linkage with notify.py script +NotifyChannels = [ "email", "dingtalk", "wecom" ] + +[[ContactKeys]] +Label = "Wecom Robot Token" +Key = "wecom_robot_token" + +[[ContactKeys]] +Label = "Dingtalk Robot Token" +Key = "dingtalk_robot_token" + +[Log] +# log write dir +Dir = "logs" +# log level: DEBUG INFO WARNING ERROR +Level = "DEBUG" +# stdout, stderr, file +Output = "stdout" +# # rotate by time +# KeepHours: 4 +# # rotate by size +# RotateNum = 3 +# # unit: MB +# RotateSize = 256 + +[HTTP] +# http listening address +Host = "0.0.0.0" +# http listening port +Port = 18000 +# https cert file path +CertFile = "" +# https key file path +KeyFile = "" +# whether print access log +PrintAccessLog = true +# whether enable pprof +PProf = false +# http graceful shutdown timeout, unit: s +ShutdownTimeout = 30 +# max content length: 64M +MaxContentLength = 67108864 +# http server read timeout, unit: s +ReadTimeout = 20 +# http server write timeout, unit: s +WriteTimeout = 40 +# http server idle timeout, unit: s +IdleTimeout = 120 + +[JWTAuth] +# signing key +SigningKey = "5b94a0fd640fe2765af826acfe42d151" +# unit: min +AccessExpired = 1500 +# unit: min +RefreshExpired = 10080 +RedisKeyPrefix = "/jwt/" + +[BasicAuth] +user001 = "ccc26da7b9aba533cbb263a36c07dcc5" + +[LDAP] +Enable = false +Host = "ldap.example.org" +Port = 389 +BaseDn = "dc=example,dc=org" +# AD: manange@example.org +BindUser = "cn=manager,dc=example,dc=org" +BindPass = "*******" +# openldap format e.g. (&(uid=%s)) +# AD format e.g. 
(&(sAMAccountName=%s)) +AuthFilter = "(&(uid=%s))" +CoverAttributes = true +TLS = false +StartTLS = true + +[LDAP.Attributes] +Nickname = "cn" +Phone = "mobile" +Email = "mail" + +[Redis] +# address, ip:port +Address = "redis:6379" +# requirepass +Password = "" +# # db +# DB = 0 + +[Gorm] +# enable debug mode or not +Debug = true +# mysql postgres +DBType = "mysql" +# unit: s +MaxLifetime = 7200 +# max open connections +MaxOpenConns = 150 +# max idle connections +MaxIdleConns = 50 +# table prefix +TablePrefix = "" +# enable auto migrate or not +EnableAutoMigrate = false + +[MySQL] +# mysql address host:port +Address = "mysql:3306" +# mysql username +User = "root" +# mysql password +Password = "1234" +# database name +DBName = "n9e_v5" +# connection params +Parameters = "charset=utf8mb4&parseTime=True&loc=Local&allowNativePasswords=true" + +[Postgres] +# pg address host:port +Address = "postgres:5432" +# pg user +User = "root" +# pg password +Password = "1234" +# database name +DBName = "n9e_v5" +# ssl mode +SSLMode = "disable" + +[[Clusters]] +# Prometheus cluster name +Name = "Default" +# Prometheus APIs base url +Prom = "http://prometheus:9090" +# Basic auth username +BasicAuthUser = "" +# Basic auth password +BasicAuthPass = "" +# timeout settings, unit: ms +Timeout = 30000 +DialTimeout = 10000 +TLSHandshakeTimeout = 30000 +ExpectContinueTimeout = 1000 +IdleConnTimeout = 90000 +# time duration, unit: ms +KeepAlive = 30000 +MaxConnsPerHost = 0 +MaxIdleConns = 100 +MaxIdleConnsPerHost = 100 + +[Ibex] +Address = "http://ibex:10090" +# basic auth +BasicAuthUser = "ibex" +BasicAuthPass = "ibex" +# unit: ms +Timeout = 3000 \ No newline at end of file diff --git a/docker/prometc/prometheus.yml b/docker/prometc/prometheus.yml new file mode 100644 index 00000000..af33d870 --- /dev/null +++ b/docker/prometc/prometheus.yml @@ -0,0 +1,29 @@ +# my global config +global: + scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute. + evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute. + # scrape_timeout is set to the global default (10s). + +# Alertmanager configuration +alerting: + alertmanagers: + - static_configs: + - targets: + # - alertmanager:9093 + +# Load rules once and periodically evaluate them according to the global 'evaluation_interval'. +rule_files: + # - "first_rules.yml" + # - "second_rules.yml" + +# A scrape configuration containing exactly one endpoint to scrape: +# Here it's Prometheus itself. +scrape_configs: + # The job name is added as a label `job=` to any timeseries scraped from this config. + - job_name: 'prometheus' + + # metrics_path defaults to '/metrics' + # scheme defaults to 'http'. + + static_configs: + - targets: ['localhost:9090'] diff --git a/docker/telegrafetc/telegraf.conf b/docker/telegrafetc/telegraf.conf new file mode 100644 index 00000000..d308c39b --- /dev/null +++ b/docker/telegrafetc/telegraf.conf @@ -0,0 +1,8982 @@ +# Telegraf Configuration +# +# Telegraf is entirely plugin driven. All metrics are gathered from the +# declared inputs, and sent to the declared outputs. +# +# Plugins must be declared in here to be active. +# To deactivate a plugin, comment out the name and any variables. +# +# Use 'telegraf -config telegraf.conf -test' to see what metrics a config +# file would generate. +# +# Environment variables can be used anywhere in this config file, simply surround +# them with ${}. 
For strings the variable must be within quotes (ie, "${STR_VAR}"), +# for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR}) + + +# Global tags can be specified here in key="value" format. +[global_tags] + # dc = "us-east-1" # will tag all metrics with dc=us-east-1 + # rack = "1a" + ## Environment variables can be used as tags, and throughout the config file + # user = "$USER" + + +# Configuration for telegraf agent +[agent] + ## Default data collection interval for all inputs + interval = "15s" + ## Rounds collection interval to 'interval' + ## ie, if interval="10s" then always collect on :00, :10, :20, etc. + round_interval = true + + ## Telegraf will send metrics to outputs in batches of at most + ## metric_batch_size metrics. + ## This controls the size of writes that Telegraf sends to output plugins. + metric_batch_size = 1000 + + ## Maximum number of unwritten metrics per output. Increasing this value + ## allows for longer periods of output downtime without dropping metrics at the + ## cost of higher maximum memory usage. + metric_buffer_limit = 10000 + + ## Collection jitter is used to jitter the collection by a random amount. + ## Each plugin will sleep for a random time within jitter before collecting. + ## This can be used to avoid many plugins querying things like sysfs at the + ## same time, which can have a measurable effect on the system. + collection_jitter = "0s" + + ## Default flushing interval for all outputs. Maximum flush_interval will be + ## flush_interval + flush_jitter + flush_interval = "10s" + ## Jitter the flush interval by a random amount. This is primarily to avoid + ## large write spikes for users running a large number of telegraf instances. + ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s + flush_jitter = "0s" + + ## By default or when set to "0s", precision will be set to the same + ## timestamp order as the collection interval, with the maximum being 1s. + ## ie, when interval = "10s", precision will be "1s" + ## when interval = "250ms", precision will be "1ms" + ## Precision will NOT be used for service inputs. It is up to each individual + ## service input to set the timestamp at the appropriate precision. + ## Valid time units are "ns", "us" (or "µs"), "ms", "s". + precision = "" + + ## Log at debug level. + # debug = false + ## Log only error level messages. + # quiet = false + + ## Log target controls the destination for logs and can be one of "file", + ## "stderr" or, on Windows, "eventlog". When set to "file", the output file + ## is determined by the "logfile" setting. + # logtarget = "file" + + ## Name of the file to be logged to when using the "file" logtarget. If set to + ## the empty string then logs are written to stderr. + # logfile = "" + + ## The logfile will be rotated after the time interval specified. When set + ## to 0 no time based rotation is performed. Logs are rotated only when + ## written to, if there is no log activity rotation may be delayed. + # logfile_rotation_interval = "0d" + + ## The logfile will be rotated when it becomes larger than the specified + ## size. When set to 0 no size based rotation is performed. + # logfile_rotation_max_size = "0MB" + + ## Maximum number of rotated archives to keep, any older logs are deleted. + ## If set to -1, no archives are removed. + # logfile_rotation_max_archives = 5 + + ## Pick a timezone to use when logging or type 'local' for local time. 
+ ## Example: America/Chicago + # log_with_timezone = "" + + ## Override default hostname, if empty use os.Hostname() + hostname = "" + ## If set to true, do no set the "host" tag in the telegraf agent. + omit_hostname = false + +############################################################################### +# OUTPUT PLUGINS # +############################################################################### + + +# Configuration for sending metrics to InfluxDB +# [[outputs.influxdb]] + ## The full HTTP or UDP URL for your InfluxDB instance. + ## + ## Multiple URLs can be specified for a single cluster, only ONE of the + ## urls will be written to each interval. + # urls = ["unix:///var/run/influxdb.sock"] + # urls = ["udp://127.0.0.1:8089"] + # urls = ["http://127.0.0.1:8086"] + + ## The target database for metrics; will be created as needed. + ## For UDP url endpoint database needs to be configured on server side. + # database = "telegraf" + + ## The value of this tag will be used to determine the database. If this + ## tag is not set the 'database' option is used as the default. + # database_tag = "" + + ## If true, the 'database_tag' will not be included in the written metric. + # exclude_database_tag = false + + ## If true, no CREATE DATABASE queries will be sent. Set to true when using + ## Telegraf with a user without permissions to create databases or when the + ## database already exists. + # skip_database_creation = false + + ## Name of existing retention policy to write to. Empty string writes to + ## the default retention policy. Only takes effect when using HTTP. + # retention_policy = "" + + ## The value of this tag will be used to determine the retention policy. If this + ## tag is not set the 'retention_policy' option is used as the default. + # retention_policy_tag = "" + + ## If true, the 'retention_policy_tag' will not be included in the written metric. + # exclude_retention_policy_tag = false + + ## Write consistency (clusters only), can be: "any", "one", "quorum", "all". + ## Only takes effect when using HTTP. + # write_consistency = "any" + + ## Timeout for HTTP messages. + # timeout = "5s" + + ## HTTP Basic Auth + # username = "telegraf" + # password = "metricsmetricsmetricsmetrics" + + ## HTTP User-Agent + # user_agent = "telegraf" + + ## UDP payload size is the maximum packet size to send. + # udp_payload = "512B" + + ## Optional TLS Config for use on HTTP connections. + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + ## HTTP Proxy override, if unset values the standard proxy environment + ## variables are consulted to determine which proxy, if any, should be used. + # http_proxy = "http://corporate.proxy:3128" + + ## Additional HTTP headers + # http_headers = {"X-Special-Header" = "Special-Value"} + + ## HTTP Content-Encoding for write request body, can be set to "gzip" to + ## compress body or "identity" to apply no encoding. + # content_encoding = "gzip" + + ## When true, Telegraf will output unsigned integers as unsigned values, + ## i.e.: "42u". You will need a version of InfluxDB supporting unsigned + ## integer values. Enabling this option will result in field type errors if + ## existing data has been written. + # influx_uint_support = false + + +# # Configuration for Amon Server to send metrics to. +# [[outputs.amon]] +# ## Amon Server Key +# server_key = "my-server-key" # required. 
+# +# ## Amon Instance URL +# amon_instance = "https://youramoninstance" # required +# +# ## Connection timeout. +# # timeout = "5s" + + +# # Publishes metrics to an AMQP broker +# [[outputs.amqp]] +# ## Broker to publish to. +# ## deprecated in 1.7; use the brokers option +# # url = "amqp://localhost:5672/influxdb" +# +# ## Brokers to publish to. If multiple brokers are specified a random broker +# ## will be selected anytime a connection is established. This can be +# ## helpful for load balancing when not using a dedicated load balancer. +# brokers = ["amqp://localhost:5672/influxdb"] +# +# ## Maximum messages to send over a connection. Once this is reached, the +# ## connection is closed and a new connection is made. This can be helpful for +# ## load balancing when not using a dedicated load balancer. +# # max_messages = 0 +# +# ## Exchange to declare and publish to. +# exchange = "telegraf" +# +# ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash". +# # exchange_type = "topic" +# +# ## If true, exchange will be passively declared. +# # exchange_passive = false +# +# ## Exchange durability can be either "transient" or "durable". +# # exchange_durability = "durable" +# +# ## Additional exchange arguments. +# # exchange_arguments = { } +# # exchange_arguments = {"hash_property" = "timestamp"} +# +# ## Authentication credentials for the PLAIN auth_method. +# # username = "" +# # password = "" +# +# ## Auth method. PLAIN and EXTERNAL are supported +# ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as +# ## described here: https://www.rabbitmq.com/plugins.html +# # auth_method = "PLAIN" +# +# ## Metric tag to use as a routing key. +# ## ie, if this tag exists, its value will be used as the routing key +# # routing_tag = "host" +# +# ## Static routing key. Used when no routing_tag is set or as a fallback +# ## when the tag specified in routing tag is not found. +# # routing_key = "" +# # routing_key = "telegraf" +# +# ## Delivery Mode controls if a published message is persistent. +# ## One of "transient" or "persistent". +# # delivery_mode = "transient" +# +# ## InfluxDB database added as a message header. +# ## deprecated in 1.7; use the headers option +# # database = "telegraf" +# +# ## InfluxDB retention policy added as a message header +# ## deprecated in 1.7; use the headers option +# # retention_policy = "default" +# +# ## Static headers added to each published message. +# # headers = { } +# # headers = {"database" = "telegraf", "retention_policy" = "default"} +# +# ## Connection timeout. If not provided, will default to 5s. 0s means no +# ## timeout (not recommended). +# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## If true use batch serialization format instead of line based delimiting. +# ## Only applies to data formats which are not line based such as JSON. +# ## Recommended to set to true. +# # use_batch_format = false +# +# ## Content encoding for message payloads, can be set to "gzip" to or +# ## "identity" to apply no encoding. +# ## +# ## Please note that when use_batch_format = false each amqp message contains only +# ## a single metric, it is recommended to use compression with batch format +# ## for best results. +# # content_encoding = "identity" +# +# ## Data format to output. 
+# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# # data_format = "influx" + + +# # Send metrics to Azure Application Insights +# [[outputs.application_insights]] +# ## Instrumentation key of the Application Insights resource. +# instrumentation_key = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxx" +# +# ## Regions that require endpoint modification https://docs.microsoft.com/en-us/azure/azure-monitor/app/custom-endpoints +# # endpoint_url = "https://dc.services.visualstudio.com/v2/track" +# +# ## Timeout for closing (default: 5s). +# # timeout = "5s" +# +# ## Enable additional diagnostic logging. +# # enable_diagnostic_logging = false +# +# ## Context Tag Sources add Application Insights context tags to a tag value. +# ## +# ## For list of allowed context tag keys see: +# ## https://github.com/microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/contexttagkeys.go +# # [outputs.application_insights.context_tag_sources] +# # "ai.cloud.role" = "kubernetes_container_name" +# # "ai.cloud.roleInstance" = "kubernetes_pod_name" + + +# # Sends metrics to Azure Data Explorer +# [[outputs.azure_data_explorer]] +# ## Azure Data Exlorer cluster endpoint +# ## ex: endpoint_url = "https://clustername.australiasoutheast.kusto.windows.net" +# endpoint_url = "" +# +# ## The Azure Data Explorer database that the metrics will be ingested into. +# ## The plugin will NOT generate this database automatically, it's expected that this database already exists before ingestion. +# ## ex: "exampledatabase" +# database = "" +# +# ## Timeout for Azure Data Explorer operations +# # timeout = "20s" +# +# ## Type of metrics grouping used when pushing to Azure Data Explorer. +# ## Default is "TablePerMetric" for one table per different metric. +# ## For more information, please check the plugin README. +# # metrics_grouping_type = "TablePerMetric" +# +# ## Name of the single table to store all the metrics (Only needed if metrics_grouping_type is "SingleTable"). +# # table_name = "" +# + + +# # Send aggregate metrics to Azure Monitor +# [[outputs.azure_monitor]] +# ## Timeout for HTTP writes. +# # timeout = "20s" +# +# ## Set the namespace prefix, defaults to "Telegraf/". +# # namespace_prefix = "Telegraf/" +# +# ## Azure Monitor doesn't have a string value type, so convert string +# ## fields to dimensions (a.k.a. tags) if enabled. Azure Monitor allows +# ## a maximum of 10 dimensions so Telegraf will only send the first 10 +# ## alphanumeric dimensions. +# # strings_as_dimensions = false +# +# ## Both region and resource_id must be set or be available via the +# ## Instance Metadata service on Azure Virtual Machines. +# # +# ## Azure Region to publish metrics against. +# ## ex: region = "southcentralus" +# # region = "" +# # +# ## The Azure Resource ID against which metric will be logged, e.g. +# ## ex: resource_id = "/subscriptions//resourceGroups//providers/Microsoft.Compute/virtualMachines/" +# # resource_id = "" +# +# ## Optionally, if in Azure US Government, China or other sovereign +# ## cloud environment, set appropriate REST endpoint for receiving +# ## metrics. (Note: region may be unused in this context) +# # endpoint_url = "https://monitoring.core.usgovcloudapi.net" + + +# # Publish Telegraf metrics to a Google Cloud PubSub topic +# [[outputs.cloud_pubsub]] +# ## Required. Name of Google Cloud Platform (GCP) Project that owns +# ## the given PubSub topic. 
+# project = "my-project" +# +# ## Required. Name of PubSub topic to publish metrics to. +# topic = "my-topic" +# +# ## Required. Data format to consume. +# ## Each data format has its own unique set of configuration options. +# ## Read more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" +# +# ## Optional. Filepath for GCP credentials JSON file to authorize calls to +# ## PubSub APIs. If not set explicitly, Telegraf will attempt to use +# ## Application Default Credentials, which is preferred. +# # credentials_file = "path/to/my/creds.json" +# +# ## Optional. If true, will send all metrics per write in one PubSub message. +# # send_batched = true +# +# ## The following publish_* parameters specifically configures batching +# ## requests made to the GCP Cloud PubSub API via the PubSub Golang library. Read +# ## more here: https://godoc.org/cloud.google.com/go/pubsub#PublishSettings +# +# ## Optional. Send a request to PubSub (i.e. actually publish a batch) +# ## when it has this many PubSub messages. If send_batched is true, +# ## this is ignored and treated as if it were 1. +# # publish_count_threshold = 1000 +# +# ## Optional. Send a request to PubSub (i.e. actually publish a batch) +# ## when it has this many PubSub messages. If send_batched is true, +# ## this is ignored and treated as if it were 1 +# # publish_byte_threshold = 1000000 +# +# ## Optional. Specifically configures requests made to the PubSub API. +# # publish_num_go_routines = 2 +# +# ## Optional. Specifies a timeout for requests to the PubSub API. +# # publish_timeout = "30s" +# +# ## Optional. If true, published PubSub message data will be base64-encoded. +# # base64_data = false +# +# ## Optional. PubSub attributes to add to metrics. +# # [outputs.cloud_pubsub.attributes] +# # my_attr = "tag_value" + + +# # Configuration for AWS CloudWatch output. +# [[outputs.cloudwatch]] +# ## Amazon REGION +# region = "us-east-1" +# +# ## Amazon Credentials +# ## Credentials are loaded in the following order +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile +# #access_key = "" +# #secret_key = "" +# #token = "" +# #role_arn = "" +# #web_identity_token_file = "" +# #role_session_name = "" +# #profile = "" +# #shared_credential_file = "" +# +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" +# +# ## Namespace for the CloudWatch MetricDatums +# namespace = "InfluxData/Telegraf" +# +# ## If you have a large amount of metrics, you should consider to send statistic +# ## values instead of raw metrics which could not only improve performance but +# ## also save AWS API cost. If enable this flag, this plugin would parse the required +# ## CloudWatch statistic fields (count, min, max, and sum) and send them to CloudWatch. +# ## You could use basicstats aggregator to calculate those fields. If not all statistic +# ## fields are available, all fields would still be sent as raw metrics. 
+# # write_statistics = false +# +# ## Enable high resolution metrics of 1 second (if not enabled, standard resolution are of 60 seconds precision) +# # high_resolution_metrics = false + + +# # Configuration for AWS CloudWatchLogs output. +# [[outputs.cloudwatch_logs]] +# ## The region is the Amazon region that you wish to connect to. +# ## Examples include but are not limited to: +# ## - us-west-1 +# ## - us-west-2 +# ## - us-east-1 +# ## - ap-southeast-1 +# ## - ap-southeast-2 +# ## ... +# region = "us-east-1" +# +# ## Amazon Credentials +# ## Credentials are loaded in the following order +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile +# #access_key = "" +# #secret_key = "" +# #token = "" +# #role_arn = "" +# #web_identity_token_file = "" +# #role_session_name = "" +# #profile = "" +# #shared_credential_file = "" +# +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" +# +# ## Cloud watch log group. Must be created in AWS cloudwatch logs upfront! +# ## For example, you can specify the name of the k8s cluster here to group logs from all cluster in oine place +# log_group = "my-group-name" +# +# ## Log stream in log group +# ## Either log group name or reference to metric attribute, from which it can be parsed: +# ## tag: or field:. If log stream is not exist, it will be created. +# ## Since AWS is not automatically delete logs streams with expired logs entries (i.e. empty log stream) +# ## you need to put in place appropriate house-keeping (https://forums.aws.amazon.com/thread.jspa?threadID=178855) +# log_stream = "tag:location" +# +# ## Source of log data - metric name +# ## specify the name of the metric, from which the log data should be retrieved. +# ## I.e., if you are using docker_log plugin to stream logs from container, then +# ## specify log_data_metric_name = "docker_log" +# log_data_metric_name = "docker_log" +# +# ## Specify from which metric attribute the log data should be retrieved: +# ## tag: or field:. +# ## I.e., if you are using docker_log plugin to stream logs from container, then +# ## specify log_data_source = "field:message" +# log_data_source = "field:message" + + +# # Configuration for CrateDB to send metrics to. +# [[outputs.cratedb]] +# # A github.com/jackc/pgx/v4 connection string. +# # See https://pkg.go.dev/github.com/jackc/pgx/v4#ParseConfig +# url = "postgres://user:password@localhost/schema?sslmode=disable" +# # Timeout for all CrateDB queries. +# timeout = "5s" +# # Name of the table to store metrics in. +# table = "metrics" +# # If true, and the metrics table does not exist, create it automatically. +# table_create = true +# # The character(s) to replace any '.' in an object key with +# key_separator = "_" + + +# # Configuration for DataDog API to send metrics to. +# [[outputs.datadog]] +# ## Datadog API key +# apikey = "my-secret-key" +# +# ## Connection timeout. +# # timeout = "5s" +# +# ## Write URL override; useful for debugging. 
+# # url = "https://app.datadoghq.com/api/v1/series" +# +# ## Set http_proxy (telegraf uses the system wide proxy settings if it isn't set) +# # http_proxy_url = "http://localhost:8888" + + +# # Send metrics to nowhere at all +# [[outputs.discard]] +# # no configuration + + +# # Send telegraf metrics to a Dynatrace environment +# [[outputs.dynatrace]] +# ## For usage with the Dynatrace OneAgent you can omit any configuration, +# ## the only requirement is that the OneAgent is running on the same host. +# ## Only setup environment url and token if you want to monitor a Host without the OneAgent present. +# ## +# ## Your Dynatrace environment URL. +# ## For Dynatrace OneAgent you can leave this empty or set it to "http://127.0.0.1:14499/metrics/ingest" (default) +# ## For Dynatrace SaaS environments the URL scheme is "https://{your-environment-id}.live.dynatrace.com/api/v2/metrics/ingest" +# ## For Dynatrace Managed environments the URL scheme is "https://{your-domain}/e/{your-environment-id}/api/v2/metrics/ingest" +# url = "" +# +# ## Your Dynatrace API token. +# ## Create an API token within your Dynatrace environment, by navigating to Settings > Integration > Dynatrace API +# ## The API token needs data ingest scope permission. When using OneAgent, no API token is required. +# api_token = "" +# +# ## Optional prefix for metric names (e.g.: "telegraf") +# prefix = "telegraf" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Optional flag for ignoring tls certificate check +# # insecure_skip_verify = false +# +# +# ## Connection timeout, defaults to "5s" if not set. +# timeout = "5s" +# +# ## If you want metrics to be treated and reported as delta counters, add the metric names here +# additional_counters = [ ] +# +# ## Optional dimensions to be added to every metric +# # [outputs.dynatrace.default_dimensions] +# # default_key = "default value" + + +# # Configuration for Elasticsearch to send metrics to. +# [[outputs.elasticsearch]] +# ## The full HTTP endpoint URL for your Elasticsearch instance +# ## Multiple urls can be specified as part of the same cluster, +# ## this means that only ONE of the urls will be written to each interval. +# urls = [ "http://node1.es.example.com:9200" ] # required. +# ## Elasticsearch client timeout, defaults to "5s" if not set. +# timeout = "5s" +# ## Set to true to ask Elasticsearch a list of all cluster nodes, +# ## thus it is not necessary to list all nodes in the urls config option. +# enable_sniffer = false +# ## Set to true to enable gzip compression +# enable_gzip = false +# ## Set the interval to check if the Elasticsearch nodes are available +# ## Setting to "0s" will disable the health check (not recommended in production) +# health_check_interval = "10s" +# ## HTTP basic authentication details +# # username = "telegraf" +# # password = "mypassword" +# +# ## Index Config +# ## The target index for metrics (Elasticsearch will create if it not exists). +# ## You can use the date specifiers below to create indexes per time frame. +# ## The metric timestamp will be used to decide the destination index name +# # %Y - year (2016) +# # %y - last two digits of year (00..99) +# # %m - month (01..12) +# # %d - day of month (e.g., 01) +# # %H - hour (00..23) +# # %V - week of the year (ISO week) (01..53) +# ## Additionally, you can specify a tag name using the notation {{tag_name}} +# ## which will be used as part of the index name. 
If the tag does not exist, +# ## the default tag value will be used. +# # index_name = "telegraf-{{host}}-%Y.%m.%d" +# # default_tag_value = "none" +# index_name = "telegraf-%Y.%m.%d" # required. +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Template Config +# ## Set to true if you want telegraf to manage its index template. +# ## If enabled it will create a recommended index template for telegraf indexes +# manage_template = true +# ## The template name used for telegraf indexes +# template_name = "telegraf" +# ## Set to true if you want telegraf to overwrite an existing template +# overwrite_template = false +# ## If set to true a unique ID hash will be sent as sha256(concat(timestamp,measurement,series-hash)) string +# ## it will enable data resend and update metric points avoiding duplicated metrics with diferent id's +# force_document_id = false + + +# # Send metrics to command as input over stdin +# [[outputs.exec]] +# ## Command to ingest metrics via stdin. +# command = ["tee", "-a", "/dev/null"] +# +# ## Timeout for command to complete. +# # timeout = "5s" +# +# ## Data format to output. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# # data_format = "influx" + + +# # Run executable as long-running output plugin +# [[outputs.execd]] +# ## Program to run as daemon +# command = ["my-telegraf-output", "--some-flag", "value"] +# +# ## Delay before the process is restarted after an unexpected termination +# restart_delay = "10s" +# +# ## Data format to export. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" + + +# # Send telegraf metrics to file(s) +# [[outputs.file]] +# ## Files to write to, "stdout" is a specially handled file. +# files = ["stdout", "/tmp/metrics.out"] +# +# ## Use batch serialization format instead of line based delimiting. The +# ## batch format allows for the production of non line based output formats and +# ## may more efficiently encode metric groups. +# # use_batch_format = false +# +# ## The file will be rotated after the time interval specified. When set +# ## to 0 no time based rotation is performed. +# # rotation_interval = "0d" +# +# ## The logfile will be rotated when it becomes larger than the specified +# ## size. When set to 0 no size based rotation is performed. +# # rotation_max_size = "0MB" +# +# ## Maximum number of rotated archives to keep, any older logs are deleted. +# ## If set to -1, no archives are removed. +# # rotation_max_archives = 5 +# +# ## Data format to output. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" + + +# # Configuration for Graphite server to send metrics to +# [[outputs.graphite]] +# ## TCP endpoint for your graphite instance. +# ## If multiple endpoints are configured, output will be load balanced. +# ## Only one of the endpoints will be written to with each iteration. 
+# servers = ["localhost:2003"] +# ## Prefix metrics name +# prefix = "" +# ## Graphite output template +# ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# template = "host.tags.measurement.field" +# +# ## Enable Graphite tags support +# # graphite_tag_support = false +# +# ## Define how metric names and tags are sanitized; options are "strict", or "compatible" +# ## strict - Default method, and backwards compatible with previous versionf of Telegraf +# ## compatible - More relaxed sanitizing when using tags, and compatible with the graphite spec +# # graphite_tag_sanitize_mode = "strict" +# +# ## Character for separating metric name and field for Graphite tags +# # graphite_separator = "." +# +# ## Graphite templates patterns +# ## 1. Template for cpu +# ## 2. Template for disk* +# ## 3. Default template +# # templates = [ +# # "cpu tags.measurement.host.field", +# # "disk* measurement.field", +# # "host.measurement.tags.field" +# #] +# +# ## timeout in seconds for the write connection to graphite +# timeout = 2 +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Send telegraf metrics to graylog +# [[outputs.graylog]] +# ## Endpoints for your graylog instances. +# servers = ["udp://127.0.0.1:12201"] +# +# ## The field to use as the GELF short_message, if unset the static string +# ## "telegraf" will be used. +# ## example: short_message_field = "message" +# # short_message_field = "" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Configurable HTTP health check resource based on metrics +# [[outputs.health]] +# ## Address and port to listen on. +# ## ex: service_address = "http://localhost:8080" +# ## service_address = "unix:///var/run/telegraf-health.sock" +# # service_address = "http://:8080" +# +# ## The maximum duration for reading the entire request. +# # read_timeout = "5s" +# ## The maximum duration for writing the entire response. +# # write_timeout = "5s" +# +# ## Username and password to accept for HTTP basic authentication. +# # basic_username = "user1" +# # basic_password = "secret" +# +# ## Allowed CA certificates for client certificates. +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## TLS server certificate and private key. +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## One or more check sub-tables should be defined, it is also recommended to +# ## use metric filtering to limit the metrics that flow into this output. +# ## +# ## When using the default buffer sizes, this example will fail when the +# ## metric buffer is half full. 
+# ## +# ## namepass = ["internal_write"] +# ## tagpass = { output = ["influxdb"] } +# ## +# ## [[outputs.health.compares]] +# ## field = "buffer_size" +# ## lt = 5000.0 +# ## +# ## [[outputs.health.contains]] +# ## field = "buffer_size" + + +# # A plugin that can transmit metrics over HTTP +# [[outputs.http]] +# ## URL is the address to send metrics to +# url = "http://127.0.0.1:8080/telegraf" +# +# ## Timeout for HTTP message +# # timeout = "5s" +# +# ## HTTP method, one of: "POST" or "PUT" +# # method = "POST" +# +# ## HTTP Basic Auth credentials +# # username = "username" +# # password = "pa$$word" +# +# ## OAuth2 Client Credentials Grant +# # client_id = "clientid" +# # client_secret = "secret" +# # token_url = "https://indentityprovider/oauth2/v1/token" +# # scopes = ["urn:opc:idm:__myscopes__"] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Optional Cookie authentication +# # cookie_auth_url = "https://localhost/authMe" +# # cookie_auth_method = "POST" +# # cookie_auth_username = "username" +# # cookie_auth_password = "pa$$word" +# # cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}' +# ## cookie_auth_renewal not set or set to "0" will auth once and never renew the cookie +# # cookie_auth_renewal = "5m" +# +# ## Data format to output. +# ## Each data format has it's own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# # data_format = "influx" +# +# ## HTTP Content-Encoding for write request body, can be set to "gzip" to +# ## compress body or "identity" to apply no encoding. +# # content_encoding = "identity" +# +# ## Additional HTTP headers +# # [outputs.http.headers] +# # # Should be set manually to "application/json" for json data_format +# # Content-Type = "text/plain; charset=utf-8" +# +# ## Idle (keep-alive) connection timeout. +# ## Maximum amount of time before idle connection is closed. +# ## Zero means no limit. +# # idle_conn_timeout = 0 + + +# # Configuration for sending metrics to InfluxDB +# [[outputs.influxdb_v2]] +# ## The URLs of the InfluxDB cluster nodes. +# ## +# ## Multiple URLs can be specified for a single cluster, only ONE of the +# ## urls will be written to each interval. +# ## ex: urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"] +# urls = ["http://127.0.0.1:8086"] +# +# ## Token for authentication. +# token = "" +# +# ## Organization is the name of the organization you wish to write to; must exist. +# organization = "" +# +# ## Destination bucket to write into. +# bucket = "" +# +# ## The value of this tag will be used to determine the bucket. If this +# ## tag is not set the 'bucket' option is used as the default. +# # bucket_tag = "" +# +# ## If true, the bucket tag will not be added to the metric. +# # exclude_bucket_tag = false +# +# ## Timeout for HTTP messages. +# # timeout = "5s" +# +# ## Additional HTTP headers +# # http_headers = {"X-Special-Header" = "Special-Value"} +# +# ## HTTP Proxy override, if unset values the standard proxy environment +# ## variables are consulted to determine which proxy, if any, should be used. 
+# # http_proxy = "http://corporate.proxy:3128" +# +# ## HTTP User-Agent +# # user_agent = "telegraf" +# +# ## Content-Encoding for write request body, can be set to "gzip" to +# ## compress body or "identity" to apply no encoding. +# # content_encoding = "gzip" +# +# ## Enable or disable uint support for writing uints influxdb 2.0. +# # influx_uint_support = false +# +# ## Optional TLS Config for use on HTTP connections. +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Configuration for sending metrics to an Instrumental project +# [[outputs.instrumental]] +# ## Project API Token (required) +# api_token = "API Token" # required +# ## Prefix the metrics with a given name +# prefix = "" +# ## Stats output template (Graphite formatting) +# ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite +# template = "host.tags.measurement.field" +# ## Timeout in seconds to connect +# timeout = "2s" +# ## Display Communication to Instrumental +# debug = false + + +# # Configuration for the Kafka server to send metrics to +# [[outputs.kafka]] +# ## URLs of kafka brokers +# brokers = ["localhost:9092"] +# ## Kafka topic for producer messages +# topic = "telegraf" +# +# ## The value of this tag will be used as the topic. If not set the 'topic' +# ## option is used. +# # topic_tag = "" +# +# ## If true, the 'topic_tag' will be removed from to the metric. +# # exclude_topic_tag = false +# +# ## Optional Client id +# # client_id = "Telegraf" +# +# ## Set the minimal supported Kafka version. Setting this enables the use of new +# ## Kafka features and APIs. Of particular interest, lz4 compression +# ## requires at least version 0.10.0.0. +# ## ex: version = "1.1.0" +# # version = "" +# +# ## Optional topic suffix configuration. +# ## If the section is omitted, no suffix is used. +# ## Following topic suffix methods are supported: +# ## measurement - suffix equals to separator + measurement's name +# ## tags - suffix equals to separator + specified tags' values +# ## interleaved with separator +# +# ## Suffix equals to "_" + measurement name +# # [outputs.kafka.topic_suffix] +# # method = "measurement" +# # separator = "_" +# +# ## Suffix equals to "__" + measurement's "foo" tag value. +# ## If there's no such a tag, suffix equals to an empty string +# # [outputs.kafka.topic_suffix] +# # method = "tags" +# # keys = ["foo"] +# # separator = "__" +# +# ## Suffix equals to "_" + measurement's "foo" and "bar" +# ## tag values, separated by "_". If there is no such tags, +# ## their values treated as empty strings. +# # [outputs.kafka.topic_suffix] +# # method = "tags" +# # keys = ["foo", "bar"] +# # separator = "_" +# +# ## The routing tag specifies a tagkey on the metric whose value is used as +# ## the message key. The message key is used to determine which partition to +# ## send the message to. This tag is prefered over the routing_key option. +# routing_tag = "host" +# +# ## The routing key is set as the message key and used to determine which +# ## partition to send the message to. This value is only used when no +# ## routing_tag is set or as a fallback when the tag specified in routing tag +# ## is not found. +# ## +# ## If set to "random", a random value will be generated for each message. +# ## +# ## When unset, no message key is added and each message is routed to a random +# ## partition. 
+# ## +# ## ex: routing_key = "random" +# ## routing_key = "telegraf" +# # routing_key = "" +# +# ## Compression codec represents the various compression codecs recognized by +# ## Kafka in messages. +# ## 0 : None +# ## 1 : Gzip +# ## 2 : Snappy +# ## 3 : LZ4 +# ## 4 : ZSTD +# # compression_codec = 0 +# +# ## Idempotent Writes +# ## If enabled, exactly one copy of each message is written. +# # idempotent_writes = false +# +# ## RequiredAcks is used in Produce Requests to tell the broker how many +# ## replica acknowledgements it must see before responding +# ## 0 : the producer never waits for an acknowledgement from the broker. +# ## This option provides the lowest latency but the weakest durability +# ## guarantees (some data will be lost when a server fails). +# ## 1 : the producer gets an acknowledgement after the leader replica has +# ## received the data. This option provides better durability as the +# ## client waits until the server acknowledges the request as successful +# ## (only messages that were written to the now-dead leader but not yet +# ## replicated will be lost). +# ## -1: the producer gets an acknowledgement after all in-sync replicas have +# ## received the data. This option provides the best durability, we +# ## guarantee that no messages will be lost as long as at least one in +# ## sync replica remains. +# # required_acks = -1 +# +# ## The maximum number of times to retry sending a metric before failing +# ## until the next flush. +# # max_retry = 3 +# +# ## The maximum permitted size of a message. Should be set equal to or +# ## smaller than the broker's 'message.max.bytes'. +# # max_message_bytes = 1000000 +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Optional SASL Config +# # sasl_username = "kafka" +# # sasl_password = "secret" +# +# ## Optional SASL: +# ## one of: OAUTHBEARER, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, GSSAPI +# ## (defaults to PLAIN) +# # sasl_mechanism = "" +# +# ## used if sasl_mechanism is GSSAPI (experimental) +# # sasl_gssapi_service_name = "" +# # ## One of: KRB5_USER_AUTH and KRB5_KEYTAB_AUTH +# # sasl_gssapi_auth_type = "KRB5_USER_AUTH" +# # sasl_gssapi_kerberos_config_path = "/" +# # sasl_gssapi_realm = "realm" +# # sasl_gssapi_key_tab_path = "" +# # sasl_gssapi_disable_pafxfast = false +# +# ## used if sasl_mechanism is OAUTHBEARER (experimental) +# # sasl_access_token = "" +# +# ## SASL protocol version. When connecting to Azure EventHub set to 0. +# # sasl_version = 1 +# +# # Disable Kafka metadata full fetch +# # metadata_full = false +# +# ## Data format to output. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# # data_format = "influx" + + +# # Configuration for the AWS Kinesis output. +# [[outputs.kinesis]] +# ## Amazon REGION of kinesis endpoint. 
+# region = "ap-southeast-2" +# +# ## Amazon Credentials +# ## Credentials are loaded in the following order +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile +# #access_key = "" +# #secret_key = "" +# #token = "" +# #role_arn = "" +# #web_identity_token_file = "" +# #role_session_name = "" +# #profile = "" +# #shared_credential_file = "" +# +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" +# +# ## Kinesis StreamName must exist prior to starting telegraf. +# streamname = "StreamName" +# ## DEPRECATED: PartitionKey as used for sharding data. +# partitionkey = "PartitionKey" +# ## DEPRECATED: If set the partitionKey will be a random UUID on every put. +# ## This allows for scaling across multiple shards in a stream. +# ## This will cause issues with ordering. +# use_random_partitionkey = false +# ## The partition key can be calculated using one of several methods: +# ## +# ## Use a static value for all writes: +# # [outputs.kinesis.partition] +# # method = "static" +# # key = "howdy" +# # +# ## Use a random partition key on each write: +# # [outputs.kinesis.partition] +# # method = "random" +# # +# ## Use the measurement name as the partition key: +# # [outputs.kinesis.partition] +# # method = "measurement" +# # +# ## Use the value of a tag for all writes, if the tag is not set the empty +# ## default option will be used. When no default, defaults to "telegraf" +# # [outputs.kinesis.partition] +# # method = "tag" +# # key = "host" +# # default = "mykey" +# +# +# ## Data format to output. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" +# +# ## debug will show upstream aws messages. +# debug = false + + +# # Configuration for Librato API to send metrics to. +# [[outputs.librato]] +# ## Librato API Docs +# ## http://dev.librato.com/v1/metrics-authentication +# ## Librato API user +# api_user = "telegraf@influxdb.com" # required. +# ## Librato API token +# api_token = "my-secret-token" # required. +# ## Debug +# # debug = false +# ## Connection timeout. +# # timeout = "5s" +# ## Output source Template (same as graphite buckets) +# ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite +# ## This template is used in librato's source (not metric's name) +# template = "host" +# + + +# # Send aggregate metrics to Logz.io +# [[outputs.logzio]] +# ## Connection timeout, defaults to "5s" if not set. +# timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Logz.io account token +# token = "your logz.io token" # required +# +# ## Use your listener URL for your Logz.io account region. 
+# # url = "https://listener.logz.io:8071" + + +# # Send logs to Loki +# [[outputs.loki]] +# ## The domain of Loki +# domain = "https://loki.domain.tld" +# +# ## Endpoint to write api +# # endpoint = "/loki/api/v1/push" +# +# ## Connection timeout, defaults to "5s" if not set. +# # timeout = "5s" +# +# ## Basic auth credential +# # username = "loki" +# # password = "pass" +# +# ## Additional HTTP headers +# # http_headers = {"X-Scope-OrgID" = "1"} +# +# ## If the request must be gzip encoded +# # gzip_request = false +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" + + +# # Configuration for MQTT server to send metrics to +# [[outputs.mqtt]] +# servers = ["localhost:1883"] # required. +# +# ## MQTT outputs send metrics to this topic format +# ## "///" +# ## ex: prefix/web01.example.com/mem +# topic_prefix = "telegraf" +# +# ## QoS policy for messages +# ## 0 = at most once +# ## 1 = at least once +# ## 2 = exactly once +# # qos = 2 +# +# ## username and password to connect MQTT server. +# # username = "telegraf" +# # password = "metricsmetricsmetricsmetrics" +# +# ## client ID, if not set a random ID is generated +# # client_id = "" +# +# ## Timeout for write operations. default: 5s +# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## When true, metrics will be sent in one MQTT message per flush. Otherwise, +# ## metrics are written one metric per MQTT message. +# # batch = false +# +# ## When true, metric will have RETAIN flag set, making broker cache entries until someone +# ## actually reads it +# # retain = false +# +# ## Defines the maximum length of time that the broker and client may not communicate. +# ## Defaults to 0 which turns the feature off. For version v2.0.12 of eclipse/mosquitto there is a +# ## [bug](https://github.com/eclipse/mosquitto/issues/2117) which requires keep_alive to be set. +# ## As a reference eclipse/paho.mqtt.golang v1.3.0 defaults to 30. +# # keep_alive = 0 +# +# ## Data format to output. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" + + +# # Send telegraf measurements to NATS +# [[outputs.nats]] +# ## URLs of NATS servers +# servers = ["nats://localhost:4222"] +# +# ## Optional client name +# # name = "" +# +# ## Optional credentials +# # username = "" +# # password = "" +# +# ## Optional NATS 2.0 and NATS NGS compatible user credentials +# # credentials = "/etc/telegraf/nats.creds" +# +# ## NATS subject for producer messages +# subject = "telegraf" +# +# ## Use Transport Layer Security +# # secure = false +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Data format to output. 
+# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" + + +# # Send metrics to New Relic metrics endpoint +# [[outputs.newrelic]] +# ## New Relic Insights API key +# insights_key = "insights api key" +# +# ## Prefix to add to add to metric name for easy identification. +# # metric_prefix = "" +# +# ## Timeout for writes to the New Relic API. +# # timeout = "15s" +# +# ## HTTP Proxy override. If unset use values from the standard +# ## proxy environment variables to determine proxy, if any. +# # http_proxy = "http://corporate.proxy:3128" +# +# ## Metric URL override to enable geographic location endpoints. +# # If not set use values from the standard +# # metric_url = "https://metric-api.newrelic.com/metric/v1" + + +# # Send telegraf measurements to NSQD +# [[outputs.nsq]] +# ## Location of nsqd instance listening on TCP +# server = "localhost:4150" +# ## NSQ topic for producer messages +# topic = "telegraf" +# +# ## Data format to output. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" + + +# # Send OpenTelemetry metrics over gRPC +# [[outputs.opentelemetry]] +# ## Override the default (localhost:4317) OpenTelemetry gRPC service +# ## address:port +# # service_address = "localhost:4317" +# +# ## Override the default (5s) request timeout +# # timeout = "5s" +# +# ## Optional TLS Config. +# ## +# ## Root certificates for verifying server certificates encoded in PEM format. +# # tls_ca = "/etc/telegraf/ca.pem" +# ## The public and private keypairs for the client encoded in PEM format. +# ## May contain intermediate certificates. +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS, but skip TLS chain and host verification. +# # insecure_skip_verify = false +# ## Send the specified TLS server name via SNI. +# # tls_server_name = "foo.example.com" +# +# ## Override the default (gzip) compression used to send data. +# ## Supports: "gzip", "none" +# # compression = "gzip" +# +# ## Additional OpenTelemetry resource attributes +# # [outputs.opentelemetry.attributes] +# # "service.name" = "demo" +# +# ## Additional gRPC request metadata +# # [outputs.opentelemetry.headers] +# # key1 = "value1" + +[[outputs.opentsdb]] +host = "http://nserver" +port = 19000 +http_batch_size = 50 +http_path = "/opentsdb/put" +debug = false +separator = "_" + +# # Configuration for OpenTSDB server to send metrics to +# [[outputs.opentsdb]] +# ## prefix for metrics keys +# prefix = "my.specific.prefix." +# +# ## DNS name of the OpenTSDB server +# ## Using "opentsdb.example.com" or "tcp://opentsdb.example.com" will use the +# ## telnet API. "http://opentsdb.example.com" will use the Http API. +# host = "opentsdb.example.com" +# +# ## Port of the OpenTSDB server +# port = 4242 +# +# ## Number of data points to send to OpenTSDB in Http requests. +# ## Not used with telnet API. +# http_batch_size = 50 +# +# ## URI Path for Http requests to OpenTSDB. +# ## Used in cases where OpenTSDB is located behind a reverse proxy. 
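+# ## For example, the active [[outputs.opentsdb]] block earlier in this file sets
+# ## http_path = "/opentsdb/put" and writes to the n9e server on port 19000. As a
+# ## hedged sketch (assuming that endpoint accepts the standard OpenTSDB-style
+# ## JSON body; the metric name, timestamp and tags below are made-up test values),
+# ## a single datapoint could be pushed by hand with:
+# ##   curl -X POST 'http://nserver:19000/opentsdb/put' \
+# ##     -H 'Content-Type: application/json' \
+# ##     -d '[{"metric":"test.metric","timestamp":1620000000,"value":1,"tags":{"host":"host01"}}]'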
+# http_path = "/api/put" +# +# ## Debug true - Prints OpenTSDB communication +# debug = false +# +# ## Separator separates measurement name from field +# separator = "_" + + +# # Configuration for the Prometheus client to spawn +# [[outputs.prometheus_client]] +# ## Address to listen on +# listen = ":9273" +# +# ## Metric version controls the mapping from Telegraf metrics into +# ## Prometheus format. When using the prometheus input, use the same value in +# ## both plugins to ensure metrics are round-tripped without modification. +# ## +# ## example: metric_version = 1; +# ## metric_version = 2; recommended version +# # metric_version = 1 +# +# ## Use HTTP Basic Authentication. +# # basic_username = "Foo" +# # basic_password = "Bar" +# +# ## If set, the IP Ranges which are allowed to access metrics. +# ## ex: ip_range = ["192.168.0.0/24", "192.168.1.0/30"] +# # ip_range = [] +# +# ## Path to publish the metrics on. +# # path = "/metrics" +# +# ## Expiration interval for each metric. 0 == no expiration +# # expiration_interval = "60s" +# +# ## Collectors to enable, valid entries are "gocollector" and "process". +# ## If unset, both are enabled. +# # collectors_exclude = ["gocollector", "process"] +# +# ## Send string metrics as Prometheus labels. +# ## Unless set to false all string metrics will be sent as labels. +# # string_as_label = true +# +# ## If set, enable TLS with the given certificate. +# # tls_cert = "/etc/ssl/telegraf.crt" +# # tls_key = "/etc/ssl/telegraf.key" +# +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Export metric collection time. +# # export_timestamp = false + + +# # Configuration for the Riemann server to send metrics to +# [[outputs.riemann]] +# ## The full TCP or UDP URL of the Riemann server +# url = "tcp://localhost:5555" +# +# ## Riemann event TTL, floating-point time in seconds. +# ## Defines how long that an event is considered valid for in Riemann +# # ttl = 30.0 +# +# ## Separator to use between measurement and field name in Riemann service name +# ## This does not have any effect if 'measurement_as_attribute' is set to 'true' +# separator = "/" +# +# ## Set measurement name as Riemann attribute 'measurement', instead of prepending it to the Riemann service name +# # measurement_as_attribute = false +# +# ## Send string metrics as Riemann event states. +# ## Unless enabled all string metrics will be ignored +# # string_as_state = false +# +# ## A list of tag keys whose values get sent as Riemann tags. +# ## If empty, all Telegraf tag values will be sent as tags +# # tag_keys = ["telegraf","custom_tag"] +# +# ## Additional Riemann tags to send. +# # tags = ["telegraf-output"] +# +# ## Description for Riemann event +# # description_text = "metrics collected from telegraf" +# +# ## Riemann client write timeout, defaults to "5s" if not set. +# # timeout = "5s" + + +# # Configuration for the Riemann server to send metrics to +# [[outputs.riemann_legacy]] +# ## URL of server +# url = "localhost:5555" +# ## transport protocol to use either tcp or udp +# transport = "tcp" +# ## separator to use between input name and field name in Riemann service name +# separator = " " + + +# # Send aggregate metrics to Sensu Monitor +# [[outputs.sensu]] +# ## BACKEND API URL is the Sensu Backend API root URL to send metrics to +# ## (protocol, host, and port only). 
The output plugin will automatically +# ## append the corresponding backend API path +# ## /api/core/v2/namespaces/:entity_namespace/events/:entity_name/:check_name). +# ## +# ## Backend Events API reference: +# ## https://docs.sensu.io/sensu-go/latest/api/events/ +# ## +# ## AGENT API URL is the Sensu Agent API root URL to send metrics to +# ## (protocol, host, and port only). The output plugin will automatically +# ## append the correspeonding agent API path (/events). +# ## +# ## Agent API Events API reference: +# ## https://docs.sensu.io/sensu-go/latest/api/events/ +# ## +# ## NOTE: if backend_api_url and agent_api_url and api_key are set, the output +# ## plugin will use backend_api_url. If backend_api_url and agent_api_url are +# ## not provided, the output plugin will default to use an agent_api_url of +# ## http://127.0.0.1:3031 +# ## +# # backend_api_url = "http://127.0.0.1:8080" +# # agent_api_url = "http://127.0.0.1:3031" +# +# ## API KEY is the Sensu Backend API token +# ## Generate a new API token via: +# ## +# ## $ sensuctl cluster-role create telegraf --verb create --resource events,entities +# ## $ sensuctl cluster-role-binding create telegraf --cluster-role telegraf --group telegraf +# ## $ sensuctl user create telegraf --group telegraf --password REDACTED +# ## $ sensuctl api-key grant telegraf +# ## +# ## For more information on Sensu RBAC profiles & API tokens, please visit: +# ## - https://docs.sensu.io/sensu-go/latest/reference/rbac/ +# ## - https://docs.sensu.io/sensu-go/latest/reference/apikeys/ +# ## +# # api_key = "${SENSU_API_KEY}" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Timeout for HTTP message +# # timeout = "5s" +# +# ## HTTP Content-Encoding for write request body, can be set to "gzip" to +# ## compress body or "identity" to apply no encoding. +# # content_encoding = "identity" +# +# ## Sensu Event details +# ## +# ## Below are the event details to be sent to Sensu. The main portions of the +# ## event are the check, entity, and metrics specifications. For more information +# ## on Sensu events and its components, please visit: +# ## - Events - https://docs.sensu.io/sensu-go/latest/reference/events +# ## - Checks - https://docs.sensu.io/sensu-go/latest/reference/checks +# ## - Entities - https://docs.sensu.io/sensu-go/latest/reference/entities +# ## - Metrics - https://docs.sensu.io/sensu-go/latest/reference/events#metrics +# ## +# ## Check specification +# ## The check name is the name to give the Sensu check associated with the event +# ## created. This maps to check.metatadata.name in the event. +# [outputs.sensu.check] +# name = "telegraf" +# +# ## Entity specification +# ## Configure the entity name and namespace, if necessary. This will be part of +# ## the entity.metadata in the event. +# ## +# ## NOTE: if the output plugin is configured to send events to a +# ## backend_api_url and entity_name is not set, the value returned by +# ## os.Hostname() will be used; if the output plugin is configured to send +# ## events to an agent_api_url, entity_name and entity_namespace are not used. 
+# # [outputs.sensu.entity] +# # name = "server-01" +# # namespace = "default" +# +# ## Metrics specification +# ## Configure the tags for the metrics that are sent as part of the Sensu event +# # [outputs.sensu.tags] +# # source = "telegraf" +# +# ## Configure the handler(s) for processing the provided metrics +# # [outputs.sensu.metrics] +# # handlers = ["influxdb","elasticsearch"] + + +# # Send metrics and events to SignalFx +# [[outputs.signalfx]] +# ## SignalFx Org Access Token +# access_token = "my-secret-token" +# +# ## The SignalFx realm that your organization resides in +# signalfx_realm = "us9" # Required if ingest_url is not set +# +# ## You can optionally provide a custom ingest url instead of the +# ## signalfx_realm option above if you are using a gateway or proxy +# ## instance. This option takes precident over signalfx_realm. +# ingest_url = "https://my-custom-ingest/" +# +# ## Event typed metrics are omitted by default, +# ## If you require an event typed metric you must specify the +# ## metric name in the following list. +# included_event_names = ["plugin.metric_name"] + + +# # Generic socket writer capable of handling multiple socket types. +# [[outputs.socket_writer]] +# ## URL to connect to +# # address = "tcp://127.0.0.1:8094" +# # address = "tcp://example.com:http" +# # address = "tcp4://127.0.0.1:8094" +# # address = "tcp6://127.0.0.1:8094" +# # address = "tcp6://[2001:db8::1]:8094" +# # address = "udp://127.0.0.1:8094" +# # address = "udp4://127.0.0.1:8094" +# # address = "udp6://127.0.0.1:8094" +# # address = "unix:///tmp/telegraf.sock" +# # address = "unixgram:///tmp/telegraf.sock" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Period between keep alive probes. +# ## Only applies to TCP sockets. +# ## 0 disables keep alive probes. +# ## Defaults to the OS configuration. +# # keep_alive_period = "5m" +# +# ## Content encoding for packet-based connections (i.e. UDP, unixgram). +# ## Can be set to "gzip" or to "identity" to apply no encoding. +# ## +# # content_encoding = "identity" +# +# ## Data format to generate. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# # data_format = "influx" + + +# # Send metrics to SQL Database +# [[outputs.sql]] +# ## Database driver +# ## Valid options: mssql (Microsoft SQL Server), mysql (MySQL), pgx (Postgres), +# ## sqlite (SQLite3), snowflake (snowflake.com) +# # driver = "" +# +# ## Data source name +# ## The format of the data source name is different for each database driver. +# ## See the plugin readme for details. 
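+# ## For example (hedged sketches; the exact DSN syntax depends on the chosen
+# ## driver, and the hosts/credentials below are placeholders):
+# # data_source_name = "username:password@tcp(dbhost:3306)/dbname" # mysql
+# # data_source_name = "postgres://username:password@dbhost:5432/dbname" # pgx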
+# # data_source_name = "" +# +# ## Timestamp column name +# # timestamp_column = "timestamp" +# +# ## Table creation template +# ## Available template variables: +# ## {TABLE} - table name as a quoted identifier +# ## {TABLELITERAL} - table name as a quoted string literal +# ## {COLUMNS} - column definitions (list of quoted identifiers and types) +# # table_template = "CREATE TABLE {TABLE}({COLUMNS})" +# +# ## Table existence check template +# ## Available template variables: +# ## {TABLE} - tablename as a quoted identifier +# # table_exists_template = "SELECT 1 FROM {TABLE} LIMIT 1" +# +# ## Initialization SQL +# # init_sql = "" +# +# ## Metric type to SQL type conversion +# #[outputs.sql.convert] +# # integer = "INT" +# # real = "DOUBLE" +# # text = "TEXT" +# # timestamp = "TIMESTAMP" +# # defaultvalue = "TEXT" +# # unsigned = "UNSIGNED" + + +# # Configuration for Google Cloud Stackdriver to send metrics to +# [[outputs.stackdriver]] +# ## GCP Project +# project = "erudite-bloom-151019" +# +# ## The namespace for the metric descriptor +# namespace = "telegraf" +# +# ## Custom resource type +# # resource_type = "generic_node" +# +# ## Additional resource labels +# # [outputs.stackdriver.resource_labels] +# # node_id = "$HOSTNAME" +# # namespace = "myapp" +# # location = "eu-north0" + + +# # A plugin that can transmit metrics to Sumo Logic HTTP Source +# [[outputs.sumologic]] +# ## Unique URL generated for your HTTP Metrics Source. +# ## This is the address to send metrics to. +# # url = "https://events.sumologic.net/receiver/v1/http/" +# +# ## Data format to be used for sending metrics. +# ## This will set the "Content-Type" header accordingly. +# ## Currently supported formats: +# ## * graphite - for Content-Type of application/vnd.sumologic.graphite +# ## * carbon2 - for Content-Type of application/vnd.sumologic.carbon2 +# ## * prometheus - for Content-Type of application/vnd.sumologic.prometheus +# ## +# ## More information can be found at: +# ## https://help.sumologic.com/03Send-Data/Sources/02Sources-for-Hosted-Collectors/HTTP-Source/Upload-Metrics-to-an-HTTP-Source#content-type-headers-for-metrics +# ## +# ## NOTE: +# ## When unset, telegraf will by default use the influx serializer which is currently unsupported +# ## in HTTP Source. +# data_format = "carbon2" +# +# ## Timeout used for HTTP request +# # timeout = "5s" +# +# ## Max HTTP request body size in bytes before compression (if applied). +# ## By default 1MB is recommended. +# ## NOTE: +# ## Bear in mind that in some serializer a metric even though serialized to multiple +# ## lines cannot be split any further so setting this very low might not work +# ## as expected. +# # max_request_body_size = 1000000 +# +# ## Additional, Sumo specific options. +# ## Full list can be found here: +# ## https://help.sumologic.com/03Send-Data/Sources/02Sources-for-Hosted-Collectors/HTTP-Source/Upload-Metrics-to-an-HTTP-Source#supported-http-headers +# +# ## Desired source name. +# ## Useful if you want to override the source name configured for the source. +# # source_name = "" +# +# ## Desired host name. +# ## Useful if you want to override the source host configured for the source. +# # source_host = "" +# +# ## Desired source category. +# ## Useful if you want to override the source category configured for the source. +# # source_category = "" +# +# ## Comma-separated key=value list of dimensions to apply to every metric. +# ## Custom dimensions will allow you to query your metrics at a more granular level. 
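+# ## For example (illustrative key=value pairs): dimensions = "env=prod,team=metrics"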
+# # dimensions = "" + + +# # Configuration for Syslog server to send metrics to +# [[outputs.syslog]] +# ## URL to connect to +# ## ex: address = "tcp://127.0.0.1:8094" +# ## ex: address = "tcp4://127.0.0.1:8094" +# ## ex: address = "tcp6://127.0.0.1:8094" +# ## ex: address = "tcp6://[2001:db8::1]:8094" +# ## ex: address = "udp://127.0.0.1:8094" +# ## ex: address = "udp4://127.0.0.1:8094" +# ## ex: address = "udp6://127.0.0.1:8094" +# address = "tcp://127.0.0.1:8094" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Period between keep alive probes. +# ## Only applies to TCP sockets. +# ## 0 disables keep alive probes. +# ## Defaults to the OS configuration. +# # keep_alive_period = "5m" +# +# ## The framing technique with which it is expected that messages are +# ## transported (default = "octet-counting"). Whether the messages come +# ## using the octect-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1), +# ## or the non-transparent framing technique (RFC6587#section-3.4.2). Must +# ## be one of "octet-counting", "non-transparent". +# # framing = "octet-counting" +# +# ## The trailer to be expected in case of non-transparent framing (default = "LF"). +# ## Must be one of "LF", or "NUL". +# # trailer = "LF" +# +# ## SD-PARAMs settings +# ## Syslog messages can contain key/value pairs within zero or more +# ## structured data sections. For each unrecognized metric tag/field a +# ## SD-PARAMS is created. +# ## +# ## Example: +# ## [[outputs.syslog]] +# ## sdparam_separator = "_" +# ## default_sdid = "default@32473" +# ## sdids = ["foo@123", "bar@456"] +# ## +# ## input => xyzzy,x=y foo@123_value=42,bar@456_value2=84,something_else=1 +# ## output (structured data only) => [foo@123 value=42][bar@456 value2=84][default@32473 something_else=1 x=y] +# +# ## SD-PARAMs separator between the sdid and tag/field key (default = "_") +# # sdparam_separator = "_" +# +# ## Default sdid used for tags/fields that don't contain a prefix defined in +# ## the explicit sdids setting below If no default is specified, no SD-PARAMs +# ## will be used for unrecognized field. +# # default_sdid = "default@32473" +# +# ## List of explicit prefixes to extract from tag/field keys and use as the +# ## SDID, if they match (see above example for more details): +# # sdids = ["foo@123", "bar@456"] +# +# ## Default severity value. Severity and Facility are used to calculate the +# ## message PRI value (RFC5424#section-6.2.1). Used when no metric field +# ## with key "severity_code" is defined. If unset, 5 (notice) is the default +# # default_severity_code = 5 +# +# ## Default facility value. Facility and Severity are used to calculate the +# ## message PRI value (RFC5424#section-6.2.1). Used when no metric field with +# ## key "facility_code" is defined. If unset, 1 (user-level) is the default +# # default_facility_code = 1 +# +# ## Default APP-NAME value (RFC5424#section-6.2.5) +# ## Used when no metric tag with key "appname" is defined. +# ## If unset, "Telegraf" is the default +# # default_appname = "Telegraf" + + +# # Configuration for Amazon Timestream output. 
+# [[outputs.timestream]] +# ## Amazon Region +# region = "us-east-1" +# +# ## Amazon Credentials +# ## Credentials are loaded in the following order: +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile +# #access_key = "" +# #secret_key = "" +# #token = "" +# #role_arn = "" +# #web_identity_token_file = "" +# #role_session_name = "" +# #profile = "" +# #shared_credential_file = "" +# +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" +# +# ## Timestream database where the metrics will be inserted. +# ## The database must exist prior to starting Telegraf. +# database_name = "yourDatabaseNameHere" +# +# ## Specifies if the plugin should describe the Timestream database upon starting +# ## to validate if it has access necessary permissions, connection, etc., as a safety check. +# ## If the describe operation fails, the plugin will not start +# ## and therefore the Telegraf agent will not start. +# describe_database_on_start = false +# +# ## The mapping mode specifies how Telegraf records are represented in Timestream. +# ## Valid values are: single-table, multi-table. +# ## For example, consider the following data in line protocol format: +# ## weather,location=us-midwest,season=summer temperature=82,humidity=71 1465839830100400200 +# ## airquality,location=us-west no2=5,pm25=16 1465839830100400200 +# ## where weather and airquality are the measurement names, location and season are tags, +# ## and temperature, humidity, no2, pm25 are fields. 
+# ## In multi-table mode: +# ## - first line will be ingested to table named weather +# ## - second line will be ingested to table named airquality +# ## - the tags will be represented as dimensions +# ## - first table (weather) will have two records: +# ## one with measurement name equals to temperature, +# ## another with measurement name equals to humidity +# ## - second table (airquality) will have two records: +# ## one with measurement name equals to no2, +# ## another with measurement name equals to pm25 +# ## - the Timestream tables from the example will look like this: +# ## TABLE "weather": +# ## time | location | season | measure_name | measure_value::bigint +# ## 2016-06-13 17:43:50 | us-midwest | summer | temperature | 82 +# ## 2016-06-13 17:43:50 | us-midwest | summer | humidity | 71 +# ## TABLE "airquality": +# ## time | location | measure_name | measure_value::bigint +# ## 2016-06-13 17:43:50 | us-west | no2 | 5 +# ## 2016-06-13 17:43:50 | us-west | pm25 | 16 +# ## In single-table mode: +# ## - the data will be ingested to a single table, which name will be valueOf(single_table_name) +# ## - measurement name will stored in dimension named valueOf(single_table_dimension_name_for_telegraf_measurement_name) +# ## - location and season will be represented as dimensions +# ## - temperature, humidity, no2, pm25 will be represented as measurement name +# ## - the Timestream table from the example will look like this: +# ## Assuming: +# ## - single_table_name = "my_readings" +# ## - single_table_dimension_name_for_telegraf_measurement_name = "namespace" +# ## TABLE "my_readings": +# ## time | location | season | namespace | measure_name | measure_value::bigint +# ## 2016-06-13 17:43:50 | us-midwest | summer | weather | temperature | 82 +# ## 2016-06-13 17:43:50 | us-midwest | summer | weather | humidity | 71 +# ## 2016-06-13 17:43:50 | us-west | NULL | airquality | no2 | 5 +# ## 2016-06-13 17:43:50 | us-west | NULL | airquality | pm25 | 16 +# ## In most cases, using multi-table mapping mode is recommended. +# ## However, you can consider using single-table in situations when you have thousands of measurement names. +# mapping_mode = "multi-table" +# +# ## Only valid and required for mapping_mode = "single-table" +# ## Specifies the Timestream table where the metrics will be uploaded. +# # single_table_name = "yourTableNameHere" +# +# ## Only valid and required for mapping_mode = "single-table" +# ## Describes what will be the Timestream dimension name for the Telegraf +# ## measurement name. +# # single_table_dimension_name_for_telegraf_measurement_name = "namespace" +# +# ## Specifies if the plugin should create the table, if the table do not exist. +# ## The plugin writes the data without prior checking if the table exists. +# ## When the table does not exist, the error returned from Timestream will cause +# ## the plugin to create the table, if this parameter is set to true. +# create_table_if_not_exists = true +# +# ## Only valid and required if create_table_if_not_exists = true +# ## Specifies the Timestream table magnetic store retention period in days. +# ## Check Timestream documentation for more details. +# create_table_magnetic_store_retention_period_in_days = 365 +# +# ## Only valid and required if create_table_if_not_exists = true +# ## Specifies the Timestream table memory store retention period in hours. +# ## Check Timestream documentation for more details. 
+# create_table_memory_store_retention_period_in_hours = 24 +# +# ## Only valid and optional if create_table_if_not_exists = true +# ## Specifies the Timestream table tags. +# ## Check Timestream documentation for more details +# # create_table_tags = { "foo" = "bar", "environment" = "dev"} + + +# # Write metrics to Warp 10 +# [[outputs.warp10]] +# # Prefix to add to the measurement. +# prefix = "telegraf." +# +# # URL of the Warp 10 server +# warp_url = "http://localhost:8080" +# +# # Write token to access your app on warp 10 +# token = "Token" +# +# # Warp 10 query timeout +# # timeout = "15s" +# +# ## Print Warp 10 error body +# # print_error_body = false +# +# ## Max string error size +# # max_string_error_size = 511 +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Configuration for Wavefront server to send metrics to +# [[outputs.wavefront]] +# ## Url for Wavefront Direct Ingestion or using HTTP with Wavefront Proxy +# ## If using Wavefront Proxy, also specify port. example: http://proxyserver:2878 +# url = "https://metrics.wavefront.com" +# +# ## Authentication Token for Wavefront. Only required if using Direct Ingestion +# #token = "DUMMY_TOKEN" +# +# ## DNS name of the wavefront proxy server. Do not use if url is specified +# #host = "wavefront.example.com" +# +# ## Port that the Wavefront proxy server listens on. Do not use if url is specified +# #port = 2878 +# +# ## prefix for metrics keys +# #prefix = "my.specific.prefix." +# +# ## whether to use "value" for name of simple fields. default is false +# #simple_fields = false +# +# ## character to use between metric and field name. default is . (dot) +# #metric_separator = "." +# +# ## Convert metric name paths to use metricSeparator character +# ## When true will convert all _ (underscore) characters in final metric name. default is true +# #convert_paths = true +# +# ## Use Strict rules to sanitize metric and tag names from invalid characters +# ## When enabled forward slash (/) and comma (,) will be accepted +# #use_strict = false +# +# ## Use Regex to sanitize metric and tag names from invalid characters +# ## Regex is more thorough, but significantly slower. default is false +# #use_regex = false +# +# ## point tags to use as the source name for Wavefront (if none found, host will be used) +# #source_override = ["hostname", "address", "agent_host", "node_host"] +# +# ## whether to convert boolean values to numeric values, with false -> 0.0 and true -> 1.0. default is true +# #convert_bool = true +# +# ## Truncate metric tags to a total of 254 characters for the tag name value. Wavefront will reject any +# ## data point exceeding this limit if not truncated. Defaults to 'false' to provide backwards compatibility. +# #truncate_tags = false +# +# ## Flush the internal buffers after each batch. This effectively bypasses the background sending of metrics +# ## normally done by the Wavefront SDK. This can be used if you are experiencing buffer overruns. The sending +# ## of metrics will block for a longer time, but this will be handled gracefully by the internal buffering in +# ## Telegraf. 
+# #immediate_flush = true +# +# ## Define a mapping, namespaced by metric prefix, from string values to numeric values +# ## deprecated in 1.9; use the enum processor plugin +# #[[outputs.wavefront.string_to_number.elasticsearch]] +# # green = 1.0 +# # yellow = 0.5 +# # red = 0.0 + + +# # Generic WebSocket output writer. +# [[outputs.websocket]] +# ## URL is the address to send metrics to. Make sure ws or wss scheme is used. +# url = "ws://127.0.0.1:8080/telegraf" +# +# ## Timeouts (make sure read_timeout is larger than server ping interval or set to zero). +# # connect_timeout = "30s" +# # write_timeout = "30s" +# # read_timeout = "30s" +# +# ## Optionally turn on using text data frames (binary by default). +# # use_text_frames = false +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Data format to output. +# ## Each data format has it's own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# # data_format = "influx" +# +# ## Additional HTTP Upgrade headers +# # [outputs.websocket.headers] +# # Authorization = "Bearer " + + +# # Send aggregated metrics to Yandex.Cloud Monitoring +# [[outputs.yandex_cloud_monitoring]] +# ## Timeout for HTTP writes. +# # timeout = "20s" +# +# ## Yandex.Cloud monitoring API endpoint. Normally should not be changed +# # endpoint_url = "https://monitoring.api.cloud.yandex.net/monitoring/v2/data/write" +# +# ## All user metrics should be sent with "custom" service specified. Normally should not be changed +# # service = "custom" + + +############################################################################### +# PROCESSOR PLUGINS # +############################################################################### + + +# # Attach AWS EC2 metadata to metrics +# [[processors.aws_ec2]] +# ## Instance identity document tags to attach to metrics. +# ## For more information see: +# ## https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-identity-documents.html +# ## +# ## Available tags: +# ## * accountId +# ## * architecture +# ## * availabilityZone +# ## * billingProducts +# ## * imageId +# ## * instanceId +# ## * instanceType +# ## * kernelId +# ## * pendingTime +# ## * privateIp +# ## * ramdiskId +# ## * region +# ## * version +# imds_tags = [] +# +# ## EC2 instance tags retrieved with DescribeTags action. +# ## In case tag is empty upon retrieval it's omitted when tagging metrics. +# ## Note that in order for this to work, role attached to EC2 instance or AWS +# ## credentials available from the environment must have a policy attached, that +# ## allows ec2:DescribeTags. +# ## +# ## For more information see: +# ## https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeTags.html +# ec2_tags = [] +# +# ## Timeout for http requests made by against aws ec2 metadata endpoint. +# timeout = "10s" +# +# ## ordered controls whether or not the metrics need to stay in the same order +# ## this plugin received them in. If false, this plugin will change the order +# ## with requests hitting cached results moving through immediately and not +# ## waiting on slower lookups. This may cause issues for you if you are +# ## depending on the order of metrics staying the same. If so, set this to true. +# ## Keeping the metrics ordered may be slightly slower. 
+# ordered = false +# +# ## max_parallel_calls is the maximum number of AWS API calls to be in flight +# ## at the same time. +# ## It's probably best to keep this number fairly low. +# max_parallel_calls = 10 + + +# # Clone metrics and apply modifications. +# [[processors.clone]] +# ## All modifications on inputs and aggregators can be overridden: +# # name_override = "new_name" +# # name_prefix = "new_name_prefix" +# # name_suffix = "new_name_suffix" +# +# ## Tags to be added (all values must be strings) +# # [processors.clone.tags] +# # additional_tag = "tag_value" + + +# # Convert values to another metric value type +# [[processors.converter]] +# ## Tags to convert +# ## +# ## The table key determines the target type, and the array of key-values +# ## select the keys to convert. The array may contain globs. +# ## = [...] +# [processors.converter.tags] +# measurement = [] +# string = [] +# integer = [] +# unsigned = [] +# boolean = [] +# float = [] +# +# ## Fields to convert +# ## +# ## The table key determines the target type, and the array of key-values +# ## select the keys to convert. The array may contain globs. +# ## = [...] +# [processors.converter.fields] +# measurement = [] +# tag = [] +# string = [] +# integer = [] +# unsigned = [] +# boolean = [] +# float = [] + + +# # Dates measurements, tags, and fields that pass through this filter. +# [[processors.date]] +# ## New tag to create +# tag_key = "month" +# +# ## New field to create (cannot set both field_key and tag_key) +# # field_key = "month" +# +# ## Date format string, must be a representation of the Go "reference time" +# ## which is "Mon Jan 2 15:04:05 -0700 MST 2006". +# date_format = "Jan" +# +# ## If destination is a field, date format can also be one of +# ## "unix", "unix_ms", "unix_us", or "unix_ns", which will insert an integer field. +# # date_format = "unix" +# +# ## Offset duration added to the date string when writing the new tag. +# # date_offset = "0s" +# +# ## Timezone to use when creating the tag or field using a reference time +# ## string. This can be set to one of "UTC", "Local", or to a location name +# ## in the IANA Time Zone database. +# ## example: timezone = "America/Los_Angeles" +# # timezone = "UTC" + + +# # Filter metrics with repeating field values +# [[processors.dedup]] +# ## Maximum time to suppress output +# dedup_interval = "600s" + + +# # Defaults sets default value(s) for specified fields that are not set on incoming metrics. +# [[processors.defaults]] +# ## Ensures a set of fields always exists on your metric(s) with their +# ## respective default value. +# ## For any given field pair (key = default), if it's not set, a field +# ## is set on the metric with the specified default. +# ## +# ## A field is considered not set if it is nil on the incoming metric; +# ## or it is not nil but its value is an empty string or is a string +# ## of one or more spaces. +# ## = +# # [processors.defaults.fields] +# # field_1 = "bar" +# # time_idle = 0 +# # is_error = true + + +# # Map enum values according to given table. +# [[processors.enum]] +# [[processors.enum.mapping]] +# ## Name of the field to map. Globs accepted. +# field = "status" +# +# ## Name of the tag to map. Globs accepted. +# # tag = "status" +# +# ## Destination tag or field to be used for the mapped value. By default the +# ## source tag or field is used, overwriting the original value. +# dest = "status_code" +# +# ## Default value to be used for all values not contained in the mapping +# ## table. 
When unset, the unmodified value for the field will be used if no +# ## match is found. +# # default = 0 +# +# ## Table of mappings +# [processors.enum.mapping.value_mappings] +# green = 1 +# amber = 2 +# red = 3 + + +# # Run executable as long-running processor plugin +# [[processors.execd]] +# ## Program to run as daemon +# ## eg: command = ["/path/to/your_program", "arg1", "arg2"] +# command = ["cat"] +# +# ## Delay before the process is restarted after an unexpected termination +# restart_delay = "10s" + + +# # Performs file path manipulations on tags and fields +# [[processors.filepath]] +# ## Treat the tag value as a path and convert it to its last element, storing the result in a new tag +# # [[processors.filepath.basename]] +# # tag = "path" +# # dest = "basepath" +# +# ## Treat the field value as a path and keep all but the last element of path, typically the path's directory +# # [[processors.filepath.dirname]] +# # field = "path" +# +# ## Treat the tag value as a path, converting it to its the last element without its suffix +# # [[processors.filepath.stem]] +# # tag = "path" +# +# ## Treat the tag value as a path, converting it to the shortest path name equivalent +# ## to path by purely lexical processing +# # [[processors.filepath.clean]] +# # tag = "path" +# +# ## Treat the tag value as a path, converting it to a relative path that is lexically +# ## equivalent to the source path when joined to 'base_path' +# # [[processors.filepath.rel]] +# # tag = "path" +# # base_path = "/var/log" +# +# ## Treat the tag value as a path, replacing each separator character in path with a '/' character. Has only +# ## effect on Windows +# # [[processors.filepath.toslash]] +# # tag = "path" + + +# # Add a tag of the network interface name looked up over SNMP by interface number +# [[processors.ifname]] +# ## Name of tag holding the interface number +# # tag = "ifIndex" +# +# ## Name of output tag where service name will be added +# # dest = "ifName" +# +# ## Name of tag of the SNMP agent to request the interface name from +# # agent = "agent" +# +# ## Timeout for each request. +# # timeout = "5s" +# +# ## SNMP version; can be 1, 2, or 3. +# # version = 2 +# +# ## SNMP community string. +# # community = "public" +# +# ## Number of retries to attempt. +# # retries = 3 +# +# ## The GETBULK max-repetitions parameter. +# # max_repetitions = 10 +# +# ## SNMPv3 authentication and encryption options. +# ## +# ## Security Name. +# # sec_name = "myuser" +# ## Authentication protocol; one of "MD5", "SHA", or "". +# # auth_protocol = "MD5" +# ## Authentication password. +# # auth_password = "pass" +# ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv". +# # sec_level = "authNoPriv" +# ## Context Name. +# # context_name = "" +# ## Privacy protocol used for encrypted messages; one of "DES", "AES" or "". +# # priv_protocol = "" +# ## Privacy password used for encrypted messages. +# # priv_password = "" +# +# ## max_parallel_lookups is the maximum number of SNMP requests to +# ## make at the same time. +# # max_parallel_lookups = 100 +# +# ## ordered controls whether or not the metrics need to stay in the +# ## same order this plugin received them in. If false, this plugin +# ## may change the order when data is cached. If you need metrics to +# ## stay in order set this to true. keeping the metrics ordered may +# ## be slightly slower +# # ordered = false +# +# ## cache_ttl is the amount of time interface names are cached for a +# ## given agent. 
After this period elapses if names are needed they +# ## will be retrieved again. +# # cache_ttl = "8h" + + +# # Apply metric modifications using override semantics. +# [[processors.override]] +# ## All modifications on inputs and aggregators can be overridden: +# # name_override = "new_name" +# # name_prefix = "new_name_prefix" +# # name_suffix = "new_name_suffix" +# +# ## Tags to be added (all values must be strings) +# # [processors.override.tags] +# # additional_tag = "tag_value" + + +# # Parse a value in a specified field/tag(s) and add the result in a new metric +# [[processors.parser]] +# ## The name of the fields whose value will be parsed. +# parse_fields = [] +# +# ## If true, incoming metrics are not emitted. +# drop_original = false +# +# ## If set to override, emitted metrics will be merged by overriding the +# ## original metric using the newly parsed metrics. +# merge = "override" +# +# ## The data format to be read from files +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Rotate a single valued metric into a multi field metric +# [[processors.pivot]] +# ## Tag to use for naming the new field. +# tag_key = "name" +# ## Field to use as the value of the new field. +# value_key = "value" + + +# # Given a tag/field of a TCP or UDP port number, add a tag/field of the service name looked up in the system services file +# [[processors.port_name]] +# [[processors.port_name]] +# ## Name of tag holding the port number +# # tag = "port" +# ## Or name of the field holding the port number +# # field = "port" +# +# ## Name of output tag or field (depending on the source) where service name will be added +# # dest = "service" +# +# ## Default tcp or udp +# # default_protocol = "tcp" +# +# ## Tag containing the protocol (tcp or udp, case-insensitive) +# # protocol_tag = "proto" +# +# ## Field containing the protocol (tcp or udp, case-insensitive) +# # protocol_field = "proto" + + +# # Print all metrics that pass through this filter. +# [[processors.printer]] + + +# # Transforms tag and field values with regex pattern +# [[processors.regex]] +# ## Tag and field conversions are defined in separate sub-tables +# # [[processors.regex.tags]] +# # ## Tag to change +# # key = "resp_code" +# # ## Regular expression to match on a tag value +# # pattern = "^(\\d)\\d\\d$" +# # ## Matches of the pattern will be replaced with this string. Use ${1} +# # ## notation to use the text of the first submatch. +# # replacement = "${1}xx" +# +# # [[processors.regex.fields]] +# # ## Field to change +# # key = "request" +# # ## All the power of the Go regular expressions available here +# # ## For example, named subgroups +# # pattern = "^/api(?P<method>/[\\w/]+)\\S*" +# # replacement = "${method}" +# # ## If result_key is present, a new field will be created +# # ## instead of changing existing field +# # result_key = "method" +# +# ## Multiple conversions may be applied for one field sequentially +# ## Let's extract one more value +# # [[processors.regex.fields]] +# # key = "request" +# # pattern = ".*category=(\\w+).*" +# # replacement = "${1}" +# # result_key = "search_category" + + +# # Rename measurements, tags, and fields that pass through this filter.
+
+
+# # Rename measurements, tags, and fields that pass through this filter.
+# [[processors.rename]]
+
+
+# # ReverseDNS does a reverse lookup on IP addresses to retrieve the DNS name
+# [[processors.reverse_dns]]
+# ## For optimal performance, you may want to limit which metrics are passed to this
+# ## processor. eg:
+# ## namepass = ["my_metric_*"]
+#
+# ## cache_ttl is how long the dns entries should stay cached for.
+# ## generally longer is better, but if you expect a large number of diverse lookups
+# ## you'll want to consider memory use.
+# cache_ttl = "24h"
+#
+# ## lookup_timeout is how long to wait for a single dns request to respond.
+# ## this is also the maximum acceptable latency for a metric travelling through
+# ## the reverse_dns processor. After lookup_timeout is exceeded, a metric will
+# ## be passed on unaltered.
+# ## multiple simultaneous resolution requests for the same IP will only make a
+# ## single rDNS request, and they will all wait for the answer for this long.
+# lookup_timeout = "3s"
+#
+# ## max_parallel_lookups is the maximum number of dns requests to be in flight
+# ## at the same time. Requests hitting cached values do not count against this
+# ## total, and neither do multiple requests for the same IP.
+# ## It's probably best to keep this number fairly low.
+# max_parallel_lookups = 10
+#
+# ## ordered controls whether or not the metrics need to stay in the same order
+# ## this plugin received them in. If false, this plugin will change the order
+# ## with requests hitting cached results moving through immediately and not
+# ## waiting on slower lookups. This may cause issues for you if you are
+# ## depending on the order of metrics staying the same. If so, set this to true.
+# ## keeping the metrics ordered may be slightly slower.
+# ordered = false
+#
+# [[processors.reverse_dns.lookup]]
+# ## get the ip from the field "source_ip", and put the result in the field "source_name"
+# field = "source_ip"
+# dest = "source_name"
+#
+# [[processors.reverse_dns.lookup]]
+# ## get the ip from the tag "destination_ip", and put the result in the tag
+# ## "destination_name".
+# tag = "destination_ip"
+# dest = "destination_name"
+#
+# ## If you would prefer destination_name to be a field instead, you can use a
+# ## processors.converter after this one, specifying the order attribute.
+
+
+# # Add the S2 Cell ID as a tag based on latitude and longitude fields
+# [[processors.s2geo]]
+# ## The name of the lat and lon fields containing WGS-84 latitude and
+# ## longitude in decimal degrees.
+# # lat_field = "lat"
+# # lon_field = "lon"
+#
+# ## New tag to create
+# # tag_key = "s2_cell_id"
+#
+# ## Cell level (see https://s2geometry.io/resources/s2cell_statistics.html)
+# # cell_level = 9
+
+
+# # Process metrics using a Starlark script
+# [[processors.starlark]]
+# ## The Starlark source can be set as a string in this configuration file, or
+# ## by referencing a file containing the script. Only one source or script
+# ## should be set at once.
+# ##
+# ## Source of the Starlark script.
+# source = '''
+# def apply(metric):
+#     return metric
+# '''
+#
+# ## File containing a Starlark script.
+# # script = "/usr/local/bin/myscript.star"
+#
+# ## The constants of the Starlark script.
+# # [processors.starlark.constants]
+# # max_size = 10
+# # threshold = 0.75
+# # default_name = "Julia"
+# # debug_mode = true
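+
+# ## Illustrative example (editor's sketch, not part of the upstream sample):
+# ## a minimal Starlark script that adds a derived field when the source field
+# ## exists. The field names "fahrenheit" and "celsius" are placeholders.
+# # [[processors.starlark]]
+# #   source = '''
+# # def apply(metric):
+# #     if "fahrenheit" in metric.fields:
+# #         metric.fields["celsius"] = (metric.fields["fahrenheit"] - 32.0) * 5.0 / 9.0
+# #     return metric
+# # '''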
+
+
+# # Perform string processing on tags, fields, and measurements
+# [[processors.strings]]
+# ## Convert a tag value to uppercase
+# # [[processors.strings.uppercase]]
+# # tag = "method"
+#
+# ## Convert a field value to lowercase and store in a new field
+# # [[processors.strings.lowercase]]
+# # field = "uri_stem"
+# # dest = "uri_stem_normalised"
+#
+# ## Convert a field value to titlecase
+# # [[processors.strings.titlecase]]
+# # field = "status"
+#
+# ## Trim leading and trailing whitespace using the default cutset
+# # [[processors.strings.trim]]
+# # field = "message"
+#
+# ## Trim leading characters in cutset
+# # [[processors.strings.trim_left]]
+# # field = "message"
+# # cutset = "\t"
+#
+# ## Trim trailing characters in cutset
+# # [[processors.strings.trim_right]]
+# # field = "message"
+# # cutset = "\r\n"
+#
+# ## Trim the given prefix from the field
+# # [[processors.strings.trim_prefix]]
+# # field = "my_value"
+# # prefix = "my_"
+#
+# ## Trim the given suffix from the field
+# # [[processors.strings.trim_suffix]]
+# # field = "read_count"
+# # suffix = "_count"
+#
+# ## Replace all non-overlapping instances of old with new
+# # [[processors.strings.replace]]
+# # measurement = "*"
+# # old = ":"
+# # new = "_"
+#
+# ## Trims strings based on width
+# # [[processors.strings.left]]
+# # field = "message"
+# # width = 10
+#
+# ## Decode a base64 encoded utf-8 string
+# # [[processors.strings.base64decode]]
+# # field = "message"
+#
+# ## Sanitize a string to ensure it is a valid utf-8 string
+# ## Each run of invalid UTF-8 byte sequences is replaced by the replacement string, which may be empty
+# # [[processors.strings.valid_utf8]]
+# # field = "message"
+# # replacement = ""
+
+
+# # Restricts the number of tags that can pass through this filter and chooses which tags to preserve when over the limit.
+# [[processors.tag_limit]]
+# ## Maximum number of tags to preserve
+# limit = 10
+#
+# ## List of tags to preferentially preserve
+# keep = ["foo", "bar", "baz"]
+
+
+# # Uses a Go template to create a new tag
+# [[processors.template]]
+# ## Tag to set with the output of the template.
+# tag = "topic"
+#
+# ## Go template used to create the tag value. In order to ease TOML
+# ## escaping requirements, you may wish to use single quotes around the
+# ## template string.
+# template = '{{ .Tag "hostname" }}.{{ .Tag "level" }}'
+
+
+# # Keep only the metrics that rank in the top k of an aggregation over a period.
+# [[processors.topk]]
+# ## How many seconds between aggregations
+# # period = 10
+#
+# ## How many top metrics to return
+# # k = 10
+#
+# ## Over which tags should the aggregation be done. Globs can be specified, in
+# ## which case any tag matching the glob will be aggregated over. If set to an
+# ## empty list, no aggregation over tags is done
+# # group_by = ['*']
+#
+# ## Over which fields the top k are calculated
+# # fields = ["value"]
+#
+# ## What aggregation to use. Options: sum, mean, min, max
+# # aggregation = "mean"
+#
+# ## Instead of the top k largest metrics, return the bottom k lowest metrics
+# # bottomk = false
+#
+# ## The plugin assigns each metric a GroupBy tag generated from its name and
+# ## tags. If this setting is different than "" the plugin will add a
+# ## tag (which name will be the value of this setting) to each metric with
+# ## the value of the calculated GroupBy tag.
Useful for debugging +# # add_groupby_tag = "" +# +# ## These settings provide a way to know the position of each metric in +# ## the top k. The 'add_rank_field' setting allows to specify for which +# ## fields the position is required. If the list is non empty, then a field +# ## will be added to each and every metric for each string present in this +# ## setting. This field will contain the ranking of the group that +# ## the metric belonged to when aggregated over that field. +# ## The name of the field will be set to the name of the aggregation field, +# ## suffixed with the string '_topk_rank' +# # add_rank_fields = [] +# +# ## These settings provide a way to know what values the plugin is generating +# ## when aggregating metrics. The 'add_aggregate_field' setting allows to +# ## specify for which fields the final aggregation value is required. If the +# ## list is non empty, then a field will be added to each every metric for +# ## each field present in this setting. This field will contain +# ## the computed aggregation for the group that the metric belonged to when +# ## aggregated over that field. +# ## The name of the field will be set to the name of the aggregation field, +# ## suffixed with the string '_topk_aggregate' +# # add_aggregate_fields = [] + + +# # Rotate multi field metric into several single field metrics +# [[processors.unpivot]] +# ## Tag to use for the name. +# tag_key = "name" +# ## Field to use for the name of the value. +# value_key = "value" + + +############################################################################### +# AGGREGATOR PLUGINS # +############################################################################### + + +# # Keep the aggregate basicstats of each metric passing through. +# [[aggregators.basicstats]] +# ## The period on which to flush & clear the aggregator. +# period = "30s" +# +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. +# drop_original = false +# +# ## Configures which basic stats to push as fields +# # stats = ["count", "min", "max", "mean", "stdev", "s2", "sum"] + + +# # Calculates a derivative for every field. +# [[aggregators.derivative]] +# ## The period in which to flush the aggregator. +# period = "30s" +# ## +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. +# drop_original = false +# ## +# ## This aggregator will estimate a derivative for each field, which is +# ## contained in both the first and last metric of the aggregation interval. +# ## Without further configuration the derivative will be calculated with +# ## respect to the time difference between these two measurements in seconds. +# ## The formula applied is for every field: +# ## +# ## value_last - value_first +# ## derivative = -------------------------- +# ## time_difference_in_seconds +# ## +# ## The resulting derivative will be named *fieldname_rate*. The suffix +# ## "_rate" can be configured by the *suffix* parameter. When using a +# ## derivation variable you can include its name for more clarity. +# # suffix = "_rate" +# ## +# ## As an abstraction the derivative can be calculated not only by the time +# ## difference but by the difference of a field, which is contained in the +# ## measurement. This field is assumed to be monotonously increasing. This +# ## feature is used by specifying a *variable*. 
+# ## Make sure the specified variable is not filtered and exists in the metrics +# ## passed to this aggregator! +# # variable = "" +# ## +# ## When using a field as the derivation parameter the name of that field will +# ## be used for the resulting derivative, e.g. *fieldname_by_parameter*. +# ## +# ## Note, that the calculation is based on the actual timestamp of the +# ## measurements. When there is only one measurement during that period, the +# ## measurement will be rolled over to the next period. The maximum number of +# ## such roll-overs can be configured with a default of 10. +# # max_roll_over = 10 +# ## + + +# # Report the final metric of a series +# [[aggregators.final]] +# ## The period on which to flush & clear the aggregator. +# period = "30s" +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. +# drop_original = false +# +# ## The time that a series is not updated until considering it final. +# series_timeout = "5m" + + +# # Create aggregate histograms. +# [[aggregators.histogram]] +# ## The period in which to flush the aggregator. +# period = "30s" +# +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. +# drop_original = false +# +# ## If true, the histogram will be reset on flush instead +# ## of accumulating the results. +# reset = false +# +# ## Whether bucket values should be accumulated. If set to false, "gt" tag will be added. +# ## Defaults to true. +# cumulative = true +# +# ## Example config that aggregates all fields of the metric. +# # [[aggregators.histogram.config]] +# # ## Right borders of buckets (with +Inf implicitly added). +# # buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0] +# # ## The name of metric. +# # measurement_name = "cpu" +# +# ## Example config that aggregates only specific fields of the metric. +# # [[aggregators.histogram.config]] +# # ## Right borders of buckets (with +Inf implicitly added). +# # buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0] +# # ## The name of metric. +# # measurement_name = "diskio" +# # ## The concrete fields of metric +# # fields = ["io_time", "read_time", "write_time"] + + +# # Merge metrics into multifield metrics by series key +# [[aggregators.merge]] +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. +# drop_original = true + + +# # Keep the aggregate min/max of each metric passing through. +# [[aggregators.minmax]] +# ## General Aggregator Arguments: +# ## The period on which to flush & clear the aggregator. +# period = "30s" +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. +# drop_original = false + + +# # Keep the aggregate quantiles of each metric passing through. +# [[aggregators.quantile]] +# ## General Aggregator Arguments: +# ## The period on which to flush & clear the aggregator. +# period = "30s" +# +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. 
+# drop_original = false +# +# ## Quantiles to output in the range [0,1] +# # quantiles = [0.25, 0.5, 0.75] +# +# ## Type of aggregation algorithm +# ## Supported are: +# ## "t-digest" -- approximation using centroids, can cope with large number of samples +# ## "exact R7" -- exact computation also used by Excel or NumPy (Hyndman & Fan 1996 R7) +# ## "exact R8" -- exact computation (Hyndman & Fan 1996 R8) +# ## NOTE: Do not use "exact" algorithms with large number of samples +# ## to not impair performance or memory consumption! +# # algorithm = "t-digest" +# +# ## Compression for approximation (t-digest). The value needs to be +# ## greater or equal to 1.0. Smaller values will result in more +# ## performance but less accuracy. +# # compression = 100.0 + + +# # Count the occurrence of values in fields. +# [[aggregators.valuecounter]] +# ## General Aggregator Arguments: +# ## The period on which to flush & clear the aggregator. +# period = "30s" +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. +# drop_original = false +# ## The fields for which the values will be counted +# fields = [] + + +############################################################################### +# INPUT PLUGINS # +############################################################################### + + +# Read metrics about cpu usage +[[inputs.cpu]] + ## Whether to report per-cpu stats or not + percpu = true + ## Whether to report total system cpu stats or not + totalcpu = true + ## If true, collect raw CPU time metrics + collect_cpu_time = false + ## If true, compute and report the sum of all non-idle CPU states + report_active = true + + +# Read metrics about disk usage by mount point +[[inputs.disk]] + ## By default stats will be gathered for all mount points. + ## Set mount_points will restrict the stats to only the specified mount points. + # mount_points = ["/"] + + ## Ignore mount points by filesystem type. + ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"] + + +# Read metrics about disk IO by device +[[inputs.diskio]] + ## By default, telegraf will gather stats for all devices including + ## disk partitions. + ## Setting devices will restrict the stats to the specified devices. + # devices = ["sda", "sdb", "vd*"] + ## Uncomment the following line if you need disk serial numbers. + # skip_serial_number = false + # + ## On systems which support it, device metadata can be added in the form of + ## tags. + ## Currently only Linux is supported via udev properties. You can view + ## available properties for a device by running: + ## 'udevadm info -q property -n /dev/sda' + ## Note: Most, but not all, udev properties can be accessed this way. Properties + ## that are currently inaccessible include DEVTYPE, DEVNAME, and DEVPATH. + # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"] + # + ## Using the same metadata source as device_tags, you can also customize the + ## name of the device via templates. + ## The 'name_templates' parameter is a list of templates to try and apply to + ## the device. The template may contain variables in the form of '$PROPERTY' or + ## '${PROPERTY}'. The first template which does not contain any variables not + ## present for the device is used as the device name tag. + ## The typical use case is for LVM volumes, to get the VG/LV name instead of + ## the near-meaningless DM-0 name. 
+ # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"] + + +# Get kernel statistics from /proc/stat +[[inputs.kernel]] + # no configuration + + +# Read metrics about memory usage +[[inputs.mem]] + # no configuration + + +# Get the number of processes and group them by status +[[inputs.processes]] + # no configuration + + +# Read metrics about swap memory usage +[[inputs.swap]] + # no configuration + + +# Read metrics about system load & uptime +[[inputs.system]] + ## Uncomment to remove deprecated metrics. + # fielddrop = ["uptime_format"] + + +# # Gather ActiveMQ metrics +# [[inputs.activemq]] +# ## ActiveMQ WebConsole URL +# url = "http://127.0.0.1:8161" +# +# ## Required ActiveMQ Endpoint +# ## deprecated in 1.11; use the url option +# # server = "127.0.0.1" +# # port = 8161 +# +# ## Credentials for basic HTTP authentication +# # username = "admin" +# # password = "admin" +# +# ## Required ActiveMQ webadmin root path +# # webadmin = "admin" +# +# ## Maximum time to receive response. +# # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read stats from aerospike server(s) +# [[inputs.aerospike]] +# ## Aerospike servers to connect to (with port) +# ## This plugin will query all namespaces the aerospike +# ## server has configured and get stats for them. +# servers = ["localhost:3000"] +# +# # username = "telegraf" +# # password = "pa$$word" +# +# ## Optional TLS Config +# # enable_tls = false +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## If false, skip chain & host verification +# # insecure_skip_verify = true +# +# # Feature Options +# # Add namespace variable to limit the namespaces executed on +# # Leave blank to do all +# # disable_query_namespaces = true # default false +# # namespaces = ["namespace1", "namespace2"] +# +# # Enable set level telemetry +# # query_sets = true # default: false +# # Add namespace set combinations to limit sets executed on +# # Leave blank to do all sets +# # sets = ["namespace1/set1", "namespace1/set2", "namespace3"] +# +# # Histograms +# # enable_ttl_histogram = true # default: false +# # enable_object_size_linear_histogram = true # default: false +# +# # by default, aerospike produces a 100 bucket histogram +# # this is not great for most graphing tools, this will allow +# # the ability to squash this to a smaller number of buckets +# # To have a balanced histogram, the number of buckets chosen +# # should divide evenly into 100. +# # num_histogram_buckets = 100 # default: 10 + + +# # Query statistics from AMD Graphics cards using rocm-smi binary +# [[inputs.amd_rocm_smi]] +# ## Optional: path to rocm-smi binary, defaults to $PATH via exec.LookPath +# # bin_path = "/opt/rocm/bin/rocm-smi" +# +# ## Optional: timeout for GPU polling +# # timeout = "5s" + + +# # Read Apache status information (mod_status) +# [[inputs.apache]] +# ## An array of URLs to gather from, must be directed at the machine +# ## readable version of the mod_status page including the auto query string. +# ## Default is "http://localhost/server-status?auto". +# urls = ["http://localhost/server-status?auto"] +# +# ## Credentials for basic HTTP authentication. +# # username = "myuser" +# # password = "mypassword" +# +# ## Maximum time to receive response. 
+# # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Monitor APC UPSes connected to apcupsd +# [[inputs.apcupsd]] +# # A list of running apcupsd server to connect to. +# # If not provided will default to tcp://127.0.0.1:3551 +# servers = ["tcp://127.0.0.1:3551"] +# +# ## Timeout for dialing server. +# timeout = "5s" + + +# # Gather metrics from Apache Aurora schedulers +# [[inputs.aurora]] +# ## Schedulers are the base addresses of your Aurora Schedulers +# schedulers = ["http://127.0.0.1:8081"] +# +# ## Set of role types to collect metrics from. +# ## +# ## The scheduler roles are checked each interval by contacting the +# ## scheduler nodes; zookeeper is not contacted. +# # roles = ["leader", "follower"] +# +# ## Timeout is the max time for total network operations. +# # timeout = "5s" +# +# ## Username and password are sent using HTTP Basic Auth. +# # username = "username" +# # password = "pa$$word" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Gather Azure Storage Queue metrics +# [[inputs.azure_storage_queue]] +# ## Required Azure Storage Account name +# account_name = "mystorageaccount" +# +# ## Required Azure Storage Account access key +# account_key = "storageaccountaccesskey" +# +# ## Set to false to disable peeking age of oldest message (executes faster) +# # peek_oldest_message_age = true + + +# # Read metrics of bcache from stats_total and dirty_data +# [[inputs.bcache]] +# ## Bcache sets path +# ## If not specified, then default is: +# bcachePath = "/sys/fs/bcache" +# +# ## By default, Telegraf gather stats for all bcache devices +# ## Setting devices will restrict the stats to the specified +# ## bcache devices. +# bcacheDevs = ["bcache0"] + + +# # Collects Beanstalkd server and tubes stats +# [[inputs.beanstalkd]] +# ## Server to collect data from +# server = "localhost:11300" +# +# ## List of tubes to gather stats about. +# ## If no tubes specified then data gathered for each tube on server reported by list-tubes command +# tubes = ["notifications"] + + +# # Read metrics exposed by Beat +# [[inputs.beat]] +# ## An URL from which to read Beat-formatted JSON +# ## Default is "http://127.0.0.1:5066". +# url = "http://127.0.0.1:5066" +# +# ## Enable collection of the listed stats +# ## An empty list means collect all. Available options are currently +# ## "beat", "libbeat", "system" and "filebeat". +# # include = ["beat", "libbeat", "filebeat"] +# +# ## HTTP method +# # method = "GET" +# +# ## Optional HTTP headers +# # headers = {"X-Special-Header" = "Special-Value"} +# +# ## Override HTTP "Host" header +# # host_header = "logstash.example.com" +# +# ## Timeout for HTTP requests +# # timeout = "5s" +# +# ## Optional HTTP Basic Auth credentials +# # username = "username" +# # password = "pa$$word" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read BIND nameserver XML statistics +# [[inputs.bind]] +# ## An array of BIND XML statistics URI to gather stats. 
+# ## Default is "http://localhost:8053/xml/v3". +# # urls = ["http://localhost:8053/xml/v3"] +# # gather_memory_contexts = false +# # gather_views = false +# +# ## Timeout for http requests made by bind nameserver +# # timeout = "4s" + + +# # Collect bond interface status, slaves statuses and failures count +# [[inputs.bond]] +# ## Sets 'proc' directory path +# ## If not specified, then default is /proc +# # host_proc = "/proc" +# +# ## By default, telegraf gather stats for all bond interfaces +# ## Setting interfaces will restrict the stats to the specified +# ## bond interfaces. +# # bond_interfaces = ["bond0"] + + +# # Collect Kafka topics and consumers status from Burrow HTTP API. +# [[inputs.burrow]] +# ## Burrow API endpoints in format "schema://host:port". +# ## Default is "http://localhost:8000". +# servers = ["http://localhost:8000"] +# +# ## Override Burrow API prefix. +# ## Useful when Burrow is behind reverse-proxy. +# # api_prefix = "/v3/kafka" +# +# ## Maximum time to receive response. +# # response_timeout = "5s" +# +# ## Limit per-server concurrent connections. +# ## Useful in case of large number of topics or consumer groups. +# # concurrent_connections = 20 +# +# ## Filter clusters, default is no filtering. +# ## Values can be specified as glob patterns. +# # clusters_include = [] +# # clusters_exclude = [] +# +# ## Filter consumer groups, default is no filtering. +# ## Values can be specified as glob patterns. +# # groups_include = [] +# # groups_exclude = [] +# +# ## Filter topics, default is no filtering. +# ## Values can be specified as glob patterns. +# # topics_include = [] +# # topics_exclude = [] +# +# ## Credentials for basic HTTP authentication. +# # username = "" +# # password = "" +# +# ## Optional SSL config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# # insecure_skip_verify = false + + +# # Collects performance metrics from the MON, OSD, MDS and RGW nodes in a Ceph storage cluster. +# [[inputs.ceph]] +# ## This is the recommended interval to poll. Too frequent and you will lose +# ## data points due to timeouts during rebalancing and recovery +# interval = '1m' +# +# ## All configuration values are optional, defaults are shown below +# +# ## location of ceph binary +# ceph_binary = "/usr/bin/ceph" +# +# ## directory in which to look for socket files +# socket_dir = "/var/run/ceph" +# +# ## prefix of MON and OSD socket files, used to determine socket type +# mon_prefix = "ceph-mon" +# osd_prefix = "ceph-osd" +# mds_prefix = "ceph-mds" +# rgw_prefix = "ceph-client" +# +# ## suffix used to identify socket files +# socket_suffix = "asok" +# +# ## Ceph user to authenticate as, ceph will search for the corresponding keyring +# ## e.g. client.admin.keyring in /etc/ceph, or the explicit path defined in the +# ## client section of ceph.conf for example: +# ## +# ## [client.telegraf] +# ## keyring = /etc/ceph/client.telegraf.keyring +# ## +# ## Consult the ceph documentation for more detail on keyring generation. 
+# ceph_user = "client.admin" +# +# ## Ceph configuration to use to locate the cluster +# ceph_config = "/etc/ceph/ceph.conf" +# +# ## Whether to gather statistics via the admin socket +# gather_admin_socket_stats = true +# +# ## Whether to gather statistics via ceph commands, requires ceph_user and ceph_config +# ## to be specified +# gather_cluster_stats = false + + +# # Read specific statistics per cgroup +# [[inputs.cgroup]] +# ## Directories in which to look for files, globs are supported. +# ## Consider restricting paths to the set of cgroups you really +# ## want to monitor if you have a large number of cgroups, to avoid +# ## any cardinality issues. +# # paths = [ +# # "/sys/fs/cgroup/memory", +# # "/sys/fs/cgroup/memory/child1", +# # "/sys/fs/cgroup/memory/child2/*", +# # ] +# ## cgroup stat fields, as file names, globs are supported. +# ## these file names are appended to each path from above. +# # files = ["memory.*usage*", "memory.limit_in_bytes"] + + +# # Get standard chrony metrics, requires chronyc executable. +# [[inputs.chrony]] +# ## If true, chronyc tries to perform a DNS lookup for the time server. +# # dns_lookup = false + + +# # Pull Metric Statistics from Amazon CloudWatch +# [[inputs.cloudwatch]] +# ## Amazon Region +# region = "us-east-1" +# +# ## Amazon Credentials +# ## Credentials are loaded in the following order +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile +# # access_key = "" +# # secret_key = "" +# # token = "" +# # role_arn = "" +# # web_identity_token_file = "" +# # role_session_name = "" +# # profile = "" +# # shared_credential_file = "" +# +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" +# +# ## Set http_proxy (telegraf uses the system wide proxy settings if it's is not set) +# # http_proxy_url = "http://localhost:8888" +# +# # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all +# # metrics are made available to the 1 minute period. Some are collected at +# # 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring. +# # Note that if a period is configured that is smaller than the minimum for a +# # particular metric, that metric will not be returned by the Cloudwatch API +# # and will not be collected by Telegraf. +# # +# ## Requested CloudWatch aggregation Period (required - must be a multiple of 60s) +# period = "5m" +# +# ## Collection Delay (required - must account for metrics availability via CloudWatch API) +# delay = "5m" +# +# ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid +# ## gaps or overlap in pulled data +# interval = "5m" +# +# ## Recommended if "delay" and "period" are both within 3 hours of request time. Invalid values will be ignored. +# ## Recently Active feature will only poll for CloudWatch ListMetrics values that occurred within the last 3 Hours. +# ## If enabled, it will reduce total API usage of the CloudWatch ListMetrics API and require less memory to retain. 
+# ## Do not enable if "period" or "delay" is longer than 3 hours, as it will not return data more than 3 hours old. +# ## See https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_ListMetrics.html +# #recently_active = "PT3H" +# +# ## Configure the TTL for the internal cache of metrics. +# # cache_ttl = "1h" +# +# ## Metric Statistic Namespaces (required) +# namespaces = ["AWS/ELB"] +# # A single metric statistic namespace that will be appended to namespaces on startup +# # namespace = "AWS/ELB" +# +# ## Maximum requests per second. Note that the global default AWS rate limit is +# ## 50 reqs/sec, so if you define multiple namespaces, these should add up to a +# ## maximum of 50. +# ## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html +# # ratelimit = 25 +# +# ## Timeout for http requests made by the cloudwatch client. +# # timeout = "5s" +# +# ## Namespace-wide statistic filters. These allow fewer queries to be made to +# ## cloudwatch. +# # statistic_include = [ "average", "sum", "minimum", "maximum", sample_count" ] +# # statistic_exclude = [] +# +# ## Metrics to Pull +# ## Defaults to all Metrics in Namespace if nothing is provided +# ## Refreshes Namespace available metrics every 1h +# #[[inputs.cloudwatch.metrics]] +# # names = ["Latency", "RequestCount"] +# # +# # ## Statistic filters for Metric. These allow for retrieving specific +# # ## statistics for an individual metric. +# # # statistic_include = [ "average", "sum", "minimum", "maximum", sample_count" ] +# # # statistic_exclude = [] +# # +# # ## Dimension filters for Metric. All dimensions defined for the metric names +# # ## must be specified in order to retrieve the metric statistics. +# # ## 'value' has wildcard / 'glob' matching support such as 'p-*'. +# # [[inputs.cloudwatch.metrics.dimensions]] +# # name = "LoadBalancerName" +# # value = "p-example" + + +# # Collects conntrack stats from the configured directories and files. +# [[inputs.conntrack]] +# ## The following defaults would work with multiple versions of conntrack. +# ## Note the nf_ and ip_ filename prefixes are mutually exclusive across +# ## kernel versions, as are the directory locations. +# +# ## Superset of filenames to look for within the conntrack dirs. +# ## Missing files will be ignored. +# files = ["ip_conntrack_count","ip_conntrack_max", +# "nf_conntrack_count","nf_conntrack_max"] +# +# ## Directories to search within for the conntrack files above. +# ## Missing directories will be ignored. +# dirs = ["/proc/sys/net/ipv4/netfilter","/proc/sys/net/netfilter"] + + +# # Gather health check statuses from services registered in Consul +# [[inputs.consul]] +# ## Consul server address +# # address = "localhost:8500" +# +# ## URI scheme for the Consul server, one of "http", "https" +# # scheme = "http" +# +# ## Metric version controls the mapping from Consul metrics into +# ## Telegraf metrics. +# ## +# ## example: metric_version = 1; deprecated in 1.15 +# ## metric_version = 2; recommended version +# # metric_version = 1 +# +# ## ACL token used in every request +# # token = "" +# +# ## HTTP Basic Authentication username and password. 
+# # username = "" +# # password = "" +# +# ## Data center to query the health checks from +# # datacenter = "" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = true +# +# ## Consul checks' tag splitting +# # When tags are formatted like "key:value" with ":" as a delimiter then +# # they will be splitted and reported as proper key:value in Telegraf +# # tag_delimiter = ":" + + +# # Read per-node and per-bucket metrics from Couchbase +# [[inputs.couchbase]] +# ## specify servers via a url matching: +# ## [protocol://][:password]@address[:port] +# ## e.g. +# ## http://couchbase-0.example.com/ +# ## http://admin:secret@couchbase-0.example.com:8091/ +# ## +# ## If no servers are specified, then localhost is used as the host. +# ## If no protocol is specified, HTTP is used. +# ## If no port is specified, 8091 is used. +# servers = ["http://localhost:8091"] +# +# ## Filter bucket fields to include only here. +# # bucket_stats_included = ["quota_percent_used", "ops_per_sec", "disk_fetches", "item_count", "disk_used", "data_used", "mem_used"] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification (defaults to false) +# ## If set to false, tls_cert and tls_key are required +# # insecure_skip_verify = false + + +# # Read CouchDB Stats from one or more servers +# [[inputs.couchdb]] +# ## Works with CouchDB stats endpoints out of the box +# ## Multiple Hosts from which to read CouchDB stats: +# hosts = ["http://localhost:8086/_stats"] +# +# ## Use HTTP Basic Authentication. +# # basic_username = "telegraf" +# # basic_password = "p@ssw0rd" + + +# # Fetch metrics from a CSGO SRCDS +# [[inputs.csgo]] +# ## Specify servers using the following format: +# ## servers = [ +# ## ["ip1:port1", "rcon_password1"], +# ## ["ip2:port2", "rcon_password2"], +# ## ] +# # +# ## If no servers are specified, no data will be collected +# servers = [] + + +# # Input plugin for DC/OS metrics +# [[inputs.dcos]] +# ## The DC/OS cluster URL. +# cluster_url = "https://dcos-ee-master-1" +# +# ## The ID of the service account. +# service_account_id = "telegraf" +# ## The private key file for the service account. +# service_account_private_key = "/etc/telegraf/telegraf-sa-key.pem" +# +# ## Path containing login token. If set, will read on every gather. +# # token_file = "/home/dcos/.dcos/token" +# +# ## In all filter options if both include and exclude are empty all items +# ## will be collected. Arrays may contain glob patterns. +# ## +# ## Node IDs to collect metrics from. If a node is excluded, no metrics will +# ## be collected for its containers or apps. +# # node_include = [] +# # node_exclude = [] +# ## Container IDs to collect container metrics from. +# # container_include = [] +# # container_exclude = [] +# ## Container IDs to collect app metrics from. +# # app_include = [] +# # app_exclude = [] +# +# ## Maximum concurrent connections to the cluster. +# # max_connections = 10 +# ## Maximum time to receive a response from cluster. 
+# # response_timeout = "20s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## If false, skip chain & host verification +# # insecure_skip_verify = true +# +# ## Recommended filtering to reduce series cardinality. +# # [inputs.dcos.tagdrop] +# # path = ["/var/lib/mesos/slave/slaves/*"] + + +# # Read metrics from one or many disque servers +# [[inputs.disque]] +# ## An array of URI to gather stats about. Specify an ip or hostname +# ## with optional port and password. +# ## ie disque://localhost, disque://10.10.3.33:18832, 10.0.0.1:10000, etc. +# ## If no servers are specified, then localhost is used as the host. +# servers = ["localhost"] + + +# # Provide a native collection for dmsetup based statistics for dm-cache +# [[inputs.dmcache]] +# ## Whether to report per-device stats or not +# per_device = true + + +# # Query given DNS server and gives statistics +# [[inputs.dns_query]] +# ## servers to query +# servers = ["8.8.8.8"] +# +# ## Network is the network protocol name. +# # network = "udp" +# +# ## Domains or subdomains to query. +# # domains = ["."] +# +# ## Query record type. +# ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV. +# # record_type = "A" +# +# ## Dns server port. +# # port = 53 +# +# ## Query timeout in seconds. +# # timeout = 2 + + +# # Read metrics about docker containers +# [[inputs.docker]] +# ## Docker Endpoint +# ## To use TCP, set endpoint = "tcp://[ip]:[port]" +# ## To use environment variables (ie, docker-machine), set endpoint = "ENV" +# endpoint = "unix:///var/run/docker.sock" +# +# ## Set to true to collect Swarm metrics(desired_replicas, running_replicas) +# gather_services = false +# +# ## Only collect metrics for these containers, collect all if empty +# container_names = [] +# +# ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars +# source_tag = false +# +# ## Containers to include and exclude. Globs accepted. +# ## Note that an empty array for both will include all containers +# container_name_include = [] +# container_name_exclude = [] +# +# ## Container states to include and exclude. Globs accepted. +# ## When empty only containers in the "running" state will be captured. +# ## example: container_state_include = ["created", "restarting", "running", "removing", "paused", "exited", "dead"] +# ## example: container_state_exclude = ["created", "restarting", "running", "removing", "paused", "exited", "dead"] +# # container_state_include = [] +# # container_state_exclude = [] +# +# ## Timeout for docker list, info, and stats commands +# timeout = "5s" +# +# ## Whether to report for each container per-device blkio (8:0, 8:1...), +# ## network (eth0, eth1, ...) and cpu (cpu0, cpu1, ...) stats or not. +# ## Usage of this setting is discouraged since it will be deprecated in favor of 'perdevice_include'. +# ## Default value is 'true' for backwards compatibility, please set it to 'false' so that 'perdevice_include' setting +# ## is honored. +# perdevice = true +# +# ## Specifies for which classes a per-device metric should be issued +# ## Possible values are 'cpu' (cpu0, cpu1, ...), 'blkio' (8:0, 8:1, ...) and 'network' (eth0, eth1, ...) +# ## Please note that this setting has no effect if 'perdevice' is set to 'true' +# # perdevice_include = ["cpu"] +# +# ## Whether to report for each container total blkio and network stats or not. 
+# ## Usage of this setting is discouraged since it will be deprecated in favor of 'total_include'. +# ## Default value is 'false' for backwards compatibility, please set it to 'true' so that 'total_include' setting +# ## is honored. +# total = false +# +# ## Specifies for which classes a total metric should be issued. Total is an aggregated of the 'perdevice' values. +# ## Possible values are 'cpu', 'blkio' and 'network' +# ## Total 'cpu' is reported directly by Docker daemon, and 'network' and 'blkio' totals are aggregated by this plugin. +# ## Please note that this setting has no effect if 'total' is set to 'false' +# # total_include = ["cpu", "blkio", "network"] +# +# ## Which environment variables should we use as a tag +# ##tag_env = ["JAVA_HOME", "HEAP_SIZE"] +# +# ## docker labels to include and exclude as tags. Globs accepted. +# ## Note that an empty array for both will include all labels as tags +# docker_label_include = [] +# docker_label_exclude = [] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read statistics from one or many dovecot servers +# [[inputs.dovecot]] +# ## specify dovecot servers via an address:port list +# ## e.g. +# ## localhost:24242 +# ## +# ## If no servers are specified, then localhost is used as the host. +# servers = ["localhost:24242"] +# +# ## Type is one of "user", "domain", "ip", or "global" +# type = "global" +# +# ## Wildcard matches like "*.com". An empty string "" is same as "*" +# ## If type = "ip" filters should be +# filters = [""] + + +# # Reads metrics from DPDK applications using v2 telemetry interface. +# [[inputs.dpdk]] +# ## Path to DPDK telemetry socket. This shall point to v2 version of DPDK telemetry interface. +# # socket_path = "/var/run/dpdk/rte/dpdk_telemetry.v2" +# +# ## Duration that defines how long the connected socket client will wait for a response before terminating connection. +# ## This includes both writing to and reading from socket. Since it's local socket access +# ## to a fast packet processing application, the timeout should be sufficient for most users. +# ## Setting the value to 0 disables the timeout (not recommended) +# # socket_access_timeout = "200ms" +# +# ## Enables telemetry data collection for selected device types. +# ## Adding "ethdev" enables collection of telemetry from DPDK NICs (stats, xstats, link_status). +# ## Adding "rawdev" enables collection of telemetry from DPDK Raw Devices (xstats). +# # device_types = ["ethdev"] +# +# ## List of custom, application-specific telemetry commands to query +# ## The list of available commands depend on the application deployed. Applications can register their own commands +# ## via telemetry library API http://doc.dpdk.org/guides/prog_guide/telemetry_lib.html#registering-commands +# ## For e.g. L3 Forwarding with Power Management Sample Application this could be: +# ## additional_commands = ["/l3fwd-power/stats"] +# # additional_commands = [] +# +# ## Allows turning off collecting data for individual "ethdev" commands. +# ## Remove "/ethdev/link_status" from list to start getting link status metrics. +# [inputs.dpdk.ethdev] +# exclude_commands = ["/ethdev/link_status"] +# +# ## When running multiple instances of the plugin it's recommended to add a unique tag to each instance to identify +# ## metrics exposed by an instance of DPDK application. 
This is useful when multiple DPDK apps run on a single host. +# ## [inputs.dpdk.tags] +# ## dpdk_instance = "my-fwd-app" + + +# # Read metrics about docker containers from Fargate/ECS v2, v3 meta endpoints. +# [[inputs.ecs]] +# ## ECS metadata url. +# ## Metadata v2 API is used if set explicitly. Otherwise, +# ## v3 metadata endpoint API is used if available. +# # endpoint_url = "" +# +# ## Containers to include and exclude. Globs accepted. +# ## Note that an empty array for both will include all containers +# # container_name_include = [] +# # container_name_exclude = [] +# +# ## Container states to include and exclude. Globs accepted. +# ## When empty only containers in the "RUNNING" state will be captured. +# ## Possible values are "NONE", "PULLED", "CREATED", "RUNNING", +# ## "RESOURCES_PROVISIONED", "STOPPED". +# # container_status_include = [] +# # container_status_exclude = [] +# +# ## ecs labels to include and exclude as tags. Globs accepted. +# ## Note that an empty array for both will include all labels as tags +# ecs_label_include = [ "com.amazonaws.ecs.*" ] +# ecs_label_exclude = [] +# +# ## Timeout for queries. +# # timeout = "5s" + + +# # Read stats from one or more Elasticsearch servers or clusters +# [[inputs.elasticsearch]] +# ## specify a list of one or more Elasticsearch servers +# # you can add username and password to your url to use basic authentication: +# # servers = ["http://user:pass@localhost:9200"] +# servers = ["http://localhost:9200"] +# +# ## Timeout for HTTP requests to the elastic search server(s) +# http_timeout = "5s" +# +# ## When local is true (the default), the node will read only its own stats. +# ## Set local to false when you want to read the node stats from all nodes +# ## of the cluster. +# local = true +# +# ## Set cluster_health to true when you want to also obtain cluster health stats +# cluster_health = false +# +# ## Adjust cluster_health_level when you want to also obtain detailed health stats +# ## The options are +# ## - indices (default) +# ## - cluster +# # cluster_health_level = "indices" +# +# ## Set cluster_stats to true when you want to also obtain cluster stats. +# cluster_stats = false +# +# ## Only gather cluster_stats from the master node. To work this require local = true +# cluster_stats_only_from_master = true +# +# ## Indices to collect; can be one or more indices names or _all +# ## Use of wildcards is allowed. Use a wildcard at the end to retrieve index names that end with a changing value, like a date. +# indices_include = ["_all"] +# +# ## One of "shards", "cluster", "indices" +# indices_level = "shards" +# +# ## node_stats is a list of sub-stats that you want to have gathered. Valid options +# ## are "indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http", +# ## "breaker". Per default, all stats are gathered. +# # node_stats = ["jvm", "http"] +# +# ## HTTP Basic Authentication username and password. +# # username = "" +# # password = "" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Sets the number of most recent indices to return for indices that are configured with a date-stamped suffix. +# ## Each 'indices_include' entry ending with a wildcard (*) or glob matching pattern will group together all indices that match it, and sort them +# ## by the date or number after the wildcard. 
Metrics then are gathered for only the 'num_most_recent_indices' amount of most recent indices. +# # num_most_recent_indices = 0 + + +# # Derive metrics from aggregating Elasticsearch query results +# [[inputs.elasticsearch_query]] +# ## The full HTTP endpoint URL for your Elasticsearch instance +# ## Multiple urls can be specified as part of the same cluster, +# ## this means that only ONE of the urls will be written to each interval. +# urls = [ "http://node1.es.example.com:9200" ] # required. +# +# ## Elasticsearch client timeout, defaults to "5s". +# # timeout = "5s" +# +# ## Set to true to ask Elasticsearch a list of all cluster nodes, +# ## thus it is not necessary to list all nodes in the urls config option +# # enable_sniffer = false +# +# ## Set the interval to check if the Elasticsearch nodes are available +# ## This option is only used if enable_sniffer is also set (0s to disable it) +# # health_check_interval = "10s" +# +# ## HTTP basic authentication details (eg. when using x-pack) +# # username = "telegraf" +# # password = "mypassword" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# [[inputs.elasticsearch_query.aggregation]] +# ## measurement name for the results of the aggregation query +# measurement_name = "measurement" +# +# ## Elasticsearch indexes to query (accept wildcards). +# index = "index-*" +# +# ## The date/time field in the Elasticsearch index (mandatory). +# date_field = "@timestamp" +# +# ## If the field used for the date/time field in Elasticsearch is also using +# ## a custom date/time format it may be required to provide the format to +# ## correctly parse the field. +# ## +# ## If using one of the built in elasticsearch formats this is not required. +# # date_field_custom_format = "" +# +# ## Time window to query (eg. "1m" to query documents from last minute). +# ## Normally should be set to same as collection interval +# query_period = "1m" +# +# ## Lucene query to filter results +# # filter_query = "*" +# +# ## Fields to aggregate values (must be numeric fields) +# # metric_fields = ["metric"] +# +# ## Aggregation function to use on the metric fields +# ## Must be set if 'metric_fields' is set +# ## Valid values are: avg, sum, min, max, sum +# # metric_function = "avg" +# +# ## Fields to be used as tags +# ## Must be text, non-analyzed fields. Metric aggregations are performed per tag +# # tags = ["field.keyword", "field2.keyword"] +# +# ## Set to true to not ignore documents when the tag(s) above are missing +# # include_missing_tag = false +# +# ## String value of the tag when the tag does not exist +# ## Used when include_missing_tag is true +# # missing_tag_value = "null" + + +# # Returns ethtool statistics for given interfaces +# [[inputs.ethtool]] +# ## List of interfaces to pull metrics for +# # interface_include = ["eth0"] +# +# ## List of interfaces to ignore when pulling metrics. +# # interface_exclude = ["eth1"] +# +# ## Some drivers declare statistics with extra whitespace, different spacing, +# ## and mix cases. This list, when enabled, can be used to clean the keys. 
+# ## Here are the current possible normalizations: +# ## * snakecase: converts fooBarBaz to foo_bar_baz +# ## * trim: removes leading and trailing whitespace +# ## * lower: changes all capitalized letters to lowercase +# ## * underscore: replaces spaces with underscores +# # normalize_keys = ["snakecase", "trim", "lower", "underscore"] + + +# # Read metrics from one or more commands that can output to stdout +# [[inputs.exec]] +# ## Commands array +# commands = [ +# "/tmp/test.sh", +# "/usr/bin/mycollector --foo=bar", +# "/tmp/collect_*.sh" +# ] +# +# ## Timeout for each command to complete. +# timeout = "5s" +# +# ## measurement name suffix (for separating different commands) +# name_suffix = "_mycollector" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Read metrics from fail2ban. +# [[inputs.fail2ban]] +# ## Use sudo to run fail2ban-client +# use_sudo = false + + +# # Read devices value(s) from a Fibaro controller +# [[inputs.fibaro]] +# ## Required Fibaro controller address/hostname. +# ## Note: at the time of writing this plugin, Fibaro only implemented http - no https available +# url = "http://:80" +# +# ## Required credentials to access the API (http://) +# username = "" +# password = "" +# +# ## Amount of time allowed to complete the HTTP request +# # timeout = "5s" + + +# # Parse a complete file each interval +# [[inputs.file]] +# ## Files to parse each interval. Accept standard unix glob matching rules, +# ## as well as ** to match recursive files and directories. +# files = ["/tmp/metrics.out"] +# +# +# ## Name a tag containing the name of the file the data was parsed from. Leave empty +# ## to disable. Cautious when file name variation is high, this can increase the cardinality +# ## significantly. Read more about cardinality here: +# ## https://docs.influxdata.com/influxdb/cloud/reference/glossary/#series-cardinality +# # file_tag = "" +# # +# +# ## Character encoding to use when interpreting the file contents. Invalid +# ## characters are replaced using the unicode replacement character. When set +# ## to the empty string the data is not decoded to text. +# ## ex: character_encoding = "utf-8" +# ## character_encoding = "utf-16le" +# ## character_encoding = "utf-16be" +# ## character_encoding = "" +# # character_encoding = "" +# +# ## The dataformat to be read from files +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Count files in a directory +# [[inputs.filecount]] +# ## Directory to gather stats about. +# ## deprecated in 1.9; use the directories option +# # directory = "/var/cache/apt/archives" +# +# ## Directories to gather stats about. +# ## This accept standard unit glob matching rules, but with the addition of +# ## ** as a "super asterisk". ie: +# ## /var/log/** -> recursively find all directories in /var/log and count files in each directories +# ## /var/log/*/* -> find all directories with a parent dir in /var/log and count files in each directories +# ## /var/log -> count all files in /var/log and all of its subdirectories +# directories = ["/var/cache/apt/archives"] +# +# ## Only count files that match the name pattern. Defaults to "*". 
+# name = "*.deb" +# +# ## Count files in subdirectories. Defaults to true. +# recursive = false +# +# ## Only count regular files. Defaults to true. +# regular_only = true +# +# ## Follow all symlinks while walking the directory tree. Defaults to false. +# follow_symlinks = false +# +# ## Only count files that are at least this size. If size is +# ## a negative number, only count files that are smaller than the +# ## absolute value of size. Acceptable units are B, KiB, MiB, KB, ... +# ## Without quotes and units, interpreted as size in bytes. +# size = "0B" +# +# ## Only count files that have not been touched for at least this +# ## duration. If mtime is negative, only count files that have been +# ## touched in this duration. Defaults to "0s". +# mtime = "0s" + + +# # Read stats about given file(s) +# [[inputs.filestat]] +# ## Files to gather stats about. +# ## These accept standard unix glob matching rules, but with the addition of +# ## ** as a "super asterisk". ie: +# ## "/var/log/**.log" -> recursively find all .log files in /var/log +# ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log +# ## "/var/log/apache.log" -> just tail the apache log file +# ## +# ## See https://github.com/gobwas/glob for more examples +# ## +# files = ["/var/log/**.log"] +# +# ## If true, read the entire file and calculate an md5 checksum. +# md5 = false + + +# # Read real time temps from fireboard.io servers +# [[inputs.fireboard]] +# ## Specify auth token for your account +# auth_token = "invalidAuthToken" +# ## You can override the fireboard server URL if necessary +# # url = https://fireboard.io/api/v1/devices.json +# ## You can set a different http_timeout if you need to +# ## You should set a string using an number and time indicator +# ## for example "12s" for 12 seconds. +# # http_timeout = "4s" + + +# # Read metrics exposed by fluentd in_monitor plugin +# [[inputs.fluentd]] +# ## This plugin reads information exposed by fluentd (using /api/plugins.json endpoint). +# ## +# ## Endpoint: +# ## - only one URI is allowed +# ## - https is not supported +# endpoint = "http://localhost:24220/api/plugins.json" +# +# ## Define which plugins have to be excluded (based on "type" field - e.g. monitor_agent) +# exclude = [ +# "monitor_agent", +# "dummy", +# ] + + +# # Gather repository information from GitHub hosted repositories. +# [[inputs.github]] +# ## List of repositories to monitor. +# repositories = [ +# "influxdata/telegraf", +# "influxdata/influxdb" +# ] +# +# ## Github API access token. Unauthenticated requests are limited to 60 per hour. +# # access_token = "" +# +# ## Github API enterprise url. Github Enterprise accounts must specify their base url. +# # enterprise_base_url = "" +# +# ## Timeout for HTTP requests. +# # http_timeout = "5s" +# +# ## List of additional fields to query. +# ## NOTE: Getting those fields might involve issuing additional API-calls, so please +# ## make sure you do not exceed the rate-limit of GitHub. +# ## +# ## Available fields are: +# ## - pull-requests -- number of open and closed pull requests (2 API-calls per repository) +# # additional_fields = [] + + +# # Read flattened metrics from one or more GrayLog HTTP endpoints +# [[inputs.graylog]] +# ## API endpoint, currently supported API: +# ## +# ## - multiple (Ex http://:12900/system/metrics/multiple) +# ## - namespace (Ex http://:12900/system/metrics/namespace/{namespace}) +# ## +# ## For namespace endpoint, the metrics array will be ignored for that call. 
+# ## Endpoint can contain namespace and multiple type calls.
+# ##
+# ## Please check http://[graylog-server-ip]:12900/api-browser for full list
+# ## of endpoints
+# servers = [
+# "http://[graylog-server-ip]:12900/system/metrics/multiple",
+# ]
+#
+# ## Metrics list
+# ## List of metrics can be found on Graylog webservice documentation.
+# ## Or by hitting the web service api at:
+# ## http://[graylog-host]:12900/system/metrics
+# metrics = [
+# "jvm.cl.loaded",
+# "jvm.memory.pools.Metaspace.committed"
+# ]
+#
+# ## Username and password
+# username = ""
+# password = ""
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read metrics of haproxy, via socket or csv stats page
+# [[inputs.haproxy]]
+# ## An array of addresses to gather stats about. Specify an ip or hostname
+# ## with optional port. ie localhost, 10.10.3.33:1936, etc.
+# ## Make sure you specify the complete path to the stats endpoint
+# ## including the protocol, ie http://10.10.3.33:1936/haproxy?stats
+#
+# ## If no servers are specified, the default is 127.0.0.1:1936/haproxy?stats
+# servers = ["http://myhaproxy.com:1936/haproxy?stats"]
+#
+# ## Credentials for basic HTTP authentication
+# # username = "admin"
+# # password = "admin"
+#
+# ## You can also use local socket with standard wildcard globbing.
+# ## Server address not starting with 'http' will be treated as a possible
+# ## socket, so both examples below are valid.
+# # servers = ["socket:/run/haproxy/admin.sock", "/run/haproxy/*.sock"]
+#
+# ## By default, some of the fields are renamed from what haproxy calls them.
+# ## Setting this option to true results in the plugin keeping the original
+# ## field names.
+# # keep_field_names = false
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Monitor disks' temperatures using hddtemp
+# [[inputs.hddtemp]]
+# ## By default, telegraf gathers temperature data from all disks detected by
+# ## hddtemp.
+# ##
+# ## Only collect temps from the selected disks.
+# ##
+# ## A * as the device name will return the temperature values of all disks.
+# ##
+# # address = "127.0.0.1:7634"
+# # devices = ["sda", "*"]
+
+
+# # Read formatted metrics from one or more HTTP endpoints
+# [[inputs.http]]
+# ## One or more URLs from which to read formatted metrics
+# urls = [
+# "http://localhost/metrics"
+# ]
+#
+# ## HTTP method
+# # method = "GET"
+#
+# ## Optional HTTP headers
+# # headers = {"X-Special-Header" = "Special-Value"}
+#
+# ## Optional file with Bearer token
+# ## file content is added as an Authorization header
+# # bearer_token = "/path/to/file"
+#
+# ## Optional HTTP Basic Auth Credentials
+# # username = "username"
+# # password = "pa$$word"
+#
+# ## HTTP entity-body to send with POST/PUT requests.
+# # body = ""
+#
+# ## HTTP Content-Encoding for write request body, can be set to "gzip" to
+# ## compress body or "identity" to apply no encoding.
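+# ## For example (hypothetical choice): content_encoding = "gzip" would gzip-compress
+# ## the request body, while the "identity" value shown below sends it unmodified.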
+# # content_encoding = "identity"
+#
+# ## HTTP Proxy support
+# # http_proxy_url = ""
+#
+# ## OAuth2 Client Credentials Grant
+# # client_id = "clientid"
+# # client_secret = "secret"
+# # token_url = "https://identityprovider/oauth2/v1/token"
+# # scopes = ["urn:opc:idm:__myscopes__"]
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Optional Cookie authentication
+# # cookie_auth_url = "https://localhost/authMe"
+# # cookie_auth_method = "POST"
+# # cookie_auth_username = "username"
+# # cookie_auth_password = "pa$$word"
+# # cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}'
+# ## cookie_auth_renewal not set or set to "0" will auth once and never renew the cookie
+# # cookie_auth_renewal = "5m"
+#
+# ## Amount of time allowed to complete the HTTP request
+# # timeout = "5s"
+#
+# ## List of success status codes
+# # success_status_codes = [200]
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# # data_format = "influx"
+
+
+# # HTTP/HTTPS request given an address, a method and a timeout
+# [[inputs.http_response]]
+# ## Deprecated in 1.12, use 'urls'
+# ## Server address (default http://localhost)
+# # address = "http://localhost"
+#
+# ## List of urls to query.
+# # urls = ["http://localhost"]
+#
+# ## Set http_proxy (telegraf uses the system wide proxy settings if it is not set)
+# # http_proxy = "http://localhost:8888"
+#
+# ## Set response_timeout (default 5 seconds)
+# # response_timeout = "5s"
+#
+# ## HTTP Request Method
+# # method = "GET"
+#
+# ## Whether to follow redirects from the server (defaults to false)
+# # follow_redirects = false
+#
+# ## Optional file with Bearer token
+# ## file content is added as an Authorization header
+# # bearer_token = "/path/to/file"
+#
+# ## Optional HTTP Basic Auth Credentials
+# # username = "username"
+# # password = "pa$$word"
+#
+# ## Optional HTTP Request Body
+# # body = '''
+# # {'fake':'data'}
+# # '''
+#
+# ## Optional name of the field that will contain the body of the response.
+# ## By default it is set to an empty string indicating that the body's content won't be added
+# # response_body_field = ''
+#
+# ## Maximum allowed HTTP response body size in bytes.
+# ## 0 means to use the default of 32MiB.
+# ## If the response body size exceeds this limit a "body_read_error" will be raised
+# # response_body_max_size = "32MiB"
+#
+# ## Optional substring or regex match in body of the response (case sensitive)
+# # response_string_match = "\"service_status\": \"up\""
+# # response_string_match = "ok"
+# # response_string_match = "\".*_status\".?:.?\"up\""
+#
+# ## Expected response status code.
+# ## The status code of the response is compared to this value. If they match, the field
+# ## "response_status_code_match" will be 1, otherwise it will be 0. If the
+# ## expected status code is 0, the check is disabled and the field won't be added.
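+# ## For example (hypothetical value): response_status_code = 200 would set the
+# ## "response_status_code_match" field to 1 only when the server returns HTTP 200.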
+# # response_status_code = 0 +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## HTTP Request Headers (all values must be strings) +# # [inputs.http_response.headers] +# # Host = "github.com" +# +# ## Optional setting to map response http headers into tags +# ## If the http header is not present on the request, no corresponding tag will be added +# ## If multiple instances of the http header are present, only the first value will be used +# # http_header_tags = {"HTTP_HEADER" = "TAG_NAME"} +# +# ## Interface to use when dialing an address +# # interface = "eth0" + + +# # Read flattened metrics from one or more JSON HTTP endpoints +# [[inputs.httpjson]] +# ## NOTE This plugin only reads numerical measurements, strings and booleans +# ## will be ignored. +# +# ## Name for the service being polled. Will be appended to the name of the +# ## measurement e.g. httpjson_webserver_stats +# ## +# ## Deprecated (1.3.0): Use name_override, name_suffix, name_prefix instead. +# name = "webserver_stats" +# +# ## URL of each server in the service's cluster +# servers = [ +# "http://localhost:9999/stats/", +# "http://localhost:9998/stats/", +# ] +# ## Set response_timeout (default 5 seconds) +# response_timeout = "5s" +# +# ## HTTP method to use: GET or POST (case-sensitive) +# method = "GET" +# +# ## List of tag names to extract from top-level of JSON server response +# # tag_keys = [ +# # "my_tag_1", +# # "my_tag_2" +# # ] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## HTTP parameters (all values must be strings). For "GET" requests, data +# ## will be included in the query. For "POST" requests, data will be included +# ## in the request body as "x-www-form-urlencoded". +# # [inputs.httpjson.parameters] +# # event_type = "cpu_spike" +# # threshold = "0.75" +# +# ## HTTP Headers (all values must be strings) +# # [inputs.httpjson.headers] +# # X-Auth-Token = "my-xauth-token" +# # apiVersion = "v1" + + +# # Gather Icinga2 status +# [[inputs.icinga2]] +# ## Required Icinga2 server address +# # server = "https://localhost:5665" +# +# ## Required Icinga2 object type ("services" or "hosts") +# # object_type = "services" +# +# ## Credentials for basic HTTP authentication +# # username = "admin" +# # password = "admin" +# +# ## Maximum time to receive response. +# # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = true + + +# # Gets counters from all InfiniBand cards and ports installed +# [[inputs.infiniband]] +# # no configuration + + +# # Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints +# [[inputs.influxdb]] +# ## Works with InfluxDB debug endpoints out of the box, +# ## but other services can use this format too. +# ## See the influxdb plugin's README for more details. +# +# ## Multiple URLs from which to read InfluxDB-formatted JSON +# ## Default is "http://localhost:8086/debug/vars". +# urls = [ +# "http://localhost:8086/debug/vars" +# ] +# +# ## Username and password to send using HTTP Basic Authentication. 
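+# ## For example (hypothetical credentials): username = "telegraf", password = "metrics";
+# ## leave both unset when the endpoint does not require authentication.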
+# # username = ""
+# # password = ""
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## http request & header timeout
+# timeout = "5s"
+
+
+# # Intel PowerStat plugin enables monitoring of platform metrics (power, TDP) and Core metrics like temperature, power and utilization.
+# [[inputs.intel_powerstat]]
+# ## All global metrics are always collected by Intel PowerStat plugin.
+# ## User can choose which per-CPU metrics are monitored by the plugin in cpu_metrics array.
+# ## Empty array means no per-CPU specific metrics will be collected by the plugin - in this case only platform level
+# ## telemetry will be exposed by Intel PowerStat plugin.
+# ## Supported options:
+# ## "cpu_frequency", "cpu_busy_frequency", "cpu_temperature", "cpu_c1_state_residency", "cpu_c6_state_residency", "cpu_busy_cycles"
+# # cpu_metrics = []
+
+
+# # Collect statistics about itself
+# [[inputs.internal]]
+# ## If true, collect telegraf memory stats.
+# # collect_memstats = true
+
+
+# # Monitors internet speed using speedtest.net service
+# [[inputs.internet_speed]]
+# ## Sets whether to run the file download test
+# ## Default: false
+# enable_file_download = false
+
+
+# # This plugin gathers interrupts data from /proc/interrupts and /proc/softirqs.
+# [[inputs.interrupts]]
+# ## When set to true, cpu metrics are tagged with the cpu. Otherwise cpu is
+# ## stored as a field.
+# ##
+# ## The default is false for backwards compatibility, and will be changed to
+# ## true in a future version. It is recommended to set to true on new
+# ## deployments.
+# # cpu_as_tag = false
+#
+# ## To filter which IRQs to collect, make use of tagpass / tagdrop, i.e.
+# # [inputs.interrupts.tagdrop]
+# # irq = [ "NET_RX", "TASKLET" ]
+
+
+# # Read metrics from the bare metal servers via IPMI
+# [[inputs.ipmi_sensor]]
+# ## optionally specify the path to the ipmitool executable
+# # path = "/usr/bin/ipmitool"
+# ##
+# ## Setting 'use_sudo' to true will make use of sudo to run ipmitool.
+# ## Sudo must be configured to allow the telegraf user to run ipmitool
+# ## without a password.
+# # use_sudo = false
+# ##
+# ## optionally force session privilege level. Can be CALLBACK, USER, OPERATOR, ADMINISTRATOR
+# # privilege = "ADMINISTRATOR"
+# ##
+# ## optionally specify one or more servers via a url matching
+# ## [username[:password]@][protocol[(address)]]
+# ## e.g.
+# ## root:passwd@lan(127.0.0.1)
+# ##
+# ## if no servers are specified, local machine sensor stats will be queried
+# ##
+# # servers = ["USERID:PASSW0RD@lan(192.168.1.1)"]
+#
+# ## Recommended: use metric 'interval' that is a multiple of 'timeout' to avoid
+# ## gaps or overlap in pulled data
+# interval = "30s"
+#
+# ## Timeout for the ipmitool command to complete
+# timeout = "20s"
+#
+# ## Schema Version: (Optional, defaults to version 1)
+# metric_version = 2
+#
+# ## Optionally provide the hex key for the IPMI connection.
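+# ## For example (hypothetical value): hex_key = "50415353574F5244", i.e. the key
+# ## supplied as a hex-encoded string.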
+# # hex_key = "" +# +# ## If ipmitool should use a cache +# ## for me ipmitool runs about 2 to 10 times faster with cache enabled on HP G10 servers (when using ubuntu20.04) +# ## the cache file may not work well for you if some sensors come up late +# # use_cache = false +# +# ## Path to the ipmitools cache file (defaults to OS temp dir) +# ## The provided path must exist and must be writable +# # cache_path = "" + + +# # Gather packets and bytes counters from Linux ipsets +# [[inputs.ipset]] +# ## By default, we only show sets which have already matched at least 1 packet. +# ## set include_unmatched_sets = true to gather them all. +# include_unmatched_sets = false +# ## Adjust your sudo settings appropriately if using this option ("sudo ipset save") +# use_sudo = false +# ## The default timeout of 1s for ipset execution can be overridden here: +# # timeout = "1s" + + +# # Gather packets and bytes throughput from iptables +# [[inputs.iptables]] +# ## iptables require root access on most systems. +# ## Setting 'use_sudo' to true will make use of sudo to run iptables. +# ## Users must configure sudo to allow telegraf user to run iptables with no password. +# ## iptables can be restricted to only list command "iptables -nvL". +# use_sudo = false +# ## Setting 'use_lock' to true runs iptables with the "-w" option. +# ## Adjust your sudo settings appropriately if using this option ("iptables -w 5 -nvl") +# use_lock = false +# ## Define an alternate executable, such as "ip6tables". Default is "iptables". +# # binary = "ip6tables" +# ## defines the table to monitor: +# table = "filter" +# ## defines the chains to monitor. +# ## NOTE: iptables rules without a comment will not be monitored. +# ## Read the plugin documentation for more information. +# chains = [ "INPUT" ] + + +# # Collect virtual and real server stats from Linux IPVS +# [[inputs.ipvs]] +# # no configuration + + +# # Read jobs and cluster metrics from Jenkins instances +# [[inputs.jenkins]] +# ## The Jenkins URL in the format "schema://host:port" +# url = "http://my-jenkins-instance:8080" +# # username = "admin" +# # password = "admin" +# +# ## Set response_timeout +# response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Optional Max Job Build Age filter +# ## Default 1 hour, ignore builds older than max_build_age +# # max_build_age = "1h" +# +# ## Optional Sub Job Depth filter +# ## Jenkins can have unlimited layer of sub jobs +# ## This config will limit the layers of pulling, default value 0 means +# ## unlimited pulling until no more sub jobs +# # max_subjob_depth = 0 +# +# ## Optional Sub Job Per Layer +# ## In workflow-multibranch-plugin, each branch will be created as a sub job. +# ## This config will limit to call only the lasted branches in each layer, +# ## empty will use default value 10 +# # max_subjob_per_layer = 10 +# +# ## Jobs to include or exclude from gathering +# ## When using both lists, job_exclude has priority. +# ## Wildcards are supported: [ "jobA/*", "jobB/subjob1/*"] +# # job_include = [ "*" ] +# # job_exclude = [ ] +# +# ## Nodes to include or exclude from gathering +# ## When using both lists, node_exclude has priority. 
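+# ## For example (hypothetical names): node_include = [ "*" ] combined with
+# ## node_exclude = [ "node-legacy" ] would gather every node except "node-legacy".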
+# # node_include = [ "*" ] +# # node_exclude = [ ] +# +# ## Worker pool for jenkins plugin only +# ## Empty this field will use default value 5 +# # max_connections = 5 + + +# # Read JMX metrics through Jolokia +# [[inputs.jolokia]] +# # DEPRECATED: the jolokia plugin has been deprecated in favor of the +# # jolokia2 plugin +# # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2 +# +# ## This is the context root used to compose the jolokia url +# ## NOTE that Jolokia requires a trailing slash at the end of the context root +# ## NOTE that your jolokia security policy must allow for POST requests. +# context = "/jolokia/" +# +# ## This specifies the mode used +# # mode = "proxy" +# # +# ## When in proxy mode this section is used to specify further +# ## proxy address configurations. +# ## Remember to change host address to fit your environment. +# # [inputs.jolokia.proxy] +# # host = "127.0.0.1" +# # port = "8080" +# +# ## Optional http timeouts +# ## +# ## response_header_timeout, if non-zero, specifies the amount of time to wait +# ## for a server's response headers after fully writing the request. +# # response_header_timeout = "3s" +# ## +# ## client_timeout specifies a time limit for requests made by this client. +# ## Includes connection time, any redirects, and reading the response body. +# # client_timeout = "4s" +# +# ## Attribute delimiter +# ## +# ## When multiple attributes are returned for a single +# ## [inputs.jolokia.metrics], the field name is a concatenation of the metric +# ## name, and the attribute name, separated by the given delimiter. +# # delimiter = "_" +# +# ## List of servers exposing jolokia read service +# [[inputs.jolokia.servers]] +# name = "as-server-01" +# host = "127.0.0.1" +# port = "8080" +# # username = "myuser" +# # password = "mypassword" +# +# ## List of metrics collected on above servers +# ## Each metric consists in a name, a jmx path and either +# ## a pass or drop slice attribute. +# ## This collect all heap memory usage metrics. +# [[inputs.jolokia.metrics]] +# name = "heap_memory_usage" +# mbean = "java.lang:type=Memory" +# attribute = "HeapMemoryUsage" +# +# ## This collect thread counts metrics. +# [[inputs.jolokia.metrics]] +# name = "thread_count" +# mbean = "java.lang:type=Threading" +# attribute = "TotalStartedThreadCount,ThreadCount,DaemonThreadCount,PeakThreadCount" +# +# ## This collect number of class loaded/unloaded counts metrics. +# [[inputs.jolokia.metrics]] +# name = "class_count" +# mbean = "java.lang:type=ClassLoading" +# attribute = "LoadedClassCount,UnloadedClassCount,TotalLoadedClassCount" + + +# # Read JMX metrics from a Jolokia REST agent endpoint +# [[inputs.jolokia2_agent]] +# # default_tag_prefix = "" +# # default_field_prefix = "" +# # default_field_separator = "." +# +# # Add agents URLs to query +# urls = ["http://localhost:8080/jolokia"] +# # username = "" +# # password = "" +# # response_timeout = "5s" +# +# ## Optional TLS config +# # tls_ca = "/var/private/ca.pem" +# # tls_cert = "/var/private/client.pem" +# # tls_key = "/var/private/client-key.pem" +# # insecure_skip_verify = false +# +# ## Add metrics to read +# [[inputs.jolokia2_agent.metric]] +# name = "java_runtime" +# mbean = "java.lang:type=Runtime" +# paths = ["Uptime"] + + +# # Read JMX metrics from a Jolokia REST proxy endpoint +# [[inputs.jolokia2_proxy]] +# # default_tag_prefix = "" +# # default_field_prefix = "" +# # default_field_separator = "." 
+#
+# ## Proxy agent
+# url = "http://localhost:8080/jolokia"
+# # username = ""
+# # password = ""
+# # response_timeout = "5s"
+#
+# ## Optional TLS config
+# # tls_ca = "/var/private/ca.pem"
+# # tls_cert = "/var/private/client.pem"
+# # tls_key = "/var/private/client-key.pem"
+# # insecure_skip_verify = false
+#
+# ## Add proxy targets to query
+# # default_target_username = ""
+# # default_target_password = ""
+# [[inputs.jolokia2_proxy.target]]
+# url = "service:jmx:rmi:///jndi/rmi://targethost:9999/jmxrmi"
+# # username = ""
+# # password = ""
+#
+# ## Add metrics to read
+# [[inputs.jolokia2_proxy.metric]]
+# name = "java_runtime"
+# mbean = "java.lang:type=Runtime"
+# paths = ["Uptime"]
+
+
+# # Read Kapacitor-formatted JSON metrics from one or more HTTP endpoints
+# [[inputs.kapacitor]]
+# ## Multiple URLs from which to read Kapacitor-formatted JSON
+# ## Default is "http://localhost:9092/kapacitor/v1/debug/vars".
+# urls = [
+# "http://localhost:9092/kapacitor/v1/debug/vars"
+# ]
+#
+# ## Time limit for http requests
+# timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Get kernel statistics from /proc/vmstat
+# [[inputs.kernel_vmstat]]
+# # no configuration
+
+
+# # Read status information from one or more Kibana servers
+# [[inputs.kibana]]
+# ## Specify a list of one or more Kibana servers
+# servers = ["http://localhost:5601"]
+#
+# ## Timeout for HTTP requests
+# timeout = "5s"
+#
+# ## HTTP Basic Auth credentials
+# # username = "username"
+# # password = "pa$$word"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read metrics from the Kubernetes api
+# [[inputs.kube_inventory]]
+# ## URL for the Kubernetes API
+# url = "https://127.0.0.1"
+#
+# ## Namespace to use. Set to "" to use all namespaces.
+# # namespace = "default"
+#
+# ## Use bearer token for authorization. ('bearer_token' takes priority)
+# ## If both of these are empty, we'll use the default serviceaccount:
+# ## at: /run/secrets/kubernetes.io/serviceaccount/token
+# # bearer_token = "/path/to/bearer/token"
+# ## OR
+# # bearer_token_string = "abc_123"
+#
+# ## Set response_timeout (default 5 seconds)
+# # response_timeout = "5s"
+#
+# ## Optional Resources to exclude from gathering
+# ## Leave blank to gather everything available.
+# ## Values can be - "daemonsets", "deployments", "endpoints", "ingress", "nodes",
+# ## "persistentvolumes", "persistentvolumeclaims", "pods", "services", "statefulsets"
+# # resource_exclude = [ "deployments", "nodes", "statefulsets" ]
+#
+# ## Optional Resources to include when gathering
+# ## Overrides resource_exclude if both set.
+# # resource_include = [ "deployments", "nodes", "statefulsets" ]
+#
+# ## selectors to include and exclude as tags. Globs accepted.
+# ## Note that an empty array for both will include all selectors as tags
+# ## selector_exclude overrides selector_include if both set.
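+# ## For example (hypothetical): selector_include = ["app*"] with selector_exclude = []
+# ## would add only selectors whose names start with "app" as tags.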
+# # selector_include = [] +# # selector_exclude = ["*"] +# +# ## Optional TLS Config +# # tls_ca = "/path/to/cafile" +# # tls_cert = "/path/to/certfile" +# # tls_key = "/path/to/keyfile" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read metrics from the kubernetes kubelet api +# [[inputs.kubernetes]] +# ## URL for the kubelet +# url = "http://127.0.0.1:10255" +# +# ## Use bearer token for authorization. ('bearer_token' takes priority) +# ## If both of these are empty, we'll use the default serviceaccount: +# ## at: /run/secrets/kubernetes.io/serviceaccount/token +# # bearer_token = "/path/to/bearer/token" +# ## OR +# # bearer_token_string = "abc_123" +# +# ## Pod labels to be added as tags. An empty array for both include and +# ## exclude will include all labels. +# # label_include = [] +# # label_exclude = ["*"] +# +# ## Set response_timeout (default 5 seconds) +# # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = /path/to/cafile +# # tls_cert = /path/to/certfile +# # tls_key = /path/to/keyfile +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read metrics from a LeoFS Server via SNMP +# [[inputs.leofs]] +# ## An array of URLs of the form: +# ## host [ ":" port] +# servers = ["127.0.0.1:4020"] + + +# # Provides Linux sysctl fs metrics +# [[inputs.linux_sysctl_fs]] +# # no configuration + + +# # Read metrics exposed by Logstash +# [[inputs.logstash]] +# ## The URL of the exposed Logstash API endpoint. +# url = "http://127.0.0.1:9600" +# +# ## Use Logstash 5 single pipeline API, set to true when monitoring +# ## Logstash 5. +# # single_pipeline = false +# +# ## Enable optional collection components. Can contain +# ## "pipelines", "process", and "jvm". +# # collect = ["pipelines", "process", "jvm"] +# +# ## Timeout for HTTP requests. +# # timeout = "5s" +# +# ## Optional HTTP Basic Auth credentials. +# # username = "username" +# # password = "pa$$word" +# +# ## Optional TLS Config. +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Use TLS but skip chain & host verification. +# # insecure_skip_verify = false +# +# ## Optional HTTP headers. +# # [inputs.logstash.headers] +# # "X-Special-Header" = "Special-Value" + + +# # Read metrics from local Lustre service on OST, MDS +# [[inputs.lustre2]] +# ## An array of /proc globs to search for Lustre stats +# ## If not specified, the default will work on Lustre 2.5.x +# ## +# # ost_procfiles = [ +# # "/proc/fs/lustre/obdfilter/*/stats", +# # "/proc/fs/lustre/osd-ldiskfs/*/stats", +# # "/proc/fs/lustre/obdfilter/*/job_stats", +# # ] +# # mds_procfiles = [ +# # "/proc/fs/lustre/mdt/*/md_stats", +# # "/proc/fs/lustre/mdt/*/job_stats", +# # ] + + +# # Gathers metrics from the /3.0/reports MailChimp API +# [[inputs.mailchimp]] +# ## MailChimp API key +# ## get from https://admin.mailchimp.com/account/api/ +# api_key = "" # required +# ## Reports for campaigns sent more than days_old ago will not be collected. +# ## 0 means collect all. +# days_old = 0 +# ## Campaign ID to get, if empty gets all campaigns, this option overrides days_old +# # campaign_id = "" + + +# # Retrieves information on a specific host in a MarkLogic Cluster +# [[inputs.marklogic]] +# ## Base URL of the MarkLogic HTTP Server. +# url = "http://localhost:8002" +# +# ## List of specific hostnames to retrieve information. At least (1) required. 
+# # hosts = ["hostname1", "hostname2"] +# +# ## Using HTTP Basic Authentication. Management API requires 'manage-user' role privileges +# # username = "myuser" +# # password = "mypassword" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read metrics from one or many mcrouter servers +# [[inputs.mcrouter]] +# ## An array of address to gather stats about. Specify an ip or hostname +# ## with port. ie tcp://localhost:11211, tcp://10.0.0.1:11211, etc. +# servers = ["tcp://localhost:11211", "unix:///var/run/mcrouter.sock"] +# +# ## Timeout for metric collections from all servers. Minimum timeout is "1s". +# # timeout = "5s" + + +# # Get md array statistics from /proc/mdstat +# [[inputs.mdstat]] +# ## Sets file path +# ## If not specified, then default is /proc/mdstat +# # file_name = "/proc/mdstat" + + +# # Read metrics from one or many memcached servers +# [[inputs.memcached]] +# ## An array of address to gather stats about. Specify an ip on hostname +# ## with optional port. ie localhost, 10.0.0.1:11211, etc. +# servers = ["localhost:11211"] +# # unix_sockets = ["/var/run/memcached.sock"] + + +# # Telegraf plugin for gathering metrics from N Mesos masters +# [[inputs.mesos]] +# ## Timeout, in ms. +# timeout = 100 +# +# ## A list of Mesos masters. +# masters = ["http://localhost:5050"] +# +# ## Master metrics groups to be collected, by default, all enabled. +# master_collections = [ +# "resources", +# "master", +# "system", +# "agents", +# "frameworks", +# "framework_offers", +# "tasks", +# "messages", +# "evqueue", +# "registrar", +# "allocator", +# ] +# +# ## A list of Mesos slaves, default is [] +# # slaves = [] +# +# ## Slave metrics groups to be collected, by default, all enabled. +# # slave_collections = [ +# # "resources", +# # "agent", +# # "system", +# # "executors", +# # "tasks", +# # "messages", +# # ] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Collects scores from a Minecraft server's scoreboard using the RCON protocol +# [[inputs.minecraft]] +# ## Address of the Minecraft server. +# # server = "localhost" +# +# ## Server RCON Port. +# # port = "25575" +# +# ## Server RCON Password. +# password = "" +# +# ## Uncomment to remove deprecated metric components. +# # tagdrop = ["server"] + + +# # Retrieve data from MODBUS slave devices +# [[inputs.modbus]] +# ## Connection Configuration +# ## +# ## The plugin supports connections to PLCs via MODBUS/TCP, RTU over TCP, ASCII over TCP or +# ## via serial line communication in binary (RTU) or readable (ASCII) encoding +# ## +# ## Device name +# name = "Device" +# +# ## Slave ID - addresses a MODBUS device on the bus +# ## Range: 0 - 255 [0 = broadcast; 248 - 255 = reserved] +# slave_id = 1 +# +# ## Timeout for each request +# timeout = "1s" +# +# ## Maximum number of retries and the time to wait between retries +# ## when a slave-device is busy. 
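+# ## For example (hypothetical values): busy_retries = 3 with busy_retries_wait = "200ms"
+# ## would retry a busy slave three times, waiting 200ms between attempts.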
+# # busy_retries = 0 +# # busy_retries_wait = "100ms" +# +# # TCP - connect via Modbus/TCP +# controller = "tcp://localhost:502" +# +# ## Serial (RS485; RS232) +# # controller = "file:///dev/ttyUSB0" +# # baud_rate = 9600 +# # data_bits = 8 +# # parity = "N" +# # stop_bits = 1 +# +# ## For Modbus over TCP you can choose between "TCP", "RTUoverTCP" and "ASCIIoverTCP" +# ## default behaviour is "TCP" if the controller is TCP +# ## For Serial you can choose between "RTU" and "ASCII" +# # transmission_mode = "RTU" +# +# ## Measurements +# ## +# +# ## Digital Variables, Discrete Inputs and Coils +# ## measurement - the (optional) measurement name, defaults to "modbus" +# ## name - the variable name +# ## address - variable address +# +# discrete_inputs = [ +# { name = "start", address = [0]}, +# { name = "stop", address = [1]}, +# { name = "reset", address = [2]}, +# { name = "emergency_stop", address = [3]}, +# ] +# coils = [ +# { name = "motor1_run", address = [0]}, +# { name = "motor1_jog", address = [1]}, +# { name = "motor1_stop", address = [2]}, +# ] +# +# ## Analog Variables, Input Registers and Holding Registers +# ## measurement - the (optional) measurement name, defaults to "modbus" +# ## name - the variable name +# ## byte_order - the ordering of bytes +# ## |---AB, ABCD - Big Endian +# ## |---BA, DCBA - Little Endian +# ## |---BADC - Mid-Big Endian +# ## |---CDAB - Mid-Little Endian +# ## data_type - INT16, UINT16, INT32, UINT32, INT64, UINT64, +# ## FLOAT32-IEEE, FLOAT64-IEEE (the IEEE 754 binary representation) +# ## FLOAT32, FIXED, UFIXED (fixed-point representation on input) +# ## scale - the final numeric variable representation +# ## address - variable address +# +# holding_registers = [ +# { name = "power_factor", byte_order = "AB", data_type = "FIXED", scale=0.01, address = [8]}, +# { name = "voltage", byte_order = "AB", data_type = "FIXED", scale=0.1, address = [0]}, +# { name = "energy", byte_order = "ABCD", data_type = "FIXED", scale=0.001, address = [5,6]}, +# { name = "current", byte_order = "ABCD", data_type = "FIXED", scale=0.001, address = [1,2]}, +# { name = "frequency", byte_order = "AB", data_type = "UFIXED", scale=0.1, address = [7]}, +# { name = "power", byte_order = "ABCD", data_type = "UFIXED", scale=0.1, address = [3,4]}, +# ] +# input_registers = [ +# { name = "tank_level", byte_order = "AB", data_type = "INT16", scale=1.0, address = [0]}, +# { name = "tank_ph", byte_order = "AB", data_type = "INT16", scale=1.0, address = [1]}, +# { name = "pump1_speed", byte_order = "ABCD", data_type = "INT32", scale=1.0, address = [3,4]}, +# ] + + +# # Read metrics from one or many MongoDB servers +# [[inputs.mongodb]] +# ## An array of URLs of the form: +# ## "mongodb://" [user ":" pass "@"] host [ ":" port] +# ## For example: +# ## mongodb://user:auth_key@10.10.3.30:27017, +# ## mongodb://10.10.3.33:18832, +# servers = ["mongodb://127.0.0.1:27017"] +# +# ## When true, collect cluster status +# ## Note that the query that counts jumbo chunks triggers a COLLSCAN, which +# ## may have an impact on performance. +# # gather_cluster_status = true +# +# ## When true, collect per database stats +# # gather_perdb_stats = false +# +# ## When true, collect per collection stats +# # gather_col_stats = false +# +# ## When true, collect usage statistics for each collection +# ## (insert, update, queries, remove, getmore, commands etc...). 
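+# ## For example, setting gather_top_stat = true below would emit these per-collection
+# ## usage counters as additional measurements (illustrative note).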
+# # gather_top_stat = false +# +# ## List of db where collections stats are collected +# ## If empty, all db are concerned +# # col_stats_dbs = ["local"] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read metrics and status information about processes managed by Monit +# [[inputs.monit]] +# ## Monit HTTPD address +# address = "http://127.0.0.1:2812" +# +# ## Username and Password for Monit +# # username = "" +# # password = "" +# +# ## Amount of time allowed to complete the HTTP request +# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Aggregates the contents of multiple files into a single point +# [[inputs.multifile]] +# ## Base directory where telegraf will look for files. +# ## Omit this option to use absolute paths. +# base_dir = "/sys/bus/i2c/devices/1-0076/iio:device0" +# +# ## If true, Telegraf discard all data when a single file can't be read. +# ## Else, Telegraf omits the field generated from this file. +# # fail_early = true +# +# ## Files to parse each interval. +# [[inputs.multifile.file]] +# file = "in_pressure_input" +# dest = "pressure" +# conversion = "float" +# [[inputs.multifile.file]] +# file = "in_temp_input" +# dest = "temperature" +# conversion = "float(3)" +# [[inputs.multifile.file]] +# file = "in_humidityrelative_input" +# dest = "humidityrelative" +# conversion = "float(3)" + + +# # Read metrics from one or many mysql servers +# [[inputs.mysql]] +# ## specify servers via a url matching: +# ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify|custom]] +# ## see https://github.com/go-sql-driver/mysql#dsn-data-source-name +# ## e.g. +# ## servers = ["user:passwd@tcp(127.0.0.1:3306)/?tls=false"] +# ## servers = ["user@tcp(127.0.0.1:3306)/?tls=false"] +# # +# ## If no servers are specified, then localhost is used as the host. +# servers = ["tcp(127.0.0.1:3306)/"] +# +# ## Selects the metric output format. +# ## +# ## This option exists to maintain backwards compatibility, if you have +# ## existing metrics do not set or change this value until you are ready to +# ## migrate to the new format. +# ## +# ## If you do not have existing metrics from this plugin set to the latest +# ## version. 
+# ## +# ## Telegraf >=1.6: metric_version = 2 +# ## <1.6: metric_version = 1 (or unset) +# metric_version = 2 +# +# ## if the list is empty, then metrics are gathered from all database tables +# # table_schema_databases = [] +# +# ## gather metrics from INFORMATION_SCHEMA.TABLES for databases provided above list +# # gather_table_schema = false +# +# ## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST +# # gather_process_list = false +# +# ## gather user statistics from INFORMATION_SCHEMA.USER_STATISTICS +# # gather_user_statistics = false +# +# ## gather auto_increment columns and max values from information schema +# # gather_info_schema_auto_inc = false +# +# ## gather metrics from INFORMATION_SCHEMA.INNODB_METRICS +# # gather_innodb_metrics = false +# +# ## gather metrics from SHOW SLAVE STATUS command output +# # gather_slave_status = false +# +# ## gather metrics from all channels from SHOW SLAVE STATUS command output +# # gather_all_slave_channels = false +# +# ## use MariaDB dialect for all channels SHOW SLAVE STATUS +# # mariadb_dialect = false +# +# ## gather metrics from SHOW BINARY LOGS command output +# # gather_binary_logs = false +# +# ## gather metrics from PERFORMANCE_SCHEMA.GLOBAL_VARIABLES +# # gather_global_variables = true +# +# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE +# # gather_table_io_waits = false +# +# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS +# # gather_table_lock_waits = false +# +# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE +# # gather_index_io_waits = false +# +# ## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS +# # gather_event_waits = false +# +# ## gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME +# # gather_file_events_stats = false +# +# ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST +# # gather_perf_events_statements = false +# +# ## the limits for metrics form perf_events_statements +# # perf_events_statements_digest_text_limit = 120 +# # perf_events_statements_limit = 250 +# # perf_events_statements_time_limit = 86400 +# +# ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME +# # gather_perf_sum_per_acc_per_event = false +# +# ## list of events to be gathered for gather_perf_sum_per_acc_per_event +# ## in case of empty list all events will be gathered +# # perf_summary_events = [] +# +# ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES) +# ## example: interval_slow = "30m" +# # interval_slow = "" +# +# ## Optional TLS Config (will be used if tls=custom parameter specified in server uri) +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Provides metrics about the state of a NATS server +# [[inputs.nats]] +# ## The address of the monitoring endpoint of the NATS server +# server = "http://localhost:8222" +# +# ## Maximum time to receive response +# # response_timeout = "5s" + + +# # Neptune Apex data collector +# [[inputs.neptune_apex]] +# ## The Neptune Apex plugin reads the publicly available status.xml data from a local Apex. +# ## Measurements will be logged under "apex". +# +# ## The base URL of the local Apex(es). If you specify more than one server, they will +# ## be differentiated by the "source" tag. 
+# servers = [
+# "http://apex.local",
+# ]
+#
+# ## The response_timeout specifies how long to wait for a reply from the Apex.
+# #response_timeout = "5s"
+
+[[inputs.net]]
+ ignore_protocol_stats = true
+
+# # Read metrics about network interface usage
+# [[inputs.net]]
+# ## By default, telegraf gathers stats from any up interface (excluding loopback)
+# ## Setting interfaces will tell it to gather these explicit interfaces,
+# ## regardless of status.
+# ##
+# # interfaces = ["eth0"]
+# ##
+# ## On linux systems telegraf also collects protocol stats.
+# ## Setting ignore_protocol_stats to true will skip reporting of protocol metrics.
+# ##
+# # ignore_protocol_stats = false
+# ##
+
+
+# # Collect response time of a TCP or UDP connection
+# [[inputs.net_response]]
+# ## Protocol, must be "tcp" or "udp"
+# ## NOTE: because the "udp" protocol does not respond to requests, it requires
+# ## a send/expect string pair (see below).
+# protocol = "tcp"
+# ## Server address (default localhost)
+# address = "localhost:80"
+#
+# ## Set timeout
+# # timeout = "1s"
+#
+# ## Set read timeout (only used if expecting a response)
+# # read_timeout = "1s"
+#
+# ## The following options are required for UDP checks. For TCP, they are
+# ## optional. The plugin will send the given string to the server and then
+# ## expect to receive the given 'expect' string back.
+# ## string sent to the server
+# # send = "ssh"
+# ## expected string in answer
+# # expect = "ssh"
+#
+# ## Uncomment to remove deprecated fields
+# # fielddrop = ["result_type", "string_found"]
+
+
+# # Read TCP metrics such as established, time wait and sockets counts.
+# [[inputs.netstat]]
+# # no configuration
+
+
+# # Read per-mount NFS client metrics from /proc/self/mountstats
+# [[inputs.nfsclient]]
+# ## Read more low-level metrics (optional, defaults to false)
+# # fullstat = false
+#
+# ## List of mounts to explicitly include or exclude (optional)
+# ## The pattern (Go regexp) is matched against the mount point (not the
+# ## device being mounted). If include_mounts is set, all mounts are ignored
+# ## unless present in the list. If a mount is listed in both include_mounts
+# ## and exclude_mounts, it is excluded. Go regexp patterns can be used.
+# # include_mounts = []
+# # exclude_mounts = []
+#
+# ## List of operations to include or exclude from collecting. This applies
+# ## only when fullstat=true. Semantics are similar to {include,exclude}_mounts:
+# ## the default is to collect everything; when include_operations is set, only
+# ## those OPs are collected; when exclude_operations is set, all are collected
+# ## except those listed. If include and exclude are set, the OP is excluded.
+# ## See /proc/self/mountstats for a list of valid operations; note that
+# ## NFSv3 and NFSv4 have different lists. While it is not possible to
+# ## have different include/exclude lists for NFSv3/4, unused elements
+# ## in the list should be okay. It is possible to have different lists
+# ## for different mountpoints: use multiple [[inputs.nfsclient]] stanzas,
+# ## with their own lists. See "include_mounts" above, and be careful of
+# ## duplicate metrics.
+# # include_operations = []
+# # exclude_operations = []
+
+
+# # Read Nginx's basic status information (ngx_http_stub_status_module)
+# [[inputs.nginx]]
+# # An array of Nginx stub_status URI to gather stats.
+# urls = ["http://localhost/server_status"] +# +# ## Optional TLS Config +# tls_ca = "/etc/telegraf/ca.pem" +# tls_cert = "/etc/telegraf/cert.cer" +# tls_key = "/etc/telegraf/key.key" +# ## Use TLS but skip chain & host verification +# insecure_skip_verify = false +# +# # HTTP response timeout (default: 5s) +# response_timeout = "5s" + + +# # Read Nginx Plus' full status information (ngx_http_status_module) +# [[inputs.nginx_plus]] +# ## An array of ngx_http_status_module or status URI to gather stats. +# urls = ["http://localhost/status"] +# +# # HTTP response timeout (default: 5s) +# response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read Nginx Plus Api documentation +# [[inputs.nginx_plus_api]] +# ## An array of API URI to gather stats. +# urls = ["http://localhost/api"] +# +# # Nginx API version, default: 3 +# # api_version = 3 +# +# # HTTP response timeout (default: 5s) +# response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read Nginx virtual host traffic status module information (nginx-module-sts) +# [[inputs.nginx_sts]] +# ## An array of ngx_http_status_module or status URI to gather stats. +# urls = ["http://localhost/status"] +# +# ## HTTP response timeout (default: 5s) +# response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read nginx_upstream_check module status information (https://github.com/yaoweibin/nginx_upstream_check_module) +# [[inputs.nginx_upstream_check]] +# ## An URL where Nginx Upstream check module is enabled +# ## It should be set to return a JSON formatted response +# url = "http://127.0.0.1/status?format=json" +# +# ## HTTP method +# # method = "GET" +# +# ## Optional HTTP headers +# # headers = {"X-Special-Header" = "Special-Value"} +# +# ## Override HTTP "Host" header +# # host_header = "check.example.com" +# +# ## Timeout for HTTP requests +# timeout = "5s" +# +# ## Optional HTTP Basic Auth credentials +# # username = "username" +# # password = "pa$$word" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read Nginx virtual host traffic status module information (nginx-module-vts) +# [[inputs.nginx_vts]] +# ## An array of ngx_http_status_module or status URI to gather stats. +# urls = ["http://localhost/status"] +# +# ## HTTP response timeout (default: 5s) +# response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # A plugin to collect stats from the NSD authoritative DNS name server +# [[inputs.nsd]] +# ## Address of server to connect to, optionally ':port'. Defaults to the +# ## address in the nsd config file. 
+# server = "127.0.0.1:8953" +# +# ## If running as a restricted user you can prepend sudo for additional access: +# # use_sudo = false +# +# ## The default location of the nsd-control binary can be overridden with: +# # binary = "/usr/sbin/nsd-control" +# +# ## The default location of the nsd config file can be overridden with: +# # config_file = "/etc/nsd/nsd.conf" +# +# ## The default timeout of 1s can be overridden with: +# # timeout = "1s" + + +# # Read NSQ topic and channel statistics. +# [[inputs.nsq]] +# ## An array of NSQD HTTP API endpoints +# endpoints = ["http://localhost:4151"] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Collect kernel snmp counters and network interface statistics +# [[inputs.nstat]] +# ## file paths for proc files. If empty default paths will be used: +# ## /proc/net/netstat, /proc/net/snmp, /proc/net/snmp6 +# ## These can also be overridden with env variables, see README. +# proc_net_netstat = "/proc/net/netstat" +# proc_net_snmp = "/proc/net/snmp" +# proc_net_snmp6 = "/proc/net/snmp6" +# ## dump metrics with 0 values too +# dump_zeros = true + + +# # Get standard NTP query metrics, requires ntpq executable. +# [[inputs.ntpq]] +# ## If false, set the -n ntpq flag. Can reduce metric gather time. +# dns_lookup = true + + +# # Pulls statistics from nvidia GPUs attached to the host +# [[inputs.nvidia_smi]] +# ## Optional: path to nvidia-smi binary, defaults to $PATH via exec.LookPath +# # bin_path = "/usr/bin/nvidia-smi" +# +# ## Optional: timeout for GPU polling +# # timeout = "5s" + + +# # Retrieve data from OPCUA devices +# [[inputs.opcua]] +# ## Metric name +# # name = "opcua" +# # +# ## OPC UA Endpoint URL +# # endpoint = "opc.tcp://localhost:4840" +# # +# ## Maximum time allowed to establish a connect to the endpoint. +# # connect_timeout = "10s" +# # +# ## Maximum time allowed for a request over the estabilished connection. +# # request_timeout = "5s" +# # +# ## Security policy, one of "None", "Basic128Rsa15", "Basic256", +# ## "Basic256Sha256", or "auto" +# # security_policy = "auto" +# # +# ## Security mode, one of "None", "Sign", "SignAndEncrypt", or "auto" +# # security_mode = "auto" +# # +# ## Path to cert.pem. Required when security mode or policy isn't "None". +# ## If cert path is not supplied, self-signed cert and key will be generated. +# # certificate = "/etc/telegraf/cert.pem" +# # +# ## Path to private key.pem. Required when security mode or policy isn't "None". +# ## If key path is not supplied, self-signed cert and key will be generated. +# # private_key = "/etc/telegraf/key.pem" +# # +# ## Authentication Method, one of "Certificate", "UserName", or "Anonymous". To +# ## authenticate using a specific ID, select 'Certificate' or 'UserName' +# # auth_method = "Anonymous" +# # +# ## Username. Required for auth_method = "UserName" +# # username = "" +# # +# ## Password. 
Required for auth_method = "UserName" +# # password = "" +# # +# ## Node ID configuration +# ## name - field name to use in the output +# ## namespace - OPC UA namespace of the node (integer value 0 thru 3) +# ## identifier_type - OPC UA ID type (s=string, i=numeric, g=guid, b=opaque) +# ## identifier - OPC UA ID (tag as shown in opcua browser) +# ## Example: +# ## {name="ProductUri", namespace="0", identifier_type="i", identifier="2262"} +# # nodes = [ +# # {name="", namespace="", identifier_type="", identifier=""}, +# # {name="", namespace="", identifier_type="", identifier=""}, +# #] +# # +# ## Node Group +# ## Sets defaults for OPC UA namespace and ID type so they aren't required in +# ## every node. A group can also have a metric name that overrides the main +# ## plugin metric name. +# ## +# ## Multiple node groups are allowed +# #[[inputs.opcua.group]] +# ## Group Metric name. Overrides the top level name. If unset, the +# ## top level name is used. +# # name = +# # +# ## Group default namespace. If a node in the group doesn't set its +# ## namespace, this is used. +# # namespace = +# # +# ## Group default identifier type. If a node in the group doesn't set its +# ## namespace, this is used. +# # identifier_type = +# # +# ## Node ID Configuration. Array of nodes with the same settings as above. +# # nodes = [ +# # {name="", namespace="", identifier_type="", identifier=""}, +# # {name="", namespace="", identifier_type="", identifier=""}, +# #] + + +# # OpenLDAP cn=Monitor plugin +# [[inputs.openldap]] +# host = "localhost" +# port = 389 +# +# # ldaps, starttls, or no encryption. default is an empty string, disabling all encryption. +# # note that port will likely need to be changed to 636 for ldaps +# # valid options: "" | "starttls" | "ldaps" +# tls = "" +# +# # skip peer certificate verification. Default is false. +# insecure_skip_verify = false +# +# # Path to PEM-encoded Root certificate to use to verify server certificate +# tls_ca = "/etc/ssl/certs.pem" +# +# # dn/password to bind with. If bind_dn is empty, an anonymous bind is performed. +# bind_dn = "" +# bind_password = "" +# +# # Reverse metric names so they sort more naturally. Recommended. +# # This defaults to false if unset, but is set to true when generating a new config +# reverse_metric_names = true + + +# # Get standard NTP query metrics from OpenNTPD. +# [[inputs.openntpd]] +# ## Run ntpctl binary with sudo. +# # use_sudo = false +# +# ## Location of the ntpctl binary. +# # binary = "/usr/sbin/ntpctl" +# +# ## Maximum time the ntpctl binary is allowed to run. +# # timeout = "5ms" + + +# # A plugin to collect stats from Opensmtpd - a validating, recursive, and caching DNS resolver +# [[inputs.opensmtpd]] +# ## If running as a restricted user you can prepend sudo for additional access: +# #use_sudo = false +# +# ## The default location of the smtpctl binary can be overridden with: +# binary = "/usr/sbin/smtpctl" +# +# ## The default timeout of 1000ms can be overridden with (in milliseconds): +# timeout = 1000 + + +# # Read current weather and forecasts data from openweathermap.org +# [[inputs.openweathermap]] +# ## OpenWeatherMap API key. +# app_id = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" +# +# ## City ID's to collect weather data from. +# city_id = ["5391959"] +# +# ## Language of the description field. 
Can be one of "ar", "bg", +# ## "ca", "cz", "de", "el", "en", "fa", "fi", "fr", "gl", "hr", "hu", +# ## "it", "ja", "kr", "la", "lt", "mk", "nl", "pl", "pt", "ro", "ru", +# ## "se", "sk", "sl", "es", "tr", "ua", "vi", "zh_cn", "zh_tw" +# # lang = "en" +# +# ## APIs to fetch; can contain "weather" or "forecast". +# fetch = ["weather", "forecast"] +# +# ## OpenWeatherMap base URL +# # base_url = "https://api.openweathermap.org/" +# +# ## Timeout for HTTP response. +# # response_timeout = "5s" +# +# ## Preferred unit system for temperature and wind speed. Can be one of +# ## "metric", "imperial", or "standard". +# # units = "metric" +# +# ## Query interval; OpenWeatherMap updates their weather data every 10 +# ## minutes. +# interval = "10m" + + +# # Read metrics of passenger using passenger-status +# [[inputs.passenger]] +# ## Path of passenger-status. +# ## +# ## Plugin gather metric via parsing XML output of passenger-status +# ## More information about the tool: +# ## https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html +# ## +# ## If no path is specified, then the plugin simply execute passenger-status +# ## hopefully it can be found in your PATH +# command = "passenger-status -v --show=xml" + + +# # Gather counters from PF +# [[inputs.pf]] +# ## PF require root access on most systems. +# ## Setting 'use_sudo' to true will make use of sudo to run pfctl. +# ## Users must configure sudo to allow telegraf user to run pfctl with no password. +# ## pfctl can be restricted to only list command "pfctl -s info". +# use_sudo = false + + +# # Read metrics of phpfpm, via HTTP status page or socket +# [[inputs.phpfpm]] +# ## An array of addresses to gather stats about. Specify an ip or hostname +# ## with optional port and path +# ## +# ## Plugin can be configured in three modes (either can be used): +# ## - http: the URL must start with http:// or https://, ie: +# ## "http://localhost/status" +# ## "http://192.168.130.1/status?full" +# ## +# ## - unixsocket: path to fpm socket, ie: +# ## "/var/run/php5-fpm.sock" +# ## or using a custom fpm status path: +# ## "/var/run/php5-fpm.sock:fpm-custom-status-path" +# ## +# ## - fcgi: the URL must start with fcgi:// or cgi://, and port must be present, ie: +# ## "fcgi://10.0.0.12:9000/status" +# ## "cgi://10.0.10.12:9001/status" +# ## +# ## Example of multiple gathering from local socket and remote host +# ## urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"] +# urls = ["http://localhost/status"] +# +# ## Duration allowed to complete HTTP requests. +# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Ping given url(s) and return statistics +# [[inputs.ping]] +# ## Hosts to send ping packets to. +# urls = ["example.org"] +# +# ## Method used for sending pings, can be either "exec" or "native". When set +# ## to "exec" the systems ping command will be executed. When set to "native" +# ## the plugin will send pings directly. +# ## +# ## While the default is "exec" for backwards compatibility, new deployments +# ## are encouraged to use the "native" method for improved compatibility and +# ## performance. +# # method = "exec" +# +# ## Number of ping packets to send per interval. Corresponds to the "-c" +# ## option of the ping command. +# # count = 1 +# +# ## Time to wait between sending ping packets in seconds. 
Operates like the +# ## "-i" option of the ping command. +# # ping_interval = 1.0 +# +# ## If set, the time to wait for a ping response in seconds. Operates like +# ## the "-W" option of the ping command. +# # timeout = 1.0 +# +# ## If set, the total ping deadline, in seconds. Operates like the -w option +# ## of the ping command. +# # deadline = 10 +# +# ## Interface or source address to send ping from. Operates like the -I or -S +# ## option of the ping command. +# # interface = "" +# +# ## Percentiles to calculate. This only works with the native method. +# # percentiles = [50, 95, 99] +# +# ## Specify the ping executable binary. +# # binary = "ping" +# +# ## Arguments for ping command. When arguments is not empty, the command from +# ## the binary option will be used and other options (ping_interval, timeout, +# ## etc) will be ignored. +# # arguments = ["-c", "3"] +# +# ## Use only IPv6 addresses when resolving a hostname. +# # ipv6 = false +# +# ## Number of data bytes to be sent. Corresponds to the "-s" +# ## option of the ping command. This only works with the native method. +# # size = 56 + + +# # Measure postfix queue statistics +# [[inputs.postfix]] +# ## Postfix queue directory. If not provided, telegraf will try to use +# ## 'postconf -h queue_directory' to determine it. +# # queue_directory = "/var/spool/postfix" + + +# # Read metrics from one or many PowerDNS servers +# [[inputs.powerdns]] +# ## An array of sockets to gather stats about. +# ## Specify a path to unix socket. +# unix_sockets = ["/var/run/pdns.controlsocket"] + + +# # Read metrics from one or many PowerDNS Recursor servers +# [[inputs.powerdns_recursor]] +# ## Path to the Recursor control socket. +# unix_sockets = ["/var/run/pdns_recursor.controlsocket"] +# +# ## Directory to create receive socket. This default is likely not writable, +# ## please reference the full plugin documentation for a recommended setup. +# # socket_dir = "/var/run/" +# ## Socket permissions for the receive socket. +# # socket_mode = "0666" + + +# # Monitor process cpu and memory usage +# [[inputs.procstat]] +# ## PID file to monitor process +# pid_file = "/var/run/nginx.pid" +# ## executable name (ie, pgrep ) +# # exe = "nginx" +# ## pattern as argument for pgrep (ie, pgrep -f ) +# # pattern = "nginx" +# ## user as argument for pgrep (ie, pgrep -u ) +# # user = "nginx" +# ## Systemd unit name, supports globs when include_systemd_children is set to true +# # systemd_unit = "nginx.service" +# # include_systemd_children = false +# ## CGroup name or path, supports globs +# # cgroup = "systemd/system.slice/nginx.service" +# +# ## Windows service name +# # win_service = "" +# +# ## override for process_name +# ## This is optional; default is sourced from /proc//status +# # process_name = "bar" +# +# ## Field name prefix +# # prefix = "" +# +# ## When true add the full cmdline as a tag. +# # cmdline_tag = false +# +# ## Mode to use when calculating CPU usage. Can be one of 'solaris' or 'irix'. +# # mode = "irix" +# +# ## Add the PID as a tag instead of as a field. When collecting multiple +# ## processes with otherwise matching tags this setting should be enabled to +# ## ensure each process has a unique identity. +# ## +# ## Enabling this option may result in a large number of series, especially +# ## when processes have a short lifetime. +# # pid_tag = false +# +# ## Method to use when finding process IDs. Can be one of 'pgrep', or +# ## 'native'. 
The pgrep finder calls the pgrep executable in the PATH while +# ## the native finder performs the search directly in a manor dependent on the +# ## platform. Default is 'pgrep' +# # pid_finder = "pgrep" + + +# # Provides metrics from Proxmox nodes (Proxmox Virtual Environment > 6.2). +# [[inputs.proxmox]] +# ## API connection configuration. The API token was introduced in Proxmox v6.2. Required permissions for user and token: PVEAuditor role on /. +# base_url = "https://localhost:8006/api2/json" +# api_token = "USER@REALM!TOKENID=UUID" +# ## Node name, defaults to OS hostname +# # node_name = "" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# insecure_skip_verify = false +# +# # HTTP response timeout (default: 5s) +# response_timeout = "5s" + + +# # Reads last_run_summary.yaml file and converts to measurements +# [[inputs.puppetagent]] +# ## Location of puppet last run summary file +# location = "/var/lib/puppet/state/last_run_summary.yaml" + + +# # Reads metrics from RabbitMQ servers via the Management Plugin +# [[inputs.rabbitmq]] +# ## Management Plugin url. (default: http://localhost:15672) +# # url = "http://localhost:15672" +# ## Tag added to rabbitmq_overview series; deprecated: use tags +# # name = "rmq-server-1" +# ## Credentials +# # username = "guest" +# # password = "guest" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Optional request timeouts +# ## +# ## ResponseHeaderTimeout, if non-zero, specifies the amount of time to wait +# ## for a server's response headers after fully writing the request. +# # header_timeout = "3s" +# ## +# ## client_timeout specifies a time limit for requests made by this client. +# ## Includes connection time, any redirects, and reading the response body. +# # client_timeout = "4s" +# +# ## A list of nodes to gather as the rabbitmq_node measurement. If not +# ## specified, metrics for all nodes are gathered. +# # nodes = ["rabbit@node1", "rabbit@node2"] +# +# ## A list of queues to gather as the rabbitmq_queue measurement. If not +# ## specified, metrics for all queues are gathered. +# # queues = ["telegraf"] +# +# ## A list of exchanges to gather as the rabbitmq_exchange measurement. If not +# ## specified, metrics for all exchanges are gathered. +# # exchanges = ["telegraf"] +# +# ## Metrics to include and exclude. Globs accepted. +# ## Note that an empty array for both will include all metrics +# ## Currently the following metrics are supported: "exchange", "federation", "node", "overview", "queue" +# # metric_include = [] +# # metric_exclude = [] +# +# ## Queues to include and exclude. Globs accepted. +# ## Note that an empty array for both will include all queues +# queue_name_include = [] +# queue_name_exclude = [] +# +# ## Federation upstreams include and exclude when gathering the rabbitmq_federation measurement. +# ## If neither are specified, metrics for all federation upstreams are gathered. +# ## Federation link metrics will only be gathered for queues and exchanges +# ## whose non-federation metrics will be collected (e.g a queue excluded +# ## by the 'queue_name_exclude' option will also be excluded from federation). +# ## Globs accepted. 
+# # federation_upstream_include = ["dataCentre-*"] +# # federation_upstream_exclude = [] + + +# # Read raindrops stats (raindrops - real-time stats for preforking Rack servers) +# [[inputs.raindrops]] +# ## An array of raindrops middleware URI to gather stats. +# urls = ["http://localhost:8080/_raindrops"] + + +# # Reads metrics from RavenDB servers via the Monitoring Endpoints +# [[inputs.ravendb]] +# ## Node URL and port that RavenDB is listening on +# url = "https://localhost:8080" +# +# ## RavenDB X509 client certificate setup +# # tls_cert = "/etc/telegraf/raven.crt" +# # tls_key = "/etc/telegraf/raven.key" +# +# ## Optional request timeout +# ## +# ## Timeout, specifies the amount of time to wait +# ## for a server's response headers after fully writing the request and +# ## time limit for requests made by this client +# # timeout = "5s" +# +# ## List of statistics which are collected +# # At least one is required +# # Allowed values: server, databases, indexes, collections +# # +# # stats_include = ["server", "databases", "indexes", "collections"] +# +# ## List of db where database stats are collected +# ## If empty, all db are concerned +# # db_stats_dbs = [] +# +# ## List of db where index status are collected +# ## If empty, all indexes from all db are concerned +# # index_stats_dbs = [] +# +# ## List of db where collection status are collected +# ## If empty, all collections from all db are concerned +# # collection_stats_dbs = [] + + +# # Read CPU, Fans, Powersupply and Voltage metrics of hardware server through redfish APIs +# [[inputs.redfish]] +# ## Server url +# address = "https://127.0.0.1:5000" +# +# ## Username, Password for hardware server +# username = "root" +# password = "password123456" +# +# ## ComputerSystemId +# computer_system_id="2M220100SL" +# +# ## Amount of time allowed to complete the HTTP request +# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read metrics from one or many redis servers +# [[inputs.redis]] +# ## specify servers via a url matching: +# ## [protocol://][:password]@address[:port] +# ## e.g. +# ## tcp://localhost:6379 +# ## tcp://:password@192.168.99.100 +# ## unix:///var/run/redis.sock +# ## +# ## If no servers are specified, then localhost is used as the host. +# ## If no port is specified, 6379 is used +# servers = ["tcp://localhost:6379"] +# +# ## Optional. Specify redis commands to retrieve values +# # [[inputs.redis.commands]] +# # # The command to run where each argument is a separate element +# # command = ["get", "sample-key"] +# # # The field to store the result in +# # field = "sample-key-value" +# # # The type of the result +# # # Can be "string", "integer", or "float" +# # type = "string" +# +# ## specify server password +# # password = "s#cr@t%" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = true + + +# # Read metrics from one or many RethinkDB servers +# [[inputs.rethinkdb]] +# ## An array of URI to gather stats about. Specify an ip or hostname +# ## with optional port add password. ie, +# ## rethinkdb://user:auth_key@10.10.3.30:28105, +# ## rethinkdb://10.10.3.33:18832, +# ## 10.0.0.1:10000, etc. 
+# servers = ["127.0.0.1:28015"] +# ## +# ## If you use actual rethinkdb of > 2.3.0 with username/password authorization, +# ## protocol have to be named "rethinkdb2" - it will use 1_0 H. +# # servers = ["rethinkdb2://username:password@127.0.0.1:28015"] +# ## +# ## If you use older versions of rethinkdb (<2.2) with auth_key, protocol +# ## have to be named "rethinkdb". +# # servers = ["rethinkdb://username:auth_key@127.0.0.1:28015"] + + +# # Read metrics one or many Riak servers +# [[inputs.riak]] +# # Specify a list of one or more riak http servers +# servers = ["http://localhost:8098"] + + +# # Read API usage and limits for a Salesforce organisation +# [[inputs.salesforce]] +# ## specify your credentials +# ## +# username = "your_username" +# password = "your_password" +# ## +# ## (optional) security token +# # security_token = "your_security_token" +# ## +# ## (optional) environment type (sandbox or production) +# ## default is: production +# ## +# # environment = "production" +# ## +# ## (optional) API version (default: "39.0") +# ## +# # version = "39.0" + + +# # Monitor sensors, requires lm-sensors package +# [[inputs.sensors]] +# ## Remove numbers from field names. +# ## If true, a field name like 'temp1_input' will be changed to 'temp_input'. +# # remove_numbers = true +# +# ## Timeout is the maximum amount of time that the sensors command can run. +# # timeout = "5s" + + +# # Read metrics from storage devices supporting S.M.A.R.T. +# [[inputs.smart]] +# ## Optionally specify the path to the smartctl executable +# # path_smartctl = "/usr/bin/smartctl" +# +# ## Optionally specify the path to the nvme-cli executable +# # path_nvme = "/usr/bin/nvme" +# +# ## Optionally specify if vendor specific attributes should be propagated for NVMe disk case +# ## ["auto-on"] - automatically find and enable additional vendor specific disk info +# ## ["vendor1", "vendor2", ...] - e.g. "Intel" enable additional Intel specific disk info +# # enable_extensions = ["auto-on"] +# +# ## On most platforms used cli utilities requires root access. +# ## Setting 'use_sudo' to true will make use of sudo to run smartctl or nvme-cli. +# ## Sudo must be configured to allow the telegraf user to run smartctl or nvme-cli +# ## without a password. +# # use_sudo = false +# +# ## Skip checking disks in this power mode. Defaults to +# ## "standby" to not wake up disks that have stopped rotating. +# ## See --nocheck in the man pages for smartctl. +# ## smartctl version 5.41 and 5.42 have faulty detection of +# ## power mode and might require changing this value to +# ## "never" depending on your disks. +# # nocheck = "standby" +# +# ## Gather all returned S.M.A.R.T. attribute metrics and the detailed +# ## information from each drive into the 'smart_attribute' measurement. +# # attributes = false +# +# ## Optionally specify devices to exclude from reporting if disks auto-discovery is performed. +# # excludes = [ "/dev/pass6" ] +# +# ## Optionally specify devices and device type, if unset +# ## a scan (smartctl --scan and smartctl --scan -d nvme) for S.M.A.R.T. devices will be done +# ## and all found will be included except for the excluded in excludes. +# # devices = [ "/dev/ada0 -d atacam", "/dev/nvme0"] +# +# ## Timeout for the cli command to complete. +# # timeout = "30s" + + +# # Retrieves SNMP values from remote agents +# [[inputs.snmp]] +# ## Agent addresses to retrieve values from. +# ## format: agents = [":"] +# ## scheme: optional, either udp, udp4, udp6, tcp, tcp4, tcp6. 
+# ## default is udp +# ## port: optional +# ## example: agents = ["udp://127.0.0.1:161"] +# ## agents = ["tcp://127.0.0.1:161"] +# ## agents = ["udp4://v4only-snmp-agent"] +# agents = ["udp://127.0.0.1:161"] +# +# ## Timeout for each request. +# # timeout = "5s" +# +# ## SNMP version; can be 1, 2, or 3. +# # version = 2 +# +# ## Agent host tag; the tag used to reference the source host +# # agent_host_tag = "agent_host" +# +# ## SNMP community string. +# # community = "public" +# +# ## Number of retries to attempt. +# # retries = 3 +# +# ## The GETBULK max-repetitions parameter. +# # max_repetitions = 10 +# +# ## SNMPv3 authentication and encryption options. +# ## +# ## Security Name. +# # sec_name = "myuser" +# ## Authentication protocol; one of "MD5", "SHA", "SHA224", "SHA256", "SHA384", "SHA512" or "". +# # auth_protocol = "MD5" +# ## Authentication password. +# # auth_password = "pass" +# ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv". +# # sec_level = "authNoPriv" +# ## Context Name. +# # context_name = "" +# ## Privacy protocol used for encrypted messages; one of "DES", "AES" or "". +# # priv_protocol = "" +# ## Privacy password used for encrypted messages. +# # priv_password = "" +# +# ## Add fields and tables defining the variables you wish to collect. This +# ## example collects the system uptime and interface variables. Reference the +# ## full plugin documentation for configuration details. + + +# # DEPRECATED! PLEASE USE inputs.snmp INSTEAD. +# [[inputs.snmp_legacy]] +# ## Use 'oids.txt' file to translate oids to names +# ## To generate 'oids.txt' you need to run: +# ## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt +# ## Or if you have an other MIB folder with custom MIBs +# ## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt +# snmptranslate_file = "/tmp/oids.txt" +# [[inputs.snmp.host]] +# address = "192.168.2.2:161" +# # SNMP community +# community = "public" # default public +# # SNMP version (1, 2 or 3) +# # Version 3 not supported yet +# version = 2 # default 2 +# # SNMP response timeout +# timeout = 2.0 # default 2.0 +# # SNMP request retries +# retries = 2 # default 2 +# # Which get/bulk do you want to collect for this host +# collect = ["mybulk", "sysservices", "sysdescr"] +# # Simple list of OIDs to get, in addition to "collect" +# get_oids = [] +# +# [[inputs.snmp.host]] +# address = "192.168.2.3:161" +# community = "public" +# version = 2 +# timeout = 2.0 +# retries = 2 +# collect = ["mybulk"] +# get_oids = [ +# "ifNumber", +# ".1.3.6.1.2.1.1.3.0", +# ] +# +# [[inputs.snmp.get]] +# name = "ifnumber" +# oid = "ifNumber" +# +# [[inputs.snmp.get]] +# name = "interface_speed" +# oid = "ifSpeed" +# instance = "0" +# +# [[inputs.snmp.get]] +# name = "sysuptime" +# oid = ".1.3.6.1.2.1.1.3.0" +# unit = "second" +# +# [[inputs.snmp.bulk]] +# name = "mybulk" +# max_repetition = 127 +# oid = ".1.3.6.1.2.1.1" +# +# [[inputs.snmp.bulk]] +# name = "ifoutoctets" +# max_repetition = 127 +# oid = "ifOutOctets" +# +# [[inputs.snmp.host]] +# address = "192.168.2.13:161" +# #address = "127.0.0.1:161" +# community = "public" +# version = 2 +# timeout = 2.0 +# retries = 2 +# #collect = ["mybulk", "sysservices", "sysdescr", "systype"] +# collect = ["sysuptime" ] +# [[inputs.snmp.host.table]] +# name = "iftable3" +# include_instances = ["enp5s0", "eth1"] +# +# # SNMP TABLEs +# # table without mapping neither subtables +# [[inputs.snmp.table]] +# name = "iftable1" +# oid = ".1.3.6.1.2.1.31.1.1.1" +# +# # table without 
mapping but with subtables +# [[inputs.snmp.table]] +# name = "iftable2" +# oid = ".1.3.6.1.2.1.31.1.1.1" +# sub_tables = [".1.3.6.1.2.1.2.2.1.13"] +# +# # table with mapping but without subtables +# [[inputs.snmp.table]] +# name = "iftable3" +# oid = ".1.3.6.1.2.1.31.1.1.1" +# # if empty. get all instances +# mapping_table = ".1.3.6.1.2.1.31.1.1.1.1" +# # if empty, get all subtables +# +# # table with both mapping and subtables +# [[inputs.snmp.table]] +# name = "iftable4" +# oid = ".1.3.6.1.2.1.31.1.1.1" +# # if empty get all instances +# mapping_table = ".1.3.6.1.2.1.31.1.1.1.1" +# # if empty get all subtables +# # sub_tables could be not "real subtables" +# sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"] + + +# # Read stats from one or more Solr servers or cores +# [[inputs.solr]] +# ## specify a list of one or more Solr servers +# servers = ["http://localhost:8983"] +# +# ## specify a list of one or more Solr cores (default - all) +# # cores = ["main"] +# +# ## Optional HTTP Basic Auth Credentials +# # username = "username" +# # password = "pa$$word" + + +# # Gather timeseries from Google Cloud Platform v3 monitoring API +# [[inputs.stackdriver]] +# ## GCP Project +# project = "erudite-bloom-151019" +# +# ## Include timeseries that start with the given metric type. +# metric_type_prefix_include = [ +# "compute.googleapis.com/", +# ] +# +# ## Exclude timeseries that start with the given metric type. +# # metric_type_prefix_exclude = [] +# +# ## Many metrics are updated once per minute; it is recommended to override +# ## the agent level interval with a value of 1m or greater. +# interval = "1m" +# +# ## Maximum number of API calls to make per second. The quota for accounts +# ## varies, it can be viewed on the API dashboard: +# ## https://cloud.google.com/monitoring/quotas#quotas_and_limits +# # rate_limit = 14 +# +# ## The delay and window options control the number of points selected on +# ## each gather. When set, metrics are gathered between: +# ## start: now() - delay - window +# ## end: now() - delay +# # +# ## Collection delay; if set too low metrics may not yet be available. +# # delay = "5m" +# # +# ## If unset, the window will start at 1m and be updated dynamically to span +# ## the time between calls (approximately the length of the plugin interval). +# # window = "1m" +# +# ## TTL for cached list of metric types. This is the maximum amount of time +# ## it may take to discover new metrics. +# # cache_ttl = "1h" +# +# ## If true, raw bucket counts are collected for distribution value types. +# ## For a more lightweight collection, you may wish to disable and use +# ## distribution_aggregation_aligners instead. +# # gather_raw_distribution_buckets = true +# +# ## Aggregate functions to be used for metrics whose value type is +# ## distribution. These aggregate values are recorded in in addition to raw +# ## bucket counts; if they are enabled. +# ## +# ## For a list of aligner strings see: +# ## https://cloud.google.com/monitoring/api/ref_v3/rpc/google.monitoring.v3#aligner +# # distribution_aggregation_aligners = [ +# # "ALIGN_PERCENTILE_99", +# # "ALIGN_PERCENTILE_95", +# # "ALIGN_PERCENTILE_50", +# # ] +# +# ## Filters can be added to reduce the number of time series matched. All +# ## functions are supported: starts_with, ends_with, has_substring, and +# ## one_of. Only the '=' operator is supported. 
+# ## +# ## The logical operators when combining filters are defined statically using +# ## the following values: +# ## filter ::= {AND } +# ## resource_labels ::= {OR } +# ## metric_labels ::= {OR } +# ## +# ## For more details, see https://cloud.google.com/monitoring/api/v3/filters +# # +# ## Resource labels refine the time series selection with the following expression: +# ## resource.labels. = +# # [[inputs.stackdriver.filter.resource_labels]] +# # key = "instance_name" +# # value = 'starts_with("localhost")' +# # +# ## Metric labels refine the time series selection with the following expression: +# ## metric.labels. = +# # [[inputs.stackdriver.filter.metric_labels]] +# # key = "device_name" +# # value = 'one_of("sda", "sdb")' + + +# # Get synproxy counter statistics from procfs +# [[inputs.synproxy]] +# # no configuration + + +# # Sysstat metrics collector +# [[inputs.sysstat]] +# ## Path to the sadc command. +# # +# ## Common Defaults: +# ## Debian/Ubuntu: /usr/lib/sysstat/sadc +# ## Arch: /usr/lib/sa/sadc +# ## RHEL/CentOS: /usr/lib64/sa/sadc +# sadc_path = "/usr/lib/sa/sadc" # required +# +# ## Path to the sadf command, if it is not in PATH +# # sadf_path = "/usr/bin/sadf" +# +# ## Activities is a list of activities, that are passed as argument to the +# ## sadc collector utility (e.g: DISK, SNMP etc...) +# ## The more activities that are added, the more data is collected. +# # activities = ["DISK"] +# +# ## Group metrics to measurements. +# ## +# ## If group is false each metric will be prefixed with a description +# ## and represents itself a measurement. +# ## +# ## If Group is true, corresponding metrics are grouped to a single measurement. +# # group = true +# +# ## Options for the sadf command. The values on the left represent the sadf +# ## options and the values on the right their description (which are used for +# ## grouping and prefixing metrics). +# ## +# ## Run 'sar -h' or 'man sar' to find out the supported options for your +# ## sysstat version. +# [inputs.sysstat.options] +# -C = "cpu" +# -B = "paging" +# -b = "io" +# -d = "disk" # requires DISK activity +# "-n ALL" = "network" +# "-P ALL" = "per_cpu" +# -q = "queue" +# -R = "mem" +# -r = "mem_util" +# -S = "swap_util" +# -u = "cpu_util" +# -v = "inode" +# -W = "swap" +# -w = "task" +# # -H = "hugepages" # only available for newer linux distributions +# # "-I ALL" = "interrupts" # requires INT activity +# +# ## Device tags can be used to add additional tags for devices. +# ## For example the configuration below adds a tag vg with value rootvg for +# ## all metrics with sda devices. +# # [[inputs.sysstat.device_tags.sda]] +# # vg = "rootvg" + + +# # Gather systemd units state +# [[inputs.systemd_units]] +# ## Set timeout for systemctl execution +# # timeout = "1s" +# # +# ## Filter for a specific unit type, default is "service", other possible +# ## values are "socket", "target", "device", "mount", "automount", "swap", +# ## "timer", "path", "slice" and "scope ": +# # unittype = "service" +# # +# ## Filter for a specific pattern, default is "" (i.e. all), other possible +# ## values are valid pattern for systemctl, e.g. 
"a*" for all units with +# ## names starting with "a" +# # pattern = "" +# ## pattern = "telegraf* influxdb*" +# ## pattern = "a*" + + +# # Reads metrics from a Teamspeak 3 Server via ServerQuery +# [[inputs.teamspeak]] +# ## Server address for Teamspeak 3 ServerQuery +# # server = "127.0.0.1:10011" +# ## Username for ServerQuery +# username = "serverqueryuser" +# ## Password for ServerQuery +# password = "secret" +# ## Array of virtual servers +# # virtual_servers = [1] + + +# # Read metrics about temperature +# [[inputs.temp]] +# # no configuration + + +# # Read Tengine's basic status information (ngx_http_reqstat_module) +# [[inputs.tengine]] +# # An array of Tengine reqstat module URI to gather stats. +# urls = ["http://127.0.0.1/us"] +# +# # HTTP response timeout (default: 5s) +# # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.cer" +# # tls_key = "/etc/telegraf/key.key" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Gather metrics from the Tomcat server status page. +# [[inputs.tomcat]] +# ## URL of the Tomcat server status +# # url = "http://127.0.0.1:8080/manager/status/all?XML=true" +# +# ## HTTP Basic Auth Credentials +# # username = "tomcat" +# # password = "s3cret" +# +# ## Request timeout +# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Inserts sine and cosine waves for demonstration purposes +# [[inputs.trig]] +# ## Set the amplitude +# amplitude = 10.0 + + +# # Read Twemproxy stats data +# [[inputs.twemproxy]] +# ## Twemproxy stats address and port (no scheme) +# addr = "localhost:22222" +# ## Monitor pool name +# pools = ["redis_pool", "mc_pool"] + + +# # A plugin to collect stats from the Unbound DNS resolver +# [[inputs.unbound]] +# ## Address of server to connect to, read from unbound conf default, optionally ':port' +# ## Will lookup IP if given a hostname +# server = "127.0.0.1:8953" +# +# ## If running as a restricted user you can prepend sudo for additional access: +# # use_sudo = false +# +# ## The default location of the unbound-control binary can be overridden with: +# # binary = "/usr/sbin/unbound-control" +# +# ## The default location of the unbound config file can be overridden with: +# # config_file = "/etc/unbound/unbound.conf" +# +# ## The default timeout of 1s can be overridden with: +# # timeout = "1s" +# +# ## When set to true, thread metrics are tagged with the thread id. +# ## +# ## The default is false for backwards compatibility, and will be changed to +# ## true in a future version. It is recommended to set to true on new +# ## deployments. +# thread_as_tag = false + + +# # Read uWSGI metrics. +# [[inputs.uwsgi]] +# ## List with urls of uWSGI Stats servers. 
URL must match pattern: +# ## scheme://address[:port] +# ## +# ## For example: +# ## servers = ["tcp://localhost:5050", "http://localhost:1717", "unix:///tmp/statsock"] +# servers = ["tcp://127.0.0.1:1717"] +# +# ## General connection timeout +# # timeout = "5s" + + +# # A plugin to collect stats from Varnish HTTP Cache +# [[inputs.varnish]] +# ## If running as a restricted user you can prepend sudo for additional access: +# #use_sudo = false +# +# ## The default location of the varnishstat binary can be overridden with: +# binary = "/usr/bin/varnishstat" +# +# ## By default, telegraf gather stats for 3 metric points. +# ## Setting stats will override the defaults shown below. +# ## Glob matching can be used, ie, stats = ["MAIN.*"] +# ## stats may also be set to ["*"], which will collect all stats +# stats = ["MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"] +# +# ## Optional name for the varnish instance (or working directory) to query +# ## Usually append after -n in varnish cli +# # instance_name = instanceName +# +# ## Timeout for varnishstat command +# # timeout = "1s" + + +# # Collect Wireguard server interface and peer statistics +# [[inputs.wireguard]] +# ## Optional list of Wireguard device/interface names to query. +# ## If omitted, all Wireguard interfaces are queried. +# # devices = ["wg0"] + + +# # Monitor wifi signal strength and quality +# [[inputs.wireless]] +# ## Sets 'proc' directory path +# ## If not specified, then default is /proc +# # host_proc = "/proc" + + +# # Reads metrics from a SSL certificate +# [[inputs.x509_cert]] +# ## List certificate sources +# ## Prefix your entry with 'file://' if you intend to use relative paths +# sources = ["tcp://example.org:443", "https://influxdata.com:443", +# "udp://127.0.0.1:4433", "/etc/ssl/certs/ssl-cert-snakeoil.pem", +# "/etc/mycerts/*.mydomain.org.pem", "file:///path/to/*.pem"] +# +# ## Timeout for SSL connection +# # timeout = "5s" +# +# ## Pass a different name into the TLS request (Server Name Indication) +# ## example: server_name = "myhost.example.org" +# # server_name = "" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" + + +# # Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, pools and datasets +# [[inputs.zfs]] +# ## ZFS kstat path. Ignored on FreeBSD +# ## If not specified, then default is: +# # kstatPath = "/proc/spl/kstat/zfs" +# +# ## By default, telegraf gather all zfs stats +# ## If not specified, then default is: +# # kstatMetrics = ["arcstats", "zfetchstats", "vdev_cache_stats"] +# ## For Linux, the default is: +# # kstatMetrics = ["abdstats", "arcstats", "dnodestats", "dbufcachestats", +# # "dmu_tx", "fm", "vdev_mirror_stats", "zfetchstats", "zil"] +# ## By default, don't gather zpool stats +# # poolMetrics = false +# ## By default, don't gather zdataset stats +# # datasetMetrics = false + + +# # Reads 'mntr' stats from one or many zookeeper servers +# [[inputs.zookeeper]] +# ## An array of address to gather stats about. Specify an ip or hostname +# ## with port. ie localhost:2181, 10.0.0.1:2181, etc. +# +# ## If no servers are specified, then localhost is used as the host. +# ## If no port is specified, 2181 is used +# servers = [":2181"] +# +# ## Timeout for metric collections from all servers. Minimum timeout is "1s". 
+# # timeout = "5s" +# +# ## Optional TLS Config +# # enable_tls = true +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## If false, skip chain & host verification +# # insecure_skip_verify = true + + +############################################################################### +# SERVICE INPUT PLUGINS # +############################################################################### + + +# # Listener capable of handling KNX bus messages provided through a KNX-IP Interface. +# [[inputs.KNXListener]] +# ## Type of KNX-IP interface. +# ## Can be either "tunnel" or "router". +# # service_type = "tunnel" +# +# ## Address of the KNX-IP interface. +# service_address = "localhost:3671" +# +# ## Measurement definition(s) +# # [[inputs.knx_listener.measurement]] +# # ## Name of the measurement +# # name = "temperature" +# # ## Datapoint-Type (DPT) of the KNX messages +# # dpt = "9.001" +# # ## List of Group-Addresses (GAs) assigned to the measurement +# # addresses = ["5/5/1"] +# +# # [[inputs.knx_listener.measurement]] +# # name = "illumination" +# # dpt = "9.004" +# # addresses = ["5/5/3"] + + +# # Pull Metric Statistics from Aliyun CMS +# [[inputs.aliyuncms]] +# ## Aliyun Credentials +# ## Credentials are loaded in the following order +# ## 1) Ram RoleArn credential +# ## 2) AccessKey STS token credential +# ## 3) AccessKey credential +# ## 4) Ecs Ram Role credential +# ## 5) RSA keypair credential +# ## 6) Environment variables credential +# ## 7) Instance metadata credential +# +# # access_key_id = "" +# # access_key_secret = "" +# # access_key_sts_token = "" +# # role_arn = "" +# # role_session_name = "" +# # private_key = "" +# # public_key_id = "" +# # role_name = "" +# +# ## Specify the ali cloud region list to be queried for metrics and objects discovery +# ## If not set, all supported regions (see below) would be covered, it can provide a significant load on API, so the recommendation here +# ## is to limit the list as much as possible. Allowed values: https://www.alibabacloud.com/help/zh/doc-detail/40654.htm +# ## Default supported regions are: +# ## 21 items: cn-qingdao,cn-beijing,cn-zhangjiakou,cn-huhehaote,cn-hangzhou,cn-shanghai,cn-shenzhen, +# ## cn-heyuan,cn-chengdu,cn-hongkong,ap-southeast-1,ap-southeast-2,ap-southeast-3,ap-southeast-5, +# ## ap-south-1,ap-northeast-1,us-west-1,us-east-1,eu-central-1,eu-west-1,me-east-1 +# ## +# ## From discovery perspective it set the scope for object discovery, the discovered info can be used to enrich +# ## the metrics with objects attributes/tags. Discovery is supported not for all projects (if not supported, then +# ## it will be reported on the start - for example for 'acs_cdn' project: +# ## 'E! [inputs.aliyuncms] Discovery tool is not activated: no discovery support for project "acs_cdn"' ) +# ## Currently, discovery supported for the following projects: +# ## - acs_ecs_dashboard +# ## - acs_rds_dashboard +# ## - acs_slb_dashboard +# ## - acs_vpc_eip +# regions = ["cn-hongkong"] +# +# # The minimum period for AliyunCMS metrics is 1 minute (60s). However not all +# # metrics are made available to the 1 minute period. Some are collected at +# # 3 minute, 5 minute, or larger intervals. 
+# # See: https://help.aliyun.com/document_detail/51936.html?spm=a2c4g.11186623.2.18.2bc1750eeOw1Pv +# # Note that if a period is configured that is smaller than the minimum for a +# # particular metric, that metric will not be returned by the Aliyun OpenAPI +# # and will not be collected by Telegraf. +# # +# ## Requested AliyunCMS aggregation Period (required - must be a multiple of 60s) +# period = "5m" +# +# ## Collection Delay (required - must account for metrics availability via AliyunCMS API) +# delay = "1m" +# +# ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid +# ## gaps or overlap in pulled data +# interval = "5m" +# +# ## Metric Statistic Project (required) +# project = "acs_slb_dashboard" +# +# ## Maximum requests per second, default value is 200 +# ratelimit = 200 +# +# ## How often the discovery API call executed (default 1m) +# #discovery_interval = "1m" +# +# ## Metrics to Pull (Required) +# [[inputs.aliyuncms.metrics]] +# ## Metrics names to be requested, +# ## described here (per project): https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq +# names = ["InstanceActiveConnection", "InstanceNewConnection"] +# +# ## Dimension filters for Metric (these are optional). +# ## This allows to get additional metric dimension. If dimension is not specified it can be returned or +# ## the data can be aggregated - it depends on particular metric, you can find details here: https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq +# ## +# ## Note, that by default dimension filter includes the list of discovered objects in scope (if discovery is enabled) +# ## Values specified here would be added into the list of discovered objects. +# ## You can specify either single dimension: +# #dimensions = '{"instanceId": "p-example"}' +# +# ## Or you can specify several dimensions at once: +# #dimensions = '[{"instanceId": "p-example"},{"instanceId": "q-example"}]' +# +# ## Enrichment tags, can be added from discovery (if supported) +# ## Notation is : +# ## To figure out which fields are available, consult the Describe API per project. +# ## For example, for SLB: https://api.aliyun.com/#/?product=Slb&version=2014-05-15&api=DescribeLoadBalancers¶ms={}&tab=MOCK&lang=GO +# #tag_query_path = [ +# # "address:Address", +# # "name:LoadBalancerName", +# # "cluster_owner:Tags.Tag[?TagKey=='cs.cluster.name'].TagValue | [0]" +# # ] +# ## The following tags added by default: regionId (if discovery enabled), userId, instanceId. +# +# ## Allow metrics without discovery data, if discovery is enabled. If set to true, then metric without discovery +# ## data would be emitted, otherwise dropped. This cane be of help, in case debugging dimension filters, or partial coverage +# ## of discovery scope vs monitoring scope +# #allow_dps_without_discovery = false + + +# # AMQP consumer plugin +# [[inputs.amqp_consumer]] +# ## Broker to consume from. +# ## deprecated in 1.7; use the brokers option +# # url = "amqp://localhost:5672/influxdb" +# +# ## Brokers to consume from. If multiple brokers are specified a random broker +# ## will be selected anytime a connection is established. This can be +# ## helpful for load balancing when not using a dedicated load balancer. +# brokers = ["amqp://localhost:5672/influxdb"] +# +# ## Authentication credentials for the PLAIN auth_method. +# # username = "" +# # password = "" +# +# ## Name of the exchange to declare. If unset, no exchange will be declared. 
+# exchange = "telegraf" +# +# ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash". +# # exchange_type = "topic" +# +# ## If true, exchange will be passively declared. +# # exchange_passive = false +# +# ## Exchange durability can be either "transient" or "durable". +# # exchange_durability = "durable" +# +# ## Additional exchange arguments. +# # exchange_arguments = { } +# # exchange_arguments = {"hash_property" = "timestamp"} +# +# ## AMQP queue name. +# queue = "telegraf" +# +# ## AMQP queue durability can be "transient" or "durable". +# queue_durability = "durable" +# +# ## If true, queue will be passively declared. +# # queue_passive = false +# +# ## A binding between the exchange and queue using this binding key is +# ## created. If unset, no binding is created. +# binding_key = "#" +# +# ## Maximum number of messages server should give to the worker. +# # prefetch_count = 50 +# +# ## Maximum messages to read from the broker that have not been written by an +# ## output. For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message from the queue contains 10 metrics and the +# ## output metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 +# +# ## Auth method. PLAIN and EXTERNAL are supported +# ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as +# ## described here: https://www.rabbitmq.com/plugins.html +# # auth_method = "PLAIN" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Content encoding for message payloads, can be set to "gzip" to or +# ## "identity" to apply no encoding. +# # content_encoding = "identity" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Read Cassandra metrics through Jolokia +# [[inputs.cassandra]] +# ## DEPRECATED: The cassandra plugin has been deprecated. Please use the +# ## jolokia2 plugin instead. +# ## +# ## see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2 +# +# context = "/jolokia/read" +# ## List of cassandra servers exposing jolokia read service +# servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"] +# ## List of metrics collected on above servers +# ## Each metric consists of a jmx path. +# ## This will collect all heap memory usage metrics from the jvm and +# ## ReadLatency metrics for all keyspaces and tables. +# ## "type=Table" in the query works with Cassandra3.0. Older versions might +# ## need to use "type=ColumnFamily" +# metrics = [ +# "/java.lang:type=Memory/HeapMemoryUsage", +# "/org.apache.cassandra.metrics:type=Table,keyspace=*,scope=*,name=ReadLatency" +# ] + + +# # Cisco model-driven telemetry (MDT) input plugin for IOS XR, IOS XE and NX-OS platforms +# [[inputs.cisco_telemetry_mdt]] +# ## Telemetry transport can be "tcp" or "grpc". TLS is only supported when +# ## using the grpc transport. 
+# transport = "grpc" +# +# ## Address and port to host telemetry listener +# service_address = ":57000" +# +# ## Enable TLS; grpc transport only. +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Enable TLS client authentication and define allowed CA certificates; grpc +# ## transport only. +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Define (for certain nested telemetry measurements with embedded tags) which fields are tags +# # embedded_tags = ["Cisco-IOS-XR-qos-ma-oper:qos/interface-table/interface/input/service-policy-names/service-policy-instance/statistics/class-stats/class-name"] +# +# ## Define aliases to map telemetry encoding paths to simple measurement names +# [inputs.cisco_telemetry_mdt.aliases] +# ifstats = "ietf-interfaces:interfaces-state/interface/statistics" +# ##Define Property Xformation, please refer README and https://pubhub.devnetcloud.com/media/dme-docs-9-3-3/docs/appendix/ for Model details. +# [inputs.cisco_telemetry_mdt.dmes] +# ModTs = "ignore" +# CreateTs = "ignore" + + +# # Read metrics from one or many ClickHouse servers +# [[inputs.clickhouse]] +# ## Username for authorization on ClickHouse server +# ## example: username = "default" +# username = "default" +# +# ## Password for authorization on ClickHouse server +# ## example: password = "super_secret" +# +# ## HTTP(s) timeout while getting metrics values +# ## The timeout includes connection time, any redirects, and reading the response body. +# ## example: timeout = 1s +# # timeout = 5s +# +# ## List of servers for metrics scraping +# ## metrics scrape via HTTP(s) clickhouse interface +# ## https://clickhouse.tech/docs/en/interfaces/http/ +# ## example: servers = ["http://127.0.0.1:8123","https://custom-server.mdb.yandexcloud.net"] +# servers = ["http://127.0.0.1:8123"] +# +# ## If "auto_discovery"" is "true" plugin tries to connect to all servers available in the cluster +# ## with using same "user:password" described in "user" and "password" parameters +# ## and get this server hostname list from "system.clusters" table +# ## see +# ## - https://clickhouse.tech/docs/en/operations/system_tables/#system-clusters +# ## - https://clickhouse.tech/docs/en/operations/server_settings/settings/#server_settings_remote_servers +# ## - https://clickhouse.tech/docs/en/operations/table_engines/distributed/ +# ## - https://clickhouse.tech/docs/en/operations/table_engines/replication/#creating-replicated-tables +# ## example: auto_discovery = false +# # auto_discovery = true +# +# ## Filter cluster names in "system.clusters" when "auto_discovery" is "true" +# ## when this filter present then "WHERE cluster IN (...)" filter will apply +# ## please use only full cluster names here, regexp and glob filters is not allowed +# ## for "/etc/clickhouse-server/config.d/remote.xml" +# ## +# ## +# ## +# ## +# ## clickhouse-ru-1.local9000 +# ## clickhouse-ru-2.local9000 +# ## +# ## +# ## clickhouse-eu-1.local9000 +# ## clickhouse-eu-2.local9000 +# ## +# ## +# ## +# ## +# ## +# ## +# ## example: cluster_include = ["my-own-cluster"] +# # cluster_include = [] +# +# ## Filter cluster names in "system.clusters" when "auto_discovery" is "true" +# ## when this filter present then "WHERE cluster NOT IN (...)" filter will apply +# ## example: cluster_exclude = ["my-internal-not-discovered-cluster"] +# # cluster_exclude = [] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use 
TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read metrics from Google PubSub +# [[inputs.cloud_pubsub]] +# ## Required. Name of Google Cloud Platform (GCP) Project that owns +# ## the given PubSub subscription. +# project = "my-project" +# +# ## Required. Name of PubSub subscription to ingest metrics from. +# subscription = "my-subscription" +# +# ## Required. Data format to consume. +# ## Each data format has its own unique set of configuration options. +# ## Read more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" +# +# ## Optional. Filepath for GCP credentials JSON file to authorize calls to +# ## PubSub APIs. If not set explicitly, Telegraf will attempt to use +# ## Application Default Credentials, which is preferred. +# # credentials_file = "path/to/my/creds.json" +# +# ## Optional. Number of seconds to wait before attempting to restart the +# ## PubSub subscription receiver after an unexpected error. +# ## If the streaming pull for a PubSub Subscription fails (receiver), +# ## the agent attempts to restart receiving messages after this many seconds. +# # retry_delay_seconds = 5 +# +# ## Optional. Maximum byte length of a message to consume. +# ## Larger messages are dropped with an error. If less than 0 or unspecified, +# ## treated as no limit. +# # max_message_len = 1000000 +# +# ## Optional. Maximum messages to read from PubSub that have not been written +# ## to an output. Defaults to 1000. +# ## For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message contains 10 metrics and the output +# ## metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 +# +# ## The following are optional Subscription ReceiveSettings in PubSub. +# ## Read more about these values: +# ## https://godoc.org/cloud.google.com/go/pubsub#ReceiveSettings +# +# ## Optional. Maximum number of seconds for which a PubSub subscription +# ## should auto-extend the PubSub ACK deadline for each message. If less than +# ## 0, auto-extension is disabled. +# # max_extension = 0 +# +# ## Optional. Maximum number of unprocessed messages in PubSub +# ## (unacknowledged but not yet expired in PubSub). +# ## A value of 0 is treated as the default PubSub value. +# ## Negative values will be treated as unlimited. +# # max_outstanding_messages = 0 +# +# ## Optional. Maximum size in bytes of unprocessed messages in PubSub +# ## (unacknowledged but not yet expired in PubSub). +# ## A value of 0 is treated as the default PubSub value. +# ## Negative values will be treated as unlimited. +# # max_outstanding_bytes = 0 +# +# ## Optional. Max number of goroutines a PubSub Subscription receiver can spawn +# ## to pull messages from PubSub concurrently. This limit applies to each +# ## subscription separately and is treated as the PubSub default if less than +# ## 1. Note this setting does not limit the number of messages that can be +# ## processed concurrently (use "max_outstanding_messages" instead). +# # max_receiver_go_routines = 0 +# +# ## Optional. 
If true, Telegraf will attempt to base64 decode the +# ## PubSub message data before parsing +# # base64_data = false + + +# # Google Cloud Pub/Sub Push HTTP listener +# [[inputs.cloud_pubsub_push]] +# ## Address and port to host HTTP listener on +# service_address = ":8080" +# +# ## Application secret to verify messages originate from Cloud Pub/Sub +# # token = "" +# +# ## Path to listen to. +# # path = "/" +# +# ## Maximum duration before timing out read of the request +# # read_timeout = "10s" +# ## Maximum duration before timing out write of the response. This should be set to a value +# ## large enough that you can send at least 'metric_batch_size' number of messages within the +# ## duration. +# # write_timeout = "10s" +# +# ## Maximum allowed http request body size in bytes. +# ## 0 means to use the default of 524,288,000 bytes (500 mebibytes) +# # max_body_size = "500MB" +# +# ## Whether to add the pubsub metadata, such as message attributes and subscription, as tags. +# # add_meta = false +# +# ## Optional. Maximum messages to read from PubSub that have not been written +# ## to an output. Defaults to 1000. +# ## For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message contains 10 metrics and the output +# ## metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 +# +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Add service certificate and key +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Ingests files in a directory and then moves them to a target directory. +# [[inputs.directory_monitor]] +# ## The directory to monitor and read files from. +# directory = "" +# # +# ## The directory to move finished files to. +# finished_directory = "" +# # +# ## The directory to move files to upon file error. +# ## If not provided, erroring files will stay in the monitored directory. +# # error_directory = "" +# # +# ## The amount of time a file is allowed to sit in the directory before it is picked up. +# ## This time can generally be low, but if a very large file is written to the directory and copying is slow, +# ## set this higher so that the plugin waits until the file is fully copied to the directory. +# # directory_duration_threshold = "50ms" +# # +# ## A list of the only file names to monitor, if necessary. Supports regex. If left blank, all files are ingested. +# # files_to_monitor = ["^.*\.csv"] +# # +# ## A list of files to ignore, if necessary. Supports regex. +# # files_to_ignore = [".DS_Store"] +# # +# ## Maximum lines of the file to process that have not yet been written by the +# ## output. For best throughput set to the size of the output's metric_buffer_limit. +# ## Warning: setting this number higher than the output's metric_buffer_limit can cause dropped metrics. 
+# # max_buffered_metrics = 10000 +# # +# ## The maximum amount of file paths to queue up for processing at once, before waiting until files are processed to find more files. +# ## Lowering this value will result in *slightly* less memory use, with a potential sacrifice in speed efficiency, if absolutely necessary. +# # file_queue_size = 100000 +# # +# ## Name a tag containing the name of the file the data was parsed from. Leave empty +# ## to disable. Cautious when file name variation is high, this can increase the cardinality +# ## significantly. Read more about cardinality here: +# ## https://docs.influxdata.com/influxdb/cloud/reference/glossary/#series-cardinality +# # file_tag = "" +# # +# ## The dataformat to be read from the files. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# ## NOTE: We currently only support parsing newline-delimited JSON. See the format here: https://github.com/ndjson/ndjson-spec +# data_format = "influx" + + +# # Read logging output from the Docker engine +# [[inputs.docker_log]] +# ## Docker Endpoint +# ## To use TCP, set endpoint = "tcp://[ip]:[port]" +# ## To use environment variables (ie, docker-machine), set endpoint = "ENV" +# # endpoint = "unix:///var/run/docker.sock" +# +# ## When true, container logs are read from the beginning; otherwise +# ## reading begins at the end of the log. +# # from_beginning = false +# +# ## Timeout for Docker API calls. +# # timeout = "5s" +# +# ## Containers to include and exclude. Globs accepted. +# ## Note that an empty array for both will include all containers +# # container_name_include = [] +# # container_name_exclude = [] +# +# ## Container states to include and exclude. Globs accepted. +# ## When empty only containers in the "running" state will be captured. +# # container_state_include = [] +# # container_state_exclude = [] +# +# ## docker labels to include and exclude as tags. Globs accepted. +# ## Note that an empty array for both will include all labels as tags +# # docker_label_include = [] +# # docker_label_exclude = [] +# +# ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars +# source_tag = false +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Azure Event Hubs service input plugin +# [[inputs.eventhub_consumer]] +# ## The default behavior is to create a new Event Hub client from environment variables. +# ## This requires one of the following sets of environment variables to be set: +# ## +# ## 1) Expected Environment Variables: +# ## - "EVENTHUB_CONNECTION_STRING" +# ## +# ## 2) Expected Environment Variables: +# ## - "EVENTHUB_NAMESPACE" +# ## - "EVENTHUB_NAME" +# ## - "EVENTHUB_KEY_NAME" +# ## - "EVENTHUB_KEY_VALUE" +# +# ## 3) Expected Environment Variables: +# ## - "EVENTHUB_NAMESPACE" +# ## - "EVENTHUB_NAME" +# ## - "AZURE_TENANT_ID" +# ## - "AZURE_CLIENT_ID" +# ## - "AZURE_CLIENT_SECRET" +# +# ## Uncommenting the option below will create an Event Hub client based solely on the connection string. +# ## This can either be the associated environment variable or hard coded directly. +# ## If this option is uncommented, environment variables will be ignored. 
+# ## Connection string should contain EventHubName (EntityPath) +# # connection_string = "" +# +# ## Set persistence directory to a valid folder to use a file persister instead of an in-memory persister +# # persistence_dir = "" +# +# ## Change the default consumer group +# # consumer_group = "" +# +# ## By default the event hub receives all messages present on the broker, alternative modes can be set below. +# ## The timestamp should be in https://github.com/toml-lang/toml#offset-date-time format (RFC 3339). +# ## The 3 options below only apply if no valid persister is read from memory or file (e.g. first run). +# # from_timestamp = +# # latest = true +# +# ## Set a custom prefetch count for the receiver(s) +# # prefetch_count = 1000 +# +# ## Add an epoch to the receiver(s) +# # epoch = 0 +# +# ## Change to set a custom user agent, "telegraf" is used by default +# # user_agent = "telegraf" +# +# ## To consume from a specific partition, set the partition_ids option. +# ## An empty array will result in receiving from all partitions. +# # partition_ids = ["0","1"] +# +# ## Max undelivered messages +# # max_undelivered_messages = 1000 +# +# ## Set either option below to true to use a system property as timestamp. +# ## You have the choice between EnqueuedTime and IoTHubEnqueuedTime. +# ## It is recommended to use this setting when the data itself has no timestamp. +# # enqueued_time_as_ts = true +# # iot_hub_enqueued_time_as_ts = true +# +# ## Tags or fields to create from keys present in the application property bag. +# ## These could for example be set by message enrichments in Azure IoT Hub. +# # application_property_tags = [] +# # application_property_fields = [] +# +# ## Tag or field name to use for metadata +# ## By default all metadata is disabled +# # sequence_number_field = "SequenceNumber" +# # enqueued_time_field = "EnqueuedTime" +# # offset_field = "Offset" +# # partition_id_tag = "PartitionID" +# # partition_key_tag = "PartitionKey" +# # iot_hub_device_connection_id_tag = "IoTHubDeviceConnectionID" +# # iot_hub_auth_generation_id_tag = "IoTHubAuthGenerationID" +# # iot_hub_connection_auth_method_tag = "IoTHubConnectionAuthMethod" +# # iot_hub_connection_module_id_tag = "IoTHubConnectionModuleID" +# # iot_hub_enqueued_time_field = "IoTHubEnqueuedTime" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Run executable as long-running input plugin +# [[inputs.execd]] +# ## Program to run as daemon +# command = ["telegraf-smartctl", "-d", "/dev/sda"] +# +# ## Define how the process is signaled on each collection interval. +# ## Valid values are: +# ## "none" : Do not signal anything. +# ## The process must output metrics by itself. +# ## "STDIN" : Send a newline on STDIN. +# ## "SIGHUP" : Send a HUP signal. Not available on Windows. +# ## "SIGUSR1" : Send a USR1 signal. Not available on Windows. +# ## "SIGUSR2" : Send a USR2 signal. Not available on Windows. +# signal = "none" +# +# ## Delay before the process is restarted after an unexpected termination +# restart_delay = "10s" +# +# ## Data format to consume. 
+# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # gNMI telemetry input plugin +# [[inputs.gnmi]] +# ## Address and port of the gNMI GRPC server +# addresses = ["10.49.234.114:57777"] +# +# ## define credentials +# username = "cisco" +# password = "cisco" +# +# ## gNMI encoding requested (one of: "proto", "json", "json_ietf", "bytes") +# # encoding = "proto" +# +# ## redial in case of failures after +# redial = "10s" +# +# ## enable client-side TLS and define CA to authenticate the device +# # enable_tls = true +# # tls_ca = "/etc/telegraf/ca.pem" +# # insecure_skip_verify = true +# +# ## define client-side TLS certificate & key to authenticate to the device +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## gNMI subscription prefix (optional, can usually be left empty) +# ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths +# # origin = "" +# # prefix = "" +# # target = "" +# +# ## Define additional aliases to map telemetry encoding paths to simple measurement names +# #[inputs.gnmi.aliases] +# # ifcounters = "openconfig:/interfaces/interface/state/counters" +# +# [[inputs.gnmi.subscription]] +# ## Name of the measurement that will be emitted +# name = "ifcounters" +# +# ## Origin and path of the subscription +# ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths +# ## +# ## origin usually refers to a (YANG) data model implemented by the device +# ## and path to a specific substructure inside it that should be subscribed to (similar to an XPath) +# ## YANG models can be found e.g. here: https://github.com/YangModels/yang/tree/master/vendor/cisco/xr +# origin = "openconfig-interfaces" +# path = "/interfaces/interface/state/counters" +# +# # Subscription mode (one of: "target_defined", "sample", "on_change") and interval +# subscription_mode = "sample" +# sample_interval = "10s" +# +# ## Suppress redundant transmissions when measured values are unchanged +# # suppress_redundant = false +# +# ## If suppression is enabled, send updates at least every X seconds anyway +# # heartbeat_interval = "60s" + + +# # Accept metrics over InfluxDB 1.x HTTP API +# [[inputs.http_listener]] +# ## Address and port to host InfluxDB listener on +# service_address = ":8186" +# +# ## maximum duration before timing out read of the request +# read_timeout = "10s" +# ## maximum duration before timing out write of the response +# write_timeout = "10s" +# +# ## Maximum allowed HTTP request body size in bytes. +# ## 0 means to use the default of 32MiB. +# max_body_size = "32MiB" +# +# ## Optional tag name used to store the database. +# ## If the write has a database in the query string then it will be kept in this tag name. +# ## This tag can be used in downstream outputs. +# ## The default value of nothing means it will be off and the database will not be recorded. +# # database_tag = "" +# +# ## If set the retention policy specified in the write query will be added as +# ## the value of this tag name. 
+# # retention_policy_tag = "" +# +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections +# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Add service certificate and key +# tls_cert = "/etc/telegraf/cert.pem" +# tls_key = "/etc/telegraf/key.pem" +# +# ## Optional username and password to accept for HTTP basic authentication. +# ## You probably want to make sure you have TLS configured above for this. +# # basic_username = "foobar" +# # basic_password = "barfoo" + + +# # Generic HTTP write listener +# [[inputs.http_listener_v2]] +# ## Address and port to host HTTP listener on +# service_address = ":8080" +# +# ## Path to listen to. +# ## This option is deprecated and only available for backward-compatibility. Please use paths instead. +# # path = "" +# +# ## Paths to listen to. +# # paths = ["/telegraf"] +# +# ## Save path as http_listener_v2_path tag if set to true +# # path_tag = false +# +# ## HTTP methods to accept. +# # methods = ["POST", "PUT"] +# +# ## maximum duration before timing out read of the request +# # read_timeout = "10s" +# ## maximum duration before timing out write of the response +# # write_timeout = "10s" +# +# ## Maximum allowed http request body size in bytes. +# ## 0 means to use the default of 524,288,000 bytes (500 mebibytes) +# # max_body_size = "500MB" +# +# ## Part of the request to consume. Available options are "body" and +# ## "query". +# # data_source = "body" +# +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Add service certificate and key +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Optional username and password to accept for HTTP basic authentication. +# ## You probably want to make sure you have TLS configured above for this. +# # basic_username = "foobar" +# # basic_password = "barfoo" +# +# ## Optional setting to map http headers into tags +# ## If the http header is not present on the request, no corresponding tag will be added +# ## If multiple instances of the http header are present, only the first value will be used +# # http_header_tags = {"HTTP_HEADER" = "TAG_NAME"} +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Accept metrics over InfluxDB 1.x HTTP API +# [[inputs.influxdb_listener]] +# ## Address and port to host InfluxDB listener on +# service_address = ":8186" +# +# ## maximum duration before timing out read of the request +# read_timeout = "10s" +# ## maximum duration before timing out write of the response +# write_timeout = "10s" +# +# ## Maximum allowed HTTP request body size in bytes. +# ## 0 means to use the default of 32MiB. +# max_body_size = "32MiB" +# +# ## Optional tag name used to store the database. +# ## If the write has a database in the query string then it will be kept in this tag name. +# ## This tag can be used in downstream outputs. +# ## The default value of nothing means it will be off and the database will not be recorded. +# # database_tag = "" +# +# ## If set the retention policy specified in the write query will be added as +# ## the value of this tag name. 
+# # retention_policy_tag = "" +# +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections +# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Add service certificate and key +# tls_cert = "/etc/telegraf/cert.pem" +# tls_key = "/etc/telegraf/key.pem" +# +# ## Optional username and password to accept for HTTP basic authentication. +# ## You probably want to make sure you have TLS configured above for this. +# # basic_username = "foobar" +# # basic_password = "barfoo" + + +# # Accept metrics over InfluxDB 2.x HTTP API +# [[inputs.influxdb_v2_listener]] +# ## Address and port to host InfluxDB listener on +# ## (Double check the port. Could be 9999 if using OSS Beta) +# service_address = ":8086" +# +# ## Maximum allowed HTTP request body size in bytes. +# ## 0 means to use the default of 32MiB. +# # max_body_size = "32MiB" +# +# ## Optional tag to determine the bucket. +# ## If the write has a bucket in the query string then it will be kept in this tag name. +# ## This tag can be used in downstream outputs. +# ## The default value of nothing means it will be off and the database will not be recorded. +# # bucket_tag = "" +# +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Add service certificate and key +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Optional token to accept for HTTP authentication. +# ## You probably want to make sure you have TLS configured above for this. +# # token = "some-long-shared-secret-token" + + +# # Intel Resource Director Technology plugin +# [[inputs.intel_rdt]] +# ## Optionally set sampling interval to Nx100ms. +# ## This value is propagated to pqos tool. Interval format is defined by pqos itself. +# ## If not provided or provided 0, will be set to 10 = 10x100ms = 1s. +# # sampling_interval = "10" +# +# ## Optionally specify the path to pqos executable. +# ## If not provided, auto discovery will be performed. +# # pqos_path = "/usr/local/bin/pqos" +# +# ## Optionally specify if IPC and LLC_Misses metrics shouldn't be propagated. +# ## If not provided, default value is false. +# # shortened_metrics = false +# +# ## Specify the list of groups of CPU core(s) to be provided as pqos input. +# ## Mandatory if processes aren't set and forbidden if processes are specified. +# ## e.g. ["0-3", "4,5,6"] or ["1-3,4"] +# # cores = ["0-3"] +# +# ## Specify the list of processes for which Metrics will be collected. +# ## Mandatory if cores aren't set and forbidden if cores are specified. +# ## e.g. ["qemu", "pmd"] +# # processes = ["process"] +# +# ## Specify if the pqos process should be called with sudo. +# ## Mandatory if the telegraf process does not run as root. +# # use_sudo = false + + +# # Read JTI OpenConfig Telemetry from listed sensors +# [[inputs.jti_openconfig_telemetry]] +# ## List of device addresses to collect telemetry from +# servers = ["localhost:1883"] +# +# ## Authentication details. Username and password are must if device expects +# ## authentication. 
Client ID must be unique when connecting from multiple instances +# ## of telegraf to the same device +# username = "user" +# password = "pass" +# client_id = "telegraf" +# +# ## Frequency to get data +# sample_frequency = "1000ms" +# +# ## Sensors to subscribe for +# ## A identifier for each sensor can be provided in path by separating with space +# ## Else sensor path will be used as identifier +# ## When identifier is used, we can provide a list of space separated sensors. +# ## A single subscription will be created with all these sensors and data will +# ## be saved to measurement with this identifier name +# sensors = [ +# "/interfaces/", +# "collection /components/ /lldp", +# ] +# +# ## We allow specifying sensor group level reporting rate. To do this, specify the +# ## reporting rate in Duration at the beginning of sensor paths / collection +# ## name. For entries without reporting rate, we use configured sample frequency +# sensors = [ +# "1000ms customReporting /interfaces /lldp", +# "2000ms collection /components", +# "/interfaces", +# ] +# +# ## Optional TLS Config +# # enable_tls = true +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Delay between retry attempts of failed RPC calls or streams. Defaults to 1000ms. +# ## Failed streams/calls will not be retried if 0 is provided +# retry_delay = "1000ms" +# +# ## To treat all string values as tags, set this to true +# str_as_tags = false + + +# # Read metrics from Kafka topics +# [[inputs.kafka_consumer]] +# ## Kafka brokers. +# brokers = ["localhost:9092"] +# +# ## Topics to consume. +# topics = ["telegraf"] +# +# ## When set this tag will be added to all metrics with the topic as the value. +# # topic_tag = "" +# +# ## Optional Client id +# # client_id = "Telegraf" +# +# ## Set the minimal supported Kafka version. Setting this enables the use of new +# ## Kafka features and APIs. Must be 0.10.2.0 or greater. +# ## ex: version = "1.1.0" +# # version = "" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## SASL authentication credentials. These settings should typically be used +# ## with TLS encryption enabled +# # sasl_username = "kafka" +# # sasl_password = "secret" +# +# ## Optional SASL: +# ## one of: OAUTHBEARER, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, GSSAPI +# ## (defaults to PLAIN) +# # sasl_mechanism = "" +# +# ## used if sasl_mechanism is GSSAPI (experimental) +# # sasl_gssapi_service_name = "" +# # ## One of: KRB5_USER_AUTH and KRB5_KEYTAB_AUTH +# # sasl_gssapi_auth_type = "KRB5_USER_AUTH" +# # sasl_gssapi_kerberos_config_path = "/" +# # sasl_gssapi_realm = "realm" +# # sasl_gssapi_key_tab_path = "" +# # sasl_gssapi_disable_pafxfast = false +# +# ## used if sasl_mechanism is OAUTHBEARER (experimental) +# # sasl_access_token = "" +# +# ## SASL protocol version. When connecting to Azure EventHub set to 0. +# # sasl_version = 1 +# +# # Disable Kafka metadata full fetch +# # metadata_full = false +# +# ## Name of the consumer group. +# # consumer_group = "telegraf_metrics_consumers" +# +# ## Compression codec represents the various compression codecs recognized by +# ## Kafka in messages. 
+# ## 0 : None +# ## 1 : Gzip +# ## 2 : Snappy +# ## 3 : LZ4 +# ## 4 : ZSTD +# # compression_codec = 0 +# +# ## Initial offset position; one of "oldest" or "newest". +# # offset = "oldest" +# +# ## Consumer group partition assignment strategy; one of "range", "roundrobin" or "sticky". +# # balance_strategy = "range" +# +# ## Maximum length of a message to consume, in bytes (default 0/unlimited); +# ## larger messages are dropped +# max_message_len = 1000000 +# +# ## Maximum messages to read from the broker that have not been written by an +# ## output. For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message from the queue contains 10 metrics and the +# ## output metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Read metrics from Kafka topic(s) +# [[inputs.kafka_consumer_legacy]] +# ## topic(s) to consume +# topics = ["telegraf"] +# +# ## an array of Zookeeper connection strings +# zookeeper_peers = ["localhost:2181"] +# +# ## Zookeeper Chroot +# zookeeper_chroot = "" +# +# ## the name of the consumer group +# consumer_group = "telegraf_metrics_consumers" +# +# ## Offset (must be either "oldest" or "newest") +# offset = "oldest" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" +# +# ## Maximum length of a message to consume, in bytes (default 0/unlimited); +# ## larger messages are dropped +# max_message_len = 65536 + + +# # Configuration for the AWS Kinesis input. +# [[inputs.kinesis_consumer]] +# ## Amazon REGION of kinesis endpoint. +# region = "ap-southeast-2" +# +# ## Amazon Credentials +# ## Credentials are loaded in the following order +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile +# # access_key = "" +# # secret_key = "" +# # token = "" +# # role_arn = "" +# # web_identity_token_file = "" +# # role_session_name = "" +# # profile = "" +# # shared_credential_file = "" +# +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" +# +# ## Kinesis StreamName must exist prior to starting telegraf. +# streamname = "StreamName" +# +# ## Shard iterator type (only 'TRIM_HORIZON' and 'LATEST' currently supported) +# # shard_iterator_type = "TRIM_HORIZON" +# +# ## Maximum messages to read from the broker that have not been written by an +# ## output. 
For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message from the queue contains 10 metrics and the +# ## output metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" +# +# ## +# ## The content encoding of the data from kinesis +# ## If you are processing a cloudwatch logs kinesis stream then set this to "gzip" +# ## as AWS compresses cloudwatch log data before it is sent to kinesis (aws +# ## also base64 encodes the zip byte data before pushing to the stream. The base64 decoding +# ## is done automatically by the golang sdk, as data is read from kinesis) +# ## +# # content_encoding = "identity" +# +# ## Optional +# ## Configuration for a dynamodb checkpoint +# [inputs.kinesis_consumer.checkpoint_dynamodb] +# ## unique name for this consumer +# app_name = "default" +# table_name = "default" + + +# # Listener capable of handling KNX bus messages provided through a KNX-IP Interface. +# [[inputs.knx_listener]] +# ## Type of KNX-IP interface. +# ## Can be either "tunnel" or "router". +# # service_type = "tunnel" +# +# ## Address of the KNX-IP interface. +# service_address = "localhost:3671" +# +# ## Measurement definition(s) +# # [[inputs.knx_listener.measurement]] +# # ## Name of the measurement +# # name = "temperature" +# # ## Datapoint-Type (DPT) of the KNX messages +# # dpt = "9.001" +# # ## List of Group-Addresses (GAs) assigned to the measurement +# # addresses = ["5/5/1"] +# +# # [[inputs.knx_listener.measurement]] +# # name = "illumination" +# # dpt = "9.004" +# # addresses = ["5/5/3"] + + +# # Read metrics off Arista LANZ, via socket +# [[inputs.lanz]] +# ## URL to Arista LANZ endpoint +# servers = [ +# "tcp://127.0.0.1:50001" +# ] + + +# # Stream and parse log file(s). +# [[inputs.logparser]] +# ## Log files to parse. +# ## These accept standard unix glob matching rules, but with the addition of +# ## ** as a "super asterisk". ie: +# ## /var/log/**.log -> recursively find all .log files in /var/log +# ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log +# ## /var/log/apache.log -> only tail the apache log file +# files = ["/var/log/apache/access.log"] +# +# ## Read files that currently exist from the beginning. Files that are created +# ## while telegraf is running (and that match the "files" globs) will always +# ## be read from the beginning. +# from_beginning = false +# +# ## Method used to watch for file updates. Can be either "inotify" or "poll". +# # watch_method = "inotify" +# +# ## Parse logstash-style "grok" patterns: +# [inputs.logparser.grok] +# ## This is a list of patterns to check the given log file(s) for. +# ## Note that adding patterns here increases processing time. The most +# ## efficient configuration is to have one pattern per logparser. +# ## Other common built-in patterns are: +# ## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs) +# ## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent) +# patterns = ["%{COMBINED_LOG_FORMAT}"] +# +# ## Name of the outputted measurement name. 
+# measurement = "apache_access_log" +# +# ## Full path(s) to custom pattern files. +# custom_pattern_files = [] +# +# ## Custom patterns can also be defined here. Put one pattern per line. +# custom_patterns = ''' +# ''' +# +# ## Timezone allows you to provide an override for timestamps that +# ## don't already include an offset +# ## e.g. 04/06/2016 12:41:45 data one two 5.43µs +# ## +# ## Default: "" which renders UTC +# ## Options are as follows: +# ## 1. Local -- interpret based on machine localtime +# ## 2. "Canada/Eastern" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones +# ## 3. UTC -- or blank/unspecified, will return timestamp in UTC +# # timezone = "Canada/Eastern" +# +# ## When set to "disable", timestamp will not incremented if there is a +# ## duplicate. +# # unique_timestamp = "auto" + + +# # Read metrics from MQTT topic(s) +# [[inputs.mqtt_consumer]] +# ## Broker URLs for the MQTT server or cluster. To connect to multiple +# ## clusters or standalone servers, use a seperate plugin instance. +# ## example: servers = ["tcp://localhost:1883"] +# ## servers = ["ssl://localhost:1883"] +# ## servers = ["ws://localhost:1883"] +# servers = ["tcp://127.0.0.1:1883"] +# +# ## Topics that will be subscribed to. +# topics = [ +# "telegraf/host01/cpu", +# "telegraf/+/mem", +# "sensors/#", +# ] +# +# ## The message topic will be stored in a tag specified by this value. If set +# ## to the empty string no topic tag will be created. +# # topic_tag = "topic" +# +# ## QoS policy for messages +# ## 0 = at most once +# ## 1 = at least once +# ## 2 = exactly once +# ## +# ## When using a QoS of 1 or 2, you should enable persistent_session to allow +# ## resuming unacknowledged messages. +# # qos = 0 +# +# ## Connection timeout for initial connection in seconds +# # connection_timeout = "30s" +# +# ## Maximum messages to read from the broker that have not been written by an +# ## output. For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message from the queue contains 10 metrics and the +# ## output metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 +# +# ## Persistent session disables clearing of the client session on connection. +# ## In order for this option to work you must also set client_id to identify +# ## the client. To receive messages that arrived while the client is offline, +# ## also set the qos option to 1 or 2 and don't forget to also set the QoS when +# ## publishing. +# # persistent_session = false +# +# ## If unset, a random client ID will be generated. +# # client_id = "" +# +# ## Username and password to connect MQTT server. +# # username = "telegraf" +# # password = "metricsmetricsmetricsmetrics" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Data format to consume. 
+# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Read metrics from NATS subject(s) +# [[inputs.nats_consumer]] +# ## urls of NATS servers +# servers = ["nats://localhost:4222"] +# +# ## subject(s) to consume +# subjects = ["telegraf"] +# +# ## name a queue group +# queue_group = "telegraf_consumers" +# +# ## Optional credentials +# # username = "" +# # password = "" +# +# ## Optional NATS 2.0 and NATS NGS compatible user credentials +# # credentials = "/etc/telegraf/nats.creds" +# +# ## Use Transport Layer Security +# # secure = false +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Sets the limits for pending msgs and bytes for each subscription +# ## These shouldn't need to be adjusted except in very high throughput scenarios +# # pending_message_limit = 65536 +# # pending_bytes_limit = 67108864 +# +# ## Maximum messages to read from the broker that have not been written by an +# ## output. For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message from the queue contains 10 metrics and the +# ## output metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Read NSQ topic for metrics. +# [[inputs.nsq_consumer]] +# ## Server option still works but is deprecated, we just prepend it to the nsqd array. +# # server = "localhost:4150" +# +# ## An array representing the NSQD TCP HTTP Endpoints +# nsqd = ["localhost:4150"] +# +# ## An array representing the NSQLookupd HTTP Endpoints +# nsqlookupd = ["localhost:4161"] +# topic = "telegraf" +# channel = "consumer" +# max_in_flight = 100 +# +# ## Maximum messages to read from the broker that have not been written by an +# ## output. For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message from the queue contains 10 metrics and the +# ## output metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 +# +# ## Data format to consume. 
+# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Receive OpenTelemetry traces, metrics, and logs over gRPC +# [[inputs.opentelemetry]] +# ## Override the default (0.0.0.0:4317) destination OpenTelemetry gRPC service +# ## address:port +# # service_address = "0.0.0.0:4317" +# +# ## Override the default (5s) new connection timeout +# # timeout = "5s" +# +# ## Override the default (prometheus-v1) metrics schema. +# ## Supports: "prometheus-v1", "prometheus-v2" +# ## For more information about the alternatives, read the Prometheus input +# ## plugin notes. +# # metrics_schema = "prometheus-v1" +# +# ## Optional TLS Config. +# ## For advanced options: https://github.com/influxdata/telegraf/blob/v1.18.3/docs/TLS.md +# ## +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections. +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# ## Add service certificate and key. +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" + + +# # Read metrics from one or many pgbouncer servers +# [[inputs.pgbouncer]] +# ## specify address via a url matching: +# ## postgres://[pqgotest[:password]]@localhost[/dbname]\ +# ## ?sslmode=[disable|verify-ca|verify-full] +# ## or a simple string: +# ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production +# ## +# ## All connection parameters are optional. +# ## +# address = "host=localhost user=pgbouncer sslmode=disable" + + +# # Read metrics from one or many postgresql servers +# [[inputs.postgresql]] +# ## specify address via a url matching: +# ## postgres://[pqgotest[:password]]@localhost[/dbname]\ +# ## ?sslmode=[disable|verify-ca|verify-full] +# ## or a simple string: +# ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production +# ## +# ## All connection parameters are optional. +# ## +# ## Without the dbname parameter, the driver will default to a database +# ## with the same name as the user. This dbname is just for instantiating a +# ## connection with the server and doesn't restrict the databases we are trying +# ## to grab metrics for. +# ## +# address = "host=localhost user=postgres sslmode=disable" +# ## A custom name for the database that will be used as the "server" tag in the +# ## measurement output. If not specified, a default one generated from +# ## the connection address is used. +# # outputaddress = "db01" +# +# ## connection configuration. +# ## maxlifetime - specify the maximum lifetime of a connection. +# ## default is forever (0s) +# max_lifetime = "0s" +# +# ## A list of databases to explicitly ignore. If not specified, metrics for all +# ## databases are gathered. Do NOT use with the 'databases' option. +# # ignored_databases = ["postgres", "template0", "template1"] +# +# ## A list of databases to pull metrics about. If not specified, metrics for all +# ## databases are gathered. Do NOT use with the 'ignored_databases' option. +# # databases = ["app_production", "testing"] + + +# # Read metrics from one or many postgresql servers +# [[inputs.postgresql_extensible]] +# ## specify address via a url matching: +# ## postgres://[pqgotest[:password]]@localhost[/dbname]\ +# ## ?sslmode=[disable|verify-ca|verify-full] +# ## or a simple string: +# ## host=localhost user=pqgotest password=... sslmode=... 
dbname=app_production +# # +# ## All connection parameters are optional. # +# ## Without the dbname parameter, the driver will default to a database +# ## with the same name as the user. This dbname is just for instantiating a +# ## connection with the server and doesn't restrict the databases we are trying +# ## to grab metrics for. +# # +# address = "host=localhost user=postgres sslmode=disable" +# +# ## connection configuration. +# ## maxlifetime - specify the maximum lifetime of a connection. +# ## default is forever (0s) +# max_lifetime = "0s" +# +# ## A list of databases to pull metrics about. If not specified, metrics for all +# ## databases are gathered. +# ## databases = ["app_production", "testing"] +# # +# ## A custom name for the database that will be used as the "server" tag in the +# ## measurement output. If not specified, a default one generated from +# ## the connection address is used. +# # outputaddress = "db01" +# # +# ## Define the toml config where the sql queries are stored +# ## New queries can be added, if the withdbname is set to true and there is no +# ## databases defined in the 'databases field', the sql query is ended by a +# ## 'is not null' in order to make the query succeed. +# ## Example : +# ## The sqlquery : "SELECT * FROM pg_stat_database where datname" become +# ## "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')" +# ## because the databases variable was set to ['postgres', 'pgbench' ] and the +# ## withdbname was true. Be careful that if the withdbname is set to false you +# ## don't have to define the where clause (aka with the dbname) the tagvalue +# ## field is used to define custom tags (separated by commas) +# ## The optional "measurement" value can be used to override the default +# ## output measurement name ("postgresql"). +# ## +# ## The script option can be used to specify the .sql file path. +# ## If script and sqlquery options specified at same time, sqlquery will be used +# ## +# ## the tagvalue field is used to define custom tags (separated by comas). +# ## the query is expected to return columns which match the names of the +# ## defined tags. The values in these columns must be of a string-type, +# ## a number-type or a blob-type. +# ## +# ## The timestamp field is used to override the data points timestamp value. By +# ## default, all rows inserted with current time. By setting a timestamp column, +# ## the row will be inserted with that column's value. +# ## +# ## Structure : +# ## [[inputs.postgresql_extensible.query]] +# ## sqlquery string +# ## version string +# ## withdbname boolean +# ## tagvalue string (comma separated) +# ## measurement string +# ## timestamp string +# [[inputs.postgresql_extensible.query]] +# sqlquery="SELECT * FROM pg_stat_database" +# version=901 +# withdbname=false +# tagvalue="" +# measurement="" +# [[inputs.postgresql_extensible.query]] +# sqlquery="SELECT * FROM pg_stat_bgwriter" +# version=901 +# withdbname=false +# tagvalue="postgresql.stats" + + +# # Read metrics from one or many prometheus clients +# [[inputs.prometheus]] +# ## An array of urls to scrape metrics from. +# urls = ["http://localhost:9100/metrics"] +# +# ## Metric version controls the mapping from Prometheus metrics into +# ## Telegraf metrics. When using the prometheus_client output, use the same +# ## value in both plugins to ensure metrics are round-tripped without +# ## modification. 
+# ## +# ## example: metric_version = 1; +# ## metric_version = 2; recommended version +# # metric_version = 1 +# +# ## Url tag name (tag containing scrapped url. optional, default is "url") +# # url_tag = "url" +# +# ## Whether the timestamp of the scraped metrics will be ignored. +# ## If set to true, the gather time will be used. +# # ignore_timestamp = false +# +# ## An array of Kubernetes services to scrape metrics from. +# # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"] +# +# ## Kubernetes config file to create client from. +# # kube_config = "/path/to/kubernetes.config" +# +# ## Scrape Kubernetes pods for the following prometheus annotations: +# ## - prometheus.io/scrape: Enable scraping for this pod +# ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to +# ## set this to 'https' & most likely set the tls config. +# ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. +# ## - prometheus.io/port: If port is not 9102 use this annotation +# # monitor_kubernetes_pods = true +# ## Get the list of pods to scrape with either the scope of +# ## - cluster: the kubernetes watch api (default, no need to specify) +# ## - node: the local cadvisor api; for scalability. Note that the config node_ip or the environment variable NODE_IP must be set to the host IP. +# # pod_scrape_scope = "cluster" +# ## Only for node scrape scope: node IP of the node that telegraf is running on. +# ## Either this config or the environment variable NODE_IP must be set. +# # node_ip = "10.180.1.1" +# ## Only for node scrape scope: interval in seconds for how often to get updated pod list for scraping. +# ## Default is 60 seconds. +# # pod_scrape_interval = 60 +# ## Restricts Kubernetes monitoring to a single namespace +# ## ex: monitor_kubernetes_pods_namespace = "default" +# # monitor_kubernetes_pods_namespace = "" +# # label selector to target pods which have the label +# # kubernetes_label_selector = "env=dev,app=nginx" +# # field selector to target pods +# # eg. To scrape pods on a specific node +# # kubernetes_field_selector = "spec.nodeName=$HOSTNAME" +# +# ## Scrape Services available in Consul Catalog +# # [inputs.prometheus.consul] +# # enabled = true +# # agent = "http://localhost:8500" +# # query_interval = "5m" +# +# # [[inputs.prometheus.consul.query]] +# # name = "a service name" +# # tag = "a service tag" +# # url = 'http://{{if ne .ServiceAddress ""}}{{.ServiceAddress}}{{else}}{{.Address}}{{end}}:{{.ServicePort}}/{{with .ServiceMeta.metrics_path}}{{.}}{{else}}metrics{{end}}' +# # [inputs.prometheus.consul.query.tags] +# # host = "{{.Node}}" +# +# ## Use bearer token for authorization. ('bearer_token' takes priority) +# # bearer_token = "/path/to/bearer/token" +# ## OR +# # bearer_token_string = "abc_123" +# +# ## HTTP Basic Authentication username and password. ('bearer_token' and +# ## 'bearer_token_string' take priority) +# # username = "" +# # password = "" +# +# ## Specify timeout duration for slower prometheus clients (default is 3s) +# # response_timeout = "3s" +# +# ## Optional TLS Config +# # tls_ca = /path/to/cafile +# # tls_cert = /path/to/certfile +# # tls_key = /path/to/keyfile +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # RAS plugin exposes counter metrics for Machine Check Errors provided by RASDaemon (sqlite3 output is required). +# [[inputs.ras]] +# ## Optional path to RASDaemon sqlite3 database. 
+# ## Default: /var/lib/rasdaemon/ras-mc_event.db +# # db_path = "" + + +# # Riemann protobuff listener. +# [[inputs.riemann_listener]] +# ## URL to listen on. +# ## Default is "tcp://:5555" +# # service_address = "tcp://:8094" +# # service_address = "tcp://127.0.0.1:http" +# # service_address = "tcp4://:8094" +# # service_address = "tcp6://:8094" +# # service_address = "tcp6://[2001:db8::1]:8094" +# +# ## Maximum number of concurrent connections. +# ## 0 (default) is unlimited. +# # max_connections = 1024 +# ## Read timeout. +# ## 0 (default) is unlimited. +# # read_timeout = "30s" +# ## Optional TLS configuration. +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Enables client authentication if set. +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# ## Maximum socket buffer size (in bytes when no unit specified). +# # read_buffer_size = "64KiB" +# ## Period between keep alive probes. +# ## 0 disables keep alive probes. +# ## Defaults to the OS configuration. +# # keep_alive_period = "5m" + + +# # SFlow V5 Protocol Listener +# [[inputs.sflow]] +# ## Address to listen for sFlow packets. +# ## example: service_address = "udp://:6343" +# ## service_address = "udp4://:6343" +# ## service_address = "udp6://:6343" +# service_address = "udp://:6343" +# +# ## Set the size of the operating system's receive buffer. +# ## example: read_buffer_size = "64KiB" +# # read_buffer_size = "" + + +# # Receive SNMP traps +# [[inputs.snmp_trap]] +# ## Transport, local address, and port to listen on. Transport must +# ## be "udp://". Omit local address to listen on all interfaces. +# ## example: "udp://127.0.0.1:1234" +# ## +# ## Special permissions may be required to listen on a port less than +# ## 1024. See README.md for details +# ## +# # service_address = "udp://:162" +# ## +# ## Path to mib files +# # path = ["/usr/share/snmp/mibs"] +# ## +# ## Timeout running snmptranslate command +# # timeout = "5s" +# ## Snmp version, defaults to 2c +# # version = "2c" +# ## SNMPv3 authentication and encryption options. +# ## +# ## Security Name. +# # sec_name = "myuser" +# ## Authentication protocol; one of "MD5", "SHA" or "". +# # auth_protocol = "MD5" +# ## Authentication password. +# # auth_password = "pass" +# ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv". +# # sec_level = "authNoPriv" +# ## Privacy protocol used for encrypted messages; one of "DES", "AES", "AES192", "AES192C", "AES256", "AES256C" or "". +# # priv_protocol = "" +# ## Privacy password used for encrypted messages. +# # priv_password = "" + + +# # Generic socket listener capable of handling multiple socket types. +# [[inputs.socket_listener]] +# ## URL to listen on +# # service_address = "tcp://:8094" +# # service_address = "tcp://127.0.0.1:http" +# # service_address = "tcp4://:8094" +# # service_address = "tcp6://:8094" +# # service_address = "tcp6://[2001:db8::1]:8094" +# # service_address = "udp://:8094" +# # service_address = "udp4://:8094" +# # service_address = "udp6://:8094" +# # service_address = "unix:///tmp/telegraf.sock" +# # service_address = "unixgram:///tmp/telegraf.sock" +# +# ## Change the file mode bits on unix sockets. These permissions may not be +# ## respected by some platforms, to safely restrict write permissions it is best +# ## to place the socket into a directory that has previously been created +# ## with the desired permissions. +# ## ex: socket_mode = "777" +# # socket_mode = "" +# +# ## Maximum number of concurrent connections. 
+# ## Only applies to stream sockets (e.g. TCP). +# ## 0 (default) is unlimited. +# # max_connections = 1024 +# +# ## Read timeout. +# ## Only applies to stream sockets (e.g. TCP). +# ## 0 (default) is unlimited. +# # read_timeout = "30s" +# +# ## Optional TLS configuration. +# ## Only applies to stream sockets (e.g. TCP). +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Enables client authentication if set. +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Maximum socket buffer size (in bytes when no unit specified). +# ## For stream sockets, once the buffer fills up, the sender will start backing up. +# ## For datagram sockets, once the buffer fills up, metrics will start dropping. +# ## Defaults to the OS default. +# # read_buffer_size = "64KiB" +# +# ## Period between keep alive probes. +# ## Only applies to TCP sockets. +# ## 0 disables keep alive probes. +# ## Defaults to the OS configuration. +# # keep_alive_period = "5m" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# # data_format = "influx" +# +# ## Content encoding for message payloads, can be set to "gzip" to or +# ## "identity" to apply no encoding. +# # content_encoding = "identity" + + +# # Read metrics from SQL queries +# [[inputs.sql]] +# ## Database Driver +# ## See https://github.com/influxdata/telegraf/blob/master/docs/SQL_DRIVERS_INPUT.md for +# ## a list of supported drivers. +# driver = "mysql" +# +# ## Data source name for connecting +# ## The syntax and supported options depends on selected driver. +# dsn = "username:password@mysqlserver:3307/dbname?param=value" +# +# ## Timeout for any operation +# ## Note that the timeout for queries is per query not per gather. +# # timeout = "5s" +# +# ## Connection time limits +# ## By default the maximum idle time and maximum lifetime of a connection is unlimited, i.e. the connections +# ## will not be closed automatically. If you specify a positive time, the connections will be closed after +# ## idleing or existing for at least that amount of time, respectively. +# # connection_max_idle_time = "0s" +# # connection_max_life_time = "0s" +# +# ## Connection count limits +# ## By default the number of open connections is not limited and the number of maximum idle connections +# ## will be inferred from the number of queries specified. If you specify a positive number for any of the +# ## two options, connections will be closed when reaching the specified limit. The number of idle connections +# ## will be clipped to the maximum number of connections limit if any. +# # connection_max_open = 0 +# # connection_max_idle = auto +# +# [[inputs.sql.query]] +# ## Query to perform on the server +# query="SELECT user,state,latency,score FROM Scoreboard WHERE application > 0" +# ## Alternatively to specifying the query directly you can select a file here containing the SQL query. +# ## Only one of 'query' and 'query_script' can be specified! +# # query_script = "/path/to/sql/script.sql" +# +# ## Name of the measurement +# ## In case both measurement and 'measurement_col' are given, the latter takes precedence. +# # measurement = "sql" +# +# ## Column name containing the name of the measurement +# ## If given, this will take precedence over the 'measurement' setting. 
In case a query result +# ## does not contain the specified column, we fall-back to the 'measurement' setting. +# # measurement_column = "" +# +# ## Column name containing the time of the measurement +# ## If ommited, the time of the query will be used. +# # time_column = "" +# +# ## Format of the time contained in 'time_col' +# ## The time must be 'unix', 'unix_ms', 'unix_us', 'unix_ns', or a golang time format. +# ## See https://golang.org/pkg/time/#Time.Format for details. +# # time_format = "unix" +# +# ## Column names containing tags +# ## An empty include list will reject all columns and an empty exclude list will not exclude any column. +# ## I.e. by default no columns will be returned as tag and the tags are empty. +# # tag_columns_include = [] +# # tag_columns_exclude = [] +# +# ## Column names containing fields (explicit types) +# ## Convert the given columns to the corresponding type. Explicit type conversions take precedence over +# ## the automatic (driver-based) conversion below. +# ## NOTE: Columns should not be specified for multiple types or the resulting type is undefined. +# # field_columns_float = [] +# # field_columns_int = [] +# # field_columns_uint = [] +# # field_columns_bool = [] +# # field_columns_string = [] +# +# ## Column names containing fields (automatic types) +# ## An empty include list is equivalent to '[*]' and all returned columns will be accepted. An empty +# ## exclude list will not exclude any column. I.e. by default all columns will be returned as fields. +# ## NOTE: We rely on the database driver to perform automatic datatype conversion. +# # field_columns_include = [] +# # field_columns_exclude = [] + + +# # Read metrics from Microsoft SQL Server +# [[inputs.sqlserver]] +# ## Specify instances to monitor with a list of connection strings. +# ## All connection parameters are optional. +# ## By default, the host is localhost, listening on default port, TCP 1433. +# ## for Windows, the user is the currently running AD user (SSO). +# ## See https://github.com/denisenkom/go-mssqldb for detailed connection +# ## parameters, in particular, tls connections can be created like so: +# ## "encrypt=true;certificate=;hostNameInCertificate=" +# servers = [ +# "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;", +# ] +# +# ## Authentication method +# ## valid methods: "connection_string", "AAD" +# # auth_method = "connection_string" +# +# ## "database_type" enables a specific set of queries depending on the database type. If specified, it replaces azuredb = true/false and query_version = 2 +# ## In the config file, the sql server plugin section should be repeated each with a set of servers for a specific database_type. +# ## Possible values for database_type are - "AzureSQLDB" or "AzureSQLManagedInstance" or "SQLServer" +# +# ## Queries enabled by default for database_type = "AzureSQLDB" are - +# ## AzureSQLDBResourceStats, AzureSQLDBResourceGovernance, AzureSQLDBWaitStats, AzureSQLDBDatabaseIO, AzureSQLDBServerProperties, +# ## AzureSQLDBOsWaitstats, AzureSQLDBMemoryClerks, AzureSQLDBPerformanceCounters, AzureSQLDBRequests, AzureSQLDBSchedulers +# +# # database_type = "AzureSQLDB" +# +# ## A list of queries to include. If not specified, all the above listed queries are used. +# # include_query = [] +# +# ## A list of queries to explicitly ignore. 
+# # exclude_query = [] +# +# ## Queries enabled by default for database_type = "AzureSQLManagedInstance" are - +# ## AzureSQLMIResourceStats, AzureSQLMIResourceGovernance, AzureSQLMIDatabaseIO, AzureSQLMIServerProperties, AzureSQLMIOsWaitstats, +# ## AzureSQLMIMemoryClerks, AzureSQLMIPerformanceCounters, AzureSQLMIRequests, AzureSQLMISchedulers +# +# # database_type = "AzureSQLManagedInstance" +# +# # include_query = [] +# +# # exclude_query = [] +# +# ## Queries enabled by default for database_type = "SQLServer" are - +# ## SQLServerPerformanceCounters, SQLServerWaitStatsCategorized, SQLServerDatabaseIO, SQLServerProperties, SQLServerMemoryClerks, +# ## SQLServerSchedulers, SQLServerRequests, SQLServerVolumeSpace, SQLServerCpu +# +# database_type = "SQLServer" +# +# include_query = [] +# +# ## SQLServerAvailabilityReplicaStates and SQLServerDatabaseReplicaStates are optional queries and hence excluded here as default +# exclude_query = ["SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"] +# +# ## Following are old config settings, you may use them only if you are using the earlier flavor of queries, however it is recommended to use +# ## the new mechanism of identifying the database_type there by use it's corresponding queries +# +# ## Optional parameter, setting this to 2 will use a new version +# ## of the collection queries that break compatibility with the original +# ## dashboards. +# ## Version 2 - is compatible from SQL Server 2012 and later versions and also for SQL Azure DB +# # query_version = 2 +# +# ## If you are using AzureDB, setting this to true will gather resource utilization metrics +# # azuredb = false + + +# # Statsd UDP/TCP Server +# [[inputs.statsd]] +# ## Protocol, must be "tcp", "udp", "udp4" or "udp6" (default=udp) +# protocol = "udp" +# +# ## MaxTCPConnection - applicable when protocol is set to tcp (default=250) +# max_tcp_connections = 250 +# +# ## Enable TCP keep alive probes (default=false) +# tcp_keep_alive = false +# +# ## Specifies the keep-alive period for an active network connection. +# ## Only applies to TCP sockets and will be ignored if tcp_keep_alive is false. +# ## Defaults to the OS configuration. +# # tcp_keep_alive_period = "2h" +# +# ## Address and port to host UDP listener on +# service_address = ":8125" +# +# ## The following configuration options control when telegraf clears it's cache +# ## of previous values. If set to false, then telegraf will only clear it's +# ## cache when the daemon is restarted. 
+# ## Reset gauges every interval (default=true) +# delete_gauges = true +# ## Reset counters every interval (default=true) +# delete_counters = true +# ## Reset sets every interval (default=true) +# delete_sets = true +# ## Reset timings & histograms every interval (default=true) +# delete_timings = true +# +# ## Percentiles to calculate for timing & histogram stats +# percentiles = [50.0, 90.0, 99.0, 99.9, 99.95, 100.0] +# +# ## separator to use between elements of a statsd metric +# metric_separator = "_" +# +# ## Parses tags in the datadog statsd format +# ## http://docs.datadoghq.com/guides/dogstatsd/ +# parse_data_dog_tags = false +# +# ## Parses datadog extensions to the statsd format +# datadog_extensions = false +# +# ## Parses distributions metric as specified in the datadog statsd format +# ## https://docs.datadoghq.com/developers/metrics/types/?tab=distribution#definition +# datadog_distributions = false +# +# ## Statsd data translation templates, more info can be read here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/TEMPLATE_PATTERN.md +# # templates = [ +# # "cpu.* measurement*" +# # ] +# +# ## Number of UDP messages allowed to queue up, once filled, +# ## the statsd server will start dropping packets +# allowed_pending_messages = 10000 +# +# ## Number of timing/histogram values to track per-measurement in the +# ## calculation of percentiles. Raising this limit increases the accuracy +# ## of percentiles but also increases the memory usage and cpu time. +# percentile_limit = 1000 +# +# ## Max duration (TTL) for each metric to stay cached/reported without being updated. +# #max_ttl = "1000h" + + +# # Suricata stats and alerts plugin +# [[inputs.suricata]] +# ## Data sink for Suricata stats and alerts logs +# # This is expected to be a filename of a +# # unix socket to be created for listening. +# source = "/var/run/suricata-stats.sock" +# +# # Delimiter for flattening field keys, e.g. subitem "alert" of "detect" +# # becomes "detect_alert" when delimiter is "_". +# delimiter = "_" +# +# ## Detect alert logs +# # alerts = false + + +# # Accepts syslog messages following RFC5424 format with transports as per RFC5426, RFC5425, or RFC6587 +# [[inputs.syslog]] +# ## Specify an ip or hostname with port - eg., tcp://localhost:6514, tcp://10.0.0.1:6514 +# ## Protocol, address and port to host the syslog receiver. +# ## If no host is specified, then localhost is used. +# ## If no port is specified, 6514 is used (RFC5425#section-4.1). +# server = "tcp://:6514" +# +# ## TLS Config +# # tls_allowed_cacerts = ["/etc/telegraf/ca.pem"] +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Period between keep alive probes. +# ## 0 disables keep alive probes. +# ## Defaults to the OS configuration. +# ## Only applies to stream sockets (e.g. TCP). +# # keep_alive_period = "5m" +# +# ## Maximum number of concurrent connections (default = 0). +# ## 0 means unlimited. +# ## Only applies to stream sockets (e.g. TCP). +# # max_connections = 1024 +# +# ## Read timeout is the maximum time allowed for reading a single message (default = 5s). +# ## 0 means unlimited. +# # read_timeout = "5s" +# +# ## The framing technique with which it is expected that messages are transported (default = "octet-counting"). +# ## Whether the messages come using the octect-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1), +# ## or the non-transparent framing technique (RFC6587#section-3.4.2). +# ## Must be one of "octet-counting", "non-transparent". 
+# # framing = "octet-counting" +# +# ## The trailer to be expected in case of non-transparent framing (default = "LF"). +# ## Must be one of "LF", or "NUL". +# # trailer = "LF" +# +# ## Whether to parse in best effort mode or not (default = false). +# ## By default best effort parsing is off. +# # best_effort = false +# +# ## The RFC standard to use for message parsing +# ## By default RFC5424 is used. RFC3164 only supports UDP transport (no streaming support) +# ## Must be one of "RFC5424", or "RFC3164". +# # syslog_standard = "RFC5424" +# +# ## Character to prepend to SD-PARAMs (default = "_"). +# ## A syslog message can contain multiple parameters and multiple identifiers within structured data section. +# ## Eg., [id1 name1="val1" name2="val2"][id2 name1="val1" nameA="valA"] +# ## For each combination a field is created. +# ## Its name is created concatenating identifier, sdparam_separator, and parameter name. +# # sdparam_separator = "_" + + +# # Parse the new lines appended to a file +# [[inputs.tail]] +# ## File names or a pattern to tail. +# ## These accept standard unix glob matching rules, but with the addition of +# ## ** as a "super asterisk". ie: +# ## "/var/log/**.log" -> recursively find all .log files in /var/log +# ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log +# ## "/var/log/apache.log" -> just tail the apache log file +# ## "/var/log/log[!1-2]* -> tail files without 1-2 +# ## "/var/log/log[^1-2]* -> identical behavior as above +# ## See https://github.com/gobwas/glob for more examples +# ## +# files = ["/var/mymetrics.out"] +# +# ## Read file from beginning. +# # from_beginning = false +# +# ## Whether file is a named pipe +# # pipe = false +# +# ## Method used to watch for file updates. Can be either "inotify" or "poll". +# # watch_method = "inotify" +# +# ## Maximum lines of the file to process that have not yet be written by the +# ## output. For best throughput set based on the number of metrics on each +# ## line and the size of the output's metric_batch_size. +# # max_undelivered_lines = 1000 +# +# ## Character encoding to use when interpreting the file contents. Invalid +# ## characters are replaced using the unicode replacement character. When set +# ## to the empty string the data is not decoded to text. +# ## ex: character_encoding = "utf-8" +# ## character_encoding = "utf-16le" +# ## character_encoding = "utf-16be" +# ## character_encoding = "" +# # character_encoding = "" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" +# +# ## Set the tag that will contain the path of the tailed file. If you don't want this tag, set it to an empty string. +# # path_tag = "path" +# +# ## multiline parser/codec +# ## https://www.elastic.co/guide/en/logstash/2.4/plugins-filters-multiline.html +# #[inputs.tail.multiline] +# ## The pattern should be a regexp which matches what you believe to be an +# ## indicator that the field is part of an event consisting of multiple lines of log data. +# #pattern = "^\s" +# +# ## This field must be either "previous" or "next". +# ## If a line matches the pattern, "previous" indicates that it belongs to the previous line, +# ## whereas "next" indicates that the line belongs to the next one. +# #match_which_line = "previous" +# +# ## The invert_match field can be true or false (defaults to false). 
+# ## If true, a message not matching the pattern will constitute a match of the multiline +# ## filter and the what will be applied. (vice-versa is also true) +# #invert_match = false +# +# ## After the specified timeout, this plugin sends a multiline event even if no new pattern +# ## is found to start a new event. The default timeout is 5s. +# #timeout = 5s + + +# # Generic TCP listener +# [[inputs.tcp_listener]] +# # DEPRECATED: the TCP listener plugin has been deprecated in favor of the +# # socket_listener plugin +# # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener + + +# # Generic UDP listener +# [[inputs.udp_listener]] +# # DEPRECATED: the TCP listener plugin has been deprecated in favor of the +# # socket_listener plugin +# # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener + + +# # Read metrics from VMware vCenter +# [[inputs.vsphere]] +# ## List of vCenter URLs to be monitored. These three lines must be uncommented +# ## and edited for the plugin to work. +# vcenters = [ "https://vcenter.local/sdk" ] +# username = "user@corp.local" +# password = "secret" +# +# ## VMs +# ## Typical VM metrics (if omitted or empty, all metrics are collected) +# # vm_include = [ "/*/vm/**"] # Inventory path to VMs to collect (by default all are collected) +# # vm_exclude = [] # Inventory paths to exclude +# vm_metric_include = [ +# "cpu.demand.average", +# "cpu.idle.summation", +# "cpu.latency.average", +# "cpu.readiness.average", +# "cpu.ready.summation", +# "cpu.run.summation", +# "cpu.usagemhz.average", +# "cpu.used.summation", +# "cpu.wait.summation", +# "mem.active.average", +# "mem.granted.average", +# "mem.latency.average", +# "mem.swapin.average", +# "mem.swapinRate.average", +# "mem.swapout.average", +# "mem.swapoutRate.average", +# "mem.usage.average", +# "mem.vmmemctl.average", +# "net.bytesRx.average", +# "net.bytesTx.average", +# "net.droppedRx.summation", +# "net.droppedTx.summation", +# "net.usage.average", +# "power.power.average", +# "virtualDisk.numberReadAveraged.average", +# "virtualDisk.numberWriteAveraged.average", +# "virtualDisk.read.average", +# "virtualDisk.readOIO.latest", +# "virtualDisk.throughput.usage.average", +# "virtualDisk.totalReadLatency.average", +# "virtualDisk.totalWriteLatency.average", +# "virtualDisk.write.average", +# "virtualDisk.writeOIO.latest", +# "sys.uptime.latest", +# ] +# # vm_metric_exclude = [] ## Nothing is excluded by default +# # vm_instances = true ## true by default +# +# ## Hosts +# ## Typical host metrics (if omitted or empty, all metrics are collected) +# # host_include = [ "/*/host/**"] # Inventory path to hosts to collect (by default all are collected) +# # host_exclude [] # Inventory paths to exclude +# host_metric_include = [ +# "cpu.coreUtilization.average", +# "cpu.costop.summation", +# "cpu.demand.average", +# "cpu.idle.summation", +# "cpu.latency.average", +# "cpu.readiness.average", +# "cpu.ready.summation", +# "cpu.swapwait.summation", +# "cpu.usage.average", +# "cpu.usagemhz.average", +# "cpu.used.summation", +# "cpu.utilization.average", +# "cpu.wait.summation", +# "disk.deviceReadLatency.average", +# "disk.deviceWriteLatency.average", +# "disk.kernelReadLatency.average", +# "disk.kernelWriteLatency.average", +# "disk.numberReadAveraged.average", +# "disk.numberWriteAveraged.average", +# "disk.read.average", +# "disk.totalReadLatency.average", +# "disk.totalWriteLatency.average", +# "disk.write.average", +# "mem.active.average", +# 
"mem.latency.average", +# "mem.state.latest", +# "mem.swapin.average", +# "mem.swapinRate.average", +# "mem.swapout.average", +# "mem.swapoutRate.average", +# "mem.totalCapacity.average", +# "mem.usage.average", +# "mem.vmmemctl.average", +# "net.bytesRx.average", +# "net.bytesTx.average", +# "net.droppedRx.summation", +# "net.droppedTx.summation", +# "net.errorsRx.summation", +# "net.errorsTx.summation", +# "net.usage.average", +# "power.power.average", +# "storageAdapter.numberReadAveraged.average", +# "storageAdapter.numberWriteAveraged.average", +# "storageAdapter.read.average", +# "storageAdapter.write.average", +# "sys.uptime.latest", +# ] +# ## Collect IP addresses? Valid values are "ipv4" and "ipv6" +# # ip_addresses = ["ipv6", "ipv4" ] +# +# # host_metric_exclude = [] ## Nothing excluded by default +# # host_instances = true ## true by default +# +# +# ## Clusters +# # cluster_include = [ "/*/host/**"] # Inventory path to clusters to collect (by default all are collected) +# # cluster_exclude = [] # Inventory paths to exclude +# # cluster_metric_include = [] ## if omitted or empty, all metrics are collected +# # cluster_metric_exclude = [] ## Nothing excluded by default +# # cluster_instances = false ## false by default +# +# ## Datastores +# # datastore_include = [ "/*/datastore/**"] # Inventory path to datastores to collect (by default all are collected) +# # datastore_exclude = [] # Inventory paths to exclude +# # datastore_metric_include = [] ## if omitted or empty, all metrics are collected +# # datastore_metric_exclude = [] ## Nothing excluded by default +# # datastore_instances = false ## false by default +# +# ## Datacenters +# # datacenter_include = [ "/*/host/**"] # Inventory path to clusters to collect (by default all are collected) +# # datacenter_exclude = [] # Inventory paths to exclude +# datacenter_metric_include = [] ## if omitted or empty, all metrics are collected +# datacenter_metric_exclude = [ "*" ] ## Datacenters are not collected by default. +# # datacenter_instances = false ## false by default +# +# ## Plugin Settings +# ## separator character to use for measurement and field names (default: "_") +# # separator = "_" +# +# ## number of objects to retrieve per query for realtime resources (vms and hosts) +# ## set to 64 for vCenter 5.5 and 6.0 (default: 256) +# # max_query_objects = 256 +# +# ## number of metrics to retrieve per query for non-realtime resources (clusters and datastores) +# ## set to 64 for vCenter 5.5 and 6.0 (default: 256) +# # max_query_metrics = 256 +# +# ## number of go routines to use for collection and discovery of objects and metrics +# # collect_concurrency = 1 +# # discover_concurrency = 1 +# +# ## the interval before (re)discovering objects subject to metrics collection (default: 300s) +# # object_discovery_interval = "300s" +# +# ## timeout applies to any of the api request made to vcenter +# # timeout = "60s" +# +# ## When set to true, all samples are sent as integers. This makes the output +# ## data types backwards compatible with Telegraf 1.9 or lower. Normally all +# ## samples from vCenter, with the exception of percentages, are integer +# ## values, but under some conditions, some averaging takes place internally in +# ## the plugin. Setting this flag to "false" will send values as floats to +# ## preserve the full precision when averaging takes place. 
+# # use_int_samples = true +# +# ## Custom attributes from vCenter can be very useful for queries in order to slice the +# ## metrics along different dimension and for forming ad-hoc relationships. They are disabled +# ## by default, since they can add a considerable amount of tags to the resulting metrics. To +# ## enable, simply set custom_attribute_exclude to [] (empty set) and use custom_attribute_include +# ## to select the attributes you want to include. +# ## By default, since they can add a considerable amount of tags to the resulting metrics. To +# ## enable, simply set custom_attribute_exclude to [] (empty set) and use custom_attribute_include +# ## to select the attributes you want to include. +# # custom_attribute_include = [] +# # custom_attribute_exclude = ["*"] +# +# ## The number of vSphere 5 minute metric collection cycles to look back for non-realtime metrics. In +# ## some versions (6.7, 7.0 and possible more), certain metrics, such as cluster metrics, may be reported +# ## with a significant delay (>30min). If this happens, try increasing this number. Please note that increasing +# ## it too much may cause performance issues. +# # metric_lookback = 3 +# +# ## Optional SSL Config +# # ssl_ca = "/path/to/cafile" +# # ssl_cert = "/path/to/certfile" +# # ssl_key = "/path/to/keyfile" +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false +# +# ## The Historical Interval value must match EXACTLY the interval in the daily +# # "Interval Duration" found on the VCenter server under Configure > General > Statistics > Statistic intervals +# # historical_interval = "5m" + + +# # A Webhooks Event collector +# [[inputs.webhooks]] +# ## Address and port to host Webhook listener on +# service_address = ":1619" +# +# [inputs.webhooks.filestack] +# path = "/filestack" +# +# [inputs.webhooks.github] +# path = "/github" +# # secret = "" +# +# [inputs.webhooks.mandrill] +# path = "/mandrill" +# +# [inputs.webhooks.rollbar] +# path = "/rollbar" +# +# [inputs.webhooks.papertrail] +# path = "/papertrail" +# +# [inputs.webhooks.particle] +# path = "/particle" + + +# # This plugin implements the Zipkin http server to gather trace and timing data needed to troubleshoot latency problems in microservice architectures. +# [[inputs.zipkin]] +# # path = "/api/v1/spans" # URL path for span data +# # port = 9411 # Port on which Telegraf listens diff --git a/docs/index.html b/docs/index.html deleted file mode 100644 index 096edb31..00000000 --- a/docs/index.html +++ /dev/null @@ -1,11 +0,0 @@ - - - - Nightingale - - - -

Hello, Nightingale

- - - \ No newline at end of file diff --git a/docs/plugin.example.sh b/docs/plugin.example.sh deleted file mode 100755 index 6d764ab8..00000000 --- a/docs/plugin.example.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/sh - -now=$(date +%s) - -echo '[ - { - "metric": "plugin_example_gauge", - "tags": { - "type": "testcase", - "author": "ulric" - }, - "value": '${now}', - "type": "gauge" - }, - { - "metric": "plugin_example_rate", - "tags": { - "type": "testcase", - "author": "ulric" - }, - "value": '${now}', - "type": "rate" - }, - { - "metric": "plugin_example_increase", - "tags": { - "type": "testcase", - "author": "ulric" - }, - "value": '${now}', - "type": "increase" - } -]' \ No newline at end of file diff --git a/etc/alert_rule/blackbox_exporter b/etc/alert_rule/blackbox_exporter deleted file mode 100644 index e5b7c9a7..00000000 --- a/etc/alert_rule/blackbox_exporter +++ /dev/null @@ -1,191 +0,0 @@ -[ - { - "name": "dns解析时间超过2秒", - "type": 1, - "expression": { - "evaluation_interval": 10, - "promql": "probe_dns_lookup_time_seconds>2" - }, - "status": 0, - "enable_stime": "00:00", - "enable_etime": "23:59", - "enable_days_of_week": "1 2 3 4 5 6 7", - "recovery_notify": 0, - "priority": 3, - "notify_channels": "", - "runbook_url": "", - "note": "", - "create_at": 1626935980, - "alert_duration": 60, - "notify_users_detail": null, - "notify_groups_detail": null - }, - { - "name": "https证书过期时间小于7天", - "type": 1, - "expression": { - "evaluation_interval": 10, - "promql": "(probe_ssl_earliest_cert_expiry - time()) / 3600 / 24 <7" - }, - "status": 0, - "enable_stime": "00:00", - "enable_etime": "23:59", - "enable_days_of_week": "1 2 3 4 5 6 7", - "recovery_notify": 0, - "priority": 1, - "notify_channels": "", - "runbook_url": "", - "note": "", - "create_at": 1626935909, - "alert_duration": 60, - "notify_users_detail": null, - "notify_groups_detail": null - }, - { - "name": "http响应数据传输占比超过70%", - "type": 1, - "expression": { - "evaluation_interval": 10, - "promql": "100 * avg(probe_http_duration_seconds{phase=\"transfer\"})by(instance) / sum(probe_http_duration_seconds) by(instance) >70" - }, - "status": 0, - "enable_stime": "00:00", - "enable_etime": "23:59", - "enable_days_of_week": "1 2 3 4 5 6 7", - "recovery_notify": 0, - "priority": 2, - "notify_channels": "", - "runbook_url": "", - "note": "", - "create_at": 1626936324, - "alert_duration": 60, - "notify_users_detail": null, - "notify_groups_detail": null - }, - { - "name": "http接口探测失败", - "type": 1, - "expression": { - "evaluation_interval": 10, - "promql": "probe_success{job=~\".*http.*\"}==0" - }, - "status": 0, - "enable_stime": "00:00", - "enable_etime": "23:59", - "enable_days_of_week": "1 2 3 4 5 6 7", - "recovery_notify": 0, - "priority": 1, - "notify_channels": "", - "runbook_url": "", - "note": "", - "create_at": 1626935627, - "alert_duration": 60, - "notify_users_detail": null, - "notify_groups_detail": null - }, - { - "name": "http接口探测耗时超过3秒", - "type": 1, - "expression": { - "evaluation_interval": 10, - "promql": "sum(probe_http_duration_seconds) by (instance) >3\n" - }, - "status": 0, - "enable_stime": "00:00", - "enable_etime": "23:59", - "enable_days_of_week": "1 2 3 4 5 6 7", - "recovery_notify": 0, - "priority": 3, - "notify_channels": "", - "runbook_url": "", - "note": "", - "create_at": 1626936059, - "alert_duration": 60, - "notify_users_detail": null, - "notify_groups_detail": null - }, - { - "name": "http接口返回状态码4xx/5xx错误", - "type": 1, - "expression": { - "evaluation_interval": 10, - "promql": "probe_http_status_code 
>=400" - }, - "status": 0, - "enable_stime": "00:00", - "enable_etime": "23:59", - "enable_days_of_week": "1 2 3 4 5 6 7", - "recovery_notify": 0, - "priority": 1, - "notify_channels": "", - "runbook_url": "", - "note": "", - "create_at": 1626936145, - "alert_duration": 60, - "notify_users_detail": null, - "notify_groups_detail": null - }, - { - "name": "icmp探测失败", - "type": 1, - "expression": { - "evaluation_interval": 10, - "promql": "probe_success{job=~\".*icmp.*\"}==0" - }, - "status": 0, - "enable_stime": "00:00", - "enable_etime": "23:59", - "enable_days_of_week": "1 2 3 4 5 6 7", - "recovery_notify": 0, - "priority": 1, - "notify_channels": "", - "runbook_url": "", - "note": "", - "create_at": 1626935855, - "alert_duration": 60, - "notify_users_detail": null, - "notify_groups_detail": null - }, - { - "name": "tcp端口探测失败", - "type": 1, - "expression": { - "evaluation_interval": 10, - "promql": "probe_success{job=~\".*tcp.*\"}==0" - }, - "status": 0, - "enable_stime": "00:00", - "enable_etime": "23:59", - "enable_days_of_week": "1 2 3 4 5 6 7", - "recovery_notify": 0, - "priority": 1, - "notify_channels": "", - "runbook_url": "", - "note": "", - "create_at": 1626935874, - "alert_duration": 60, - "notify_users_detail": null, - "notify_groups_detail": null - }, - { - "name": "机器ssh探测失败", - "type": 1, - "expression": { - "evaluation_interval": 10, - "promql": "probe_success{job=~\".*ssh.*\"}==0\n" - }, - "status": 0, - "enable_stime": "00:00", - "enable_etime": "23:59", - "enable_days_of_week": "1 2 3 4 5 6 7", - "recovery_notify": 0, - "priority": 1, - "notify_channels": "", - "runbook_url": "", - "note": "", - "create_at": 1626935827, - "alert_duration": 60, - "notify_users_detail": null, - "notify_groups_detail": null - } -] \ No newline at end of file diff --git a/etc/alert_rule/linux_host b/etc/alert_rule/linux_host deleted file mode 100644 index 5099b4cc..00000000 --- a/etc/alert_rule/linux_host +++ /dev/null @@ -1,271 +0,0 @@ -[ - { - "name": "cpu使用率大于85%", - "type": 0, - "expression": { - "together_or_any": 0, - "trigger_conditions": [ - { - "optr": ">", - "func": "all", - "metric": "system_cpu_util", - "params": [], - "threshold": 85 - } - ], - "tags_filters": [] - }, - "status": 0, - "enable_stime": "00:00", - "enable_etime": "23:59", - "enable_days_of_week": "1 2 3 4 5 6 7", - "recovery_notify": 0, - "priority": 2, - "notify_channels": "", - "runbook_url": "", - "note": "", - "create_at": 1626517658, - "alert_duration": 60, - "notify_users_detail": null, - "notify_groups_detail": null - }, - { - "name": "内存利用率大于75%", - "type": 0, - "expression": { - "together_or_any": 0, - "trigger_conditions": [ - { - "func": "all", - "metric": "system_mem_used_percent", - "optr": ">", - "params": [], - "threshold": 75 - } - ], - "tags_filters": [] - }, - "status": 0, - "enable_stime": "00:00", - "enable_etime": "23:59", - "enable_days_of_week": "1 2 3 4 5 6 7", - "recovery_notify": 0, - "priority": 2, - "notify_channels": "sms email", - "runbook_url": "", - "note": "", - "create_at": 1626517103, - "alert_duration": 60, - "notify_users_detail": null, - "notify_groups_detail": null - }, - { - "name": "机器loadavg大于16", - "type": 0, - "expression": { - "tags_filters": [], - "trigger_conditions": [ - { - "func": "all", - "metric": "system_cpu_load1", - "optr": ">", - "params": [], - "threshold": 16 - } - ] - }, - "status": 0, - "enable_stime": "00:00", - "enable_etime": "23:59", - "enable_days_of_week": "1 2 3 4 5 6 7", - "recovery_notify": 0, - "priority": 1, - "notify_channels": "sms email", - 
"runbook_url": "", - "note": "", - "create_at": 1626517103, - "alert_duration": 60, - "notify_users_detail": null, - "notify_groups_detail": null - }, - { - "name": "磁盘利用率达到85%", - "type": 0, - "expression": { - "tags_filters": [], - "trigger_conditions": [ - { - "func": "all", - "metric": "system_disk_used_percent", - "optr": ">", - "params": [], - "threshold": 85 - } - ] - }, - "status": 0, - "enable_stime": "00:00", - "enable_etime": "23:59", - "enable_days_of_week": "1 2 3 4 5 6 7", - "recovery_notify": 0, - "priority": 3, - "notify_channels": "email", - "runbook_url": "", - "note": "", - "create_at": 1626517103, - "alert_duration": 60, - "notify_users_detail": null, - "notify_groups_detail": null - }, - { - "name": "磁盘利用率达到88%", - "type": 0, - "expression": { - "tags_filters": [], - "trigger_conditions": [ - { - "func": "all", - "metric": "system_disk_used_percent", - "optr": ">", - "params": [], - "threshold": 88 - } - ] - }, - "status": 0, - "enable_stime": "00:00", - "enable_etime": "23:59", - "enable_days_of_week": "1 2 3 4 5 6 7", - "recovery_notify": 0, - "priority": 2, - "notify_channels": "email sms", - "runbook_url": "", - "note": "", - "create_at": 1626517103, - "alert_duration": 60, - "notify_users_detail": null, - "notify_groups_detail": null - }, - { - "name": "磁盘利用率达到92%", - "type": 0, - "expression": { - "tags_filters": [], - "trigger_conditions": [ - { - "func": "all", - "metric": "system_disk_used_percent", - "optr": ">", - "params": [], - "threshold": 88 - } - ] - }, - "status": 0, - "enable_stime": "00:00", - "enable_etime": "23:59", - "enable_days_of_week": "1 2 3 4 5 6 7", - "recovery_notify": 0, - "priority": 1, - "notify_channels": "email sms voice", - "runbook_url": "", - "note": "", - "create_at": 1626517103, - "alert_duration": 60, - "notify_users_detail": null, - "notify_groups_detail": null - }, - { - "name": "端口挂了", - "type": 0, - "expression": { - "tags_filters": [], - "trigger_conditions": [ - { - "func": "all", - "metric": "proc_port_listen", - "optr": "<", - "params": [], - "threshold": 1 - } - ] - }, - "status": 0, - "enable_stime": "00:00", - "enable_etime": "23:59", - "enable_days_of_week": "1 2 3 4 5 6 7", - "recovery_notify": 0, - "priority": 2, - "notify_channels": "sms email", - "runbook_url": "", - "note": "", - "create_at": 1626517103, - "alert_duration": 60, - "notify_users_detail": null, - "notify_groups_detail": null - }, - { - "name": "网卡入方向错包", - "type": 0, - "expression": { - "together_or_any": 0, - "trigger_conditions": [ - { - "optr": ">", - "func": "all", - "metric": "system_net_packets_in_error", - "params": [ - 1 - ], - "threshold": 3 - } - ], - "tags_filters": [] - }, - "status": 0, - "enable_stime": "00:00", - "enable_etime": "23:59", - "enable_days_of_week": "1 2 3 4 5 6 7", - "recovery_notify": 0, - "priority": 2, - "notify_channels": "", - "runbook_url": "", - "note": "", - "create_at": 1626517809, - "alert_duration": 60, - "notify_users_detail": null, - "notify_groups_detail": null - }, - { - "name": "网卡出方向错包", - "type": 0, - "expression": { - "together_or_any": 0, - "trigger_conditions": [ - { - "optr": ">", - "func": "all", - "metric": "system_net_packets_out_error", - "params": [ - 1 - ], - "threshold": 3 - } - ], - "tags_filters": [] - }, - "status": 0, - "enable_stime": "00:00", - "enable_etime": "23:59", - "enable_days_of_week": "1 2 3 4 5 6 7", - "recovery_notify": 0, - "priority": 2, - "notify_channels": "", - "runbook_url": "", - "note": "", - "create_at": 1626517838, - "alert_duration": 60, - "notify_users_detail": 
null, - "notify_groups_detail": null - } -] \ No newline at end of file diff --git a/etc/dashboard/blackbox_exporter b/etc/dashboard/blackbox_exporter deleted file mode 100644 index 84ff9a58..00000000 --- a/etc/dashboard/blackbox_exporter +++ /dev/null @@ -1,226 +0,0 @@ -[ - { - "id": 0, - "name": "blackbox_exporter", - "tags": "", - "configs": "{\"tags\":[{\"tagName\":\"http_probe_job\",\"key\":\"job\",\"value\":\"blackbox-http\",\"prefix\":false},{\"tagName\":\"http_probe_instance\",\"key\":\"instance\",\"value\":\"*\",\"prefix\":false}]}", - "chart_groups": [ - { - "id": 0, - "dashboard_id": 0, - "name": "http接口探测", - "weight": 0, - "charts": [ - { - "id": 440, - "group_id": 109, - "configs": "{\"name\":\"https的探测\",\"mode\":\"promethues\",\"prome_ql\":[\"probe_http_ssl==1\"],\"layout\":{\"h\":2,\"w\":6,\"x\":0,\"y\":0,\"i\":\"0\"}}", - "weight": 0 - }, - { - "id": 441, - "group_id": 109, - "configs": "{\"name\":\"http的探测\",\"mode\":\"promethues\",\"prome_ql\":[\"probe_http_ssl==0\"],\"layout\":{\"h\":2,\"w\":6,\"x\":6,\"y\":0,\"i\":\"1\"}}", - "weight": 0 - }, - { - "id": 442, - "group_id": 109, - "configs": "{\"name\":\"https探测目标个数\",\"mode\":\"promethues\",\"prome_ql\":[\"count(probe_http_ssl==1)\"],\"layout\":{\"h\":2,\"w\":6,\"x\":12,\"y\":0,\"i\":\"2\"}}", - "weight": 0 - }, - { - "id": 443, - "group_id": 109, - "configs": "{\"name\":\"http探测目标个数\",\"mode\":\"promethues\",\"prome_ql\":[\"count(probe_http_ssl==0)\"],\"layout\":{\"h\":2,\"w\":6,\"x\":18,\"y\":0,\"i\":\"3\"}}", - "weight": 0 - }, - { - "id": 446, - "group_id": 109, - "configs": "{\"name\":\"http探测成功个数\",\"mode\":\"promethues\",\"prome_ql\":[\"count(probe_success{job=~\\\".*http.*\\\"}==1)\"],\"layout\":{\"h\":2,\"w\":6,\"x\":6,\"y\":2,\"i\":\"4\"}}", - "weight": 0 - }, - { - "id": 447, - "group_id": 109, - "configs": "{\"name\":\"http探测失败列表\",\"mode\":\"promethues\",\"prome_ql\":[\"probe_success{job=~\\\".*http.*\\\"}==0\"],\"layout\":{\"h\":2,\"w\":6,\"x\":12,\"y\":2,\"i\":\"5\"}}", - "weight": 0 - }, - { - "id": 448, - "group_id": 109, - "configs": "{\"name\":\"http探测失败个数\",\"mode\":\"promethues\",\"prome_ql\":[\"count(probe_success{job=~\\\".*http.*\\\"}==0)\"],\"layout\":{\"h\":2,\"w\":6,\"x\":0,\"y\":2,\"i\":\"6\"}}", - "weight": 0 - }, - { - "id": 449, - "group_id": 109, - "configs": "{\"name\":\"http探测总耗时 单位秒\",\"mode\":\"promethues\",\"prome_ql\":[\"sum(probe_http_duration_seconds) by (instance)\"],\"layout\":{\"h\":2,\"w\":6,\"x\":18,\"y\":2,\"i\":\"7\"}}", - "weight": 0 - } - ] - }, - { - "id": 0, - "dashboard_id": 0, - "name": "https接口探测汇总", - "weight": 1, - "charts": [ - { - "id": 444, - "group_id": 110, - "configs": "{\"name\":\"tls版本信息\",\"mode\":\"promethues\",\"prome_ql\":[\"probe_tls_version_info\"],\"layout\":{\"h\":2,\"w\":6,\"x\":0,\"y\":0,\"i\":\"0\"}}", - "weight": 0 - }, - { - "id": 445, - "group_id": 110, - "configs": "{\"name\":\"tls证书过期时间 单位:天\",\"mode\":\"promethues\",\"prome_ql\":[\"(probe_ssl_earliest_cert_expiry - time()) / 3600 / 24\"],\"layout\":{\"h\":2,\"w\":6,\"x\":6,\"y\":0,\"i\":\"1\"}}", - "weight": 0 - } - ] - }, - { - "id": 0, - "dashboard_id": 0, - "name": "http接口各阶段耗时详情", - "weight": 2, - "charts": [ - { - "id": 450, - "group_id": 111, - "configs": "{\"name\":\"单个目标的各阶段耗时\",\"mode\":\"promethues\",\"prome_ql\":[\"probe_http_duration_seconds{instance=~\\\"$instance\\\"}\"],\"layout\":{\"h\":2,\"w\":6,\"x\":0,\"y\":0,\"i\":\"0\"}}", - "weight": 0 - }, - { - "id": 451, - "group_id": 111, - "configs": "{\"name\":\"[阶段1] 
dns解析时间\",\"mode\":\"promethues\",\"prome_ql\":[\"probe_http_duration_seconds{instance=~\\\"$instance\\\",phase=\\\"resolve\\\"}\"],\"layout\":{\"h\":2,\"w\":6,\"x\":6,\"y\":0,\"i\":\"1\"}}", - "weight": 0 - }, - { - "id": 452, - "group_id": 111, - "configs": "{\"name\":\"[可无]tls握手时间\",\"mode\":\"promethues\",\"prome_ql\":[\"probe_http_duration_seconds{instance=~\\\"$instance\\\",phase=\\\"tls\\\"}\"],\"layout\":{\"h\":2,\"w\":6,\"x\":12,\"y\":0,\"i\":\"2\"}}", - "weight": 0 - }, - { - "id": 453, - "group_id": 111, - "configs": "{\"name\":\"[阶段2] tcp连接耗时\",\"mode\":\"promethues\",\"prome_ql\":[\"probe_http_duration_seconds{instance=~\\\"$instance\\\",phase=\\\"connect\\\"}\"],\"layout\":{\"h\":2,\"w\":6,\"x\":18,\"y\":0,\"i\":\"3\"}}", - "weight": 0 - }, - { - "id": 454, - "group_id": 111, - "configs": "{\"name\":\"[阶段3] 服务端处理耗时\",\"mode\":\"promethues\",\"prome_ql\":[\"probe_http_duration_seconds{instance=~\\\"$instance\\\",phase=\\\"processing\\\"}\"],\"layout\":{\"h\":2,\"w\":6,\"x\":0,\"y\":2,\"i\":\"4\"}}", - "weight": 0 - }, - { - "id": 455, - "group_id": 111, - "configs": "{\"name\":\"[阶段4] 传输响应耗时\",\"mode\":\"promethues\",\"prome_ql\":[\"probe_http_duration_seconds{instance=~\\\"$instance\\\",phase=\\\"transfer\\\"}\"],\"layout\":{\"h\":2,\"w\":6,\"x\":6,\"y\":2,\"i\":\"5\"}}", - "weight": 0 - } - ] - }, - { - "id": 0, - "dashboard_id": 0, - "name": "ssh存活探测(配置了ssh探测job才有)", - "weight": 3, - "charts": [ - { - "id": 456, - "group_id": 112, - "configs": "{\"name\":\"ssh探测成功个数\",\"mode\":\"promethues\",\"prome_ql\":[\"count(probe_success{job=~\\\".*ssh.*\\\"}==1)\"],\"layout\":{\"h\":2,\"w\":6,\"x\":0,\"y\":0,\"i\":\"0\"}}", - "weight": 0 - }, - { - "id": 457, - "group_id": 112, - "configs": "{\"name\":\"ssh探测失败个数\",\"mode\":\"promethues\",\"prome_ql\":[\"count(probe_success{job=~\\\".*ssh.*\\\"}==0)\"],\"layout\":{\"h\":2,\"w\":6,\"x\":6,\"y\":0,\"i\":\"1\"}}", - "weight": 0 - }, - { - "id": 458, - "group_id": 112, - "configs": "{\"name\":\"ssh探测失败详情\",\"mode\":\"promethues\",\"prome_ql\":[\"probe_success{job=~\\\".*ssh.*\\\"}==0\"],\"layout\":{\"h\":2,\"w\":6,\"x\":12,\"y\":0,\"i\":\"2\"}}", - "weight": 0 - }, - { - "id": 459, - "group_id": 112, - "configs": "{\"name\":\"ssh探测耗时\",\"mode\":\"promethues\",\"prome_ql\":[\"probe_duration_seconds{job=~\\\".*ssh.*\\\"}\"],\"layout\":{\"h\":2,\"w\":6,\"x\":18,\"y\":0,\"i\":\"3\"}}", - "weight": 0 - } - ] - }, - { - "id": 0, - "dashboard_id": 0, - "name": "icmp探测(配置了icmp探测job才有)", - "weight": 4, - "charts": [ - { - "id": 460, - "group_id": 113, - "configs": "{\"name\":\"icmp探测成功个数\",\"mode\":\"promethues\",\"prome_ql\":[\"count(probe_success{job=~\\\".*icmp.*\\\"}==1)\"],\"layout\":{\"h\":2,\"w\":6,\"x\":0,\"y\":0,\"i\":\"0\"}}", - "weight": 0 - }, - { - "id": 461, - "group_id": 113, - "configs": "{\"name\":\"icmp探测失败个数\",\"mode\":\"promethues\",\"prome_ql\":[\"count(probe_success{job=~\\\".*icmp.*\\\"}==0)\"],\"layout\":{\"h\":2,\"w\":6,\"x\":6,\"y\":0,\"i\":\"1\"}}", - "weight": 0 - }, - { - "id": 462, - "group_id": 113, - "configs": "{\"name\":\"icmp探测失败详情\",\"mode\":\"promethues\",\"prome_ql\":[\"probe_success{job=~\\\".*icmp.*\\\"}==0\"],\"layout\":{\"h\":2,\"w\":6,\"x\":12,\"y\":0,\"i\":\"2\"}}", - "weight": 0 - }, - { - "id": 463, - "group_id": 113, - "configs": "{\"name\":\"icmp探测总耗时\",\"mode\":\"promethues\",\"prome_ql\":[\"probe_duration_seconds{job=~\\\".*icmp.*\\\"}\"],\"layout\":{\"h\":2,\"w\":6,\"x\":18,\"y\":0,\"i\":\"3\"}}", - "weight": 0 - } - ] - }, - { - "id": 0, - "dashboard_id": 0, - "name": "tcp端口探测(配置了tcp探测job才有)", - 
"weight": 5, - "charts": [ - { - "id": 464, - "group_id": 114, - "configs": "{\"name\":\"tcp端口探测成功个数\",\"mode\":\"promethues\",\"prome_ql\":[\"count(probe_success{job=~\\\".*tcp.*\\\"}==1)\"],\"layout\":{\"h\":2,\"w\":6,\"x\":0,\"y\":0,\"i\":\"0\"}}", - "weight": 0 - }, - { - "id": 465, - "group_id": 114, - "configs": "{\"name\":\"tcp端口探测失败个数\",\"mode\":\"promethues\",\"prome_ql\":[\"count(probe_success{job=~\\\".*tcp.*\\\"}==0)\"],\"layout\":{\"h\":2,\"w\":6,\"x\":6,\"y\":0,\"i\":\"1\"}}", - "weight": 0 - }, - { - "id": 466, - "group_id": 114, - "configs": "{\"name\":\"tcp端口探测失败列表\",\"mode\":\"promethues\",\"prome_ql\":[\"probe_success{job=~\\\".*tcp.*\\\"}==0\"],\"layout\":{\"h\":2,\"w\":6,\"x\":12,\"y\":0,\"i\":\"2\"}}", - "weight": 0 - }, - { - "id": 467, - "group_id": 114, - "configs": "{\"name\":\"tcp端口探测耗时\",\"mode\":\"promethues\",\"prome_ql\":[\"probe_duration_seconds{job=~\\\".*tcp.*\\\"}\"],\"layout\":{\"h\":2,\"w\":6,\"x\":18,\"y\":0,\"i\":\"3\"}}", - "weight": 0 - } - ] - } - ] - } -] \ No newline at end of file diff --git a/etc/dashboard/jmx_exporter b/etc/dashboard/jmx_exporter deleted file mode 100644 index 4a04ec66..00000000 --- a/etc/dashboard/jmx_exporter +++ /dev/null @@ -1,306 +0,0 @@ -[ - { - "id": 0, - "name": "jmx_exporter", - "tags": "", - "configs": "{\"tags\":[{\"tagName\":\"java_app\",\"key\":\"java_app\",\"value\":\"*\",\"prefix\":false}]}", - "chart_groups": [ - { - "id": 0, - "dashboard_id": 0, - "name": "jvm统计", - "weight": 1, - "charts": [ - { - "id": 278, - "group_id": 75, - "configs": "{\"name\":\"jvm版本信息\",\"mode\":\"promethues\",\"prome_ql\":[\"avg(jvm_info{java_app=~\\\"$java_app\\\"}) without (runtime,vendor)\"],\"layout\":{\"h\":2,\"w\":12,\"x\":0,\"y\":0,\"i\":\"0\"}}", - "weight": 0 - }, - { - "id": 309, - "group_id": 75, - "configs": "{\"name\":\"java进程启动时间 单位:小时\",\"mode\":\"promethues\",\"prome_ql\":[\"(time() - process_start_time_seconds{java_app=~\\\"$java_app\\\"})/3600\"],\"layout\":{\"h\":2,\"w\":12,\"x\":12,\"y\":0,\"i\":\"1\"}}", - "weight": 0 - } - ] - }, - { - "id": 0, - "dashboard_id": 0, - "name": "jvm内存使用", - "weight": 2, - "charts": [ - { - "id": 279, - "group_id": 76, - "configs": "{\"name\":\"jvm内存使用 - nonheap 非堆区\",\"mode\":\"promethues\",\"prome_ql\":[\"jvm_memory_bytes_used{java_app=~\\\"$java_app\\\",area=\\\"nonheap\\\"}\"],\"layout\":{\"h\":2,\"w\":6,\"x\":0,\"y\":0,\"i\":\"0\"}}", - "weight": 0 - }, - { - "id": 280, - "group_id": 76, - "configs": "{\"name\":\"jvm内存使用 - heap堆区\",\"mode\":\"promethues\",\"prome_ql\":[\"jvm_memory_bytes_used{java_app=~\\\"$java_app\\\",area=\\\"heap\\\"}\"],\"layout\":{\"h\":2,\"w\":6,\"x\":6,\"y\":0,\"i\":\"1\"}}", - "weight": 0 - }, - { - "id": 281, - "group_id": 76, - "configs": "{\"name\":\"提交给 Java虚拟机使用的内存量 heap 堆区\",\"mode\":\"promethues\",\"prome_ql\":[\"jvm_memory_bytes_committed{java_app=~\\\"$java_app\\\",area=\\\"heap\\\"}\"],\"layout\":{\"h\":2,\"w\":6,\"x\":12,\"y\":0,\"i\":\"2\"}}", - "weight": 0 - }, - { - "id": 282, - "group_id": 76, - "configs": "{\"name\":\"提交给 Java虚拟机使用的内存量 nonheap 非堆区\",\"mode\":\"promethues\",\"prome_ql\":[\"jvm_memory_bytes_committed{java_app=~\\\"$java_app\\\",area=\\\"nonheap\\\"}\"],\"layout\":{\"h\":2,\"w\":6,\"x\":18,\"y\":0,\"i\":\"3\"}}", - "weight": 0 - }, - { - "id": 283, - "group_id": 76, - "configs": "{\"name\":\"jvm最大内存 \",\"mode\":\"promethues\",\"prome_ql\":[\"jvm_memory_bytes_max{java_app=~\\\"$java_app\\\",area=\\\"heap\\\"}\"],\"layout\":{\"h\":2,\"w\":6,\"x\":0,\"y\":2,\"i\":\"4\"}}", - "weight": 0 - }, - { - "id": 285, - "group_id": 76, 
- "configs": "{\"name\":\"jvm 初始化内存\",\"mode\":\"promethues\",\"prome_ql\":[\"jvm_memory_bytes_init{java_app=~\\\"$java_app\\\",area=\\\"heap\\\"}\"],\"layout\":{\"h\":2,\"w\":6,\"x\":6,\"y\":2,\"i\":\"5\"}}", - "weight": 0 - }, - { - "id": 286, - "group_id": 76, - "configs": "{\"name\":\"jvm内存使用百分比% heap堆区 \",\"mode\":\"promethues\",\"prome_ql\":[\"100 * jvm_memory_bytes_used{java_app=~\\\"$java_app\\\",area=\\\"heap\\\"}/jvm_memory_bytes_max{java_app=~\\\"$java_app\\\",area=\\\"heap\\\"}\"],\"layout\":{\"h\":2,\"w\":6,\"x\":12,\"y\":2,\"i\":\"6\"}}", - "weight": 0 - } - ] - }, - { - "id": 0, - "dashboard_id": 0, - "name": "jvm内存池", - "weight": 3, - "charts": [ - { - "id": 287, - "group_id": 77, - "configs": "{\"name\":\"jvm内存池分pool展示\",\"mode\":\"promethues\",\"prome_ql\":[\"jvm_memory_pool_bytes_max{java_app=~\\\"$java_app\\\"}\"],\"layout\":{\"h\":2,\"w\":24,\"x\":0,\"y\":0,\"i\":\"0\"}}", - "weight": 0 - }, - { - "id": 316, - "group_id": 77, - "configs": "{\"name\":\" JVM 缓冲池使用缓存大小\",\"mode\":\"promethues\",\"prome_ql\":[\"jvm_buffer_pool_used_bytes{java_app=~\\\"$java_app\\\"}\"],\"layout\":{\"h\":2,\"w\":6,\"x\":0,\"y\":2,\"i\":\"1\"}}", - "weight": 0 - }, - { - "id": 317, - "group_id": 77, - "configs": "{\"name\":\"JVM 缓冲池的字节容量\",\"mode\":\"promethues\",\"prome_ql\":[\"jvm_buffer_pool_capacity_bytes{java_app=~\\\"$java_app\\\"}\"],\"layout\":{\"h\":2,\"w\":6,\"x\":6,\"y\":2,\"i\":\"2\"}}", - "weight": 0 - }, - { - "id": 318, - "group_id": 77, - "configs": "{\"name\":\"JVM 缓冲池使用的字节大小\",\"mode\":\"promethues\",\"prome_ql\":[\"jvm_buffer_pool_used_bytes{java_app=~\\\"$java_app\\\"}\"],\"layout\":{\"h\":2,\"w\":6,\"x\":12,\"y\":2,\"i\":\"3\"}}", - "weight": 0 - } - ] - }, - { - "id": 0, - "dashboard_id": 0, - "name": "jvm gc情况", - "weight": 4, - "charts": [ - { - "id": 288, - "group_id": 78, - "configs": "{\"name\":\"新生代gc耗时 1分钟\",\"mode\":\"promethues\",\"prome_ql\":[\"increase(jvm_gc_collection_seconds_sum{java_app=~\\\"$java_app\\\",gc=\\\"G1 Young Generation\\\" }[1m])\"],\"layout\":{\"h\":2,\"w\":8,\"x\":0,\"y\":0,\"i\":\"0\"}}", - "weight": 0 - }, - { - "id": 289, - "group_id": 78, - "configs": "{\"name\":\"老生代gc耗时 1分钟\",\"mode\":\"promethues\",\"prome_ql\":[\"increase(jvm_gc_collection_seconds_sum{java_app=~\\\"$java_app\\\",gc=\\\"G1 Old Generation\\\" }[1m])\"],\"layout\":{\"h\":2,\"w\":8,\"x\":8,\"y\":0,\"i\":\"1\"}}", - "weight": 0 - }, - { - "id": 290, - "group_id": 78, - "configs": "{\"name\":\"新生代gc次数 1分钟\",\"mode\":\"promethues\",\"prome_ql\":[\"increase(jvm_gc_collection_seconds_count{java_app=~\\\"$java_app\\\",gc=\\\"G1 Young Generation\\\" }[1m])\"],\"layout\":{\"h\":2,\"w\":8,\"x\":16,\"y\":0,\"i\":\"2\"}}", - "weight": 0 - }, - { - "id": 291, - "group_id": 78, - "configs": "{\"name\":\"老生代gc次数 1分钟\",\"mode\":\"promethues\",\"prome_ql\":[\"increase(jvm_gc_collection_seconds_count{java_app=~\\\"$java_app\\\",gc=\\\"G1 Old Generation\\\" }[1m])\"],\"layout\":{\"h\":2,\"w\":8,\"x\":0,\"y\":2,\"i\":\"3\"}}", - "weight": 0 - }, - { - "id": 292, - "group_id": 78, - "configs": "{\"name\":\"新生代平均gc耗时 秒\",\"mode\":\"promethues\",\"prome_ql\":[\"jvm_gc_collection_seconds_sum{java_app=~\\\"$java_app\\\",gc=\\\"G1 Young Generation\\\" }/jvm_gc_collection_seconds_count{java_app=~\\\"$java_app\\\",gc=\\\"G1 Young Generation\\\" }\"],\"layout\":{\"h\":2,\"w\":8,\"x\":8,\"y\":2,\"i\":\"4\"}}", - "weight": 0 - }, - { - "id": 293, - "group_id": 78, - "configs": 
"{\"name\":\"老生代平均gc耗时\",\"mode\":\"promethues\",\"prome_ql\":[\"jvm_gc_collection_seconds_sum{java_app=~\\\"$java_app\\\",gc=\\\"G1 Old Generation\\\"}/jvm_gc_collection_seconds_count{java_app=~\\\"$java_app\\\",gc=\\\"G1 Old Generation\\\" }\"],\"layout\":{\"h\":2,\"w\":8,\"x\":16,\"y\":2,\"i\":\"5\"}}", - "weight": 0 - } - ] - }, - { - "id": 0, - "dashboard_id": 0, - "name": "jvm线程情况", - "weight": 5, - "charts": [ - { - "id": 294, - "group_id": 79, - "configs": "{\"name\":\"当前线程数\",\"mode\":\"promethues\",\"prome_ql\":[\"jvm_threads_current{java_app=~\\\"$java_app\\\"}\"],\"layout\":{\"h\":2,\"w\":6,\"x\":0,\"y\":0,\"i\":\"0\"}}", - "weight": 0 - }, - { - "id": 295, - "group_id": 79, - "configs": "{\"name\":\"守护线程数\",\"mode\":\"promethues\",\"prome_ql\":[\"jvm_threads_daemon{java_app=~\\\"$java_app\\\"}\"],\"layout\":{\"h\":2,\"w\":6,\"x\":6,\"y\":0,\"i\":\"1\"}}", - "weight": 0 - }, - { - "id": 296, - "group_id": 79, - "configs": "{\"name\":\"死锁线程数\",\"mode\":\"promethues\",\"prome_ql\":[\"jvm_threads_deadlocked{java_app=~\\\"$java_app\\\"}\"],\"layout\":{\"h\":2,\"w\":6,\"x\":12,\"y\":0,\"i\":\"2\"}}", - "weight": 0 - }, - { - "id": 297, - "group_id": 79, - "configs": "{\"name\":\"活动线程峰值\",\"mode\":\"promethues\",\"prome_ql\":[\"jvm_threads_peak{java_app=~\\\"$java_app\\\"}\"],\"layout\":{\"h\":2,\"w\":6,\"x\":18,\"y\":0,\"i\":\"3\"}}", - "weight": 0 - }, - { - "id": 298, - "group_id": 79, - "configs": "{\"name\":\"自JVM启动后,启动的线程总量(包括daemon,non-daemon和终止了的)\",\"mode\":\"promethues\",\"prome_ql\":[\"jvm_threads_started_total{java_app=~\\\"$java_app\\\"}\"],\"layout\":{\"h\":2,\"w\":6,\"x\":0,\"y\":2,\"i\":\"4\"}}", - "weight": 0 - }, - { - "id": 299, - "group_id": 79, - "configs": "{\"name\":\"当前TERMINATED线程个数\",\"mode\":\"promethues\",\"prome_ql\":[\"jvm_threads_state{java_app=~\\\"$java_app\\\",state=\\\"TERMINATED\\\"}\"],\"layout\":{\"h\":2,\"w\":6,\"x\":6,\"y\":4,\"i\":\"5\"}}", - "weight": 0 - }, - { - "id": 300, - "group_id": 79, - "configs": "{\"name\":\"当前RUNNABLE线程个数\",\"mode\":\"promethues\",\"prome_ql\":[\"jvm_threads_state{java_app=~\\\"$java_app\\\",state=\\\"RUNNABLE\\\"}\"],\"layout\":{\"h\":2,\"w\":6,\"x\":12,\"y\":2,\"i\":\"6\"}}", - "weight": 0 - }, - { - "id": 301, - "group_id": 79, - "configs": "{\"name\":\"当前NEW线程个数\",\"mode\":\"promethues\",\"prome_ql\":[\"jvm_threads_state{java_app=~\\\"$java_app\\\",state=\\\"NEW\\\"}\"],\"layout\":{\"h\":2,\"w\":6,\"x\":18,\"y\":2,\"i\":\"7\"}}", - "weight": 0 - }, - { - "id": 302, - "group_id": 79, - "configs": "{\"name\":\"当前TIMED_WAITING线程个数\",\"mode\":\"promethues\",\"prome_ql\":[\"jvm_threads_state{java_app=~\\\"$java_app\\\",state=\\\"TIMED_WAITING\\\"}\"],\"layout\":{\"h\":2,\"w\":6,\"x\":0,\"y\":4,\"i\":\"8\"}}", - "weight": 0 - }, - { - "id": 303, - "group_id": 79, - "configs": "{\"name\":\"当前BLOCKED线程个数\",\"mode\":\"promethues\",\"prome_ql\":[\"jvm_threads_state{java_app=~\\\"$java_app\\\",state=\\\"BLOCKED\\\"}\"],\"layout\":{\"h\":2,\"w\":6,\"x\":18,\"y\":4,\"i\":\"9\"}}", - "weight": 0 - }, - { - "id": 304, - "group_id": 79, - "configs": "{\"name\":\"当前WAITING线程个数\",\"mode\":\"promethues\",\"prome_ql\":[\"jvm_threads_state{java_app=~\\\"$java_app\\\",state=\\\"WAITING\\\"}\"],\"layout\":{\"h\":2,\"w\":6,\"x\":12,\"y\":4,\"i\":\"10\"}}", - "weight": 0 - }, - { - "id": 305, - "group_id": 79, - "configs": "{\"name\":\"当前线程状态汇总\",\"mode\":\"promethues\",\"prome_ql\":[\"jvm_threads_state{java_app=~\\\"$java_app\\\"}\"],\"layout\":{\"h\":2,\"w\":6,\"x\":6,\"y\":2,\"i\":\"11\"}}", - "weight": 0 - } - ] - }, - { - "id": 0, 
- "dashboard_id": 0, - "name": "加载类情况", - "weight": 6, - "charts": [ - { - "id": 306, - "group_id": 80, - "configs": "{\"name\":\"jvm 当前加载的类个数 \",\"mode\":\"promethues\",\"prome_ql\":[\"jvm_classes_loaded{java_app=~\\\"$java_app\\\"}\"],\"layout\":{\"h\":2,\"w\":8,\"x\":0,\"y\":0,\"i\":\"0\"}}", - "weight": 0 - }, - { - "id": 307, - "group_id": 80, - "configs": "{\"name\":\"jvm启动以来加载的类总个数\",\"mode\":\"promethues\",\"prome_ql\":[\"jvm_classes_loaded_total{java_app=~\\\"$java_app\\\"}\"],\"layout\":{\"h\":2,\"w\":8,\"x\":8,\"y\":0,\"i\":\"1\"}}", - "weight": 0 - }, - { - "id": 308, - "group_id": 80, - "configs": "{\"name\":\"jvm启动以来卸载的类总个数\",\"mode\":\"promethues\",\"prome_ql\":[\"jvm_classes_unloaded_total{java_app=~\\\"$java_app\\\"}\"],\"layout\":{\"h\":2,\"w\":8,\"x\":16,\"y\":0,\"i\":\"2\"}}", - "weight": 0 - } - ] - }, - { - "id": 0, - "dashboard_id": 0, - "name": "机器指标(配置了java.lang才有)", - "weight": 7, - "charts": [ - { - "id": 311, - "group_id": 81, - "configs": "{\"name\":\"java进程打开fd数\",\"mode\":\"promethues\",\"prome_ql\":[\"os_open_file_descriptor_count{java_app=~\\\"$java_app\\\"}\"],\"layout\":{\"h\":2,\"w\":8,\"x\":0,\"y\":0,\"i\":\"0\"}}", - "weight": 0 - }, - { - "id": 312, - "group_id": 81, - "configs": "{\"name\":\"机器总内存\",\"mode\":\"promethues\",\"prome_ql\":[\"os_total_memory_size{java_app=~\\\"$java_app\\\"}\"],\"layout\":{\"h\":2,\"w\":8,\"x\":8,\"y\":0,\"i\":\"1\"}}", - "weight": 0 - }, - { - "id": 313, - "group_id": 81, - "configs": "{\"name\":\"机器可用内存数\",\"mode\":\"promethues\",\"prome_ql\":[\"os_free_memory_size{java_app=~\\\"$java_app\\\"}\"],\"layout\":{\"h\":2,\"w\":8,\"x\":16,\"y\":0,\"i\":\"2\"}}", - "weight": 0 - }, - { - "id": 314, - "group_id": 81, - "configs": "{\"name\":\"机器近期cpu使用率\",\"mode\":\"promethues\",\"link\":\"https://docs.oracle.com/javase/7/docs/jre/api/management/extension/com/sun/management/OperatingSystemMXBean.html#getSystemCpuLoad()\",\"prome_ql\":[\"100 * os_system_cpu_load{java_app=~\\\"$java_app\\\"}\"],\"layout\":{\"h\":2,\"w\":8,\"x\":0,\"y\":2,\"i\":\"3\"}}", - "weight": 0 - }, - { - "id": 315, - "group_id": 81, - "configs": "{\"name\":\"java进程cpu使用\",\"mode\":\"promethues\",\"link\":\"https://docs.oracle.com/javase/7/docs/jre/api/management/extension/com/sun/management/OperatingSystemMXBean.html#getProcessCpuLoad()\",\"prome_ql\":[\"os_process_cpu_load{java_app=~\\\"$java_app\\\"}\"],\"layout\":{\"h\":2,\"w\":8,\"x\":8,\"y\":2,\"i\":\"4\"}}", - "weight": 0 - }, - { - "id": 319, - "group_id": 81, - "configs": "{\"name\":\"jvm cpu百分比\",\"mode\":\"promethues\",\"prome_ql\":[\"100 *(os_process_cpu_load{java_app=~\\\"$java_app\\\"}/os_system_cpu_load{java_app=~\\\"$java_app\\\"})\"],\"layout\":{\"h\":2,\"w\":8,\"x\":16,\"y\":2,\"i\":\"5\"}}", - "weight": 0 - } - ] - } - ] - } -] \ No newline at end of file diff --git a/etc/dashboard/linux_host b/etc/dashboard/linux_host deleted file mode 100644 index a61ec15e..00000000 --- a/etc/dashboard/linux_host +++ /dev/null @@ -1,42 +0,0 @@ -[ - { - "id": 0, - "name": "linux_host", - "tags": "", - "configs": "{\"tags\":[{\"tagName\":\"ident\",\"key\":\"ident\",\"value\":\"*\",\"prefix\":false}]}", - "chart_groups": [ - { - "id": 0, - "dashboard_id": 0, - "name": "Default chart group", - "weight": 0, - "charts": [ - { - "id": 1, - "group_id": 1, - "configs": "{\"name\":\"CPU使用率\",\"mode\":\"nightingale\",\"metric\":[\"system_cpu_util\"],\"tags\":{},\"layout\":{\"h\":2,\"w\":6,\"x\":0,\"y\":0,\"i\":\"0\"}}", - "weight": 0 - }, - { - "id": 2, - "group_id": 1, - "configs": 
"{\"name\":\"硬盘使用率\",\"mode\":\"nightingale\",\"metric\":[\"system_disk_used_percent\"],\"tags\":{},\"layout\":{\"h\":2,\"w\":6,\"x\":6,\"y\":0,\"i\":\"1\"}}", - "weight": 0 - }, - { - "id": 3, - "group_id": 1, - "configs": "{\"name\":\"内存使用率\",\"mode\":\"nightingale\",\"metric\":[\"system_mem_used_percent\"],\"tags\":{},\"layout\":{\"h\":2,\"w\":6,\"x\":12,\"y\":0,\"i\":\"2\"}}", - "weight": 0 - }, - { - "id": 4, - "group_id": 1, - "configs": "{\"name\":\"IO使用率\",\"mode\":\"nightingale\",\"metric\":[\"system_io_util\"],\"tags\":{},\"layout\":{\"h\":2,\"w\":6,\"x\":18,\"y\":0,\"i\":\"3\"}}", - "weight": 0 - } - ] - } - ] - } -] \ No newline at end of file diff --git a/etc/metrics.yaml b/etc/metrics.yaml new file mode 100644 index 00000000..59887bcc --- /dev/null +++ b/etc/metrics.yaml @@ -0,0 +1 @@ +cpu_usage_idle: CPU空闲率(单位:%) \ No newline at end of file diff --git a/etc/script/notify.py b/etc/script/notify.py index 7b5c749c..52896556 100755 --- a/etc/script/notify.py +++ b/etc/script/notify.py @@ -1,36 +1,12 @@ -#!/usr/bin/python +#!/usr/bin/env python # -*- coding: UTF-8 -*- -# -# n9e-server把告警事件通过stdin的方式传入notify.py,notify.py从事件中解析出接收人信息、拼出通知内容,发送通知 -# 脚本的灵活性高,要接入短信、电话、jira、飞书等,都非常容易,只要有接口,notify.py去调用即可 -# import sys import json -import os +import urllib2 import smtplib -import time -import requests from email.mime.text import MIMEText -from email.header import Header -from bottle import template -reload(sys) # reload 才能调用 setdefaultencoding 方法 -sys.setdefaultencoding('utf-8') # 设置 'utf-8' - -################################ -## 邮件告警,修改下面的配置 ## -################################ -mail_host = "smtp.163.com" -mail_port = 994 -mail_user = "ulricqin" -mail_pass = "password" -mail_from = "ulricqin@163.com" - -# 本地告警event json存储目录 -LOCAL_EVENT_FILE_DIR = ".alerts" -NOTIFY_CHANNELS_SPLIT_STR = " " - -NOTIFY_CHANNEL_DICT = { +notify_channel_funcs = { "email":"email", "sms":"sms", "voice":"voice", @@ -38,290 +14,149 @@ NOTIFY_CHANNEL_DICT = { "wecom":"wecom" } -# stdin 告警json实例 -TEST_ALERT_JSON = { - "event": { - "alert_duration": 10, - "notify_channels": "dingtalk", - "res_classpaths": "all", - "id": 4, - "notify_group_objs": None, - "rule_note": "", - "history_points": [ - { - "metric": "go_goroutines", - "points": [ - { - "t": 1625213114, - "v": 33.0 - } - ], - "tags": { - "instance": "localhost:9090", - "job": "prometheus" - } - } - ], - "priority": 1, - "last_sent": True, - "tag_map": { - "instance": "localhost:9090", - "job": "prometheus" - }, - "hash_id": "ecb258d2ca03454ee390a352913c461b", - "status": 0, - "tags": "instance=localhost:9090 job=prometheus", - "trigger_time": 1625213114, - "res_ident": "ident1", - "rule_name": "alert_test", - "is_prome_pull": 1, - "notify_users": "1", - "notify_groups": "", - "runbook_url": "", - "values": "[vector={__name__=\"go_goroutines\", instance=\"localhost:9090\", job=\"prometheus\"}]: [value=33.000000]", - "readable_expression": "go_goroutines>0", - "notify_user_objs": None, - "is_recovery": 1, - "rule_id": 1 - }, - "rule": { - "alert_duration": 10, - "notify_channels": "dingtalk", - "enable_stime": "00:00", - "id": 1, - "note": "", - "create_by": "root", - "append_tags": "", - "priority": 1, - "update_by": "root", - "type": 1, - "status": 0, - "recovery_notify": 0, - "enable_days_of_week": "1 2 3 4 5 6 7", - "callbacks": "localhost:10000", - "notify_users": "1", - "notify_groups": "", - "runbook_url": "", - "name": "a", - "update_at": 1625211576, - "create_at": 1625211576, - "enable_etime": "23:59", - "group_id": 1, - "expression": { - 
"evaluation_interval": 4, - "promql": "go_goroutines>0" - } - }, - "users": [ - { - "username": "root", - "status": 0, - "contacts": { - "dingtalk_robot_token": "xxxxxx" - }, - "create_by": "system", - "update_at": 1625211432, - "create_at": 1624871926, - "email": "", - "phone": "", - "role": "Admin", - "update_by": "root", - "portrait": "", - "nickname": "\u8d85\u7ba1", - "id": 1 - } - ] -} +mail_host = "smtp.163.com" +mail_port = 994 +mail_user = "ulricqin" +mail_pass = "password" +mail_from = "ulricqin@163.com" - -def main(): - payload = json.load(sys.stdin) - trigger_time = payload['event']['trigger_time'] - event_id = payload['event']['id'] - rule_id = payload['rule']['id'] - notify_channels = payload['event'].get('notify_channels').strip().split(NOTIFY_CHANNELS_SPLIT_STR) - if len(notify_channels) == 0: - msg = "notify_channels_empty" - print(msg) - return - # 持久化到本地json文件 - persist(payload, rule_id, event_id, trigger_time) - # 生成告警内容 - alert_content = sms_content_gen(values_gen(payload)) - for ch in notify_channels: - send_func_name = "send_{}".format(NOTIFY_CHANNEL_DICT.get(ch.strip())) - has_func = hasattr(Send, send_func_name) - - if not has_func: - msg = "[send_func_name_err][func_not_found_in_Send_class:{}]".format(send_func_name) - print(msg) - continue - send_func = getattr(Send, send_func_name) - send_func(alert_content, payload) - -def values_gen(payload): - event_obj = payload.get("event") - values = { - "IsAlert": event_obj.get("is_recovery") == 0, - "IsMachineDep": event_obj.get("res_classpaths") != "", - "Status": status_gen(event_obj.get("priority"),event_obj.get("is_recovery")), - "Sname": event_obj.get("rule_name"), - "Ident": event_obj.get("res_ident"), - "Classpath": event_obj.get("res_classpaths"), - "Metric": metric_gen(event_obj.get("history_points")), - "Tags": event_obj.get("tags"), - "Value": event_obj.get("values"), - "ReadableExpression": event_obj.get("readable_expression"), - "TriggerTime": time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(event_obj.get("trigger_time"))), - "Elink": "http://n9e.didiyun.com/strategy/edit/{}".format(event_obj.get("rule_id")), - "Slink": "http://n9e.didiyun.com/event/{}".format(event_obj.get("id")) - } - - return values - -def email_content_gen(values): - return template('etc/script/tpl/mail.tpl', values) - -def sms_content_gen(values): - return template('etc/script/tpl/sms.tpl', values) - -def status_gen(priority,is_recovery): - is_recovery_str_m = {1: "恢复", 0: "告警"} - status = "P{} {}".format(priority, is_recovery_str_m.get(is_recovery)) - return status - -def subject_gen(priority,is_recovery,rule_name): - is_recovery_str_m = {1: "恢复", 0: "告警"} - subject = "P{} {} {}".format(priority, is_recovery_str_m.get(is_recovery), rule_name) - return subject - -def metric_gen(history_points): - metrics = [] - for item in history_points: - metrics.append(item.get("metric")) - return ",".join(metrics) - -def persist(payload, rule_id, event_id, trigger_time): - if not os.path.exists(LOCAL_EVENT_FILE_DIR): - os.makedirs(LOCAL_EVENT_FILE_DIR) - - filename = '%d_%d_%d' % (rule_id, event_id, trigger_time) - filepath = os.path.join(LOCAL_EVENT_FILE_DIR, filename) - with open(filepath, 'w') as f: - f.write(json.dumps(payload, indent=4)) - - -class Send(object): +class Sender(object): @classmethod - def send_email(cls, alert_content, payload): - users = payload.get("users") - emails = [x.get("email") for x in users] + def send_email(cls, payload): + users = payload.get('event').get("notify_users_obj") + + emails = {} + for u in users: + if 
u.get("email"): + emails[u.get("email")] = 1 + if not emails: return - recipients = emails - mail_body = email_content_gen(values_gen(payload)) + recipients = emails.keys() + mail_body = payload.get('tpls').get("mailbody.tpl", "mailbody.tpl not found") message = MIMEText(mail_body, 'html', 'utf-8') message['From'] = mail_from message['To'] = ", ".join(recipients) - message["Subject"] = subject_gen(payload.get("event").get("priority"),payload.get("event").get("is_recovery"),payload.get("event").get("rule_name")) + message["Subject"] = payload.get('tpls').get("subject.tpl", "subject.tpl not found") - smtp = smtplib.SMTP_SSL(mail_host, mail_port) - smtp.login(mail_user, mail_pass) - smtp.sendmail(mail_from, recipients, message.as_string()) - smtp.close() - - print("send_mail_success") + try: + smtp = smtplib.SMTP_SSL(mail_host, mail_port) + smtp.login(mail_user, mail_pass) + smtp.sendmail(mail_from, recipients, message.as_string()) + smtp.close() + except smtplib.SMTPException, error: + print(error) @classmethod - def send_wecom(cls, alert_content, payload): - users = payload.get("users") + def send_wecom(cls, payload): + users = payload.get('event').get("notify_users_obj") + + tokens = {} for u in users: contacts = u.get("contacts") - wecom_robot_token = contacts.get("wecom_robot_token", "") + if contacts.get("wecom_robot_token", ""): + tokens[contacts.get("wecom_robot_token", "")] = 1 - if wecom_robot_token == "": - continue + opener = urllib2.build_opener(urllib2.HTTPHandler()) + method = "POST" - wecom_api_url = "https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key={}".format(wecom_robot_token) - atMobiles = [u.get("phone")] - headers = {'Content-Type': 'application/json;charset=utf-8'} - payload = { - "msgtype": "text", - "text": { - "content": alert_content - }, - "at": { - "atMobiles": atMobiles, - "isAtAll": False + for t in tokens: + url = "https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key={}".format(t) + body = { + "msgtype": "markdown", + "markdown": { + "content": payload.get('tpls').get("wecom.tpl", "wecom.tpl not found") } } - res = requests.post(wecom_api_url, json.dumps(payload), headers=headers) - print(res.status_code) - print(res.text) - print("send_wecom") - + request = urllib2.Request(url, data=json.dumps(body)) + request.add_header("Content-Type",'application/json;charset=utf-8') + request.get_method = lambda: method + try: + connection = opener.open(request) + print(connection.read()) + except urllib2.HTTPError, error: + print(error) @classmethod - def send_dingtalk(cls, alert_content, payload): - # 钉钉发群信息需要群的webhook机器人 token,这个信息可以在user的contacts map中 + def send_dingtalk(cls, payload): + users = payload.get('event').get("notify_users_obj") - users = payload.get("users") + tokens = {} + phones = {} for u in users: + if u.get("phone"): + phones[u.get("phone")] = 1 + contacts = u.get("contacts") + if contacts.get("dingtalk_robot_token", ""): + tokens[contacts.get("dingtalk_robot_token", "")] = 1 - dingtalk_robot_token = contacts.get("dingtalk_robot_token", "") + opener = urllib2.build_opener(urllib2.HTTPHandler()) + method = "POST" - if dingtalk_robot_token == "": - print("dingtalk_robot_token_not_found") - continue - - dingtalk_api_url = "https://oapi.dingtalk.com/robot/send?access_token={}".format(dingtalk_robot_token) - atMobiles = [u.get("phone")] - headers = {'Content-Type': 'application/json;charset=utf-8'} - payload = { + for t in tokens: + url = "https://oapi.dingtalk.com/robot/send?access_token={}".format(t) + body = { "msgtype": "text", "text": { - "content": 
alert_content + "content": payload.get('tpls').get("dingtalk.tpl", "dingtalk.tpl not found") }, "at": { - "atMobiles": atMobiles, + "atMobiles": phones.keys(), "isAtAll": False } } - res = requests.post(dingtalk_api_url, json.dumps(payload), headers=headers) - print(res.status_code) - print(res.text) + request = urllib2.Request(url, data=json.dumps(body)) + request.add_header("Content-Type",'application/json;charset=utf-8') + request.get_method = lambda: method + try: + connection = opener.open(request) + print(connection.read()) + except urllib2.HTTPError, error: + print(error) - print("send_dingtalk") + @classmethod + def send_sms(cls, payload): + users = payload.get('event').get("notify_users_obj") + phones = {} + for u in users: + if u.get("phone"): + phones[u.get("phone")] = 1 + if phones: + print("send_sms not implemented, phones: {}".format(phones.keys())) + @classmethod + def send_voice(cls, payload): + users = payload.get('event').get("notify_users_obj") + phones = {} + for u in users: + if u.get("phone"): + phones[u.get("phone")] = 1 + if phones: + print("send_voice not implemented, phones: {}".format(phones.keys())) -def mail_test(): - print("mail_test_todo") +def main(): + payload = json.load(sys.stdin) + with open(".payload", 'w') as f: + f.write(json.dumps(payload, indent=4)) + for ch in payload.get('event').get('notify_channels'): + send_func_name = "send_{}".format(notify_channel_funcs.get(ch.strip())) + if not hasattr(Sender, send_func_name): + print("function: {} not found", send_func_name) + continue + send_func = getattr(Sender, send_func_name) + send_func(payload) - recipients = ["ulricqin@qq.com", "ulric@163.com"] - - payload = json.loads(json.dumps(TEST_ALERT_JSON)) - mail_body = email_content_gen(values_gen(payload)) - message = MIMEText(mail_body, 'html', 'utf-8') - message['From'] = mail_from - message['To'] = ", ".join(recipients) - message["Subject"] = subject_gen(payload.get("event").get("priority"),payload.get("event").get("is_recovery"),payload.get("event").get("rule_name")) - - smtp = smtplib.SMTP_SSL(mail_host, mail_port) - smtp.login(mail_user, mail_pass) - smtp.sendmail(mail_from, recipients, message.as_string()) - smtp.close() - - print("mail_test_done") +def hello(): + print("hello nightingale") if __name__ == "__main__": if len(sys.argv) == 1: main() - elif sys.argv[1] == "mail": - mail_test() + elif sys.argv[1] == "hello": + hello() else: - print("I am confused") - + print("I am confused") \ No newline at end of file diff --git a/etc/script/tpl/sms.tpl b/etc/script/tpl/sms.tpl deleted file mode 100644 index cfd70448..00000000 --- a/etc/script/tpl/sms.tpl +++ /dev/null @@ -1,14 +0,0 @@ -级别状态:{{Status}} -策略名称:{{Sname}} -% if IsMachineDep: -告警设备:{{Ident}} -挂载节点:{{Classpath}} -% end -监控指标:{{Metric}} -指标标签:{{Tags}} -当前值:{{!Value}} -报警说明:{{!ReadableExpression}} -触发时间:{{TriggerTime}} -报警详情:{{Elink}} -报警策略:{{Slink}} - diff --git a/etc/server.conf b/etc/server.conf new file mode 100644 index 00000000..b39ef8f2 --- /dev/null +++ b/etc/server.conf @@ -0,0 +1,188 @@ +# debug, release +RunMode = "release" + +# my cluster name +ClusterName = "Default" + +[Log] +# log write dir +Dir = "logs" +# log level: DEBUG INFO WARNING ERROR +Level = "INFO" +# stdout, stderr, file +Output = "stdout" +# # rotate by time +# KeepHours: 4 +# # rotate by size +# RotateNum = 3 +# # unit: MB +# RotateSize = 256 + +[HTTP] +# http listening address +Host = "0.0.0.0" +# http listening port +Port = 19000 +# https cert file path +CertFile = "" +# https key file path +KeyFile = "" +# 
whether print access log +PrintAccessLog = false +# whether enable pprof +PProf = false +# http graceful shutdown timeout, unit: s +ShutdownTimeout = 30 +# max content length: 64M +MaxContentLength = 67108864 +# http server read timeout, unit: s +ReadTimeout = 20 +# http server write timeout, unit: s +WriteTimeout = 40 +# http server idle timeout, unit: s +IdleTimeout = 120 + +# [BasicAuth] +# user002 = "ccc26da7b9aba533cbb263a36c07dcc9" + +[Heartbeat] +# auto detect if blank +IP = "" +# unit ms +Interval = 1000 + +[Alerting] +NotifyScriptPath = "./etc/script/notify.py" +NotifyConcurrency = 100 + +[Alerting.RedisPub] +Enable = false +# complete redis key: ${ChannelPrefix} + ${Cluster} +ChannelPrefix = "/alerts/" + +[NoData] +Metric = "target_up" +# unit: second +Interval = 15 + +[Ibex] +# callback: ${ibex}/${tplid}/${host} +Address = "127.0.0.1:10090" +# basic auth +BasicAuthUser = "ibex" +BasicAuthPass = "ibex" +# unit: ms +Timeout = 3000 + +[Redis] +# address, ip:port +Address = "127.0.0.1:6379" +# requirepass +Password = "" +# # db +# DB = 0 + +[Gorm] +# enable debug mode or not +Debug = false +# mysql postgres +DBType = "mysql" +# unit: s +MaxLifetime = 7200 +# max open connections +MaxOpenConns = 150 +# max idle connections +MaxIdleConns = 50 +# table prefix +TablePrefix = "" +# enable auto migrate or not +EnableAutoMigrate = false + +[MySQL] +# mysql address host:port +Address = "127.0.0.1:3306" +# mysql username +User = "root" +# mysql password +Password = "1234" +# database name +DBName = "n9e_v5" +# connection params +Parameters = "charset=utf8mb4&parseTime=True&loc=Local&allowNativePasswords=true" + +[Postgres] +# pg address host:port +Address = "127.0.0.1:5432" +# pg user +User = "root" +# pg password +Password = "1234" +# database name +DBName = "n9e_v5" +# ssl mode +SSLMode = "disable" + +[Reader] +# prometheus base url +Url = "http://127.0.0.1:9090" +# Basic auth username +BasicAuthUser = "" +# Basic auth password +BasicAuthPass = "" +# timeout settings, unit: ms +Timeout = 30000 +DialTimeout = 10000 +TLSHandshakeTimeout = 30000 +ExpectContinueTimeout = 1000 +IdleConnTimeout = 90000 +# time duration, unit: ms +KeepAlive = 30000 +MaxConnsPerHost = 0 +MaxIdleConns = 100 +MaxIdleConnsPerHost = 10 + +[WriterOpt] +# queue max size +QueueMaxSize = 10000000 +# once pop samples number from queue +QueuePopSize = 2000 +# unit: ms +SleepInterval = 50 + +[[Writers]] +Name = "prom" +Url = "http://127.0.0.1:9090/api/v1/write" +# Basic auth username +BasicAuthUser = "" +# Basic auth password +BasicAuthPass = "" +# timeout settings, unit: ms +Timeout = 30000 +DialTimeout = 10000 +TLSHandshakeTimeout = 30000 +ExpectContinueTimeout = 1000 +IdleConnTimeout = 90000 +# time duration, unit: ms +KeepAlive = 30000 +MaxConnsPerHost = 0 +MaxIdleConns = 100 +MaxIdleConnsPerHost = 100 + +# [[Writers]] +# Name = "m3db" +# Url = "http://127.0.0.1:7201/api/v1/prom/remote/write" +# # Basic auth username +# BasicAuthUser = "" +# # Basic auth password +# BasicAuthPass = "" +# timeout settings, unit: ms +# Timeout = 30000 +# DialTimeout = 10000 +# TLSHandshakeTimeout = 30000 +# ExpectContinueTimeout = 1000 +# IdleConnTimeout = 90000 +# # time duration, unit: ms +# KeepAlive = 30000 +# MaxConnsPerHost = 0 +# MaxIdleConns = 100 +# MaxIdleConnsPerHost = 100 \ No newline at end of file diff --git a/etc/server.yml b/etc/server.yml deleted file mode 100644 index aa15695a..00000000 --- a/etc/server.yml +++ /dev/null @@ -1,126 +0,0 @@ -logger: - dir: logs - level: DEBUG - # # rotate by time - # keepHours: 4 - # rotate 
by size - rotatenum: 3 - rotatesize: 256 # unit: MB - -http: - mode: release - # whether print access log to DEBUG.log - access: false - listen: 0.0.0.0:8000 - pprof: false - cookieName: n9e - cookieDomain: "" - cookieMaxAge: 86400 - cookieSecure: false - cookieHttpOnly: true - cookieSecret: 4696709ab8cc3ff2fea17b930158516b - csrfSecret: 15b8ea164b5d3d9254677053c72a19f1 - -rpc: - listen: 0.0.0.0:9000 - -mysql: - addr: "root:1234@tcp(127.0.0.1:3306)/n9e?charset=utf8&parseTime=True&loc=Asia%2FShanghai" - max: 128 - idle: 16 - debug: false - -# i18n: -# # zh | en -# lang: zh -# dictPath: etc/i18n.json - -# heartbeat: -# # auto detect if blank -# ip: "" -# # unit: ms -# interval: 1000 - -# ldap: -# enable: false -# host: ldap.example.org -# port: 389 -# baseDn: "dc=example,dc=org" -# # AD: manange@example.org -# bindUser: "cn=manager,dc=example,dc=org" -# bindPass: "*******" -# # openldap: (&(uid=%s)) -# # AD: (&(sAMAccountName=%s)) -# authFilter: "(&(uid=%s))" -# attributes: -# nickname: "cn" -# email: "mail" -# phone: "mobile" -# coverAttributes: false -# autoRegist: true -# tls: false -# startTLS: false - -# judge: -# readBatch: 2000 -# connTimeout: 2000 -# callTimeout: 5000 -# writerNum: 256 -# connMax: 2560 -# connIdle: 256 - -# alert: -# notifyScriptPath: ./etc/script/notify.py -# notifyConcurrency: 200 -# mutedAlertPersist: true - -trans: - enable: true - backend: - datasource: "prometheus" - prometheus: - enable: true - name: prometheus - batch: 100000 - maxRetry: 5 - # prometheus 查询返回最大点数query.max-samples - maxSamples: 50000000 - # prometheus并发的查询 query.max-concurrency - maxConcurrentQuery: 20 - # prometheus 回查窗口 query.lookback-delta - lookbackDeltaMinute: 2 - # 查询全量索引时时间窗口限制,降低高基数 - maxFetchAllSeriesLimitMinute: 5 - # 查询接口耗时超过多少秒就打印warning日志记录 - slowLogRecordSecond: 3 - # remote_read时,如果没有查询条件则用这条默认的ql查询 - # 注意! 
ql匹配series越多,造成的oom或者慢查询可能越大 - defaultFetchSeriesQl: '{__name__=~"system.*"}' - remoteWrite: - # m3db的配置 - #- name: m3db01 - # url: http://localhost:7201/api/v1/prom/remote/write - # remoteTimeoutSecond: 5 - - # prometheus的配置 - - name: prome01 - url: http://localhost:9090/api/v1/write - remoteTimeoutSecond: 5 - remoteRead: - - name: prome01 - url: http://localhost:9090/api/v1/read - remoteTimeoutSecond: 5 - - -contactKeys: - - label: "Wecom Robot Token" - key: wecom_robot_token - - label: "Dingtalk Robot Token" - key: dingtalk_robot_token - -notifyChannels: - - email - - sms - - voice - - dingtalk - - wecom diff --git a/etc/service/n9e-server.service b/etc/service/n9e-server.service index 02d345f7..9457d3da 100644 --- a/etc/service/n9e-server.service +++ b/etc/service/n9e-server.service @@ -1,13 +1,14 @@ [Unit] Description="n9e-server" +After=network.target [Service] Type=simple -ExecStart=/opt/n9e/server/n9e-server -WorkingDirectory=/opt/n9e/server -Restart=always -RestartSecs=1s +ExecStart=/root/gopath/src/n9e/n9e server +WorkingDirectory=/root/gopath/src/n9e + +Restart=on-failure SuccessExitStatus=0 LimitNOFILE=65536 StandardOutput=syslog diff --git a/etc/service/n9e-webapi.service b/etc/service/n9e-webapi.service new file mode 100644 index 00000000..8669cecc --- /dev/null +++ b/etc/service/n9e-webapi.service @@ -0,0 +1,20 @@ +[Unit] +Description="n9e-webapi" +After=network.target + +[Service] +Type=simple + +ExecStart=/root/gopath/src/n9e/n9e webapi +WorkingDirectory=/root/gopath/src/n9e + +Restart=on-failure +SuccessExitStatus=0 +LimitNOFILE=65536 +StandardOutput=syslog +StandardError=syslog +SyslogIdentifier=n9e-webapi + + +[Install] +WantedBy=multi-user.target diff --git a/etc/template/dingtalk.tpl b/etc/template/dingtalk.tpl new file mode 100644 index 00000000..95019eba --- /dev/null +++ b/etc/template/dingtalk.tpl @@ -0,0 +1,6 @@ +级别状态: S{{.Severity}} {{if .IsRecovered}}Recovered{{else}}Triggered{{end}} +规则名称: {{.RuleName}}{{if .RuleNote}} +规则备注: {{.RuleNote}}{{end}} +监控指标: {{.TagsJSON}} +触发时间: {{timeformat .TriggerTime}} +触发时值: {{.TriggerValue}} \ No newline at end of file diff --git a/etc/template/mailbody.tpl b/etc/template/mailbody.tpl new file mode 100644 index 00000000..a45e35b6 --- /dev/null +++ b/etc/template/mailbody.tpl @@ -0,0 +1,195 @@ + + + + + + 夜莺告警通知 + + + +
+{{/* HTML table/layout markup omitted in this excerpt; the mail body renders the fields below */}}
+{{.RuleName}}
+{{if .IsRecovered}}
+级别状态:S{{.Severity}} Recovered
+{{else}}
+级别状态:S{{.Severity}} Triggered
+{{end}}
+策略备注:{{.RuleNote}}
+设备备注:{{.TargetNote}}
+监控指标:{{.TagsJSON}}
+触发时值:{{.TriggerValue}}
+触发时间:{{timeformat .TriggerTime}}
+PromQL:{{.PromQl}}
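The notification files added under etc/template (dingtalk.tpl above, this mail body, and the subject/wecom templates that follow) use Go template syntax. A minimal rendering sketch, not the project's actual renderer: the AlertEvent struct below is a hypothetical subset of the fields the templates reference, and the signature of the timeformat helper (which the .tpl files call as {{timeformat .TriggerTime}}) is assumed to take a unix timestamp.

    package main

    import (
        "os"
        "text/template"
        "time"
    )

    // Hypothetical subset of the event fields referenced by the templates.
    type AlertEvent struct {
        IsRecovered  bool
        Severity     int
        RuleName     string
        RuleNote     string
        TargetNote   string
        TagsJSON     string
        TriggerTime  int64
        TriggerValue string
        PromQl       string
    }

    func main() {
        funcs := template.FuncMap{
            // Assumed helper: format a unix timestamp for display.
            "timeformat": func(ts int64) string {
                return time.Unix(ts, 0).Format("2006-01-02 15:04:05")
            },
        }
        tpl := template.Must(template.New("dingtalk.tpl").Funcs(funcs).
            ParseFiles("etc/template/dingtalk.tpl"))
        evt := AlertEvent{
            Severity:     2,
            RuleName:     "cpu_usage_high",
            TagsJSON:     `{"ident":"n9e01"}`,
            TriggerTime:  time.Now().Unix(),
            TriggerValue: "93.5",
        }
        if err := tpl.Execute(os.Stdout, evt); err != nil {
            panic(err)
        }
    }

For the HTML mail body, html/template would be the safer parser choice, but the field set it consumes is the same.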
+ + \ No newline at end of file diff --git a/etc/template/subject.tpl b/etc/template/subject.tpl new file mode 100644 index 00000000..ec241bc3 --- /dev/null +++ b/etc/template/subject.tpl @@ -0,0 +1 @@ +{{if .IsRecovered}}Recovered{{else}}Triggered{{end}}: {{.RuleName}} {{.TagsJSON}} \ No newline at end of file diff --git a/etc/template/wecom.tpl b/etc/template/wecom.tpl new file mode 100644 index 00000000..1513cbd5 --- /dev/null +++ b/etc/template/wecom.tpl @@ -0,0 +1,6 @@ +**级别状态**: {{if .IsRecovered}}S{{.Severity}} Recovered{{else}}S{{.Severity}} Triggered{{end}} +**规则标题**: {{.RuleName}}{{if .RuleNote}} +**规则备注**: {{.RuleNote}}{{end}} +**监控指标**: {{.TagsJSON}} +**触发时间**: {{timeformat .TriggerTime}} +**触发时值**: {{.TriggerValue}} \ No newline at end of file diff --git a/etc/webapi.conf b/etc/webapi.conf new file mode 100644 index 00000000..652bb0c8 --- /dev/null +++ b/etc/webapi.conf @@ -0,0 +1,166 @@ +# debug, release +RunMode = "release" + +# # custom i18n dict config +# I18N = "./etc/i18n.json" + +# do not change +AdminRole = "Admin" + +# Linkage with notify.py script +NotifyChannels = [ "email", "dingtalk", "wecom" ] + +[[ContactKeys]] +Label = "Wecom Robot Token" +Key = "wecom_robot_token" + +[[ContactKeys]] +Label = "Dingtalk Robot Token" +Key = "dingtalk_robot_token" + +[Log] +# log write dir +Dir = "logs" +# log level: DEBUG INFO WARNING ERROR +Level = "DEBUG" +# stdout, stderr, file +Output = "stdout" +# # rotate by time +# KeepHours: 4 +# # rotate by size +# RotateNum = 3 +# # unit: MB +# RotateSize = 256 + +[HTTP] +# http listening address +Host = "0.0.0.0" +# http listening port +Port = 18000 +# https cert file path +CertFile = "" +# https key file path +KeyFile = "" +# whether print access log +PrintAccessLog = true +# whether enable pprof +PProf = false +# http graceful shutdown timeout, unit: s +ShutdownTimeout = 30 +# max content length: 64M +MaxContentLength = 67108864 +# http server read timeout, unit: s +ReadTimeout = 20 +# http server write timeout, unit: s +WriteTimeout = 40 +# http server idle timeout, unit: s +IdleTimeout = 120 + +[JWTAuth] +# signing key +SigningKey = "5b94a0fd640fe2765af826acfe42d151" +# unit: min +AccessExpired = 1500 +# unit: min +RefreshExpired = 10080 +RedisKeyPrefix = "/jwt/" + +[BasicAuth] +user001 = "ccc26da7b9aba533cbb263a36c07dcc5" + +[LDAP] +Enable = false +Host = "ldap.example.org" +Port = 389 +BaseDn = "dc=example,dc=org" +# AD: manange@example.org +BindUser = "cn=manager,dc=example,dc=org" +BindPass = "*******" +# openldap format e.g. (&(uid=%s)) +# AD format e.g. 
(&(sAMAccountName=%s)) +AuthFilter = "(&(uid=%s))" +CoverAttributes = true +TLS = false +StartTLS = true + +[LDAP.Attributes] +Nickname = "cn" +Phone = "mobile" +Email = "mail" + +[Redis] +# address, ip:port +Address = "127.0.0.1:6379" +# requirepass +Password = "" +# # db +# DB = 0 + +[Gorm] +# enable debug mode or not +Debug = true +# mysql postgres +DBType = "mysql" +# unit: s +MaxLifetime = 7200 +# max open connections +MaxOpenConns = 150 +# max idle connections +MaxIdleConns = 50 +# table prefix +TablePrefix = "" +# enable auto migrate or not +EnableAutoMigrate = false + +[MySQL] +# mysql address host:port +Address = "127.0.0.1:3306" +# mysql username +User = "root" +# mysql password +Password = "1234" +# database name +DBName = "n9e_v5" +# connection params +Parameters = "charset=utf8mb4&parseTime=True&loc=Local&allowNativePasswords=true" + +[Postgres] +# pg address host:port +Address = "127.0.0.1:5432" +# pg user +User = "root" +# pg password +Password = "1234" +# database name +DBName = "n9e_v5" +# ssl mode +SSLMode = "disable" + +[[Clusters]] +# Prometheus cluster name +Name = "Default" +# Prometheus APIs base url +Prom = "http://127.0.0.1:9090" +# Basic auth username +BasicAuthUser = "" +# Basic auth password +BasicAuthPass = "" +# timeout settings, unit: ms +Timeout = 30000 +DialTimeout = 10000 +TLSHandshakeTimeout = 30000 +ExpectContinueTimeout = 1000 +IdleConnTimeout = 90000 +# time duration, unit: ms +KeepAlive = 30000 +MaxConnsPerHost = 0 +MaxIdleConns = 100 +MaxIdleConnsPerHost = 100 + +[Ibex] +Address = "http://127.0.0.1:10090" +# basic auth +BasicAuthUser = "ibex" +BasicAuthPass = "ibex" +# unit: ms +Timeout = 3000 \ No newline at end of file diff --git a/go.mod b/go.mod index d15f6faf..3a9733b3 100644 --- a/go.mod +++ b/go.mod @@ -3,53 +3,31 @@ module github.com/didi/nightingale/v5 go 1.14 require ( - github.com/armon/go-metrics v0.3.4 // indirect - github.com/gin-contrib/gzip v0.0.3 + github.com/dgrijalva/jwt-go v3.2.0+incompatible + github.com/fatih/camelcase v1.0.0 // indirect + github.com/fatih/structs v1.1.0 // indirect github.com/gin-contrib/pprof v1.3.0 - github.com/gin-contrib/sessions v0.0.3 - github.com/gin-gonic/gin v1.7.0 - github.com/go-kit/kit v0.10.0 - github.com/go-ldap/ldap/v3 v3.2.4 - github.com/go-sql-driver/mysql v1.5.0 - github.com/gogo/protobuf v1.3.2 - github.com/golang/snappy v0.0.3 - github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99 // indirect - github.com/gorilla/sessions v1.2.0 // indirect - github.com/hashicorp/go-immutable-radix v1.2.0 // indirect - github.com/hashicorp/go-msgpack v0.5.5 // indirect - github.com/hashicorp/go-uuid v1.0.2 // indirect - github.com/hashicorp/golang-lru v0.5.4 // indirect - github.com/hashicorp/hcl v1.0.1-0.20190611123218-cf7d376da96d // indirect - github.com/magiconair/properties v1.8.2 // indirect + github.com/gin-gonic/gin v1.7.4 + github.com/go-ldap/ldap/v3 v3.4.1 + github.com/go-redis/redis/v8 v8.11.3 + github.com/golang/protobuf v1.5.2 + github.com/golang/snappy v0.0.4 + github.com/google/uuid v1.3.0 + github.com/json-iterator/go v1.1.12 + github.com/koding/multiconfig v0.0.0-20171124222453-69c27309b2d7 github.com/mattn/go-isatty v0.0.12 - github.com/n9e/agent-payload v0.0.0-20210619031503-b72325474651 - github.com/opentracing-contrib/go-stdlib v1.0.0 - github.com/opentracing/opentracing-go v1.2.0 - github.com/orcaman/concurrent-map v0.0.0-20210106121528-16402b402231 + github.com/orcaman/concurrent-map v0.0.0-20210501183033-44dafcb38ecc github.com/pkg/errors v0.9.1 - 
github.com/prometheus/client_golang v1.9.0 - github.com/prometheus/common v0.17.0 - github.com/prometheus/prometheus v1.8.2-0.20210220213500-8c8de46003d1 - github.com/smartystreets/assertions v1.0.0 // indirect - github.com/spaolacci/murmur3 v1.1.0 // indirect - github.com/spf13/cast v1.3.1-0.20190531151931-f31dc0aaab5a // indirect - github.com/spf13/jwalterweatherman v1.1.0 // indirect - github.com/spf13/viper v1.7.1 - github.com/subosito/gotenv v1.2.1-0.20190917103637-de67a6614a4d // indirect - github.com/toolkits/pkg v1.1.3 - github.com/ugorji/go/codec v1.1.7 - go.uber.org/atomic v1.7.0 - go.uber.org/automaxprocs v1.4.0 // indirect - golang.org/x/text v0.3.5 - gopkg.in/ini.v1 v1.51.1 // indirect - xorm.io/builder v0.3.7 - xorm.io/xorm v1.0.7 + github.com/prometheus/client_golang v1.11.0 + github.com/prometheus/common v0.26.0 + github.com/prometheus/prometheus v2.5.0+incompatible + github.com/toolkits/pkg v1.2.9 + github.com/urfave/cli/v2 v2.3.0 + golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d // indirect + golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e // indirect + google.golang.org/genproto v0.0.0-20211007155348-82e027067bd4 // indirect + google.golang.org/grpc v1.41.0 // indirect + gorm.io/driver/mysql v1.1.2 + gorm.io/driver/postgres v1.1.1 + gorm.io/gorm v1.21.15 ) - -// branch 0.9.3-pool-read-binary-3 -replace github.com/apache/thrift => github.com/m3db/thrift v0.0.0-20190820191926-05b5a2227fe4 - -// Fix legacy import path - https://github.com/uber-go/atomic/pull/60 -replace github.com/uber-go/atomic => github.com/uber-go/atomic v1.4.0 - -replace google.golang.org/grpc => google.golang.org/grpc v1.26.0 diff --git a/go.sum b/go.sum index 01f8a7cd..082f5c1d 100644 --- a/go.sum +++ b/go.sum @@ -1,408 +1,104 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.43.0/go.mod h1:BOSR3VbTLkk6FDC/TcffxP4NF/FFBGA5ku+jvKOP7pg= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= 
-cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -gitea.com/xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a h1:lSA0F4e9A2NcQSqGqTOXqu2aRi/XEQxDCBwM8yJtE6s= -gitea.com/xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a/go.mod h1:EXuID2Zs0pAQhH8yz+DNjUbjppKQzKFAn28TMYPB6IU= -github.com/Azure/azure-sdk-for-go v51.2.0+incompatible h1:qQNk//OOHK0GZcgMMgdJ4tZuuh0zcOeUkpTxjvKFpSQ= -github.com/Azure/azure-sdk-for-go v51.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= -github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= -github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= -github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= -github.com/Azure/go-autorest/autorest v0.11.18 h1:90Y4srNYrwOtAgVo3ndrQkTYn6kf1Eg/AjTFJ8Is2aM= -github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= -github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= -github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= -github.com/Azure/go-autorest/autorest/adal v0.9.13 h1:Mp5hbtOePIzM8pJVRa3YLrWWmZtoxRXqUEzCfJt3+/Q= -github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= -github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= -github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= 
-github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= -github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= -github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= -github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac= -github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E= -github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= -github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= -github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/Azure/go-ntlmssp v0.0.0-20200615164410-66371956d46c h1:/IBSNwUN8+eKzUzbJPqhK839ygXJ82sde8x3ogr6R28= github.com/Azure/go-ntlmssp v0.0.0-20200615164410-66371956d46c/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= -github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/DataDog/mmh3 v0.0.0-20200805151601-30884ca2197a/go.mod h1:SvsjzyJlSg0rKsqYgdcFxeEVflx3ZNAyFfkUHP0TxXg= -github.com/DataDog/zstd_0 v0.0.0-20210310093942-586c1286621f/go.mod h1:oXfOhM/Kr8OvqS6tVqJwxPBornV0yrx3bc+l0BDr7PQ= -github.com/HdrHistogram/hdrhistogram-go v1.0.1 h1:GX8GAYDuhlFQnI2fRDHQhTlkHMz8bEn0jTI6LJU0mpw= -github.com/HdrHistogram/hdrhistogram-go v1.0.1/go.mod h1:BWJ+nMSHY3L41Zj7CA3uXnloDp7xxV0YvstAE7nKTaM= -github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= -github.com/Microsoft/go-winio v0.4.16 h1:FtSW/jqD+l4ba5iPBj9CODVtgfYAD8w2wS923g/cFDk= -github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= -github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= +github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/PuerkitoBio/goquery v1.5.1/go.mod h1:GsLWisAFVj4WgDibEWF4pvYnkVQBpKBKeU+7zCJoLcc= -github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/Shopify/sarama v1.19.0/go.mod 
h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= -github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= -github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= -github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= -github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= -github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-metrics v0.3.4 h1:Xqf+7f2Vhl9tsqDYmXhnXInUdcrtgpRNpIA15/uldSc= -github.com/armon/go-metrics v0.3.4/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= -github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= -github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= -github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= -github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= -github.com/aws/aws-sdk-go v1.37.8 h1:9kywcbuz6vQuTf+FD+U7FshafrHzmqUCjgAEiLuIJ8U= -github.com/aws/aws-sdk-go v1.37.8/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= 
-github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= -github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c= -github.com/boj/redistore v0.0.0-20180917114910-cd5dcc76aeff/go.mod h1:+RTT1BOk5P97fT2CiHkbFQwkK3mjsFAP6zCYV2aXtjw= -github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= -github.com/bradfitz/gomemcache v0.0.0-20190329173943-551aad21a668/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA= -github.com/bradleypeabody/gorilla-sessions-memcache v0.0.0-20181103040241-659414f458e1/go.mod h1:dkChI7Tbtx7H1Tj7TqGSZMOeGpMP5gLHtjroHd4agiI= -github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= -github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= -github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/cenkalti/backoff/v4 v4.0.2/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= -github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= -github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= -github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= -github.com/containerd/containerd v1.4.3 h1:ijQT13JedHSHrQGWFcGEwzcNKrAGIiZ+jSD5QQG07SY= -github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= 
-github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= +github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d h1:U+s90UTSYgptZMwQh2aRr3LuazLJIa+Pg3Kc1ylSYVY= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw= -github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/denisenkom/go-mssqldb v0.0.0-20200428022330-06a60b6afbbc/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/digitalocean/godo v1.57.0 h1:uCpe0sRIZ/sJWxWDsJyBPBjUfSvxop+WHkHiSf+tjjM= -github.com/digitalocean/godo v1.57.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU= -github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= -github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v20.10.3+incompatible h1:+HS4XO73J41FpA260ztGujJ+0WibrA2TPJEnWNSyGNE= -github.com/docker/docker v20.10.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/go-connections v0.4.0 
h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= -github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= -github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= -github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts= -github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw= -github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses= -github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= -github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= -github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk= -github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod 
h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= -github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= +github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8= +github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= +github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/garyburd/redigo v1.6.2/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/gin-contrib/gzip v0.0.3 h1:etUaeesHhEORpZMp18zoOhepboiWnFtXrBZxszWUn4k= -github.com/gin-contrib/gzip v0.0.3/go.mod h1:YxxswVZIqOvcHEQpsSn+QF5guQtO1dCfy0shBPy4jFc= github.com/gin-contrib/pprof v1.3.0 h1:G9eK6HnbkSqDZBYbzG4wrjCsA4e+cvYAHUZw6W+W9K0= github.com/gin-contrib/pprof v1.3.0/go.mod h1:waMjT1H9b179t3CxuG1cV3DHpga6ybizwfBaM5OXaB0= -github.com/gin-contrib/sessions v0.0.3 h1:PoBXki+44XdJdlgDqDrY5nDVe3Wk7wDV/UCOuLP6fBI= -github.com/gin-contrib/sessions v0.0.3/go.mod h1:8C/J6cad3Il1mWYYgtw0w+hqasmpvy25mPkXdOgeB9I= github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= -github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do= github.com/gin-gonic/gin v1.6.2/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= -github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= -github.com/gin-gonic/gin v1.7.0 h1:jGB9xAJQ12AIGNB4HguylppmDK1Am9ppF7XnGXXJuoU= -github.com/gin-gonic/gin v1.7.0/go.mod h1:jD2toBW3GZUr5UMcdrwQA10I7RuaFOl/SGeDjXkfUtY= -github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= -github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= +github.com/gin-gonic/gin v1.7.4 h1:QmUZXrvJ9qZ3GfWvQ+2wnW/1ePrTEJqPKMYEU3lD/DM= +github.com/gin-gonic/gin v1.7.4/go.mod h1:jD2toBW3GZUr5UMcdrwQA10I7RuaFOl/SGeDjXkfUtY= github.com/go-asn1-ber/asn1-ber v1.5.1 h1:pDbRAunXzIUXfx4CB2QJFv5IuPiuoW+sWvr/Us009o8= github.com/go-asn1-ber/asn1-ber v1.5.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod 
h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo= -github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= -github.com/go-ldap/ldap/v3 v3.2.4 h1:PFavAq2xTgzo/loE8qNXcQaofAaqIpI4WgaLdv+1l3E= -github.com/go-ldap/ldap/v3 v3.2.4/go.mod h1:iYS1MdmrmceOJ1QOTnRXrIs7i3kloqtmGQjRvjKpyMg= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-ldap/ldap/v3 v3.4.1 h1:fU/0xli6HY02ocbMuozHAYsaHLcnkLjvho2r5a34BUU= +github.com/go-ldap/ldap/v3 v3.4.1/go.mod h1:iYS1MdmrmceOJ1QOTnRXrIs7i3kloqtmGQjRvjKpyMg= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc= -github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= -github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= -github.com/go-openapi/analysis v0.19.4/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= -github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= -github.com/go-openapi/analysis v0.19.10/go.mod h1:qmhS3VNFxBlquFJ0RGoDtylO9y4pgTAUNE9AEEMdlJQ= -github.com/go-openapi/analysis v0.19.16/go.mod h1:GLInF007N83Ad3m8a/CbQ5TPzdnGT7workfHwuVjNVk= -github.com/go-openapi/analysis v0.20.0/go.mod h1:BMchjvaHDykmRMsK40iPtvyOfFdMMxlOmQr9FBZk+Og= -github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= -github.com/go-openapi/errors v0.19.3/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= -github.com/go-openapi/errors v0.19.4/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= -github.com/go-openapi/errors v0.19.6/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.19.7/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= 
-github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= -github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= -github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= -github.com/go-openapi/loads v0.19.3/go.mod h1:YVfqhUCdahYwR3f3iiwQLhicVRvLlU/WO5WPaZvcvSI= -github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk= -github.com/go-openapi/loads v0.19.5/go.mod h1:dswLCAdonkRufe/gSUC3gN8nTSaB9uaS2es0x5/IbjY= -github.com/go-openapi/loads v0.19.6/go.mod h1:brCsvE6j8mnbmGBh103PT/QLHfbyDxA4hsKvYBNEGVc= -github.com/go-openapi/loads v0.19.7/go.mod h1:brCsvE6j8mnbmGBh103PT/QLHfbyDxA4hsKvYBNEGVc= -github.com/go-openapi/loads v0.20.0/go.mod h1:2LhKquiE513rN5xC6Aan6lYOSddlL8Mp20AW9kpviM4= -github.com/go-openapi/loads v0.20.2/go.mod h1:hTVUotJ+UonAMMZsvakEgmWKgtulweO9vYP2bQYKA/o= -github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= -github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= -github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= -github.com/go-openapi/runtime v0.19.15/go.mod h1:dhGWCTKRXlAfGnQG0ONViOZpjfg0m2gUt9nTQPQZuoo= -github.com/go-openapi/runtime v0.19.16/go.mod h1:5P9104EJgYcizotuXhEuUrzVc+j1RiSjahULvYmlv98= -github.com/go-openapi/runtime v0.19.24/go.mod h1:Lm9YGCeecBnUUkFTxPC4s1+lwrkJ0pthx8YvyjCfkgk= -github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= -github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= -github.com/go-openapi/spec v0.19.6/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= -github.com/go-openapi/spec v0.19.8/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= -github.com/go-openapi/spec v0.19.15/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFutcFKaVbmU= -github.com/go-openapi/spec v0.20.0/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFutcFKaVbmU= -github.com/go-openapi/spec v0.20.1/go.mod h1:93x7oh+d+FQsmsieroS4cmR3u0p/ywH649a3qwC9OsQ= -github.com/go-openapi/spec v0.20.3/go.mod h1:gG4F8wdEDN+YPBMVnzE85Rbhf+Th2DTvA9nFPQ5AYEg= -github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= -github.com/go-openapi/strfmt v0.19.2/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= -github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= -github.com/go-openapi/strfmt v0.19.4/go.mod 
h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= -github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= -github.com/go-openapi/strfmt v0.19.11/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc= -github.com/go-openapi/strfmt v0.20.0/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc= -github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.7/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= -github.com/go-openapi/swag v0.19.9/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= -github.com/go-openapi/swag v0.19.12/go.mod h1:eFdyEBkTdoAf/9RXBvj4cr1nH7GD8Kzo5HTt47gr72M= -github.com/go-openapi/swag v0.19.13/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= -github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= -github.com/go-openapi/validate v0.19.3/go.mod h1:90Vh6jjkTn+OT1Eefm0ZixWNFjhtOH7vS9k0lo6zwJo= -github.com/go-openapi/validate v0.19.8/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= -github.com/go-openapi/validate v0.19.10/go.mod h1:RKEZTUWDkxKQxN2jDT7ZnZi2bhZlbNMAuKvKB+IaGx8= -github.com/go-openapi/validate v0.19.12/go.mod h1:Rzou8hA/CBw8donlS6WNEUQupNvUZ0waH08tGe6kAQ4= -github.com/go-openapi/validate v0.19.15/go.mod h1:tbn/fdOwYHgrhPBzidZfJC2MIVvs9GA7monOmWBbeCI= -github.com/go-openapi/validate v0.20.1/go.mod h1:b60iJT+xNNLfaQJUqLI7946tYiFEOuE9E4k54HpKcJ0= -github.com/go-openapi/validate v0.20.2/go.mod h1:e7OJoKNgd0twXZwIn0A43tHbvIcr/rZIVCbJBpTUoY0= github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= -github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM= github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= -github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY= github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no= github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= github.com/go-playground/validator/v10 v10.4.1 h1:pH2c5ADXtd66mxoE0Zm9SUhxE20r7aM3F26W0hOn+GE= github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= -github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs= -github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= -github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= 
+github.com/go-redis/redis/v8 v8.11.3 h1:GCjoYp8c+yQTJfc0n69iwSiHjvuAdruxl7elnZCxgt8= +github.com/go-redis/redis/v8 v8.11.3/go.mod h1:xNJ9xDG09FsIPwh3bWdk+0oDWHbtF9rPN0F/oD9XeKc= +github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= +github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-zookeeper/zk v1.0.2 h1:4mx0EYENAdX/B/rbunjlt5+4RTA/a9SMHBRuSKdGxPM= -github.com/go-zookeeper/zk v1.0.2/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= -github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= -github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= -github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= -github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= -github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= -github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs= -github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= -github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= -github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= -github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28= -github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo= -github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk= -github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw= -github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= -github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg= -github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE= -github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8= -github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= -github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= -github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= -github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= -github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= -github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= -github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= -github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw= +github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo= github.com/gogo/protobuf 
v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= -github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= -github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -410,896 +106,349 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod 
h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= -github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= -github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod 
h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210208152844-1612e9be7af6/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I= -github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= -github.com/gophercloud/gophercloud v0.15.0 h1:jQeAWj0s1p83+TrUXhJhEOK4oe2g6YcBcFwEyMNIjEk= -github.com/gophercloud/gophercloud v0.15.0/go.mod h1:VX0Ibx85B60B5XOrZr6kaNwrmPUzcmMpwxvQ1WQIIWM= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99 h1:twflg0XRTjwKpxb/jFExr4HGq6on2dEOmnL6FV+fgPw= -github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8= -github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw= -github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ= -github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= -github.com/gorilla/sessions v1.1.1/go.mod h1:8KCfur6+4Mqcc6S0FEfKuN15Vl5MgXW92AE8ovaJD0w= -github.com/gorilla/sessions v1.1.3/go.mod h1:8KCfur6+4Mqcc6S0FEfKuN15Vl5MgXW92AE8ovaJD0w= -github.com/gorilla/sessions v1.2.0 h1:S7P+1Hm5V/AT9cjEcUD5uDaQSX0OE577aCXgoaKpYbQ= -github.com/gorilla/sessions v1.2.0/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= -github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.2/go.mod 
h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= -github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= -github.com/hashicorp/consul/api v1.8.1 h1:BOEQaMWoGMhmQ29fC26bi0qb7/rId9JzZP2V0Xmx7m8= -github.com/hashicorp/consul/api v1.8.1/go.mod h1:sDjTOq0yUyv5G4h+BqSea7Fn6BU+XbolEz1952UB+mk= -github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/consul/sdk v0.7.0 h1:H6R9d008jDcHPQPAqPNuydAshJ4v5/8URdFnUvK/+sc= -github.com/hashicorp/consul/sdk v0.7.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= -github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-hclog v0.12.0 h1:d4QkX8FRTYaKaCZBoXYY8zJX2BXjWxurN/GA2tkrmZM= -github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-immutable-radix v1.2.0 h1:l6UW37iCXwZkZoAbEYnptSHVE/cQ5bOTPYG5W3vf9+8= -github.com/hashicorp/go-immutable-radix v1.2.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= -github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI= -github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= -github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= 
-github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= -github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= -github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= -github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= -github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/hcl v1.0.1-0.20190611123218-cf7d376da96d h1:r4iSf+UX1tNxFJZ64FsUoOfysT7TePSbRNz4/mYGUIE= -github.com/hashicorp/hcl v1.0.1-0.20190611123218-cf7d376da96d/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/memberlist v0.2.2 h1:5+RffWKwqJ71YPu9mWsF7ZOscZmwfasdA8kbdC7AO2g= -github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hashicorp/serf v0.9.5 h1:EBWvyu9tcRszt3Bxp3KNssBMP1KuHWyO51lz9+786iM= -github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= -github.com/hetznercloud/hcloud-go v1.23.1 h1:SkYdCa6x458cMSDz5GI18iPz5j2hicACiDP6J/s/bTs= -github.com/hetznercloud/hcloud-go v1.23.1/go.mod h1:xng8lbDUg+xM1dgc0yGHX5EeqbwIq7UYlMWMTx3SQVg= -github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/influxdata/flux v0.65.1/go.mod 
h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY= -github.com/influxdata/influxdb v1.8.4/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI= -github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= -github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk= -github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE= -github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19ybifQhZoQNF5D8= -github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE= -github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0= -github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po= -github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= -github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= -github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= +github.com/jackc/chunkreader v1.0.0 h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZbe0= +github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= +github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= +github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA= +github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE= +github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s= +github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= +github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY= +github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= +github.com/jackc/pgconn v1.10.0 h1:4EYhlDVEMsJ30nNj0mmgwIUXoq7e9sMJrVC2ED6QlCU= +github.com/jackc/pgconn v1.10.0/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= +github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= +github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= +github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= +github.com/jackc/pgmock 
v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c= +github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65 h1:DadwsjnMwFjfWc9y5Wi/+Zz7xoE5ALHsRQlOctkOiHc= +github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgproto3 v1.1.0 h1:FYYE4yRw+AgI8wXIinMlNjBbp/UitDJwfj5LqqewP1A= +github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= +github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.1.1 h1:7PQ/4gLoqnl87ZxL7xjO0DR5gYuviDCZxQJsUlFW1eI= +github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg= +github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= +github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= +github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= +github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= +github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM= +github.com/jackc/pgtype v1.8.1 h1:9k0IXtdJXHJbyAWQgbWr1lU+MEhPXZz6RIXxfR5oxXs= +github.com/jackc/pgtype v1.8.1/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= +github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= +github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= +github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= +github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs= +github.com/jackc/pgx/v4 v4.13.0 h1:JCjhT5vmhMAf/YwBHLvrBn4OGdIQBiFG6ym8Zmdx570= +github.com/jackc/pgx/v4 v4.13.0/go.mod h1:9P4X524sErlaxj0XSGZk7s+LD0eOyu1ZDUrrpznYDF0= +github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= +github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= +github.com/jinzhu/now v1.1.2 h1:eVKgfIdy9b6zbWBMgFpfDPoAMifwSZagU9HmEU6zgiI= +github.com/jinzhu/now v1.1.2/go.mod 
h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o= -github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= -github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0= -github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= -github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= -github.com/kidstuff/mongostore v0.0.0-20181113001930-e650cd85ee4b/go.mod h1:g2nVr8KZVXJSS97Jo8pJ0jgq29P6H7dG0oplUA86MQw= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= -github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= -github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= -github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= +github.com/koding/multiconfig v0.0.0-20171124222453-69c27309b2d7 h1:SWlt7BoQNASbhTUD0Oy5yysI2seJ7vWuGUp///OM4TM= +github.com/koding/multiconfig v0.0.0-20171124222453-69c27309b2d7/go.mod h1:Y2SaZf2Rzd0pXkLVhLlCiAXFCLSXAIbTKDivVgff/AM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod 
h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw= github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.7.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= -github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/m3db/thrift v0.0.0-20190820191926-05b5a2227fe4/go.mod h1:xVfRinGzD3cYDRvMjy6RkIwM+iNL2KHNLZjT0VpVZT8= -github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.2 h1:znVR8Q4g7/WlcvsxLBRWvo+vtFJUAbDn3w+Yak2xVMI= -github.com/magiconair/properties v1.8.2/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= -github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= -github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= -github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.4/go.mod 
h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= +github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.10.2 h1:AqzbZs4ZoCBp+GtejcpCpcxM3zlSMx29dXbUSeVtJb8= +github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= -github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= -github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -github.com/mattn/go-sqlite3 v1.14.0 h1:mLyGNKR8+Vv9CAU7PphKa2hkEqxxhn8i32J6FPj1/QA= -github.com/mattn/go-sqlite3 v1.14.0/go.mod h1:JIl7NbARA7phWnGvh0LKTyg7S9BA+6gx71ShQilpsus= -github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/memcachier/mc v2.0.1+incompatible/go.mod h1:7bkvFE61leUBvXz+yxsOnGBQSZpBSPIMUQSmmSHvuXc= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= -github.com/miekg/dns v1.1.38 h1:MtIY+fmHUVVgv1AXzmKMWcwdCYxTRPG1EDjpqF4RCEw= -github.com/miekg/dns v1.1.38/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= -github.com/mitchellh/gox v0.4.0/go.mod 
h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.4.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag= -github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 h1:rzf0wL0CHVc8CEsgyygG0Mn9CNCCPZqOPaz8RiiHYQk= -github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= -github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= -github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= -github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= -github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/n9e/agent-payload v0.0.0-20210619031503-b72325474651 h1:wxnQEQ3YjmxDYTFdzAH8Md3RxqlkmTZxudbBiruO+uQ= -github.com/n9e/agent-payload v0.0.0-20210619031503-b72325474651/go.mod h1:qglO/Vsh4hMXMbrLk5LOQtni/pCCCksIDyn3hbmWKCY= -github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= -github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= -github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= -github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= -github.com/nats-io/nkeys v0.1.0/go.mod 
h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= -github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= -github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw= -github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= -github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= -github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= -github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= -github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= -github.com/opentracing-contrib/go-stdlib v1.0.0 h1:TBS7YuVotp8myLon4Pv7BtCBzOTo1DeZCld0Z63mW2w= -github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU= -github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= -github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= -github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= 
-github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= -github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= -github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/orcaman/concurrent-map v0.0.0-20210106121528-16402b402231 h1:fa50YL1pzKW+1SsBnJDOHppJN9stOEwS+CRWyUtyYGU= -github.com/orcaman/concurrent-map v0.0.0-20210106121528-16402b402231/go.mod h1:Lu3tH6HLW3feq74c2GC+jIMS/K2CFcDWnWD9XkenwhI= -github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= -github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= -github.com/pelletier/go-toml v1.7.0 h1:7utD74fnzVc/cpcyy8sjrlFr5vYpypUixARcHIMIGuI= -github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= -github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= -github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= -github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= -github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.15.0 h1:WjP/FQ/sk43MRmnEcT+MlDw2TFvkrXlprrPST/IudjU= +github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= +github.com/orcaman/concurrent-map v0.0.0-20210501183033-44dafcb38ecc h1:Ak86L+yDSOzKFa7WM5bf5itSOo1e3Xh8bm5YCMUXIjQ= +github.com/orcaman/concurrent-map v0.0.0-20210501183033-44dafcb38ecc/go.mod h1:Lu3tH6HLW3feq74c2GC+jIMS/K2CFcDWnWD9XkenwhI= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= -github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5/go.mod 
h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= -github.com/prometheus/alertmanager v0.21.0/go.mod h1:h7tJ81NA0VLWvWEayi1QltevFkLF3KxmC/malTcT8Go= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= -github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.9.0 h1:Rrch9mh17XcxvEu9D9DEpb4isxjGBtcevQjKvxPRQIU= -github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU= +github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= -github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= -github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= -github.com/prometheus/common v0.17.0 h1:kDIZLI74SS+3tedSvEkykgBkD7txMxaJAPj8DtJUKYA= -github.com/prometheus/common v0.17.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= 
-github.com/prometheus/exporter-toolkit v0.5.1/go.mod h1:OCkM4805mmisBhLmVFw858QYi3v0wKdY6/UxrT0pZVg= +github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.2.0 h1:wH4vA7pcjKuZzjF7lM8awk4fnuJO6idemZXoKnULUx4= -github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/prometheus v1.8.2-0.20210220213500-8c8de46003d1 h1:lURTtJHk4UhfkO49b+/dB1HuC8ZqyfAcyUmVnVunyc0= -github.com/prometheus/prometheus v1.8.2-0.20210220213500-8c8de46003d1/go.mod h1:MufwlAc0JavmbTIm58WuWURAZRYzoroyPmzvOacI9eY= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/quasoft/memstore v0.0.0-20180925164028-84a050167438/go.mod h1:wTPjTepVu7uJBYgZ0SdWHQlIas582j6cn2jgk4DDdlg= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/prometheus v2.5.0+incompatible h1:7QPitgO2kOFG8ecuRn9O/4L9+10He72rVRJvMXrE9Hg= +github.com/prometheus/prometheus v2.5.0+incompatible/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s= +github.com/robfig/go-cache v0.0.0-20130306151617-9fc39e0dbf62/go.mod h1:65XQgovT59RWatovFwnwocoUxiI/eENTnOY5GK3STuY= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= +github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= +github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= 
-github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= -github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= +github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= -github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/assertions v1.0.0 h1:UVQPSSmc3qtTi+zPPkCXvZX9VvW/xT/NsRvKfwY81a8= -github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= -github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast 
v1.3.1-0.20190531151931-f31dc0aaab5a h1:o6gDpunpOQeRPLojT1Zo6gkzwgGJWZjjtuXTZEwo6AM= -github.com/spf13/cast v1.3.1-0.20190531151931-f31dc0aaab5a/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= -github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= -github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.7.1 h1:pM5oEahlgWv/WnHXpgbKz7iLIxRf65tye2Ci+XFK5sk= -github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/subosito/gotenv v1.2.1-0.20190917103637-de67a6614a4d h1:YN4gX82mT31qsizy2jRheOCrGLCs15VF9SV5XPuBvkQ= -github.com/subosito/gotenv v1.2.1-0.20190917103637-de67a6614a4d/go.mod h1:GVSeM7r0P1RI1gOKYyN9IuNkhMmQwKGsjVf3ulDrdzo= -github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE= -github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= -github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/toolkits/pkg 
v1.1.3 h1:cjZMz9hmuTv4v7ivYERA9mWJCLKyr8JMd4S+CL/YzMM= -github.com/toolkits/pkg v1.1.3/go.mod h1:ge83E8FQqUnFk+2wtVtZ8kvbmoSjE1l8FP3f+qmR0fY= -github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= -github.com/uber/jaeger-client-go v2.25.0+incompatible h1:IxcNZ7WRY1Y3G4poYlx24szfsn/3LvK9QHCq9oQw8+U= -github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= -github.com/uber/jaeger-lib v2.4.0+incompatible h1:fY7QsGQWiCt8pajv4r7JEvmATdCVaWxXbjwyYwsNaLQ= -github.com/uber/jaeger-lib v2.4.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/toolkits/pkg v1.2.9 h1:zGlrJDl+2sMBoxBRIoMtAwvKmW5wctuji2+qHCecMKk= +github.com/toolkits/pkg v1.2.9/go.mod h1:ZUsQAOoaR99PSbes+RXSirvwmtd6+XIUvizCmrjfUYc= github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo= github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs= github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= -github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= -github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= -github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= -github.com/xlab/treeprint v1.0.0/go.mod h1:IoImgRak9i3zJyuxOKUP1v4UZd1tMoKkq/Cimt1uhCg= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/urfave/cli/v2 v2.3.0 h1:qph92Y649prgesehzOrQjdWyxFOp/QVM+6imKHad91M= +github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= -go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.3.0/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= -go.mongodb.org/mongo-driver v1.3.4/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= -go.mongodb.org/mongo-driver v1.4.3/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= -go.mongodb.org/mongo-driver v1.4.4/go.mod 
h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= -go.mongodb.org/mongo-driver v1.4.6/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= -go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/automaxprocs v1.4.0 h1:CpDZl6aOlLhReez+8S3eEotD7Jx0Os++lemPlMULQP0= go.uber.org/automaxprocs v1.4.0/go.mod h1:/mTEdr7LvHhs0v7mjdxDreTz1OG5zdZGqgOnhWiR/+Q= -go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= -go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191202143827-86a70503ff7e/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201208171446-5f87f3452ae9 h1:sYNJzB4J8toYPQTM6pAkcmBRgw9SnQKP9oXCHfgy604= -golang.org/x/crypto v0.0.0-20201208171446-5f87f3452ae9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 h1:HWj/xjIHfjYU5nVXpTM0s39J9CbLn7Cc5a7IC5rwsMQ= +golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= 
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5 h1:2M3HP5CCK1Si9FQhwnzYhXdG6DXeebvUHFpre8QvbyI= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0 h1:8pl+sMODzuvGJkmj2W4kZihvVb5mKm8pB/X44PIQHv8= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net 
v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= 
-golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777 h1:003p0dJM77cxMSyCPFphvZf/Y5/NXf5fzg6ufd1/Oew= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d h1:20cMwl2fHAzkJMEA+8J4JgqBQcQGzbisXo31MIeenXI= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210210192628-66670185b0cd h1:2arJsLyTCJGek+eeptQ3z49Rqndm0f+zvvpwNIXWNIA= -golang.org/x/oauth2 v0.0.0-20210210192628-66670185b0cd/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c 
h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e h1:WUoyKPm6nCo1BnNUvPGnFG3T5DUVem42yDJZZ4CNxMA= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20201208040808-7e3f01d25324 h1:Hir2P/De0WpUhtrKGGjvSb2YxUgyZ7EFOSLIcSSpiwE= -golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200513201620-d5fe73897c97/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= -gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= -gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= -gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= -gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= -gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= -google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod 
h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.39.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= -google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200108215221-bd8f9a0ef82f/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= 
-google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d h1:HV9Z9qMhQEsdlvxNFELgQ11RkMzO3CMkjEySjCtuLes= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/genproto v0.0.0-20211007155348-82e027067bd4 h1:YXPV/eKW0ZWRdB5tyI6aPoaa2Wxb4OSlFrTREMdwn64= +google.golang.org/genproto v0.0.0-20211007155348-82e027067bd4/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.41.0 h1:f+PlOh7QV4iIJkPrx5NQ7qaNGFQ3OTse67yaDHfju4E= +google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1308,34 +457,21 @@ 
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/fsnotify/fsnotify.v1 v1.4.7 h1:XNNYLJHt73EyYiCZi6+xjupS9CpvmiDgjPTAjrBlQbo= -gopkg.in/fsnotify/fsnotify.v1 v1.4.7/go.mod h1:Fyux9zXlo4rWoMSIzpn9fDAYjalPqJ/K1qJ27s+7ltE= -gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= -gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= -gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= -gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= -gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.51.1 h1:GyboHr4UqMiLUybYjd22ZjQIKEJEpgtLXtuGbR21Oho= -gopkg.in/ini.v1 v1.51.1/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -1345,49 +481,15 @@ gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= -gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= -gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= +gorm.io/driver/mysql v1.1.2 h1:OofcyE2lga734MxwcCW9uB4mWNXMr50uaGRVwQL2B0M= +gorm.io/driver/mysql v1.1.2/go.mod h1:4P/X9vSc3WTrhTLZ259cpFd6xKNYiSSdSZngkSBGIMM= +gorm.io/driver/postgres v1.1.1 h1:tWLmqYCyaoh89fi7DhM6QggujrOnmfo3H98AzgNAAu0= +gorm.io/driver/postgres v1.1.1/go.mod h1:tpe2xN7aCst1NUdYyWQyxPtnHC+Zfp6NEux9PXD1OU0= +gorm.io/gorm v1.21.12/go.mod h1:F+OptMscr0P2F2qU97WT1WimdH9GaQPoDW7AYd5i2Y0= +gorm.io/gorm v1.21.15 h1:gAyaDoPw0lCyrSFWhBlahbUA1U4P5RViC1uIqoB+1Rk= +gorm.io/gorm v1.21.15/go.mod h1:F+OptMscr0P2F2qU97WT1WimdH9GaQPoDW7AYd5i2Y0= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.20.2 h1:y/HR22XDZY3pniu9hIFDLpUCPq2w5eQ6aV/VFQ7uJMw= -k8s.io/api v0.20.2/go.mod h1:d7n6Ehyzx+S+cE3VhTGfVNNqtGc/oL9DCdYYahlurV8= -k8s.io/apimachinery v0.20.2 h1:hFx6Sbt1oG0n6DZ+g4bFt5f6BoMkOjKWsQFu077M3Vg= -k8s.io/apimachinery v0.20.2/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= -k8s.io/client-go v0.20.2 h1:uuf+iIAbfnCSw8IGAv/Rg0giM+2bOzHLOsbbrwrdhNQ= -k8s.io/client-go v0.20.2/go.mod h1:kH5brqWqp7HDxUFKoEgiI4v8G1xzbe9giaCenUWJzgE= -k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= -k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.5.0 
h1:8mOnjf1RmUPW6KRqQCfYSZq/K20Unmp3IhuZUhxl8KI= -k8s.io/klog/v2 v2.5.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= -k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd h1:sOHNzJIkytDF6qadMNKhhDRpc6ODik8lVC6nOur7B2c= -k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= -k8s.io/utils v0.0.0-20201110183641-67b214c5f920 h1:CbnUZsM497iRC5QMVkHwyl8s2tB3g7yaSHkYPkpgelw= -k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/structured-merge-diff/v4 v4.0.2 h1:YHQV7Dajm86OuqnIR6zAelnDWBRjo+YhYV9PmGrh1s8= -sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= -sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= -sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= -xorm.io/builder v0.3.7 h1:2pETdKRK+2QG4mLX4oODHEhn5Z8j1m8sXa7jfu+/SZI= -xorm.io/builder v0.3.7/go.mod h1:aUW0S9eb9VCaPohFCH3j7czOx1PMW3i1HrSzbLYGBSE= -xorm.io/xorm v1.0.7 h1:26yBTDVI+CfQpVz2Y88fISh+aiJXIPP4eNoTJlwzsC4= -xorm.io/xorm v1.0.7/go.mod h1:uF9EtbhODq5kNWxMbnBEj8hRRZnlcNSz2t2N7HW/+A4= diff --git a/http/http_funcs.go b/http/http_funcs.go deleted file mode 100644 index 61590e50..00000000 --- a/http/http_funcs.go +++ /dev/null @@ -1,425 +0,0 @@ -package http - -import ( - "fmt" - "net/http" - "strconv" - "strings" - - "github.com/gin-contrib/sessions" - "github.com/gin-gonic/gin" - - "github.com/didi/nightingale/v5/models" - "github.com/didi/nightingale/v5/pkg/i18n" - "github.com/didi/nightingale/v5/pkg/ierr" -) - -const defaultLimit = 20 - -func _e(format string, a ...interface{}) error { - return fmt.Errorf(_s(format, a...)) -} - -func _s(format string, a ...interface{}) string { - return i18n.Sprintf(format, a...) -} - -func dangerous(v interface{}, code ...int) { - ierr.Dangerous(v, code...) 
-} - -func bomb(code int, format string, a ...interface{}) { - ierr.Bomb(code, _s(format, a...)) -} - -func bind(c *gin.Context, ptr interface{}) { - dangerous(c.ShouldBindJSON(ptr), http.StatusBadRequest) -} - -func urlParamStr(c *gin.Context, field string) string { - val := c.Param(field) - - if val == "" { - bomb(http.StatusBadRequest, "url param[%s] is blank", field) - } - - return val -} - -func urlParamInt64(c *gin.Context, field string) int64 { - strval := urlParamStr(c, field) - intval, err := strconv.ParseInt(strval, 10, 64) - if err != nil { - bomb(http.StatusBadRequest, "cannot convert %s to int64", strval) - } - - return intval -} - -func urlParamInt(c *gin.Context, field string) int { - return int(urlParamInt64(c, field)) -} - -func queryStr(c *gin.Context, key string, defaultVal ...string) string { - val := c.Query(key) - if val != "" { - return val - } - - if len(defaultVal) == 0 { - bomb(http.StatusBadRequest, "query param[%s] is necessary", key) - } - - return defaultVal[0] -} - -func queryInt(c *gin.Context, key string, defaultVal ...int) int { - strv := c.Query(key) - if strv != "" { - intv, err := strconv.Atoi(strv) - if err != nil { - bomb(http.StatusBadRequest, "cannot convert [%s] to int", strv) - } - return intv - } - - if len(defaultVal) == 0 { - bomb(http.StatusBadRequest, "query param[%s] is necessary", key) - } - - return defaultVal[0] -} - -func queryInt64(c *gin.Context, key string, defaultVal ...int64) int64 { - strv := c.Query(key) - if strv != "" { - intv, err := strconv.ParseInt(strv, 10, 64) - if err != nil { - bomb(http.StatusBadRequest, "cannot convert [%s] to int64", strv) - } - return intv - } - - if len(defaultVal) == 0 { - bomb(http.StatusBadRequest, "query param[%s] is necessary", key) - } - - return defaultVal[0] -} - -func queryBool(c *gin.Context, key string, defaultVal ...bool) bool { - strv := c.Query(key) - if strv != "" { - if strv == "true" || strv == "1" || strv == "on" || strv == "checked" || strv == "yes" || strv == "Y" { - return true - } else if strv == "false" || strv == "0" || strv == "off" || strv == "no" || strv == "N" { - return false - } else { - bomb(http.StatusBadRequest, "unknown arg[%s] value: %s", key, strv) - } - } - - if len(defaultVal) == 0 { - bomb(http.StatusBadRequest, "arg[%s] is necessary", key) - } - - return defaultVal[0] -} - -func offset(c *gin.Context, limit int) int { - if limit <= 0 { - limit = 10 - } - - page := queryInt(c, "p", 1) - return (page - 1) * limit -} - -func renderMessage(c *gin.Context, v interface{}, statusCode ...int) { - code := 200 - if len(statusCode) > 0 { - code = statusCode[0] - } - if v == nil { - c.JSON(code, gin.H{"err": ""}) - return - } - - switch t := v.(type) { - case string: - c.JSON(code, gin.H{"err": _s(t)}) - case error: - c.JSON(code, gin.H{"err": t.Error()}) - } -} - -func renderData(c *gin.Context, data interface{}, err error, statusCode ...int) { - code := 200 - if len(statusCode) > 0 { - code = statusCode[0] - } - - if err == nil { - c.JSON(code, gin.H{"dat": data, "err": ""}) - return - } - - renderMessage(c, err.Error(), code) -} - -func renderZeroPage(c *gin.Context) { - renderData(c, gin.H{ - "list": []int{}, - "total": 0, - }, nil) -} - -type idsForm struct { - Ids []int64 `json:"ids"` -} - -func (f idsForm) Validate() { - if len(f.Ids) == 0 { - bomb(http.StatusBadRequest, "ids empty") - } -} - -func cookieUsername(c *gin.Context) string { - session := sessions.Default(c) - - value := session.Get("username") - if value == nil { - return "" - } - - return 
value.(string) -} - -func headerUsername(c *gin.Context) string { - token := c.GetHeader("Authorization") - if token == "" { - return "" - } - - ut, err := models.UserTokenGet("token=?", strings.TrimPrefix(token, "Bearer ")) - if err != nil { - return "" - } - - if ut == nil { - return "" - } - - return ut.Username -} - -// must get a username, otherwise abort with 401 unauthorized -func loginUsername(c *gin.Context) string { - usernameInterface, has := c.Get("username") - if has { - return usernameInterface.(string) - } - - username := cookieUsername(c) - if username == "" { - username = headerUsername(c) - } - - if username == "" { - remoteAddr := c.Request.RemoteAddr - idx := strings.LastIndex(remoteAddr, ":") - ip := "" - if idx > 0 { - ip = remoteAddr[0:idx] - } - - if (ip == "127.0.0.1" || ip == "[::1]") && c.GetHeader("X-Local") == "1" { - // local calls are treated as requests from the root user - username = "root" - } - } - - if username == "" { - ierr.Bomb(http.StatusUnauthorized, "unauthorized") - } - - c.Set("username", username) - return username -} - -func loginUser(c *gin.Context) *models.User { - username := loginUsername(c) - - user, err := models.UserGetByUsername(username) - dangerous(err) - - if user == nil { - ierr.Bomb(http.StatusUnauthorized, "unauthorized") - } - - if user.Status == 1 { - ierr.Bomb(http.StatusUnauthorized, "unauthorized") - } - - return user -} - -func User(id int64) *models.User { - obj, err := models.UserGet("id=?", id) - dangerous(err) - - if obj == nil { - bomb(http.StatusNotFound, "No such user") - } - - return obj -} - -func UserGroup(id int64) *models.UserGroup { - obj, err := models.UserGroupGet("id=?", id) - dangerous(err) - - if obj == nil { - bomb(http.StatusNotFound, "No such user group") - } - - return obj -} - -func Classpath(id int64) *models.Classpath { - obj, err := models.ClasspathGet("id=?", id) - dangerous(err) - - if obj == nil { - bomb(http.StatusNotFound, "No such classpath") - } - - return obj -} - -func Mute(id int64) *models.Mute { - obj, err := models.MuteGet("id=?", id) - dangerous(err) - - if obj == nil { - bomb(http.StatusNotFound, "No such mute config") - } - - return obj -} - -func Dashboard(id int64) *models.Dashboard { - obj, err := models.DashboardGet("id=?", id) - dangerous(err) - - if obj == nil { - bomb(http.StatusNotFound, "No such dashboard") - } - - return obj -} - -func ChartGroup(id int64) *models.ChartGroup { - obj, err := models.ChartGroupGet("id=?", id) - dangerous(err) - - if obj == nil { - bomb(http.StatusNotFound, "No such chart group") - } - - return obj -} - -func Chart(id int64) *models.Chart { - obj, err := models.ChartGet("id=?", id) - dangerous(err) - - if obj == nil { - bomb(http.StatusNotFound, "No such chart") - } - - return obj -} - -func AlertRule(id int64) *models.AlertRule { - obj, err := models.AlertRuleGet("id=?", id) - dangerous(err) - - if obj == nil { - bomb(http.StatusNotFound, "No such alert rule") - } - - return obj -} - -func AlertRuleGroup(id int64) *models.AlertRuleGroup { - obj, err := models.AlertRuleGroupGet("id=?", id) - dangerous(err) - - if obj == nil { - bomb(http.StatusNotFound, "No such alert rule group") - } - - return obj -} - -func AlertEvent(id int64) *models.AlertEvent { - obj, err := models.AlertEventGet("id=?", id) - dangerous(err) - - if obj == nil { - bomb(http.StatusNotFound, "No such alert event") - } - - return obj -} - -func HistoryAlertEvent(id int64) *models.HistoryAlertEvent { - obj, err := models.HistoryAlertEventGet("id=?", id) - dangerous(err) - - if obj == nil { - bomb(http.StatusNotFound, "No such alert all event") - } - -
return obj -} - -func CollectRule(id int64) *models.CollectRule { - obj, err := models.CollectRuleGet("id=?", id) - dangerous(err) - - if obj == nil { - bomb(http.StatusNotFound, "No such collect rule") - } - - return obj -} - -func MetricDescription(id int64) *models.MetricDescription { - obj, err := models.MetricDescriptionGet("id=?", id) - dangerous(err) - - if obj == nil { - bomb(http.StatusNotFound, "No such metric description") - } - - return obj -} - -func Resource(id int64) *models.Resource { - obj, err := models.ResourceGet("id=?", id) - dangerous(err) - - if obj == nil { - bomb(http.StatusNotFound, "No such resource") - } - - classpathResources, err := models.ClasspathResourceGets("res_ident=?", obj.Ident) - dangerous(err) - for _, cr := range classpathResources { - obj.ClasspathIds = append(obj.ClasspathIds, cr.ClasspathId) - } - - return obj -} diff --git a/http/http_middle.go b/http/http_middle.go deleted file mode 100644 index d7f51f04..00000000 --- a/http/http_middle.go +++ /dev/null @@ -1,43 +0,0 @@ -package http - -import ( - "net/http" - "strings" - - "github.com/didi/nightingale/v5/pkg/ierr" - "github.com/gin-gonic/gin" -) - -func login() gin.HandlerFunc { - return func(c *gin.Context) { - username := loginUsername(c) - c.Set("username", username) - // 这里调用loginUser主要是为了判断当前用户是否被disable了 - loginUser(c) - c.Next() - } -} - -func admin() gin.HandlerFunc { - return func(c *gin.Context) { - username := loginUsername(c) - c.Set("username", username) - - user := loginUser(c) - - roles := strings.Fields(user.RolesForDB) - found := false - for i := 0; i < len(roles); i++ { - if roles[i] == "Admin" { - found = true - break - } - } - - if !found { - ierr.Bomb(http.StatusForbidden, "forbidden") - } - - c.Next() - } -} diff --git a/http/http_server.go b/http/http_server.go deleted file mode 100644 index fbebe073..00000000 --- a/http/http_server.go +++ /dev/null @@ -1,108 +0,0 @@ -package http - -import ( - "context" - "fmt" - "net/http" - "os" - "path" - "strings" - "time" - - "github.com/gin-contrib/sessions" - "github.com/gin-contrib/sessions/cookie" - "github.com/gin-gonic/gin" - - "github.com/didi/nightingale/v5/config" - "github.com/didi/nightingale/v5/pkg/iaop" -) - -var srv = &http.Server{ - ReadTimeout: 30 * time.Second, - WriteTimeout: 30 * time.Second, - MaxHeaderBytes: 1 << 30, -} - -var skipPaths = []string{ - "/api/n9e/auth/login", - "/api/n9e/self/password", - "/api/n9e/push", - "/v1/n9e/series", -} - -func Start() { - c := config.Config - - loggerMid := iaop.LoggerWithConfig(iaop.LoggerConfig{SkipPaths: skipPaths}) - recoveryMid := iaop.Recovery() - - if strings.ToLower(c.HTTP.Mode) == "release" { - gin.SetMode(gin.ReleaseMode) - iaop.DisableConsoleColor() - } - - r := gin.New() - r.Use(recoveryMid) - - // whether print access log - if c.HTTP.Access { - r.Use(loggerMid) - } - - // use cookie to save session - store := cookie.NewStore([]byte(config.Config.HTTP.CookieSecret)) - store.Options(sessions.Options{ - Domain: config.Config.HTTP.CookieDomain, - MaxAge: config.Config.HTTP.CookieMaxAge, - Secure: config.Config.HTTP.CookieSecure, - HttpOnly: config.Config.HTTP.CookieHttpOnly, - Path: "/", - }) - session := sessions.Sessions(config.Config.HTTP.CookieName, store) - r.Use(session) - - configRoutes(r) - configNoRoute(r) - - srv.Addr = c.HTTP.Listen - srv.Handler = r - - go func() { - fmt.Println("http.listening:", srv.Addr) - if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed { - fmt.Printf("listening %s occur error: %s\n", srv.Addr, err) - 
os.Exit(3) - } - }() -} - -// Shutdown http server -func Shutdown() { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - if err := srv.Shutdown(ctx); err != nil { - fmt.Println("cannot shutdown http server:", err) - os.Exit(2) - } - - // catching ctx.Done(). timeout of 5 seconds. - select { - case <-ctx.Done(): - fmt.Println("shutdown http server timeout of 5 seconds.") - default: - fmt.Println("http server stopped") - } -} - -func configNoRoute(r *gin.Engine) { - r.NoRoute(func(c *gin.Context) { - arr := strings.Split(c.Request.URL.Path, ".") - suffix := arr[len(arr)-1] - switch suffix { - case "png", "jpeg", "jpg", "svg", "ico", "gif", "css", "js", "html", "htm", "gz", "map": - c.File(path.Join(strings.Split("pub/"+c.Request.URL.Path, "/")...)) - default: - c.File(path.Join("pub", "index.html")) - } - }) -} diff --git a/http/router.go b/http/router.go deleted file mode 100644 index d9bbb89e..00000000 --- a/http/router.go +++ /dev/null @@ -1,220 +0,0 @@ -package http - -import ( - "fmt" - "os" - - "github.com/gin-contrib/gzip" - "github.com/gin-contrib/pprof" - "github.com/gin-gonic/gin" - - "github.com/didi/nightingale/v5/config" -) - -func configRoutes(r *gin.Engine) { - /* - csrfMid := csrf.Middleware(csrf.Options{ - Secret: config.Config.HTTP.CsrfSecret, - ErrorFunc: func(c *gin.Context) { - c.JSON(452, gin.H{"err": "csrf token mismatch"}) - c.Abort() - }, - }) - */ - - if config.Config.HTTP.Pprof { - pprof.Register(r, "/api/debug/pprof") - } - - guest := r.Group("/api/n9e") - { - guest.GET("/ping", func(c *gin.Context) { - c.String(200, "pong") - }) - guest.GET("/pid", func(c *gin.Context) { - c.String(200, fmt.Sprintf("%d", os.Getpid())) - }) - guest.GET("/addr", func(c *gin.Context) { - c.String(200, c.Request.RemoteAddr) - }) - guest.GET("/version", func(c *gin.Context) { - c.String(200, config.Version) - }) - - guest.POST("/auth/login", loginPost) - guest.GET("/auth/logout", logoutGet) - - // 开源版本,为了支持图表分享功能,允许匿名查询数据 - guest.POST("/query", GetData) - guest.POST("/instant-query", GetDataInstant) - guest.POST("/tag-pairs", GetTagPairs) - guest.POST("/tag-keys", GetTagKeys) - guest.POST("/tag-values", GetTagValues) - guest.POST("/tag-metrics", GetMetrics) - guest.GET("/check-promql", checkPromeQl) - } - - // for brower, expose location in nginx.conf - pages := r.Group("/api/n9e") - - { - - pages.GET("/csrf", func(c *gin.Context) { - // renderData(c, csrf.GetToken(c), nil) - renderData(c, "not supported", nil) - }) - - pages.GET("/roles", rolesGet) - pages.GET("/self/profile", selfProfileGet) - pages.PUT("/self/profile", selfProfilePut) - pages.PUT("/self/password", selfPasswordPut) - pages.GET("/self/token", selfTokenGets) - pages.POST("/self/token", selfTokenPost) - pages.PUT("/self/token", selfTokenPut) - pages.GET("/users", login(), userGets) - pages.POST("/users", admin(), userAddPost) - pages.GET("/user/:id/profile", login(), userProfileGet) - pages.PUT("/user/:id/profile", admin(), userProfilePut) - pages.PUT("/user/:id/status", admin(), userStatusPut) - pages.PUT("/user/:id/password", admin(), userPasswordPut) - pages.DELETE("/user/:id", admin(), userDel) - - pages.GET("/user-groups", login(), userGroupListGet) - pages.GET("/user-groups/mine", login(), userGroupMineGet) - pages.POST("/user-groups", login(), userGroupAdd) - pages.PUT("/user-group/:id", login(), userGroupPut) - pages.GET("/user-group/:id", login(), userGroupGet) - pages.POST("/user-group/:id/members", login(), userGroupMemberAdd) - 
pages.DELETE("/user-group/:id/members", login(), userGroupMemberDel) - pages.DELETE("/user-group/:id", login(), userGroupDel) - - pages.GET("/classpaths", login(), classpathListGets) - pages.GET("/classpaths/tree-node/:id", login(), classpathListNodeGetsById) - pages.POST("/classpaths", login(), classpathAdd) - pages.PUT("/classpath/:id", login(), classpathPut) - pages.DELETE("/classpath/:id", login(), classpathDel) - pages.POST("/classpath/:id/resources", login(), classpathAddResources) - pages.DELETE("/classpath/:id/resources", login(), classpathDelResources) - pages.GET("/classpath/:id/resources", login(), classpathGetsResources) - - pages.GET("/classpaths/favorites", login(), classpathFavoriteGet) - pages.POST("/classpath/:id/favorites", login(), classpathFavoriteAdd) - pages.DELETE("/classpath/:id/favorites", login(), classpathFavoriteDel) - - pages.GET("/resources", login(), resourcesQuery) - pages.PUT("/resources/note", resourceNotePut) - pages.PUT("/resources/tags", resourceTagsPut) - pages.PUT("/resources/classpaths", resourceClasspathsPut) - pages.PUT("/resources/mute", resourceMutePut) - pages.GET("/resource/:id", login(), resourceGet) - pages.DELETE("/resource/:id", login(), resourceDel) - - pages.GET("/mutes", login(), muteGets) - pages.POST("/mutes", login(), muteAdd) - pages.GET("/mute/:id", login(), muteGet) - pages.DELETE("/mute/:id", login(), muteDel) - - pages.GET("/dashboards", login(), dashboardGets) - pages.POST("/dashboards", login(), dashboardAdd) - pages.POST("/dashboards-clone", login(), dashboardClone) - pages.POST("/dashboards/import", login(), dashboardImport) - pages.POST("/dashboards/export", login(), dashboardExport) - pages.GET("/dashboard/:id", login(), dashboardGet) - pages.PUT("/dashboard/:id", login(), dashboardPut) - pages.DELETE("/dashboard/:id", login(), dashboardDel) - pages.POST("/dashboard/:id/favorites", login(), dashboardFavoriteAdd) - pages.DELETE("/dashboard/:id/favorites", login(), dashboardFavoriteDel) - pages.GET("/dashboard/:id/chart-groups", login(), chartGroupGets) - pages.POST("/dashboard/:id/chart-groups", login(), chartGroupAdd) - - pages.PUT("/chart-groups", login(), chartGroupsPut) - pages.DELETE("/chart-group/:id", login(), chartGroupDel) - pages.GET("/chart-group/:id/charts", login(), chartGets) - pages.POST("/chart-group/:id/charts", login(), chartAdd) - pages.PUT("/chart/:id", login(), chartPut) - pages.DELETE("/chart/:id", login(), chartDel) - pages.PUT("/charts/configs", login(), chartConfigsPut) - pages.GET("/charts/tmps", chartTmpGets) - pages.POST("/charts/tmps", login(), chartTmpAdd) - - pages.GET("/alert-rule-groups", login(), alertRuleGroupGets) - pages.GET("/alert-rule-groups/favorites", login(), alertRuleGroupFavoriteGet) - pages.POST("/alert-rule-groups", login(), alertRuleGroupAdd) - pages.GET("/alert-rule-group/:id", login(), alertRuleGroupGet) - pages.GET("/alert-rule-group/:id/alert-rules", login(), alertRuleOfGroupGet) - pages.DELETE("/alert-rule-group/:id/alert-rules", login(), alertRuleOfGroupDel) - pages.PUT("/alert-rule-group/:id", login(), alertRuleGroupPut) - pages.DELETE("/alert-rule-group/:id", login(), alertRuleGroupDel) - pages.POST("/alert-rule-group/:id/favorites", login(), alertRuleGroupFavoriteAdd) - pages.DELETE("/alert-rule-group/:id/favorites", login(), alertRuleGroupFavoriteDel) - - pages.POST("/alert-rules", login(), alertRuleAdd) - pages.PUT("/alert-rules/status", login(), alertRuleStatusPut) - pages.PUT("/alert-rules/notify-groups", login(), alertRuleNotifyGroupsPut) - 
pages.PUT("/alert-rules/notify-channels", login(), alertRuleNotifyChannelsPut) - pages.PUT("/alert-rules/append-tags", login(), alertRuleAppendTagsPut) - pages.GET("/alert-rule/:id", login(), alertRuleGet) - pages.PUT("/alert-rule/:id", login(), alertRulePut) - pages.DELETE("/alert-rule/:id", login(), alertRuleDel) - - pages.GET("/alert-events", login(), alertEventGets) - pages.DELETE("/alert-events", login(), alertEventsDel) - pages.GET("/alert-event/:id", login(), alertEventGet) - pages.DELETE("/alert-event/:id", login(), alertEventDel) - // pages.PUT("/alert-event/:id", login(), alertEventNotePut) - - pages.GET("/history-alert-events", login(), historyAlertEventGets) - pages.GET("/history-alert-event/:id", login(), historyAlertEventGet) - - pages.GET("/classpath/:id/collect-rules", login(), collectRuleGets) - pages.POST("/collect-rules", login(), collectRuleAdd) - pages.DELETE("/collect-rules", login(), collectRuleDel) - pages.PUT("/collect-rule/:id", login(), collectRulePut) - pages.POST("/log/check", regExpCheck) - - pages.GET("/metric-descriptions", metricDescriptionGets) - pages.POST("/metric-descriptions", login(), metricDescriptionAdd) - pages.DELETE("/metric-descriptions", login(), metricDescriptionDel) - pages.PUT("/metric-description/:id", login(), metricDescriptionPut) - - pages.GET("/contact-channels", contactChannelsGet) - pages.GET("/notify-channels", notifyChannelsGet) - - pages.GET("/tpl/list", tplNameGets) - pages.GET("/tpl/content", tplGet) - - pages.GET("/status", Status) - - } - - // for brower, expose location in nginx.conf - pagesV2 := r.Group("/api/n9e/v2") - { - pagesV2.POST("/collect-rules", login(), collectRulesAdd) - } - - // for thirdparty, do not expose location in nginx.conf - v1 := r.Group("/v1/n9e") - { - v1.POST("/query", GetData) - v1.POST("/instant-query", GetDataInstant) - v1.POST("/tag-keys", GetTagKeys) - v1.POST("/tag-values", GetTagValues) - v1.POST("/tag-pairs", GetTagPairs) - v1.POST("/tag-metrics", GetMetrics) - v1.POST("/push", PushData) - v1.GET("/collect-rules-belong-to-ident", collectRuleGetsByIdent) - v1.GET("/collect-rules-summary", collectRuleSummaryGetByIdent) - - v1.GET("/can-do-op-by-name", login(), canDoOpByName) - v1.GET("/can-do-op-by-token", login(), canDoOpByToken) - v1.GET("/get-user-by-name", login(), getUserByName) - v1.GET("/get-user-by-token", login(), getUserByToken) - } - - push := r.Group("/v1/n9e/series").Use(gzip.Gzip(gzip.DefaultCompression)) - { - push.POST("", PushSeries) - } - -} diff --git a/http/router_alert_event.go b/http/router_alert_event.go deleted file mode 100644 index 282f8e42..00000000 --- a/http/router_alert_event.go +++ /dev/null @@ -1,82 +0,0 @@ -package http - -import ( - "time" - - "github.com/gin-gonic/gin" - - "github.com/didi/nightingale/v5/models" -) - -func alertEventGets(c *gin.Context) { - stime := queryInt64(c, "stime", 0) - etime := queryInt64(c, "etime", 0) - hours := queryInt64(c, "hours", 0) - now := time.Now().Unix() - if hours != 0 { - stime = now - 3600*hours - etime = now + 3600*24 - } - - if stime != 0 && etime == 0 { - etime = now + 3600*24 - } - - query := queryStr(c, "query", "") - priority := queryInt(c, "priority", -1) - status := queryInt(c, "status", -1) - limit := queryInt(c, "limit", defaultLimit) - - total, err := models.AlertEventTotal(stime, etime, query, status, priority) - dangerous(err) - - list, err := models.AlertEventGets(stime, etime, query, status, priority, limit, offset(c, limit)) - dangerous(err) - - for i := 0; i < len(list); i++ { - 
dangerous(list[i].FillObjs()) - } - - if len(list) == 0 { - renderZeroPage(c) - return - } - - renderData(c, map[string]interface{}{ - "total": total, - "list": list, - }, nil) -} - -func alertEventGet(c *gin.Context) { - ae := AlertEvent(urlParamInt64(c, "id")) - dangerous(ae.FillObjs()) - renderData(c, ae, nil) -} - -type alertEventNoteForm struct { - EventNote string `json:"event_note"` -} - -// func alertEventNotePut(c *gin.Context) { -// var f alertEventNoteForm -// bind(c, &f) - -// me := loginUser(c).MustPerm("alert_event_modify") -// ae := AlertEvent(urlParamInt64(c, "id")) - -// renderMessage(c, models.AlertEventUpdateEventNote(ae.Id, ae.HashId, f.EventNote, me.Id)) -// } - -func alertEventDel(c *gin.Context) { - loginUser(c).MustPerm("alert_event_delete") - renderMessage(c, AlertEvent(urlParamInt64(c, "id")).Del()) -} - -func alertEventsDel(c *gin.Context) { - var f idsForm - bind(c, &f) - f.Validate() - loginUser(c).MustPerm("alert_event_delete") - renderMessage(c, models.AlertEventsDel(f.Ids)) -} diff --git a/http/router_alert_rule.go b/http/router_alert_rule.go deleted file mode 100644 index 48762ca3..00000000 --- a/http/router_alert_rule.go +++ /dev/null @@ -1,351 +0,0 @@ -package http - -import ( - "encoding/json" - "fmt" - "net/http" - "strconv" - "strings" - "time" - - "github.com/gin-gonic/gin" - "github.com/toolkits/pkg/logger" - - "github.com/didi/nightingale/v5/cache" - "github.com/didi/nightingale/v5/config" - "github.com/didi/nightingale/v5/models" -) - -func alertRuleGet(c *gin.Context) { - alertRule := AlertRule(urlParamInt64(c, "id")) - alertRuleFillUserAndGroups(alertRule) - renderData(c, alertRule, nil) -} - -type alertRuleForm struct { - GroupId int64 `json:"group_id"` - Name string `json:"name"` - Note string `json:"note"` - Type int `json:"type"` - Status int `json:"status"` - Expression json.RawMessage `json:"expression"` - AppendTags string `json:"append_tags"` - EnableStime string `json:"enable_stime"` - EnableEtime string `json:"enable_etime"` - EnableDaysOfWeek string `json:"enable_days_of_week"` - AlertDuration int `json:"alert_duration"` - RecoveryNotify int `json:"recovery_notify"` - Priority int `json:"priority"` - NotifyChannels string `json:"notify_channels"` - NotifyGroups string `json:"notify_groups"` - NotifyUsers string `json:"notify_users"` - Callbacks string `json:"callbacks"` - RunbookUrl string `json:"runbook_url"` -} - -func alertRuleAdd(c *gin.Context) { - var f []alertRuleForm - bind(c, &f) - - me := loginUser(c).MustPerm("alert_rule_create") - var ids []int64 - for _, alertRule := range f { - arg := AlertRuleGroup(alertRule.GroupId) - alertRuleWritePermCheck(arg, me) - - ar := models.AlertRule{ - GroupId: alertRule.GroupId, - Name: alertRule.Name, - Type: alertRule.Type, - Note: alertRule.Note, - Status: alertRule.Status, - Expression: alertRule.Expression, - AlertDuration: alertRule.AlertDuration, - AppendTags: alertRule.AppendTags, - EnableStime: alertRule.EnableStime, - EnableEtime: alertRule.EnableEtime, - EnableDaysOfWeek: alertRule.EnableDaysOfWeek, - RecoveryNotify: alertRule.RecoveryNotify, - Priority: alertRule.Priority, - NotifyChannels: alertRule.NotifyChannels, - NotifyGroups: alertRule.NotifyGroups, - NotifyUsers: alertRule.NotifyUsers, - Callbacks: alertRule.Callbacks, - RunbookUrl: alertRule.RunbookUrl, - CreateBy: me.Username, - UpdateBy: me.Username, - } - dangerous(ar.Add()) - ids = append(ids, ar.Id) - } - - renderData(c, ids, nil) -} - -func alertRulePut(c *gin.Context) { - var f alertRuleForm - bind(c, &f) - - me := 
loginUser(c).MustPerm("alert_rule_modify") - ar := AlertRule(urlParamInt64(c, "id")) - arg := AlertRuleGroup(ar.GroupId) - alertRuleWritePermCheck(arg, me) - - if ar.Name != f.Name { - num, err := models.AlertRuleCount("group_id=? and name=? and id<>?", ar.GroupId, f.Name, ar.Id) - dangerous(err) - - if num > 0 { - bomb(200, "Alert rule %s already exists", f.Name) - } - } - - ar.Name = f.Name - ar.Note = f.Note - ar.Type = f.Type - ar.Status = f.Status - ar.AlertDuration = f.AlertDuration - ar.Expression = f.Expression - ar.AppendTags = f.AppendTags - ar.EnableStime = f.EnableStime - ar.EnableEtime = f.EnableEtime - ar.EnableDaysOfWeek = f.EnableDaysOfWeek - ar.RecoveryNotify = f.RecoveryNotify - ar.Priority = f.Priority - ar.NotifyChannels = f.NotifyChannels - ar.NotifyGroups = f.NotifyGroups - ar.NotifyUsers = f.NotifyUsers - ar.Callbacks = f.Callbacks - ar.RunbookUrl = f.RunbookUrl - ar.CreateBy = me.Username - ar.UpdateAt = time.Now().Unix() - ar.UpdateBy = me.Username - - renderMessage(c, ar.Update( - "name", - "note", - "type", - "status", - "alert_duration", - "expression", - "res_filters", - "tags_filters", - "append_tags", - "enable_stime", - "enable_etime", - "enable_days_of_week", - "recovery_notify", - "priority", - "notify_channels", - "notify_groups", - "notify_users", - "callbacks", - "runbook_url", - "update_at", - "update_by", - )) -} - -type alertRuleStatusForm struct { - Ids []int64 `json:"ids"` - Status int `json:"status"` -} - -func alertRuleStatusPut(c *gin.Context) { - var f alertRuleStatusForm - bind(c, &f) - me := loginUser(c).MustPerm("alert_rule_modify") - - if len(f.Ids) == 0 { - bomb(http.StatusBadRequest, "ids is empty") - } - - for _, id := range f.Ids { - alertRule := AlertRule(id) - arg := AlertRuleGroup(alertRule.GroupId) - alertRuleWritePermCheck(arg, me) - } - - renderMessage(c, models.AlertRuleUpdateStatus(f.Ids, f.Status, me.Username)) -} - -type alertRuleNotifyGroupsForm struct { - Ids []int64 `json:"ids"` - NotifyGroups string `json:"notify_groups"` - NotifyUsers string `json:"notify_users"` -} - -func alertRuleNotifyGroupsPut(c *gin.Context) { - var f alertRuleNotifyGroupsForm - bind(c, &f) - //用户有修改告警策略的权限 - me := loginUser(c).MustPerm("alert_rule_modify") - //id不存在 - if len(f.Ids) == 0 { - bomb(http.StatusBadRequest, "ids is empty") - } - - for _, id := range f.Ids { - alertRule := AlertRule(id) - arg := AlertRuleGroup(alertRule.GroupId) - alertRuleWritePermCheck(arg, me) - } - - renderMessage(c, models.AlertRuleUpdateNotifyGroups(f.Ids, f.NotifyGroups, f.NotifyUsers, me.Username)) -} - -type alertRuleNotifyChannelsForm struct { - Ids []int64 `json:"ids"` - NotifyChannels string `json:"notify_channels"` -} - -func alertRuleNotifyChannelsPut(c *gin.Context) { - var f alertRuleNotifyChannelsForm - bind(c, &f) - me := loginUser(c).MustPerm("alert_rule_modify") - if len(f.Ids) == 0 { - bomb(http.StatusBadRequest, "ids is empty") - } - - for _, id := range f.Ids { - alertRule := AlertRule(id) - arg := AlertRuleGroup(alertRule.GroupId) - alertRuleWritePermCheck(arg, me) - } - - renderMessage(c, models.AlertRuleUpdateNotifyChannels(f.Ids, f.NotifyChannels, me.Username)) -} - -type alertRuleAppendTagsForm struct { - Ids []int64 `json:"ids"` - AppendTags string `json:"append_tags"` -} - -func alertRuleAppendTagsPut(c *gin.Context) { - var f alertRuleAppendTagsForm - bind(c, &f) - me := loginUser(c).MustPerm("alert_rule_modify") - if len(f.Ids) == 0 { - bomb(http.StatusBadRequest, "ids is empty") - } - - for _, id := range f.Ids { - alertRule := 
AlertRule(id) - arg := AlertRuleGroup(alertRule.GroupId) - alertRuleWritePermCheck(arg, me) - } - - renderMessage(c, models.AlertRuleUpdateAppendTags(f.Ids, f.AppendTags, me.Username)) -} - -func alertRuleDel(c *gin.Context) { - me := loginUser(c).MustPerm("alert_rule_delete") - alertRule := AlertRule(urlParamInt64(c, "id")) - arg := AlertRuleGroup(alertRule.GroupId) - alertRuleWritePermCheck(arg, me) - - renderMessage(c, alertRule.Del()) -} - -func notifyChannelsGet(c *gin.Context) { - renderData(c, config.Config.NotifyChannels, nil) -} - -func alertRuleFillUserAndGroups(alertRule *models.AlertRule) { - uidStrs := strings.Fields(alertRule.NotifyUsers) - userlen := len(uidStrs) - users := make([]*models.User, 0, userlen) - if userlen > 0 { - // 是否有用户已经被删除的情况出现 - userMiss := false - - for _, uidStr := range uidStrs { - uid, err := strconv.ParseInt(uidStr, 10, 64) - if err != nil { - userMiss = true - continue - } - - user := cache.UserCache.GetById(uid) - if user != nil { - users = append(users, user) - continue - } - - // uid在cache里找不到,可能是还没来得及缓存,也可能是被删除了 - // 去查一下数据库,如果确实找不到了,就更新一下 - user, err = models.UserGetById(uid) - if err != nil { - logger.Error("UserGetById fail:", err) - continue - } - - if user != nil { - users = append(users, user) - } else { - userMiss = true - } - } - - if userMiss { - userIdsNew := make([]string, len(users)) - for i := 0; i < len(users); i++ { - userIdsNew[i] = fmt.Sprint(users[i].Id) - } - - alertRule.NotifyUsers = strings.Join(userIdsNew, " ") - alertRule.UpdateAt = time.Now().Unix() - alertRule.Update("notify_users", "update_at") - } - } - - // 最终存活的user列表,赋值给alertRule - alertRule.NotifyUsersDetail = users - - gidStrs := strings.Fields(alertRule.NotifyGroups) - grplen := len(gidStrs) - grps := make([]*models.UserGroup, 0, grplen) - - if grplen > 0 { - grpMiss := false - - for _, gidStr := range gidStrs { - gid, err := strconv.ParseInt(gidStr, 10, 64) - if err != nil { - grpMiss = true - continue - } - - grp := cache.UserGroupCache.GetBy(gid) - if grp != nil { - grps = append(grps, grp) - continue - } - - grp, err = models.UserGroupGet("id=?", gid) - if err != nil { - logger.Error("UserGroupGet fail:", err) - continue - } - - if grp != nil { - grps = append(grps, grp) - } else { - grpMiss = true - } - } - - if grpMiss { - grpIdsNew := make([]string, len(grps)) - for i := 0; i < len(grps); i++ { - grpIdsNew[i] = fmt.Sprint(grps[i].Id) - } - - alertRule.NotifyGroups = strings.Join(grpIdsNew, " ") - alertRule.UpdateAt = time.Now().Unix() - alertRule.Update("notify_groups", "update_at") - } - } - - alertRule.NotifyGroupsDetail = grps -} diff --git a/http/router_alert_rule_group.go b/http/router_alert_rule_group.go deleted file mode 100644 index 04ae8d57..00000000 --- a/http/router_alert_rule_group.go +++ /dev/null @@ -1,191 +0,0 @@ -package http - -import ( - "fmt" - "net/http" - "strconv" - "strings" - "time" - - "github.com/gin-gonic/gin" - - "github.com/didi/nightingale/v5/cache" - "github.com/didi/nightingale/v5/models" -) - -func alertRuleGroupGets(c *gin.Context) { - limit := queryInt(c, "limit", defaultLimit) - query := queryStr(c, "query", "") - - total, err := models.AlertRuleGroupTotal(query) - dangerous(err) - - list, err := models.AlertRuleGroupGets(query, limit, offset(c, limit)) - dangerous(err) - - renderData(c, gin.H{ - "list": list, - "total": total, - }, nil) -} - -func alertRuleGroupFavoriteGet(c *gin.Context) { - lst, err := loginUser(c).FavoriteAlertRuleGroups() - renderData(c, lst, err) -} - -type alertRuleGroupForm struct { - Name string 
`json:"name"` - UserGroupIds string `json:"user_group_ids"` -} - -func alertRuleGroupAdd(c *gin.Context) { - var f alertRuleGroupForm - bind(c, &f) - - me := loginUser(c).MustPerm("alert_rule_group_create") - - arg := models.AlertRuleGroup{ - Name: f.Name, - UserGroupIds: f.UserGroupIds, - CreateBy: me.Username, - UpdateBy: me.Username, - } - - err := arg.Add() - if err == nil { - // 我创建的,顺便设置为我关注的 - models.AlertRuleGroupFavoriteAdd(arg.Id, me.Id) - } - - renderMessage(c, err) -} - -func alertRuleGroupGet(c *gin.Context) { - alertRuleGroup := AlertRuleGroup(urlParamInt64(c, "id")) - alertRuleGroup.FillUserGroups() - renderData(c, alertRuleGroup, nil) -} - -func alertRuleOfGroupGet(c *gin.Context) { - ars, err := models.AlertRulesOfGroup(urlParamInt64(c, "id")) - for i := range ars { - alertRuleFillUserAndGroups(&ars[i]) - } - - renderData(c, ars, err) -} - -func alertRuleOfGroupDel(c *gin.Context) { - var f idsForm - bind(c, &f) - f.Validate() - - me := loginUser(c).MustPerm("alert_rule_delete") - - // 可能大部分alert_rule都来自同一个alert_rule_group,所以权限判断可以无需重复判断 - cachePerm := make(map[string]struct{}) - - for i := 0; i < len(f.Ids); i++ { - ar := AlertRule(f.Ids[i]) - - cacheKey := fmt.Sprintf("%d,%d", f.Ids[i], ar.GroupId) - if _, has := cachePerm[cacheKey]; has { - continue - } - - arg := AlertRuleGroup(ar.GroupId) - alertRuleWritePermCheck(arg, me) - cachePerm[cacheKey] = struct{}{} - } - - renderMessage(c, models.AlertRulesDel(f.Ids)) -} - -func alertRuleGroupPut(c *gin.Context) { - var f alertRuleGroupForm - bind(c, &f) - - me := loginUser(c).MustPerm("alert_rule_group_modify") - arg := AlertRuleGroup(urlParamInt64(c, "id")) - alertRuleWritePermCheck(arg, me) - - if arg.Name != f.Name { - num, err := models.AlertRuleGroupCount("name=? and id<>?", f.Name, arg.Id) - dangerous(err) - - if num > 0 { - bomb(200, "AlertRuleGroup %s already exists", f.Name) - } - } - - arg.Name = f.Name - arg.UserGroupIds = f.UserGroupIds - arg.UpdateBy = me.Username - arg.UpdateAt = time.Now().Unix() - - renderMessage(c, arg.Update("name", "update_by", "update_at", "user_group_ids")) -} - -func alertRuleGroupDel(c *gin.Context) { - me := loginUser(c).MustPerm("alert_rule_group_delete") - arg := AlertRuleGroup(urlParamInt64(c, "id")) - alertRuleWritePermCheck(arg, me) - - renderMessage(c, arg.Del()) -} - -func alertRuleGroupFavoriteAdd(c *gin.Context) { - me := loginUser(c) - arg := AlertRuleGroup(urlParamInt64(c, "id")) - renderMessage(c, models.AlertRuleGroupFavoriteAdd(arg.Id, me.Id)) -} - -func alertRuleGroupFavoriteDel(c *gin.Context) { - me := loginUser(c) - arg := AlertRuleGroup(urlParamInt64(c, "id")) - renderMessage(c, models.AlertRuleGroupFavoriteDel(arg.Id, me.Id)) -} - -func alertRuleWritePermCheck(alertRuleGroup *models.AlertRuleGroup, user *models.User) { - roles := strings.Fields(user.RolesForDB) - for i := 0; i < len(roles); i++ { - if roles[i] == "Admin" { - return - } - } - - gids := IdsInt64(alertRuleGroup.UserGroupIds) - if len(gids) == 0 { - // 压根没有配置管理团队,表示对所有Standard角色放开,那就不校验了 - return - } - - for _, gid := range gids { - if cache.UserGroupMember.Exists(gid, user.Id) { - return - } - } - - bomb(http.StatusForbidden, "no permission") -} - -func IdsInt64(ids string) []int64 { - if ids == "" { - return []int64{} - } - - arr := strings.Fields(ids) - count := len(arr) - ret := make([]int64, 0, count) - for i := 0; i < count; i++ { - if arr[i] != "" { - id, err := strconv.ParseInt(arr[i], 10, 64) - if err == nil { - ret = append(ret, id) - } - } - } - - return ret -} diff --git 
a/http/router_auth.go b/http/router_auth.go deleted file mode 100644 index 90a39496..00000000 --- a/http/router_auth.go +++ /dev/null @@ -1,92 +0,0 @@ -package http - -import ( - "github.com/gin-contrib/sessions" - "github.com/gin-gonic/gin" - - "github.com/didi/nightingale/v5/config" - "github.com/didi/nightingale/v5/models" -) - -type loginForm struct { - Username string `json:"username"` - Password string `json:"password"` -} - -func loginPost(c *gin.Context) { - var f loginForm - bind(c, &f) - - user, err1 := models.PassLogin(f.Username, f.Password) - if err1 == nil { - if user.Status == 1 { - renderMessage(c, "User disabled") - return - } - session := sessions.Default(c) - session.Set("username", f.Username) - session.Save() - renderData(c, user, nil) - return - } - - // password login fail, try ldap - if config.Config.LDAP.Enable { - user, err2 := models.LdapLogin(f.Username, f.Password) - if err2 == nil { - if user.Status == 1 { - renderMessage(c, "User disabled") - return - } - session := sessions.Default(c) - session.Set("username", f.Username) - session.Save() - renderData(c, user, nil) - return - } - } - - // password and ldap both fail - renderMessage(c, err1) -} - -func logoutGet(c *gin.Context) { - session := sessions.Default(c) - session.Set("username", "") - session.Save() - renderMessage(c, nil) -} - -func canDoOpByName(c *gin.Context) { - user, err := models.UserGetByUsername(queryStr(c, "name")) - dangerous(err) - - if user == nil { - renderData(c, false, err) - return - } - - can, err := user.CanDo(queryStr(c, "op")) - renderData(c, can, err) -} - -func canDoOpByToken(c *gin.Context) { - userToken, err := models.UserTokenGet("token=?", queryStr(c, "token")) - dangerous(err) - - if userToken == nil { - renderData(c, false, err) - return - } - - user, err := models.UserGetByUsername(userToken.Username) - dangerous(err) - - if user == nil { - renderData(c, false, err) - return - } - - can, err := user.CanDo(queryStr(c, "op")) - renderData(c, can, err) -} diff --git a/http/router_chart.go b/http/router_chart.go deleted file mode 100644 index b0695e30..00000000 --- a/http/router_chart.go +++ /dev/null @@ -1,82 +0,0 @@ -package http - -import ( - "github.com/gin-gonic/gin" - - "github.com/didi/nightingale/v5/models" -) - -func chartGets(c *gin.Context) { - objs, err := models.ChartGets(urlParamInt64(c, "id")) - renderData(c, objs, err) -} - -type chartForm struct { - Configs string `json:"configs"` - Weight int `json:"weight"` -} - -func chartAdd(c *gin.Context) { - var f chartForm - bind(c, &f) - - loginUser(c).MustPerm("dashboard_modify") - - cg := ChartGroup(urlParamInt64(c, "id")) - ct := models.Chart{ - GroupId: cg.Id, - Configs: f.Configs, - Weight: f.Weight, - } - - dangerous(ct.Add()) - - renderData(c, ct, nil) -} - -type chartPutForm struct { - Configs string `json:"configs"` -} - -func chartPut(c *gin.Context) { - var f chartPutForm - bind(c, &f) - - loginUser(c).MustPerm("dashboard_modify") - - ct := Chart(urlParamInt64(c, "id")) - ct.Configs = f.Configs - - dangerous(ct.Update("configs")) - - renderData(c, ct, nil) -} - -func chartDel(c *gin.Context) { - loginUser(c).MustPerm("dashboard_modify") - renderMessage(c, Chart(urlParamInt64(c, "id")).Del()) -} - -type chartConfig struct { - Id int64 `json:"id"` - GroupId int64 `json:"group_id"` - Configs string `json:"configs"` -} - -func chartConfigsPut(c *gin.Context) { - var arr []chartConfig - bind(c, &arr) - - loginUser(c).MustPerm("dashboard_modify") - - for i := 0; i < len(arr); i++ { - ct := Chart(arr[i].Id) - 
ct.Configs = arr[i].Configs - if arr[i].GroupId > 0 { - ct.GroupId = arr[i].GroupId - } - dangerous(ct.Update("configs", "group_id")) - } - - renderMessage(c, nil) -} diff --git a/http/router_chart_group.go b/http/router_chart_group.go deleted file mode 100644 index 2d68c595..00000000 --- a/http/router_chart_group.go +++ /dev/null @@ -1,55 +0,0 @@ -package http - -import ( - "github.com/gin-gonic/gin" - - "github.com/didi/nightingale/v5/models" -) - -func chartGroupGets(c *gin.Context) { - objs, err := models.ChartGroupGets(urlParamInt64(c, "id")) - renderData(c, objs, err) -} - -type chartGroupForm struct { - Name string `json:"name"` - Weight int `json:"weight"` -} - -func chartGroupAdd(c *gin.Context) { - var f chartGroupForm - bind(c, &f) - - loginUser(c).MustPerm("dashboard_modify") - - d := Dashboard(urlParamInt64(c, "id")) - - cg := models.ChartGroup{ - DashboardId: d.Id, - Name: f.Name, - Weight: f.Weight, - } - - dangerous(cg.Add()) - - renderData(c, cg, nil) -} - -func chartGroupsPut(c *gin.Context) { - var arr []models.ChartGroup - bind(c, &arr) - - loginUser(c).MustPerm("dashboard_modify") - - for i := 0; i < len(arr); i++ { - dangerous(arr[i].Update("name", "weight")) - } - - renderMessage(c, nil) -} - -func chartGroupDel(c *gin.Context) { - loginUser(c).MustPerm("dashboard_modify") - cg := models.ChartGroup{Id: urlParamInt64(c, "id")} - renderMessage(c, cg.Del()) -} diff --git a/http/router_chart_tmp.go b/http/router_chart_tmp.go deleted file mode 100644 index 20dfac6b..00000000 --- a/http/router_chart_tmp.go +++ /dev/null @@ -1,50 +0,0 @@ -package http - -import ( - "strconv" - "strings" - "time" - - "github.com/gin-gonic/gin" - - "github.com/didi/nightingale/v5/models" -) - -type chartTmpForm struct { - Configs string `json:"configs"` -} - -func chartTmpAdd(c *gin.Context) { - ids := []int64{} - - var forms []chartTmpForm - bind(c, &forms) - - for _, f := range forms { - chart := models.ChartTmp{ - Configs: f.Configs, - CreateBy: loginUsername(c), - CreateAt: time.Now().Unix(), - } - dangerous(chart.Add()) - ids = append(ids, chart.Id) - } - - renderData(c, ids, nil) -} - -func chartTmpGets(c *gin.Context) { - objs := []*models.ChartTmp{} - idStr := queryStr(c, "ids") - ids := strings.Split(idStr, ",") - for _, id := range ids { - i, err := strconv.ParseInt(id, 10, 64) - dangerous(err) - - obj, err := models.ChartTmpGet("id=?", i) - dangerous(err) - objs = append(objs, obj) - } - - renderData(c, objs, nil) -} diff --git a/http/router_classpath.go b/http/router_classpath.go deleted file mode 100644 index 52a1afdf..00000000 --- a/http/router_classpath.go +++ /dev/null @@ -1,152 +0,0 @@ -package http - -import ( - "time" - - "github.com/gin-gonic/gin" - - "github.com/didi/nightingale/v5/models" -) - -func classpathListGets(c *gin.Context) { - limit := queryInt(c, "limit", defaultLimit) - query := queryStr(c, "query", "") - - total, err := models.ClasspathTotal(query) - dangerous(err) - - list, err := models.ClasspathGets(query, limit, offset(c, limit)) - dangerous(err) - - renderData(c, gin.H{ - "list": list, - "total": total, - }, nil) -} - -//此api暂时不对外开放 -func classpathListNodeGets(c *gin.Context) { - query := queryStr(c, "query", "") - - list, err := models.ClasspathNodeGets(query) - dangerous(err) - - renderData(c, list, nil) -} - -func classpathListNodeGetsById(c *gin.Context) { - cp := Classpath(urlParamInt64(c, "id")) - children, err := cp.DirectChildren() - renderData(c, children, err) -} - -func classpathFavoriteGet(c *gin.Context) { - lst, err := 
loginUser(c).FavoriteClasspaths() - renderData(c, lst, err) -} - -type classpathForm struct { - Path string `json:"path"` - Note string `json:"note"` -} - -func classpathAdd(c *gin.Context) { - var f classpathForm - bind(c, &f) - - me := loginUser(c).MustPerm("classpath_create") - - cp := models.Classpath{ - Path: f.Path, - Note: f.Note, - Preset: 0, - CreateBy: me.Username, - UpdateBy: me.Username, - } - - renderMessage(c, cp.Add()) -} - -func classpathPut(c *gin.Context) { - var f classpathForm - bind(c, &f) - - me := loginUser(c).MustPerm("classpath_modify") - cp := Classpath(urlParamInt64(c, "id")) - - if cp.Path != f.Path { - num, err := models.ClasspathCount("path=? and id<>?", f.Path, cp.Id) - dangerous(err) - - if num > 0 { - bomb(200, "Classpath %s already exists", f.Path) - } - } - - cp.Path = f.Path - cp.Note = f.Note - cp.UpdateBy = me.Username - cp.UpdateAt = time.Now().Unix() - - renderMessage(c, cp.Update("path", "note", "update_by", "update_at")) -} - -func classpathDel(c *gin.Context) { - loginUser(c).MustPerm("classpath_delete") - - cp := Classpath(urlParamInt64(c, "id")) - if cp.Preset == 1 { - bomb(200, "Preset classpath %s cannot delete", cp.Path) - } - - renderMessage(c, cp.Del()) -} - -func classpathAddResources(c *gin.Context) { - var arr []string - bind(c, &arr) - - me := loginUser(c).MustPerm("classpath_add_resource") - cp := Classpath(urlParamInt64(c, "id")) - - dangerous(cp.AddResources(arr)) - - cp.UpdateAt = time.Now().Unix() - cp.UpdateBy = me.Username - cp.Update("update_at", "update_by") - - renderMessage(c, nil) -} - -func classpathDelResources(c *gin.Context) { - var arr []string - bind(c, &arr) - classpathId := urlParamInt64(c, "id") - me := loginUser(c).MustPerm("classpath_del_resource") - - if classpathId == 1 { - bomb(200, _s("Resource cannot delete in preset classpath")) - } - - cp := Classpath(classpathId) - - dangerous(cp.DelResources(arr)) - - cp.UpdateAt = time.Now().Unix() - cp.UpdateBy = me.Username - cp.Update("update_at", "update_by") - - renderMessage(c, nil) -} - -func classpathFavoriteAdd(c *gin.Context) { - me := loginUser(c) - cp := Classpath(urlParamInt64(c, "id")) - renderMessage(c, models.ClasspathFavoriteAdd(cp.Id, me.Id)) -} - -func classpathFavoriteDel(c *gin.Context) { - me := loginUser(c) - cp := Classpath(urlParamInt64(c, "id")) - renderMessage(c, models.ClasspathFavoriteDel(cp.Id, me.Id)) -} diff --git a/http/router_collect_rule.go b/http/router_collect_rule.go deleted file mode 100644 index 506a745a..00000000 --- a/http/router_collect_rule.go +++ /dev/null @@ -1,283 +0,0 @@ -package http - -import ( - "regexp" - "strings" - "time" - - "github.com/gin-gonic/gin" - - "github.com/didi/nightingale/v5/cache" - "github.com/didi/nightingale/v5/models" -) - -type collectRuleForm struct { - ClasspathId int64 `json:"classpath_id"` - PrefixMatch int `json:"prefix_match"` - Name string `json:"name"` - Note string `json:"note"` - Step int `json:"step"` - Type string `json:"type"` - Data string `json:"data"` - AppendTags string `json:"append_tags"` -} - -func collectRuleAdd(c *gin.Context) { - var f collectRuleForm - bind(c, &f) - - me := loginUser(c).MustPerm("collect_rule_create") - - cr := models.CollectRule{ - ClasspathId: f.ClasspathId, - PrefixMatch: f.PrefixMatch, - Name: f.Name, - Note: f.Note, - Step: f.Step, - Type: f.Type, - Data: f.Data, - AppendTags: f.AppendTags, - CreateBy: me.Username, - UpdateBy: me.Username, - } - - renderMessage(c, cr.Add()) -} - -func collectRulesAdd(c *gin.Context) { - var forms []collectRuleForm - 
bind(c, &forms) - - me := loginUser(c).MustPerm("collect_rule_create") - - for _, f := range forms { - cr := models.CollectRule{ - ClasspathId: f.ClasspathId, - PrefixMatch: f.PrefixMatch, - Name: f.Name, - Note: f.Note, - Step: f.Step, - Type: f.Type, - Data: f.Data, - AppendTags: f.AppendTags, - CreateBy: me.Username, - UpdateBy: me.Username, - } - - dangerous(cr.Add()) - } - - renderMessage(c, nil) -} - -func collectRulePut(c *gin.Context) { - var f collectRuleForm - bind(c, &f) - - me := loginUser(c).MustPerm("collect_rule_modify") - cr := CollectRule(urlParamInt64(c, "id")) - - cr.PrefixMatch = f.PrefixMatch - cr.Name = f.Name - cr.Note = f.Note - cr.Step = f.Step - cr.Type = f.Type - cr.Data = f.Data - cr.AppendTags = f.AppendTags - cr.UpdateAt = time.Now().Unix() - cr.UpdateBy = me.Username - - renderMessage(c, cr.Update( - "prefix_match", - "name", - "note", - "step", - "type", - "data", - "update_at", - "update_by", - "append_tags", - )) -} - -func collectRuleDel(c *gin.Context) { - var f idsForm - bind(c, &f) - f.Validate() - loginUser(c).MustPerm("collect_rule_delete") - renderMessage(c, models.CollectRulesDel(f.Ids)) -} - -func collectRuleGets(c *gin.Context) { - classpathId := urlParamInt64(c, "id") - - where := "classpath_id = ?" - param := []interface{}{classpathId} - - typ := queryStr(c, "type", "") - if typ != "" { - where += " and type = ?" - param = append(param, typ) - } - - objs, err := models.CollectRuleGets(where, param...) - renderData(c, objs, err) -} - -func collectRuleGetsByIdent(c *gin.Context) { - ident := queryStr(c, "ident") - - objs := cache.CollectRulesOfIdent.GetBy(ident) - renderData(c, objs, nil) -} - -type Summary struct { - LatestUpdatedAt int64 `json:"latest_updated_at"` - Total int `json:"total"` -} - -func collectRuleSummaryGetByIdent(c *gin.Context) { - ident := queryStr(c, "ident") - var summary Summary - objs := cache.CollectRulesOfIdent.GetBy(ident) - total := len(objs) - if total > 0 { - summary.Total = total - var latestUpdatedAt int64 - for _, obj := range objs { - if latestUpdatedAt < obj.UpdateAt { - latestUpdatedAt = obj.UpdateAt - } - } - summary.LatestUpdatedAt = latestUpdatedAt - } - - renderData(c, summary, nil) -} - -type RegExpCheck struct { - Success bool `json:"success"` - Data []map[string]string `json:"tags"` -} - -func regExpCheck(c *gin.Context) { - param := make(map[string]string) - dangerous(c.ShouldBind(¶m)) - - ret := &RegExpCheck{ - Success: true, - Data: make([]map[string]string, 0), - } - - calcMethod := param["func"] - if calcMethod == "" { - tmp := map[string]string{"func": "is empty"} - ret.Data = append(ret.Data, tmp) - renderData(c, ret, nil) - return - } - - // 处理主正则 - if re, ok := param["re"]; !ok || re == "" { - tmp := map[string]string{"re": "regex does not exist or is empty"} - ret.Data = append(ret.Data, tmp) - renderData(c, ret, nil) - return - } - - // 匹配主正则 - suc, reRes, isSub := checkRegex(param["re"], param["log"]) - if !suc { - ret.Success = false - reRes = genErrMsg(param["re"]) - ret.Data = append(ret.Data, map[string]string{"re": reRes}) - renderData(c, ret, nil) - return - } - if calcMethod == "histogram" && !isSub { - ret.Success = false - reRes = genSubErrMsg(param["re"]) - ret.Data = append(ret.Data, map[string]string{"re": reRes}) - renderData(c, ret, nil) - return - } - - ret.Data = append(ret.Data, map[string]string{"re": reRes}) - // 处理tags - var nonTagKey = map[string]bool{ - "re": true, - "log": true, - "func": true, - } - - for tagk, pat := range param { - // 如果不是tag,就继续循环 - if _, ok := 
nonTagKey[tagk]; ok {
- continue
- }
- suc, tagRes, isSub := checkRegex(pat, param["log"])
- if !suc {
- // invalid regular expression
- ret.Success = false
- tagRes = genErrMsg(pat)
- } else if !isSub {
- // no capture group matched
- ret.Success = false
- tagRes = genSubErrMsg(pat)
- } else if includeIllegalChar(tagRes) || includeIllegalChar(tagk) {
- // reserved characters are not allowed
- ret.Success = false
- tagRes = genIllegalCharErrMsg()
- }
-
- tmp := map[string]string{tagk: tagRes}
- ret.Data = append(ret.Data, tmp)
- }
-
- renderData(c, ret, nil)
-}
-
-// error details are returned directly in the response body
-func checkRegex(pat string, log string) (succ bool, result string, isSub bool) {
- if pat == "" {
- return false, "", false
- }
-
- reg, err := regexp.Compile(pat)
- if err != nil {
- return false, "", false
- }
-
- res := reg.FindStringSubmatch(log)
- switch len(res) {
- // no match at all
- case 0:
- return false, "", false
- // no capture group matched, return the whole match
- case 1:
- return true, res[0], false
- // matched, return the first capture group by default
- default:
- return true, res[1], true
- }
-}
-
-func includeIllegalChar(s string) bool {
- illegalChars := ":,=\r\n\t"
- return strings.ContainsAny(s, illegalChars)
-}
-
-// build the regexp-match error message
-func genErrMsg(pattern string) string {
- return _s("Regexp %s matching failed", pattern)
-}
-
-// build the substring-match error message
-func genSubErrMsg(pattern string) string {
- return _s("Regexp %s matched, but cannot get substring()", pattern)
-}
-
-// build the illegal-character error message
-func genIllegalCharErrMsg() string {
- return _s(`TagKey or TagValue contains illegal characters[:,/=\r\n\t]`)
-}
diff --git a/http/router_dashboard.go b/http/router_dashboard.go
deleted file mode 100644
index 25d93f6f..00000000
--- a/http/router_dashboard.go
+++ /dev/null
@@ -1,244 +0,0 @@
-package http
-
-import (
- "time"
-
- "github.com/gin-gonic/gin"
-
- "github.com/didi/nightingale/v5/models"
-)
-
-func dashboardGets(c *gin.Context) {
- limit := queryInt(c, "limit", defaultLimit)
- query := queryStr(c, "query", "")
- onlyfavorite := queryBool(c, "onlyfavorite", false)
-
- me := loginUser(c)
- ids, err := me.FavoriteDashboardIds()
- dangerous(err)
-
- // my favorites are empty, so return an empty page directly
- if onlyfavorite && len(ids) == 0 {
- renderZeroPage(c)
- return
- }
-
- total, err := models.DashboardTotal(onlyfavorite, ids, query)
- dangerous(err)
-
- list, err := models.DashboardGets(onlyfavorite, ids, query, limit, offset(c, limit))
- dangerous(err)
-
- if onlyfavorite {
- for i := 0; i < len(list); i++ {
- list[i].Favorite = 1
- }
- } else {
- for i := 0; i < len(list); i++ {
- list[i].FillFavorite(ids)
- }
- }
-
- renderData(c, gin.H{
- "list": list,
- "total": total,
- }, nil)
-}
-
-func dashboardGet(c *gin.Context) {
- renderData(c, Dashboard(urlParamInt64(c, "id")), nil)
-}
-
-type dashboardForm struct {
- Id int64 `json:"id"`
- Name string `json:"name"`
- Tags string `json:"tags"`
- Configs string `json:"configs"`
-}
-
-func dashboardAdd(c *gin.Context) {
- var f dashboardForm
- bind(c, &f)
-
- me := loginUser(c).MustPerm("dashboard_create")
-
- d := &models.Dashboard{
- Name: f.Name,
- Tags: f.Tags,
- Configs: f.Configs,
- CreateBy: me.Username,
- UpdateBy: me.Username,
- }
-
- dangerous(d.Add())
-
- renderData(c, d, nil)
-}
-
-func dashboardPut(c *gin.Context) {
- var f dashboardForm
- bind(c, &f)
-
- me := loginUser(c).MustPerm("dashboard_modify")
- d := Dashboard(urlParamInt64(c, "id"))
-
- if d.Name != f.Name {
- num, err := models.DashboardCount("name=? 
and id<>?", f.Name, d.Id) - dangerous(err) - - if num > 0 { - bomb(200, "Dashboard %s already exists", f.Name) - } - } - - d.Name = f.Name - d.Tags = f.Tags - d.Configs = f.Configs - d.UpdateAt = time.Now().Unix() - d.UpdateBy = me.Username - - dangerous(d.Update("name", "tags", "configs", "update_at", "update_by")) - - renderData(c, d, nil) -} - -func dashboardClone(c *gin.Context) { - var f dashboardForm - bind(c, &f) - - me := loginUser(c).MustPerm("dashboard_create") - - d := &models.Dashboard{ - Name: f.Name, - Tags: f.Tags, - Configs: f.Configs, - CreateBy: me.Username, - UpdateBy: me.Username, - } - dangerous(d.AddOnly()) - - chartGroups, err := models.ChartGroupGets(f.Id) - dangerous(err) - for _, chartGroup := range chartGroups { - charts, err := models.ChartGets(chartGroup.Id) - dangerous(err) - chartGroup.DashboardId = d.Id - chartGroup.Id = 0 - dangerous(chartGroup.Add()) - - for _, chart := range charts { - chart.Id = 0 - chart.GroupId = chartGroup.Id - dangerous(chart.Add()) - } - } - - renderData(c, d, nil) -} - -func dashboardDel(c *gin.Context) { - loginUser(c).MustPerm("dashboard_delete") - renderMessage(c, Dashboard(urlParamInt64(c, "id")).Del()) -} - -func dashboardFavoriteAdd(c *gin.Context) { - me := loginUser(c) - d := Dashboard(urlParamInt64(c, "id")) - renderMessage(c, models.DashboardFavoriteAdd(d.Id, me.Id)) -} - -func dashboardFavoriteDel(c *gin.Context) { - me := loginUser(c) - d := Dashboard(urlParamInt64(c, "id")) - renderMessage(c, models.DashboardFavoriteDel(d.Id, me.Id)) -} - -type ChartGroupDetail struct { - Id int64 `json:"id"` - DashboardId int64 `json:"dashboard_id"` - Name string `json:"name"` - Weight int `json:"weight"` - Charts []models.Chart `json:"charts"` -} - -type DashboardDetail struct { - Id int64 `json:"id"` - Name string `json:"name"` - Tags string `json:"tags"` - Configs string `json:"configs"` - ChartGroups []ChartGroupDetail `json:"chart_groups"` -} - -func dashboardExport(c *gin.Context) { - var f idsForm - bind(c, &f) - dashboards, err := models.DashboardGetsByIds(f.Ids) - dangerous(err) - - var details []DashboardDetail - for _, databoard := range dashboards { - detail := DashboardDetail{ - Name: databoard.Name, - Tags: databoard.Tags, - Configs: databoard.Configs, - } - - chartGroups, err := models.ChartGroupGets(databoard.Id) - dangerous(err) - - var chartGroupsDetail []ChartGroupDetail - for _, chartGroup := range chartGroups { - chartGroupDetail := ChartGroupDetail{ - Name: chartGroup.Name, - Weight: chartGroup.Weight, - } - - charts, err := models.ChartGets(chartGroup.Id) - dangerous(err) - - chartGroupDetail.Charts = charts - chartGroupsDetail = append(chartGroupsDetail, chartGroupDetail) - } - detail.ChartGroups = chartGroupsDetail - details = append(details, detail) - } - - renderData(c, details, nil) -} - -func dashboardImport(c *gin.Context) { - var details []DashboardDetail - bind(c, &details) - me := loginUser(c).MustPerm("dashboard_create") - - for _, detail := range details { - d := &models.Dashboard{ - Name: detail.Name, - Tags: detail.Tags, - Configs: detail.Configs, - CreateBy: me.Username, - UpdateBy: me.Username, - } - dangerous(d.AddOnly()) - - for _, chartGroup := range detail.ChartGroups { - cg := models.ChartGroup{ - DashboardId: d.Id, - Name: chartGroup.Name, - Weight: chartGroup.Weight, - } - dangerous(cg.Add()) - - for _, chart := range chartGroup.Charts { - c := models.Chart{ - GroupId: cg.Id, - Configs: chart.Configs, - Weight: chart.Weight, - } - dangerous(c.Add()) - } - } - } - - renderMessage(c, nil) -} 
diff --git a/http/router_history_alert_event.go b/http/router_history_alert_event.go deleted file mode 100644 index 51da3544..00000000 --- a/http/router_history_alert_event.go +++ /dev/null @@ -1,56 +0,0 @@ -package http - -import ( - "time" - - "github.com/gin-gonic/gin" - - "github.com/didi/nightingale/v5/models" -) - -func historyAlertEventGets(c *gin.Context) { - stime := queryInt64(c, "stime", 0) - etime := queryInt64(c, "etime", 0) - hours := queryInt64(c, "hours", 0) - now := time.Now().Unix() - if hours != 0 { - stime = now - 3600*hours - etime = now + 3600*24 - } - - if stime != 0 && etime == 0 { - etime = now + 3600*24 - } - - query := queryStr(c, "query", "") - priority := queryInt(c, "priority", -1) - status := queryInt(c, "status", -1) - isRecovery := queryInt(c, "is_recovery", -1) - limit := queryInt(c, "limit", defaultLimit) - - total, err := models.HistoryAlertEventsTotal(stime, etime, query, status, isRecovery, priority) - dangerous(err) - - list, err := models.HistoryAlertEventGets(stime, etime, query, status, isRecovery, priority, limit, offset(c, limit)) - dangerous(err) - - for i := 0; i < len(list); i++ { - dangerous(list[i].FillObjs()) - } - - if len(list) == 0 { - renderZeroPage(c) - return - } - - renderData(c, map[string]interface{}{ - "total": total, - "list": list, - }, nil) -} - -func historyAlertEventGet(c *gin.Context) { - ae := HistoryAlertEvent(urlParamInt64(c, "id")) - dangerous(ae.FillObjs()) - renderData(c, ae, nil) -} diff --git a/http/router_metric_description.go b/http/router_metric_description.go deleted file mode 100644 index 6c264d1e..00000000 --- a/http/router_metric_description.go +++ /dev/null @@ -1,82 +0,0 @@ -package http - -import ( - "net/http" - "strings" - - "github.com/gin-gonic/gin" - - "github.com/didi/nightingale/v5/models" -) - -func metricDescriptionGets(c *gin.Context) { - limit := queryInt(c, "limit", defaultLimit) - query := queryStr(c, "query", "") - - total, err := models.MetricDescriptionTotal(query) - dangerous(err) - - list, err := models.MetricDescriptionGets(query, limit, offset(c, limit)) - dangerous(err) - - renderData(c, gin.H{ - "list": list, - "total": total, - }, nil) -} - -type metricDescriptionFrom struct { - Data string `json:"data"` -} - -// 没有单个新增的功能,只有批量导入 -func metricDescriptionAdd(c *gin.Context) { - var f metricDescriptionFrom - var metricDescriptions []models.MetricDescription - bind(c, &f) - lines := strings.Split(f.Data, "\n") - for _, md := range lines { - arr := strings.Split(md, ":") - if len(arr) != 2 { - bomb(200, "metric description %s is illegal", md) - } - m := models.MetricDescription{ - Metric: arr[0], - Description: arr[1], - } - metricDescriptions = append(metricDescriptions, m) - } - - if len(metricDescriptions) == 0 { - bomb(http.StatusBadRequest, "Decoded metric description empty") - } - - loginUser(c).MustPerm("metric_description_create") - - renderMessage(c, models.MetricDescriptionUpdate(metricDescriptions)) -} - -func metricDescriptionDel(c *gin.Context) { - var f idsForm - bind(c, &f) - - loginUser(c).MustPerm("metric_description_delete") - - renderMessage(c, models.MetricDescriptionDel(f.Ids)) -} - -type metricDescriptionForm struct { - Description string `json:"description"` -} - -func metricDescriptionPut(c *gin.Context) { - var f metricDescriptionForm - bind(c, &f) - - loginUser(c).MustPerm("metric_description_modify") - - md := MetricDescription(urlParamInt64(c, "id")) - md.Description = f.Description - - renderMessage(c, md.Update("description")) -} diff --git 
a/http/router_mute.go b/http/router_mute.go
deleted file mode 100644
index 63934c58..00000000
--- a/http/router_mute.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package http
-
-import (
- "github.com/gin-gonic/gin"
-
- "github.com/didi/nightingale/v5/models"
-)
-
-func muteGets(c *gin.Context) {
- limit := queryInt(c, "limit", defaultLimit)
- query := queryStr(c, "query", "")
-
- total, err := models.MuteTotal(query)
- dangerous(err)
-
- list, err := models.MuteGets(query, limit, offset(c, limit))
- dangerous(err)
-
- renderData(c, gin.H{
- "list": list,
- "total": total,
- }, nil)
-}
-
-type muteForm struct {
- ClasspathPrefix string `json:"classpath_prefix"`
- Metric string `json:"metric"`
- ResFilters string `json:"res_filters"`
- TagFilters string `json:"tags_filters"`
- Cause string `json:"cause"`
- Btime int64 `json:"btime"`
- Etime int64 `json:"etime"`
-}
-
-func muteAdd(c *gin.Context) {
- var f muteForm
- bind(c, &f)
-
- me := loginUser(c).MustPerm("mute_create")
-
- mt := models.Mute{
- ClasspathPrefix: f.ClasspathPrefix,
- Metric: f.Metric,
- ResFilters: f.ResFilters,
- TagFilters: f.TagFilters,
- Cause: f.Cause,
- Btime: f.Btime,
- Etime: f.Etime,
- CreateBy: me.Username,
- }
-
- renderMessage(c, mt.Add())
-}
-
-func muteGet(c *gin.Context) {
- renderData(c, Mute(urlParamInt64(c, "id")), nil)
-}
-
-func muteDel(c *gin.Context) {
- loginUser(c).MustPerm("mute_delete")
- renderMessage(c, Mute(urlParamInt64(c, "id")).Del())
-}
diff --git a/http/router_prome.go b/http/router_prome.go
deleted file mode 100644
index 24cb46d2..00000000
--- a/http/router_prome.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package http
-
-import (
- "github.com/gin-gonic/gin"
- "github.com/prometheus/prometheus/promql/parser"
-
- "github.com/didi/nightingale/v5/vos"
-)
-
-func checkPromeQl(c *gin.Context) {
-
- ql := c.Query("promql")
- _, err := parser.ParseExpr(ql)
- respD := &vos.PromQlCheckResp{}
- isCorrect := true
- if err != nil {
-
- isCorrect = false
- respD.ParseError = err.Error()
- }
-
- respD.QlCorrect = isCorrect
- renderData(c, respD, nil)
-}
diff --git a/http/router_resources.go b/http/router_resources.go
deleted file mode 100644
index 28bd3776..00000000
--- a/http/router_resources.go
+++ /dev/null
@@ -1,190 +0,0 @@
-package http
-
-import (
- "net/http"
- "strings"
-
- "github.com/gin-gonic/gin"
- "github.com/toolkits/pkg/str"
-
- "github.com/didi/nightingale/v5/models"
-)
-
-func classpathGetsResources(c *gin.Context) {
- limit := queryInt(c, "limit", defaultLimit)
- prefix := queryInt(c, "prefix", 0)
- query := queryStr(c, "query", "")
-
- cp := Classpath(urlParamInt64(c, "id"))
- var classpathIds []int64
- if prefix == 1 {
- cps, err := models.ClasspathGetsByPrefix(cp.Path)
- dangerous(err)
- for i := range cps {
- classpathIds = append(classpathIds, cps[i].Id)
- }
- } else {
- classpathIds = append(classpathIds, cp.Id)
- }
-
- total, err := models.ResourceTotalByClasspathId(classpathIds, query)
- dangerous(err)
-
- reses, err := models.ResourceGetsByClasspathId(classpathIds, query, limit, offset(c, limit))
- dangerous(err)
-
- renderData(c, gin.H{
- "classpath": cp,
- "list": reses,
- "total": total,
- }, nil)
-}
-
-func resourcesQuery(c *gin.Context) {
- limit := queryInt(c, "limit", defaultLimit)
- qres := queryStr(c, "qres", "")
-
- // qpaths can contain multiple classpath ids, comma separated
- qpaths := str.IdsInt64(queryStr(c, "qpaths", ""))
-
- total, err := models.ResourceTotalByClasspathQuery(qpaths, qres)
- dangerous(err)
-
- reses, err := models.ResourceGetsByClasspathQuery(qpaths, qres, limit, offset(c, limit))
- 
dangerous(err)
-
- if len(reses) == 0 {
- renderZeroPage(c)
- return
- }
-
- renderData(c, gin.H{
- "list": reses,
- "total": total,
- }, nil)
-}
-
-func resourceGet(c *gin.Context) {
- renderData(c, Resource(urlParamInt64(c, "id")), nil)
-}
-
-func resourceDel(c *gin.Context) {
- loginUser(c).MustPerm("resource_modify")
- renderData(c, Resource(urlParamInt64(c, "id")).Del(), nil)
-}
-
-type resourceNoteForm struct {
- Ids []int64 `json:"ids"`
- Note string `json:"note"`
-}
-
-// update the note of the selected resources
-func resourceNotePut(c *gin.Context) {
- var f resourceNoteForm
- bind(c, &f)
-
- if len(f.Ids) == 0 {
- bomb(http.StatusBadRequest, "ids is empty")
- }
-
- loginUser(c).MustPerm("resource_modify")
-
- renderMessage(c, models.ResourceUpdateNote(f.Ids, f.Note))
-}
-
-type resourceTagsForm struct {
- Ids []int64 `json:"ids"`
- Tags string `json:"tags"`
-}
-
-func resourceTagsPut(c *gin.Context) {
- var f resourceTagsForm
- bind(c, &f)
-
- if len(f.Ids) == 0 {
- bomb(http.StatusBadRequest, "ids is empty")
- }
-
- loginUser(c).MustPerm("resource_modify")
-
- renderMessage(c, models.ResourceUpdateTags(f.Ids, f.Tags))
-}
-
-type resourceMuteForm struct {
- Ids []int64 `json:"ids"`
- Btime int64 `json:"btime"`
- Etime int64 `json:"etime"`
-}
-
-func resourceMutePut(c *gin.Context) {
- var f resourceMuteForm
- bind(c, &f)
-
- if len(f.Ids) == 0 {
- bomb(http.StatusBadRequest, "ids is empty")
- }
-
- loginUser(c).MustPerm("resource_modify")
-
- renderMessage(c, models.ResourceUpdateMute(f.Ids, f.Btime, f.Etime))
-}
-
-type resourceClasspathsForm struct {
- ResIdents []string `json:"res_idents"`
- ClasspathIds []int64 `json:"classpath_ids"`
-}
-
-func resourceClasspathsPut(c *gin.Context) {
- var f resourceClasspathsForm
- m := make(map[string]map[int64]struct{}) //store database data to compare
- toAdd := make(map[string][]int64)
-
- bind(c, &f)
- loginUser(c).MustPerm("resource_modify")
-
- sql := "res_ident in (\"" + strings.Join(f.ResIdents, "\",\"") + "\")"
- oldClasspathResources, err := models.ClasspathResourceGets(sql)
- dangerous(err)
-
- for _, obj := range oldClasspathResources {
- if _, exists := m[obj.ResIdent]; !exists {
- m[obj.ResIdent] = make(map[int64]struct{})
- }
- m[obj.ResIdent][obj.ClasspathId] = struct{}{}
- }
-
- for _, ident := range f.ResIdents {
- toAdd[ident] = []int64{}
- if _, exists := m[ident]; exists {
- for _, classpathId := range f.ClasspathIds {
- if _, exists := m[ident][classpathId]; exists {
- // the classpathResource already exists in the database, nothing to do
- delete(m[ident], classpathId)
- } else {
- toAdd[ident] = append(toAdd[ident], classpathId)
- }
- }
- } else {
- toAdd[ident] = f.ClasspathIds
- }
- }
-
- // delete classpathResources that are no longer wanted
- for ident := range m {
- for classpathId := range m[ident] {
- if classpathId == 1 {
- continue
- }
-
- dangerous(models.ClasspathResourceDel(classpathId, []string{ident}))
- }
- }
-
- // add classpathResources that are not yet in the database
- for ident, cids := range toAdd {
- for _, cid := range cids {
- dangerous(models.ClasspathResourceAdd(cid, ident))
- }
- }
- renderMessage(c, nil)
-}
diff --git a/http/router_role.go b/http/router_role.go
deleted file mode 100644
index 5ff7066d..00000000
--- a/http/router_role.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package http
-
-import (
- "github.com/gin-gonic/gin"
-
- "github.com/didi/nightingale/v5/models"
-)
-
-func rolesGet(c *gin.Context) {
- lst, err := models.RoleGetsAll()
- renderData(c, lst, err)
-}
diff --git a/http/router_self.go b/http/router_self.go
deleted file mode 100644
index df549fc8..00000000
--- a/http/router_self.go
+++ /dev/null
@@ 
-1,58 +0,0 @@ -package http - -import ( - "encoding/json" - "time" - - "github.com/gin-gonic/gin" -) - -func selfProfileGet(c *gin.Context) { - renderData(c, loginUser(c), nil) -} - -type selfProfileForm struct { - Nickname string `json:"nickname"` - Phone string `json:"phone"` - Email string `json:"email"` - Portrait string `json:"portrait"` - Contacts json.RawMessage `json:"contacts"` -} - -func selfProfilePut(c *gin.Context) { - var f selfProfileForm - bind(c, &f) - - user := loginUser(c) - user.Nickname = f.Nickname - user.Phone = f.Phone - user.Email = f.Email - user.Portrait = f.Portrait - user.Contacts = f.Contacts - user.UpdateAt = time.Now().Unix() - user.UpdateBy = user.Username - - renderMessage( - c, - user.Update( - "nickname", - "phone", - "email", - "portrait", - "contacts", - "update_at", - "update_by", - ), - ) -} - -type selfPasswordForm struct { - OldPass string `json:"oldpass" binding:"required"` - NewPass string `json:"newpass" binding:"required"` -} - -func selfPasswordPut(c *gin.Context) { - var f selfPasswordForm - bind(c, &f) - renderMessage(c, loginUser(c).ChangePassword(f.OldPass, f.NewPass)) -} diff --git a/http/router_status.go b/http/router_status.go deleted file mode 100644 index 4c6817e4..00000000 --- a/http/router_status.go +++ /dev/null @@ -1,42 +0,0 @@ -package http - -import ( - "time" - - "github.com/didi/nightingale/v5/models" - "github.com/gin-gonic/gin" -) - -func Status(c *gin.Context) { - var err error - data := make(map[string]int64) - data["user_total"], err = models.UserTotal("") - dangerous(err) - - data["user_group_total"], err = models.UserGroupTotal("") - dangerous(err) - - data["resource_total"], err = models.ResourceTotal("") - dangerous(err) - - data["alert_rule_total"], err = models.AlertRuleTotal("") - dangerous(err) - - data["dashboard_total"], err = models.DashboardCount("") - dangerous(err) - - now := time.Now().Unix() - stime := now - 24*3600 - data["event_total_day"], err = models.AlertEventTotal(stime, now, "", -1, -1) - dangerous(err) - - stime = now - 7*24*3600 - data["event_total_week"], err = models.AlertEventTotal(stime, now, "", -1, -1) - dangerous(err) - - stime = now - 30*24*3600 - data["event_total_month"], err = models.AlertEventTotal(stime, now, "", -1, -1) - dangerous(err) - - renderData(c, data, nil) -} diff --git a/http/router_token.go b/http/router_token.go deleted file mode 100644 index ff512968..00000000 --- a/http/router_token.go +++ /dev/null @@ -1,32 +0,0 @@ -package http - -import ( - "github.com/gin-gonic/gin" - - "github.com/didi/nightingale/v5/models" -) - -func selfTokenGets(c *gin.Context) { - objs, err := models.UserTokenGets("user_id=?", loginUser(c).Id) - renderData(c, objs, err) -} - -func selfTokenPost(c *gin.Context) { - user := loginUser(c) - obj, err := models.UserTokenNew(user.Id, user.Username) - renderData(c, obj, err) -} - -type selfTokenForm struct { - Token string `json:"token"` -} - -func selfTokenPut(c *gin.Context) { - user := loginUser(c) - - var f selfTokenForm - bind(c, &f) - - obj, err := models.UserTokenReset(user.Id, f.Token) - renderData(c, obj, err) -} diff --git a/http/router_tpl.go b/http/router_tpl.go deleted file mode 100644 index ac7110f9..00000000 --- a/http/router_tpl.go +++ /dev/null @@ -1,58 +0,0 @@ -package http - -import ( - "encoding/json" - "io/ioutil" - "net/http" - "path" - - "github.com/didi/nightingale/v5/config" - - "github.com/gin-gonic/gin" - "github.com/toolkits/pkg/file" -) - -func tplNameGets(c *gin.Context) { - tplType := queryStr(c, "tpl_type") - - var 
files []string - var err error - switch tplType { - case "alert_rule": - files, err = file.FilesUnder(config.Config.Tpl.AlertRulePath) - dangerous(err) - case "dashboard": - files, err = file.FilesUnder(config.Config.Tpl.DashboardPath) - dangerous(err) - default: - bomb(http.StatusBadRequest, "tpl type not found") - } - - renderData(c, files, err) -} - -func tplGet(c *gin.Context) { - tplName := path.Base(queryStr(c, "tpl_name")) - tplType := queryStr(c, "tpl_type") - - var filePath string - switch tplType { - case "alert_rule": - filePath = config.Config.Tpl.AlertRulePath + "/" + tplName - case "dashboard": - filePath = config.Config.Tpl.DashboardPath + "/" + tplName - default: - bomb(http.StatusBadRequest, "tpl type not found") - } - - if !file.IsExist(filePath) { - bomb(http.StatusBadRequest, "tpl not found") - } - - b, err := ioutil.ReadFile(filePath) - dangerous(err) - - var content interface{} - err = json.Unmarshal(b, &content) - renderData(c, content, err) -} diff --git a/http/router_ts_data.go b/http/router_ts_data.go deleted file mode 100644 index c2c403ab..00000000 --- a/http/router_ts_data.go +++ /dev/null @@ -1,221 +0,0 @@ -package http - -import ( - "compress/gzip" - "compress/zlib" - "errors" - "fmt" - "io/ioutil" - - "github.com/didi/nightingale/v5/backend" - "github.com/didi/nightingale/v5/cache" - "github.com/didi/nightingale/v5/trans" - "github.com/didi/nightingale/v5/vos" - - "github.com/gin-gonic/gin" - agentpayload "github.com/n9e/agent-payload/gogen" - "github.com/toolkits/pkg/logger" -) - -// 错误消息也是返回了200,是和客户端的约定,客户端如果发现code!=200就会重试 -func PushSeries(c *gin.Context) { - req := agentpayload.N9EMetricsPayload{} - - r := c.Request - reader := r.Body - - var err error - if encoding := r.Header.Get("Content-Encoding"); encoding == "gzip" { - if reader, err = gzip.NewReader(r.Body); err != nil { - message := fmt.Sprintf("error: get gzip reader occur error: %v", err) - logger.Warning(message) - c.String(200, message) - return - } - defer reader.Close() - } else if encoding == "deflate" { - if reader, err = zlib.NewReader(r.Body); err != nil { - message := fmt.Sprintf("error: get zlib reader occur error: %v", err) - logger.Warning(message) - c.String(200, message) - return - } - defer reader.Close() - } - - b, err := ioutil.ReadAll(reader) - if err != nil { - message := fmt.Sprintf("error: ioutil occur error: %v", err) - logger.Warning(message) - c.String(200, message) - return - } - - if r.Header.Get("Content-Type") == "application/x-protobuf" { - if err := req.Unmarshal(b); err != nil { - message := fmt.Sprintf("error: decode protobuf body occur error: %v", err) - logger.Warning(message) - c.String(200, message) - return - } - - count := len(req.Samples) - if count == 0 { - c.String(200, "error: samples is empty") - return - } - - metricPoints := make([]*vos.MetricPoint, 0, count) - for i := 0; i < count; i++ { - logger.Debugf("recv %v", req.Samples[i]) - metricPoints = append(metricPoints, convertAgentdPoint(req.Samples[i])) - } - - if err = trans.Push(metricPoints); err != nil { - logger.Warningf("error: trans.push %+v err:%v", req.Samples, err) - c.String(200, "error: "+err.Error()) - } else { - c.String(200, "success: received %d points", len(metricPoints)) - } - } else { - logger.Warningf("error: trans.push %+v Content-Type(%s) not equals application/x-protobuf", req.Samples) - c.String(200, "error: Content-Type(%s) not equals application/x-protobuf") - } -} - -func convertAgentdPoint(obj *agentpayload.N9EMetricsPayload_Sample) *vos.MetricPoint { - return 
&vos.MetricPoint{ - Metric: obj.Metric, - Ident: obj.Ident, - Alias: obj.Alias, - TagsMap: obj.Tags, - Time: obj.Time, - ValueUntyped: obj.Value, - } -} - -func PushData(c *gin.Context) { - var points []*vos.MetricPoint - err := c.ShouldBindJSON(&points) - if err != nil { - message := fmt.Sprintf("error: decode json body occur error: %v", err) - logger.Warning(message) - c.String(200, message) - return - } - - if err = trans.Push(points); err != nil { - c.String(200, "error: "+err.Error()) - } else { - c.String(200, "success") - } -} - -func GetTagKeys(c *gin.Context) { - recv := vos.CommonTagQueryParam{} - dangerous(c.ShouldBindJSON(&recv)) - - dataSource, err := backend.GetDataSourceFor("") - if err != nil { - logger.Warningf("could not find datasource") - renderMessage(c, err) - return - } - - resp := dataSource.QueryTagKeys(recv) - renderData(c, resp, nil) -} - -func GetTagValues(c *gin.Context) { - recv := vos.CommonTagQueryParam{} - dangerous(c.ShouldBindJSON(&recv)) - - dataSource, err := backend.GetDataSourceFor("") - if err != nil { - logger.Warningf("could not find datasource") - renderMessage(c, err) - return - } - if recv.TagKey == "" { - renderMessage(c, errors.New("missing tag_key")) - return - } - resp := dataSource.QueryTagValues(recv) - renderData(c, resp, nil) -} - -func GetMetrics(c *gin.Context) { - recv := vos.MetricQueryParam{} - dangerous(c.ShouldBindJSON(&recv)) - - dataSource, err := backend.GetDataSourceFor("") - if err != nil { - logger.Warningf("could not find datasource") - renderMessage(c, err) - return - } - - resp := dataSource.QueryMetrics(recv) - logger.Debugf("[GetMetrics][recv:%+v][resp:%+v]", recv, resp) - res := &vos.MetricDesQueryResp{ - Metrics: make([]vos.MetricsWithDescription, 0), - } - - for _, metric := range resp.Metrics { - t := vos.MetricsWithDescription{ - Name: metric, - } - - description, exists := cache.MetricDescMapper.Get(metric) - if exists { - t.Description = description.(string) - } - - res.Metrics = append(res.Metrics, t) - } - - renderData(c, res, nil) -} - -func GetTagPairs(c *gin.Context) { - recv := vos.CommonTagQueryParam{} - dangerous(c.ShouldBindJSON(&recv)) - - dataSource, err := backend.GetDataSourceFor("") - if err != nil { - logger.Warningf("could not find datasource") - renderMessage(c, err) - return - } - - resp := dataSource.QueryTagPairs(recv) - renderData(c, resp, nil) -} - -func GetData(c *gin.Context) { - dataSource, err := backend.GetDataSourceFor("") - if err != nil { - logger.Warningf("could not find datasource") - renderMessage(c, err) - return - } - - var input vos.DataQueryParam - dangerous(c.ShouldBindJSON(&input)) - resp := dataSource.QueryData(input) - renderData(c, resp, nil) -} - -func GetDataInstant(c *gin.Context) { - dataSource, err := backend.GetDataSourceFor("") - if err != nil { - logger.Warningf("could not find datasource") - renderMessage(c, err) - return - } - - var input vos.DataQueryInstantParam - dangerous(c.ShouldBindJSON(&input)) - resp := dataSource.QueryDataInstant(input.PromeQl) - renderData(c, resp, nil) -} diff --git a/http/router_user.go b/http/router_user.go deleted file mode 100644 index dfcbbfcb..00000000 --- a/http/router_user.go +++ /dev/null @@ -1,197 +0,0 @@ -package http - -import ( - "encoding/json" - "strings" - "time" - - "github.com/gin-gonic/gin" - - "github.com/didi/nightingale/v5/config" - "github.com/didi/nightingale/v5/models" -) - -func userGets(c *gin.Context) { - limit := queryInt(c, "limit", defaultLimit) - query := queryStr(c, "query", "") - - total, err := 
models.UserTotal(query) - dangerous(err) - - list, err := models.UserGets(query, limit, offset(c, limit)) - dangerous(err) - - admin := false - roles := strings.Fields(loginUser(c).RolesForDB) - for i := 0; i < len(roles); i++ { - if roles[i] == "Admin" { - admin = true - break - } - } - - renderData(c, gin.H{ - "list": list, - "total": total, - "admin": admin, - }, nil) -} - -type userAddForm struct { - Username string `json:"username" binding:"required"` - Password string `json:"password" binding:"required"` - Nickname string `json:"nickname"` - Phone string `json:"phone"` - Email string `json:"email"` - Portrait string `json:"portrait"` - Roles []string `json:"roles"` - Contacts json.RawMessage `json:"contacts"` -} - -func userAddPost(c *gin.Context) { - var f userAddForm - bind(c, &f) - - password, err := models.CryptoPass(f.Password) - dangerous(err) - - now := time.Now().Unix() - username := loginUsername(c) - - if len(f.Roles) == 0 { - bomb(200, "roles empty") - } - - u := models.User{ - Username: f.Username, - Password: password, - Nickname: f.Nickname, - Phone: f.Phone, - Email: f.Email, - Portrait: f.Portrait, - RolesForDB: strings.Join(f.Roles, " "), - Contacts: f.Contacts, - CreateAt: now, - UpdateAt: now, - CreateBy: username, - UpdateBy: username, - } - - renderMessage(c, u.Add()) -} - -func userProfileGet(c *gin.Context) { - renderData(c, User(urlParamInt64(c, "id")), nil) -} - -type userProfileForm struct { - Nickname string `json:"nickname"` - Phone string `json:"phone"` - Email string `json:"email"` - Portrait string `json:"portrait"` - Roles []string `json:"roles"` - Status int `json:"status"` - Contacts json.RawMessage `json:"contacts"` -} - -func userProfilePut(c *gin.Context) { - var f userProfileForm - bind(c, &f) - - if len(f.Roles) == 0 { - bomb(200, "roles empty") - } - - target := User(urlParamInt64(c, "id")) - target.Nickname = f.Nickname - target.Phone = f.Phone - target.Email = f.Email - target.Portrait = f.Portrait - target.RolesForDB = strings.Join(f.Roles, " ") - target.Status = f.Status - target.Contacts = f.Contacts - target.UpdateAt = time.Now().Unix() - target.UpdateBy = loginUsername(c) - renderMessage( - c, - target.Update( - "nickname", - "phone", - "email", - "portrait", - "roles", - "status", - "contacts", - "update_at", - "update_by", - ), - ) -} - -type userPasswordForm struct { - Password string `json:"password" binding:"required"` -} - -func userPasswordPut(c *gin.Context) { - var f userPasswordForm - bind(c, &f) - - target := User(urlParamInt64(c, "id")) - - cryptoPass, err := models.CryptoPass(f.Password) - dangerous(err) - - target.Password = cryptoPass - target.UpdateAt = time.Now().Unix() - target.UpdateBy = loginUsername(c) - renderMessage(c, target.Update("password", "update_at", "update_by")) -} - -type userStatusForm struct { - Status int `json:"status"` -} - -func userStatusPut(c *gin.Context) { - var f userStatusForm - bind(c, &f) - - target := User(urlParamInt64(c, "id")) - target.Status = f.Status - target.UpdateAt = time.Now().Unix() - target.UpdateBy = loginUsername(c) - renderMessage(c, target.Update("status", "update_at", "update_by")) -} - -func userDel(c *gin.Context) { - id := urlParamInt64(c, "id") - target, err := models.UserGet("id=?", id) - dangerous(err) - - if target == nil { - renderMessage(c, nil) - return - } - - renderMessage(c, target.Del()) -} - -func contactChannelsGet(c *gin.Context) { - renderData(c, config.Config.ContactKeys, nil) -} - -func getUserByName(c *gin.Context) { - user, err := 
models.UserGetByUsername(queryStr(c, "name")) - renderData(c, user, err) -} - -func getUserByToken(c *gin.Context) { - userToken, err := models.UserTokenGet("token=?", queryStr(c, "token")) - dangerous(err) - if userToken == nil { - renderMessage(c, nil) - return - } - - user, err := models.UserGetByUsername(userToken.Username) - renderData(c, user, err) -} diff --git a/http/router_user_group.go b/http/router_user_group.go deleted file mode 100644 index 108ca3ea..00000000 --- a/http/router_user_group.go +++ /dev/null @@ -1,173 +0,0 @@ -package http - -import ( - "net/http" - "time" - - "github.com/gin-gonic/gin" - - "github.com/didi/nightingale/v5/models" -) - -func userGroupListGet(c *gin.Context) { - limit := queryInt(c, "limit", defaultLimit) - query := queryStr(c, "query", "") - - total, err := models.UserGroupTotal(query) - dangerous(err) - - list, err := models.UserGroupGets(query, limit, offset(c, limit)) - dangerous(err) - - renderData(c, gin.H{ - "list": list, - "total": total, - }, nil) -} - -// 与我相关的用户组,我创建的,或者我是其中一员 -// 这个量不大,搜索和分页都放在前端来做,后端搞起来比较麻烦 -func userGroupMineGet(c *gin.Context) { - list, err := loginUser(c).MyUserGroups() - renderData(c, list, err) -} - -type userGroupForm struct { - Name string `json:"name"` - Note string `json:"note"` -} - -func userGroupAdd(c *gin.Context) { - var f userGroupForm - bind(c, &f) - - me := loginUser(c) - - ug := models.UserGroup{ - Name: f.Name, - Note: f.Note, - CreateBy: me.Username, - UpdateBy: me.Username, - } - - dangerous(ug.Add()) - - // 顺便把创建者也作为团队的一员,失败了也没关系,用户会重新添加成员 - models.UserGroupMemberAdd(ug.Id, me.Id) - - renderData(c, ug.Id, nil) -} - -func userGroupPut(c *gin.Context) { - var f userGroupForm - bind(c, &f) - - me := loginUser(c) - ug := UserGroup(urlParamInt64(c, "id")) - - can, err := me.CanModifyUserGroup(ug) - dangerous(err) - - if !can { - bomb(http.StatusForbidden, "forbidden") - } - - if ug.Name != f.Name { - // 如果name发生变化,需要检查这个新name是否与别的group重名 - num, err := models.UserGroupCount("name=? 
and id<>?", f.Name, ug.Id) - dangerous(err) - - if num > 0 { - bomb(200, "UserGroup %s already exists", f.Name) - } - } - - ug.Name = f.Name - ug.Note = f.Note - ug.UpdateBy = me.Username - ug.UpdateAt = time.Now().Unix() - - renderMessage(c, ug.Update("name", "note", "update_at", "update_by")) -} - -// 不但返回UserGroup的信息,也把成员信息返回,成员不会特别多,所以, -// 成员全部返回,由前端分页、查询 -func userGroupGet(c *gin.Context) { - ug := UserGroup(urlParamInt64(c, "id")) - - ids, err := ug.MemberIds() - dangerous(err) - - users, err := models.UserGetsByIds(ids) - - renderData(c, gin.H{ - "users": users, - "user_group": ug, - }, err) -} - -func userGroupMemberAdd(c *gin.Context) { - var f idsForm - bind(c, &f) - f.Validate() - - me := loginUser(c) - ug := UserGroup(urlParamInt64(c, "id")) - - can, err := me.CanModifyUserGroup(ug) - dangerous(err) - - if !can { - bomb(http.StatusForbidden, "forbidden") - } - - dangerous(ug.AddMembers(f.Ids)) - - // 用户组的成员发生变化,相当于更新了用户组 - // 如果更新失败了直接忽略,不是啥大事 - ug.UpdateAt = time.Now().Unix() - ug.UpdateBy = me.Username - ug.Update("update_at", "update_by") - - renderMessage(c, nil) -} - -func userGroupMemberDel(c *gin.Context) { - var f idsForm - bind(c, &f) - f.Validate() - - me := loginUser(c) - ug := UserGroup(urlParamInt64(c, "id")) - - can, err := me.CanModifyUserGroup(ug) - dangerous(err) - - if !can { - bomb(http.StatusForbidden, "forbidden") - } - - dangerous(ug.DelMembers(f.Ids)) - - // 用户组的成员发生变化,相当于更新了用户组 - // 如果更新失败了直接忽略,不是啥大事 - ug.UpdateAt = time.Now().Unix() - ug.UpdateBy = me.Username - ug.Update("update_at", "update_by") - - renderMessage(c, nil) -} - -func userGroupDel(c *gin.Context) { - me := loginUser(c) - ug := UserGroup(urlParamInt64(c, "id")) - - can, err := me.CanModifyUserGroup(ug) - dangerous(err) - - if !can { - bomb(http.StatusForbidden, "forbidden") - } - - renderMessage(c, ug.Del()) -} diff --git a/judge/compute.go b/judge/compute.go deleted file mode 100644 index d414cb3c..00000000 --- a/judge/compute.go +++ /dev/null @@ -1,426 +0,0 @@ -// Copyright 2017 Xiaomi, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package judge - -import ( - "fmt" - "math" - - "github.com/didi/nightingale/v5/vos" -) - -type Function interface { - Compute(vs []*vos.HPoint) (leftValue vos.JsonFloat, isTriggered bool) -} - -type MaxFunction struct { - Function - Limit int - Operator string - RightValue float64 -} - -func (f MaxFunction) Compute(vs []*vos.HPoint) (leftValue vos.JsonFloat, isTriggered bool) { - count := len(vs) - if count < 1 { - return - } - - max := vs[0].Value - for i := 1; i < len(vs); i++ { - if max < vs[i].Value { - max = vs[i].Value - } - } - - leftValue = max - isTriggered = checkIsTriggered(leftValue, f.Operator, f.RightValue) - return -} - -type MinFunction struct { - Function - Limit int - Operator string - RightValue float64 -} - -func (f MinFunction) Compute(vs []*vos.HPoint) (leftValue vos.JsonFloat, isTriggered bool) { - count := len(vs) - if count < 1 { - return - } - - min := vs[0].Value - - for i := 1; i < len(vs); i++ { - if min > vs[i].Value { - min = vs[i].Value - } - } - - leftValue = min - isTriggered = checkIsTriggered(leftValue, f.Operator, f.RightValue) - return -} - -type AllFunction struct { - Function - Limit int - Operator string - RightValue float64 -} - -func (f AllFunction) Compute(vs []*vos.HPoint) (leftValue vos.JsonFloat, isTriggered bool) { - count := len(vs) - if count < 1 { - return - } - - for i := 0; i < len(vs); i++ { - isTriggered = checkIsTriggered(vs[i].Value, f.Operator, f.RightValue) - if !isTriggered { - break - } - } - - leftValue = vs[0].Value - return -} - -type SumFunction struct { - Function - Limit int - Operator string - RightValue float64 -} - -func (f SumFunction) Compute(vs []*vos.HPoint) (leftValue vos.JsonFloat, isTriggered bool) { - count := len(vs) - if count < 1 { - return - } - - sum := vos.JsonFloat(0.0) - for i := 0; i < count; i++ { - sum += vs[i].Value - } - - leftValue = sum - isTriggered = checkIsTriggered(leftValue, f.Operator, f.RightValue) - return -} - -type AvgFunction struct { - Function - Limit int - Operator string - RightValue float64 -} - -func (f AvgFunction) Compute(vs []*vos.HPoint) (leftValue vos.JsonFloat, isTriggered bool) { - vsLen := len(vs) - if vsLen < 1 { - return - } - - sum := vos.JsonFloat(0.0) - - for i := 0; i < vsLen; i++ { - sum += vs[i].Value - } - - leftValue = sum / vos.JsonFloat(vsLen) - isTriggered = checkIsTriggered(leftValue, f.Operator, f.RightValue) - return -} - -type StddevFunction struct { - Function - Num int - Limit int -} - -func (f StddevFunction) Compute(vs []*vos.HPoint) (leftValue vos.JsonFloat, isTriggered bool) { - var sum float64 - vsLen := len(vs) - if vsLen < 1 { - return - } - - for i := 0; i < vsLen; i++ { - sum += float64(vs[i].Value) - } - mean := sum / float64(vsLen) - - var num float64 - for i := 0; i < vsLen; i++ { - num += math.Pow(float64(vs[i].Value)-mean, 2) - } - - std := math.Sqrt(num / float64(vsLen)) - - upperBound := mean + std*float64(f.Num) - lowerBound := mean - std*float64(f.Num) - - leftValue = vs[0].Value - isTriggered = checkIsTriggered(leftValue, "<", lowerBound) || checkIsTriggered(leftValue, ">", upperBound) - return -} - -type DiffFunction struct { - Function - Limit int - Operator string - RightValue float64 -} - -// 只要有一个点的diff触发阈值,就报警 -func (f DiffFunction) Compute(vs []*vos.HPoint) (leftValue vos.JsonFloat, isTriggered bool) { - vsLen := len(vs) - if vsLen < 1 { - return - } - - first := vs[0].Value - - isTriggered = false - for i := 1; i < vsLen; i++ { - // diff是当前值减去历史值 - leftValue = first - vs[i].Value - isTriggered = checkIsTriggered(leftValue, 
f.Operator, f.RightValue) - if isTriggered { - break - } - } - - return -} - -// pdiff(#3) -type PDiffFunction struct { - Function - Limit int - Operator string - RightValue float64 -} - -func (f PDiffFunction) Compute(vs []*vos.HPoint) (leftValue vos.JsonFloat, isTriggered bool) { - vsLen := len(vs) - if vsLen < 1 { - return - } - - first := vs[0].Value - isTriggered = false - for i := 1; i < len(vs); i++ { - if vs[i].Value == 0 { - continue - } - - leftValue = (first - vs[i].Value) / vs[i].Value * 100.0 - isTriggered = checkIsTriggered(leftValue, f.Operator, f.RightValue) - if isTriggered { - break - } - } - - return -} - -type HappenFunction struct { - Function - Num int - Limit int - Operator string - RightValue float64 -} - -func (f HappenFunction) Compute(vs []*vos.HPoint) (leftValue vos.JsonFloat, isTriggered bool) { - for n, i := 0, 0; i < len(vs); i++ { - if checkIsTriggered(vs[i].Value, f.Operator, f.RightValue) { - n++ - if n == f.Num { - isTriggered = true - leftValue = vs[i].Value - return - } - } - } - return -} - -type CAvgAbsFunction struct { - Function - Limit int - Operator string - RightValue float64 - CompareValue float64 -} - -func (f CAvgAbsFunction) Compute(vs []*vos.HPoint) (leftValue vos.JsonFloat, isTriggered bool) { - vsLen := len(vs) - if vsLen < 1 { - return - } - - sum := vos.JsonFloat(0.0) - - for i := 0; i < vsLen; i++ { - sum += vs[i].Value - } - - value := sum / vos.JsonFloat(vsLen) - leftValue = vos.JsonFloat(math.Abs(float64(value) - float64(f.CompareValue))) - - isTriggered = checkIsTriggered(leftValue, f.Operator, f.RightValue) - return -} - -type CAvgFunction struct { - Function - Limit int - Operator string - RightValue float64 - CompareValue float64 -} - -func (f CAvgFunction) Compute(vs []*vos.HPoint) (leftValue vos.JsonFloat, isTriggered bool) { - vsLen := len(vs) - if vsLen < 1 { - return - } - - sum := vos.JsonFloat(0.0) - for i := 0; i < vsLen; i++ { - sum += vs[i].Value - } - - leftValue = sum/vos.JsonFloat(vsLen) - vos.JsonFloat(f.CompareValue) - - isTriggered = checkIsTriggered(leftValue, f.Operator, f.RightValue) - return -} - -type CAvgRateAbsFunction struct { - Function - Limit int - Operator string - RightValue float64 - CompareValue float64 -} - -func (f CAvgRateAbsFunction) Compute(vs []*vos.HPoint) (leftValue vos.JsonFloat, isTriggered bool) { - vsLen := len(vs) - if vsLen < 1 { - return - } - - sum := vos.JsonFloat(0.0) - for i := 0; i < vsLen; i++ { - sum += vs[i].Value - } - - value := sum / vos.JsonFloat(vsLen) - leftValue = vos.JsonFloat(math.Abs((float64(value)-float64(f.CompareValue))/f.CompareValue)) * 100.00 - - isTriggered = checkIsTriggered(leftValue, f.Operator, f.RightValue) - return -} - -type CAvgRateFunction struct { - Function - Limit int - Operator string - RightValue float64 - CompareValue float64 -} - -func (f CAvgRateFunction) Compute(vs []*vos.HPoint) (leftValue vos.JsonFloat, isTriggered bool) { - vsLen := len(vs) - if vsLen < 1 { - return - } - - sum := vos.JsonFloat(0.0) - for i := 0; i < vsLen; i++ { - sum += vs[i].Value - } - - value := sum / vos.JsonFloat(vsLen) - leftValue = (value - vos.JsonFloat(f.CompareValue)) / vos.JsonFloat(math.Abs(f.CompareValue)) * 100.00 - - isTriggered = checkIsTriggered(leftValue, f.Operator, f.RightValue) - return -} - -func ParseFuncFromString(str string, span []interface{}, operator string, rightValue float64) (fn Function, err error) { - if str == "" { - return nil, fmt.Errorf("func can not be null") - } - limit := span[0].(int) - - switch str { - case "max": - fn = 
&MaxFunction{Limit: limit, Operator: operator, RightValue: rightValue} - case "min": - fn = &MinFunction{Limit: limit, Operator: operator, RightValue: rightValue} - case "all": - fn = &AllFunction{Limit: limit, Operator: operator, RightValue: rightValue} - case "sum": - fn = &SumFunction{Limit: limit, Operator: operator, RightValue: rightValue} - case "avg": - fn = &AvgFunction{Limit: limit, Operator: operator, RightValue: rightValue} - case "stddev": - fn = &StddevFunction{Limit: limit, Num: span[1].(int)} - case "diff": - fn = &DiffFunction{Limit: limit, Operator: operator, RightValue: rightValue} - case "pdiff": - fn = &PDiffFunction{Limit: limit, Operator: operator, RightValue: rightValue} - case "happen": - fn = &HappenFunction{Limit: limit, Num: span[1].(int), Operator: operator, RightValue: rightValue} - case "c_avg": - fn = &CAvgFunction{Limit: limit, CompareValue: span[1].(float64), Operator: operator, RightValue: rightValue} - case "c_avg_abs": - fn = &CAvgAbsFunction{Limit: limit, CompareValue: span[1].(float64), Operator: operator, RightValue: rightValue} - case "c_avg_rate": - fn = &CAvgRateFunction{Limit: limit, CompareValue: span[1].(float64), Operator: operator, RightValue: rightValue} - case "c_avg_rate_abs": - fn = &CAvgRateAbsFunction{Limit: limit, CompareValue: span[1].(float64), Operator: operator, RightValue: rightValue} - default: - err = fmt.Errorf("not_supported_method") - } - - return -} - -func checkIsTriggered(leftValue vos.JsonFloat, operator string, rightValue float64) (isTriggered bool) { - switch operator { - case "=", "==": - isTriggered = math.Abs(float64(leftValue)-rightValue) < 0.0001 - case "!=": - isTriggered = math.Abs(float64(leftValue)-rightValue) > 0.0001 - case "<": - isTriggered = float64(leftValue) < rightValue - case "<=": - isTriggered = float64(leftValue) <= rightValue - case ">": - isTriggered = float64(leftValue) > rightValue - case ">=": - isTriggered = float64(leftValue) >= rightValue - } - - return -} diff --git a/judge/handler.go b/judge/handler.go deleted file mode 100644 index 2780d785..00000000 --- a/judge/handler.go +++ /dev/null @@ -1,545 +0,0 @@ -// Copyright 2017 Xiaomi, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
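The deleted judge/handler.go below drives the push-mode judging path: an incoming point is matched against cached alert rules, its recent history is kept per series, and ToJudge/sendEventIfNeed decide whether to emit an alert or a recovery event. Rule matching relies on valueMatch, which dispatches on a filter function name. The following is a stand-alone simplification of that dispatch covering only three of the many filter functions; the sample values in main are hypothetical.

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// valueMatch returns true when value satisfies filter function f with params,
// mirroring the much larger switch in handler.go below.
func valueMatch(value, f string, params []string) bool {
	switch f {
	case "InList":
		for _, p := range params {
			if value == p {
				return true
			}
		}
	case "HasPrefixString":
		for _, p := range params {
			if strings.HasPrefix(value, p) {
				return true
			}
		}
	case "MatchRegexp":
		for _, p := range params {
			if r, err := regexp.Compile(p); err == nil && r.MatchString(value) {
				return true
			}
		}
	}
	return false
}

func main() {
	// Hypothetical idents/tags, just to exercise the three cases.
	fmt.Println(valueMatch("host-01.bj", "HasPrefixString", []string{"host-"})) // true
	fmt.Println(valueMatch("redis", "InList", []string{"mysql", "kafka"}))      // false
	fmt.Println(valueMatch("10.1.2.3", "MatchRegexp", []string{`^10\.`}))       // true
}
```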
- -package judge - -import ( - "bytes" - "encoding/json" - "fmt" - "math" - "regexp" - "strconv" - "strings" - "sync" - "time" - - "github.com/toolkits/pkg/logger" - "github.com/toolkits/pkg/str" - - "github.com/didi/nightingale/v5/cache" - "github.com/didi/nightingale/v5/models" - "github.com/didi/nightingale/v5/vos" -) - -var ( - bufferPool = sync.Pool{New: func() interface{} { return new(bytes.Buffer) }} - - EVENT_ALERT = "alert" - EVENT_RECOVER = "recovery" -) - -func Send(points []*vos.MetricPoint) { - for i := range points { - alertRules := getMatchAlertRules(points[i]) - - rulesCount := len(alertRules) - if rulesCount == 0 { - // 这个监控数据没有关联任何告警策略,省事了不用处理 - continue - } - - logger.Debugf("[point_match_alertRules][point:%+v][alertRuleNum:%+v]", points[i], rulesCount) - // 不同的告警规则,alert_duration字段大小不同,找到最大的,按照最大的值来缓存历史数据 - var maxAliveDuration = 0 - for j := range alertRules { - if maxAliveDuration < alertRules[j].AlertDuration { - maxAliveDuration = alertRules[j].AlertDuration - } - } - - ll := PointCaches[points[i].PK[0:2]].PutPoint(points[i], int64(maxAliveDuration)) - - for j := range alertRules { - go ToJudge(ll, alertRules[j], points[i]) - } - } -} - -func getMatchAlertRules(point *vos.MetricPoint) []*models.AlertRule { - alertRules := cache.AlertRulesByMetric.GetBy(point.Metric) - matchRules := make([]*models.AlertRule, 0, len(alertRules)) - - for i := range alertRules { - if alertRules[i].Type == models.PULL { - continue - } - - if matchAlertRule(point, alertRules[i]) { - matchRules = append(matchRules, alertRules[i]) - } - } - - return matchRules -} - -func matchAlertRule(item *vos.MetricPoint, alertRule *models.AlertRule) bool { - //TODO 过滤方式待优化 - for _, filter := range alertRule.PushExpr.ResFilters { - if !valueMatch(item.Ident, filter.Func, filter.Params) { - return false - } - } - - for _, filter := range alertRule.PushExpr.TagFilters { - value, exists := item.TagsMap[filter.Key] - if !exists { - return false - } - - if !valueMatch(value, filter.Func, filter.Params) { - return false - } - } - - return true -} - -func valueMatch(value, f string, params []string) bool { - switch f { - - case "InClasspath": - for i := range params { - if cache.ResClasspath.Exists(value, params[i]) { - return true - } - } - return false - case "NotInClasspath": - for i := range params { - if cache.ResClasspath.Exists(value, params[i]) { - return false - } - } - return true - case "InClasspathPrefix": - classpaths := cache.ResClasspath.GetValues(value) - for _, classpath := range classpaths { - for i := range params { - if strings.HasPrefix(classpath, params[i]) { - return true - } - } - } - return false - case "NotInClasspathPrefix": - classpaths := cache.ResClasspath.GetValues(value) - for _, classpath := range classpaths { - for i := range params { - if strings.HasPrefix(classpath, params[i]) { - return false - } - } - } - return true - case "InList": - for i := range params { - if value == params[i] { - return true - } - } - return false - case "NotInList": - for i := range params { - if value == params[i] { - return false - } - } - return true - case "InResourceList": - for i := range params { - if value == params[i] { - return true - } - } - return false - case "NotInResourceList": - for i := range params { - if value == params[i] { - return false - } - } - return true - case "HasPrefixString": - for i := range params { - if strings.HasPrefix(value, params[i]) { - return true - } - } - return false - case "NoPrefixString": - for i := range params { - if strings.HasPrefix(value, params[i]) 
{ - return false - } - } - return true - case "HasSuffixString": - for i := range params { - if strings.HasSuffix(value, params[i]) { - return true - } - } - return false - case "NoSuffixString": - for i := range params { - if strings.HasSuffix(value, params[i]) { - return false - } - } - return true - case "ContainsString": - for i := range params { - if strings.Contains(value, params[i]) { - return true - } - } - return false - case "NotContainsString": - for i := range params { - if strings.Contains(value, params[i]) { - return false - } - } - return true - case "MatchRegexp": - for i := range params { - r, _ := regexp.Compile(params[i]) - if r.MatchString(value) { - return true - } - } - return false - case "NotMatchRegexp": - for i := range params { - r, _ := regexp.Compile(params[i]) - if r.MatchString(value) { - return false - } - } - return true - } - - return false -} - -func ToJudge(linkedList *SafeLinkedList, stra *models.AlertRule, val *vos.MetricPoint) { - logger.Debugf("[ToJudge.start][stra:%+v][val:%+v]", stra, val) - now := val.Time - - hps := linkedList.HistoryPoints(now - int64(stra.AlertDuration)) - if len(hps) == 0 { - return - } - - historyArr := []vos.HistoryPoints{} - statusArr := []bool{} - eventInfo := "" - value := "" - - if len(stra.PushExpr.Exps) == 1 { - for _, expr := range stra.PushExpr.Exps { - history, info, lastValue, status := Judge(stra, expr, hps, val, now) - statusArr = append(statusArr, status) - - if value == "" { - value = fmt.Sprintf("%s: %s", expr.Metric, lastValue) - } else { - value += fmt.Sprintf("; %s: %s", expr.Metric, lastValue) - } - - historyArr = append(historyArr, history) - eventInfo += info - } - } else { //多个条件 - for _, expr := range stra.PushExpr.Exps { - - respData, err := GetData(stra, expr, val, now) - if err != nil { - logger.Errorf("stra:%+v get query data err:%v", stra, err) - return - } - if len(respData) <= 0 { - logger.Errorf("stra:%+v get query data respData:%v err", stra, respData) - return - } - - history, info, lastValue, status := Judge(stra, expr, respData, val, now) - - statusArr = append(statusArr, status) - if value == "" { - value = fmt.Sprintf("%s: %s", expr.Metric, lastValue) - } else { - value += fmt.Sprintf("; %s: %s", expr.Metric, lastValue) - } - - historyArr = append(historyArr, history) - if eventInfo == "" { - eventInfo = info - } else { - if stra.PushExpr.TogetherOrAny == 0 { - eventInfo += fmt.Sprintf(" & %s", info) - } else if stra.PushExpr.TogetherOrAny == 1 { - eventInfo += fmt.Sprintf(" || %s", info) - } - - } - - } - - } - - bs, err := json.Marshal(historyArr) - if err != nil { - logger.Errorf("Marshal history:%+v err:%v", historyArr, err) - } - - event := &models.AlertEvent{ - RuleId: stra.Id, - RuleName: stra.Name, - RuleNote: stra.Note, - HashId: str.MD5(fmt.Sprintf("%d_%s", stra.Id, val.PK)), - ResIdent: val.Ident, - Priority: stra.Priority, - HistoryPoints: bs, - TriggerTime: now, - Values: value, - NotifyChannels: stra.NotifyChannels, - NotifyGroups: stra.NotifyGroups, - NotifyUsers: stra.NotifyUsers, - RunbookUrl: stra.RunbookUrl, - ReadableExpression: eventInfo, - TagMap: val.TagsMap, - } - logger.Debugf("[ToJudge.event.create][statusArr:%v][type=push][stra:%+v][val:%+v][event:%+v]", statusArr, stra, val, event) - sendEventIfNeed(statusArr, event, stra) -} - -func Judge(stra *models.AlertRule, exp models.Exp, historyData []*vos.HPoint, firstItem *vos.MetricPoint, now int64) (history vos.HistoryPoints, info string, lastValue string, status bool) { - - var leftValue vos.JsonFloat - if exp.Func 
== "stddev" { - info = fmt.Sprintf(" %s (%s,%ds) %v", exp.Metric, exp.Func, stra.AlertDuration, exp.Params) - } else if exp.Func == "happen" { - info = fmt.Sprintf(" %s (%s,%ds) %v %s %v", exp.Metric, exp.Func, stra.AlertDuration, exp.Params, exp.Optr, exp.Threshold) - } else { - info = fmt.Sprintf(" %s(%s,%ds) %s %v", exp.Metric, exp.Func, stra.AlertDuration, exp.Optr, exp.Threshold) - } - - leftValue, status = judgeItemWithStrategy(stra, historyData, exp, firstItem, now) - - lastValue = "null" - if !math.IsNaN(float64(leftValue)) { - lastValue = strconv.FormatFloat(float64(leftValue), 'f', -1, 64) - } - - history = vos.HistoryPoints{ - Metric: exp.Metric, - Tags: firstItem.TagsMap, - Points: historyData, - } - return -} - -func judgeItemWithStrategy(stra *models.AlertRule, historyData []*vos.HPoint, exp models.Exp, firstItem *vos.MetricPoint, now int64) (leftValue vos.JsonFloat, isTriggered bool) { - straFunc := exp.Func - - var straParam []interface{} - - straParam = append(straParam, stra.AlertDuration) - - switch straFunc { - case "happen", "stddev": - if len(exp.Params) < 1 { - logger.Errorf("stra:%d exp:%+v stra param is null", stra.Id, exp) - return - } - straParam = append(straParam, exp.Params[0]) - case "c_avg", "c_avg_abs", "c_avg_rate", "c_avg_rate_abs": - if len(exp.Params) < 1 { - logger.Errorf("stra:%d exp:%+v stra param is null", stra.Id, exp) - return - } - - hisD, err := GetData(stra, exp, firstItem, now-int64(exp.Params[0])) - if err != nil { - logger.Errorf("stra:%v %+v get compare data err:%v", stra.Id, exp, err) - return - } - - if len(hisD) != 1 { - logger.Errorf("stra:%d %+v get compare data err, respItems:%v", stra.Id, exp, hisD) - return - } - - var sum float64 - for _, i := range hisD { - sum += float64(i.Value) - } - - //环比数据的平均值 - straParam = append(straParam, sum/float64(len(hisD))) - } - - fn, err := ParseFuncFromString(straFunc, straParam, exp.Optr, exp.Threshold) - if err != nil { - logger.Errorf("stra:%d %+v parse func fail: %v", stra.Id, exp, err) - return - } - - return fn.Compute(historyData) -} - -func GetData(stra *models.AlertRule, exp models.Exp, firstItem *vos.MetricPoint, now int64) ([]*vos.HPoint, error) { - var respData []*vos.HPoint - var err error - - //多查一些数据,防止由于查询不到最新点,导致点数不够 - start := now - int64(stra.AlertDuration) - 2 - // 这里的参数肯定只有一个 - queryParam, err := NewQueryRequest(firstItem.Ident, exp.Metric, firstItem.TagsMap, start, now) - - if err != nil { - return respData, err - } - respData = Query(queryParam) - logger.Debugf("[exp:%+v][queryParam:%+v][respData:%+v]\n", exp, queryParam, respData) - return respData, err -} - -// 虽然最近的数据确实产生了事件(产生事件很频繁),但是未必一定要发送,只有告警/恢复状态发生变化的时候才需发送 -func sendEventIfNeed(status []bool, event *models.AlertEvent, stra *models.AlertRule) { - isTriggered := true - - if stra.Type == 0 { - // 只判断push型的 - switch stra.PushExpr.TogetherOrAny { - - case 0: - // 全部触发 - for _, s := range status { - isTriggered = isTriggered && s - } - - case 1: - // 任意一个触发 - isTriggered = false - for _, s := range status { - if s == true { - isTriggered = true - break - } - } - - } - } - - now := time.Now().Unix() - lastEvent, exists := LastEvents.Get(event.RuleId, event.HashId) - - switch event.IsPromePull { - case 0: - // push型的 && 与条件型的 - if exists && lastEvent.IsPromePull == 1 { - // 之前内存中的事件是pull型的,先清空内存中的事件 - LastEvents.Del(event.RuleId, event.HashId) - } - - if isTriggered { - // 新告警或者上次是恢复事件,都需要立即发送 - if !exists || lastEvent.IsRecov() { - event.MarkAlert() - SendEvent(event) - } - } else { - // 上次是告警事件,现在恢复了,自然需要通知 - if exists 
&& lastEvent.IsAlert() { - event.MarkRecov() - SendEvent(event) - } - } - case 1: - // pull型的,产生的事件一定是触发了阈值的,即这个case里不存在recovery的场景,recovery的场景用resolve_timeout的cron来处理 - if exists && lastEvent.IsPromePull == 0 { - // 之前内存中的事件是push型的,先清空内存中的事件 - LastEvents.Del(event.RuleId, event.HashId) - } - - // 1. 第一次来,并且AlertDuration=0,直接发送 - // 2. 触发累计到AlertDuration时长后触发一条 - if !exists { - // 这是个新事件,之前未曾产生过的 - if stra.AlertDuration == 0 { - // 代表prometheus rule for 配置为0,直接发送 - event.LastSend = true - event.MarkAlert() - SendEvent(event) - } else { - // 只有一条事件,显然无法满足for AlertDuration的时间,放到内存里等待 - LastEvents.Set(event) - } - return - } - - // 内存里有事件,虽然AlertDuration是0但是上次没有发过(可能是中间调整过AlertDuration,比如从某个大于0的值调整为0) - if stra.AlertDuration == 0 && !lastEvent.LastSend { - event.LastSend = true - event.MarkAlert() - SendEvent(event) - return - } - - // 内存里有事件,AlertDuration也是大于0的,需要判断Prometheus里的for的逻辑 - if now-lastEvent.TriggerTime < int64(stra.AlertDuration) { - // 距离上次告警的时间小于告警统计周期,即不满足for的条件,不产生告警通知 - return - } - - logger.Debugf("[lastEvent.LastSend:%+v][event.LastSend:%+v][now:%+v][lastEvent.TriggerTime:%+v][stra.AlertDuration:%+v][now-lastEvent.TriggerTime:%+v]\n", - lastEvent.LastSend, - event.LastSend, - now, - lastEvent.TriggerTime, - stra.AlertDuration, - now-lastEvent.TriggerTime, - ) - - // 满足for的条件了,应产生事件,但是未必一定要发送,上次没发送或者上次是恢复这次才发送,即保证只发一条 - if !lastEvent.LastSend || lastEvent.IsRecov() { - event.LastSend = true - event.MarkAlert() - SendEvent(event) - } - } -} - -func SendEvent(event *models.AlertEvent) { - // update last event - LastEvents.Set(event) - - if event.IsAlert() { - // 只有是告警事件,才需要判断是否重复发送的问题,如果是恢复事件,就直接交给后续alert处理 - ae, err := models.AlertEventGet("hash_id = ?", event.HashId) - if err == nil && ae != nil { - logger.Debugf("[event exists do not send again][type:%+v][event:%+v]", event.IsPromePull, event) - return - } - } - - ok := EventQueue.PushFront(event) - if !ok { - logger.Errorf("push event:%v err", event) - } - logger.Debugf("[SendEvent.event.success][type:%+v][event:%+v]", event.IsPromePull, event) -} diff --git a/judge/history.go b/judge/history.go deleted file mode 100644 index be6db525..00000000 --- a/judge/history.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2017 Xiaomi, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
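The deleted judge/history.go below keeps each series' recent points in 256 buckets keyed by the first two hex characters of the point's PK, so locking is per bucket rather than global. Here is a simplified stand-alone sketch of that sharding only; it is not the original implementation (the real code stores a linked list per series and trims it by the rule's alert duration, whereas a plain slice is used here).

```go
package main

import (
	"fmt"
	"sync"
)

// metricPoint stands in for vos.MetricPoint; PK is the hashed series key.
type metricPoint struct {
	PK    string
	Time  int64
	Value float64
}

// bucket stands in for PointCache: one lock per bucket, series key -> points.
type bucket struct {
	sync.RWMutex
	m map[string][]metricPoint
}

var caches = make(map[string]*bucket)

// initCaches builds the 256 buckets ("00".."ff"), like initPointCaches below.
func initCaches() {
	const hex = "0123456789abcdef"
	for i := 0; i < 16; i++ {
		for j := 0; j < 16; j++ {
			caches[string(hex[i])+string(hex[j])] = &bucket{m: make(map[string][]metricPoint)}
		}
	}
}

// putPoint shards by the first two characters of PK, as PutPoint does.
func putPoint(p metricPoint) {
	b := caches[p.PK[0:2]]
	b.Lock()
	defer b.Unlock()
	b.m[p.PK] = append(b.m[p.PK], p)
}

func main() {
	initCaches()
	putPoint(metricPoint{PK: "a3f0...", Time: 1700000000, Value: 0.93}) // PK value is made up
	fmt.Println(len(caches)) // 256
}
```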
- -package judge - -import ( - "container/list" - "sync" - "time" - - "github.com/didi/nightingale/v5/vos" -) - -type PointCache struct { - sync.RWMutex - M map[string]*SafeLinkedList -} - -func NewPointCache() *PointCache { - return &PointCache{M: make(map[string]*SafeLinkedList)} -} - -func (pc *PointCache) Get(key string) (*SafeLinkedList, bool) { - pc.RLock() - defer pc.RUnlock() - val, ok := pc.M[key] - return val, ok -} - -func (pc *PointCache) Set(key string, val *SafeLinkedList) { - pc.Lock() - defer pc.Unlock() - pc.M[key] = val -} - -func (pc *PointCache) Len() int { - pc.RLock() - defer pc.RUnlock() - return len(pc.M) -} - -func (pc *PointCache) CleanStale(before int64) { - var keys []string - - pc.RLock() - for key, L := range pc.M { - front := L.Front() - if front == nil { - continue - } - - if front.Value.(*vos.MetricPoint).Time < before { - keys = append(keys, key) - } - } - pc.RUnlock() - - pc.BatchDelete(keys) -} - -func (pc *PointCache) BatchDelete(keys []string) { - count := len(keys) - if count == 0 { - return - } - - pc.Lock() - defer pc.Unlock() - for i := 0; i < count; i++ { - delete(pc.M, keys[i]) - } -} - -func (pc *PointCache) PutPoint(p *vos.MetricPoint, maxAliveDuration int64) *SafeLinkedList { - linkedList, exists := pc.Get(p.PK) - if exists { - linkedList.PushFrontAndMaintain(p, maxAliveDuration) - } else { - NL := list.New() - NL.PushFront(p) - linkedList = &SafeLinkedList{L: NL} - pc.Set(p.PK, linkedList) - } - - return linkedList -} - -// 这是个线程不安全的大Map,需要提前初始化好 -var PointCaches = make(map[string]*PointCache) -var pointChars = []string{"0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f"} -var pointHeadKeys = make([]string, 0, 256) - -func initPointCaches() { - for i := 0; i < 16; i++ { - for j := 0; j < 16; j++ { - pointHeadKeys = append(pointHeadKeys, pointChars[i]+pointChars[j]) - } - } - - for i := 0; i < 256; i++ { - PointCaches[pointHeadKeys[i]] = NewPointCache() - } -} - -func CleanStalePoints() { - // 监控数据2天都没关联到任何告警策略,说明对应的告警策略已经删除了 - before := time.Now().Unix() - 3600*24*2 - for i := 0; i < 256; i++ { - PointCaches[pointHeadKeys[i]].CleanStale(before) - } -} diff --git a/judge/judge.go b/judge/judge.go deleted file mode 100644 index 33c40c34..00000000 --- a/judge/judge.go +++ /dev/null @@ -1,118 +0,0 @@ -package judge - -import ( - "context" - "fmt" - "os" - "time" - - "github.com/didi/nightingale/v5/cache" - "github.com/didi/nightingale/v5/config" - "github.com/didi/nightingale/v5/models" - "github.com/didi/nightingale/v5/naming" - "github.com/toolkits/pkg/container/list" - "github.com/toolkits/pkg/logger" -) - -var ( - // 这个内存Queue放到judge的包里或alert的包里感觉都可以 - // 放到judge的包里,即当前的做法,相当于把alert看做judge的一个附属小功能 - // 这个Queue的核心作用就是削峰填谷,应对突然产生的大面积事件 - EventQueue *list.SafeListLimited - - // 上次同步全量告警规则的时间,全量同步都没做过,我这也不用处理PULL的规则了 - lastSyncTime int64 -) - -func Start(ctx context.Context) { - // PUSH型的告警引擎,依赖内存里缓存的数据来做告警判断,两层map减小锁粒度 - initPointCaches() - - // 把数据库中的未恢复告警同步一份到内存中,便于后续判断告警是否应该发送 - LastEvents.Init() - - // 默认初始化的大小是1000万,相当于内存里有1000万事件,应该够用了 - EventQueue = list.NewSafeListLimited(10000000) - - // 开始心跳,对于PUSH型的数据我有策略了自然就可以处理了 - if err := heartbeat(config.Config.Heartbeat.LocalAddr); err != nil { - fmt.Println(err) - logger.Close() - os.Exit(1) - } - - // 启动心跳goroutinue,如果挂了,trans可以及时感知 - go loopHeartbeat() - - // PULL型的策略不着急,等一段时间(等哈希环是稳态的)再开始周期性干活 - go syncPullRules(ctx) - - // 告警策略删除之后,针对这些告警策略缓存的监控数据要被清理 - go loopCleanStalePoints() -} - -func syncPullRules(ctx context.Context) { - // 先等一会再干活,等大部分judge都上报心跳过了,哈希环不变了 - 
time.Sleep(time.Second * 33) - for { - syncPullRulesOnce(ctx) - time.Sleep(time.Second * 9) - } -} - -func syncPullRulesOnce(ctx context.Context) { - if cache.AlertRulesByMetric.LastSync == lastSyncTime { - return - } - - // 根据我自己的标识,去查找属于我的PULL型告警规则 - ident := config.Config.Heartbeat.LocalAddr - - rules := cache.AlertRules.Pulls() - count := len(rules) - mines := make([]models.AlertRule, 0, count) - logger.Debugf("[got_one_pull_rule_for_all][ruleNum:%v]", count) - for i := 0; i < count; i++ { - - instance, err := naming.HashRing.GetNode(fmt.Sprint(rules[i].Id)) - if err != nil { - logger.Warningf("hashring: sharding pull rule(%d) fail: %v", rules[i].Id, err) - continue - } - logger.Debugf("[got_one_pull_rule_hash_result][instance:%v][ident:%v][rule:%v]", instance, ident, rules[i]) - if instance == ident { - // 属于我的 - mines = append(mines, *rules[i]) - logger.Debugf("[got_one_pull_rule_for_me][rule:%v]", rules[i]) - } - } - - pullRuleManager.SyncRules(ctx, mines) - lastSyncTime = cache.AlertRulesByMetric.LastSync -} - -func loopHeartbeat() { - interval := time.Duration(config.Config.Heartbeat.Interval) * time.Millisecond - - for { - time.Sleep(interval) - if err := heartbeat(config.Config.Heartbeat.LocalAddr); err != nil { - logger.Warning(err) - } - } -} - -func heartbeat(endpoint string) error { - err := models.InstanceHeartbeat(config.EndpointName, endpoint) - if err != nil { - return fmt.Errorf("mysql.error: instance(service=%s, endpoint=%s) heartbeat fail: %v", config.EndpointName, endpoint, err) - } - return nil -} - -func loopCleanStalePoints() { - for { - time.Sleep(time.Hour) - CleanStalePoints() - } -} diff --git a/judge/last_event.go b/judge/last_event.go deleted file mode 100644 index 046c5978..00000000 --- a/judge/last_event.go +++ /dev/null @@ -1,119 +0,0 @@ -package judge - -import ( - "fmt" - "os" - "sync" - "time" - - "github.com/didi/nightingale/v5/models" - "github.com/toolkits/pkg/logger" -) - -// rule_id -> hash_id -> *models.AlertEvent -type SafeEventMap struct { - sync.RWMutex - M map[int64]map[string]*models.AlertEvent -} - -var ( - LastEvents = &SafeEventMap{M: make(map[int64]map[string]*models.AlertEvent)} -) - -func (s *SafeEventMap) Get(ruleId int64, hashId string) (*models.AlertEvent, bool) { - s.RLock() - defer s.RUnlock() - - m, has := s.M[ruleId] - if !has { - return nil, false - } - - event, has := m[hashId] - return event, has -} - -func (s *SafeEventMap) Set(event *models.AlertEvent) { - s.Lock() - defer s.Unlock() - - _, has := s.M[event.RuleId] - if !has { - m := make(map[string]*models.AlertEvent) - m[event.HashId] = event - s.M[event.RuleId] = m - } else { - s.M[event.RuleId][event.HashId] = event - } -} - -func (s *SafeEventMap) Init() { - aes, err := models.AlertEventGetAll() - if err != nil { - fmt.Println("load all alert_event fail:", err) - os.Exit(1) - } - - if len(aes) == 0 { - return - } - - data := make(map[int64]map[string]*models.AlertEvent) - for i := 0; i < len(aes); i++ { - event := aes[i] - _, has := data[event.RuleId] - if !has { - m := make(map[string]*models.AlertEvent) - m[event.HashId] = event - data[event.RuleId] = m - } else { - data[event.RuleId][event.HashId] = event - } - } - - s.Lock() - s.M = data - s.Unlock() -} - -func (s *SafeEventMap) Del(ruleId int64, hashId string) { - s.Lock() - defer s.Unlock() - - _, has := s.M[ruleId] - if !has { - return - } - - delete(s.M[ruleId], hashId) -} - -func (s *SafeEventMap) DeleteOrSendRecovery(ruleId int64, toKeepKeys map[string]struct{}) { - s.Lock() - defer s.Unlock() - - m, has := 
s.M[ruleId] - if !has { - return - } - - for k, ev := range m { - if _, loaded := toKeepKeys[k]; loaded { - continue - } - - // 如果因为promql修改,导致本来是告警状态变成了恢复,也接受 - logger.Debugf("[to_del][ev.IsRecovery:%+v][ev.LastSend:%+v]", ev.IsRecovery, ev.LastSend) - - // promql 没查询到结果,需要将告警标记为已恢复并发送 - // 同时需要满足 已经发送过触发信息,并且时间差满足 大于AlertDuration - // 为了避免 发送告警后 一个点 断点了就立即发送恢复信息的case - now := time.Now().Unix() - if ev.IsAlert() && ev.LastSend && now-ev.TriggerTime > ev.AlertDuration { - logger.Debugf("[prom.alert.MarkRecov][ev.RuleName:%v]", ev.RuleName) - ev.MarkRecov() - EventQueue.PushFront(ev) - delete(s.M[ruleId], k) - } - } -} diff --git a/judge/linkedlist.go b/judge/linkedlist.go deleted file mode 100644 index eecec231..00000000 --- a/judge/linkedlist.go +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright 2017 Xiaomi, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package judge - -import ( - "container/list" - "sync" - - "github.com/didi/nightingale/v5/vos" -) - -type SafeLinkedList struct { - sync.RWMutex - L *list.List -} - -func (ll *SafeLinkedList) Front() *list.Element { - ll.RLock() - defer ll.RUnlock() - return ll.L.Front() -} - -func (ll *SafeLinkedList) Len() int { - ll.RLock() - defer ll.RUnlock() - return ll.L.Len() -} - -func (ll *SafeLinkedList) PushFrontAndMaintain(v *vos.MetricPoint, maintainDuration int64) { - ll.Lock() - defer ll.Unlock() - - sz := ll.L.Len() - lastPointTs := ll.L.Front().Value.(*vos.MetricPoint).Time - earliestTs := v.Time - maintainDuration - - if sz > 0 { - // 新push上来的数据有可能重复了,或者timestamp不对,这种数据要丢掉 - if v.Time <= lastPointTs { - return - } - } - - ll.L.PushFront(v) - - sz++ - - for i := 0; i < sz; i++ { - if ll.L.Back().Value.(*vos.MetricPoint).Time >= earliestTs { - break - } - //最前面的点已经不在告警策略时间周期内,丢弃掉 - ll.L.Remove(ll.L.Back()) - } -} - -func (ll *SafeLinkedList) HistoryPoints(smallestTime int64) []*vos.HPoint { - size := ll.Len() - if size == 0 { - return []*vos.HPoint{} - } - - firstElement := ll.Front() - firstItem := firstElement.Value.(*vos.MetricPoint) - - vs := make([]*vos.HPoint, 0) - - if firstItem.Time < smallestTime { - return vs - } - - v := &vos.HPoint{ - Timestamp: firstItem.Time, - Value: vos.JsonFloat(firstItem.Value), - } - - vs = append(vs, v) - - currentElement := firstElement - for i := 1; i < size; i++ { - nextElement := currentElement.Next() - if nextElement == nil { - return vs - } - - item := nextElement.Value.(*vos.MetricPoint) - - if item.Time < smallestTime { - return vs - } - - v := &vos.HPoint{ - Timestamp: item.Time, - Value: vos.JsonFloat(item.Value), - } - vs = append(vs, v) - currentElement = nextElement - } - - return vs -} - -// func (ll *SafeLinkedList) QueryDataByTS(start, end int64) []*vos.HPoint { -// size := ll.Len() -// if size == 0 { -// return []*vos.HPoint{} -// } - -// firstElement := ll.Front() -// firstItem := firstElement.Value.(*vos.MetricPoint) - -// var vs []*vos.HPoint - -// if firstItem.Time < start { -// //最新的点也比起始时间旧,直接返回 -// return vs -// } - -// v := &vos.HPoint{ -// Timestamp: 
firstItem.Time, -// Value: vos.JsonFloat(firstItem.Value), -// } - -// vs = append(vs, v) -// currentElement := firstElement - -// for { -// nextElement := currentElement.Next() -// if nextElement == nil { -// return vs -// } - -// if nextElement.Value.(*vos.MetricPoint).Time < start { -// return vs -// } - -// if nextElement.Value.(*vos.MetricPoint).Time > end { -// currentElement = nextElement -// continue -// } - -// v := &vos.HPoint{ -// Timestamp: nextElement.Value.(*vos.MetricPoint).Time, -// Value: vos.JsonFloat(nextElement.Value.(*vos.MetricPoint).Value), -// } - -// vs = append(vs, v) -// currentElement = nextElement -// } - -// return vs -// } diff --git a/judge/prome_pull.go b/judge/prome_pull.go deleted file mode 100644 index 9c57444d..00000000 --- a/judge/prome_pull.go +++ /dev/null @@ -1,200 +0,0 @@ -package judge - -import ( - "context" - "encoding/json" - "fmt" - "strings" - "sync" - "time" - - "github.com/prometheus/prometheus/promql" - "github.com/toolkits/pkg/logger" - "github.com/toolkits/pkg/str" - - "github.com/didi/nightingale/v5/backend" - "github.com/didi/nightingale/v5/models" - "github.com/didi/nightingale/v5/vos" -) - -const ( - DEFAULT_PULL_ALERT_INTERVAL = 15 - LABEL_NAME = "__name__" -) - -type RuleManager struct { - targetMtx sync.Mutex - activeRules map[string]RuleEval -} - -var pullRuleManager = NewRuleManager() - -func NewRuleManager() *RuleManager { - return &RuleManager{ - activeRules: make(map[string]RuleEval), - } -} - -type RuleEval struct { - R models.AlertRule - quiteChan chan struct{} - ctx context.Context -} - -func (re RuleEval) start() { - go func(re RuleEval) { - logger.Debugf("[prome_pull_alert_start][RuleEval: %+v]", re) - if re.R.PullExpr.EvaluationInterval <= 0 { - re.R.PullExpr.EvaluationInterval = DEFAULT_PULL_ALERT_INTERVAL - } - - sleepDuration := time.Duration(re.R.PullExpr.EvaluationInterval) * time.Second - - for { - select { - case <-re.ctx.Done(): - return - case <-re.quiteChan: - return - default: - } - - // 获取backend的prometheus DataSource - pb, err := backend.GetDataSourceFor("prometheus") - if err != nil { - logger.Errorf("[pull_alert][get_prome_datasource_error][err: %v]", err) - return - } - - // 调prometheus instance query 查询数据 - promVector := pb.QueryVector(re.R.PullExpr.PromQl) - - handlePromqlVector(promVector, re.R) - - time.Sleep(sleepDuration) - } - }(re) -} - -func (r RuleEval) stop() { - logger.Debugf("[prome_pull_alert_stop][RuleEval: %+v]", r) - close(r.quiteChan) -} - -func (rm *RuleManager) SyncRules(ctx context.Context, rules []models.AlertRule) { - - thisNewRules := make(map[string]RuleEval) - thisAllRules := make(map[string]RuleEval) - - rm.targetMtx.Lock() - for _, r := range rules { - newR := RuleEval{ - R: r, - quiteChan: make(chan struct{}, 1), - ctx: ctx, - } - hash := str.MD5(fmt.Sprintf("rid_%d_%d_%d_%s", - r.Id, - r.AlertDuration, - r.PullExpr.EvaluationInterval, - r.PullExpr.PromQl, - )) - thisAllRules[hash] = newR - if _, loaded := rm.activeRules[hash]; !loaded { - thisNewRules[hash] = newR - rm.activeRules[hash] = newR - } - } - - // 停止旧的 - for hash := range rm.activeRules { - if _, loaded := thisAllRules[hash]; !loaded { - rm.activeRules[hash].stop() - delete(rm.activeRules, hash) - } - } - rm.targetMtx.Unlock() - - // 开启新的 - for hash := range thisNewRules { - thisNewRules[hash].start() - } -} - -func handlePromqlVector(pv promql.Vector, r models.AlertRule) { - toKeepKeys := map[string]struct{}{} - if len(pv) == 0 { - // 说明没触发,或者没查询到,删掉rule-id开头的所有event - LastEvents.DeleteOrSendRecovery(r.Id, 
toKeepKeys) - - return - } - - for _, s := range pv { - readableStr := s.Metric.String() - - value := fmt.Sprintf("[vector=%s]: [value=%f]", readableStr, s.Point.V) - hashId := str.MD5(fmt.Sprintf("s_%d_%s", r.Id, readableStr)) - toKeepKeys[hashId] = struct{}{} - tags := "" - tagm := make(map[string]string) - metricsName := "" - for _, l := range s.Metric { - if l.Name == LABEL_NAME { - metricsName = l.Value - continue - } - tags += fmt.Sprintf("%s=%s,", l.Name, l.Value) - tagm[l.Name] = l.Value - - } - - tags = strings.TrimRight(tags, ",") - // prometheus查询返回 13位时间戳 - triggerTs := s.T / 1e3 - //triggerTs := time.Now().Unix() - historyArr := make([]vos.HistoryPoints, 0) - - hp := &vos.HPoint{ - Timestamp: triggerTs, - Value: vos.JsonFloat(s.V), - } - historyArr = append(historyArr, vos.HistoryPoints{ - Metric: metricsName, - Tags: tagm, - Points: []*vos.HPoint{hp}, - }) - bs, err := json.Marshal(historyArr) - if err != nil { - logger.Errorf("[pull_alert][historyArr_json_Marshal_error][historyArr:%+v][err: %v]", historyArr, err) - return - } - logger.Debugf("[proml.historyArr][metricsName:%v][Tags:%v]\n", metricsName, tagm) - - event := &models.AlertEvent{ - RuleId: r.Id, - RuleName: r.Name, - RuleNote: r.Note, - HashId: hashId, - IsPromePull: 1, - IsRecovery: 0, - Priority: r.Priority, - HistoryPoints: bs, - TriggerTime: triggerTs, - Values: value, - NotifyChannels: r.NotifyChannels, - NotifyGroups: r.NotifyGroups, - NotifyUsers: r.NotifyUsers, - RunbookUrl: r.RunbookUrl, - ReadableExpression: r.PullExpr.PromQl, - Tags: tags, - AlertDuration: int64(r.AlertDuration), - TagMap: tagm, - } - - logger.Debugf("[handlePromqlVector_has_value][event:%+v]\n", event) - sendEventIfNeed([]bool{true}, event, &r) - } - LastEvents.DeleteOrSendRecovery(r.Id, toKeepKeys) - -} diff --git a/judge/query.go b/judge/query.go deleted file mode 100644 index 15103c37..00000000 --- a/judge/query.go +++ /dev/null @@ -1,92 +0,0 @@ -package judge - -import ( - "errors" - "sort" - - "github.com/didi/nightingale/v5/backend" - "github.com/didi/nightingale/v5/vos" - - "github.com/toolkits/pkg/logger" -) - -var ( - ErrorIndexParamIllegal = errors.New("index param illegal") - ErrorQueryParamIllegal = errors.New("query param illegal") -) - -func queryDataByBackend(args vos.DataQueryParam) []*vos.DataQueryResp { - dataSource, err := backend.GetDataSourceFor("") - if err != nil { - logger.Warningf("could not find datasource") - return nil - } - reply := dataSource.QueryData(args) - - return reply -} - -// 执行Query操作 -// 默认不重试, 如果要做重试, 在这里完成 -func Query(reqs *vos.DataQueryParam) []*vos.HPoint { - hisD := make([]*vos.HPoint, 0) - - // 默认重试 - queryResD := queryDataByBackend(*reqs) - - if len(queryResD) == 0 { - return hisD - } - logger.Debugf("[reqs:%+v][queryResD:%+v]\n", reqs, queryResD[0]) - fD := queryResD[0] - - var values vos.HistoryDataS - - //裁剪掉多余的点 - for _, i := range fD.Values { - //将毫秒改为秒 - if i.Timestamp/1000000000 > 1 { - i.Timestamp = i.Timestamp / 1000 - } - - oneV := &vos.HPoint{ - Timestamp: i.Timestamp, - Value: i.Value, - } - values = append(values, oneV) - } - - sort.Sort(values) - - return values -} - -func NewQueryRequest(ident, metric string, tagsMap map[string]string, - start, end int64) (*vos.DataQueryParam, error) { - if end <= start || start < 0 { - return nil, ErrorQueryParamIllegal - } - - tagPairs := make([]*vos.TagPair, 0) - for k, v := range tagsMap { - oneKeyV := &vos.TagPair{ - Key: k, - Value: v, - } - tagPairs = append(tagPairs, oneKeyV) - - } - - paramOne := vos.DataQueryParamOne{ - Idents: 
[]string{ident}, - Metric: metric, - TagPairs: tagPairs, - } - paramS := make([]vos.DataQueryParamOne, 0) - paramS = append(paramS, paramOne) - return &vos.DataQueryParam{ - Start: start, - End: end, - Params: paramS, - }, nil -} diff --git a/main.go b/main.go deleted file mode 100644 index 5ea363c1..00000000 --- a/main.go +++ /dev/null @@ -1,117 +0,0 @@ -package main - -import ( - "context" - "flag" - "fmt" - "os" - "os/signal" - "syscall" - - _ "github.com/go-sql-driver/mysql" - prom_runtime "github.com/prometheus/prometheus/pkg/runtime" - "github.com/toolkits/pkg/logger" - "github.com/toolkits/pkg/runner" - - "github.com/didi/nightingale/v5/alert" - "github.com/didi/nightingale/v5/backend" - "github.com/didi/nightingale/v5/config" - "github.com/didi/nightingale/v5/http" - "github.com/didi/nightingale/v5/judge" - "github.com/didi/nightingale/v5/models" - "github.com/didi/nightingale/v5/pkg/i18n" - "github.com/didi/nightingale/v5/pkg/ilog" - "github.com/didi/nightingale/v5/rpc" - "github.com/didi/nightingale/v5/timer" - "github.com/didi/nightingale/v5/trans" -) - -var ( - vers *bool - help *bool -) - -func init() { - vers = flag.Bool("v", false, "display the version.") - help = flag.Bool("h", false, "print this help.") - flag.Parse() - - if *vers { - fmt.Println("version:", config.Version) - os.Exit(0) - } - - if *help { - flag.Usage() - os.Exit(0) - } - - runner.Init() - fmt.Println("runner.cwd:", runner.Cwd) - fmt.Println("runner.hostname:", runner.Hostname) - fmt.Println("fd_limits", prom_runtime.FdLimits()) - fmt.Println("vm_limits", prom_runtime.VMLimits()) -} - -func main() { - parseConf() - - ilog.Init(config.Config.Logger) - i18n.Init(config.Config.I18N) - - models.InitMySQL(config.Config.MySQL) - models.InitLdap(config.Config.LDAP) - models.InitSalt() - models.InitRoot() - models.InitError() - - ctx, cancelFunc := context.WithCancel(context.Background()) - - timer.SyncResourceTags() - timer.SyncUsers() - timer.SyncUserGroups() - timer.SyncUserGroupMember() - timer.SyncClasspathReses() - timer.SyncCollectRules() - timer.SyncAlertMutes() - timer.SyncAlertRules() - timer.SyncMetricDesc() - timer.CleanExpireMute() - timer.CleanExpireResource() - timer.BindOrphanRes() - timer.UpdateAlias() - - judge.Start(ctx) - alert.Start(ctx) - trans.Start(ctx) - - backend.Init(config.Config.Trans.Backend) - - http.Start() - rpc.Start() - - endingProc(cancelFunc) -} - -func parseConf() { - if err := config.Parse(); err != nil { - fmt.Println("cannot parse configuration file:", err) - os.Exit(1) - } -} - -func endingProc(cancelFunc context.CancelFunc) { - c := make(chan os.Signal, 1) - signal.Notify(c, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) - - <-c - fmt.Printf("stop signal caught, stopping... 
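main.go above wires shutdown through a single cancellable context: judge, alert and trans all receive ctx, and endingProc cancels it once a termination signal arrives. A minimal sketch of the same pattern using only the standard library; the worker goroutine is a placeholder for those background loops.

package main

import (
	"context"
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())

	workerDone := make(chan struct{})
	go func() {
		<-ctx.Done() // every background loop selects on this, like judge/alert/trans above
		close(workerDone)
	}()

	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
	<-sigs

	fmt.Printf("stop signal caught, stopping... pid=%d\n", os.Getpid())
	cancel()     // ask workers to stop
	<-workerDone // wait for them before closing logs / HTTP servers
	fmt.Println("process stopped successfully")
}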
pid=%d\n", os.Getpid()) - - // 执行清理工作 - backend.DatasourceCleanUp() - cancelFunc() - logger.Close() - http.Shutdown() - - fmt.Println("process stopped successfully") -} diff --git a/models/alert_event.go b/models/alert_event.go deleted file mode 100644 index 96971340..00000000 --- a/models/alert_event.go +++ /dev/null @@ -1,305 +0,0 @@ -package models - -import ( - "encoding/json" - "fmt" - "strings" - - "github.com/didi/nightingale/v5/vos" - - "github.com/toolkits/pkg/logger" - "github.com/toolkits/pkg/str" - "xorm.io/builder" -) - -type AlertEvent struct { - Id int64 `json:"id"` - RuleId int64 `json:"rule_id"` - RuleName string `json:"rule_name"` - RuleNote string `json:"rule_note"` - // ProcessorUid int64 `json:"processor_uid"` - // ProcessorObj User `json:"processor_user_obj" xorm:"-"` - // EventNote string `json:"event_note"` - HashId string `json:"hash_id"` // 唯一标识 - IsPromePull int `json:"is_prome_pull"` // 代表是否是prometheus pull告警,为1时前端使用 ReadableExpression 拉取最近1小时数据 - LastSend bool `json:"last_sent" xorm:"-"` // true 代表上次发了,false代表还没发:给prometheus做for判断的 - AlertDuration int64 `xorm:"-" json:"alert_duration"` // 告警统计周期,PULL模型会当做P8S的for时间 - ResClasspaths string `json:"res_classpaths"` - ResIdent string `json:"res_ident" xorm:"-"` // res_ident会出现在tags字段,就不用单独写入数据库了,但是各块逻辑中有个单独的res_ident字段更便于处理,所以struct里还留有这个字段;前端不用展示这个字段 - Priority int `json:"priority"` - Status int `json:"status"` // 标识是否 被屏蔽 - IsRecovery int `json:"is_recovery" xorm:"-"` // 0: alert, 1: recovery - HistoryPoints json.RawMessage `json:"history_points"` // HistoryPoints{} - TriggerTime int64 `json:"trigger_time"` - Values string `json:"values" xorm:"-"` // e.g. cpu.idle: 23.3; load.1min: 32 - NotifyChannels string `json:"notify_channels"` - NotifyGroups string `json:"notify_groups"` - NotifyUsers string `json:"notify_users"` - RunbookUrl string `json:"runbook_url"` - ReadableExpression string `json:"readable_expression"` // e.g. 
mem.bytes.used.percent(all,60s) > 0 - Tags string `json:"tags"` // merge data_tags rule_tags and res_tags - NotifyGroupObjs []UserGroup `json:"notify_group_objs" xorm:"-"` - NotifyUserObjs []User `json:"notify_user_objs" xorm:"-"` - TagMap map[string]string `json:"tag_map" xorm:"-"` -} - -// IsAlert 语法糖,避免直接拿IsRecovery字段做比对不直观易出错 -func (ae *AlertEvent) IsAlert() bool { - return ae.IsRecovery != 1 -} - -// IsRecov 语法糖,避免直接拿IsRecovery字段做比对不直观易出错 -func (ae *AlertEvent) IsRecov() bool { - return ae.IsRecovery == 1 -} - -// MarkAlert 语法糖,标记为告警状态 -func (ae *AlertEvent) MarkAlert() { - ae.IsRecovery = 0 -} - -// MarkRecov 语法糖,标记为恢复状态 -func (ae *AlertEvent) MarkRecov() { - ae.IsRecovery = 1 -} - -// MarkMuted 语法糖,标记为屏蔽状态 -func (ae *AlertEvent) MarkMuted() { - ae.Status = 1 -} - -func (ae *AlertEvent) String() string { - return fmt.Sprintf("id:%d,rule_id:%d,rule_name:%s,rule_note:%s,hash_id:%s,is_prome_pull:%d,alert_duration:%d,res_classpaths:%s,res_ident:%s,priority:%d,status:%d,is_recovery:%d,history_points:%s,trigger_time:%d,values:%s,notify_channels:%s,runbook_url:%s,readable_expression:%s,tags:%s,notify_group_objs:%+v,notify_user_objs:%+v,tag_map:%v", - ae.Id, - ae.RuleId, - ae.RuleName, - ae.RuleNote, - ae.HashId, - ae.IsPromePull, - ae.AlertDuration, - ae.ResClasspaths, - ae.ResIdent, - ae.Priority, - ae.Status, - ae.IsRecovery, - string(ae.HistoryPoints), - ae.TriggerTime, - ae.Values, - ae.NotifyChannels, - ae.RunbookUrl, - ae.ReadableExpression, - ae.Tags, - ae.NotifyGroupObjs, - ae.NotifyUserObjs, - ae.TagMap) -} - -func (ae *AlertEvent) TableName() string { - return "alert_event" -} - -func (ae *AlertEvent) FillObjs() error { - userGroupIds := strings.Fields(ae.NotifyGroups) - if len(userGroupIds) > 0 { - groups, err := UserGroupGetsByIdsStr(userGroupIds) - if err != nil { - return err - } - ae.NotifyGroupObjs = groups - } - - userIds := strings.Fields(ae.NotifyUsers) - if len(userIds) > 0 { - users, err := UserGetsByIdsStr(userIds) - if err != nil { - return err - } - ae.NotifyUserObjs = users - } - - // if ae.ProcessorUid != 0 { - // processor, err := UserGetById(ae.ProcessorUid) - // if err != nil { - // return err - // } - // ae.ProcessorObj = *processor - // } - - return nil -} - -func (ae *AlertEvent) GetHistoryPoints() ([]vos.HistoryPoints, error) { - historyPoints := []vos.HistoryPoints{} - - err := json.Unmarshal([]byte(ae.HistoryPoints), &historyPoints) - return historyPoints, err -} - -func (ae *AlertEvent) Add() error { - return DBInsertOne(ae) -} - -func (ar *AlertEvent) DelByHashId() error { - _, err := DB.Where("hash_id=?", ar.HashId).Delete(new(AlertEvent)) - if err != nil { - logger.Errorf("mysql.error: delete alert_event fail: %v", err) - return internalServerError - } - - return nil -} - -func (ar *AlertEvent) HashIdExists() (bool, error) { - num, err := DB.Where("hash_id=?", ar.HashId).Count(new(AlertEvent)) - return num > 0, err -} - -func (ar *AlertEvent) Del() error { - _, err := DB.Where("id=?", ar.Id).Delete(new(AlertEvent)) - if err != nil { - logger.Errorf("mysql.error: delete alert_event fail: %v", err) - return internalServerError - } - - return nil -} - -func AlertEventsDel(ids []int64) error { - if len(ids) == 0 { - return fmt.Errorf("param ids is empty") - } - - _, err := DB.Exec("DELETE FROM alert_event where id in (" + str.IdsString(ids) + ")") - if err != nil { - logger.Errorf("mysql.error: delete alert_event(%v) fail: %v", ids, err) - return internalServerError - } - - return nil -} - -func AlertEventTotal(stime, etime int64, query string, status, 
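HistoryPoints is declared as json.RawMessage above so the event row can carry the serialized points untouched; only callers such as GetHistoryPoints pay the cost of decoding. A small stand-alone example of that lazy-decoding idea, with trimmed stand-in types rather than the real models.

package main

import (
	"encoding/json"
	"fmt"
)

type HPoint struct {
	Timestamp int64   `json:"t"`
	Value     float64 `json:"v"`
}

type event struct {
	HashId        string          `json:"hash_id"`
	HistoryPoints json.RawMessage `json:"history_points"` // kept as raw bytes until needed
}

func main() {
	raw := []byte(`{"hash_id":"abc","history_points":[{"t":1620000000,"v":0.97}]}`)

	var e event
	if err := json.Unmarshal(raw, &e); err != nil {
		panic(err)
	}

	var points []HPoint // decode lazily, only when a consumer actually asks for the points
	if err := json.Unmarshal(e.HistoryPoints, &points); err != nil {
		panic(err)
	}
	fmt.Println(e.HashId, points[0].Value) // abc 0.97
}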
priority int) (num int64, err error) { - cond := builder.NewCond() - if stime != 0 && etime != 0 { - cond = cond.And(builder.Between{Col: "trigger_time", LessVal: stime, MoreVal: etime}) - } - - if status != -1 { - cond = cond.And(builder.Eq{"status": status}) - } - - if priority != -1 { - cond = cond.And(builder.Eq{"priority": priority}) - } - - if query != "" { - arr := strings.Fields(query) - for i := 0; i < len(arr); i++ { - qarg := "%" + arr[i] + "%" - innerCond := builder.NewCond() - innerCond = innerCond.Or(builder.Like{"res_classpaths", qarg}) - innerCond = innerCond.Or(builder.Like{"rule_name", qarg}) - innerCond = innerCond.Or(builder.Like{"tags", qarg}) - cond = cond.And(innerCond) - } - } - - num, err = DB.Where(cond).Count(new(AlertEvent)) - if err != nil { - logger.Errorf("mysql.error: count alert_event fail: %v", err) - return 0, internalServerError - } - - return num, nil -} - -func AlertEventGets(stime, etime int64, query string, status, priority int, limit, offset int) ([]AlertEvent, error) { - cond := builder.NewCond() - if stime != 0 && etime != 0 { - cond = cond.And(builder.Between{Col: "trigger_time", LessVal: stime, MoreVal: etime}) - } - - if status != -1 { - cond = cond.And(builder.Eq{"status": status}) - } - - if priority != -1 { - cond = cond.And(builder.Eq{"priority": priority}) - } - - if query != "" { - arr := strings.Fields(query) - for i := 0; i < len(arr); i++ { - qarg := "%" + arr[i] + "%" - innerCond := builder.NewCond() - innerCond = innerCond.Or(builder.Like{"res_classpaths", qarg}) - innerCond = innerCond.Or(builder.Like{"rule_name", qarg}) - innerCond = innerCond.Or(builder.Like{"tags", qarg}) - cond = cond.And(innerCond) - } - } - - var objs []AlertEvent - err := DB.Where(cond).Desc("trigger_time").Limit(limit, offset).Find(&objs) - if err != nil { - logger.Errorf("mysql.error: query alert_event fail: %v", err) - return objs, internalServerError - } - - if len(objs) == 0 { - return []AlertEvent{}, nil - } - - return objs, nil -} - -func AlertEventGet(where string, args ...interface{}) (*AlertEvent, error) { - var obj AlertEvent - has, err := DB.Where(where, args...).Get(&obj) - - if err != nil { - logger.Errorf("mysql.error: query alert_event(%s)%+v fail: %s", where, args, err) - return nil, internalServerError - } - - if !has { - return nil, nil - } - - return &obj, nil -} - -func AlertEventGetAll() ([]*AlertEvent, error) { - var objs []*AlertEvent - err := DB.Find(&objs) - if err != nil { - return objs, err - } - - if len(objs) == 0 { - return []*AlertEvent{}, nil - } - - return objs, nil -} - -// func AlertEventUpdateEventNote(id int64, hashId string, note string, uid int64) error { -// session := DB.NewSession() -// defer session.Close() - -// if err := session.Begin(); err != nil { -// return err -// } - -// if _, err := session.Exec("UPDATE alert_event SET event_note = ?, processor_uid = ? WHERE id = ?", note, uid, id); err != nil { -// logger.Errorf("mysql.error: update alert_event event_note fail: %v", err) -// return err -// } - -// if _, err := session.Exec("UPDATE history_alert_event SET event_note = ?, processor_uid = ? WHERE hash_id = ? 
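AlertEventTotal and AlertEventGets above build their WHERE clause identically: optional time range, status and priority filters, then one AND-group per whitespace-separated search token, where each token may match res_classpaths, rule_name or tags. The real code uses xorm's builder; the sketch below only shows the token expansion in plain SQL with placeholders.

package main

import (
	"fmt"
	"strings"
)

// tokenFilter expands search tokens into
// "(res_classpaths LIKE ? OR rule_name LIKE ? OR tags LIKE ?)" groups joined by AND,
// mirroring the loop in AlertEventTotal/AlertEventGets.
func tokenFilter(query string) (string, []interface{}) {
	var groups []string
	var args []interface{}
	for _, tok := range strings.Fields(query) {
		q := "%" + tok + "%"
		groups = append(groups, "(res_classpaths LIKE ? OR rule_name LIKE ? OR tags LIKE ?)")
		args = append(args, q, q, q)
	}
	return strings.Join(groups, " AND "), args
}

func main() {
	where, args := tokenFilter("nginx bj")
	fmt.Println(where)
	fmt.Println(args) // [%nginx% %nginx% %nginx% %bj% %bj% %bj%]
}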
ORDER BY id DESC LIMIT 1", note, uid, hashId); err != nil { -// logger.Errorf("mysql.error: update history_alert_event event_note fail: %v", err) -// return err -// } - -// return session.Commit() -// } diff --git a/models/alert_rule.go b/models/alert_rule.go deleted file mode 100644 index 8d08f082..00000000 --- a/models/alert_rule.go +++ /dev/null @@ -1,429 +0,0 @@ -package models - -import ( - "encoding/json" - "fmt" - "regexp" - "strconv" - "strings" - "time" - - "github.com/prometheus/prometheus/promql/parser" - "github.com/toolkits/pkg/logger" - "github.com/toolkits/pkg/str" -) - -const PUSH = 0 -const PULL = 1 -const ALERT_RULE_ACTIVE = 0 -const ALERT_RULE_DISABLED = 1 - -type AlertRule struct { - Id int64 `json:"id"` - GroupId int64 `json:"group_id"` - Name string `json:"name"` - Type int `json:"type"` // 0: nightingale, 1: prometheus - Expression json.RawMessage `json:"expression"` - Status int `json:"status"` // 0: active, 1: disabled - AppendTags string `json:"append_tags"` - EnableStime string `json:"enable_stime"` - EnableEtime string `json:"enable_etime"` - EnableDaysOfWeek string `json:"enable_days_of_week"` - RecoveryNotify int `json:"recovery_notify"` - Priority int `json:"priority"` - NotifyChannels string `json:"notify_channels"` - NotifyGroups string `json:"notify_groups"` - NotifyUsers string `json:"notify_users"` - Callbacks string `json:"callbacks"` - RunbookUrl string `json:"runbook_url"` - Note string `json:"note"` - CreateAt int64 `json:"create_at"` - CreateBy string `json:"create_by"` - UpdateAt int64 `json:"update_at"` - UpdateBy string `json:"update_by"` - AlertDuration int `json:"alert_duration"` // 告警统计周期,PULL模型会当做P8S的for时间 - PushExpr PushExpression `xorm:"-" json:"-"` - PullExpr PullExpression `xorm:"-" json:"-"` - FirstMetric string `xorm:"-" json:"-"` // Exps里可能有多个metric,只取第一个,给后续制作map使用 - NotifyUsersDetail []*User `xorm:"-" json:"notify_users_detail"` - NotifyGroupsDetail []*UserGroup `xorm:"-" json:"notify_groups_detail"` -} - -type PushExpression struct { - TagFilters []TagFilter `json:"tags_filters"` - ResFilters []ResFilter `json:"res_filters"` - Exps []Exp `json:"trigger_conditions"` - TogetherOrAny int `json:"together_or_any"` // 所有触发还是触发一条即可,=0所有 =1一条 -} - -type PullExpression struct { - PromQl string `json:"promql"` // promql 最终表达式 - EvaluationInterval int `json:"evaluation_interval"` // promql pull 计算周期 -} - -type ResFilter struct { - Func string `json:"func"` - // * InClasspath -> 可以内存里做个大map,host->classpath,然后看host对应的classpath中是否有某一个满足InClasspath的条件 - // * NotInClasspath - // * InClasspathPrefix -> 可以内存里做个大map,host->classpath,然后看host对应的classpath中是否有某一个满足InClasspathPrefix的条件 - // * NotInClasspathPrefix - // * InResourceList - // * NotInResourceList - // * HasPrefixString - // * NoPrefixString - // * HasSuffixString - // * NoSuffixString - // * ContainsString - // * NotContainsString - // * MatchRegexp - // * NotMatchRegexp - Params []string `json:"params"` -} - -type TagFilter struct { - Key string `json:"key"` - Func string `json:"func"` - // * InList - // * NotInList - // * HasPrefixString - // * NoPrefixString - // * HasSuffixString - // * NoSuffixString - // * ContainsString - // * NotContainsString - // * MatchRegexp - // * NotMatchRegexp - Params []string `json:"params"` -} - -type Exp struct { - Optr string `json:"optr"` //>,<,=,!= - Func string `json:"func"` //all,max,min - Metric string `json:"metric"` //metric - Params []int `json:"params"` //连续n秒 - Threshold float64 `json:"threshold"` //阈值 -} - -func (ar *AlertRule) Decode() error { - 
if ar.Type == PUSH { - err := json.Unmarshal(ar.Expression, &ar.PushExpr) - if err != nil { - logger.Warningf("decode alert rule(%d): unmarshal push expression(%s) error: %v", ar.Id, string(ar.Expression), err) - return err - } - - if len(ar.PushExpr.Exps) < 1 { - logger.Warningf("decode alert rule(%d): exps size is zero", ar.Id) - return err - } - - ar.FirstMetric = ar.PushExpr.Exps[0].Metric - } else { - err := json.Unmarshal(ar.Expression, &ar.PullExpr) - if err != nil { - logger.Warningf("decode alert rule(%d): unmarshal pull expression(%s) error: %v", ar.Id, string(ar.Expression), err) - return err - } - } - - return nil -} - -func (ar *AlertRule) TableName() string { - return "alert_rule" -} - -func (ar *AlertRule) Validate() error { - if str.Dangerous(ar.Name) { - return _e("AlertRule name has invalid characters") - } - - if err := ar.Decode(); err != nil { - return _e("AlertRule expression is invalid") - } - - if ar.Type == PUSH { - if ar.AlertDuration <= 0 { - ar.AlertDuration = 60 - } - - for _, filter := range ar.PushExpr.ResFilters { - // 参数不能是空的,即不能一个参数都没有 - if len(filter.Params) == 0 { - return _e("Resource filter(Func:%s)'s param invalid", filter.Func) - } - - // 对于每个参数而言,不能包含空格,不能是空 - for i := range filter.Params { - if strings.ContainsAny(filter.Params[i], " \r\n\t") { - return _e("Resource filter(Func:%s)'s param invalid", filter.Func) - } - - if filter.Params[i] == "" { - return _e("Resource filter(Func:%s)'s param invalid", filter.Func) - } - } - - if strings.Contains(filter.Func, "Regexp") { - for i := range filter.Params { - _, err := regexp.Compile(filter.Params[i]) - if err != nil { - return _e("Regexp: %s cannot be compiled", filter.Params[i]) - } - } - } - } - - for _, filter := range ar.PushExpr.TagFilters { - // 参数不能是空的,即不能一个参数都没有 - if len(filter.Params) == 0 { - return _e("Tags filter(Func:%s)'s param invalid", filter.Func) - } - - // 对于每个参数而言,不能包含空格,不能是空 - for i := range filter.Params { - if strings.ContainsAny(filter.Params[i], " \r\n\t") { - return _e("Tags filter(Func:%s)'s param invalid", filter.Func) - } - - if filter.Params[i] == "" { - return _e("Tags filter(Func:%s)'s param invalid", filter.Func) - } - } - - if strings.Contains(filter.Func, "Regexp") { - for i := range filter.Params { - _, err := regexp.Compile(filter.Params[i]) - if err != nil { - return _e("Regexp: %s cannot be compiled", filter.Params[i]) - } - } - } - } - } - - if ar.Type == PULL { - if ar.AlertDuration <= 0 { - ar.AlertDuration = 60 - } - if ar.PullExpr.PromQl == "" { - return _e("promql empty") - } - _, err := parser.ParseExpr(ar.PullExpr.PromQl) - - if err != nil { - return _e("promql parse error:%s", err.Error()) - } - if ar.PullExpr.EvaluationInterval <= 0 { - ar.PullExpr.EvaluationInterval = 15 - } - } - - ar.AppendTags = strings.TrimSpace(ar.AppendTags) - arr := strings.Fields(ar.AppendTags) - for i := 0; i < len(arr); i++ { - // 如果有appendtags,那就要校验一下格式了 - if len(strings.Split(arr[i], "=")) != 2 { - return _e("AppendTags(%s) invalid", arr[i]) - } - } - - // notifyGroups notifyUsers check - gids := strings.Fields(ar.NotifyGroups) - for i := 0; i < len(gids); i++ { - if _, err := strconv.ParseInt(gids[i], 10, 64); err != nil { - // 这个如果真的非法了肯定是恶意流量,不用i18n - return fmt.Errorf("NotifyGroups(%s) invalid", ar.NotifyGroups) - } - } - - uids := strings.Fields(ar.NotifyUsers) - for i := 0; i < len(uids); i++ { - if _, err := strconv.ParseInt(uids[i], 10, 64); err != nil { - // 这个如果真的非法了肯定是恶意流量,不用i18n - return fmt.Errorf("NotifyUsers(%s) invalid", ar.NotifyUsers) - } - } - - return 
nil -} - -func AlertRuleCount(where string, args ...interface{}) (num int64, err error) { - num, err = DB.Where(where, args...).Count(new(AlertRule)) - if err != nil { - logger.Errorf("mysql.error: count alert_rule fail: %v", err) - return num, internalServerError - } - return num, nil -} - -func (ar *AlertRule) Add() error { - if err := ar.Validate(); err != nil { - return err - } - - num, err := AlertRuleCount("group_id=? and name=?", ar.GroupId, ar.Name) - if err != nil { - return err - } - - if num > 0 { - return _e("Alert rule %s already exists", ar.Name) - } - - now := time.Now().Unix() - ar.CreateAt = now - ar.UpdateAt = now - return DBInsertOne(ar) -} - -func (ar *AlertRule) Update(cols ...string) error { - if err := ar.Validate(); err != nil { - return err - } - - _, err := DB.Where("id=?", ar.Id).Cols(cols...).Update(ar) - if err != nil { - logger.Errorf("mysql.error: update alert_rule(id=%d) fail: %v", ar.Id, err) - return internalServerError - } - - return nil -} - -func AlertRuleUpdateStatus(ids []int64, status int, username string) error { - _, err := DB.Exec("UPDATE alert_rule SET status=?, update_at=?, update_by=? WHERE id in ("+str.IdsString(ids)+")", status, time.Now().Unix(), username) - return err -} - -func AlertRuleUpdateNotifyGroups(ids []int64, notifyGroups, notifyUsers, username string) error { - _, err := DB.Exec("UPDATE alert_rule SET notify_groups = ? , notify_users = ?, update_at = ?, update_by = ? where id in ("+str.IdsString(ids)+")", notifyGroups, notifyUsers, time.Now().Unix(), username) - return err -} - -func AlertRuleUpdateNotifyChannels(ids []int64, notifyChannels, username string) error { - _, err := DB.Exec("UPDATE alert_rule SET notify_channels = ?, update_at = ?, update_by = ? where id in ("+str.IdsString(ids)+")", notifyChannels, time.Now().Unix(), username) - return err -} - -func AlertRuleUpdateAppendTags(ids []int64, appendTags, username string) error { - _, err := DB.Exec("UPDATE alert_rule SET append_tags = ?, update_at = ?, update_by = ? 
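For PULL rules, Validate above rejects a rule whose PromQL does not parse before it ever reaches the evaluator, using the Prometheus parser package this model already imports. A stand-alone sketch of just that check:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql/parser"
)

func validatePromQL(q string) error {
	if q == "" {
		return fmt.Errorf("promql empty")
	}
	if _, err := parser.ParseExpr(q); err != nil {
		return fmt.Errorf("promql parse error: %s", err)
	}
	return nil
}

func main() {
	fmt.Println(validatePromQL(`rate(node_cpu_seconds_total{mode="idle"}[5m]) < 0.1`)) // <nil>
	fmt.Println(validatePromQL(`rate(node_cpu_seconds_total[5m`))                      // parse error
}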
where id in ("+str.IdsString(ids)+")", appendTags, time.Now().Unix(), username) - return err -} - -func AlertRuleTotal(query string) (num int64, err error) { - if query != "" { - q := "%" + query + "%" - num, err = DB.Where("name like ?", q).Count(new(AlertRule)) - } else { - num, err = DB.Count(new(AlertRule)) - } - - if err != nil { - logger.Errorf("mysql.error: count alert_rule fail: %v", err) - return 0, internalServerError - } - - return num, nil -} - -func AlertRuleGets(query string, limit, offset int) ([]AlertRule, error) { - session := DB.Limit(limit, offset).OrderBy("name") - if query != "" { - q := "%" + query + "%" - session = session.Where("name like ?", q) - } - - var objs []AlertRule - err := session.Find(&objs) - if err != nil { - logger.Errorf("mysql.error: query alert_rule fail: %v", err) - return objs, internalServerError - } - - return objs, nil -} - -func AlertRulesOfGroup(groupId int64) ([]AlertRule, error) { - var objs []AlertRule - err := DB.Where("group_id=?", groupId).OrderBy("name").Find(&objs) - if err != nil { - logger.Errorf("mysql.error: query alert_rule of group(id=%d) fail: %v", groupId, err) - return objs, internalServerError - } - - if len(objs) == 0 { - return []AlertRule{}, nil - } - - return objs, nil -} - -func AlertRuleGet(where string, args ...interface{}) (*AlertRule, error) { - var obj AlertRule - has, err := DB.Where(where, args...).Get(&obj) - if err != nil { - logger.Errorf("mysql.error: query alert_rule(%s)%+v fail: %s", where, args, err) - return nil, internalServerError - } - - if !has { - return nil, nil - } - - return &obj, nil -} - -func (ar *AlertRule) Del() error { - _, err := DB.Where("id=?", ar.Id).Delete(new(AlertRule)) - if err != nil { - logger.Errorf("mysql.error: delete alert_rule fail: %v", err) - return internalServerError - } - return nil -} - -func AlertRulesDel(ids []int64) error { - if len(ids) == 0 { - return fmt.Errorf("param ids is empty") - } - - _, err := DB.Exec("DELETE FROM alert_rule where id in (" + str.IdsString(ids) + ")") - if err != nil { - logger.Errorf("mysql.error: delete alert_rule(%v) fail: %v", ids, err) - return internalServerError - } - - return nil -} - -func AlertRuleUpdateGroup(alertRuleIds []int64, groupId int64) error { - if len(alertRuleIds) == 0 { - return fmt.Errorf("param alertRuleIds is empty") - } - - _, err := DB.Exec("UPDATE alert_rule SET group_id = ? 
where id in ("+str.IdsString(alertRuleIds)+")", groupId) - if err != nil { - logger.Errorf("mysql.error: update alert_rule(group_id=%d) fail: %v", groupId, err) - return internalServerError - } - - return nil -} - -func AllAlertRules() ([]*AlertRule, error) { - var objs []*AlertRule - err := DB.Find(&objs) - return objs, err -} - -type AlertRuleStatistic struct { - Count int64 `json:"count"` - MaxUpdateAt int64 `json:"max_update_at"` -} - -func GetAlertRuleStatistic() (AlertRuleStatistic, error) { - var obj AlertRuleStatistic - _, err := DB.SQL("select count(1) as count, max(update_at) as max_update_at from alert_rule").Get(&obj) - return obj, err -} diff --git a/models/alert_rule_group.go b/models/alert_rule_group.go deleted file mode 100644 index eab6f44a..00000000 --- a/models/alert_rule_group.go +++ /dev/null @@ -1,197 +0,0 @@ -package models - -import ( - "fmt" - "sort" - "strings" - "time" - - "github.com/toolkits/pkg/logger" - "github.com/toolkits/pkg/str" -) - -type AlertRuleGroup struct { - Id int64 `json:"id"` - Name string `json:"name"` - UserGroupIds string `json:"user_group_ids"` - CreateAt int64 `json:"create_at"` - CreateBy string `json:"create_by"` - UpdateAt int64 `json:"update_at"` - UpdateBy string `json:"update_by"` - UserGroups []UserGroup `json:"user_groups" xorm:"-"` -} - -func (arg *AlertRuleGroup) TableName() string { - return "alert_rule_group" -} - -func (arg *AlertRuleGroup) Validate() error { - if str.Dangerous(arg.Name) { - return _e("AlertRuleGroup name has invalid characters") - } - return nil -} - -func (arg *AlertRuleGroup) Add() error { - if err := arg.Validate(); err != nil { - return err - } - - num, err := AlertRuleGroupCount("name=?", arg.Name) - if err != nil { - return err - } - - if num > 0 { - return _e("AlertRuleGroup %s already exists", arg.Name) - } - - now := time.Now().Unix() - arg.CreateAt = now - arg.UpdateAt = now - return DBInsertOne(arg) -} - -func AlertRuleGroupCount(where string, args ...interface{}) (num int64, err error) { - num, err = DB.Where(where, args...).Count(new(AlertRuleGroup)) - if err != nil { - logger.Errorf("mysql.error: count alert_rule_group fail: %v", err) - return num, internalServerError - } - return num, nil -} - -func (arg *AlertRuleGroup) Update(cols ...string) error { - if err := arg.Validate(); err != nil { - return err - } - - _, err := DB.Where("id=?", arg.Id).Cols(cols...).Update(arg) - if err != nil { - logger.Errorf("mysql.error: update alert_rule_group(id=%d) fail: %v", arg.Id, err) - return internalServerError - } - - return nil -} - -func (arg *AlertRuleGroup) FillUserGroups() error { - ids := strings.Fields(arg.UserGroupIds) - if len(ids) == 0 { - arg.UserGroups = []UserGroup{} - return nil - } - - ugs, err := UserGroupGetsByIdsStr(ids) - if err != nil { - logger.Errorf("mysql.error: UserGroupGetsByIds fail: %v", err) - return internalServerError - } - - arg.UserGroups = ugs - - // 这里附一个清理逻辑,如果某个团队已经删除了,就顺带把这个团队id从arg中删除 - ugslen := len(ugs) - idslst := make([]string, 0, ugslen) - - for i := 0; i < ugslen; i++ { - idslst = append(idslst, fmt.Sprint(ugs[i].Id)) - } - - sort.Strings(idslst) - newids := strings.Join(idslst, " ") - - // 把原来的ids也排个序,两相比较,如果发生变化,就说明有团队已经被删了,更新之 - sort.Strings(ids) - oldids := strings.Join(ids, " ") - - if newids != oldids { - arg.UserGroupIds = newids - arg.Update("user_group_ids") - } - - return nil -} - -func AlertRuleGroupTotal(query string) (num int64, err error) { - if query != "" { - q := "%" + query + "%" - num, err = DB.Where("name like ?", 
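FillUserGroups above doubles as a cleanup pass: the space-separated user_group_ids string is rebuilt from the groups that still exist, and if the rebuilt list differs from the stored one, the deleted team ids are pruned via Update("user_group_ids"). A simplified illustration of that reconciliation; the function names are mine, not the project's.

package main

import (
	"fmt"
	"sort"
	"strings"
)

// reconcile rebuilds the space-separated id list from the rows that still exist and
// reports whether it differs from the stored list (i.e. some group was deleted).
func reconcile(stored string, existing []int64) (newList string, changed bool) {
	ids := make([]string, 0, len(existing))
	for _, id := range existing {
		ids = append(ids, fmt.Sprint(id))
	}
	sort.Strings(ids)
	newList = strings.Join(ids, " ")

	old := strings.Fields(stored)
	sort.Strings(old)

	return newList, newList != strings.Join(old, " ")
}

func main() {
	fmt.Println(reconcile("3 1 2", []int64{1, 3})) // "1 3" true -> group 2 was deleted
}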
q).Count(new(AlertRuleGroup)) - } else { - num, err = DB.Count(new(AlertRuleGroup)) - } - - if err != nil { - logger.Errorf("mysql.error: count alert_rule_group fail: %v", err) - return 0, internalServerError - } - - return num, nil -} - -func AlertRuleGroupGets(query string, limit, offset int) ([]AlertRuleGroup, error) { - session := DB.Limit(limit, offset).OrderBy("name") - if query != "" { - q := "%" + query + "%" - session = session.Where("name like ?", q) - } - - var objs []AlertRuleGroup - err := session.Find(&objs) - if err != nil { - logger.Errorf("mysql.error: query alert_rule_group fail: %v", err) - return objs, internalServerError - } - - if len(objs) == 0 { - return []AlertRuleGroup{}, nil - } - - return objs, nil -} - -func AlertRuleGroupGet(where string, args ...interface{}) (*AlertRuleGroup, error) { - var obj AlertRuleGroup - has, err := DB.Where(where, args...).Get(&obj) - if err != nil { - logger.Errorf("mysql.error: query alert_rule_group(%s)%+v fail: %s", where, args, err) - return nil, internalServerError - } - - if !has { - return nil, nil - } - - return &obj, nil -} - -// Del AlertRuleGroup删除,前提是下面没有AlertRule了 -func (arg *AlertRuleGroup) Del() error { - ds, err := AlertRulesOfGroup(arg.Id) - if err != nil { - return err - } - - if len(ds) > 0 { - return _e("There are still alert rules under the group") - } - - session := DB.NewSession() - defer session.Close() - - if err := session.Begin(); err != nil { - return err - } - - if _, err := session.Exec("DELETE FROM alert_rule_group_favorite WHERE group_id=?", arg.Id); err != nil { - logger.Errorf("mysql.error: delete alert_rule_group_favorite fail: %v", err) - return err - } - - if _, err := session.Exec("DELETE FROM alert_rule_group WHERE id=?", arg.Id); err != nil { - logger.Errorf("mysql.error: delete alert_rule_group fail: %v", err) - return err - } - - return session.Commit() -} diff --git a/models/alert_rule_group_favorite.go b/models/alert_rule_group_favorite.go deleted file mode 100644 index 214bb6c9..00000000 --- a/models/alert_rule_group_favorite.go +++ /dev/null @@ -1,62 +0,0 @@ -package models - -import "github.com/toolkits/pkg/logger" - -type AlertRuleGroupFavorite struct { - Id int64 - GroupId int64 - UserId int64 -} - -func (AlertRuleGroupFavorite) TableName() string { - return "alert_rule_group_favorite" -} - -func AlertRuleGroupFavoriteCount(where string, args ...interface{}) (int64, error) { - num, err := DB.Where(where, args...).Count(new(AlertRuleGroupFavorite)) - if err != nil { - logger.Errorf("mysql.error: count alert_rule_group_favorite(where=%s, args=%+v) fail: %v", where, args, err) - return 0, internalServerError - } - return num, nil -} - -func AlertRuleGroupFavoriteAdd(groupId, userId int64) error { - num, err := AlertRuleGroupFavoriteCount("user_id=? and group_id=?", userId, groupId) - if err != nil { - return err - } - - if num > 0 { - // already exists - return nil - } - - obj := AlertRuleGroupFavorite{ - GroupId: groupId, - UserId: userId, - } - - return DBInsertOne(obj) -} - -func AlertRuleGroupFavoriteDel(groupId, userId int64) error { - _, err := DB.Where("user_id=? 
and group_id=?", userId, groupId).Delete(new(AlertRuleGroupFavorite)) - if err != nil { - logger.Errorf("mysql.error: delete alert_rule_group_favorite fail: %v", err) - return internalServerError - } - - return nil -} - -func AlertRuleGroupFavoriteGetGroupIds(userId int64) ([]int64, error) { - var ids []int64 - err := DB.Table(new(AlertRuleGroupFavorite)).Select("group_id").Where("user_id=?", userId).Find(&ids) - if err != nil { - logger.Errorf("mysql.error: query alert_rule_group_favorite fail: %v", err) - return ids, internalServerError - } - - return ids, nil -} diff --git a/models/chart.go b/models/chart.go deleted file mode 100644 index 42c1a6e5..00000000 --- a/models/chart.go +++ /dev/null @@ -1,68 +0,0 @@ -package models - -import "github.com/toolkits/pkg/logger" - -type Chart struct { - Id int64 `json:"id"` - GroupId int64 `json:"group_id"` - Configs string `json:"configs"` - Weight int `json:"weight"` -} - -func (c *Chart) TableName() string { - return "chart" -} - -func (c *Chart) Add() error { - return DBInsertOne(c) -} - -func (c *Chart) Update(cols ...string) error { - _, err := DB.Where("id=?", c.Id).Cols(cols...).Update(c) - if err != nil { - logger.Errorf("mysql.error: update chart(id=%d) fail: %v", c.Id, err) - return internalServerError - } - - return nil -} - -func (c *Chart) Del() error { - _, err := DB.Where("id=?", c.Id).Delete(new(Chart)) - if err != nil { - logger.Errorf("mysql.error: delete chart(id=%d) fail: %v", c.Id, err) - return internalServerError - } - - return nil -} - -func ChartGets(groupId int64) ([]Chart, error) { - var objs []Chart - err := DB.Where("group_id=?", groupId).OrderBy("weight").Find(&objs) - if err != nil { - logger.Errorf("mysql.error: ChartGets(groupId=%d) fail: %v", groupId, err) - return nil, internalServerError - } - - if len(objs) == 0 { - return []Chart{}, nil - } - - return objs, nil -} - -func ChartGet(where string, args ...interface{}) (*Chart, error) { - var obj Chart - has, err := DB.Where(where, args...).Get(&obj) - if err != nil { - logger.Errorf("mysql.error: get chart(%s)%+v fail: %s", where, args, err) - return nil, internalServerError - } - - if !has { - return nil, nil - } - - return &obj, nil -} diff --git a/models/chart_group.go b/models/chart_group.go deleted file mode 100644 index 98c8b9eb..00000000 --- a/models/chart_group.go +++ /dev/null @@ -1,87 +0,0 @@ -package models - -import ( - "github.com/toolkits/pkg/logger" - "github.com/toolkits/pkg/str" -) - -type ChartGroup struct { - Id int64 `json:"id"` - DashboardId int64 `json:"dashboard_id"` - Name string `json:"name"` - Weight int `json:"weight"` -} - -func (cg *ChartGroup) TableName() string { - return "chart_group" -} - -func (cg *ChartGroup) Validate() error { - if str.Dangerous(cg.Name) { - return _e("ChartGroup name has invalid characters") - } - return nil -} - -func (cg *ChartGroup) Add() error { - if err := cg.Validate(); err != nil { - return err - } - - return DBInsertOne(cg) -} - -func (cg *ChartGroup) Update(cols ...string) error { - if err := cg.Validate(); err != nil { - return err - } - - _, err := DB.Where("id=?", cg.Id).Cols(cols...).Update(cg) - if err != nil { - logger.Errorf("mysql.error: update chart_group(id=%d) fail: %v", cg.Id, err) - return internalServerError - } - - return nil -} - -func (cg *ChartGroup) Del() error { - _, err := DB.Where("group_id=?", cg.Id).Delete(new(Chart)) - if err != nil { - logger.Errorf("mysql.error: delete chart by group_id(%d) fail: %v", cg.Id, err) - return internalServerError - } - - _, err = DB.Where("id=?", 
cg.Id).Delete(new(ChartGroup)) - if err != nil { - logger.Errorf("mysql.error: delete chart_group(id=%d) fail: %v", cg.Id, err) - return internalServerError - } - - return nil -} - -func ChartGroupGet(where string, args ...interface{}) (*ChartGroup, error) { - var obj ChartGroup - has, err := DB.Where(where, args...).Get(&obj) - if err != nil { - logger.Errorf("mysql.error: get chart_group(%s)%+v fail: %s", where, args, err) - return nil, internalServerError - } - - if !has { - return nil, nil - } - - return &obj, nil -} - -func ChartGroupGets(dashboardId int64) ([]ChartGroup, error) { - var objs []ChartGroup - err := DB.Where("dashboard_id=?", dashboardId).OrderBy("weight").Find(&objs) - if err != nil { - logger.Errorf("mysql.error: ChartGroupGets(dashboardId=%d) fail: %v", dashboardId, err) - return nil, internalServerError - } - return objs, nil -} diff --git a/models/chart_tmp.go b/models/chart_tmp.go deleted file mode 100644 index 93526168..00000000 --- a/models/chart_tmp.go +++ /dev/null @@ -1,30 +0,0 @@ -package models - -import "github.com/toolkits/pkg/logger" - -type ChartTmp struct { - Id int64 `json:"id"` - Configs string `json:"configs"` - CreateBy string `json:"create_by"` - CreateAt int64 `json:"create_at"` -} - -func (t *ChartTmp) Add() error { - _, err := DB.InsertOne(t) - return err -} - -func ChartTmpGet(where string, args ...interface{}) (*ChartTmp, error) { - var obj ChartTmp - has, err := DB.Where(where, args...).Get(&obj) - if err != nil { - logger.Errorf("mysql.error: get chart_tmp(%s)%+v fail: %s", where, args, err) - return nil, internalServerError - } - - if !has { - return nil, nil - } - - return &obj, nil -} diff --git a/models/classpath.go b/models/classpath.go deleted file mode 100644 index 5e68b4da..00000000 --- a/models/classpath.go +++ /dev/null @@ -1,318 +0,0 @@ -package models - -import ( - "strings" - "time" - - "github.com/toolkits/pkg/logger" - "github.com/toolkits/pkg/str" -) - -type Classpath struct { - Id int64 `json:"id"` - Path string `json:"path"` - Note string `json:"note"` - Preset int `json:"preset"` - CreateAt int64 `json:"create_at"` - CreateBy string `json:"create_by"` - UpdateAt int64 `json:"update_at"` - UpdateBy string `json:"update_by"` -} - -type ClasspathNode struct { - Id int64 `json:"id"` - Path string `json:"path"` - Note string `json:"note"` - Preset int `json:"preset"` - Children []*ClasspathNode `json:"children"` -} - -func (c *Classpath) TableName() string { - return "classpath" -} - -func (c *Classpath) Validate() error { - if str.Dangerous(c.Path) { - return _e("Classpath path has invalid characters") - } - - if strings.Contains(c.Path, " ") { - return _e("Classpath path has invalid characters") - } - - if str.Dangerous(c.Note) { - return _e("Classpath note has invalid characters") - } - - return nil -} - -func (c *Classpath) Add() error { - if err := c.Validate(); err != nil { - return err - } - - num, err := ClasspathCount("path=?", c.Path) - if err != nil { - return err - } - - if num > 0 { - return _e("Classpath %s already exists", c.Path) - } - - now := time.Now().Unix() - c.CreateAt = now - c.UpdateAt = now - return DBInsertOne(c) -} - -func ClasspathCount(where string, args ...interface{}) (num int64, err error) { - num, err = DB.Where(where, args...).Count(new(Classpath)) - if err != nil { - logger.Errorf("mysql.error: count classpath fail: %v", err) - return num, internalServerError - } - return num, nil -} - -func (c *Classpath) Update(cols ...string) error { - if err := c.Validate(); err != nil { - return err - } - 
- _, err := DB.Where("id=?", c.Id).Cols(cols...).Update(c) - if err != nil { - logger.Errorf("mysql.error: update classpath(id=%d) fail: %v", c.Id, err) - return internalServerError - } - - return nil -} - -func ClasspathTotal(query string) (num int64, err error) { - if query != "" { - q := "%" + query + "%" - num, err = DB.Where("path like ?", q).Count(new(Classpath)) - } else { - num, err = DB.Count(new(Classpath)) - } - - if err != nil { - logger.Errorf("mysql.error: count classpath fail: %v", err) - return 0, internalServerError - } - - return num, nil -} - -func ClasspathGets(query string, limit, offset int) ([]Classpath, error) { - session := DB.Limit(limit, offset).OrderBy("path") - if query != "" { - q := "%" + query + "%" - session = session.Where("path like ?", q) - } - var objs []Classpath - err := session.Find(&objs) - if err != nil { - logger.Errorf("mysql.error: query classpath fail: %v", err) - return objs, internalServerError - } - - if len(objs) == 0 { - return []Classpath{}, nil - } - - return objs, nil -} - -func ClasspathGetAll() ([]Classpath, error) { - var objs []Classpath - err := DB.Find(&objs) - if err != nil { - logger.Errorf("mysql.error: query classpath fail: %v", err) - return objs, internalServerError - } - - if len(objs) == 0 { - return []Classpath{}, nil - } - - return objs, nil -} - -func ClasspathGet(where string, args ...interface{}) (*Classpath, error) { - var obj Classpath - has, err := DB.Where(where, args...).Get(&obj) - if err != nil { - logger.Errorf("mysql.error: query classpath(%s)%+v fail: %s", where, args, err) - return nil, internalServerError - } - - if !has { - return nil, nil - } - - return &obj, nil -} - -func ClasspathGetsByPrefix(prefix string) ([]Classpath, error) { - var objs []Classpath - err := DB.Where("path like ?", prefix+"%").OrderBy("path").Find(&objs) - if err != nil { - logger.Errorf("mysql.error: query classpath fail: %v", err) - return objs, internalServerError - } - - if len(objs) == 0 { - return []Classpath{}, nil - } - - return objs, nil -} - -// Del classpath的删除,前提是挂载的机器、配置的采集策略都要提前删除 -func (c *Classpath) Del() error { - num, err := ClasspathResourceCount("classpath_id=?", c.Id) - if err != nil { - return err - } - - if num > 0 { - return _e("There are still resources under the classpath") - } - - num, err = CollectRuleCount("classpath_id=?", c.Id) - if err != nil { - return err - } - - if num > 0 { - return _e("There are still collect rules under the classpath") - } - - session := DB.NewSession() - defer session.Close() - - if err := session.Begin(); err != nil { - return err - } - - if _, err := session.Exec("DELETE FROM classpath_favorite WHERE classpath_id=?", c.Id); err != nil { - logger.Errorf("mysql.error: delete classpath_favorite fail: %v", err) - return err - } - - if _, err := session.Exec("DELETE FROM classpath WHERE id=?", c.Id); err != nil { - logger.Errorf("mysql.error: delete classpath fail: %v", err) - return err - } - - return session.Commit() -} - -func (c *Classpath) AddResources(idents []string) error { - count := len(idents) - for i := 0; i < count; i++ { - err := ClasspathResourceAdd(c.Id, strings.TrimSpace(idents[i])) - if err != nil { - return err - } - } - return nil -} - -func (c *Classpath) DelResources(idents []string) error { - return ClasspathResourceDel(c.Id, idents) -} - -func ClasspathNodeGets(query string) ([]*ClasspathNode, error) { - session := DB.OrderBy("path") - if query != "" { - q := "%" + query + "%" - session = session.Where("path like ?", q) - } - var objs []Classpath - err := 
session.Find(&objs) - if err != nil { - logger.Errorf("mysql.error: query classpath fail: %v", err) - return []*ClasspathNode{}, internalServerError - } - - if len(objs) == 0 { - return []*ClasspathNode{}, nil - } - pcs := ClasspathNodeAllChildren(objs) - - return pcs, nil -} - -func (cp *Classpath) DirectChildren() ([]Classpath, error) { - var pcs []Classpath - objs, err := ClasspathGetsByPrefix(cp.Path) - if err != nil { - logger.Errorf("mysql.error: query prefix classpath fail: %v", err) - return []Classpath{}, internalServerError - } - if len(objs) < 2 { - return []Classpath{}, nil - } - - pre := objs[1] - path := pre.Path[len(objs[0].Path):] - pre.Path = path - pcs = append(pcs, pre) - - for _, cp := range objs[2:] { - has := strings.HasPrefix(cp.Path, pre.Path) - if !has { - path := cp.Path[len(objs[0].Path):] - pre.Path = path - pcs = append(pcs, pre) - pre = cp - } - } - - return pcs, nil -} - -func ClasspathNodeAllChildren(cps []Classpath) []*ClasspathNode { - var node ClasspathNode - for _, cp := range cps { - ListInsert(cp, &node) - } - - return node.Children -} - -func ListInsert(obj Classpath, node *ClasspathNode) { - path := obj.Path - has := true - for { - if len(node.Children) == 0 { - break - } - children := node.Children[len(node.Children)-1] - prefix := children.Path - has = strings.HasPrefix(path, prefix) - if !has { - break - } - path = path[len(prefix):] - node = children - } - - newNode := ToClasspathNode(obj, path) - node.Children = append(node.Children, &newNode) -} - -func ToClasspathNode(cp Classpath, path string) ClasspathNode { - var obj ClasspathNode - obj.Id = cp.Id - obj.Path = path - obj.Note = cp.Note - obj.Preset = cp.Preset - obj.Children = []*ClasspathNode{} - - return obj -} diff --git a/models/classpath_favorite.go b/models/classpath_favorite.go deleted file mode 100644 index abd4747c..00000000 --- a/models/classpath_favorite.go +++ /dev/null @@ -1,62 +0,0 @@ -package models - -import "github.com/toolkits/pkg/logger" - -type ClasspathFavorite struct { - Id int64 - ClasspathId int64 - UserId int64 -} - -func (ClasspathFavorite) TableName() string { - return "classpath_favorite" -} - -func ClasspathFavoriteCount(where string, args ...interface{}) (int64, error) { - num, err := DB.Where(where, args...).Count(new(ClasspathFavorite)) - if err != nil { - logger.Errorf("mysql.error: count classpath_favorite(where=%s, args=%+v) fail: %v", where, args, err) - return 0, internalServerError - } - return num, nil -} - -func ClasspathFavoriteAdd(ClasspathId, userId int64) error { - num, err := ClasspathFavoriteCount("user_id=? and classpath_id=?", userId, ClasspathId) - if err != nil { - return err - } - - if num > 0 { - // already exists - return nil - } - - obj := ClasspathFavorite{ - ClasspathId: ClasspathId, - UserId: userId, - } - - return DBInsertOne(obj) -} - -func ClasspathFavoriteDel(classpathId int64, userId int64) error { - _, err := DB.Where("user_id=? 
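ClasspathNodeAllChildren and ListInsert above build a tree out of the flat, path-sorted classpath list: each new path is attached under the most recently inserted node whose path is a prefix of it, storing only the remaining suffix. A reduced sketch of that insertion step with stand-in types, not the project's code.

package main

import (
	"fmt"
	"strings"
)

type node struct {
	Path     string
	Children []*node
}

// insert walks down the last-inserted branch while the new path still carries the
// parent's prefix, then appends the remaining suffix as a child (same idea as ListInsert).
func insert(root *node, path string) {
	cur := root
	for len(cur.Children) > 0 {
		last := cur.Children[len(cur.Children)-1]
		if !strings.HasPrefix(path, last.Path) {
			break
		}
		path = path[len(last.Path):] // keep only the suffix relative to the parent
		cur = last
	}
	cur.Children = append(cur.Children, &node{Path: path})
}

func main() {
	root := &node{}
	for _, p := range []string{"all", "all.bj", "all.bj.web", "all.sh"} {
		insert(root, p) // input must be sorted by path, as the query above guarantees
	}
	fmt.Println(root.Children[0].Path, root.Children[0].Children[0].Path) // all .bj
}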
and classpath_id=?", userId, classpathId).Delete(new(ClasspathFavorite)) - if err != nil { - logger.Errorf("mysql.error: delete classpath_favorite fail: %v", err) - return internalServerError - } - - return nil -} - -func ClasspathFavoriteGetClasspathIds(userId int64) ([]int64, error) { - var ids []int64 - err := DB.Table(new(ClasspathFavorite)).Select("classpath_id").Where("user_id=?", userId).Find(&ids) - if err != nil { - logger.Errorf("mysql.error: query classpath_favorite fail: %v", err) - return ids, internalServerError - } - - return ids, nil -} diff --git a/models/classpath_resource.go b/models/classpath_resource.go deleted file mode 100644 index 2fab9737..00000000 --- a/models/classpath_resource.go +++ /dev/null @@ -1,108 +0,0 @@ -package models - -import "github.com/toolkits/pkg/logger" - -type ClasspathResource struct { - Id int64 - ClasspathId int64 - ResIdent string -} - -func (ClasspathResource) TableName() string { - return "classpath_resource" -} - -func ClasspathResourceCount(where string, args ...interface{}) (num int64, err error) { - num, err = DB.Where(where, args...).Count(new(ClasspathResource)) - if err != nil { - logger.Errorf("mysql.error: count classpath_resource fail: %v", err) - return num, internalServerError - } - return num, nil -} - -func ClasspathResourceGets(where string, args ...interface{}) ([]ClasspathResource, error) { - var objs []ClasspathResource - - err := DB.Where(where, args...).Find(&objs) - if err != nil { - logger.Errorf("mysql.error: get classpath_resources fail: %v", err) - return objs, internalServerError - } - - return objs, nil -} - -func ClasspathResourceGetAll() ([]ClasspathResource, error) { - var objs []ClasspathResource - - err := DB.Find(&objs) - if err != nil { - logger.Errorf("mysql.error: get classpath_resources fail: %v", err) - return objs, internalServerError - } - - return objs, nil -} - -func ClasspathResourceAdd(classpathId int64, resIdent string) error { - num, err := ClasspathResourceCount("classpath_id=? 
and res_ident=?", classpathId, resIdent) - if err != nil { - return err - } - - if num > 0 { - // already exists - return nil - } - - res, err := ResourceGet("ident=?", resIdent) - if err != nil { - return err - } - - if res == nil { - return _e("No such resource %s", resIdent) - } - - obj := ClasspathResource{ - ClasspathId: classpathId, - ResIdent: resIdent, - } - - return DBInsertOne(obj) -} - -func ClasspathResourceDel(classpathId int64, idents []string) error { - if len(idents) == 0 { - return nil - } - - _, err := DB.Where("classpath_id=?", classpathId).In("res_ident", idents).Delete(new(ClasspathResource)) - if err != nil { - logger.Errorf("mysql.error: delete classpath_resource fail: %v", err) - return internalServerError - } - - return nil -} - -// 如果发现资源没有在all这个classpath下,就把它加进来 -func BindOrphanToPresetClasspath() { - sql := "insert into classpath_resource(classpath_id, res_ident) select 1, ident from resource where ident not in (select res_ident from classpath_resource)" - ret, err := DB.Exec(sql) - if err != nil { - logger.Errorf("mysql.error: addOrphanToPresetClasspath fail: %v", err) - return - } - - num, err := ret.RowsAffected() - if err != nil { - logger.Errorf("mysql.error: cannot load rows affected: %v", err) - return - } - - if num > 0 { - logger.Warningf("we found %d orphan resources", num) - } -} diff --git a/models/collect_rule.go b/models/collect_rule.go deleted file mode 100644 index c8efa947..00000000 --- a/models/collect_rule.go +++ /dev/null @@ -1,187 +0,0 @@ -package models - -import ( - "encoding/json" - "fmt" - "time" - - "github.com/toolkits/pkg/logger" - "github.com/toolkits/pkg/str" -) - -type CollectRule struct { - Id int64 `json:"id"` - ClasspathId int64 `json:"classpath_id"` - PrefixMatch int `json:"prefix_match"` - Name string `json:"name"` - Note string `json:"note"` - Step int `json:"step"` - Type string `json:"type"` - Data string `json:"data"` - AppendTags string `json:"append_tags"` - CreateAt int64 `json:"create_at"` - CreateBy string `json:"create_by"` - UpdateAt int64 `json:"update_at"` - UpdateBy string `json:"update_by"` -} - -type PortConfig struct { - Port int `json:"port"` - Protocol string `json:"protocol"` // tcp or udp - Timeout int `json:"timeout"` // second -} - -type ProcConfig struct { - Method string `json:"method"` - Param string `json:"param"` -} - -type ScriptConfig struct { - Path string `json:"path"` - Params string `json:"params"` - Stdin string `json:"stdin"` - Env map[string]string `json:"env"` - Timeout int `json:"timeout"` // second -} - -type LogConfig struct { - FilePath string `json:"file_path"` - Func string `json:"func"` - Pattern string `json:"pattern"` - TagsPattern map[string]string `json:"tags_pattern"` -} - -func (cr *CollectRule) TableName() string { - return "collect_rule" -} - -func (cr *CollectRule) Validate() error { - if str.Dangerous(cr.Name) { - return _e("CollectRule name has invalid characters") - } - switch cr.Type { - case "port": - var conf PortConfig - err := json.Unmarshal([]byte(cr.Data), &conf) - if err != nil { - return err - } - case "script": - var conf ScriptConfig - err := json.Unmarshal([]byte(cr.Data), &conf) - if err != nil { - return err - } - case "log": - var conf LogConfig - err := json.Unmarshal([]byte(cr.Data), &conf) - if err != nil { - return err - } - case "process": - var conf ProcConfig - err := json.Unmarshal([]byte(cr.Data), &conf) - if err != nil { - return err - } - } - - return nil -} - -func (cr *CollectRule) Add() error { - now := time.Now().Unix() - cr.CreateAt = now - 
cr.UpdateAt = now - err := cr.Validate() - if err != nil { - return err - } - - return DBInsertOne(cr) -} - -func (cr *CollectRule) Del() error { - _, err := DB.Where("id=?", cr.Id).Delete(new(CollectRule)) - if err != nil { - logger.Errorf("mysql.error: delete collect_rule(id=%d) fail: %v", cr.Id, err) - return internalServerError - } - return nil -} - -func (cr *CollectRule) Update(cols ...string) error { - err := cr.Validate() - if err != nil { - return err - } - - _, err = DB.Where("id=?", cr.Id).Cols(cols...).Update(cr) - if err != nil { - logger.Errorf("mysql.error: update collect_rule(id=%d) fail: %v", cr.Id, err) - return internalServerError - } - - return nil -} - -func CollectRuleCount(where string, args ...interface{}) (num int64, err error) { - num, err = DB.Where(where, args...).Count(new(CollectRule)) - if err != nil { - logger.Errorf("mysql.error: count collect_rule fail: %v", err) - return num, internalServerError - } - return num, nil -} - -func CollectRuleGet(where string, args ...interface{}) (*CollectRule, error) { - var obj CollectRule - has, err := DB.Where(where, args...).Get(&obj) - if err != nil { - logger.Errorf("mysql.error: query collect_rule(%s)%+v fail: %s", where, args, err) - return nil, internalServerError - } - - if !has { - return nil, nil - } - - return &obj, err -} - -// CollectRuleGets 量不大,前端检索和排序 -func CollectRuleGets(where string, args ...interface{}) ([]CollectRule, error) { - var objs []CollectRule - err := DB.Where(where, args...).OrderBy("name").Find(&objs) - if err != nil { - logger.Errorf("mysql.error: get all collect_rule fail: %v", err) - return nil, internalServerError - } - - return objs, nil -} - -func CollectRuleGetAll() ([]*CollectRule, error) { - var objs []*CollectRule - err := DB.Find(&objs) - if err != nil { - logger.Errorf("mysql.error: get all collect_rule fail: %v", err) - return nil, internalServerError - } - - return objs, nil -} - -func CollectRulesDel(ids []int64) error { - if len(ids) == 0 { - return fmt.Errorf("param ids is empty") - } - - _, err := DB.Exec("DELETE FROM collect_rule where id in (" + str.IdsString(ids) + ")") - if err != nil { - logger.Errorf("mysql.error: delete collect_rule(%v) fail: %v", ids, err) - return internalServerError - } - - return nil -} diff --git a/models/dashboard.go b/models/dashboard.go deleted file mode 100644 index 21b62da2..00000000 --- a/models/dashboard.go +++ /dev/null @@ -1,240 +0,0 @@ -package models - -import ( - "strings" - "time" - - "github.com/toolkits/pkg/logger" - "github.com/toolkits/pkg/slice" - "github.com/toolkits/pkg/str" -) - -type Dashboard struct { - Id int64 `json:"id"` - Name string `json:"name"` - Tags string `json:"tags"` - Configs string `json:"configs"` - Favorite int `json:"favorite" xorm:"-"` - CreateAt int64 `json:"create_at"` - CreateBy string `json:"create_by"` - UpdateAt int64 `json:"update_at"` - UpdateBy string `json:"update_by"` -} - -func (d *Dashboard) TableName() string { - return "dashboard" -} - -func (d *Dashboard) Validate() error { - if d.Name == "" { - return _e("Dashboard name is empty") - } - - if str.Dangerous(d.Name) { - return _e("Dashboard name has invalid characters") - } - - if strings.Contains(d.Name, "://") { - return _e("Dashboard name has invalid characters") - } - - return nil -} - -func (d *Dashboard) FillFavorite(ids []int64) { - if slice.ContainsInt64(ids, d.Id) { - d.Favorite = 1 - } -} - -func DashboardCount(where string, args ...interface{}) (num int64, err error) { - num, err = DB.Where(where, args...).Count(new(Dashboard)) - 
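CollectRule.Validate above picks the JSON schema for the Data field based on Type (port, script, log, process) and simply tries to unmarshal into the matching config struct. The sketch below shows the same idea for two of the types with trimmed structs; unlike the original it also rejects unknown types, which is an added strictness for the example only.

package main

import (
	"encoding/json"
	"fmt"
)

type PortConfig struct {
	Port     int    `json:"port"`
	Protocol string `json:"protocol"`
	Timeout  int    `json:"timeout"`
}

type ScriptConfig struct {
	Path    string `json:"path"`
	Timeout int    `json:"timeout"`
}

// validateData checks that Data is valid JSON for the rule's Type.
func validateData(typ, data string) error {
	switch typ {
	case "port":
		var c PortConfig
		return json.Unmarshal([]byte(data), &c)
	case "script":
		var c ScriptConfig
		return json.Unmarshal([]byte(data), &c)
	default:
		return fmt.Errorf("unknown collect rule type: %s", typ)
	}
}

func main() {
	fmt.Println(validateData("port", `{"port":22,"protocol":"tcp","timeout":3}`)) // <nil>
	fmt.Println(validateData("port", `{"port":"ssh"}`))                           // unmarshal error
}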
if err != nil { - logger.Errorf("mysql.error: count dashboard fail: %v", err) - return num, internalServerError - } - return num, nil -} - -func (d *Dashboard) AddOnly() error { - if err := d.Validate(); err != nil { - return err - } - - num, err := DashboardCount("name=?", d.Name) - if err != nil { - return err - } - - if num > 0 { - return _e("Dashboard %s already exists", d.Name) - } - - now := time.Now().Unix() - d.CreateAt = now - d.UpdateAt = now - err = DBInsertOne(d) - return err -} - -func (d *Dashboard) Add() error { - if err := d.Validate(); err != nil { - return err - } - - num, err := DashboardCount("name=?", d.Name) - if err != nil { - return err - } - - if num > 0 { - return _e("Dashboard %s already exists", d.Name) - } - - now := time.Now().Unix() - d.CreateAt = now - d.UpdateAt = now - err = DBInsertOne(d) - if err == nil { - // 如果成功创建dashboard,可以自动创建一个default chart group,便于用户使用 - cg := ChartGroup{ - DashboardId: d.Id, - Name: "Default chart group", - Weight: 0, - } - cg.Add() - } - - return err -} - -func (d *Dashboard) Update(cols ...string) error { - if err := d.Validate(); err != nil { - return err - } - - _, err := DB.Where("id=?", d.Id).Cols(cols...).Update(d) - if err != nil { - logger.Errorf("mysql.error: update dashboard(id=%d) fail: %v", d.Id, err) - return internalServerError - } - - return nil -} - -func DashboardTotal(onlyfavorite bool, ids []int64, query string) (num int64, err error) { - session := DB.NewSession() - defer session.Close() - - if onlyfavorite { - session = session.In("id", ids) - } - - arr := strings.Fields(query) - if len(arr) > 0 { - for i := 0; i < len(arr); i++ { - if strings.HasPrefix(arr[i], "-") { - q := "%" + arr[i][1:] + "%" - session = session.Where("name not like ? and tags not like ?", q, q) - } else { - q := "%" + arr[i] + "%" - session = session.Where("(name like ? or tags like ?)", q, q) - } - } - } - - num, err = session.Count(new(Dashboard)) - if err != nil { - logger.Errorf("mysql.error: count dashboard fail: %v", err) - return 0, internalServerError - } - - return num, nil -} - -func DashboardGets(onlyfavorite bool, ids []int64, query string, limit, offset int) ([]Dashboard, error) { - session := DB.Limit(limit, offset).OrderBy("name") - - if onlyfavorite { - session = session.In("id", ids) - } - - arr := strings.Fields(query) - if len(arr) > 0 { - for i := 0; i < len(arr); i++ { - if strings.HasPrefix(arr[i], "-") { - q := "%" + arr[i][1:] + "%" - session = session.Where("name not like ? and tags not like ?", q, q) - } else { - q := "%" + arr[i] + "%" - session = session.Where("(name like ? 
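DashboardTotal and DashboardGets above support a small search syntax: every whitespace-separated token narrows the result, and a token with a leading "-" must NOT appear in name or tags. The helper below only classifies the tokens and is not part of the project.

package main

import (
	"fmt"
	"strings"
)

// splitQuery separates search tokens into positive matches and "-"-prefixed exclusions.
func splitQuery(query string) (include, exclude []string) {
	for _, tok := range strings.Fields(query) {
		if strings.HasPrefix(tok, "-") && len(tok) > 1 {
			exclude = append(exclude, tok[1:])
			continue
		}
		include = append(include, tok)
	}
	return
}

func main() {
	inc, exc := splitQuery("nginx -test bj")
	fmt.Println(inc, exc) // [nginx bj] [test]
}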
or tags like ?)", q, q) - } - } - } - - // configs字段内容太多,列表页面不需要 - var objs []Dashboard - err := session.Cols("id", "name", "tags", "create_at", "create_by", "update_at", "update_by").Find(&objs) - if err != nil { - logger.Errorf("mysql.error: query dashboard fail: %v", err) - return objs, internalServerError - } - - if len(objs) == 0 { - return []Dashboard{}, nil - } - - return objs, nil -} - -func DashboardGetsByIds(ids []int64) ([]*Dashboard, error) { - var objs []*Dashboard - err := DB.In("id", ids).Find(&objs) - if err != nil { - logger.Errorf("mysql.error: query dashboards(%v) fail: %v", ids, err) - return nil, internalServerError - } - - return objs, nil -} - -func DashboardGet(where string, args ...interface{}) (*Dashboard, error) { - var obj Dashboard - has, err := DB.Where(where, args...).Get(&obj) - if err != nil { - logger.Errorf("mysql.error: query dashboard(%s)%+v fail: %s", where, args, err) - return nil, internalServerError - } - - if !has { - return nil, nil - } - - return &obj, nil -} - -func (d *Dashboard) Del() error { - session := DB.NewSession() - defer session.Close() - - if err := session.Begin(); err != nil { - return err - } - - if _, err := session.Exec("DELETE FROM chart WHERE group_id in (select id from chart_group where dashboard_id=?)", d.Id); err != nil { - logger.Errorf("mysql.error: delete chart fail: %v", err) - return err - } - - if _, err := session.Exec("DELETE FROM chart_group WHERE dashboard_id=?", d.Id); err != nil { - logger.Errorf("mysql.error: delete chart_group fail: %v", err) - return err - } - - if _, err := session.Exec("DELETE FROM dashboard WHERE id=?", d.Id); err != nil { - logger.Errorf("mysql.error: delete dashboard fail: %v", err) - return err - } - - return session.Commit() -} diff --git a/models/dashboard_favorite.go b/models/dashboard_favorite.go deleted file mode 100644 index f4df1ccd..00000000 --- a/models/dashboard_favorite.go +++ /dev/null @@ -1,62 +0,0 @@ -package models - -import "github.com/toolkits/pkg/logger" - -type DashboardFavorite struct { - Id int64 - DashboardId int64 - UserId int64 -} - -func (DashboardFavorite) TableName() string { - return "dashboard_favorite" -} - -func DashboardFavoriteCount(where string, args ...interface{}) (int64, error) { - num, err := DB.Where(where, args...).Count(new(DashboardFavorite)) - if err != nil { - logger.Errorf("mysql.error: count dashboard_favorite(where=%s, args=%+v) fail: %v", where, args, err) - return 0, internalServerError - } - return num, nil -} - -func DashboardFavoriteAdd(dashboardId, userId int64) error { - num, err := DashboardFavoriteCount("user_id=? and dashboard_id=?", userId, dashboardId) - if err != nil { - return err - } - - if num > 0 { - // already exists - return nil - } - - obj := DashboardFavorite{ - DashboardId: dashboardId, - UserId: userId, - } - - return DBInsertOne(obj) -} - -func DashboardFavoriteDel(dashboardId, userId int64) error { - _, err := DB.Where("user_id=? 
and dashboard_id=?", userId, dashboardId).Delete(new(DashboardFavorite))
-	if err != nil {
-		logger.Errorf("mysql.error: delete dashboard_favorite fail: %v", err)
-		return internalServerError
-	}
-
-	return nil
-}
-
-func DashboardFavoriteGetDashboardIds(userId int64) ([]int64, error) {
-	var ids []int64
-	err := DB.Table(new(DashboardFavorite)).Select("dashboard_id").Where("user_id=?", userId).Find(&ids)
-	if err != nil {
-		logger.Errorf("mysql.error: query dashboard_favorite fail: %v", err)
-		return ids, internalServerError
-	}
-
-	return ids, nil
-}
diff --git a/models/funcs.go b/models/funcs.go
deleted file mode 100644
index cf7f6259..00000000
--- a/models/funcs.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package models
-
-import (
-	"fmt"
-
-	"github.com/toolkits/pkg/str"
-
-	"github.com/didi/nightingale/v5/pkg/i18n"
-)
-
-var (
-	internalServerError error
-	loginFailError      error
-)
-
-func InitError() {
-	internalServerError = _e("Internal server error, try again later please")
-	loginFailError = _e("Login fail, check your username and password")
-}
-
-func _e(format string, a ...interface{}) error {
-	return fmt.Errorf(_s(format, a...))
-}
-
-func _s(format string, a ...interface{}) string {
-	return i18n.Sprintf(format, a...)
-}
-
-// CryptoPass hashes the raw password together with the configured salt
-func CryptoPass(raw string) (string, error) {
-	salt, err := ConfigsGet("salt")
-	if err != nil {
-		return "", err
-	}
-
-	return str.MD5(salt + "<-*Uk30^96eY*->" + raw), nil
-}
diff --git a/models/history_alert_event.go b/models/history_alert_event.go
deleted file mode 100644
index a611383d..00000000
--- a/models/history_alert_event.go
+++ /dev/null
@@ -1,196 +0,0 @@
-package models
-
-import (
-	"encoding/json"
-	"strings"
-
-	"github.com/toolkits/pkg/logger"
-	"xorm.io/builder"
-)
-
-type HistoryAlertEvent struct {
-	Id       int64  `json:"id"`
-	RuleId   int64  `json:"rule_id"`
-	RuleName string `json:"rule_name"`
-	RuleNote string `json:"rule_note"`
-	//ProcessorUid int64 `json:"processor_uid"`
-	//ProcessorObj User  `json:"processor_user_objs" xorm:""`
-	//EventNote    string `json:"event_note"`
-	HashId        string `json:"hash_id"`       // unique identifier of the event
-	IsPromePull   int    `json:"is_prome_pull"` // whether this is a prometheus pull alert; when 1, the frontend uses ReadableExpression to pull the last hour of data
-	ResClasspaths string `json:"res_classpaths"`
-	ResIdent      string `json:"res_ident" xorm:"-"` // res_ident is already included in the tags field, so it is not written to the database separately; a dedicated struct field is kept because it is easier to handle in the various code paths; the frontend does not display it
-	Priority      int    `json:"priority"`
-	Status        int    `json:"status"`      // marks whether the event was muted
-	IsRecovery    int    `json:"is_recovery"` // 0: alert, 1: recovery
-	HistoryPoints json.RawMessage `json:"history_points"` // HistoryPoints{}
-	TriggerTime   int64  `json:"trigger_time"`
-	Values        string `json:"values" xorm:"-"` // e.g. cpu.idle: 23.3; load.1min: 32
-	NotifyChannels string `json:"notify_channels"`
-	NotifyGroups   string `json:"notify_groups"`
-	NotifyUsers    string `json:"notify_users"`
-	RunbookUrl     string `json:"runbook_url"`
-	ReadableExpression string `json:"readable_expression"` // e.g.
mem.bytes.used.percent(all,60s) > 0 - Tags string `json:"tags"` // merge data_tags rule_tags and res_tags - NotifyGroupObjs []UserGroup `json:"notify_group_objs" xorm:"-"` - NotifyUserObjs []User `json:"notify_user_objs" xorm:"-"` -} - -// IsAlert 语法糖,避免直接拿IsRecovery字段做比对不直观易出错 -func (hae *HistoryAlertEvent) IsAlert() bool { - return hae.IsRecovery != 1 -} - -// IsRecov 语法糖,避免直接拿IsRecovery字段做比对不直观易出错 -func (hae *HistoryAlertEvent) IsRecov() bool { - return hae.IsRecovery == 1 -} - -// MarkAlert 语法糖,标记为告警状态 -func (hae *HistoryAlertEvent) MarkAlert() { - hae.IsRecovery = 0 -} - -// MarkRecov 语法糖,标记为恢复状态 -func (hae *HistoryAlertEvent) MarkRecov() { - hae.IsRecovery = 1 -} - -// MarkMuted 语法糖,标记为屏蔽状态 -func (hae *HistoryAlertEvent) MarkMuted() { - hae.Status = 1 -} - -func (hae *HistoryAlertEvent) FillObjs() error { - userGroupIds := strings.Fields(hae.NotifyGroups) - if len(userGroupIds) > 0 { - groups, err := UserGroupGetsByIdsStr(userGroupIds) - if err != nil { - return err - } - hae.NotifyGroupObjs = groups - } - - userIds := strings.Fields(hae.NotifyUsers) - if len(userIds) > 0 { - users, err := UserGetsByIdsStr(userIds) - if err != nil { - return err - } - hae.NotifyUserObjs = users - } - - /* - if hae.ProcessorUid != 0 { - processor, err := UserGetById(hae.ProcessorUid) - if err != nil { - return err - } - hae.ProcessorObj = *processor - } - */ - - return nil -} - -func (hae *HistoryAlertEvent) Add() error { - return DBInsertOne(hae) -} - -func HistoryAlertEventsTotal(stime, etime int64, query string, status, isRecovery, priority int) (num int64, err error) { - cond := builder.NewCond() - if stime != 0 && etime != 0 { - cond = cond.And(builder.Between{Col: "trigger_time", LessVal: stime, MoreVal: etime}) - } - - if status != -1 { - cond = cond.And(builder.Eq{"status": status}) - } - - if isRecovery != -1 { - cond = cond.And(builder.Eq{"is_recovery": isRecovery}) - } - - if priority != -1 { - cond = cond.And(builder.Eq{"priority": priority}) - } - - if query != "" { - arr := strings.Fields(query) - for i := 0; i < len(arr); i++ { - qarg := "%" + arr[i] + "%" - innerCond := builder.NewCond() - innerCond = innerCond.Or(builder.Like{"res_classpaths", qarg}) - innerCond = innerCond.Or(builder.Like{"rule_name", qarg}) - innerCond = innerCond.Or(builder.Like{"tags", qarg}) - cond = cond.And(innerCond) - } - } - - num, err = DB.Where(cond).Count(new(HistoryAlertEvent)) - if err != nil { - logger.Errorf("mysql.error: count history_alert_event fail: %v", err) - return 0, internalServerError - } - - return num, nil -} - -func HistoryAlertEventGets(stime, etime int64, query string, status, isRecovery, priority int, limit, offset int) ([]HistoryAlertEvent, error) { - cond := builder.NewCond() - if stime != 0 && etime != 0 { - cond = cond.And(builder.Between{Col: "trigger_time", LessVal: stime, MoreVal: etime}) - } - - if status != -1 { - cond = cond.And(builder.Eq{"status": status}) - } - - if isRecovery != -1 { - cond = cond.And(builder.Eq{"is_recovery": isRecovery}) - } - - if priority != -1 { - cond = cond.And(builder.Eq{"priority": priority}) - } - - if query != "" { - arr := strings.Fields(query) - for i := 0; i < len(arr); i++ { - qarg := "%" + arr[i] + "%" - innerCond := builder.NewCond() - innerCond = innerCond.Or(builder.Like{"res_classpaths", qarg}) - innerCond = innerCond.Or(builder.Like{"rule_name", qarg}) - innerCond = innerCond.Or(builder.Like{"tags", qarg}) - cond = cond.And(innerCond) - } - } - - var objs []HistoryAlertEvent - err := DB.Where(cond).Desc("trigger_time").Limit(limit, 
offset).Find(&objs) - if err != nil { - logger.Errorf("mysql.error: query history_alert_event fail: %v", err) - return objs, internalServerError - } - - if len(objs) == 0 { - return []HistoryAlertEvent{}, nil - } - - return objs, nil -} - -func HistoryAlertEventGet(where string, args ...interface{}) (*HistoryAlertEvent, error) { - var obj HistoryAlertEvent - has, err := DB.Where(where, args...).Get(&obj) - if err != nil { - logger.Errorf("mysql.error: query history_alert_event(%s)%+v fail: %s", where, args, err) - return nil, internalServerError - } - - if !has { - return nil, nil - } - - return &obj, nil -} diff --git a/models/instance.go b/models/instance.go deleted file mode 100644 index 5db3b00a..00000000 --- a/models/instance.go +++ /dev/null @@ -1,75 +0,0 @@ -package models - -import ( - "time" - - "github.com/toolkits/pkg/logger" - "xorm.io/builder" -) - -type Instance struct { - Service string - Endpoint string - Clock time.Time -} - -func InstanceHeartbeat(service, endpoint string) error { - cnt, err := DB.Where("service=? and endpoint = ?", service, endpoint).Count(new(Instance)) - if err != nil { - logger.Errorf("mysql.error: InstanceHeartbeat count fail: %v", err) - return err - } - - if cnt == 0 { - _, err = DB.Exec("INSERT INTO instance(service, endpoint, clock) VALUES(?, ?, now())", service, endpoint) - } else { - _, err = DB.Exec("UPDATE instance SET clock = now() WHERE service = ? and endpoint = ?", service, endpoint) - } - - if err != nil { - logger.Errorf("mysql.error: InstanceHeartbeat write fail: %v", err) - } - - return err -} - -func InstanceGetDead(service string) ([]string, error) { - var arr []string - err := DB.Table("instance").Where("service=? and clock < DATE_SUB(now(),INTERVAL 10 SECOND)", service).Select("endpoint").Find(&arr) - if err != nil { - logger.Errorf("mysql.error: InstanceGetDead fail: %v", err) - return arr, err - } - - if len(arr) == 0 { - return []string{}, nil - } - - return arr, nil -} - -func InstanceGetAlive(service string) ([]string, error) { - var arr []string - err := DB.Table("instance").Where("service=? 
and clock >= DATE_SUB(now(),INTERVAL 10 SECOND)", service).Select("endpoint").Find(&arr) - if err != nil { - logger.Errorf("mysql.error: InstanceGetAlive fail: %v", err) - return arr, err - } - - if len(arr) == 0 { - return []string{}, nil - } - - return arr, nil -} - -func InstanceDelDead(service string, endpoints []string) error { - cond := builder.NewCond() - cond = cond.And(builder.Eq{"service": service}) - cond = cond.And(builder.In("endpoint", endpoints)) - _, err := DB.Where(cond).Delete(new(Instance)) - if err != nil { - logger.Errorf("mysql.error: InstanceDelDead fail: %v", err) - } - return err -} diff --git a/models/ldap.go b/models/ldap.go deleted file mode 100644 index a5c83f2a..00000000 --- a/models/ldap.go +++ /dev/null @@ -1,116 +0,0 @@ -package models - -import ( - "crypto/tls" - "fmt" - - "github.com/go-ldap/ldap/v3" - "github.com/toolkits/pkg/logger" -) - -type LdapSection struct { - Enable bool `yaml:"enable"` - Host string `yaml:"host"` - Port int `yaml:"port"` - BaseDn string `yaml:"baseDn"` - BindUser string `yaml:"bindUser"` - BindPass string `yaml:"bindPass"` - AuthFilter string `yaml:"authFilter"` - Attributes ldapAttributes `yaml:"attributes"` - CoverAttributes bool `yaml:"coverAttributes"` - TLS bool `yaml:"tls"` - StartTLS bool `yaml:"startTLS"` -} - -type ldapAttributes struct { - Nickname string `yaml:"nickname"` - Phone string `yaml:"phone"` - Email string `yaml:"email"` -} - -var LDAP LdapSection - -func InitLdap(ldap LdapSection) { - LDAP = ldap -} - -func genLdapAttributeSearchList() []string { - var ldapAttributes []string - attrs := LDAP.Attributes - if attrs.Nickname != "" { - ldapAttributes = append(ldapAttributes, attrs.Nickname) - } - if attrs.Email != "" { - ldapAttributes = append(ldapAttributes, attrs.Email) - } - if attrs.Phone != "" { - ldapAttributes = append(ldapAttributes, attrs.Phone) - } - return ldapAttributes -} - -func ldapReq(user, pass string) (*ldap.SearchResult, error) { - var conn *ldap.Conn - var err error - lc := LDAP - addr := fmt.Sprintf("%s:%d", lc.Host, lc.Port) - - if lc.TLS { - conn, err = ldap.DialTLS("tcp", addr, &tls.Config{InsecureSkipVerify: true}) - } else { - conn, err = ldap.Dial("tcp", addr) - } - - if err != nil { - logger.Errorf("ldap.error: cannot dial ldap(%s): %v", addr, err) - return nil, internalServerError - } - - defer conn.Close() - - if !lc.TLS && lc.StartTLS { - if err := conn.StartTLS(&tls.Config{InsecureSkipVerify: true}); err != nil { - logger.Errorf("ldap.error: conn startTLS fail: %v", err) - return nil, internalServerError - } - } - - // if bindUser is empty, anonymousSearch mode - if lc.BindUser != "" { - // BindSearch mode - if err := conn.Bind(lc.BindUser, lc.BindPass); err != nil { - logger.Errorf("ldap.error: bind ldap fail: %v, use user(%s) to bind", err, lc.BindUser) - return nil, internalServerError - } - } - - searchRequest := ldap.NewSearchRequest( - lc.BaseDn, // The base dn to search - ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false, - fmt.Sprintf(lc.AuthFilter, ldap.EscapeFilter(user)), // The filter to apply - genLdapAttributeSearchList(), // A list attributes to retrieve - nil, - ) - - sr, err := conn.Search(searchRequest) - if err != nil { - logger.Errorf("ldap.error: ldap search fail: %v", err) - return nil, internalServerError - } - - if len(sr.Entries) == 0 { - logger.Infof("ldap auth fail, no such user: %s", user) - return nil, loginFailError - } - - if len(sr.Entries) > 1 { - logger.Errorf("ldap.error: search user(%s), multi entries found", user) - return nil, 
internalServerError - } - - if err := conn.Bind(sr.Entries[0].DN, pass); err != nil { - logger.Infof("ldap auth fail, password error, user: %s", user) - return nil, loginFailError - } - return sr, nil -} diff --git a/models/metric_description.go b/models/metric_description.go deleted file mode 100644 index e0c92939..00000000 --- a/models/metric_description.go +++ /dev/null @@ -1,151 +0,0 @@ -package models - -import ( - "strings" - - "github.com/toolkits/pkg/logger" -) - -type MetricDescription struct { - Id int64 `json:"id"` - Metric string `json:"metric"` - Description string `json:"description"` -} - -func (md *MetricDescription) TableName() string { - return "metric_description" -} - -func MetricDescriptionUpdate(mds []MetricDescription) error { - for i := 0; i < len(mds); i++ { - mds[i].Metric = strings.TrimSpace(mds[i].Metric) - md, err := MetricDescriptionGet("metric = ?", mds[i].Metric) - if err != nil { - return err - } - - if md == nil { - // insert - err = DBInsertOne(mds[i]) - if err != nil { - return err - } - } else { - // update - md.Description = mds[i].Description - err = md.Update("description") - if err != nil { - return err - } - } - } - return nil -} - -func (md *MetricDescription) Update(cols ...string) error { - _, err := DB.Where("id=?", md.Id).Cols(cols...).Update(md) - if err != nil { - logger.Errorf("mysql.error: update metric_description(metric=%s) fail: %v", md.Metric, err) - return internalServerError - } - - return nil -} - -func MetricDescriptionGet(where string, args ...interface{}) (*MetricDescription, error) { - var obj MetricDescription - has, err := DB.Where(where, args...).Get(&obj) - if err != nil { - logger.Errorf("mysql.error: query metric_description(%s)%+v fail: %s", where, args, err) - return nil, internalServerError - } - - if !has { - return nil, nil - } - - return &obj, nil -} - -func MetricDescriptionTotal(query string) (num int64, err error) { - if query != "" { - q := "%" + query + "%" - num, err = DB.Where("metric like ? or description like ?", q, q).Count(new(MetricDescription)) - } else { - num, err = DB.Count(new(MetricDescription)) - } - - if err != nil { - logger.Errorf("mysql.error: count metric_description fail: %v", err) - return 0, internalServerError - } - - return num, nil -} - -func MetricDescriptionGets(query string, limit, offset int) ([]MetricDescription, error) { - session := DB.Limit(limit, offset).OrderBy("metric") - if query != "" { - q := "%" + query + "%" - session = session.Where("metric like ? 
or description like ?", q, q) - } - - var objs []MetricDescription - err := session.Find(&objs) - if err != nil { - logger.Errorf("mysql.error: query metric_description fail: %v", err) - return nil, internalServerError - } - - if len(objs) == 0 { - return []MetricDescription{}, nil - } - - return objs, nil -} - -func MetricDescriptionGetAll() ([]MetricDescription, error) { - var objs []MetricDescription - err := DB.Find(&objs) - if err != nil { - logger.Errorf("mysql.error: query metric_description fail: %v", err) - return nil, internalServerError - } - - if len(objs) == 0 { - return []MetricDescription{}, nil - } - - return objs, nil -} - -// MetricDescriptionMapper 即时看图页面,应该会用到这个方法,填充metric对应的description -func MetricDescriptionMapper(metrics []string) (map[string]string, error) { - var objs []MetricDescription - err := DB.In("metric", metrics).Find(&objs) - if err != nil { - logger.Errorf("mysql.error: query metric_description fail: %v", err) - return nil, internalServerError - } - - count := len(objs) - if count == 0 { - return map[string]string{}, nil - } - - mapper := make(map[string]string, count) - for i := 0; i < count; i++ { - mapper[objs[i].Metric] = objs[i].Description - } - - return mapper, nil -} - -func MetricDescriptionDel(ids []int64) error { - _, err := DB.In("id", ids).Delete(new(MetricDescription)) - if err != nil { - logger.Errorf("mysql.error: delete metric_description fail: %v", err) - return internalServerError - } - return nil -} diff --git a/models/mute.go b/models/mute.go deleted file mode 100644 index 483cb8ef..00000000 --- a/models/mute.go +++ /dev/null @@ -1,154 +0,0 @@ -package models - -import ( - "fmt" - "regexp" - "strings" - "time" - - "github.com/toolkits/pkg/logger" -) - -type Mute struct { - Id int64 `json:"id"` - ClasspathPrefix string `json:"classpath_prefix"` - Metric string `json:"metric"` - ResFilters string `json:"res_filters"` - TagFilters string `json:"tags_filters"` - Cause string `json:"cause"` - Btime int64 `json:"btime"` - Etime int64 `json:"etime"` - CreateBy string `json:"create_by"` - CreateAt int64 `json:"create_at"` - ResRegexp *regexp.Regexp `xorm:"-" json:"-"` - TagsMap map[string]string `xorm:"-" json:"-"` -} - -func (m *Mute) TableName() string { - return "mute" -} - -func (m *Mute) Parse() error { - var err error - if m.ResFilters != "" { - m.ResRegexp, err = regexp.Compile(m.ResFilters) - if err != nil { - return err - } - } - - if m.TagFilters != "" { - tags := strings.Fields(m.TagFilters) - m.TagsMap = make(map[string]string) - for i := 0; i < len(tags); i++ { - pair := strings.Split(tags[i], "=") - if len(pair) != 2 { - return fmt.Errorf("tagfilters format error") - } - m.TagsMap[pair[0]] = pair[1] - } - } - - return nil -} - -func (m *Mute) Validate() error { - m.Metric = strings.TrimSpace(m.Metric) - m.ResFilters = strings.TrimSpace(m.ResFilters) - m.TagFilters = strings.TrimSpace(m.TagFilters) - return m.Parse() -} - -func (m *Mute) Add() error { - if err := m.Validate(); err != nil { - return err - } - m.CreateAt = time.Now().Unix() - return DBInsertOne(m) -} - -func (m *Mute) Del() error { - _, err := DB.Where("id=?", m.Id).Delete(new(Mute)) - if err != nil { - logger.Errorf("mysql.error: delete mute(id=%d) fail: %v", m.Id, err) - return internalServerError - } - return nil -} - -func MuteGet(where string, args ...interface{}) (*Mute, error) { - var obj Mute - has, err := DB.Where(where, args...).Get(&obj) - if err != nil { - logger.Errorf("mysql.error: query mute(%s)%+v fail: %s", where, args, err) - return nil, 
internalServerError - } - - if !has { - return nil, nil - } - - return &obj, nil -} - -func MuteTotal(query string) (num int64, err error) { - if query != "" { - q := "%" + query + "%" - num, err = DB.Where("metric like ? or cause like ? or res_filters like ? or tag_filters like ?", q, q, q, q).Count(new(Mute)) - } else { - num, err = DB.Count(new(Mute)) - } - - if err != nil { - logger.Errorf("mysql.error: count mute(query: %s) fail: %v", query, err) - return num, internalServerError - } - - return num, nil -} - -func MuteGets(query string, limit, offset int) ([]Mute, error) { - session := DB.Limit(limit, offset).OrderBy("metric") - if query != "" { - q := "%" + query + "%" - session = session.Where("metric like ? or cause like ? or res_filters like ? or tag_filters like ?", q, q, q, q) - } - - var objs []Mute - err := session.Find(&objs) - if err != nil { - logger.Errorf("mysql.error: select mute(query: %s) fail: %v", query, err) - return objs, internalServerError - } - - if len(objs) == 0 { - return []Mute{}, nil - } - - return objs, nil -} - -func MuteGetsAll() ([]Mute, error) { - var objs []Mute - - err := DB.Find(&objs) - if err != nil { - logger.Errorf("mysql.error: get all mute fail: %v", err) - return nil, internalServerError - } - - if len(objs) == 0 { - return []Mute{}, nil - } - - return objs, nil -} - -// MuteCleanExpire 这个方法应该由cron调用,所以返回error不需要是用户友好的 -func MuteCleanExpire() error { - _, err := DB.Where("etime < unix_timestamp(now())").Delete(new(Mute)) - if err != nil { - logger.Errorf("mysql.error: MuteCleanExpire fail: %v", err) - } - return err -} diff --git a/models/mysql.go b/models/mysql.go deleted file mode 100644 index 18384de6..00000000 --- a/models/mysql.go +++ /dev/null @@ -1,50 +0,0 @@ -package models - -import ( - "fmt" - "os" - "time" - - "xorm.io/xorm" - "xorm.io/xorm/log" - - "github.com/toolkits/pkg/logger" -) - -var DB *xorm.Engine - -type MysqlSection struct { - Addr string `yaml:"addr"` - Max int `yaml:"max"` - Idle int `yaml:"idle"` - Debug bool `yaml:"debug"` -} - -var MySQL MysqlSection - -func InitMySQL(MySQL MysqlSection) { - conf := MySQL - - db, err := xorm.NewEngine("mysql", conf.Addr) - if err != nil { - fmt.Printf("cannot connect mysql[%s]: %v", conf.Addr, err) - os.Exit(1) - } - - db.SetMaxIdleConns(conf.Idle) - db.SetMaxOpenConns(conf.Max) - db.SetConnMaxLifetime(time.Hour) - db.ShowSQL(conf.Debug) - db.Logger().SetLevel(log.LOG_INFO) - DB = db -} - -func DBInsertOne(bean interface{}) error { - _, err := DB.InsertOne(bean) - if err != nil { - logger.Errorf("mysql.error: insert fail: %v, to insert object: %+v", err, bean) - return internalServerError - } - - return nil -} diff --git a/models/resource.go b/models/resource.go deleted file mode 100644 index a654ce4e..00000000 --- a/models/resource.go +++ /dev/null @@ -1,243 +0,0 @@ -package models - -import ( - "strings" - - "github.com/didi/nightingale/v5/pkg/istr" - "github.com/toolkits/pkg/logger" - "github.com/toolkits/pkg/str" -) - -type Resource struct { - Id int64 `json:"id"` - Ident string `json:"ident"` - Alias string `json:"alias"` - Tags string `json:"tags"` - Note string `json:"note"` - MuteBtime int64 `json:"mute_btime"` - MuteEtime int64 `json:"mute_etime"` - ClasspathIds []int64 `xorm:"-" json:"classpath_ids"` -} - -func (r *Resource) Del() error { - session := DB.NewSession() - defer session.Close() - - if err := session.Begin(); err != nil { - return err - } - - if _, err := session.Exec("DELETE FROM classpath_resource WHERE res_ident=?", r.Ident); err != nil { - return err - } - 
- if _, err := session.Exec("DELETE FROM resource WHERE id=?", r.Id); err != nil { - return err - } - - return session.Commit() -} - -func ResourceTotalByClasspathId(classpathIds []int64, query string) (int64, error) { - if len(classpathIds) == 0 { - return 0, nil - } - - q := "%" + query + "%" - num, err := DB.Where("ident in (select res_ident from classpath_resource where classpath_id in ("+str.IdsString(classpathIds)+")) and (ident like ? or alias like ? or tags like ? or note like ?)", q, q, q, q).Count(new(Resource)) - if err != nil { - logger.Errorf("mysql.error count resource in classpath(id=%v) query=%s fail: %v", classpathIds, query, err) - return 0, internalServerError - } - return num, nil -} - -func ResourceGetsByClasspathId(classpathIds []int64, query string, limit, offset int) ([]Resource, error) { - if len(classpathIds) == 0 { - return []Resource{}, nil - } - - q := "%" + query + "%" - - var objs []Resource - err := DB.Where("ident in (select res_ident from classpath_resource where classpath_id in ("+str.IdsString(classpathIds)+")) and (ident like ? or alias like ? or tags like ? or note like ?)", q, q, q, q).OrderBy("ident").Limit(limit, offset).Find(&objs) - if err != nil { - logger.Errorf("mysql.error query resource in classpath(id=%d) query=%s fail: %v", classpathIds, query, err) - return nil, internalServerError - } - - if len(objs) == 0 { - return []Resource{}, nil - } - - return objs, nil -} - -// 这个sql写法很简单,只是不知道性能是否会是问题 -func ResourceTotalByClasspathQuery(qpaths []int64, qres string) (int64, error) { - if len(qpaths) == 0 { - return ResourceTotal(qres) - } - - num, err := DB.Where("ident in (select res_ident from classpath_resource where classpath_id in ("+str.IdsString(qpaths)+")) and (ident like ? or alias like ?)", "%"+qres+"%", "%"+qres+"%").Count(new(Resource)) - if err != nil { - logger.Errorf("mysql.error ResourceTotalByClasspathQuery fail: %v", err) - return 0, internalServerError - } - return num, nil -} - -func ResourceGetsByClasspathQuery(qpaths []int64, qres string, limit, offset int) ([]Resource, error) { - if len(qpaths) == 0 { - return ResourceGets(qres, limit, offset) - } - - var objs []Resource - err := DB.Where("ident in (select res_ident from classpath_resource where classpath_id in ("+str.IdsString(qpaths)+")) and (ident like ? or alias like ?)", "%"+qres+"%", "%"+qres+"%").OrderBy("ident").Limit(limit, offset).Find(&objs) - if err != nil { - logger.Errorf("mysql.error ResourceGetsByClasspathQuery fail: %v", err) - return nil, internalServerError - } - - if len(objs) == 0 { - return []Resource{}, nil - } - - return objs, nil -} - -// ResourceTotal query resource only -func ResourceTotal(query string) (int64, error) { - num, err := DB.Where("ident like ? or alias like ?", "%"+query+"%", "%"+query+"%").Count(new(Resource)) - if err != nil { - logger.Errorf("mysql.error query(%s) resource fail: %v", query, err) - return 0, internalServerError - } - - return num, nil -} - -// ResourceGets query resource only -func ResourceGets(query string, limit, offset int) ([]Resource, error) { - var objs []Resource - - err := DB.Where("ident like ? 
or alias like ?", "%"+query+"%", "%"+query+"%").OrderBy("ident").Limit(limit, offset).Find(&objs) - if err != nil { - logger.Errorf("mysql.error query resource fail: %v", err) - return nil, internalServerError - } - - if len(objs) == 0 { - return []Resource{}, nil - } - - return objs, nil -} - -func ResourceGetAll() ([]Resource, error) { - var objs []Resource - - err := DB.Find(&objs) - if err != nil { - logger.Errorf("mysql.error query resource fail: %v", err) - return nil, internalServerError - } - - if len(objs) == 0 { - return []Resource{}, nil - } - - return objs, nil -} - -func ResourceCleanExpire() error { - _, err := DB.Exec("UPDATE resource SET mute_btime = 0, mute_etime = 0 WHERE mute_etime < unix_timestamp(now())") - if err != nil { - logger.Errorf("mysql.error: ResourceCleanExpire fail: %v", err) - } - return err -} - -// ResourceAliasMapper 返回map,key是ident,value是alias -func ResourceAliasMapper() (map[string]string, error) { - var objs []Resource - err := DB.Cols("ident", "alias").Find(&objs) - if err != nil { - logger.Errorf("mysql.error: ResourceAliasMapper fail: %v", err) - return nil, err - } - - count := len(objs) - if count == 0 { - return map[string]string{}, nil - } - - mapper := make(map[string]string, count) - for i := 0; i < count; i++ { - mapper[objs[i].Ident] = objs[i].Alias - } - - return mapper, nil -} - -func ResourceGet(where string, args ...interface{}) (*Resource, error) { - var obj Resource - has, err := DB.Where(where, args...).Get(&obj) - if err != nil { - logger.Errorf("mysql.error: get resource(%s)%+v fail: %s", where, args, err) - return nil, internalServerError - } - - if !has { - return nil, nil - } - - return &obj, nil -} - -func ResourceUpdateNote(ids []int64, note string) error { - _, err := DB.Exec("UPDATE resource SET note=? WHERE id in ("+str.IdsString(ids)+")", note) - if err != nil { - logger.Errorf("mysql.error: update resource note fail: %s", err) - return internalServerError - } - return nil -} - -// 资源tags这个更新,一定要认真校验tags格式,后面会把这个tags附到监控数据里 -func ResourceUpdateTags(ids []int64, tags string) error { - tags = strings.TrimSpace(tags) - - if tags != "" { - // 不为空才值得去校验,空的字符串,没啥好校验的 - arr := strings.Fields(tags) - cnt := len(arr) - for i := 0; i < cnt; i++ { - tagarr := strings.Split(arr[i], "=") - if len(tagarr) != 2 { - return _e("Tags(%s) invalid", arr[i]) - } - - if istr.SampleKeyInvalid(tagarr[0]) { - return _e("Tags(%s) invalid", arr[i]) - } - } - } - - _, err := DB.Exec("UPDATE resource SET tags=? WHERE id in ("+str.IdsString(ids)+")", tags) - if err != nil { - logger.Errorf("mysql.error: update resource tags fail: %s", err) - return internalServerError - } - - return nil -} - -func ResourceUpdateMute(ids []int64, btime, etime int64) error { - _, err := DB.Exec("UPDATE resource SET mute_btime=?, mute_etime=? 
WHERE id in ("+str.IdsString(ids)+")", btime, etime)
-	if err != nil {
-		logger.Errorf("mysql.error: update resource mute fail: %s", err)
-		return internalServerError
-	}
-	return nil
-}
diff --git a/models/resource_updater.go b/models/resource_updater.go
deleted file mode 100644
index 89797f9a..00000000
--- a/models/resource_updater.go
+++ /dev/null
@@ -1,73 +0,0 @@
-package models
-
-import (
-	"time"
-
-	cmap "github.com/orcaman/concurrent-map"
-	"github.com/toolkits/pkg/logger"
-)
-
-type AliasTime struct {
-	Alias string
-	Time  int64
-}
-
-var AliasMapper = cmap.New()
-
-func UpdateAlias() error {
-	mapper := AliasMapper.Items()
-	if len(mapper) == 0 {
-		logger.Warning("alias mapper is nil, no points push?")
-		return nil
-	}
-
-	now := time.Now().Unix()
-
-	// clean up first: only keep entries from the last 15 seconds
-	for key, at := range mapper {
-		if at.(*AliasTime).Time < now-15 {
-			AliasMapper.Remove(key)
-		}
-	}
-
-	// load all ident->alias mappings from the database
-	dbmap, err := ResourceAliasMapper()
-	if err != nil {
-		logger.Warningf("ResourceAliasMapper fail: %v", err)
-		return err
-	}
-
-	// take the latest ident->alias mappings from memory
-	upmap := AliasMapper.Items()
-
-	for key, upval := range upmap {
-		dbval, has := dbmap[key]
-		if !has {
-			// not in the database yet, insert it
-			err = DBInsertOne(Resource{
-				Ident: key,
-				Alias: upval.(*AliasTime).Alias,
-			})
-			if err != nil {
-				logger.Errorf("mysql.error: insert resource(ident=%s, alias=%s) fail: %v", key, upval.(*AliasTime).Alias, err)
-			} else {
-				// bind new resources to the classpath with id 1 by default, so users can manage them easily
-				if err = ClasspathResourceAdd(1, key); err != nil {
-					logger.Errorf("bind resource(%s) to classpath(1) fail: %v", key, err)
-				}
-			}
-
-			continue
-		}
-
-		if upval.(*AliasTime).Alias != dbval {
-			// the alias differs from the value in the DB, update it
-			_, err = DB.Exec("UPDATE resource SET alias=? WHERE ident=?", upval.(*AliasTime).Alias, key)
-			if err != nil {
-				logger.Errorf("mysql.error: update resource(ident=%s, alias=%s) fail: %v", key, upval.(*AliasTime).Alias, err)
-			}
-		}
-	}
-
-	return nil
-}
diff --git a/models/role_operation.go b/models/role_operation.go
deleted file mode 100644
index f0d34de4..00000000
--- a/models/role_operation.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package models
-
-import (
-	"github.com/toolkits/pkg/logger"
-	"xorm.io/builder"
-)
-
-type RoleOperation struct {
-	RoleName  string
-	Operation string
-}
-
-func (RoleOperation) TableName() string {
-	return "role_operation"
-}
-
-func RoleHasOperation(roles []string, operation string) (bool, error) {
-	cond := builder.NewCond()
-	cond = cond.And(builder.In("role_name", roles))
-	cond = cond.And(builder.Eq{"operation": operation})
-	num, err := DB.Where(cond).Count(new(RoleOperation))
-	if err != nil {
-		logger.Errorf("mysql.error query role_operation fail: %v", err)
-		return false, internalServerError
-	}
-	return num > 0, nil
-}
diff --git a/models/user.go b/models/user.go
deleted file mode 100644
index 6b7bd51a..00000000
--- a/models/user.go
+++ /dev/null
@@ -1,539 +0,0 @@
-package models
-
-import (
-	"encoding/json"
-	"fmt"
-	"os"
-	"strings"
-	"time"
-
-	"github.com/toolkits/pkg/logger"
-	"github.com/toolkits/pkg/str"
-	"xorm.io/builder"
-
-	"github.com/didi/nightingale/v5/pkg/ierr"
-)
-
-type User struct {
-	Id         int64    `json:"id"`
-	Username   string   `json:"username"`
-	Nickname   string   `json:"nickname"`
-	Password   string   `json:"-"`
-	Phone      string   `json:"phone"`
-	Email      string   `json:"email"`
-	Portrait   string   `json:"portrait"`
-	Status     int      `json:"status"`
-	RolesForDB string   `json:"-" xorm:"'roles'"` // this field is what gets written to the database
-	RolesForFE []string `json:"roles" xorm:"-"`   // this field is what the frontend interacts with
-	Contacts   json.RawMessage
`json:"contacts"` // 内容为 map[string]string 结构 - CreateAt int64 `json:"create_at"` - CreateBy string `json:"create_by"` - UpdateAt int64 `json:"update_at"` - UpdateBy string `json:"update_by"` -} - -func (u *User) TableName() string { - return "user" -} - -func (u *User) Validate() error { - u.Username = strings.TrimSpace(u.Username) - - if u.Username == "" { - return _e("Username is blank") - } - - if str.Dangerous(u.Username) { - return _e("Username has invalid characters") - } - - if str.Dangerous(u.Nickname) { - return _e("Nickname has invalid characters") - } - - if u.Phone != "" && !str.IsPhone(u.Phone) { - return _e("Phone invalid") - } - - if u.Email != "" && !str.IsMail(u.Email) { - return _e("Email invalid") - } - - return nil -} - -func (u *User) Update(cols ...string) error { - if err := u.Validate(); err != nil { - return err - } - - _, err := DB.Where("id=?", u.Id).Cols(cols...).Update(u) - if err != nil { - logger.Errorf("mysql.error: update user fail: %v", err) - return internalServerError - } - - return nil -} - -func (u *User) Add() error { - num, err := DB.Where("username=?", u.Username).Count(new(User)) - if err != nil { - logger.Errorf("mysql.error: count user(%s) fail: %v", u.Username, err) - return internalServerError - } - - if num > 0 { - return _e("Username %s already exists", u.Username) - } - - return DBInsertOne(u) -} - -func InitRoot() { - var u User - has, err := DB.Where("username=?", "root").Get(&u) - if err != nil { - fmt.Println("fatal: cannot query user root,", err) - os.Exit(1) - } - - if has { - return - } - - pass, err := CryptoPass("root.2020") - if err != nil { - fmt.Println("fatal: cannot crypto password,", err) - os.Exit(1) - } - - now := time.Now().Unix() - - u = User{ - Username: "root", - Password: pass, - Nickname: "超管", - Portrait: "", - RolesForDB: "Admin", - Contacts: []byte("{}"), - CreateAt: now, - UpdateAt: now, - CreateBy: "system", - UpdateBy: "system", - } - - _, err = DB.Insert(u) - if err != nil { - fmt.Println("fatal: cannot insert user root", err) - os.Exit(1) - } - - fmt.Println("user root init done") -} - -func UserGetByUsername(username string) (*User, error) { - return UserGet("username=?", username) -} - -func UserGetById(id int64) (*User, error) { - return UserGet("id=?", id) -} - -func UserGet(where string, args ...interface{}) (*User, error) { - var obj User - has, err := DB.Where(where, args...).Get(&obj) - if err != nil { - logger.Errorf("mysql.error: query user(%s)%+v fail: %s", where, args, err) - return nil, internalServerError - } - - if !has { - return nil, nil - } - - obj.RolesForFE = strings.Fields(obj.RolesForDB) - - return &obj, nil -} - -func UserTotal(query string) (num int64, err error) { - if query != "" { - q := "%" + query + "%" - num, err = DB.Where("username like ? or nickname like ? or phone like ? or email like ?", q, q, q, q).Count(new(User)) - } else { - num, err = DB.Count(new(User)) - } - - if err != nil { - logger.Errorf("mysql.error: count user(query: %s) fail: %v", query, err) - return num, internalServerError - } - - return num, nil -} - -func UserGets(query string, limit, offset int) ([]User, error) { - session := DB.Limit(limit, offset).OrderBy("username") - if query != "" { - q := "%" + query + "%" - session = session.Where("username like ? or nickname like ? or phone like ? 
or email like ?", q, q, q, q) - } - - var users []User - err := session.Find(&users) - if err != nil { - logger.Errorf("mysql.error: select user(query: %s) fail: %v", query, err) - return users, internalServerError - } - - if len(users) == 0 { - return []User{}, nil - } - - for i := 0; i < len(users); i++ { - users[i].RolesForFE = strings.Fields(users[i].RolesForDB) - } - - return users, nil -} - -func UserGetAll() ([]User, error) { - var users []User - - err := DB.Find(&users) - if err != nil { - logger.Errorf("mysql.error: select user fail: %v", err) - return users, internalServerError - } - - if len(users) == 0 { - return []User{}, nil - } - - for i := 0; i < len(users); i++ { - users[i].RolesForFE = strings.Fields(users[i].RolesForDB) - } - - return users, nil -} - -func UserGetsByIds(ids []int64) ([]User, error) { - if len(ids) == 0 { - return []User{}, nil - } - - var users []User - err := DB.In("id", ids).OrderBy("username").Find(&users) - if err != nil { - logger.Errorf("mysql.error: query users by ids fail: %v", err) - return users, internalServerError - } - - if len(users) == 0 { - return []User{}, nil - } - - for i := 0; i < len(users); i++ { - users[i].RolesForFE = strings.Fields(users[i].RolesForDB) - } - - return users, nil -} - -func UserGetsByIdsStr(ids []string) ([]User, error) { - var users []User - - err := DB.Where("id in (" + strings.Join(ids, ",") + ")").Find(&users) - if err != nil { - logger.Errorf("mysql.error: UserGetsByIds fail: %v", err) - return nil, internalServerError - } - - if len(users) == 0 { - return []User{}, nil - } - - for i := 0; i < len(users); i++ { - users[i].RolesForFE = strings.Fields(users[i].RolesForDB) - } - - return users, nil -} - -func PassLogin(username, pass string) (*User, error) { - user, err := UserGetByUsername(username) - if err != nil { - return nil, err - } - - if user == nil { - logger.Infof("password auth fail, no such user: %s", username) - return nil, loginFailError - } - - loginPass, err := CryptoPass(pass) - if err != nil { - return nil, internalServerError - } - - if loginPass != user.Password { - logger.Infof("password auth fail, password error, user: %s", username) - return nil, loginFailError - } - - return user, nil -} - -func LdapLogin(username, pass string) (*User, error) { - sr, err := ldapReq(username, pass) - if err != nil { - return nil, err - } - - user, err := UserGetByUsername(username) - if err != nil { - return nil, err - } - - if user == nil { - // default user settings - user = &User{ - Username: username, - Nickname: username, - } - } - - // copy attributes from ldap - attrs := LDAP.Attributes - if attrs.Nickname != "" { - user.Nickname = sr.Entries[0].GetAttributeValue(attrs.Nickname) - } - if attrs.Email != "" { - user.Email = sr.Entries[0].GetAttributeValue(attrs.Email) - } - if attrs.Phone != "" { - user.Phone = sr.Entries[0].GetAttributeValue(attrs.Phone) - } - - if user.Id > 0 { - if LDAP.CoverAttributes { - _, err := DB.Where("id=?", user.Id).Update(user) - if err != nil { - logger.Errorf("mysql.error: update user %+v fail: %v", user, err) - return nil, internalServerError - } - } - return user, nil - } - - now := time.Now().Unix() - - user.Password = "******" - user.Portrait = "/img/linux.jpeg" - user.RolesForDB = "Standard" - user.RolesForFE = []string{"Standard"} - user.Contacts = []byte("{}") - user.CreateAt = now - user.UpdateAt = now - user.CreateBy = "ldap" - user.UpdateBy = "ldap" - - err = DBInsertOne(user) - return user, err -} - -func (u *User) ChangePassword(oldpass, newpass string) 
error { - _oldpass, err := CryptoPass(oldpass) - if err != nil { - return err - } - _newpass, err := CryptoPass(newpass) - if err != nil { - return err - } - - if u.Password != _oldpass { - return _e("Incorrect old password") - } - - u.Password = _newpass - return u.Update("password") -} - -func (u *User) _del() error { - session := DB.NewSession() - defer session.Close() - - if err := session.Begin(); err != nil { - return err - } - - if _, err := session.Exec("DELETE FROM user_token WHERE user_id=?", u.Id); err != nil { - return err - } - - if _, err := session.Exec("DELETE FROM user_group_member WHERE user_id=?", u.Id); err != nil { - return err - } - - if _, err := session.Exec("DELETE FROM classpath_favorite WHERE user_id=?", u.Id); err != nil { - return err - } - - if _, err := session.Exec("DELETE FROM alert_rule_group_favorite WHERE user_id=?", u.Id); err != nil { - return err - } - - if _, err := session.Exec("DELETE FROM user WHERE id=?", u.Id); err != nil { - return err - } - - return session.Commit() -} - -func (u *User) Del() error { - err := u._del() - if err != nil { - logger.Errorf("mysql.error: delete user(%d, %s) fail: %v", u.Id, u.Username, err) - return internalServerError - } - return nil -} - -func (u *User) FavoriteClasspathIds() ([]int64, error) { - return ClasspathFavoriteGetClasspathIds(u.Id) -} - -func (u *User) FavoriteAlertRuleGroupIds() ([]int64, error) { - return AlertRuleGroupFavoriteGetGroupIds(u.Id) -} - -func (u *User) FavoriteDashboardIds() ([]int64, error) { - return DashboardFavoriteGetDashboardIds(u.Id) -} - -// UserGroupIds 我是成员的用户组ID列表 -func (u *User) UserGroupIds() ([]int64, error) { - var ids []int64 - err := DB.Table(new(UserGroupMember)).Select("group_id").Where("user_id=?", u.Id).Find(&ids) - if err != nil { - logger.Errorf("mysql.error: query user_group_member fail: %v", err) - return ids, internalServerError - } - - return ids, nil -} - -func (u *User) FavoriteClasspaths() ([]Classpath, error) { - ids, err := u.FavoriteClasspathIds() - if err != nil { - return nil, err - } - - var objs []Classpath - err = DB.In("id", ids).OrderBy("path").Find(&objs) - if err != nil { - logger.Errorf("mysql.error: query my classpath fail: %v", err) - return nil, internalServerError - } - - if len(objs) == 0 { - return []Classpath{}, nil - } - - return objs, nil -} - -func (u *User) FavoriteAlertRuleGroups() ([]AlertRuleGroup, error) { - ids, err := u.FavoriteAlertRuleGroupIds() - if err != nil { - return nil, err - } - - var objs []AlertRuleGroup - err = DB.In("id", ids).OrderBy("name").Find(&objs) - if err != nil { - logger.Errorf("mysql.error: query my alert_rule_group fail: %v", err) - return nil, internalServerError - } - - if len(objs) == 0 { - return []AlertRuleGroup{}, nil - } - - return objs, nil -} - -func (u *User) MyUserGroups() ([]UserGroup, error) { - cond := builder.NewCond() - cond = cond.And(builder.Eq{"create_by": u.Username}) - - ids, err := u.UserGroupIds() - if err != nil { - return nil, err - } - - if len(ids) > 0 { - cond = cond.Or(builder.In("id", ids)) - } - - var objs []UserGroup - err = DB.Where(cond).OrderBy("name").Find(&objs) - if err != nil { - logger.Errorf("mysql.error: query my user_group fail: %v", err) - return nil, internalServerError - } - - if len(objs) == 0 { - return []UserGroup{}, nil - } - - return objs, nil -} - -func (u *User) CanModifyUserGroup(ug *UserGroup) (bool, error) { - // 我是管理员,自然可以 - roles := strings.Fields(u.RolesForDB) - for i := 0; i < len(roles); i++ { - if roles[i] == "Admin" { - return true, nil - } - 
} - - // 我是创建者,自然可以 - if ug.CreateBy == u.Username { - return true, nil - } - - // 我是成员,也可以吧,简单搞 - num, err := UserGroupMemberCount("user_id=? and group_id=?", u.Id, ug.Id) - if err != nil { - return false, err - } - - return num > 0, nil -} - -func (u *User) CanDo(op string) (bool, error) { - roles := strings.Fields(u.RolesForDB) - for i := 0; i < len(roles); i++ { - if roles[i] == "Admin" { - return true, nil - } - } - - return RoleHasOperation(roles, op) -} - -// MustPerm return *User for link program -func (u *User) MustPerm(op string) *User { - can, err := u.CanDo(op) - ierr.Dangerous(err, 500) - - if !can { - ierr.Bomb(403, "forbidden") - } - - return u -} diff --git a/models/user_group.go b/models/user_group.go deleted file mode 100644 index fcc29a6f..00000000 --- a/models/user_group.go +++ /dev/null @@ -1,222 +0,0 @@ -package models - -import ( - "strings" - "time" - - "github.com/toolkits/pkg/logger" - "github.com/toolkits/pkg/str" -) - -type UserGroup struct { - Id int64 `json:"id"` - Name string `json:"name"` - Note string `json:"note"` - CreateAt int64 `json:"create_at"` - CreateBy string `json:"create_by"` - UpdateAt int64 `json:"update_at"` - UpdateBy string `json:"update_by"` -} - -func (ug *UserGroup) TableName() string { - return "user_group" -} - -func (ug *UserGroup) Validate() error { - if str.Dangerous(ug.Name) { - return _e("Group name has invalid characters") - } - - if str.Dangerous(ug.Note) { - return _e("Group note has invalid characters") - } - - return nil -} - -func (ug *UserGroup) Add() error { - if err := ug.Validate(); err != nil { - return err - } - - num, err := UserGroupCount("name=?", ug.Name) - if err != nil { - return err - } - - if num > 0 { - return _e("UserGroup %s already exists", ug.Name) - } - - now := time.Now().Unix() - ug.CreateAt = now - ug.UpdateAt = now - return DBInsertOne(ug) -} - -func (ug *UserGroup) Update(cols ...string) error { - if err := ug.Validate(); err != nil { - return err - } - - _, err := DB.Where("id=?", ug.Id).Cols(cols...).Update(ug) - if err != nil { - logger.Errorf("mysql.error: update user_group(id=%d) fail: %v", ug.Id, err) - return internalServerError - } - - return nil -} - -func UserGroupTotal(query string) (num int64, err error) { - if query != "" { - q := "%" + query + "%" - num, err = DB.Where("name like ? or note like ?", q, q).Count(new(UserGroup)) - } else { - num, err = DB.Count(new(UserGroup)) - } - - if err != nil { - logger.Errorf("mysql.error: count user_group fail: %v", err) - return 0, internalServerError - } - - return num, nil -} - -func UserGroupCount(where string, args ...interface{}) (num int64, err error) { - num, err = DB.Where(where, args...).Count(new(UserGroup)) - if err != nil { - logger.Errorf("mysql.error: count user_group fail: %v", err) - return num, internalServerError - } - return num, nil -} - -func UserGroupGets(query string, limit, offset int) ([]UserGroup, error) { - session := DB.Limit(limit, offset).OrderBy("name") - if query != "" { - q := "%" + query + "%" - session = session.Where("name like ? 
or note like ?", q, q) - } - - var objs []UserGroup - err := session.Find(&objs) - if err != nil { - logger.Errorf("mysql.error: query user_group fail: %v", err) - return objs, internalServerError - } - - if len(objs) == 0 { - return []UserGroup{}, nil - } - - return objs, nil -} - -func UserGroupGetsByIdsStr(ids []string) ([]UserGroup, error) { - if len(ids) == 0 { - return []UserGroup{}, nil - } - - var objs []UserGroup - - err := DB.Where("id in (" + strings.Join(ids, ",") + ")").Find(&objs) - if err != nil { - logger.Errorf("mysql.error: UserGroupGetsByIds fail: %v", err) - return nil, internalServerError - } - - if len(objs) == 0 { - return []UserGroup{}, nil - } - - return objs, nil -} - -func UserGroupGet(where string, args ...interface{}) (*UserGroup, error) { - var obj UserGroup - has, err := DB.Where(where, args...).Get(&obj) - if err != nil { - logger.Errorf("mysql.error: query user_group(%s)%+v fail: %s", where, args, err) - return nil, internalServerError - } - - if !has { - return nil, nil - } - - return &obj, nil -} - -func (ug *UserGroup) MemberIds() ([]int64, error) { - var ids []int64 - err := DB.Table(new(UserGroupMember)).Select("user_id").Where("group_id=?", ug.Id).Find(&ids) - if err != nil { - logger.Errorf("mysql.error: query user_group_member fail: %v", err) - return ids, internalServerError - } - - if len(ids) == 0 { - return []int64{}, nil - } - - return ids, nil -} - -func (ug *UserGroup) AddMembers(userIds []int64) error { - count := len(userIds) - for i := 0; i < count; i++ { - user, err := UserGetById(userIds[i]) - if err != nil { - return err - } - if user == nil { - continue - } - err = UserGroupMemberAdd(ug.Id, user.Id) - if err != nil { - return err - } - } - return nil -} - -func (ug *UserGroup) DelMembers(userIds []int64) error { - return UserGroupMemberDel(ug.Id, userIds) -} - -func (ug *UserGroup) Del() error { - session := DB.NewSession() - defer session.Close() - - if err := session.Begin(); err != nil { - return err - } - - if _, err := session.Exec("DELETE FROM user_group_member WHERE group_id=?", ug.Id); err != nil { - return err - } - - if _, err := session.Exec("DELETE FROM user_group WHERE id=?", ug.Id); err != nil { - return err - } - - return session.Commit() -} - -func UserGroupGetAll() ([]UserGroup, error) { - var userGroups []UserGroup - - err := DB.Find(&userGroups) - if err != nil { - logger.Errorf("mysql.error: select user_group fail: %v", err) - return userGroups, internalServerError - } - - if len(userGroups) == 0 { - return []UserGroup{}, nil - } - - return userGroups, nil -} diff --git a/models/user_token.go b/models/user_token.go deleted file mode 100644 index 743a47e9..00000000 --- a/models/user_token.go +++ /dev/null @@ -1,100 +0,0 @@ -package models - -import ( - "fmt" - "os" - "time" - - "github.com/toolkits/pkg/logger" - "github.com/toolkits/pkg/str" -) - -type UserToken struct { - UserId int64 `json:"user_id"` - Username string `json:"username"` - Token string `json:"token"` -} - -func (UserToken) TableName() string { - return "user_token" -} - -func UserTokenGet(where string, args ...interface{}) (*UserToken, error) { - var obj UserToken - has, err := DB.Where(where, args...).Get(&obj) - if err != nil { - logger.Errorf("mysql.error: query user_token fail: %v", err) - return nil, internalServerError - } - - if !has { - return nil, nil - } - - return &obj, nil -} - -func UserTokenGets(where string, args ...interface{}) ([]UserToken, error) { - var objs []UserToken - err := DB.Where(where, args...).OrderBy("token").Find(&objs) 
- if err != nil { - logger.Errorf("mysql.error: list user_token fail: %v", err) - return objs, internalServerError - } - - if objs == nil { - return []UserToken{}, nil - } - - return objs, nil -} - -func UserTokenNew(userId int64, username string) (*UserToken, error) { - items, err := UserTokenGets("user_id=?", userId) - if err != nil { - return nil, err - } - - if len(items) >= 2 { - return nil, _e("Each user has at most two tokens") - } - - obj := UserToken{ - UserId: userId, - Username: username, - Token: genToken(userId), - } - - err = DBInsertOne(obj) - if err != nil { - return nil, err - } - - return &obj, nil -} - -func UserTokenReset(userId int64, token string) (*UserToken, error) { - obj, err := UserTokenGet("token=? and user_id=?", token, userId) - if err != nil { - return nil, err - } - - if obj == nil { - return nil, _e("No such token") - } - - obj.Token = genToken(userId) - _, err = DB.Where("user_id=? and token=?", userId, token).Cols("token").Update(obj) - if err != nil { - logger.Errorf("mysql.error: update user_token fail: %v", err) - return nil, internalServerError - } - - return obj, nil -} - -func genToken(userId int64) string { - now := time.Now().UnixNano() - rls := str.RandLetters(6) - return str.MD5(fmt.Sprintf("%d%d%d%s", os.Getpid(), userId, now, rls)) -} diff --git a/pkg/i18n/i18n.go b/pkg/i18n/i18n.go deleted file mode 100644 index 30aa0ae5..00000000 --- a/pkg/i18n/i18n.go +++ /dev/null @@ -1,108 +0,0 @@ -package i18n - -import ( - "encoding/json" - "fmt" - "io" - "path" - "strings" - - "golang.org/x/text/language" - "golang.org/x/text/message" - - "github.com/toolkits/pkg/file" - "github.com/toolkits/pkg/runner" -) - -type Config struct { - Lang string `yaml:"lang"` - DictPath string `yaml:"dictPath"` -} - -var p *message.Printer -var defaultConfig Config - -// Init will init i18n support via input language. -func Init(configs ...Config) { - defaultConfig.Lang = "zh" - defaultConfig.DictPath = path.Join(runner.Cwd, "etc", "i18n.json") - - config := defaultConfig - if len(configs) > 0 { - config = configs[0] - } - - if config.Lang == "" { - config.Lang = defaultConfig.Lang - } - - if config.DictPath == "" { - config.DictPath = defaultConfig.DictPath - } - - DictFileRegister(config.DictPath) - p = message.NewPrinter(langTag(config.Lang)) -} - -func DictFileRegister(filePath string) { - if !file.IsExist(filePath) { - // fmt.Printf("i18n config file %s not found. donot worry, we'll use default configuration\n", filePath) - return - } - - content, err := file.ToTrimString(filePath) - if err != nil { - fmt.Printf("read i18n config file %s fail: %s\n", filePath, err) - return - } - - m := make(map[string]map[string]string) - err = json.Unmarshal([]byte(content), &m) - if err != nil { - fmt.Printf("parse i18n config file %s fail: %s\n", filePath, err) - return - } - - DictRegister(m) -} - -func DictRegister(m map[string]map[string]string) { - for lang, dict := range m { - tag := langTag(lang) - if tag == language.English { - continue - } - for k, v := range dict { - message.SetString(tag, k, v) - } - } -} - -func langTag(l string) language.Tag { - switch strings.ToLower(l) { - case "zh", "cn": - return language.Chinese - default: - return language.English - } -} - -// Fprintf is like fmt.Fprintf, but using language-specific formatting. -func Fprintf(w io.Writer, key message.Reference, a ...interface{}) (n int, err error) { - return p.Fprintf(w, key, a...) -} - -// Printf is like fmt.Printf, but using language-specific formatting. 
-func Printf(format string, a ...interface{}) { - _, _ = p.Printf(format, a...) -} - -// Sprintf formats according to a format specifier and returns the resulting string. -func Sprintf(format string, a ...interface{}) string { - return p.Sprintf(format, a...) -} - -// Sprint is like fmt.Sprint, but using language-specific formatting. -func Sprint(a ...interface{}) string { - return p.Sprint(a...) -} diff --git a/pkg/iconf/iconf.go b/pkg/iconf/iconf.go deleted file mode 100644 index 9ef13404..00000000 --- a/pkg/iconf/iconf.go +++ /dev/null @@ -1,26 +0,0 @@ -package iconf - -// just for nightingale only - -import ( - "path" - - "github.com/toolkits/pkg/file" - "github.com/toolkits/pkg/runner" -) - -func GetYmlFile(module string) string { - confdir := path.Join(runner.Cwd, "etc") - - yml := path.Join(confdir, module+".local.yml") - if file.IsExist(yml) { - return yml - } - - yml = path.Join(confdir, module+".yml") - if file.IsExist(yml) { - return yml - } - - return "" -} diff --git a/pkg/ierr/error.go b/pkg/ierr/error.go deleted file mode 100644 index a41344a5..00000000 --- a/pkg/ierr/error.go +++ /dev/null @@ -1,42 +0,0 @@ -package ierr - -import ( - "fmt" -) - -type PageError struct { - Message string - Code int -} - -func (p PageError) Error() string { - return p.Message -} - -func (p PageError) String() string { - return p.Message -} - -func Bomb(code int, format string, a ...interface{}) { - panic(PageError{Code: code, Message: fmt.Sprintf(format, a...)}) -} - -func Dangerous(v interface{}, code ...int) { - if v == nil { - return - } - - c := 200 - if len(code) > 0 { - c = code[0] - } - - switch t := v.(type) { - case string: - if t != "" { - panic(PageError{Code: c, Message: t}) - } - case error: - panic(PageError{Code: c, Message: t.Error()}) - } -} diff --git a/pkg/ilog/ilog.go b/pkg/ilog/ilog.go deleted file mode 100644 index bb2fe1fd..00000000 --- a/pkg/ilog/ilog.go +++ /dev/null @@ -1,38 +0,0 @@ -package ilog - -import ( - "fmt" - "os" - - "github.com/toolkits/pkg/logger" -) - -type Config struct { - Dir string `yaml:"dir"` - Level string `yaml:"level"` - KeepHours uint `yaml:"keepHours"` - Rotatenum int `yaml:"rotatenum"` - Rotatesize uint64 `yaml:"rotatesize"` -} - -// InitLogger init logger toolkit -func Init(c Config) { - lb, err := logger.NewFileBackend(c.Dir) - if err != nil { - fmt.Println("cannot init logger:", err) - os.Exit(1) - } - - //设置了以小时切换文件,优先使用小时切割文件 - if c.KeepHours != 0 { - lb.SetRotateByHour(true) - lb.SetKeepHours(c.KeepHours) - } else if c.Rotatenum != 0 { - lb.Rotate(c.Rotatenum, c.Rotatesize*1024*1024) - } else { - fmt.Println("cannot init logger: KeepHours and Rotatenum both are 0") - os.Exit(2) - } - - logger.SetLogging(c.Level, lb) -} diff --git a/pkg/ipool/ipool.go b/pkg/ipool/ipool.go deleted file mode 100644 index 09253945..00000000 --- a/pkg/ipool/ipool.go +++ /dev/null @@ -1,200 +0,0 @@ -package ipool - -import ( - "bufio" - "fmt" - "io" - "net" - "net/rpc" - "reflect" - "sync" - "time" - - "github.com/toolkits/pkg/pool" - "github.com/ugorji/go/codec" -) - -// ConnPools is responsible for the Connection Pool lifecycle management. 
-type ConnPools struct { - sync.RWMutex - P map[string]*pool.ConnPool - MaxConns int - MaxIdle int - ConnTimeout int - CallTimeout int -} - -func NewConnPools(maxConns, maxIdle, connTimeout, callTimeout int, cluster []string) *ConnPools { - cp := &ConnPools{ - P: make(map[string]*pool.ConnPool), - MaxConns: maxConns, - MaxIdle: maxIdle, - ConnTimeout: connTimeout, - CallTimeout: callTimeout, - } - - ct := time.Duration(cp.ConnTimeout) * time.Millisecond - for _, address := range cluster { - if _, exist := cp.P[address]; exist { - continue - } - cp.P[address] = createOnePool(address, address, ct, maxConns, maxIdle) - } - return cp -} - -func createOnePool(name, address string, connTimeout time.Duration, maxConns, maxIdle int) *pool.ConnPool { - p := pool.NewConnPool(name, address, maxConns, maxIdle) - p.New = func(connName string) (pool.NConn, error) { - // valid address - _, err := net.ResolveTCPAddr("tcp", p.Address) - if err != nil { - return nil, err - } - - conn, err := net.DialTimeout("tcp", p.Address, connTimeout) - if err != nil { - return nil, err - } - var mh codec.MsgpackHandle - mh.MapType = reflect.TypeOf(map[string]interface{}(nil)) - - // bufconn here is a buffered io.ReadWriteCloser - var bufconn = struct { - io.Closer - *bufio.Reader - *bufio.Writer - }{Closer: conn, Reader: bufio.NewReader(conn), Writer: bufio.NewWriter(conn)} - - rpcCodec := codec.MsgpackSpecRpc.ClientCodec(bufconn, &mh) - return RpcClient{cli: rpc.NewClientWithCodec(rpcCodec), name: connName}, nil - } - return p -} - -// Call will block until request failed or timeout. -func (cp *ConnPools) Call(addr, method string, args interface{}, resp interface{}) error { - var selectedPool *pool.ConnPool - var exists bool - - // if address is empty, we will select a available pool from cp.P randomly. - // map-range function gets random keys order every time. - if addr == "" { - for _, p := range cp.P { - if p != nil { - selectedPool = p - break - } - } - } else { - selectedPool, exists = cp.Get(addr) - if !exists { - return fmt.Errorf("%s has no connection pool", addr) - } - } - - // make sure the selected pool alive. - if selectedPool == nil { - return fmt.Errorf("no connection pool available") - } - - connPool := selectedPool - conn, err := connPool.Fetch() - if err != nil { - return fmt.Errorf("%s get connection fail: conn %v, err %v. proc: %s", addr, conn, err, connPool.Proc()) - } - - rpcClient := conn.(RpcClient) - callTimeout := time.Duration(cp.CallTimeout) * time.Millisecond - - done := make(chan error, 1) - go func() { - done <- rpcClient.Call(method, args, resp) - }() - - select { - case <-time.After(callTimeout): - connPool.ForceClose(conn) - return fmt.Errorf("%s, call timeout", addr) - case err = <-done: - if err != nil { - connPool.ForceClose(conn) - err = fmt.Errorf("%s, call failed, err %v. 
proc: %s", addr, err, connPool.Proc()) - } else { - connPool.Release(conn) - } - return err - } -} - -func (cp *ConnPools) Get(address string) (*pool.ConnPool, bool) { - cp.RLock() - defer cp.RUnlock() - - p, exists := cp.P[address] - return p, exists -} - -func (cp *ConnPools) UpdatePools(addrs []string) []string { - cp.Lock() - defer cp.Unlock() - - newAddrs := make([]string, 0) - if len(addrs) == 0 { - cp.P = make(map[string]*pool.ConnPool) - return newAddrs - } - addrMap := make(map[string]struct{}) - - ct := time.Duration(cp.ConnTimeout) * time.Millisecond - for _, addr := range addrs { - addrMap[addr] = struct{}{} - _, exists := cp.P[addr] - if exists { - continue - } - newAddrs = append(newAddrs, addr) - cp.P[addr] = createOnePool(addr, addr, ct, cp.MaxConns, cp.MaxIdle) - } - - toDel := make(map[string]struct{}) - - for addr := range cp.P { - if _, exists := addrMap[addr]; !exists { - toDel[addr] = struct{}{} - } - } - - for addr := range toDel { - delete(cp.P, addr) - } - - return newAddrs -} - -// RpcClient implements the io.Closer interface -type RpcClient struct { - cli *rpc.Client - name string -} - -func (rc RpcClient) Name() string { - return rc.name -} - -func (rc RpcClient) Closed() bool { - return rc.cli == nil -} - -func (rc RpcClient) Close() error { - if rc.cli != nil { - err := rc.cli.Close() - rc.cli = nil - return err - } - return nil -} - -func (rc RpcClient) Call(method string, args, reply interface{}) error { - return rc.cli.Call(method, args, reply) -} diff --git a/pkg/istr/checker.go b/pkg/istr/checker.go deleted file mode 100644 index 5acacf59..00000000 --- a/pkg/istr/checker.go +++ /dev/null @@ -1,24 +0,0 @@ -package istr - -import ( - "strconv" - "strings" -) - -func SampleKeyInvalid(str string) bool { - idx := strings.IndexFunc(str, func(r rune) bool { - return r == '\t' || - r == '\r' || - r == '\n' || - r == ',' || - r == ' ' || - r == '=' - }) - - if idx != -1 { - return true - } - - _, err := strconv.ParseFloat(str, 64) - return err == nil -} diff --git a/pkg/istr/checksum.go b/pkg/istr/checksum.go deleted file mode 100644 index 6df85574..00000000 --- a/pkg/istr/checksum.go +++ /dev/null @@ -1,17 +0,0 @@ -package istr - -// func Checksum(endpoint string, metric string, tags string) string { -// return str.MD5(PK(endpoint, metric, tags)) -// } - -// func GetKey(filename string) string { -// arr := strings.Split(filename, "/") -// if len(arr) < 2 { -// return "" -// } -// a := strings.Split(arr[1], "_") -// if len(a) > 1 { -// return a[0] -// } -// return "" -// } diff --git a/pkg/istr/format.go b/pkg/istr/format.go deleted file mode 100644 index b954afd3..00000000 --- a/pkg/istr/format.go +++ /dev/null @@ -1,152 +0,0 @@ -package istr - -import ( - "bytes" - "sync" -) - -const SEPERATOR = "/" - -var bufferPool = sync.Pool{New: func() interface{} { return new(bytes.Buffer) }} - -//strs目前有三种类别 -// endpoint/metric/tags -// endpoint/counter -// metric/tags -// strs 参数必须按照上面的顺序来入参 -// func PK(strs ...string) string { -// ret := bufferPool.Get().(*bytes.Buffer) -// ret.Reset() -// defer bufferPool.Put(ret) -// count := len(strs) -// if count == 0 { -// return "" -// } - -// ret.WriteString(strs[0]) -// for i := 1; i < count-1; i++ { -// ret.WriteString(SEPERATOR) -// ret.WriteString(strs[i]) -// } - -// if strs[count-1] != "" { -// ret.WriteString(SEPERATOR) -// ret.WriteString(strs[count-1]) -// } - -// return ret.String() -// } - -// func UUID(endpoint, metric, tags, dstype string, step int) string { -// ret := bufferPool.Get().(*bytes.Buffer) -// ret.Reset() 
-// defer bufferPool.Put(ret) - -// if tags == "" { -// ret.WriteString(endpoint) -// ret.WriteString(SEPERATOR) -// ret.WriteString(metric) -// ret.WriteString(SEPERATOR) -// ret.WriteString(dstype) -// ret.WriteString(SEPERATOR) -// ret.WriteString(strconv.Itoa(step)) - -// return ret.String() -// } -// ret.WriteString(endpoint) -// ret.WriteString(SEPERATOR) -// ret.WriteString(metric) -// ret.WriteString(SEPERATOR) -// ret.WriteString(tags) -// ret.WriteString(SEPERATOR) -// ret.WriteString(dstype) -// ret.WriteString(SEPERATOR) -// ret.WriteString(strconv.Itoa(step)) - -// return ret.String() -// } - -// func XXhash(strs ...string) uint64 { -// ret := bufferPool.Get().(*bytes.Buffer) -// ret.Reset() -// defer bufferPool.Put(ret) -// count := len(strs) -// if count == 0 { -// return 0 -// } - -// ret.WriteString(strs[0]) -// for i := 1; i < count-1; i++ { -// ret.WriteString(SEPERATOR) -// ret.WriteString(strs[i]) -// } - -// if strs[count-1] != "" { -// ret.WriteString(SEPERATOR) -// ret.WriteString(strs[count-1]) -// } - -// return xxhash.Sum64(ret.Bytes()) -// } - -// func ToMD5(endpoint string, metric string, tags string) string { -// return str.MD5(PK(endpoint, metric, tags)) -// } - -// func SortedTags(tags map[string]string) string { -// if tags == nil { -// return "" -// } - -// size := len(tags) - -// if size == 0 { -// return "" -// } - -// ret := bufferPool.Get().(*bytes.Buffer) -// ret.Reset() -// defer bufferPool.Put(ret) - -// if size == 1 { -// for k, v := range tags { -// ret.WriteString(k) -// ret.WriteString("=") -// ret.WriteString(v) -// } -// return ret.String() -// } - -// keys := make([]string, size) -// i := 0 -// for k := range tags { -// keys[i] = k -// i++ -// } - -// sort.Strings(keys) - -// for j, key := range keys { -// ret.WriteString(key) -// ret.WriteString("=") -// ret.WriteString(tags[key]) -// if j != size-1 { -// ret.WriteString(",") -// } -// } - -// return ret.String() -// } - -// func UnixTsFormat(ts int64) string { -// return time.Unix(ts, 0).Format("2006-01-02 15:04:05") -// } - -// func IdsString(ids []int) string { -// count := len(ids) -// arr := make([]string, count) -// for i := 0; i < count; i++ { -// arr[i] = fmt.Sprintf("%d", ids[i]) -// } -// return strings.Join(arr, ",") -// } diff --git a/pkg/istr/parser.go b/pkg/istr/parser.go deleted file mode 100644 index 2c94ceee..00000000 --- a/pkg/istr/parser.go +++ /dev/null @@ -1,22 +0,0 @@ -package istr - -// import "strings" - -// func TrimStringSlice(raw []string) []string { -// if raw == nil { -// return []string{} -// } - -// cnt := len(raw) -// arr := make([]string, 0, cnt) -// for i := 0; i < cnt; i++ { -// item := strings.TrimSpace(raw[i]) -// if item == "" { -// continue -// } - -// arr = append(arr, item) -// } - -// return arr -// } diff --git a/pkg/istr/slice.go b/pkg/istr/slice.go deleted file mode 100644 index 7085a23b..00000000 --- a/pkg/istr/slice.go +++ /dev/null @@ -1,78 +0,0 @@ -package istr - -// func Contains(smallSlice, bigSlice []string) bool { -// for i := 0; i < len(smallSlice); i++ { -// if !InSlice(smallSlice[i], bigSlice) { -// return false -// } - -// } - -// return true -// } - -// func InSlice(val string, slice []string) bool { -// for i := 0; i < len(slice); i++ { -// if slice[i] == val { -// return true -// } -// } - -// return false -// } - -// 分割m, 每次n个 -// func SplitN(m, n int) [][]int { -// var res [][]int - -// if n <= 0 { -// return [][]int{{0, m}} -// } - -// for i := 0; i < m; i = i + n { -// var start, end int -// start = i -// end = i + n - -// if 
end >= m { -// end = m -// } - -// res = append(res, []int{start, end}) - -// } -// return res -// } - -// slice去重,这个方法不好 -// func Set(s []string) []string { -// m := make(map[string]interface{}) -// for i := 0; i < len(s); i++ { -// if strings.TrimSpace(s[i]) == "" { -// continue -// } - -// m[s[i]] = 1 -// } - -// s2 := []string{} -// for k := range m { -// s2 = append(s2, k) -// } - -// return s2 -// } - -// func SetInt64(s []int64) []int64 { -// m := make(map[int64]interface{}) -// for i := 0; i < len(s); i++ { -// m[s[i]] = 1 -// } - -// s2 := []int64{} -// for k := range m { -// s2 = append(s2, k) -// } - -// return s2 -// } diff --git a/rpc/ping.go b/rpc/ping.go deleted file mode 100644 index ef37e779..00000000 --- a/rpc/ping.go +++ /dev/null @@ -1,7 +0,0 @@ -package rpc - -// Ping return string 'pong', just for test -func (*Server) Ping(input string, output *string) error { - *output = "pong" - return nil -} diff --git a/rpc/push.go b/rpc/push.go deleted file mode 100644 index 97b86a50..00000000 --- a/rpc/push.go +++ /dev/null @@ -1,22 +0,0 @@ -package rpc - -import ( - "github.com/didi/nightingale/v5/judge" - "github.com/didi/nightingale/v5/trans" - "github.com/didi/nightingale/v5/vos" -) - -// 通过普通rpc的方式(msgpack)上报数据 -func (*Server) PushToTrans(points []*vos.MetricPoint, reply *string) error { - err := trans.Push(points) - if err != nil { - *reply = err.Error() - } - return nil -} - -// server内部做数据重排,推送数据给告警引擎 -func (*Server) PushToJudge(points []*vos.MetricPoint, reply *string) error { - go judge.Send(points) - return nil -} diff --git a/rpc/rpc.go b/rpc/rpc.go deleted file mode 100644 index 3698b2b6..00000000 --- a/rpc/rpc.go +++ /dev/null @@ -1,60 +0,0 @@ -package rpc - -import ( - "bufio" - "fmt" - "io" - "net" - "net/rpc" - "os" - "reflect" - "time" - - "github.com/toolkits/pkg/logger" - "github.com/ugorji/go/codec" - - "github.com/didi/nightingale/v5/config" -) - -type Server int - -func Start() { - go serve() -} - -func serve() { - addr := config.Config.RPC.Listen - - server := rpc.NewServer() - server.Register(new(Server)) - - l, err := net.Listen("tcp", addr) - if err != nil { - fmt.Printf("fail to listen on: %s, error: %v\n", addr, err) - os.Exit(1) - } - - fmt.Println("rpc.listening:", addr) - - var mh codec.MsgpackHandle - mh.MapType = reflect.TypeOf(map[string]interface{}(nil)) - - duration := time.Duration(100) * time.Millisecond - - for { - conn, err := l.Accept() - if err != nil { - logger.Warningf("listener accept error: %v", err) - time.Sleep(duration) - continue - } - - var bufconn = struct { - io.Closer - *bufio.Reader - *bufio.Writer - }{conn, bufio.NewReader(conn), bufio.NewWriter(conn)} - - go server.ServeCodec(codec.MsgpackSpecRpc.ServerCodec(bufconn, &mh)) - } -} diff --git a/sql/n9e.sql b/sql/n9e.sql deleted file mode 100644 index aca23cbd..00000000 --- a/sql/n9e.sql +++ /dev/null @@ -1,489 +0,0 @@ -set names utf8; - -drop database if exists n9e; -create database n9e; -use n9e; - -CREATE TABLE `user` ( - `id` bigint unsigned not null auto_increment, - `username` varchar(64) not null comment 'login name, cannot rename', - `nickname` varchar(64) not null comment 'display name, chinese name', - `password` varchar(128) not null, - `phone` varchar(16) not null default '', - `email` varchar(64) not null default '', - `portrait` varchar(255) not null default '' comment 'portrait image url', - `status` tinyint(1) not null default 0 comment '0: active, 1: disabled', - `roles` varchar(255) not null comment 'Admin | Standard | Guest', - `contacts` varchar(1024) 
default '' comment 'json e.g. {wecom:xx, dingtalk_robot_token:yy}', - `create_at` bigint not null default 0, - `create_by` varchar(64) not null default '', - `update_at` bigint not null default 0, - `update_by` varchar(64) not null default '', - PRIMARY KEY (`id`), - UNIQUE KEY (`username`) -) ENGINE = InnoDB DEFAULT CHARSET = utf8; - -CREATE TABLE `user_token` ( - `user_id` bigint unsigned not null, - `username` varchar(64) not null, - `token` varchar(128) not null, - KEY (`user_id`), - KEY (`username`), - UNIQUE KEY (`token`) -) ENGINE = InnoDB DEFAULT CHARSET = utf8; - -CREATE TABLE `user_group` ( - `id` bigint unsigned not null auto_increment, - `name` varchar(128) not null default '', - `note` varchar(255) not null default '', - `create_at` bigint not null default 0, - `create_by` varchar(64) not null default '', - `update_at` bigint not null default 0, - `update_by` varchar(64) not null default '', - PRIMARY KEY (`id`), - KEY (`create_by`) -) ENGINE = InnoDB DEFAULT CHARSET = utf8; - -CREATE TABLE `user_group_member` ( - `group_id` bigint unsigned not null, - `user_id` bigint unsigned not null, - KEY (`group_id`), - KEY (`user_id`) -) ENGINE = InnoDB DEFAULT CHARSET = utf8; - -CREATE TABLE `configs` ( - `id` bigint unsigned not null auto_increment, - `ckey` varchar(255) not null, - `cval` varchar(1024) not null default '', - PRIMARY KEY (`id`), - UNIQUE KEY (`ckey`) -) ENGINE = InnoDB DEFAULT CHARSET = utf8; - -CREATE TABLE `role` ( - `id` bigint unsigned not null auto_increment, - `name` varchar(128) not null default '', - `note` varchar(255) not null default '', - PRIMARY KEY (`id`), - UNIQUE KEY (`name`) -) ENGINE = InnoDB DEFAULT CHARSET = utf8; - -insert into `role`(name, note) values('Admin', 'Administrator role'); -insert into `role`(name, note) values('Standard', 'Ordinary user role'); -insert into `role`(name, note) values('Guest', 'Readonly user role'); - -CREATE TABLE `role_operation`( - `role_name` varchar(128) not null, - `operation` varchar(255) not null, - KEY (`role_name`), - KEY (`operation`) -) ENGINE = InnoDB DEFAULT CHARSET = utf8; - --- Admin is special, who has no concrete operation but can do anything. 
-insert into `role_operation`(role_name, operation) values('Standard', 'classpath_create'); -insert into `role_operation`(role_name, operation) values('Standard', 'classpath_modify'); -insert into `role_operation`(role_name, operation) values('Standard', 'classpath_delete'); -insert into `role_operation`(role_name, operation) values('Standard', 'classpath_add_resource'); -insert into `role_operation`(role_name, operation) values('Standard', 'classpath_del_resource'); -insert into `role_operation`(role_name, operation) values('Standard', 'metric_description_create'); -insert into `role_operation`(role_name, operation) values('Standard', 'metric_description_modify'); -insert into `role_operation`(role_name, operation) values('Standard', 'metric_description_delete'); -insert into `role_operation`(role_name, operation) values('Standard', 'mute_create'); -insert into `role_operation`(role_name, operation) values('Standard', 'mute_delete'); -insert into `role_operation`(role_name, operation) values('Standard', 'dashboard_create'); -insert into `role_operation`(role_name, operation) values('Standard', 'dashboard_modify'); -insert into `role_operation`(role_name, operation) values('Standard', 'dashboard_delete'); -insert into `role_operation`(role_name, operation) values('Standard', 'alert_rule_group_create'); -insert into `role_operation`(role_name, operation) values('Standard', 'alert_rule_group_modify'); -insert into `role_operation`(role_name, operation) values('Standard', 'alert_rule_group_delete'); -insert into `role_operation`(role_name, operation) values('Standard', 'alert_rule_create'); -insert into `role_operation`(role_name, operation) values('Standard', 'alert_rule_modify'); -insert into `role_operation`(role_name, operation) values('Standard', 'alert_rule_delete'); -insert into `role_operation`(role_name, operation) values('Standard', 'alert_event_delete'); -insert into `role_operation`(role_name, operation) values('Standard', 'alert_event_modify'); -insert into `role_operation`(role_name, operation) values('Standard', 'collect_rule_create'); -insert into `role_operation`(role_name, operation) values('Standard', 'collect_rule_modify'); -insert into `role_operation`(role_name, operation) values('Standard', 'collect_rule_delete'); -insert into `role_operation`(role_name, operation) values('Standard', 'resource_modify'); - -CREATE TABLE `instance` ( - `service` varchar(128) not null, - `endpoint` varchar(255) not null comment 'ip:port', - `clock` datetime not null, - KEY (`service`) -) ENGINE = InnoDB DEFAULT CHARSET = utf8; - --- if mute_etime < now(), the two mute columns should be reset to 0 -CREATE TABLE `resource` ( - `id` bigint unsigned not null auto_increment, - `ident` varchar(255) not null, - `alias` varchar(128) not null default '' comment 'auto detect, just for debug', - `tags` varchar(512) not null default '' comment 'will append to event', - `note` varchar(255) not null default '', - `mute_btime` bigint not null default 0 comment 'mute begin time', - `mute_etime` bigint not null default 0 comment 'mute end time', - PRIMARY KEY (`id`), - UNIQUE KEY (`ident`) -) ENGINE = InnoDB DEFAULT CHARSET = utf8; - -CREATE TABLE `classpath` ( - `id` bigint unsigned not null auto_increment, - `path` varchar(512) not null comment 'required. e.g. 
duokan.tv.engine.x.y.z', - `note` varchar(255) not null default '', - `preset` tinyint(1) not null default 0 comment 'if preset, cannot delete and modify', - `create_at` bigint not null default 0, - `create_by` varchar(64) not null default '', - `update_at` bigint not null default 0, - `update_by` varchar(64) not null default '', - PRIMARY KEY (`id`), - KEY (`path`) -) ENGINE = InnoDB DEFAULT CHARSET = utf8; - --- new resource will bind classpath(id=1) automatically -insert into classpath(id, path, note, preset, create_by, update_by, create_at, update_at) values(1, 'all.resources', 'preset classpath, all resources belong to', 1, 'system', 'system', unix_timestamp(now()), unix_timestamp(now())); - -CREATE TABLE `classpath_resource` ( - `id` bigint unsigned not null auto_increment, - `classpath_id` bigint unsigned not null, - `res_ident` varchar(255) not null, - PRIMARY KEY (`id`), - KEY (`classpath_id`), - KEY (`res_ident`) -) ENGINE = InnoDB DEFAULT CHARSET = utf8; - -CREATE TABLE `classpath_favorite` ( - `id` bigint unsigned not null auto_increment, - `classpath_id` bigint not null, - `user_id` bigint not null, - PRIMARY KEY (`id`), - KEY (`user_id`) -) ENGINE = InnoDB DEFAULT CHARSET = utf8; - -CREATE TABLE `mute` ( - `id` bigint unsigned not null auto_increment, - `classpath_prefix` varchar(255) not null default '' comment 'classpath prefix', - `metric` varchar(255) not null comment 'required', - `res_filters` varchar(4096) not null default 'resource filters', - `tag_filters` varchar(8192) not null default '', - `cause` varchar(255) not null default '', - `btime` bigint not null default 0 comment 'begin time', - `etime` bigint not null default 0 comment 'end time', - `create_at` bigint not null default 0, - `create_by` varchar(64) not null default '', - PRIMARY KEY (`id`), - KEY (`metric`), - KEY (`create_by`) -) ENGINE = InnoDB DEFAULT CHARSET = utf8; - -CREATE TABLE `dashboard` ( - `id` bigint unsigned not null auto_increment, - `name` varchar(255) not null, - `tags` varchar(255) not null, - `configs` varchar(4096) comment 'dashboard variables', - `create_at` bigint not null default 0, - `create_by` varchar(64) not null default '', - `update_at` bigint not null default 0, - `update_by` varchar(64) not null default '', - PRIMARY KEY (`id`), - UNIQUE KEY (`name`) -) ENGINE = InnoDB DEFAULT CHARSET = utf8; - -CREATE TABLE `dashboard_favorite` ( - `id` bigint unsigned not null auto_increment, - `dashboard_id` bigint not null comment 'dashboard id', - `user_id` bigint not null comment 'user id', - PRIMARY KEY (`id`), - KEY (`user_id`) -) ENGINE = InnoDB DEFAULT CHARSET = utf8; - --- auto create the first subclass 'Default chart group' of dashboard -CREATE TABLE `chart_group` ( - `id` bigint unsigned not null auto_increment, - `dashboard_id` bigint unsigned not null, - `name` varchar(255) not null, - `weight` int not null default 0, - PRIMARY KEY (`id`), - KEY (`dashboard_id`) -) ENGINE = InnoDB DEFAULT CHARSET = utf8; - -CREATE TABLE `chart` ( - `id` bigint unsigned not null auto_increment, - `group_id` bigint unsigned not null comment 'chart group id', - `configs` varchar(8192), - `weight` int not null default 0, - PRIMARY KEY (`id`), - KEY (`group_id`) -) ENGINE = InnoDB DEFAULT CHARSET = utf8; - -CREATE TABLE `chart_tmp` ( - `id` bigint unsigned not null auto_increment, - `configs` varchar(8192), - `create_at` bigint not null default 0, - `create_by` varchar(64) not null default '', - primary key (`id`) -) ENGINE = InnoDB DEFAULT CHARSET = utf8; - -CREATE TABLE `collect_rule` ( - `id` 
bigint unsigned not null auto_increment, - `classpath_id` bigint not null, - `prefix_match` tinyint(1) not null default 0 comment '0: no 1: yes', - `name` varchar(255) not null default '', - `note` varchar(255) not null default '', - `step` int not null, - `type` varchar(64) not null comment 'e.g. port proc log plugin mysql', - `data` text not null, - `append_tags` varchar(255) not null default '' comment 'e.g. mod=n9e', - `create_at` bigint not null default 0, - `create_by` varchar(64) not null default '', - `update_at` bigint not null default 0, - `update_by` varchar(64) not null default '', - PRIMARY KEY (`id`), - KEY (`classpath_id`, `type`), - KEY (`name`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -CREATE TABLE `alert_rule_group` ( - `id` bigint unsigned not null auto_increment, - `name` varchar(255) not null, - `user_group_ids` varchar(255) not null default '' comment 'readwrite user group ids', - `create_at` bigint not null default 0, - `create_by` varchar(64) not null default '', - `update_at` bigint not null default 0, - `update_by` varchar(64) not null default '', - PRIMARY KEY (`id`), - UNIQUE KEY (`name`) -) ENGINE = InnoDB DEFAULT CHARSET = utf8; - -insert into alert_rule_group(name,create_at,create_by,update_at,update_by) values('Default Rule Group', unix_timestamp(now()), 'system', unix_timestamp(now()), 'system'); - -CREATE TABLE `alert_rule_group_favorite` ( - `id` bigint unsigned not null auto_increment, - `group_id` bigint not null comment 'alert_rule group id', - `user_id` bigint not null comment 'user id', - PRIMARY KEY (`id`), - KEY (`user_id`) -) ENGINE = InnoDB DEFAULT CHARSET = utf8; - -CREATE TABLE `alert_rule` ( - `id` bigint unsigned not null auto_increment, - `group_id` bigint not null default 0 comment 'alert_rule group id', - `name` varchar(255) not null, - `note` varchar(255) not null, - `type` tinyint(1) not null comment '0 n9e 1 promql', - `status` tinyint(1) not null comment '0 enable 1 disable', - `alert_duration` int not null comment 'unit:s', - `expression` varchar(4096) not null comment 'rule expression', - `enable_stime` char(5) not null default '00:00', - `enable_etime` char(5) not null default '23:59', - `enable_days_of_week` varchar(32) not null default '' comment 'split by space: 0 1 2 3 4 5 6', - `recovery_notify` tinyint(1) not null comment 'whether notify when recovery', - `priority` tinyint(1) not null, - `notify_channels` varchar(255) not null default '' comment 'split by space: sms voice email dingtalk wecom', - `notify_groups` varchar(255) not null default '' comment 'split by space: 233 43', - `notify_users` varchar(255) not null default '' comment 'split by space: 2 5', - `callbacks` varchar(255) not null default '' comment 'split by space: http://a.com/api/x http://a.com/api/y', - `runbook_url` varchar(255), - `append_tags` varchar(255) not null default '' comment 'split by space: service=n9e mod=api', - `create_at` bigint not null default 0, - `create_by` varchar(64) not null default '', - `update_at` bigint not null default 0, - `update_by` varchar(64) not null default '', - PRIMARY KEY (`id`), - KEY (`group_id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -CREATE TABLE `alert_event` ( - `id` bigint unsigned not null auto_increment, - `hash_id` varchar(255) not null comment 'rule_id + point_pk', - `rule_id` bigint unsigned not null, - `rule_name` varchar(255) not null, - `rule_note` varchar(512) not null default 'alert rule note', - `res_classpaths` varchar(1024) not null default '' comment 'belong classpaths', - `priority` tinyint(1) not 
null, - `status` tinyint(1) not null, - `is_prome_pull` tinyint(1) not null, - `history_points` text comment 'metric, history points', - `trigger_time` bigint not null, - `notify_channels` varchar(255) not null default '', - `notify_groups` varchar(255) not null default '', - `notify_users` varchar(255) not null default '', - `runbook_url` varchar(255), - `readable_expression` varchar(1024) not null comment 'e.g. mem.bytes.used.percent(all,60s) > 0', - `tags` varchar(1024) not null default '' comment 'merge data_tags rule_tags and res_tags', - PRIMARY KEY (`id`), - KEY (`hash_id`), - KEY (`rule_id`), - KEY (`trigger_time`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -CREATE TABLE `history_alert_event` ( - `id` bigint unsigned not null AUTO_INCREMENT, - `hash_id` varchar(255) not null COMMENT 'rule_id + point_pk', - `rule_id` bigint unsigned not null, - `rule_name` varchar(255) not null, - `rule_note` varchar(512) not null default 'alert rule note', - `res_classpaths` varchar(1024) not null default '' COMMENT 'belong classpaths', - `priority` tinyint(1) not null, - `status` tinyint(1) not null, - `is_prome_pull` tinyint(1) not null, - `is_recovery` tinyint(1) not null, - `history_points` text COMMENT 'metric, history points', - `trigger_time` bigint not null, - `notify_channels` varchar(255) not null default '', - `notify_groups` varchar(255) not null default '', - `notify_users` varchar(255) not null default '', - `runbook_url` varchar(255) default NULL, - `readable_expression` varchar(1024) not null COMMENT 'e.g. mem.bytes.used.percent(all,60s) > 0', - `tags` varchar(1024) not null default '' COMMENT 'merge data_tags rule_tags and res_tags', - PRIMARY KEY (`id`), - KEY `hash_id` (`hash_id`), - KEY `rule_id` (`rule_id`), - KEY `trigger_time` (`trigger_time`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -CREATE TABLE `metric_description` ( - `id` bigint unsigned not null auto_increment, - `metric` varchar(255) not null default '', - `description` varchar(255) not null default '', - PRIMARY KEY (`id`), - UNIQUE KEY (`metric`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -insert into metric_description(metric, description) values('system_ntp_offset', '系统时间偏移量'); -insert into metric_description(metric, description) values('system_proc_count', '系统进程个数'); -insert into metric_description(metric, description) values('system_uptime', '系统运行的时间'); -insert into metric_description(metric, description) values('system_cpu_util', '总体CPU使用率(单位:%)'); -insert into metric_description(metric, description) values('system_cpu_switches', 'cpu上下文交换次数'); -insert into metric_description(metric, description) values('system_cpu_guest', '虚拟处理器CPU时间占比(单位:%)'); -insert into metric_description(metric, description) values('system_cpu_idle', '总体CPU空闲率(单位:%)'); -insert into metric_description(metric, description) values('system_cpu_iowait', '等待I/O的CPU时间占比(单位:%)'); -insert into metric_description(metric, description) values('system_cpu_num_cores', 'CPU核心数'); -insert into metric_description(metric, description) values('system_cpu_steal', '等待处理其他虚拟核的时间占比(单位:%)'); -insert into metric_description(metric, description) values('system_cpu_system', '内核态CPU时间占比(单位:%)'); -insert into metric_description(metric, description) values('system_cpu_user', '用户态CPU时间占比(单位:%)'); -insert into metric_description(metric, description) values('system_disk_bytes_free', '磁盘某分区余量大小(单位:byte)'); -insert into metric_description(metric, description) values('system_disk_used_percent', '磁盘某分区用量占比(单位:%)'); -insert into metric_description(metric, description) 
values('system_disk_read_time', '设备读操作耗时(单位:ms)'); -insert into metric_description(metric, description) values('system_disk_read_time_percent', '读取磁盘时间百分比(单位:%)'); -insert into metric_description(metric, description) values('system_disk_bytes_total', '磁盘某分区总量(单位:byte)'); -insert into metric_description(metric, description) values('system_disk_bytes_used', '磁盘某分区用量大小(单位:byte)'); -insert into metric_description(metric, description) values('system_disk_write_time', '设备写操作耗时(单位:ms)'); -insert into metric_description(metric, description) values('system_disk_write_time_percent', '写入磁盘时间百分比(单位:%)'); -insert into metric_description(metric, description) values('system_files_allocated', '系统已分配文件句柄数'); -insert into metric_description(metric, description) values('system_files_left', '系统未分配文件句柄数'); -insert into metric_description(metric, description) values('system_files_used_percent', '系统使用文件句柄占已分配百分比(单位:%)'); -insert into metric_description(metric, description) values('system_files_max', '系统可以打开的最大文件句柄数'); -insert into metric_description(metric, description) values('system_files_used', '系统使用的已分配文件句柄数'); -insert into metric_description(metric, description) values('system_disk_inodes_free', '某分区空闲inode数量'); -insert into metric_description(metric, description) values('system_disk_inodes_used_percent', '某分区已用inode占比(单位:%)'); -insert into metric_description(metric, description) values('system_disk_inodes_total', '某分区inode总数量'); -insert into metric_description(metric, description) values('system_disk_inodes_used', '某分区已用inode数量'); -insert into metric_description(metric, description) values('system_io_avgqu_sz', '设备平均队列长度'); -insert into metric_description(metric, description) values('system_io_avgrq_sz', '设备平均请求大小'); -insert into metric_description(metric, description) values('system_io_await', '每次IO平均处理时间(单位:ms)'); -insert into metric_description(metric, description) values('system_io_r_await', '读请求平均耗时(单位:ms)'); -insert into metric_description(metric, description) values('system_io_read_request', '每秒读请求数量'); -insert into metric_description(metric, description) values('system_io_read_bytes', '每秒读取字节数'); -insert into metric_description(metric, description) values('system_io_rrqm_s', '每秒合并到设备队列的读请求数'); -insert into metric_description(metric, description) values('system_io_svctm', '每次IO平均服务时间(单位:ms)'); -insert into metric_description(metric, description) values('system_io_util', 'I/O请求的CPU时间百分比'); -insert into metric_description(metric, description) values('system_io_w_await', '写请求平均耗时(单位:ms)'); -insert into metric_description(metric, description) values('system_io_write_request', '每秒写请求数量'); -insert into metric_description(metric, description) values('system_io_write_bytes', '每秒写取字节数'); -insert into metric_description(metric, description) values('system_io_wrqm_s', '每秒合并到设备队列的写请求数'); -insert into metric_description(metric, description) values('system_load_1', '近1分钟平均负载'); -insert into metric_description(metric, description) values('system_load_5', '近5分钟平均负载'); -insert into metric_description(metric, description) values('system_load_15', '近15分钟平均负载'); -insert into metric_description(metric, description) values('system_mem_buffered', '文件缓冲区的物理RAM量(单位:*byte*)'); -insert into metric_description(metric, description) values('system_mem_cached', '缓存内存的物理RAM量(单位:*byte*)'); -insert into metric_description(metric, description) values('system_mem_commit_limit', '系统当前可分配的内存总量(单位:*byte*)'); -insert into metric_description(metric, description) values('system_mem_committed', '在磁盘分页文件上保留的物理内存量(单位:*byte*)'); -insert into 
metric_description(metric, description) values('system_mem_committed_as', '系统已分配的包括进程未使用的内存量(单位:*byte*)'); -insert into metric_description(metric, description) values('system_mem_nonpaged', '不能写入磁盘的物理内存量(单位:*byte*)'); -insert into metric_description(metric, description) values('system_mem_paged', '没被使用是可以写入磁盘的物理内存量(单位:*byte*)'); -insert into metric_description(metric, description) values('system_mem_free_percent', '内存空闲率'); -insert into metric_description(metric, description) values('system_mem_used_percent', '内存使用率'); -insert into metric_description(metric, description) values('system_mem_shared', '用作共享内存的物理RAM量(单位:*byte*)'); -insert into metric_description(metric, description) values('system_mem_slab', '内核用来缓存数据结构供自己使用的内存量(单位:*byte*)'); -insert into metric_description(metric, description) values('system_mem_total', '物理内存总量(单位:*byte*)'); -insert into metric_description(metric, description) values('system_mem_free', '空闲内存大小(单位:*byte*)'); -insert into metric_description(metric, description) values('system_mem_used', '已用内存大小(单位:*byte*)'); -insert into metric_description(metric, description) values('system_swap_cached', '用作缓存的交换空间'); -insert into metric_description(metric, description) values('system_swap_free', '空闲swap大小(单位:*byte*)'); -insert into metric_description(metric, description) values('system_swap_free_percent', '空闲swap占比'); -insert into metric_description(metric, description) values('system_swap_total', 'swap总大小(单位:*byte*)'); -insert into metric_description(metric, description) values('system_swap_used', '已用swap大小(单位:*byte*)'); -insert into metric_description(metric, description) values('system_swap_used_percent', '已用swap占比(单位:%)'); -insert into metric_description(metric, description) values('system_net_bits_rcvd', '每秒设备上收到的bit数'); -insert into metric_description(metric, description) values('system_net_bits_sent', '每秒设备上发送的bit数'); -insert into metric_description(metric, description) values('system_net_conntrack_count', 'conntrack用量'); -insert into metric_description(metric, description) values('system_net_conntrack_count_percent', 'conntrack用量占比(单位:%)'); -insert into metric_description(metric, description) values('system_net_conntrack_max', 'conntrack最大值'); -insert into metric_description(metric, description) values('system_net_packets_in_count', '接收数据包个数'); -insert into metric_description(metric, description) values('system_net_packets_in_error', '接收数据包错误数'); -insert into metric_description(metric, description) values('system_net_packets_out_count', '发送数据包个数'); -insert into metric_description(metric, description) values('system_net_packets_out_error', '发送数据包错误数'); -insert into metric_description(metric, description) values('system_net_tcp4_closing', 'TCPIPv4关闭中连接的数量'); -insert into metric_description(metric, description) values('system_net_tcp4_established', 'TCPIPv4建立的连接数'); -insert into metric_description(metric, description) values('system_net_tcp4_listening', 'TCPIPv4监听连接的数量'); -insert into metric_description(metric, description) values('system_net_tcp4_opening', 'TCPIPv4打开中连接的数量'); -insert into metric_description(metric, description) values('system_net_tcp6_closing', 'TCPIPv6关闭中连接的数量'); -insert into metric_description(metric, description) values('system_net_tcp6_established', 'TCPIPv6建立的连接数'); -insert into metric_description(metric, description) values('system_net_tcp6_listening', 'TCPIPv6监听连接的数量'); -insert into metric_description(metric, description) values('system_net_tcp6_opening', 'TCPIPv6打开中连接的数量'); -insert into metric_description(metric, description) 
values('system_net_tcp_backlog_drops', '数据包的丢弃数量(TCPbacklog没有空间)'); -insert into metric_description(metric, description) values('system_net_tcp_backlog_drops_count', '数据包丢弃总数(TCPbacklog没有空间)'); -insert into metric_description(metric, description) values('system_net_tcp_failed_retransmits_co', 'retransmit失败的数据包总数'); -insert into metric_description(metric, description) values('system_net_tcp_in_segs', '收到的TCP段数'); -insert into metric_description(metric, description) values('system_net_tcp_in_segs_count', '收到的TCP段的总数'); -insert into metric_description(metric, description) values('system_net_tcp_listen_drops', '采集周期链接被drop的数量'); -insert into metric_description(metric, description) values('system_net_tcp_listen_drops_count', '链接被drop的总数'); -insert into metric_description(metric, description) values('system_net_tcp_out_segs', '发送的TCP段数'); -insert into metric_description(metric, description) values('system_net_tcp_out_segs_count', '发送的TCP段的总数'); -insert into metric_description(metric, description) values('system_net_tcp_recv_q_95percentile', 'TCP接收队列95百分位值(单位:*byte*)'); -insert into metric_description(metric, description) values('system_net_tcp_recv_q_avg', 'TCP接收队列平均值(单位:*byte*)'); -insert into metric_description(metric, description) values('system_net_tcp_recv_q_count', 'TCP接收连接速率'); -insert into metric_description(metric, description) values('system_net_tcp_recv_q_max', 'TCP接收队列最大值(单位:*byte*)'); -insert into metric_description(metric, description) values('system_net_tcp_recv_q_median', 'TCP接收队列中位值(单位:*byte*)'); -insert into metric_description(metric, description) values('system_net_tcp_retrans_segs', 'TCP段的数量重传数'); -insert into metric_description(metric, description) values('system_net_tcp_retrans_segs_count', 'TCP段的数量重传总数'); -insert into metric_description(metric, description) values('system_net_tcp_send_q_95percentile', 'TCP发送队列95百分位值(单位:*byte*)'); -insert into metric_description(metric, description) values('system_net_tcp_send_q_avg', 'TCP发送队列平均值(单位:*byte*)'); -insert into metric_description(metric, description) values('system_net_tcp_send_q_count', 'TCP发送连接速率'); -insert into metric_description(metric, description) values('system_net_tcp_send_q_max', 'TCP发送队列最大值(单位:*byte*)'); -insert into metric_description(metric, description) values('system_net_tcp_send_q_median', 'TCP发送队列中位值(单位:*byte*)'); -insert into metric_description(metric, description) values('system_net_udp_in_datagrams', '接收UDP数据报的速率'); -insert into metric_description(metric, description) values('system_net_udp_in_datagrams_count', '接收UDP数据报总数'); -insert into metric_description(metric, description) values('system_net_udp_in_errors', '接收的无法交付的UDP数据报的速率'); -insert into metric_description(metric, description) values('system_net_udp_in_errors_count', '接收的无法交付的UDP数据报的总数'); -insert into metric_description(metric, description) values('system_net_udp_no_ports', '收到的目的地端口没有应用程序的UDP数据报的速率'); -insert into metric_description(metric, description) values('system_net_udp_no_ports_count', '收到的目的地端口没有应用程序的UDP数据报的总数'); -insert into metric_description(metric, description) values('system_net_udp_out_datagrams', '发送UDP数据报的速率'); -insert into metric_description(metric, description) values('system_net_udp_out_datagrams_count', '发送UDP数据报的总数'); -insert into metric_description(metric, description) values('system_net_udp_rcv_buf_errors', '丢失的UDP数据报速率'); -insert into metric_description(metric, description) values('system_net_udp_rcv_buf_errors_count', '丢失的UDP数据报总数(因接收缓冲区没有空间)'); -insert into metric_description(metric, description) values('proc_cpu_sys', 
'进程系统态cpu使用率(单位:%)'); -insert into metric_description(metric, description) values('proc_cpu_threads', '进程中线程数量'); -insert into metric_description(metric, description) values('proc_cpu_util', '进程cpu使用率(单位:%)'); -insert into metric_description(metric, description) values('proc_cpu_user', '进程用户态cpu使用率(单位:%)'); -insert into metric_description(metric, description) values('proc_io_read_rate', '进程io读取频率(单位:hz)'); -insert into metric_description(metric, description) values('proc_io_readbytes_rate', '进程io读取速率(单位:b/s)'); -insert into metric_description(metric, description) values('proc_io_write_rate', '进程io写入频率(单位:hz)'); -insert into metric_description(metric, description) values('proc_io_writebytes_rate', '进程io写入速率(单位:b/s)'); -insert into metric_description(metric, description) values('proc_mem_data', '进程data内存大小'); -insert into metric_description(metric, description) values('proc_mem_dirty', '进程dirty内存大小'); -insert into metric_description(metric, description) values('proc_mem_lib', '进程lib内存大小'); -insert into metric_description(metric, description) values('proc_mem_rss', '进程常驻内存大小'); -insert into metric_description(metric, description) values('proc_mem_shared', '进程共享内存大小'); -insert into metric_description(metric, description) values('proc_mem_swap', '进程交换空间大小'); -insert into metric_description(metric, description) values('proc_mem_text', '进程Text内存大小'); -insert into metric_description(metric, description) values('proc_mem_used', '进程内存使用量(单位:*byte*)'); -insert into metric_description(metric, description) values('proc_mem_util', '进程内存使用率(单位:%)'); -insert into metric_description(metric, description) values('proc_mem_vms', '进程虚拟内存大小'); -insert into metric_description(metric, description) values('proc_net_bits_rate', '进程网络传输率(单位:b/s)'); -insert into metric_description(metric, description) values('proc_net_conn_rate', '进程网络连接频率(单位:hz)'); -insert into metric_description(metric, description) values('proc_num', '进程个数'); -insert into metric_description(metric, description) values('proc_open_fd_count', '进程打开文件句柄数量'); -insert into metric_description(metric, description) values('proc_port_listen', '进程监听端口'); -insert into metric_description(metric, description) values('proc_uptime_avg', '进程组中最短的运行时间'); -insert into metric_description(metric, description) values('proc_uptime_max', '进程组中最久的运行时间'); -insert into metric_description(metric, description) values('proc_uptime_min', '进程组平均运行时间'); diff --git a/src/main.go b/src/main.go new file mode 100644 index 00000000..88500c4d --- /dev/null +++ b/src/main.go @@ -0,0 +1,87 @@ +package main + +import ( + "fmt" + "os" + + "github.com/toolkits/pkg/runner" + "github.com/urfave/cli/v2" + + "github.com/didi/nightingale/v5/src/server" + "github.com/didi/nightingale/v5/src/webapi" +) + +// VERSION go build -ldflags "-X main.VERSION=x.x.x" +var VERSION = "not specified" + +func main() { + app := cli.NewApp() + app.Name = "n9e" + app.Version = VERSION + app.Usage = "Nightingale, enterprise prometheus management" + app.Commands = []*cli.Command{ + newWebapiCmd(), + newServerCmd(), + } + app.Run(os.Args) +} + +func newWebapiCmd() *cli.Command { + return &cli.Command{ + Name: "webapi", + Usage: "Run webapi", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "conf", + Aliases: []string{"c"}, + Usage: "specify configuration file(.json,.yaml,.toml)", + }, + }, + Action: func(c *cli.Context) error { + printEnv() + + var opts []webapi.WebapiOption + if c.String("conf") != "" { + opts = append(opts, webapi.SetConfigFile(c.String("conf"))) + } + opts = append(opts, webapi.SetVersion(VERSION)) 
+ + webapi.Run(opts...) + return nil + }, + } +} + +func newServerCmd() *cli.Command { + return &cli.Command{ + Name: "server", + Usage: "Run server", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "conf", + Aliases: []string{"c"}, + Usage: "specify configuration file(.json,.yaml,.toml)", + }, + }, + Action: func(c *cli.Context) error { + printEnv() + + var opts []server.ServerOption + if c.String("conf") != "" { + opts = append(opts, server.SetConfigFile(c.String("conf"))) + } + opts = append(opts, server.SetVersion(VERSION)) + + server.Run(opts...) + return nil + }, + } +} + +func printEnv() { + runner.Init() + fmt.Println("runner.cwd:", runner.Cwd) + fmt.Println("runner.hostname:", runner.Hostname) + fmt.Println("runner.fd_limits:", runner.FdLimits()) + fmt.Println("runner.vm_limits:", runner.VMLimits()) +} diff --git a/src/models/alert_cur_event.go b/src/models/alert_cur_event.go new file mode 100644 index 00000000..6924f43d --- /dev/null +++ b/src/models/alert_cur_event.go @@ -0,0 +1,320 @@ +package models + +import ( + "strconv" + "strings" + "time" +) + +type AlertCurEvent struct { + Id int64 `json:"id" gorm:"primaryKey"` + Cluster string `json:"cluster"` + GroupId int64 `json:"group_id"` // busi group id + Hash string `json:"hash"` // rule_id + vector_key + RuleId int64 `json:"rule_id"` + RuleName string `json:"rule_name"` + RuleNote string `json:"rule_note"` + Severity int `json:"severity"` + PromForDuration int `json:"prom_for_duration"` + PromQl string `json:"prom_ql"` + PromEvalInterval int `json:"prom_eval_interval"` + Callbacks string `json:"-"` // for db + CallbacksJSON []string `json:"callbacks" gorm:"-"` // for fe + RunbookUrl string `json:"runbook_url"` + NotifyRecovered int `json:"notify_recovered"` + NotifyChannels string `json:"-"` // for db + NotifyChannelsJSON []string `json:"notify_channels" gorm:"-"` // for fe + NotifyGroups string `json:"-"` // for db + NotifyGroupsJSON []string `json:"notify_groups" gorm:"-"` // for fe + NotifyGroupsObj []*UserGroup `json:"notify_groups_obj" gorm:"-"` // for fe + NotifyRepeatNext int64 `json:"notify_repeat_next"` + TargetIdent string `json:"target_ident"` + TargetNote string `json:"target_note"` + TriggerTime int64 `json:"trigger_time"` + TriggerValue string `json:"trigger_value"` + Tags string `json:"-"` // for db + TagsJSON []string `json:"tags" gorm:"-"` // for fe + TagsMap map[string]string `json:"-" gorm:"-"` // for internal usage + IsRecovered bool `json:"is_recovered" gorm:"-"` // for notify.py + NotifyUsersObj []*User `json:"notify_users_obj" gorm:"-"` // for notify.py + LastEvalTime int64 `json:"last_eval_time" gorm:"-"` // for internal usage +} + +func (e *AlertCurEvent) TableName() string { + return "alert_cur_event" +} + +func (e *AlertCurEvent) Add() error { + return Insert(e) +} + +func (e *AlertCurEvent) IncRepeatStep(step int64) error { + return DB().Model(e).Where("id=?", e.Id).Update("notify_repeat_next", time.Now().Unix()+step).Error +} + +func (e *AlertCurEvent) ToHis() *AlertHisEvent { + isRecovered := 0 + if e.IsRecovered { + isRecovered = 1 + } + + return &AlertHisEvent{ + IsRecovered: isRecovered, + Cluster: e.Cluster, + GroupId: e.GroupId, + Hash: e.Hash, + RuleId: e.RuleId, + RuleName: e.RuleName, + RuleNote: e.RuleNote, + Severity: e.Severity, + PromForDuration: e.PromForDuration, + PromQl: e.PromQl, + PromEvalInterval: e.PromEvalInterval, + Callbacks: e.Callbacks, + RunbookUrl: e.RunbookUrl, + NotifyRecovered: e.NotifyRecovered, + NotifyChannels: e.NotifyChannels, + NotifyGroups: e.NotifyGroups, + 
TargetIdent: e.TargetIdent, + TargetNote: e.TargetNote, + TriggerTime: e.TriggerTime, + TriggerValue: e.TriggerValue, + Tags: e.Tags, + } +} + +func (e *AlertCurEvent) DB2FE() { + e.NotifyChannelsJSON = strings.Fields(e.NotifyChannels) + e.NotifyGroupsJSON = strings.Fields(e.NotifyGroups) + e.CallbacksJSON = strings.Fields(e.Callbacks) + e.TagsJSON = strings.Split(e.Tags, ",,") +} + +func (e *AlertCurEvent) DB2Mem() { + e.IsRecovered = false + e.NotifyGroupsJSON = strings.Fields(e.NotifyGroups) + e.CallbacksJSON = strings.Fields(e.Callbacks) + e.NotifyChannelsJSON = strings.Fields(e.NotifyChannels) + e.TagsJSON = strings.Split(e.Tags, ",,") + e.TagsMap = make(map[string]string) + for i := 0; i < len(e.TagsJSON); i++ { + pair := strings.TrimSpace(e.TagsJSON[i]) + if pair == "" { + continue + } + + arr := strings.Split(pair, "=") + if len(arr) != 2 { + continue + } + + e.TagsMap[arr[0]] = arr[1] + } +} + +// for webui +func (e *AlertCurEvent) FillNotifyGroups(cache map[int64]*UserGroup) error { + // some user-group already deleted ? + count := len(e.NotifyGroupsJSON) + if count == 0 { + e.NotifyGroupsObj = []*UserGroup{} + return nil + } + + for i := range e.NotifyGroupsJSON { + id, err := strconv.ParseInt(e.NotifyGroupsJSON[i], 10, 64) + if err != nil { + continue + } + + ug, has := cache[id] + if has { + e.NotifyGroupsObj = append(e.NotifyGroupsObj, ug) + continue + } + + ug, err = UserGroupGetById(id) + if err != nil { + return err + } + + if ug != nil { + e.NotifyGroupsObj = append(e.NotifyGroupsObj, ug) + cache[id] = ug + } + } + + return nil +} + +func AlertCurEventTotal(bgid, stime, etime int64, severity int, clusters []string, query string) (int64, error) { + session := DB().Model(&AlertCurEvent{}).Where("trigger_time between ? and ? and group_id = ?", stime, etime, bgid) + + if severity >= 0 { + session = session.Where("severity = ?", severity) + } + + if len(clusters) > 0 { + session = session.Where("cluster in ?", clusters) + } + + if query != "" { + arr := strings.Fields(query) + for i := 0; i < len(arr); i++ { + qarg := "%" + arr[i] + "%" + session = session.Where("rule_name like ? or tags like ?", qarg) + } + } + + return Count(session) +} + +func AlertCurEventGets(bgid, stime, etime int64, severity int, clusters []string, query string, limit, offset int) ([]AlertCurEvent, error) { + session := DB().Where("trigger_time between ? and ? and group_id = ?", stime, etime, bgid) + + if severity >= 0 { + session = session.Where("severity = ?", severity) + } + + if len(clusters) > 0 { + session = session.Where("cluster in ?", clusters) + } + + if query != "" { + arr := strings.Fields(query) + for i := 0; i < len(arr); i++ { + qarg := "%" + arr[i] + "%" + session = session.Where("rule_name like ? 
or tags like ?", qarg) + } + } + + var lst []AlertCurEvent + err := session.Order("trigger_time desc").Limit(limit).Offset(offset).Find(&lst).Error + + if err == nil { + for i := 0; i < len(lst); i++ { + lst[i].DB2FE() + } + } + + return lst, err +} + +func AlertCurEventDel(ids []int64) error { + if len(ids) == 0 { + return nil + } + + return DB().Where("id in ?", ids).Delete(&AlertCurEvent{}).Error +} + +func AlertCurEventDelByHash(hash string) error { + return DB().Where("hash = ?", hash).Delete(&AlertCurEvent{}).Error +} + +func AlertCurEventExists(where string, args ...interface{}) (bool, error) { + return Exists(DB().Model(&AlertCurEvent{}).Where(where, args...)) +} + +func AlertCurEventGet(where string, args ...interface{}) (*AlertCurEvent, error) { + var lst []*AlertCurEvent + err := DB().Where(where, args...).Find(&lst).Error + if err != nil { + return nil, err + } + + if len(lst) == 0 { + return nil, nil + } + + lst[0].DB2FE() + lst[0].FillNotifyGroups(make(map[int64]*UserGroup)) + + return lst[0], nil +} + +func AlertCurEventGetById(id int64) (*AlertCurEvent, error) { + return AlertCurEventGet("id=?", id) +} + +type AlertNumber struct { + GroupId int64 + GroupCount int64 +} + +// for busi_group list page +func AlertNumbers(bgids []int64) (map[int64]int64, error) { + ret := make(map[int64]int64) + if len(bgids) == 0 { + return ret, nil + } + + var arr []AlertNumber + err := DB().Model(&AlertCurEvent{}).Select("group_id", "count(*) as group_count").Where("group_id in ?", bgids).Group("group_id").Find(&arr).Error + if err != nil { + return nil, err + } + + for i := 0; i < len(arr); i++ { + ret[arr[i].GroupId] = arr[i].GroupCount + } + + return ret, nil +} + +func AlertCurEventGetAll(cluster string) ([]*AlertCurEvent, error) { + session := DB().Model(&AlertCurEvent{}) + + if cluster != "" { + session = session.Where("cluster = ?", cluster) + } + + var lst []*AlertCurEvent + err := session.Find(&lst).Error + return lst, err +} + +func AlertCurEventGetByRule(ruleId int64) ([]*AlertCurEvent, error) { + var lst []*AlertCurEvent + err := DB().Where("rule_id=?", ruleId).Find(&lst).Error + return lst, err +} + +func AlertCurEventNeedRepeat(cluster string) ([]*AlertCurEvent, error) { + session := DB().Model(&AlertCurEvent{}).Where("notify_repeat_next <= ?", time.Now().Unix()) + + if cluster != "" { + session = session.Where("cluster = ?", cluster) + } + + var lst []*AlertCurEvent + err := session.Find(&lst).Error + return lst, err +} + +func AlertCurEventGetMap(cluster string) (map[int64]map[string]struct{}, error) { + session := DB().Model(&AlertCurEvent{}) + if cluster != "" { + session = session.Where("cluster = ?", cluster) + } + + var lst []*AlertCurEvent + err := session.Select("rule_id", "hash").Find(&lst).Error + if err != nil { + return nil, err + } + + ret := make(map[int64]map[string]struct{}) + for i := 0; i < len(lst); i++ { + rid := lst[i].RuleId + hash := lst[i].Hash + if _, has := ret[rid]; has { + ret[rid][hash] = struct{}{} + } else { + ret[rid] = make(map[string]struct{}) + ret[rid][hash] = struct{}{} + } + } + + return ret, nil +} diff --git a/src/models/alert_his_event.go b/src/models/alert_his_event.go new file mode 100644 index 00000000..54deeba6 --- /dev/null +++ b/src/models/alert_his_event.go @@ -0,0 +1,167 @@ +package models + +import ( + "strconv" + "strings" +) + +type AlertHisEvent struct { + Id int64 `json:"id" gorm:"primaryKey"` + IsRecovered int `json:"is_recovered"` + Cluster string `json:"cluster"` + GroupId int64 `json:"group_id"` + Hash string 
`json:"hash"` + RuleId int64 `json:"rule_id"` + RuleName string `json:"rule_name"` + RuleNote string `json:"rule_note"` + Severity int `json:"severity"` + PromForDuration int `json:"prom_for_duration"` + PromQl string `json:"prom_ql"` + PromEvalInterval int `json:"prom_eval_interval"` + Callbacks string `json:"-"` + CallbacksJSON []string `json:"callbacks" gorm:"-"` + RunbookUrl string `json:"runbook_url"` + NotifyRecovered int `json:"notify_recovered"` + NotifyChannels string `json:"-"` + NotifyChannelsJSON []string `json:"notify_channels" gorm:"-"` + NotifyGroups string `json:"-"` + NotifyGroupsJSON []string `json:"notify_groups" gorm:"-"` + NotifyGroupsObj []UserGroup `json:"notify_groups_obj" gorm:"-"` + TargetIdent string `json:"target_ident"` + TargetNote string `json:"target_note"` + TriggerTime int64 `json:"trigger_time"` + TriggerValue string `json:"trigger_value"` + Tags string `json:"-"` + TagsJSON []string `json:"tags" gorm:"-"` +} + +func (e *AlertHisEvent) TableName() string { + return "alert_his_event" +} + +func (e *AlertHisEvent) Add() error { + return Insert(e) +} + +func (e *AlertHisEvent) DB2FE() { + e.NotifyChannelsJSON = strings.Fields(e.NotifyChannels) + e.NotifyGroupsJSON = strings.Fields(e.NotifyGroups) + e.CallbacksJSON = strings.Fields(e.Callbacks) + e.TagsJSON = strings.Split(e.Tags, ",,") +} + +func (e *AlertHisEvent) FillNotifyGroups(cache map[int64]*UserGroup) error { + // some user-group already deleted ? + count := len(e.NotifyGroupsJSON) + if count == 0 { + e.NotifyGroupsObj = []UserGroup{} + return nil + } + + for i := range e.NotifyGroupsJSON { + id, err := strconv.ParseInt(e.NotifyGroupsJSON[i], 10, 64) + if err != nil { + continue + } + + ug, has := cache[id] + if has { + e.NotifyGroupsObj = append(e.NotifyGroupsObj, *ug) + continue + } + + ug, err = UserGroupGetById(id) + if err != nil { + return err + } + + if ug != nil { + e.NotifyGroupsObj = append(e.NotifyGroupsObj, *ug) + cache[id] = ug + } + } + + return nil +} + +func AlertHisEventTotal(bgid, stime, etime int64, severity int, recovered int, clusters []string, query string) (int64, error) { + session := DB().Model(&AlertHisEvent{}).Where("trigger_time between ? and ? and group_id = ?", stime, etime, bgid) + + if severity >= 0 { + session = session.Where("severity = ?", severity) + } + + if recovered >= 0 { + session = session.Where("is_recovered = ?", recovered) + } + + if len(clusters) > 0 { + session = session.Where("cluster in ?", clusters) + } + + if query != "" { + arr := strings.Fields(query) + for i := 0; i < len(arr); i++ { + qarg := "%" + arr[i] + "%" + session = session.Where("rule_name like ? or tags like ?", qarg) + } + } + + return Count(session) +} + +func AlertHisEventGets(bgid, stime, etime int64, severity int, recovered int, clusters []string, query string, limit, offset int) ([]AlertHisEvent, error) { + session := DB().Where("trigger_time between ? and ? and group_id = ?", stime, etime, bgid) + + if severity >= 0 { + session = session.Where("severity = ?", severity) + } + + if recovered >= 0 { + session = session.Where("is_recovered = ?", recovered) + } + + if len(clusters) > 0 { + session = session.Where("cluster in ?", clusters) + } + + if query != "" { + arr := strings.Fields(query) + for i := 0; i < len(arr); i++ { + qarg := "%" + arr[i] + "%" + session = session.Where("rule_name like ? 
or tags like ?", qarg) + } + } + + var lst []AlertHisEvent + err := session.Order("trigger_time desc").Limit(limit).Offset(offset).Find(&lst).Error + + if err == nil { + for i := 0; i < len(lst); i++ { + lst[i].DB2FE() + } + } + + return lst, err +} + +func AlertHisEventGet(where string, args ...interface{}) (*AlertHisEvent, error) { + var lst []*AlertHisEvent + err := DB().Where(where, args...).Find(&lst).Error + if err != nil { + return nil, err + } + + if len(lst) == 0 { + return nil, nil + } + + lst[0].DB2FE() + lst[0].FillNotifyGroups(make(map[int64]*UserGroup)) + + return lst[0], nil +} + +func AlertHisEventGetById(id int64) (*AlertHisEvent, error) { + return AlertHisEventGet("id=?", id) +} diff --git a/src/models/alert_mute.go b/src/models/alert_mute.go new file mode 100644 index 00000000..983960f7 --- /dev/null +++ b/src/models/alert_mute.go @@ -0,0 +1,140 @@ +package models + +import ( + "encoding/json" + "fmt" + "regexp" + "strings" + "time" + + "github.com/didi/nightingale/v5/src/pkg/ormx" + "github.com/pkg/errors" +) + +type TagFilter struct { + Key string `json:"key"` // tag key + Func string `json:"func"` // == | =~ | in + Value string `json:"value"` // tag value + Regexp *regexp.Regexp // parse value to regexp if func = '=~' + Vset map[string]struct{} // parse value to regexp if func = 'in' +} + +type AlertMute struct { + Id int64 `json:"id" gorm:"primaryKey"` + GroupId int64 `json:"group_id"` + Cluster string `json:"cluster"` + Tags ormx.JSONArr `json:"tags"` + Cause string `json:"cause"` + Btime int64 `json:"btime"` + Etime int64 `json:"etime"` + CreateBy string `json:"create_by"` + CreateAt int64 `json:"create_at"` + ITags []TagFilter `json:"-" gorm:"-"` // inner tags +} + +func (m *AlertMute) TableName() string { + return "alert_mute" +} + +func AlertMuteGets(groupId int64) (lst []AlertMute, err error) { + err = DB().Where("group_id=?", groupId).Order("id desc").Find(&lst).Error + return +} + +func (m *AlertMute) Verify() error { + if m.GroupId <= 0 { + return errors.New("group_id invalid") + } + + if m.Cluster == "" { + return errors.New("cluster invalid") + } + + if m.Etime <= m.Btime { + return fmt.Errorf("Oops... 
etime(%d) <= btime(%d)", m.Etime, m.Btime) + } + + if err := m.Parse(); err != nil { + return err + } + + if len(m.ITags) == 0 { + return errors.New("tags is blank") + } + + return nil +} + +func (m *AlertMute) Parse() error { + err := json.Unmarshal(m.Tags, &m.ITags) + if err != nil { + return err + } + + for i := 0; i < len(m.ITags); i++ { + if m.ITags[i].Func == "=~" { + m.ITags[i].Regexp, err = regexp.Compile(m.ITags[i].Value) + if err != nil { + return err + } + } else if m.ITags[i].Func == "in" { + arr := strings.Fields(m.ITags[i].Value) + m.ITags[i].Vset = make(map[string]struct{}) + for i := 0; i < len(arr); i++ { + m.ITags[i].Vset[arr[i]] = struct{}{} + } + } + } + + return nil +} + +func (m *AlertMute) Add() error { + if err := m.Verify(); err != nil { + return err + } + m.CreateAt = time.Now().Unix() + return Insert(m) +} + +func AlertMuteDel(ids []int64) error { + if len(ids) == 0 { + return nil + } + return DB().Where("id in ?", ids).Delete(new(AlertMute)).Error +} + +func AlertMuteStatistics(cluster string, btime int64) (*Statistics, error) { + session := DB().Model(&AlertMute{}).Select("count(*) as total", "max(create_at) as last_updated").Where("btime <= ?", btime) + + if cluster != "" { + session = session.Where("cluster = ?", cluster) + } + + var stats []*Statistics + err := session.Find(&stats).Error + if err != nil { + return nil, err + } + + return stats[0], nil +} + +func AlertMuteGetsByCluster(cluster string, btime int64) ([]*AlertMute, error) { + // clean expired first + buf := int64(30) + err := DB().Where("etime < ?", time.Now().Unix()+buf).Delete(new(AlertMute)).Error + if err != nil { + return nil, err + } + + // get my cluster's mutes + session := DB().Model(&AlertMute{}).Where("btime <= ?", btime) + if cluster != "" { + session = session.Where("cluster = ?", cluster) + } + + var lst []*AlertMute + err = session.Find(&lst).Error + return lst, err +} diff --git a/src/models/alert_rule.go b/src/models/alert_rule.go new file mode 100644 index 00000000..eb3f6e91 --- /dev/null +++ b/src/models/alert_rule.go @@ -0,0 +1,321 @@ +package models + +import ( + "fmt" + "strconv" + "strings" + "time" + + "github.com/pkg/errors" + "github.com/toolkits/pkg/slice" + "github.com/toolkits/pkg/str" + + "github.com/didi/nightingale/v5/src/webapi/config" +) + +type AlertRule struct { + Id int64 `json:"id" gorm:"primaryKey"` + GroupId int64 `json:"group_id"` // busi group id + Cluster string `json:"cluster"` // take effect by cluster + Name string `json:"name"` // rule name + Note string `json:"note"` // will sent in notify + Severity int `json:"severity"` // 0: Emergency 1: Warning 2: Notice + Disabled int `json:"disabled"` // 0: enabled, 1: disabled + PromForDuration int `json:"prom_for_duration"` // prometheus for, unit:s + PromQl string `json:"prom_ql"` // just one ql + PromEvalInterval int `json:"prom_eval_interval"` // unit:s + EnableStime string `json:"enable_stime"` // e.g. 00:00 + EnableEtime string `json:"enable_etime"` // e.g. 
23:59 + EnableDaysOfWeek string `json:"-"` // split by space: 0 1 2 3 4 5 6 + EnableDaysOfWeekJSON []string `json:"enable_days_of_week" gorm:"-"` // for fe + NotifyRecovered int `json:"notify_recovered"` // whether notify when recovery + NotifyChannels string `json:"-"` // split by space: sms voice email dingtalk wecom + NotifyChannelsJSON []string `json:"notify_channels" gorm:"-"` // for fe + NotifyGroups string `json:"-"` // split by space: 233 43 + NotifyGroupsObj []UserGroup `json:"notify_groups_obj" gorm:"-"` // for fe + NotifyGroupsJSON []string `json:"notify_groups" gorm:"-"` // for fe + NotifyRepeatStep int `json:"notify_repeat_step"` // notify repeat interval, unit: min + Callbacks string `json:"-"` // split by space: http://a.com/api/x http://a.com/api/y' + CallbacksJSON []string `json:"callbacks" gorm:"-"` // for fe + RunbookUrl string `json:"runbook_url"` // sop url + AppendTags string `json:"-"` // split by space: service=n9e mod=api + AppendTagsJSON []string `json:"append_tags" gorm:"-"` // for fe + CreateAt int64 `json:"create_at"` + CreateBy string `json:"create_by"` + UpdateAt int64 `json:"update_at"` + UpdateBy string `json:"update_by"` +} + +func (ar *AlertRule) TableName() string { + return "alert_rule" +} + +func (ar *AlertRule) Verify() error { + if ar.GroupId <= 0 { + return fmt.Errorf("GroupId(%d) invalid", ar.GroupId) + } + + if ar.Cluster == "" { + return errors.New("cluster is blank") + } + + if str.Dangerous(ar.Name) { + return errors.New("Name has invalid characters") + } + + if ar.Name == "" { + return errors.New("name is blank") + } + + if ar.PromQl == "" { + return errors.New("prom_ql is blank") + } + + if ar.PromEvalInterval <= 0 { + ar.PromEvalInterval = 15 + } + + // check in front-end + // if _, err := parser.ParseExpr(ar.PromQl); err != nil { + // return errors.New("prom_ql parse error: %") + // } + + ar.AppendTags = strings.TrimSpace(ar.AppendTags) + arr := strings.Fields(ar.AppendTags) + for i := 0; i < len(arr); i++ { + if len(strings.Split(arr[i], "=")) != 2 { + return fmt.Errorf("AppendTags(%s) invalid", arr[i]) + } + } + + gids := strings.Fields(ar.NotifyGroups) + for i := 0; i < len(gids); i++ { + if _, err := strconv.ParseInt(gids[i], 10, 64); err != nil { + return fmt.Errorf("NotifyGroups(%s) invalid", ar.NotifyGroups) + } + } + + channels := strings.Fields(ar.NotifyChannels) + if len(channels) > 0 { + nlst := make([]string, 0, len(channels)) + for i := 0; i < len(channels); i++ { + if slice.ContainsString(config.C.NotifyChannels, channels[i]) { + nlst = append(nlst, channels[i]) + } + } + ar.NotifyChannels = strings.Join(nlst, " ") + } else { + ar.NotifyChannels = "" + } + + return nil +} + +func (ar *AlertRule) Add() error { + if err := ar.Verify(); err != nil { + return err + } + + exists, err := AlertRuleExists("group_id=? and cluster=? and name=?", ar.GroupId, ar.Cluster, ar.Name) + if err != nil { + return err + } + + if exists { + return errors.New("AlertRule already exists") + } + + now := time.Now().Unix() + ar.CreateAt = now + ar.UpdateAt = now + + return Insert(ar) +} + +func (ar *AlertRule) Update(arf AlertRule) error { + if ar.Name != arf.Name { + exists, err := AlertRuleExists("group_id=? and cluster=? and name=? 
and id <> ?", ar.GroupId, ar.Cluster, arf.Name, ar.Id) + if err != nil { + return err + } + + if exists { + return errors.New("AlertRule already exists") + } + } + + arf.FE2DB() + arf.Id = ar.Id + arf.GroupId = ar.GroupId + arf.CreateAt = ar.CreateAt + arf.CreateBy = ar.CreateBy + arf.UpdateAt = time.Now().Unix() + + return DB().Model(ar).Select("*").Updates(arf).Error +} + +func (ar *AlertRule) UpdateFieldsMap(fields map[string]interface{}) error { + return DB().Model(ar).Updates(fields).Error +} + +func (ar *AlertRule) FillNotifyGroups(cache map[int64]*UserGroup) error { + // some user-group already deleted ? + count := len(ar.NotifyGroupsJSON) + if count == 0 { + ar.NotifyGroupsObj = []UserGroup{} + return nil + } + + exists := make([]string, 0, count) + delete := false + for i := range ar.NotifyGroupsJSON { + id, _ := strconv.ParseInt(ar.NotifyGroupsJSON[i], 10, 64) + + ug, has := cache[id] + if has { + exists = append(exists, ar.NotifyGroupsJSON[i]) + ar.NotifyGroupsObj = append(ar.NotifyGroupsObj, *ug) + continue + } + + ug, err := UserGroupGetById(id) + if err != nil { + return err + } + + if ug == nil { + delete = true + } else { + exists = append(exists, ar.NotifyGroupsJSON[i]) + ar.NotifyGroupsObj = append(ar.NotifyGroupsObj, *ug) + cache[id] = ug + } + } + + if delete { + // some user-group already deleted + ar.NotifyGroupsJSON = exists + ar.NotifyGroups = strings.Join(exists, " ") + DB().Model(ar).Update("notify_groups", ar.NotifyGroups) + } + + return nil +} + +func (ar *AlertRule) FE2DB() { + ar.EnableDaysOfWeek = strings.Join(ar.EnableDaysOfWeekJSON, " ") + ar.NotifyChannels = strings.Join(ar.NotifyChannelsJSON, " ") + ar.NotifyGroups = strings.Join(ar.NotifyGroupsJSON, " ") + ar.Callbacks = strings.Join(ar.CallbacksJSON, " ") + ar.AppendTags = strings.Join(ar.AppendTagsJSON, " ") +} + +func (ar *AlertRule) DB2FE() { + ar.EnableDaysOfWeekJSON = strings.Fields(ar.EnableDaysOfWeek) + ar.NotifyChannelsJSON = strings.Fields(ar.NotifyChannels) + ar.NotifyGroupsJSON = strings.Fields(ar.NotifyGroups) + ar.CallbacksJSON = strings.Fields(ar.Callbacks) + ar.AppendTagsJSON = strings.Fields(ar.AppendTags) +} + +func AlertRuleDels(ids []int64, busiGroupId int64) error { + for i := 0; i < len(ids); i++ { + ret := DB().Where("id = ? 
and group_id=?", ids[i], busiGroupId).Delete(&AlertRule{}) + if ret.Error != nil { + return ret.Error + } + + // 说明确实删掉了,把相关的活跃告警也删了,这些告警永远都不会恢复了,而且策略都没了,说明没人关心了 + if ret.RowsAffected > 0 { + DB().Where("rule_id = ?", ids[i]).Delete(new(AlertCurEvent)) + } + } + + return nil +} + +func AlertRuleExists(where string, args ...interface{}) (bool, error) { + return Exists(DB().Model(&AlertRule{}).Where(where, args...)) +} + +func AlertRuleGets(groupId int64) ([]AlertRule, error) { + session := DB().Where("group_id=?", groupId).Order("name") + + var lst []AlertRule + err := session.Find(&lst).Error + if err == nil { + for i := 0; i < len(lst); i++ { + lst[i].DB2FE() + } + } + + return lst, err +} + +func AlertRuleGetsByCluster(cluster string) ([]*AlertRule, error) { + session := DB().Where("disabled = ?", 0) + + if cluster != "" { + session = session.Where("cluster = ?", cluster) + } + + var lst []*AlertRule + err := session.Find(&lst).Error + if err == nil { + for i := 0; i < len(lst); i++ { + lst[i].DB2FE() + } + } + + return lst, err +} + +func AlertRuleGet(where string, args ...interface{}) (*AlertRule, error) { + var lst []*AlertRule + err := DB().Where(where, args...).Find(&lst).Error + if err != nil { + return nil, err + } + + if len(lst) == 0 { + return nil, nil + } + + lst[0].DB2FE() + + return lst[0], nil +} + +func AlertRuleGetById(id int64) (*AlertRule, error) { + return AlertRuleGet("id=?", id) +} + +func AlertRuleGetName(id int64) (string, error) { + var names []string + err := DB().Where("id = ?", id).Pluck("name", &names).Error + if err != nil { + return "", err + } + + if len(names) == 0 { + return "", nil + } + + return names[0], nil +} + +func AlertRuleStatistics(cluster string) (*Statistics, error) { + session := DB().Model(&AlertRule{}).Select("count(*) as total", "max(update_at) as last_updated").Where("disabled = ?", 0) + + if cluster != "" { + session = session.Where("cluster = ?", cluster) + } + + var stats []*Statistics + err := session.Find(&stats).Error + if err != nil { + return nil, err + } + + return stats[0], nil +} diff --git a/src/models/alert_subscribe.go b/src/models/alert_subscribe.go new file mode 100644 index 00000000..51306b16 --- /dev/null +++ b/src/models/alert_subscribe.go @@ -0,0 +1,212 @@ +package models + +import ( + "encoding/json" + "regexp" + "strconv" + "strings" + "time" + + "github.com/didi/nightingale/v5/src/pkg/ormx" + "github.com/pkg/errors" +) + +type AlertSubscribe struct { + Id int64 `json:"id" gorm:"primaryKey"` + GroupId int64 `json:"group_id"` + Cluster string `json:"cluster"` + RuleId int64 `json:"rule_id"` + RuleName string `json:"rule_name" gorm:"-"` // for fe + Tags ormx.JSONArr `json:"tags"` + RedefineSeverity int `json:"redefine_severity"` + NewSeverity int `json:"new_severity"` + RedefineChannels int `json:"redefine_channels"` + NewChannels string `json:"new_channels"` + UserGroupIds string `json:"user_group_ids"` + UserGroups []UserGroup `json:"user_groups" gorm:"-"` // for fe + CreateBy string `json:"create_by"` + CreateAt int64 `json:"create_at"` + UpdateBy string `json:"update_by"` + UpdateAt int64 `json:"update_at"` + ITags []TagFilter `json:"-" gorm:"-"` // inner tags +} + +func (s *AlertSubscribe) TableName() string { + return "alert_subscribe" +} + +func AlertSubscribeGets(groupId int64) (lst []AlertSubscribe, err error) { + err = DB().Where("group_id=?", groupId).Order("id desc").Find(&lst).Error + return +} + +func (s *AlertSubscribe) Verify() error { + if s.Cluster == "" { + return errors.New("cluster invalid") + } + 
+ if err := s.Parse(); err != nil { + return err + } + + if len(s.ITags) == 0 && s.RuleId == 0 { + return errors.New("rule_id and tags are both blank") + } + + ugids := strings.Fields(s.UserGroupIds) + for i := 0; i < len(ugids); i++ { + if _, err := strconv.ParseInt(ugids[i], 10, 64); err != nil { + return errors.New("user_group_ids invalid") + } + } + + return nil +} + +func (s *AlertSubscribe) Parse() error { + err := json.Unmarshal(s.Tags, &s.ITags) + if err != nil { + return err + } + + for i := 0; i < len(s.ITags); i++ { + if s.ITags[i].Func == "=~" { + s.ITags[i].Regexp, err = regexp.Compile(s.ITags[i].Value) + if err != nil { + return err + } + } else if s.ITags[i].Func == "in" { + arr := strings.Fields(s.ITags[i].Value) + s.ITags[i].Vset = make(map[string]struct{}) + for j := 0; j < len(arr); j++ { + s.ITags[i].Vset[arr[j]] = struct{}{} + } + } + } + + return nil +} + +func (s *AlertSubscribe) Add() error { + if err := s.Verify(); err != nil { + return err + } + + now := time.Now().Unix() + s.CreateAt = now + s.UpdateAt = now + return Insert(s) +} + +func (s *AlertSubscribe) FillRuleName(cache map[int64]string) error { + if s.RuleId <= 0 { + s.RuleName = "" + return nil + } + + name, has := cache[s.RuleId] + if has { + s.RuleName = name + return nil + } + + name, err := AlertRuleGetName(s.RuleId) + if err != nil { + return err + } + + if name == "" { + name = "Error: AlertRule not found" + } + + s.RuleName = name + cache[s.RuleId] = name + return nil +} + +func (s *AlertSubscribe) FillUserGroups(cache map[int64]*UserGroup) error { + // some user-group already deleted ? + ugids := strings.Fields(s.UserGroupIds) + + count := len(ugids) + if count == 0 { + s.UserGroups = []UserGroup{} + return nil + } + + exists := make([]string, 0, count) + delete := false + for i := range ugids { + id, _ := strconv.ParseInt(ugids[i], 10, 64) + + ug, has := cache[id] + if has { + exists = append(exists, ugids[i]) + s.UserGroups = append(s.UserGroups, *ug) + continue + } + + ug, err := UserGroupGetById(id) + if err != nil { + return err + } + + if ug == nil { + delete = true + } else { + exists = append(exists, ugids[i]) + s.UserGroups = append(s.UserGroups, *ug) + cache[id] = ug + } + } + + if delete { + // some user-group already deleted + DB().Model(s).Update("user_group_ids", strings.Join(exists, " ")) + } + + return nil +} + +func (s *AlertSubscribe) Update(selectField interface{}, selectFields ...interface{}) error { + if err := s.Verify(); err != nil { + return err + } + + return DB().Model(s).Select(selectField, selectFields...).Updates(s).Error +} + +func AlertSubscribeDel(ids []int64) error { + if len(ids) == 0 { + return nil + } + return DB().Where("id in ?", ids).Delete(new(AlertSubscribe)).Error +} + +func AlertSubscribeStatistics(cluster string) (*Statistics, error) { + session := DB().Model(&AlertSubscribe{}).Select("count(*) as total", "max(update_at) as last_updated") + + if cluster != "" { + session = session.Where("cluster = ?", cluster) + } + + var stats []*Statistics + err := session.Find(&stats).Error + if err != nil { + return nil, err + } + + return stats[0], nil +} + +func AlertSubscribeGetsByCluster(cluster string) ([]*AlertSubscribe, error) { + // get my cluster's subscribes + session := DB().Model(&AlertSubscribe{}) + if cluster != "" { + session = session.Where("cluster = ?", cluster) + } + + var lst []*AlertSubscribe + err := session.Find(&lst).Error + return lst, err +} diff --git a/src/models/busi_group.go b/src/models/busi_group.go new file mode 100644 index 
00000000..677512c7 --- /dev/null +++ b/src/models/busi_group.go @@ -0,0 +1,257 @@ +package models + +import ( + "time" + + "github.com/pkg/errors" + "gorm.io/gorm" ) + +type BusiGroup struct { + Id int64 `json:"id" gorm:"primaryKey"` + Name string `json:"name"` + CreateAt int64 `json:"create_at"` + CreateBy string `json:"create_by"` + UpdateAt int64 `json:"update_at"` + UpdateBy string `json:"update_by"` + UserGroups []UserGroupWithPermFlag `json:"user_groups" gorm:"-"` +} + +type UserGroupWithPermFlag struct { + UserGroup *UserGroup `json:"user_group"` + PermFlag string `json:"perm_flag"` +} + +func (bg *BusiGroup) TableName() string { + return "busi_group" +} + +func (bg *BusiGroup) FillUserGroups() error { + members, err := BusiGroupMemberGetsByBusiGroupId(bg.Id) + if err != nil { + return err + } + + if len(members) == 0 { + return nil + } + + for i := 0; i < len(members); i++ { + ug, err := UserGroupGetById(members[i].UserGroupId) + if err != nil { + return err + } + bg.UserGroups = append(bg.UserGroups, UserGroupWithPermFlag{ + UserGroup: ug, + PermFlag: members[i].PermFlag, + }) + } + + return nil +} + +func BusiGroupGet(where string, args ...interface{}) (*BusiGroup, error) { + var lst []*BusiGroup + err := DB().Where(where, args...).Find(&lst).Error + if err != nil { + return nil, err + } + + if len(lst) == 0 { + return nil, nil + } + + return lst[0], nil +} + +func BusiGroupGetById(id int64) (*BusiGroup, error) { + return BusiGroupGet("id=?", id) +} + +func BusiGroupExists(where string, args ...interface{}) (bool, error) { + num, err := Count(DB().Model(&BusiGroup{}).Where(where, args...)) + return num > 0, err +} + +func (bg *BusiGroup) Del() error { + has, err := Exists(DB().Model(&AlertMute{}).Where("group_id=?", bg.Id)) + if err != nil { + return err + } + + if has { + return errors.New("Some alert mutes still in the BusiGroup") + } + + has, err = Exists(DB().Model(&AlertSubscribe{}).Where("group_id=?", bg.Id)) + if err != nil { + return err + } + + if has { + return errors.New("Some alert subscribes still in the BusiGroup") + } + + has, err = Exists(DB().Model(&Target{}).Where("group_id=?", bg.Id)) + if err != nil { + return err + } + + if has { + return errors.New("Some targets still in the BusiGroup") + } + + has, err = Exists(DB().Model(&Dashboard{}).Where("group_id=?", bg.Id)) + if err != nil { + return err + } + + if has { + return errors.New("Some dashboards still in the BusiGroup") + } + + has, err = Exists(DB().Model(&TaskTpl{}).Where("group_id=?", bg.Id)) + if err != nil { + return err + } + + if has { + return errors.New("Some recovery scripts still in the BusiGroup") + } + + // hasCR, err := Exists(DB().Table("collect_rule").Where("group_id=?", bg.Id)) + // if err != nil { + // return err + // } + + // if hasCR { + // return errors.New("Some collect rules still in the BusiGroup") + // } + + has, err = Exists(DB().Model(&AlertRule{}).Where("group_id=?", bg.Id)) + if err != nil { + return err + } + + if has { + return errors.New("Some alert rules still in the BusiGroup") + } + + return DB().Transaction(func(tx *gorm.DB) error { + if err := tx.Where("busi_group_id=?", bg.Id).Delete(&BusiGroupMember{}).Error; err != nil { + return err + } + + if err := tx.Where("id=?", bg.Id).Delete(&BusiGroup{}).Error; err != nil { + return err + } + + // this deserves careful thought: deleting the busi group also deletes its active alert events + // once the group is gone it has no alert rules left, so those active alerts will never recover + // and nobody cares about them anymore, so just delete them + if err := tx.Where("group_id=?", bg.Id).Delete(&AlertCurEvent{}).Error; err != nil { + return err + } + + return nil + }) 
+} + +func (bg *BusiGroup) AddMembers(members []BusiGroupMember, username string) error { + for i := 0; i < len(members); i++ { + err := BusiGroupMemberAdd(members[i]) + if err != nil { + return err + } + } + + return DB().Model(bg).Updates(map[string]interface{}{ + "update_at": time.Now().Unix(), + "update_by": username, + }).Error +} + +func (bg *BusiGroup) DelMembers(members []BusiGroupMember, username string) error { + for i := 0; i < len(members); i++ { + err := BusiGroupMemberDel("busi_group_id = ? and user_group_id = ?", members[i].BusiGroupId, members[i].UserGroupId) + if err != nil { + return err + } + } + + return DB().Model(bg).Updates(map[string]interface{}{ + "update_at": time.Now().Unix(), + "update_by": username, + }).Error +} + +func (bg *BusiGroup) Update(name string, updateBy string) error { + if bg.Name == name { + return nil + } + + exists, err := BusiGroupExists("name = ? and id <> ?", name, bg.Id) + if err != nil { + return errors.WithMessage(err, "failed to count BusiGroup") + } + + if exists { + return errors.New("BusiGroup already exists") + } + + return DB().Model(bg).Updates(map[string]interface{}{ + "name": name, + "update_at": time.Now().Unix(), + "update_by": updateBy, + }).Error +} + +func BusiGroupAdd(name string, members []BusiGroupMember, creator string) error { + exists, err := BusiGroupExists("name=?", name) + if err != nil { + return errors.WithMessage(err, "failed to count BusiGroup") + } + + if exists { + return errors.New("BusiGroup already exists") + } + + count := len(members) + for i := 0; i < count; i++ { + ug, err := UserGroupGet("id=?", members[i].UserGroupId) + if err != nil { + return errors.WithMessage(err, "failed to get UserGroup") + } + + if ug == nil { + return errors.New("Some UserGroup id not exists") + } + } + + now := time.Now().Unix() + obj := &BusiGroup{ + Name: name, + CreateAt: now, + CreateBy: creator, + UpdateAt: now, + UpdateBy: creator, + } + + return DB().Transaction(func(tx *gorm.DB) error { + if err := tx.Create(obj).Error; err != nil { + return err + } + + for i := 0; i < len(members); i++ { + if err := tx.Create(&BusiGroupMember{ + BusiGroupId: obj.Id, + UserGroupId: members[i].UserGroupId, + PermFlag: members[i].PermFlag, + }).Error; err != nil { + return err + } + } + + return nil + }) +} diff --git a/src/models/busi_group_member.go b/src/models/busi_group_member.go new file mode 100644 index 00000000..0d4b1d3c --- /dev/null +++ b/src/models/busi_group_member.go @@ -0,0 +1,92 @@ +package models + +type BusiGroupMember struct { + BusiGroupId int64 `json:"busi_group_id"` + UserGroupId int64 `json:"user_group_id"` + PermFlag string `json:"perm_flag"` +} + +func (BusiGroupMember) TableName() string { + return "busi_group_member" +} + +func BusiGroupIds(userGroupIds []int64, permFlag ...string) ([]int64, error) { + if len(userGroupIds) == 0 { + return []int64{}, nil + } + + session := DB().Model(&BusiGroupMember{}).Where("user_group_id in ?", userGroupIds) + if len(permFlag) > 0 { + session = session.Where("perm_flag=?", permFlag[0]) + } + + var ids []int64 + err := session.Pluck("busi_group_id", &ids).Error + return ids, err +} + +func UserGroupIdsOfBusiGroup(busiGroupId int64, permFlag ...string) ([]int64, error) { + session := DB().Model(&BusiGroupMember{}).Where("busi_group_id = ?", busiGroupId) + if len(permFlag) > 0 { + session = session.Where("perm_flag=?", permFlag[0]) + } + + var ids []int64 + err := session.Pluck("user_group_id", &ids).Error + return ids, err +} + +func BusiGroupMemberCount(where string, args 
...interface{}) (int64, error) { + return Count(DB().Model(&BusiGroupMember{}).Where(where, args...)) +} + +func BusiGroupMemberAdd(member BusiGroupMember) error { + obj, err := BusiGroupMemberGet("busi_group_id = ? and user_group_id = ?", member.BusiGroupId, member.UserGroupId) + if err != nil { + return err + } + + if obj == nil { + // insert + return Insert(&BusiGroupMember{ + BusiGroupId: member.BusiGroupId, + UserGroupId: member.UserGroupId, + PermFlag: member.PermFlag, + }) + } else { + // update + if obj.PermFlag == member.PermFlag { + return nil + } + + return DB().Model(&BusiGroupMember{}).Where("busi_group_id = ? and user_group_id = ?", member.BusiGroupId, member.UserGroupId).Update("perm_flag", member.PermFlag).Error + } +} + +func BusiGroupMemberGet(where string, args ...interface{}) (*BusiGroupMember, error) { + var lst []*BusiGroupMember + err := DB().Where(where, args...).Find(&lst).Error + if err != nil { + return nil, err + } + + if len(lst) == 0 { + return nil, nil + } + + return lst[0], nil +} + +func BusiGroupMemberDel(where string, args ...interface{}) error { + return DB().Where(where, args...).Delete(&BusiGroupMember{}).Error +} + +func BusiGroupMemberGets(where string, args ...interface{}) ([]BusiGroupMember, error) { + var lst []BusiGroupMember + err := DB().Where(where, args...).Order("perm_flag").Find(&lst).Error + return lst, err +} + +func BusiGroupMemberGetsByBusiGroupId(busiGroupId int64) ([]BusiGroupMember, error) { + return BusiGroupMemberGets("busi_group_id=?", busiGroupId) +} diff --git a/src/models/chart.go b/src/models/chart.go new file mode 100644 index 00000000..3dbf9dfa --- /dev/null +++ b/src/models/chart.go @@ -0,0 +1,30 @@ +package models + +type Chart struct { + Id int64 `json:"id" gorm:"primaryKey"` + GroupId int64 `json:"group_id"` + Configs string `json:"configs"` + Weight int `json:"weight"` +} + +func (c *Chart) TableName() string { + return "chart" +} + +func ChartsOf(chartGroupId int64) ([]Chart, error) { + var objs []Chart + err := DB().Where("group_id = ?", chartGroupId).Order("weight").Find(&objs).Error + return objs, err +} + +func (c *Chart) Add() error { + return Insert(c) +} + +func (c *Chart) Update(selectField interface{}, selectFields ...interface{}) error { + return DB().Model(c).Select(selectField, selectFields...).Updates(c).Error +} + +func (c *Chart) Del() error { + return DB().Where("id=?", c.Id).Delete(&Chart{}).Error +} diff --git a/src/models/chart_group.go b/src/models/chart_group.go new file mode 100644 index 00000000..a8b43e02 --- /dev/null +++ b/src/models/chart_group.go @@ -0,0 +1,80 @@ +package models + +import ( + "github.com/pkg/errors" + "github.com/toolkits/pkg/str" + "gorm.io/gorm" +) + +type ChartGroup struct { + Id int64 `json:"id" gorm:"primaryKey"` + DashboardId int64 `json:"dashboard_id"` + Name string `json:"name"` + Weight int `json:"weight"` +} + +func (cg *ChartGroup) TableName() string { + return "chart_group" +} + +func (cg *ChartGroup) Verify() error { + if cg.DashboardId <= 0 { + return errors.New("Arg(dashboard_id) invalid") + } + + if str.Dangerous(cg.Name) { + return errors.New("Name has invalid characters") + } + + return nil +} + +func (cg *ChartGroup) Add() error { + if err := cg.Verify(); err != nil { + return err + } + + return Insert(cg) +} + +func (cg *ChartGroup) Update(selectField interface{}, selectFields ...interface{}) error { + if err := cg.Verify(); err != nil { + return err + } + + return DB().Model(cg).Select(selectField, selectFields...).Updates(cg).Error +} + +func (cg 
*ChartGroup) Del() error { + return DB().Transaction(func(tx *gorm.DB) error { + if err := tx.Where("group_id=?", cg.Id).Delete(&Chart{}).Error; err != nil { + return err + } + + if err := tx.Where("id=?", cg.Id).Delete(&ChartGroup{}).Error; err != nil { + return err + } + + return nil + }) +} + +func NewDefaultChartGroup(dashId int64) error { + return Insert(&ChartGroup{ + DashboardId: dashId, + Name: "Default chart group", + Weight: 0, + }) +} + +func ChartGroupIdsOf(dashId int64) ([]int64, error) { + var ids []int64 + err := DB().Model(&ChartGroup{}).Where("dashboard_id = ?", dashId).Pluck("id", &ids).Error + return ids, err +} + +func ChartGroupsOf(dashId int64) ([]ChartGroup, error) { + var objs []ChartGroup + err := DB().Where("dashboard_id = ?", dashId).Order("weight").Find(&objs).Error + return objs, err +} diff --git a/src/models/chart_share.go b/src/models/chart_share.go new file mode 100644 index 00000000..69dc002b --- /dev/null +++ b/src/models/chart_share.go @@ -0,0 +1,27 @@ +package models + +type ChartShare struct { + Id int64 `json:"id" gorm:"primaryKey"` + Cluster string `json:"cluster"` + Configs string `json:"configs"` + CreateBy string `json:"create_by"` + CreateAt int64 `json:"create_at"` +} + +func (cs *ChartShare) TableName() string { + return "chart_share" +} + +func (cs *ChartShare) Add() error { + return Insert(cs) +} + +func ChartShareGetsByIds(ids []int64) ([]ChartShare, error) { + var lst []ChartShare + if len(ids) == 0 { + return lst, nil + } + + err := DB().Where("id in ?", ids).Order("id").Find(&lst).Error + return lst, err +} diff --git a/src/models/collect_rule.go b/src/models/collect_rule.go new file mode 100644 index 00000000..e35d557e --- /dev/null +++ b/src/models/collect_rule.go @@ -0,0 +1,212 @@ +package models + +import ( + "encoding/json" + "strings" + "time" + + "github.com/pkg/errors" + "github.com/toolkits/pkg/str" +) + +type CollectRule struct { + Id int64 `json:"id"` + GroupId int64 `json:"group_id"` + Cluster string `json:"cluster"` + TargetIdents string `json:"-"` + TargetIdentsJSON []string `json:"target_idents" gorm:"-"` + TargetTags string `json:"-"` + TargetTagsJSON []string `json:"target_tags" gorm:"-"` + Name string `json:"name"` + Note string `json:"note"` + Step int `json:"step"` + Type string `json:"type"` + Data string `json:"data"` + AppendTags string `json:"-"` + AppendTagsJSON []string `json:"append_tags" gorm:"-"` + CreateAt int64 `json:"create_at"` + CreateBy string `json:"create_by"` + UpdateAt int64 `json:"update_at"` + UpdateBy string `json:"update_by"` +} + +type PortConfig struct { + Port int `json:"port"` + Protocol string `json:"protocol"` // tcp or udp + Timeout int `json:"timeout"` // second +} + +type ProcConfig struct { + Method string `json:"method"` + Param string `json:"param"` +} + +type ScriptConfig struct { + Path string `json:"path"` + Params string `json:"params"` + Stdin string `json:"stdin"` + Env map[string]string `json:"env"` + Timeout int `json:"timeout"` // second +} + +type LogConfig struct { + FilePath string `json:"file_path"` + Func string `json:"func"` + Pattern string `json:"pattern"` + TagsPattern map[string]string `json:"tags_pattern"` +} + +func (cr *CollectRule) TableName() string { + return "collect_rule" +} + +func (cr *CollectRule) FE2DB() { + cr.TargetIdents = strings.Join(cr.TargetIdentsJSON, " ") + cr.TargetTags = strings.Join(cr.TargetTagsJSON, " ") + cr.AppendTags = strings.Join(cr.AppendTagsJSON, " ") +} + +func (cr *CollectRule) DB2FE() { + cr.TargetIdentsJSON = 
strings.Fields(cr.TargetIdents) + cr.TargetTagsJSON = strings.Fields(cr.TargetTags) + cr.AppendTagsJSON = strings.Fields(cr.AppendTags) +} + +func (cr *CollectRule) Verify() error { + if str.Dangerous(cr.Name) { + return errors.New("Name has invalid characters") + } + + if cr.TargetIdents == "" && cr.TargetTags == "" { + return errors.New("target_idents and target_tags are both blank") + } + + if cr.Step <= 0 { + cr.Step = 15 + } + + if cr.Cluster == "" { + return errors.New("cluster is blank") + } + + switch cr.Type { + case "port": + var conf PortConfig + err := json.Unmarshal([]byte(cr.Data), &conf) + if err != nil { + return err + } + case "script": + var conf ScriptConfig + err := json.Unmarshal([]byte(cr.Data), &conf) + if err != nil { + return err + } + case "log": + var conf LogConfig + err := json.Unmarshal([]byte(cr.Data), &conf) + if err != nil { + return err + } + case "process": + var conf ProcConfig + err := json.Unmarshal([]byte(cr.Data), &conf) + if err != nil { + return err + } + default: + return errors.New("unsupported type") + } + + return nil +} + +func CollectRuleDels(ids []int64, busiGroupId int64) error { + return DB().Where("id in ? and group_id=?", ids, busiGroupId).Delete(&CollectRule{}).Error +} + +func CollectRuleExists(where string, args ...interface{}) (bool, error) { + return Exists(DB().Model(&CollectRule{}).Where(where, args...)) +} + +func CollectRuleGets(groupId int64, typ string) ([]CollectRule, error) { + session := DB().Where("group_id=?", groupId).Order("name") + + if typ != "" { + session = session.Where("type = ?", typ) + } + + var lst []CollectRule + err := session.Find(&lst).Error + if err == nil { + for i := 0; i < len(lst); i++ { + lst[i].DB2FE() + } + } + + return lst, err +} + +func CollectRuleGet(where string, args ...interface{}) (*CollectRule, error) { + var lst []*CollectRule + err := DB().Where(where, args...).Find(&lst).Error + if err != nil { + return nil, err + } + + if len(lst) == 0 { + return nil, nil + } + + lst[0].DB2FE() + + return lst[0], nil +} + +func CollectRuleGetById(id int64) (*CollectRule, error) { + return CollectRuleGet("id=?", id) +} + +func (cr *CollectRule) Add() error { + if err := cr.Verify(); err != nil { + return err + } + + exists, err := CollectRuleExists("group_id=? and type=? and name=? and cluster=?", cr.GroupId, cr.Type, cr.Name, cr.Cluster) + if err != nil { + return err + } + + if exists { + return errors.New("CollectRule already exists") + } + + now := time.Now().Unix() + cr.CreateAt = now + cr.UpdateAt = now + + return Insert(cr) +} + +func (cr *CollectRule) Update(crf CollectRule) error { + if cr.Name != crf.Name { + exists, err := CollectRuleExists("group_id=? and type=? and name=? and id <> ? 
and cluster=?", cr.GroupId, cr.Type, crf.Name, cr.Id, cr.Cluster) + if err != nil { + return err + } + + if exists { + return errors.New("CollectRule already exists") + } + } + + crf.FE2DB() + crf.Id = cr.Id + crf.GroupId = cr.GroupId + crf.Type = cr.Type + crf.CreateAt = cr.CreateAt + crf.CreateBy = cr.CreateBy + crf.UpdateAt = time.Now().Unix() + + return DB().Model(cr).Select("*").Updates(crf).Error +} diff --git a/src/models/common.go b/src/models/common.go new file mode 100644 index 00000000..10906db3 --- /dev/null +++ b/src/models/common.go @@ -0,0 +1,42 @@ +package models + +import ( + "github.com/toolkits/pkg/str" + "gorm.io/gorm" + + "github.com/didi/nightingale/v5/src/storage" +) + +func DB() *gorm.DB { + return storage.DB +} + +func Count(tx *gorm.DB) (int64, error) { + var cnt int64 + err := tx.Count(&cnt).Error + return cnt, err +} + +func Exists(tx *gorm.DB) (bool, error) { + num, err := Count(tx) + return num > 0, err +} + +func Insert(obj interface{}) error { + return DB().Create(obj).Error +} + +// CryptoPass crypto password use salt +func CryptoPass(raw string) (string, error) { + salt, err := ConfigsGet("salt") + if err != nil { + return "", err + } + + return str.MD5(salt + "<-*Uk30^96eY*->" + raw), nil +} + +type Statistics struct { + Total int64 `gorm:"total"` + LastUpdated int64 `gorm:"last_updated"` +} diff --git a/models/configs.go b/src/models/configs.go similarity index 56% rename from models/configs.go rename to src/models/configs.go index feee570d..226a8ae3 100644 --- a/models/configs.go +++ b/src/models/configs.go @@ -6,17 +6,21 @@ import ( "os" "time" - "github.com/toolkits/pkg/logger" + "github.com/pkg/errors" "github.com/toolkits/pkg/runner" "github.com/toolkits/pkg/str" ) type Configs struct { - Id int64 + Id int64 `gorm:"primaryKey"` Ckey string Cval string } +func (Configs) TableName() string { + return "configs" +} + // InitSalt generate random salt func InitSalt() { val, err := ConfigsGet("salt") @@ -37,52 +41,44 @@ func InitSalt() { } func ConfigsGet(ckey string) (string, error) { - var obj Configs - has, err := DB.Where("ckey=?", ckey).Get(&obj) + var lst []string + err := DB().Model(&Configs{}).Where("ckey=?", ckey).Pluck("cval", &lst).Error if err != nil { - logger.Errorf("mysql.error: query configs(ckey=%s) fail: %v", ckey, err) - return "", internalServerError + return "", errors.WithMessage(err, "failed to query configs") } - if !has { - return "", nil + if len(lst) > 0 { + return lst[0], nil } - return obj.Cval, nil + return "", nil } func ConfigsSet(ckey, cval string) error { - var obj Configs - has, err := DB.Where("ckey=?", ckey).Get(&obj) + num, err := Count(DB().Model(&Configs{}).Where("ckey=?", ckey)) if err != nil { - logger.Errorf("mysql.error: get configs(ckey=%s) fail: %v", ckey, err) - return internalServerError + return errors.WithMessage(err, "failed to count configs") } - if !has { - _, err = DB.Insert(Configs{ + if num == 0 { + // insert + err = DB().Create(&Configs{ Ckey: ckey, Cval: cval, - }) + }).Error } else { - obj.Cval = cval - _, err = DB.Where("ckey=?", ckey).Cols("cval").Update(obj) + // update + err = DB().Model(&Configs{}).Where("ckey=?", ckey).Update("cval", cval).Error } - if err != nil { - logger.Errorf("mysql.error: set configs(ckey=%s, cval=%s) fail: %v", ckey, cval, err) - return internalServerError - } - - return nil + return err } func ConfigsGets(ckeys []string) (map[string]string, error) { var objs []Configs - err := DB.In("ckey", ckeys).Find(&objs) + err := DB().Where("ckey in ?", ckeys).Find(&objs).Error 
if err != nil { - logger.Errorf("mysql.error: gets configs fail: %v", err) - return nil, internalServerError + return nil, errors.WithMessage(err, "failed to gets configs") } count := len(ckeys) diff --git a/src/models/dashboard.go b/src/models/dashboard.go new file mode 100644 index 00000000..98f6964a --- /dev/null +++ b/src/models/dashboard.go @@ -0,0 +1,157 @@ +package models + +import ( + "strings" + "time" + + "github.com/pkg/errors" + "github.com/toolkits/pkg/str" + "gorm.io/gorm" +) + +type Dashboard struct { + Id int64 `json:"id" gorm:"primaryKey"` + GroupId int64 `json:"group_id"` + Name string `json:"name"` + Tags string `json:"-"` + TagsLst []string `json:"tags" gorm:"-"` + Configs string `json:"configs"` + CreateAt int64 `json:"create_at"` + CreateBy string `json:"create_by"` + UpdateAt int64 `json:"update_at"` + UpdateBy string `json:"update_by"` +} + +func (d *Dashboard) TableName() string { + return "dashboard" +} + +func (d *Dashboard) Verify() error { + if d.Name == "" { + return errors.New("Name is blank") + } + + if str.Dangerous(d.Name) { + return errors.New("Name has invalid characters") + } + + return nil +} + +func (d *Dashboard) Add() error { + if err := d.Verify(); err != nil { + return err + } + + exists, err := DashboardExists("group_id=? and name=?", d.GroupId, d.Name) + if err != nil { + return errors.WithMessage(err, "failed to count dashboard") + } + + if exists { + return errors.New("Dashboard already exists") + } + + now := time.Now().Unix() + d.CreateAt = now + d.UpdateAt = now + + return Insert(d) +} + +func (d *Dashboard) Update(selectField interface{}, selectFields ...interface{}) error { + if err := d.Verify(); err != nil { + return err + } + + return DB().Model(d).Select(selectField, selectFields...).Updates(d).Error +} + +func (d *Dashboard) Del() error { + cgids, err := ChartGroupIdsOf(d.Id) + if err != nil { + return err + } + + if len(cgids) == 0 { + return nil + } + + return DB().Transaction(func(tx *gorm.DB) error { + if err := tx.Where("group_id in ?", cgids).Delete(&Chart{}).Error; err != nil { + return err + } + + if err := tx.Where("dashboard_id=?", d.Id).Delete(&ChartGroup{}).Error; err != nil { + return err + } + + if err := tx.Where("id=?", d.Id).Delete(&Dashboard{}).Error; err != nil { + return err + } + + return nil + }) +} + +func DashboardGet(where string, args ...interface{}) (*Dashboard, error) { + var lst []*Dashboard + err := DB().Where(where, args...).Find(&lst).Error + if err != nil { + return nil, err + } + + if len(lst) == 0 { + return nil, nil + } + + lst[0].TagsLst = strings.Fields(lst[0].Tags) + + return lst[0], nil +} + +func DashboardCount(where string, args ...interface{}) (num int64, err error) { + return Count(DB().Model(&Dashboard{}).Where(where, args...)) +} + +func DashboardExists(where string, args ...interface{}) (bool, error) { + num, err := DashboardCount(where, args...) + return num > 0, err +} + +func DashboardGets(groupId int64, query string) ([]Dashboard, error) { + session := DB().Where("group_id=?", groupId).Order("name") + + arr := strings.Fields(query) + if len(arr) > 0 { + for i := 0; i < len(arr); i++ { + if strings.HasPrefix(arr[i], "-") { + q := "%" + arr[i][1:] + "%" + session = session.Where("name not like ? and tags not like ?", q, q) + } else { + q := "%" + arr[i] + "%" + session = session.Where("(name like ? 
or tags like ?)", q, q) + } + } + } + + var objs []Dashboard + err := session.Select("id", "group_id", "name", "tags", "create_at", "create_by", "update_at", "update_by").Find(&objs).Error + if err == nil { + for i := 0; i < len(objs); i++ { + objs[i].TagsLst = strings.Fields(objs[i].Tags) + } + } + + return objs, err +} + +func DashboardGetsByIds(ids []int64) ([]Dashboard, error) { + if len(ids) == 0 { + return []Dashboard{}, nil + } + + var lst []Dashboard + err := DB().Where("id in ?", ids).Order("name").Find(&lst).Error + return lst, err +} diff --git a/src/models/metric_description.go b/src/models/metric_description.go new file mode 100644 index 00000000..7dd13e25 --- /dev/null +++ b/src/models/metric_description.go @@ -0,0 +1,138 @@ +package models + +import ( + "strings" + "time" +) + +type MetricDescription struct { + Id int64 `json:"id"` + Metric string `json:"metric"` + Description string `json:"description"` + UpdateAt int64 `json:"update_at"` +} + +func (md *MetricDescription) TableName() string { + return "metric_description" +} + +func MetricDescriptionUpdate(mds []MetricDescription) error { + now := time.Now().Unix() + + for i := 0; i < len(mds); i++ { + mds[i].Metric = strings.TrimSpace(mds[i].Metric) + md, err := MetricDescriptionGet("metric = ?", mds[i].Metric) + if err != nil { + return err + } + + if md == nil { + // insert + mds[i].UpdateAt = now + err = Insert(&mds[i]) + if err != nil { + return err + } + } else { + // update + err = md.Update(mds[i].Description, now) + if err != nil { + return err + } + } + } + return nil +} + +func (md *MetricDescription) Update(desn string, now int64) error { + md.Description = desn + md.UpdateAt = now + return DB().Model(md).Select("Description", "UpdateAt").Updates(md).Error +} + +func MetricDescriptionGet(where string, args ...interface{}) (*MetricDescription, error) { + var lst []*MetricDescription + err := DB().Where(where, args...).Find(&lst).Error + if err != nil { + return nil, err + } + + if len(lst) == 0 { + return nil, nil + } + + return lst[0], nil +} + +func MetricDescriptionTotal(query string) (int64, error) { + session := DB().Model(&MetricDescription{}) + + if query != "" { + q := "%" + query + "%" + session = session.Where("metric like ? or description like ?", q, q) + } + + return Count(session) +} + +func MetricDescriptionGets(query string, limit, offset int) ([]MetricDescription, error) { + session := DB().Order("metric").Limit(limit).Offset(offset) + if query != "" { + q := "%" + query + "%" + session = session.Where("metric like ? 
or description like ?", q, q) + } + + var objs []MetricDescription + err := session.Find(&objs).Error + return objs, err +} + +func MetricDescGetAll() ([]MetricDescription, error) { + var objs []MetricDescription + err := DB().Find(&objs).Error + return objs, err +} + +func MetricDescStatistics() (*Statistics, error) { + session := DB().Model(&MetricDescription{}).Select("count(*) as total", "max(update_at) as last_updated") + + var stats []*Statistics + err := session.Find(&stats).Error + if err != nil { + return nil, err + } + + return stats[0], nil +} + +func MetricDescriptionMapper(metrics []string) (map[string]string, error) { + if len(metrics) == 0 { + return map[string]string{}, nil + } + + var objs []MetricDescription + err := DB().Where("metric in ?", metrics).Find(&objs).Error + if err != nil { + return nil, err + } + + count := len(objs) + if count == 0 { + return map[string]string{}, nil + } + + mapper := make(map[string]string, count) + for i := 0; i < count; i++ { + mapper[objs[i].Metric] = objs[i].Description + } + + return mapper, nil +} + +func MetricDescriptionDel(ids []int64) error { + if len(ids) == 0 { + return nil + } + + return DB().Where("id in ?", ids).Delete(new(MetricDescription)).Error +} diff --git a/models/role.go b/src/models/role.go similarity index 60% rename from models/role.go rename to src/models/role.go index 7a854891..00579363 100644 --- a/models/role.go +++ b/src/models/role.go @@ -1,9 +1,11 @@ package models -import "github.com/toolkits/pkg/logger" +import ( + "github.com/pkg/errors" +) type Role struct { - Id int64 `json:"id"` + Id int64 `json:"id" gorm:"primaryKey"` Name string `json:"name"` Note string `json:"note"` } @@ -14,10 +16,9 @@ func (Role) TableName() string { func RoleGets(where string, args ...interface{}) ([]Role, error) { var objs []Role - err := DB.Where(where, args...).OrderBy("name").Find(&objs) + err := DB().Where(where, args...).Order("name").Find(&objs).Error if err != nil { - logger.Errorf("mysql.error: list role fail: %v", err) - return objs, internalServerError + return nil, errors.WithMessage(err, "failed to query roles") } return objs, nil } diff --git a/src/models/role_operation.go b/src/models/role_operation.go new file mode 100644 index 00000000..10f27558 --- /dev/null +++ b/src/models/role_operation.go @@ -0,0 +1,35 @@ +package models + +import ( + "github.com/didi/nightingale/v5/src/webapi/config" + "github.com/toolkits/pkg/slice" +) + +type RoleOperation struct { + RoleName string + Operation string +} + +func (RoleOperation) TableName() string { + return "role_operation" +} + +func RoleHasOperation(roles []string, operation string) (bool, error) { + if len(roles) == 0 { + return false, nil + } + + return Exists(DB().Model(&RoleOperation{}).Where("operation = ? 
and role_name in ?", operation, roles)) +} + +func OperationsOfRole(roles []string) ([]string, error) { + session := DB().Model(&RoleOperation{}).Select("distinct(operation) as operation") + + if !slice.ContainsString(roles, config.C.AdminRole) { + session = session.Where("role_name in ?", roles) + } + + var ret []string + err := session.Pluck("operation", &ret).Error + return ret, err +} diff --git a/src/models/target.go b/src/models/target.go new file mode 100644 index 00000000..5e94fd0c --- /dev/null +++ b/src/models/target.go @@ -0,0 +1,269 @@ +package models + +import ( + "sort" + "strings" + "time" + + "github.com/pkg/errors" + "gorm.io/gorm" +) + +type Target struct { + Id int64 `json:"id" gorm:"primaryKey"` + GroupId int64 `json:"group_id"` + GroupObj *BusiGroup `json:"group_obj" gorm:"-"` + Cluster string `json:"cluster"` + Ident string `json:"ident"` + Note string `json:"note"` + Tags string `json:"-"` + TagsJSON []string `json:"tags" gorm:"-"` + TagsMap map[string]string `json:"-" gorm:"-"` // internal use, append tags to series + UpdateAt int64 `json:"update_at"` +} + +func (t *Target) TableName() string { + return "target" +} + +func (t *Target) Add() error { + obj, err := TargetGet("ident = ?", t.Ident) + if err != nil { + return err + } + + if obj == nil { + return Insert(t) + } + + return nil +} + +func (t *Target) FillGroup(cache map[int64]*BusiGroup) error { + if t.GroupId <= 0 { + return nil + } + + bg, has := cache[t.GroupId] + if has { + t.GroupObj = bg + return nil + } + + bg, err := BusiGroupGetById(t.GroupId) + if err != nil { + return errors.WithMessage(err, "failed to get busi group") + } + + t.GroupObj = bg + cache[t.GroupId] = bg + return nil +} + +func TargetStatistics(cluster string) (*Statistics, error) { + session := DB().Model(&Target{}).Select("count(*) as total", "max(update_at) as last_updated") + if cluster != "" { + session = session.Where("cluster = ?", cluster) + } + + var stats []*Statistics + err := session.Find(&stats).Error + if err != nil { + return nil, err + } + + return stats[0], nil +} + +func TargetDel(idents []string) error { + if len(idents) == 0 { + panic("idents empty") + } + return DB().Where("ident in ?", idents).Delete(new(Target)).Error +} + +func buildTargetWhere(bgid int64, clusters []string, query string) *gorm.DB { + session := DB().Model(&Target{}) + + if bgid >= 0 { + session = session.Where("group_id=?", bgid) + } + + if len(clusters) > 0 { + session = session.Where("cluster in ?", clusters) + } + + if query != "" { + arr := strings.Fields(query) + for i := 0; i < len(arr); i++ { + q := "%" + arr[i] + "%" + session = session.Where("ident like ? or note like ? 
or tags like ?", q, q, q) + } + } + + return session +} + +func TargetTotal(bgid int64, clusters []string, query string) (int64, error) { + return Count(buildTargetWhere(bgid, clusters, query)) +} + +func TargetGets(bgid int64, clusters []string, query string, limit, offset int) ([]*Target, error) { + var lst []*Target + err := buildTargetWhere(bgid, clusters, query).Order("ident").Limit(limit).Offset(offset).Find(&lst).Error + if err == nil { + for i := 0; i < len(lst); i++ { + lst[i].TagsJSON = strings.Fields(lst[i].Tags) + } + } + return lst, err +} + +func TargetGetsByCluster(cluster string) ([]*Target, error) { + session := DB().Model(&Target{}) + if cluster != "" { + session = session.Where("cluster = ?", cluster) + } + + var lst []*Target + err := session.Find(&lst).Error + return lst, err +} + +func TargetUpdateNote(idents []string, note string) error { + return DB().Model(&Target{}).Where("ident in ?", idents).Updates(map[string]interface{}{ + "note": note, + "update_at": time.Now().Unix(), + }).Error +} + +func TargetUpdateBgid(idents []string, bgid int64, clearTags bool) error { + fields := map[string]interface{}{ + "group_id": bgid, + "update_at": time.Now().Unix(), + } + + if clearTags { + fields["tags"] = "" + } + + return DB().Model(&Target{}).Where("ident in ?", idents).Updates(fields).Error +} + +func TargetGet(where string, args ...interface{}) (*Target, error) { + var lst []*Target + err := DB().Where(where, args...).Find(&lst).Error + if err != nil { + return nil, err + } + + if len(lst) == 0 { + return nil, nil + } + + lst[0].TagsJSON = strings.Fields(lst[0].Tags) + + return lst[0], nil +} + +func TargetGetById(id int64) (*Target, error) { + return TargetGet("id = ?", id) +} + +func TargetGetByIdent(ident string) (*Target, error) { + return TargetGet("ident = ?", ident) +} + +func TargetGetTags(idents []string) ([]string, error) { + if len(idents) == 0 { + return []string{}, nil + } + + var arr []string + err := DB().Model(new(Target)).Where("ident in ?", idents).Select("distinct(tags) as tags").Pluck("tags", &arr).Error + if err != nil { + return nil, err + } + + cnt := len(arr) + if cnt == 0 { + return []string{}, nil + } + + set := make(map[string]struct{}) + for i := 0; i < cnt; i++ { + tags := strings.Fields(arr[i]) + for j := 0; j < len(tags); j++ { + set[tags[j]] = struct{}{} + } + } + + cnt = len(set) + ret := make([]string, 0, cnt) + for key := range set { + ret = append(ret, key) + } + + sort.Strings(ret) + + return ret, err +} + +func (t *Target) AddTags(tags []string) error { + for i := 0; i < len(tags); i++ { + if -1 == strings.Index(t.Tags, tags[i]+" ") { + t.Tags += tags[i] + " " + } + } + + arr := strings.Fields(t.Tags) + sort.Strings(arr) + + return DB().Model(t).Updates(map[string]interface{}{ + "tags": strings.Join(arr, " ") + " ", + "update_at": time.Now().Unix(), + }).Error +} + +func (t *Target) DelTags(tags []string) error { + for i := 0; i < len(tags); i++ { + t.Tags = strings.ReplaceAll(t.Tags, tags[i]+" ", "") + } + + return DB().Model(t).Updates(map[string]interface{}{ + "tags": t.Tags, + "update_at": time.Now().Unix(), + }).Error +} + +func TargetIdents(ids []int64) ([]string, error) { + var ret []string + + if len(ids) == 0 { + return ret, nil + } + + err := DB().Model(&Target{}).Where("id in ?", ids).Pluck("ident", &ret).Error + return ret, err +} + +func TargetIds(idents []string) ([]int64, error) { + var ret []int64 + + if len(idents) == 0 { + return ret, nil + } + + err := DB().Model(&Target{}).Where("ident in ?", idents).Pluck("id", 
&ret).Error + return ret, err +} + +func IdentsFilter(idents []string, where string, args ...interface{}) ([]string, error) { + var arr []string + if len(idents) == 0 { + return arr, nil + } + + err := DB().Model(&Target{}).Where("ident in ?", idents).Where(where, args...).Pluck("ident", &arr).Error + return arr, err +} diff --git a/src/models/task_record.go b/src/models/task_record.go new file mode 100644 index 00000000..2a5812f0 --- /dev/null +++ b/src/models/task_record.go @@ -0,0 +1,64 @@ +package models + +type TaskRecord struct { + Id int64 `json:"id" gorm:"primaryKey"` + GroupId int64 `json:"group_id"` + IbexAddress string `json:"ibex_address"` + IbexAuthUser string `json:"ibex_auth_user"` + IbexAuthPass string `json:"ibex_auth_pass"` + Title string `json:"title"` + Account string `json:"account"` + Batch int `json:"batch"` + Tolerance int `json:"tolerance"` + Timeout int `json:"timeout"` + Pause string `json:"pause"` + Script string `json:"script"` + Args string `json:"args"` + CreateAt int64 `json:"create_at"` + CreateBy string `json:"create_by"` +} + +func (r *TaskRecord) TableName() string { + return "task_record" +} + +// create task +func (r *TaskRecord) Add() error { + return Insert(r) +} + +// list task, filter by group_id, create_by +func TaskRecordTotal(bgid, beginTime int64, createBy, query string) (int64, error) { + session := DB().Model(new(TaskRecord)).Where("create_at > ? and group_id = ?", beginTime, bgid) + + if createBy != "" { + session = session.Where("create_by = ?", createBy) + } + + if query != "" { + session = session.Where("title like ?", "%"+query+"%") + } + + return Count(session) +} + +func TaskRecordGets(bgid, beginTime int64, createBy, query string, limit, offset int) ([]*TaskRecord, error) { + session := DB().Where("create_at > ? and group_id = ?", beginTime, bgid).Order("create_at desc").Limit(limit).Offset(offset) + + if createBy != "" { + session = session.Where("create_by = ?", createBy) + } + + if query != "" { + session = session.Where("title like ?", "%"+query+"%") + } + + var lst []*TaskRecord + err := session.Find(&lst).Error + return lst, err +} + +// update is_done field +func (r *TaskRecord) UpdateIsDone(isDone int) error { + return DB().Model(r).Update("is_done", isDone).Error +} diff --git a/src/models/task_tpl.go b/src/models/task_tpl.go new file mode 100644 index 00000000..53ed0dc5 --- /dev/null +++ b/src/models/task_tpl.go @@ -0,0 +1,294 @@ +package models + +import ( + "errors" + "fmt" + "sort" + "strings" + "time" + + "github.com/toolkits/pkg/str" + "gorm.io/gorm" +) + +type TaskTpl struct { + Id int64 `json:"id" gorm:"primaryKey"` + GroupId int64 `json:"group_id"` + Title string `json:"title"` + Batch int `json:"batch"` + Tolerance int `json:"tolerance"` + Timeout int `json:"timeout"` + Pause string `json:"pause"` + Script string `json:"script"` + Args string `json:"args"` + Tags string `json:"-"` + TagsJSON []string `json:"tags" gorm:"-"` + Account string `json:"account"` + CreateAt int64 `json:"create_at"` + CreateBy string `json:"create_by"` + UpdateAt int64 `json:"update_at"` + UpdateBy string `json:"update_by"` +} + +func (t *TaskTpl) TableName() string { + return "task_tpl" +} + +func TaskTplTotal(groupId int64, query string) (int64, error) { + session := DB().Model(&TaskTpl{}).Where("group_id = ?", groupId) + if query == "" { + return Count(session) + } + + arr := strings.Fields(query) + for i := 0; i < len(arr); i++ { + arg := "%" + arr[i] + "%" + session = session.Where("title like ? 
or tags like ?", arg, arg) + } + + return Count(session) +} + +func TaskTplGets(groupId int64, query string, limit, offset int) ([]TaskTpl, error) { + session := DB().Where("group_id = ?", groupId).Order("title").Limit(limit).Offset(offset) + + var tpls []TaskTpl + if query != "" { + arr := strings.Fields(query) + for i := 0; i < len(arr); i++ { + arg := "%" + arr[i] + "%" + session = session.Where("title like ? or tags like ?", arg, arg) + } + } + + err := session.Find(&tpls).Error + if err == nil { + for i := 0; i < len(tpls); i++ { + tpls[i].TagsJSON = strings.Fields(tpls[i].Tags) + } + } + + return tpls, err +} + +func TaskTplGet(where string, args ...interface{}) (*TaskTpl, error) { + var arr []*TaskTpl + err := DB().Where(where, args...).Find(&arr).Error + if err != nil { + return nil, err + } + + if len(arr) == 0 { + return nil, nil + } + + arr[0].TagsJSON = strings.Fields(arr[0].Tags) + + return arr[0], nil +} + +func (t *TaskTpl) CleanFields() error { + if t.Batch < 0 { + return errors.New("arg(batch) should be nonnegative") + } + + if t.Tolerance < 0 { + return errors.New("arg(tolerance) should be nonnegative") + } + + if t.Timeout < 0 { + return errors.New("arg(timeout) should be nonnegative") + } + + if t.Timeout == 0 { + t.Timeout = 30 + } + + if t.Timeout > 3600*24 { + return errors.New("arg(timeout) longer than one day") + } + + t.Pause = strings.Replace(t.Pause, ",", ",", -1) + t.Pause = strings.Replace(t.Pause, " ", "", -1) + t.Args = strings.Replace(t.Args, ",", ",", -1) + t.Tags = strings.Replace(t.Tags, ",", ",", -1) + + if t.Title == "" { + return errors.New("arg(title) is required") + } + + if str.Dangerous(t.Title) { + return errors.New("arg(title) is dangerous") + } + + if t.Script == "" { + return errors.New("arg(script) is required") + } + + if str.Dangerous(t.Args) { + return errors.New("arg(args) is dangerous") + } + + if str.Dangerous(t.Pause) { + return errors.New("arg(pause) is dangerous") + } + + if str.Dangerous(t.Tags) { + return errors.New("arg(tags) is dangerous") + } + + return nil +} + +func (t *TaskTpl) Save(hosts []string) error { + if err := t.CleanFields(); err != nil { + return err + } + + cnt, err := Count(DB().Model(&TaskTpl{}).Where("group_id=? and title=?", t.GroupId, t.Title)) + if err != nil { + return err + } + + if cnt > 0 { + return fmt.Errorf("task template already exists") + } + + return DB().Transaction(func(tx *gorm.DB) error { + if err := tx.Create(t).Error; err != nil { + return err + } + + for i := 0; i < len(hosts); i++ { + host := strings.TrimSpace(hosts[i]) + if host == "" { + continue + } + + err := tx.Table("task_tpl_host").Create(map[string]interface{}{ + "id": t.Id, + "host": host, + }).Error + + if err != nil { + return err + } + } + + return nil + }) +} + +func (t *TaskTpl) Hosts() ([]string, error) { + var arr []string + err := DB().Table("task_tpl_host").Where("id=?", t.Id).Order("ii").Pluck("host", &arr).Error + return arr, err +} + +func (t *TaskTpl) Update(hosts []string) error { + if err := t.CleanFields(); err != nil { + return err + } + + cnt, err := Count(DB().Model(&TaskTpl{}).Where("group_id=? and title=? 
and id <> ?", t.GroupId, t.Title, t.Id)) + if err != nil { + return err + } + + if cnt > 0 { + return fmt.Errorf("task template already exists") + } + + return DB().Transaction(func(tx *gorm.DB) error { + err := tx.Model(t).Updates(map[string]interface{}{ + "title": t.Title, + "batch": t.Batch, + "tolerance": t.Tolerance, + "timeout": t.Timeout, + "pause": t.Pause, + "script": t.Script, + "args": t.Args, + "tags": t.Tags, + "account": t.Account, + "update_by": t.UpdateBy, + "update_at": t.UpdateAt, + }).Error + + if err != nil { + return err + } + + if err = tx.Exec("DELETE FROM task_tpl_host WHERE id = ?", t.Id).Error; err != nil { + return err + } + + for i := 0; i < len(hosts); i++ { + host := strings.TrimSpace(hosts[i]) + if host == "" { + continue + } + + err := tx.Table("task_tpl_host").Create(map[string]interface{}{ + "id": t.Id, + "host": host, + }).Error + + if err != nil { + return err + } + } + + return nil + }) +} + +func (t *TaskTpl) Del() error { + return DB().Transaction(func(tx *gorm.DB) error { + if err := tx.Exec("DELETE FROM task_tpl_host WHERE id=?", t.Id).Error; err != nil { + return err + } + + if err := tx.Delete(t).Error; err != nil { + return err + } + + return nil + }) +} + +func (t *TaskTpl) AddTags(tags []string, updateBy string) error { + for i := 0; i < len(tags); i++ { + if -1 == strings.Index(t.Tags, tags[i]+" ") { + t.Tags += tags[i] + " " + } + } + + arr := strings.Fields(t.Tags) + sort.Strings(arr) + + return DB().Model(t).Updates(map[string]interface{}{ + "tags": strings.Join(arr, " ") + " ", + "update_by": updateBy, + "update_at": time.Now().Unix(), + }).Error +} + +func (t *TaskTpl) DelTags(tags []string, updateBy string) error { + for i := 0; i < len(tags); i++ { + t.Tags = strings.ReplaceAll(t.Tags, tags[i]+" ", "") + } + + return DB().Model(t).Updates(map[string]interface{}{ + "tags": t.Tags, + "update_by": updateBy, + "update_at": time.Now().Unix(), + }).Error +} + +func (t *TaskTpl) UpdateGroup(groupId int64, updateBy string) error { + return DB().Model(t).Updates(map[string]interface{}{ + "group_id": groupId, + "update_by": updateBy, + "update_at": time.Now().Unix(), + }).Error +} diff --git a/src/models/user.go b/src/models/user.go new file mode 100644 index 00000000..094c34f6 --- /dev/null +++ b/src/models/user.go @@ -0,0 +1,459 @@ +package models + +import ( + "fmt" + "os" + "strings" + "time" + + "github.com/pkg/errors" + "github.com/toolkits/pkg/slice" + "github.com/toolkits/pkg/str" + "gorm.io/gorm" + + "github.com/didi/nightingale/v5/src/pkg/ldapx" + "github.com/didi/nightingale/v5/src/pkg/ormx" + "github.com/didi/nightingale/v5/src/webapi/config" +) + +type User struct { + Id int64 `json:"id" gorm:"primaryKey"` + Username string `json:"username"` + Nickname string `json:"nickname"` + Password string `json:"-"` + Phone string `json:"phone"` + Email string `json:"email"` + Portrait string `json:"portrait"` + Roles string `json:"-"` // 这个字段写入数据库 + RolesLst []string `json:"roles" gorm:"-"` // 这个字段和前端交互 + Contacts ormx.JSONObj `json:"contacts"` // 内容为 map[string]string 结构 + CreateAt int64 `json:"create_at"` + CreateBy string `json:"create_by"` + UpdateAt int64 `json:"update_at"` + UpdateBy string `json:"update_by"` + Admin bool `json:"admin" gorm:"-"` // 方便前端使用 +} + +func (u *User) TableName() string { + return "user" +} + +func (u *User) IsAdmin() bool { + for i := 0; i < len(u.RolesLst); i++ { + if u.RolesLst[i] == config.C.AdminRole { + return true + } + } + return false +} + +func (u *User) Verify() error { + u.Username = 
strings.TrimSpace(u.Username) + + if u.Username == "" { + return errors.New("Username is blank") + } + + if str.Dangerous(u.Username) { + return errors.New("Username has invalid characters") + } + + if str.Dangerous(u.Nickname) { + return errors.New("Nickname has invalid characters") + } + + if u.Phone != "" && !str.IsPhone(u.Phone) { + return errors.New("Phone invalid") + } + + if u.Email != "" && !str.IsMail(u.Email) { + return errors.New("Email invalid") + } + + return nil +} + +func (u *User) Add() error { + user, err := UserGetByUsername(u.Username) + if err != nil { + return errors.WithMessage(err, "failed to query user") + } + + if user != nil { + return errors.New("Username already exists") + } + + now := time.Now().Unix() + u.CreateAt = now + u.UpdateAt = now + return Insert(u) +} + +func (u *User) UpdateAllFields() error { + if err := u.Verify(); err != nil { + return err + } + + u.UpdateAt = time.Now().Unix() + return DB().Model(u).Select("*").Updates(u).Error +} + +func (u *User) UpdatePassword(password, updateBy string) error { + return DB().Model(u).Updates(map[string]interface{}{ + "password": password, + "update_at": time.Now().Unix(), + "update_by": updateBy, + }).Error +} + +func (u *User) Del() error { + return DB().Transaction(func(tx *gorm.DB) error { + if err := tx.Where("user_id=?", u.Id).Delete(&UserGroupMember{}).Error; err != nil { + return err + } + + if err := tx.Where("id=?", u.Id).Delete(&User{}).Error; err != nil { + return err + } + + return nil + }) +} + +func (u *User) ChangePassword(oldpass, newpass string) error { + _oldpass, err := CryptoPass(oldpass) + if err != nil { + return err + } + + _newpass, err := CryptoPass(newpass) + if err != nil { + return err + } + + if u.Password != _oldpass { + return errors.New("Incorrect old password") + } + + return u.UpdatePassword(_newpass, u.Username) +} + +func UserGet(where string, args ...interface{}) (*User, error) { + var lst []*User + err := DB().Where(where, args...).Find(&lst).Error + if err != nil { + return nil, err + } + + if len(lst) == 0 { + return nil, nil + } + + lst[0].RolesLst = strings.Fields(lst[0].Roles) + lst[0].Admin = lst[0].IsAdmin() + + return lst[0], nil +} + +func UserGetByUsername(username string) (*User, error) { + return UserGet("username=?", username) +} + +func UserGetById(id int64) (*User, error) { + return UserGet("id=?", id) +} + +func InitRoot() { + user, err := UserGetByUsername("root") + if err != nil { + fmt.Println("failed to query user root:", err) + os.Exit(1) + } + + if user == nil { + return + } + + if len(user.Password) > 31 { + // already done before + return + } + + newPass, err := CryptoPass(user.Password) + if err != nil { + fmt.Println("failed to crypto pass:", err) + os.Exit(1) + } + + err = DB().Model(user).Update("password", newPass).Error + if err != nil { + fmt.Println("failed to update root password:", err) + os.Exit(1) + } + + fmt.Println("root password init done") +} + +func PassLogin(username, pass string) (*User, error) { + user, err := UserGetByUsername(username) + if err != nil { + return nil, err + } + + if user == nil { + return nil, fmt.Errorf("Username or password invalid") + } + + loginPass, err := CryptoPass(pass) + if err != nil { + return nil, err + } + + if loginPass != user.Password { + return nil, fmt.Errorf("Username or password invalid") + } + + return user, nil +} + +func LdapLogin(username, pass string) (*User, error) { + sr, err := ldapx.LdapReq(username, pass) + if err != nil { + return nil, err + } + + user, err := 
UserGetByUsername(username) + if err != nil { + return nil, err + } + + if user == nil { + // default user settings + user = &User{ + Username: username, + Nickname: username, + } + } + + // copy attributes from ldap + attrs := ldapx.LDAP.Attributes + if attrs.Nickname != "" { + user.Nickname = sr.Entries[0].GetAttributeValue(attrs.Nickname) + } + if attrs.Email != "" { + user.Email = sr.Entries[0].GetAttributeValue(attrs.Email) + } + if attrs.Phone != "" { + user.Phone = sr.Entries[0].GetAttributeValue(attrs.Phone) + } + + if user.Id > 0 { + if ldapx.LDAP.CoverAttributes { + err := DB().Updates(user).Error + if err != nil { + return nil, errors.WithMessage(err, "failed to update user") + } + } + return user, nil + } + + now := time.Now().Unix() + + user.Password = "******" + user.Portrait = "" + user.Roles = "Standard" + user.RolesLst = []string{"Standard"} + user.Contacts = []byte("{}") + user.CreateAt = now + user.UpdateAt = now + user.CreateBy = "ldap" + user.UpdateBy = "ldap" + + err = DB().Create(user).Error + return user, err +} + +func UserTotal(query string) (num int64, err error) { + if query != "" { + q := "%" + query + "%" + num, err = Count(DB().Model(&User{}).Where("username like ? or nickname like ? or phone like ? or email like ?", q, q, q, q)) + } else { + num, err = Count(DB().Model(&User{})) + } + + if err != nil { + return num, errors.WithMessage(err, "failed to count user") + } + + return num, nil +} + +func UserGets(query string, limit, offset int) ([]User, error) { + session := DB().Limit(limit).Offset(offset).Order("username") + if query != "" { + q := "%" + query + "%" + session = session.Where("username like ? or nickname like ? or phone like ? or email like ?", q, q, q, q) + } + + var users []User + err := session.Find(&users).Error + if err != nil { + return users, errors.WithMessage(err, "failed to query user") + } + + for i := 0; i < len(users); i++ { + users[i].RolesLst = strings.Fields(users[i].Roles) + users[i].Admin = users[i].IsAdmin() + } + + return users, nil +} + +func UserGetAll() ([]*User, error) { + var lst []*User + err := DB().Find(&lst).Error + if err == nil { + for i := 0; i < len(lst); i++ { + lst[i].RolesLst = strings.Fields(lst[i].Roles) + lst[i].Admin = lst[i].IsAdmin() + } + } + return lst, err +} + +func UserGetsByIds(ids []int64) ([]User, error) { + if len(ids) == 0 { + return []User{}, nil + } + + var lst []User + err := DB().Where("id in ?", ids).Order("username").Find(&lst).Error + if err == nil { + for i := 0; i < len(lst); i++ { + lst[i].RolesLst = strings.Fields(lst[i].Roles) + lst[i].Admin = lst[i].IsAdmin() + } + } + + return lst, err +} + +func (u *User) CanModifyUserGroup(ug *UserGroup) (bool, error) { + // 我是管理员,自然可以 + if u.IsAdmin() { + return true, nil + } + + // 我是创建者,自然可以 + if ug.CreateBy == u.Username { + return true, nil + } + + // 我是成员,也可以吧,简单搞 + num, err := UserGroupMemberCount("user_id=? and group_id=?", u.Id, ug.Id) + if err != nil { + return false, err + } + + return num > 0, nil +} + +func (u *User) CanDoBusiGroup(bg *BusiGroup, permFlag ...string) (bool, error) { + if u.IsAdmin() { + return true, nil + } + + // 我在任意一个UserGroup里,就有权限 + ugids, err := UserGroupIdsOfBusiGroup(bg.Id, permFlag...) + if err != nil { + return false, err + } + + if len(ugids) == 0 { + return false, nil + } + + num, err := UserGroupMemberCount("user_id = ? 
and group_id in ?", u.Id, ugids) + return num > 0, err +} + +func (u *User) CheckPerm(operation string) (bool, error) { + if u.IsAdmin() { + return true, nil + } + + return RoleHasOperation(u.RolesLst, operation) +} + +func UserStatistics() (*Statistics, error) { + session := DB().Model(&User{}).Select("count(*) as total", "max(update_at) as last_updated") + + var stats []*Statistics + err := session.Find(&stats).Error + if err != nil { + return nil, err + } + + return stats[0], nil +} + +func (u *User) NopriIdents(idents []string) ([]string, error) { + if u.IsAdmin() { + return []string{}, nil + } + + ugids, err := MyGroupIds(u.Id) + if err != nil { + return []string{}, err + } + + if len(ugids) == 0 { + return idents, nil + } + + bgids, err := BusiGroupIds(ugids, "rw") + if err != nil { + return []string{}, err + } + + if len(bgids) == 0 { + return idents, nil + } + + var arr []string + err = DB().Model(&Target{}).Where("group_id in ?", bgids).Pluck("ident", &arr).Error + if err != nil { + return []string{}, err + } + + return slice.SubString(idents, arr), nil +} + +// 我是管理员,返回所有 +// 或者我是成员 +func (u *User) BusiGroups(limit int, query string) ([]BusiGroup, error) { + session := DB().Order("name").Limit(limit) + + var lst []BusiGroup + if u.IsAdmin() { + err := session.Where("name like ?", "%"+query+"%").Find(&lst).Error + return lst, err + } + + userGroupIds, err := MyGroupIds(u.Id) + if err != nil { + return nil, errors.WithMessage(err, "failed to get MyGroupIds") + } + + busiGroupIds, err := BusiGroupIds(userGroupIds) + if err != nil { + return nil, errors.WithMessage(err, "failed to get BusiGroupIds") + } + + if len(busiGroupIds) == 0 { + return lst, nil + } + + err = session.Where("id in ?", busiGroupIds).Where("name like ?", "%"+query+"%").Find(&lst).Error + return lst, err +} diff --git a/src/models/user_group.go b/src/models/user_group.go new file mode 100644 index 00000000..15de67f7 --- /dev/null +++ b/src/models/user_group.go @@ -0,0 +1,166 @@ +package models + +import ( + "time" + + "github.com/pkg/errors" + "github.com/toolkits/pkg/str" + "gorm.io/gorm" +) + +type UserGroup struct { + Id int64 `json:"id" gorm:"primaryKey"` + Name string `json:"name"` + Note string `json:"note"` + CreateAt int64 `json:"create_at"` + CreateBy string `json:"create_by"` + UpdateAt int64 `json:"update_at"` + UpdateBy string `json:"update_by"` + UserIds []int64 `json:"-" gorm:"-"` +} + +func (ug *UserGroup) TableName() string { + return "user_group" +} + +func (ug *UserGroup) Verify() error { + if str.Dangerous(ug.Name) { + return errors.New("Name has invalid characters") + } + + if str.Dangerous(ug.Note) { + return errors.New("Note has invalid characters") + } + + return nil +} + +func (ug *UserGroup) Update(selectField interface{}, selectFields ...interface{}) error { + if err := ug.Verify(); err != nil { + return err + } + + return DB().Model(ug).Select(selectField, selectFields...).Updates(ug).Error +} + +func UserGroupCount(where string, args ...interface{}) (num int64, err error) { + return Count(DB().Model(&UserGroup{}).Where(where, args...)) +} + +func (ug *UserGroup) Add() error { + if err := ug.Verify(); err != nil { + return err + } + + num, err := UserGroupCount("name=?", ug.Name) + if err != nil { + return errors.WithMessage(err, "failed to count user-groups") + } + + if num > 0 { + return errors.New("UserGroup already exists") + } + + now := time.Now().Unix() + ug.CreateAt = now + ug.UpdateAt = now + return Insert(ug) +} + +func (ug *UserGroup) Del() error { + return 
DB().Transaction(func(tx *gorm.DB) error { + if err := tx.Where("group_id=?", ug.Id).Delete(&UserGroupMember{}).Error; err != nil { + return err + } + + if err := tx.Where("id=?", ug.Id).Delete(&UserGroup{}).Error; err != nil { + return err + } + + return nil + }) +} + +func GroupsOf(u *User) ([]UserGroup, error) { + ids, err := MyGroupIds(u.Id) + if err != nil { + return nil, errors.WithMessage(err, "failed to get MyGroupIds") + } + + session := DB().Where("create_by = ?", u.Username) + if len(ids) > 0 { + session = session.Or("id in ?", ids) + } + + var lst []UserGroup + err = session.Order("name").Find(&lst).Error + return lst, err +} + +func UserGroupGet(where string, args ...interface{}) (*UserGroup, error) { + var lst []*UserGroup + err := DB().Where(where, args...).Find(&lst).Error + if err != nil { + return nil, err + } + + if len(lst) == 0 { + return nil, nil + } + + return lst[0], nil +} + +func UserGroupGetById(id int64) (*UserGroup, error) { + return UserGroupGet("id = ?", id) +} + +func UserGroupGetByIds(ids []int64) ([]UserGroup, error) { + var lst []UserGroup + if len(ids) == 0 { + return lst, nil + } + + err := DB().Where("id in ?", ids).Order("name").Find(&lst).Error + return lst, err +} + +func UserGroupGetAll() ([]*UserGroup, error) { + var lst []*UserGroup + err := DB().Find(&lst).Error + return lst, err +} + +func (ug *UserGroup) AddMembers(userIds []int64) error { + count := len(userIds) + for i := 0; i < count; i++ { + user, err := UserGetById(userIds[i]) + if err != nil { + return err + } + if user == nil { + continue + } + err = UserGroupMemberAdd(ug.Id, user.Id) + if err != nil { + return err + } + } + return nil +} + +func (ug *UserGroup) DelMembers(userIds []int64) error { + return UserGroupMemberDel(ug.Id, userIds) +} + +func UserGroupStatistics() (*Statistics, error) { + session := DB().Model(&UserGroup{}).Select("count(*) as total", "max(update_at) as last_updated") + + var stats []*Statistics + err := session.Find(&stats).Error + if err != nil { + return nil, err + } + + return stats[0], nil +} diff --git a/models/user_group_member.go b/src/models/user_group_member.go similarity index 52% rename from models/user_group_member.go rename to src/models/user_group_member.go index daddca5a..8d536568 100644 --- a/models/user_group_member.go +++ b/src/models/user_group_member.go @@ -1,9 +1,5 @@ package models -import ( - "github.com/toolkits/pkg/logger" -) - type UserGroupMember struct { GroupId int64 UserId int64 @@ -13,19 +9,20 @@ func (UserGroupMember) TableName() string { return "user_group_member" } -func UserGroupMemberGetAll() ([]UserGroupMember, error) { - var objs []UserGroupMember - err := DB.Find(&objs) - return objs, err +func MyGroupIds(userId int64) ([]int64, error) { + var ids []int64 + err := DB().Model(&UserGroupMember{}).Where("user_id=?", userId).Pluck("group_id", &ids).Error + return ids, err +} + +func MemberIds(groupId int64) ([]int64, error) { + var ids []int64 + err := DB().Model(&UserGroupMember{}).Where("group_id=?", groupId).Pluck("user_id", &ids).Error + return ids, err } func UserGroupMemberCount(where string, args ...interface{}) (int64, error) { - num, err := DB.Where(where, args...).Count(new(UserGroupMember)) - if err != nil { - logger.Errorf("mysql.error: count user_group_member(where=%s, args=%+v) fail: %v", where, args, err) - return 0, internalServerError - } - return num, nil + return Count(DB().Model(&UserGroupMember{}).Where(where, args...)) } func UserGroupMemberAdd(groupId, userId int64) error { @@ -44,7 +41,7 @@ func 
UserGroupMemberAdd(groupId, userId int64) error { UserId: userId, } - return DBInsertOne(obj) + return Insert(obj) } func UserGroupMemberDel(groupId int64, userIds []int64) error { @@ -52,11 +49,11 @@ func UserGroupMemberDel(groupId int64, userIds []int64) error { return nil } - _, err := DB.Where("group_id=?", groupId).In("user_id", userIds).Delete(new(UserGroupMember)) - if err != nil { - logger.Errorf("mysql.error: delete user_group_member fail: %v", err) - return internalServerError - } - - return nil + return DB().Where("group_id = ? and user_id in ?", groupId, userIds).Delete(&UserGroupMember{}).Error +} + +func UserGroupMemberGetAll() ([]UserGroupMember, error) { + var lst []UserGroupMember + err := DB().Find(&lst).Error + return lst, err } diff --git a/pkg/iaop/logger.go b/src/pkg/aop/logger.go similarity index 99% rename from pkg/iaop/logger.go rename to src/pkg/aop/logger.go index ca12d8c8..0af299f8 100644 --- a/pkg/iaop/logger.go +++ b/src/pkg/aop/logger.go @@ -1,4 +1,4 @@ -package iaop +package aop import ( "bytes" @@ -8,10 +8,9 @@ import ( "os" "time" - "github.com/toolkits/pkg/logger" - "github.com/gin-gonic/gin" "github.com/mattn/go-isatty" + "github.com/toolkits/pkg/logger" ) type consoleColorModeValue int diff --git a/pkg/iaop/recovery.go b/src/pkg/aop/recovery.go similarity index 93% rename from pkg/iaop/recovery.go rename to src/pkg/aop/recovery.go index 55754c54..5672a2ab 100644 --- a/pkg/iaop/recovery.go +++ b/src/pkg/aop/recovery.go @@ -1,4 +1,4 @@ -package iaop +package aop // Copyright 2014 Manu Martinez-Almeida. All rights reserved. // Use of this source code is governed by a MIT style @@ -19,8 +19,8 @@ import ( "time" "github.com/gin-gonic/gin" - - "github.com/didi/nightingale/v5/pkg/ierr" + "github.com/toolkits/pkg/errorx" + "github.com/toolkits/pkg/i18n" ) var ( @@ -45,8 +45,12 @@ func RecoveryWithWriter(out io.Writer) gin.HandlerFunc { defer func() { if err := recover(); err != nil { // custom error - if e, ok := err.(ierr.PageError); ok { - c.JSON(e.Code, gin.H{"err": e.Message}) + if e, ok := err.(errorx.PageError); ok { + if e.Code != 200 { + c.String(e.Code, i18n.Sprintf(c.GetHeader("X-Language"), e.Message)) + } else { + c.JSON(e.Code, gin.H{"err": i18n.Sprintf(c.GetHeader("X-Language"), e.Message)}) + } c.Abort() return } diff --git a/src/pkg/httpx/httpx.go b/src/pkg/httpx/httpx.go new file mode 100644 index 00000000..0baa31dc --- /dev/null +++ b/src/pkg/httpx/httpx.go @@ -0,0 +1,66 @@ +package httpx + +import ( + "context" + "crypto/tls" + "fmt" + "net/http" + "time" +) + +type Config struct { + Host string + Port int + CertFile string + KeyFile string + PProf bool + PrintAccessLog bool + ShutdownTimeout int + MaxContentLength int64 + ReadTimeout int + WriteTimeout int + IdleTimeout int +} + +func Init(cfg Config, handler http.Handler) func() { + addr := fmt.Sprintf("%s:%d", cfg.Host, cfg.Port) + srv := &http.Server{ + Addr: addr, + Handler: handler, + ReadTimeout: time.Duration(cfg.ReadTimeout) * time.Second, + WriteTimeout: time.Duration(cfg.WriteTimeout) * time.Second, + IdleTimeout: time.Duration(cfg.IdleTimeout) * time.Second, + } + + go func() { + fmt.Println("http server listening on:", addr) + + var err error + if cfg.CertFile != "" && cfg.KeyFile != "" { + srv.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12} + err = srv.ListenAndServeTLS(cfg.CertFile, cfg.KeyFile) + } else { + err = srv.ListenAndServe() + } + if err != nil && err != http.ErrServerClosed { + panic(err) + } + }() + + return func() { + ctx, cancel := 
context.WithTimeout(context.Background(), time.Second*time.Duration(cfg.ShutdownTimeout)) + defer cancel() + + srv.SetKeepAlivesEnabled(false) + if err := srv.Shutdown(ctx); err != nil { + fmt.Println("cannot shutdown http server:", err) + } + + select { + case <-ctx.Done(): + fmt.Println("http exiting") + default: + fmt.Println("http server stopped") + } + } +} diff --git a/src/pkg/ibex/ibex.go b/src/pkg/ibex/ibex.go new file mode 100644 index 00000000..a80182ed --- /dev/null +++ b/src/pkg/ibex/ibex.go @@ -0,0 +1,181 @@ +package ibex + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strings" + "time" +) + +type Ibex struct { + address string + authUser string + authPass string + timeout time.Duration + method string + urlPath string + inValue interface{} + outPtr interface{} + headers map[string]string + queries map[string][]string +} + +func New(addr, user, pass string, timeout int64) *Ibex { + if !strings.HasPrefix(addr, "http") { + addr = "http://" + addr + } + + return &Ibex{ + address: addr, + authUser: user, + authPass: pass, + timeout: time.Duration(timeout) * time.Millisecond, + headers: make(map[string]string), + queries: make(map[string][]string), + } +} + +func (i *Ibex) In(v interface{}) *Ibex { + i.inValue = v + return i +} + +func (i *Ibex) Out(ptr interface{}) *Ibex { + i.outPtr = ptr + return i +} + +func (i *Ibex) Path(p string) *Ibex { + i.urlPath = p + return i +} + +func (i *Ibex) Method(m string) *Ibex { + i.method = strings.ToUpper(m) + return i +} + +func (i *Ibex) Header(key, value string) *Ibex { + i.headers[key] = value + return i +} + +func (i *Ibex) QueryString(key, value string) *Ibex { + if param, ok := i.queries[key]; ok { + i.queries[key] = append(param, value) + } else { + i.queries[key] = []string{value} + } + return i +} + +func (i *Ibex) buildUrl() { + var queries string + if len(i.queries) > 0 { + var buf bytes.Buffer + for k, v := range i.queries { + for _, vv := range v { + buf.WriteString(url.QueryEscape(k)) + buf.WriteByte('=') + buf.WriteString(url.QueryEscape(vv)) + buf.WriteByte('&') + } + } + queries = buf.String() + queries = queries[0 : len(queries)-1] + } + + if len(queries) > 0 { + if strings.Contains(i.urlPath, "?") { + i.urlPath += "&" + queries + } else { + i.urlPath = i.urlPath + "?" 
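// Illustrative sketch (not part of the patch): httpx.Init above starts the
// listener in a goroutine and hands back a cleanup closure; callers invoke
// that closure on shutdown so the server drains within cfg.ShutdownTimeout
// seconds. The port, timeouts and handler below are placeholders.
package main

import (
	"net/http"
	"os"
	"os/signal"
	"syscall"

	"github.com/didi/nightingale/v5/src/pkg/httpx"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/ping", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("pong"))
	})

	clean := httpx.Init(httpx.Config{
		Host:            "0.0.0.0",
		Port:            18000,
		ReadTimeout:     20,
		WriteTimeout:    40,
		IdleTimeout:     120,
		ShutdownTimeout: 10,
	}, mux)

	// block until SIGINT/SIGTERM, then stop the listener gracefully
	quit := make(chan os.Signal, 1)
	signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
	<-quit
	clean()
}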
+ queries + } + } +} + +func (i *Ibex) do() error { + i.buildUrl() + + var req *http.Request + var err error + + if i.inValue != nil { + bs, err := json.Marshal(i.inValue) + if err != nil { + return err + } + req, err = http.NewRequest(i.method, i.address+i.urlPath, bytes.NewBuffer(bs)) + } else { + req, err = http.NewRequest(i.method, i.address+i.urlPath, nil) + } + + if err != nil { + return err + } + + for key, value := range i.headers { + req.Header.Set(key, value) + } + + if i.authUser != "" { + req.SetBasicAuth(i.authUser, i.authPass) + } + + if i.method != http.MethodGet { + req.Header.Set("Content-Type", "application/json") + } + + client := http.Client{ + Timeout: i.timeout, + } + + res, err := client.Do(req) + if err != nil { + return err + } + + if res.StatusCode != 200 { + return fmt.Errorf("url(%s) response code: %v", i.urlPath, res.StatusCode) + } + + if res.Body != nil { + defer res.Body.Close() + } + + payload, err := ioutil.ReadAll(res.Body) + if err != nil { + return err + } + + return json.Unmarshal(payload, i.outPtr) +} + +func (i *Ibex) GET() error { + i.Method(http.MethodGet) + return i.do() +} + +func (i *Ibex) POST() error { + i.Method(http.MethodPost) + return i.do() +} + +func (i *Ibex) PUT() error { + i.Method(http.MethodPut) + return i.do() +} + +func (i *Ibex) DELETE() error { + i.Method(http.MethodDelete) + return i.do() +} + +func (i *Ibex) PATCH() error { + i.Method(http.MethodPatch) + return i.do() +} diff --git a/src/pkg/ldapx/ldapx.go b/src/pkg/ldapx/ldapx.go new file mode 100644 index 00000000..e8e4de77 --- /dev/null +++ b/src/pkg/ldapx/ldapx.go @@ -0,0 +1,109 @@ +package ldapx + +import ( + "crypto/tls" + "fmt" + + "github.com/go-ldap/ldap/v3" +) + +type LdapSection struct { + Enable bool + Host string + Port int + BaseDn string + BindUser string + BindPass string + AuthFilter string + Attributes LdapAttributes + CoverAttributes bool + TLS bool + StartTLS bool +} + +type LdapAttributes struct { + Nickname string `yaml:"nickname"` + Phone string `yaml:"phone"` + Email string `yaml:"email"` +} + +var LDAP LdapSection + +func Init(ldap LdapSection) { + LDAP = ldap +} + +func genLdapAttributeSearchList() []string { + var ldapAttributes []string + attrs := LDAP.Attributes + if attrs.Nickname != "" { + ldapAttributes = append(ldapAttributes, attrs.Nickname) + } + if attrs.Email != "" { + ldapAttributes = append(ldapAttributes, attrs.Email) + } + if attrs.Phone != "" { + ldapAttributes = append(ldapAttributes, attrs.Phone) + } + return ldapAttributes +} + +func LdapReq(user, pass string) (*ldap.SearchResult, error) { + var conn *ldap.Conn + var err error + lc := LDAP + addr := fmt.Sprintf("%s:%d", lc.Host, lc.Port) + + if lc.TLS { + conn, err = ldap.DialTLS("tcp", addr, &tls.Config{InsecureSkipVerify: true}) + } else { + conn, err = ldap.Dial("tcp", addr) + } + + if err != nil { + return nil, fmt.Errorf("ldap.error: cannot dial ldap(%s): %v", addr, err) + } + + defer conn.Close() + + if !lc.TLS && lc.StartTLS { + if err := conn.StartTLS(&tls.Config{InsecureSkipVerify: true}); err != nil { + return nil, fmt.Errorf("ldap.error: conn startTLS fail: %v", err) + } + } + + // if bindUser is empty, anonymousSearch mode + if lc.BindUser != "" { + // BindSearch mode + if err := conn.Bind(lc.BindUser, lc.BindPass); err != nil { + return nil, fmt.Errorf("ldap.error: bind ldap fail: %v, use user(%s) to bind", err, lc.BindUser) + } + } + + searchRequest := ldap.NewSearchRequest( + lc.BaseDn, // The base dn to search + ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, 
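// Illustrative sketch (not part of the patch): driving the fluent Ibex client
// defined above. This mirrors the call shape used later in
// src/server/engine/callback.go (Path("/ibex/v1/tasks").In(...).Out(...).POST());
// the address, credentials and payload here are placeholders. Note the timeout
// argument is in milliseconds, and an address without a scheme gets "http://"
// prepended by New.
package main

import (
	"fmt"

	"github.com/didi/nightingale/v5/src/pkg/ibex"
)

func main() {
	in := map[string]interface{}{"action": "start"} // request body, JSON-encoded by In()
	var out struct {
		Err string `json:"err"`
		Dat int64  `json:"dat"`
	}

	err := ibex.New("127.0.0.1:10090", "ibex", "ibex", 3000).
		Path("/ibex/v1/tasks").
		In(in).    // request body
		Out(&out). // response body is unmarshaled into out
		POST()
	if err != nil {
		fmt.Println("ibex call failed:", err)
		return
	}
	fmt.Println("task id:", out.Dat)
}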
false, + fmt.Sprintf(lc.AuthFilter, user), // The filter to apply + genLdapAttributeSearchList(), // A list attributes to retrieve + nil, + ) + + sr, err := conn.Search(searchRequest) + if err != nil { + return nil, fmt.Errorf("ldap.error: ldap search fail: %v", err) + } + + if len(sr.Entries) == 0 { + return nil, fmt.Errorf("Username or password invalid") + } + + if len(sr.Entries) > 1 { + return nil, fmt.Errorf("ldap.error: search user(%s), multi entries found", user) + } + + if err := conn.Bind(sr.Entries[0].DN, pass); err != nil { + return nil, fmt.Errorf("Username or password invalid") + } + + return sr, nil +} diff --git a/src/pkg/logx/logx.go b/src/pkg/logx/logx.go new file mode 100644 index 00000000..5710e762 --- /dev/null +++ b/src/pkg/logx/logx.go @@ -0,0 +1,46 @@ +package logx + +import ( + "fmt" + + "github.com/pkg/errors" + "github.com/toolkits/pkg/logger" +) + +type Config struct { + Dir string + Level string + Output string + KeepHours uint + RotateNum int + RotateSize uint64 +} + +func Init(c Config) (func(), error) { + logger.SetSeverity(c.Level) + + if c.Output == "stderr" { + logger.LogToStderr() + } else if c.Output == "file" { + lb, err := logger.NewFileBackend(c.Dir) + if err != nil { + return nil, errors.WithMessage(err, "NewFileBackend failed") + } + + if c.KeepHours != 0 { + lb.SetRotateByHour(true) + lb.SetKeepHours(c.KeepHours) + } else if c.RotateNum != 0 { + lb.Rotate(c.RotateNum, c.RotateSize*1024*1024) + } else { + return nil, errors.New("KeepHours and Rotatenum both are 0") + } + + logger.SetLogging(c.Level, lb) + } + + return func() { + fmt.Println("logger exiting") + logger.Close() + }, nil +} diff --git a/src/pkg/ormx/ormx.go b/src/pkg/ormx/ormx.go new file mode 100644 index 00000000..6eb403a5 --- /dev/null +++ b/src/pkg/ormx/ormx.go @@ -0,0 +1,64 @@ +package ormx + +import ( + "fmt" + "strings" + "time" + + "gorm.io/driver/mysql" + "gorm.io/driver/postgres" + "gorm.io/gorm" + "gorm.io/gorm/schema" +) + +// Config GORM Config +type Config struct { + Debug bool + DBType string + DSN string + MaxLifetime int + MaxOpenConns int + MaxIdleConns int + TablePrefix string +} + +// New Create gorm.DB instance +func New(c Config) (*gorm.DB, error) { + var dialector gorm.Dialector + + switch strings.ToLower(c.DBType) { + case "mysql": + dialector = mysql.Open(c.DSN) + case "postgres": + dialector = postgres.Open(c.DSN) + default: + return nil, fmt.Errorf("dialector(%s) not supported", c.DBType) + } + + gconfig := &gorm.Config{ + NamingStrategy: schema.NamingStrategy{ + TablePrefix: c.TablePrefix, + SingularTable: true, + }, + } + + db, err := gorm.Open(dialector, gconfig) + if err != nil { + return nil, err + } + + if c.Debug { + db = db.Debug() + } + + sqlDB, err := db.DB() + if err != nil { + return nil, err + } + + sqlDB.SetMaxIdleConns(c.MaxIdleConns) + sqlDB.SetMaxOpenConns(c.MaxOpenConns) + sqlDB.SetConnMaxLifetime(time.Duration(c.MaxLifetime) * time.Second) + + return db, nil +} diff --git a/src/pkg/ormx/types.go b/src/pkg/ormx/types.go new file mode 100644 index 00000000..51790b6f --- /dev/null +++ b/src/pkg/ormx/types.go @@ -0,0 +1,87 @@ +package ormx + +import ( + "database/sql/driver" + "encoding/json" + "errors" + "fmt" +) + +type JSONObj json.RawMessage +type JSONArr json.RawMessage + +// 实现 sql.Scanner 接口,Scan 将 value 扫描至 Jsonb +func (j *JSONObj) Scan(value interface{}) error { + bytes, ok := value.([]byte) + if !ok { + return errors.New(fmt.Sprint("Failed to unmarshal JSONB value:", value)) + } + + result := json.RawMessage{} + err := 
json.Unmarshal(bytes, &result) + *j = JSONObj(result) + return err +} + +// 实现 driver.Valuer 接口,Value 返回 json value +func (j JSONObj) Value() (driver.Value, error) { + if len(j) == 0 { + return nil, nil + } + return json.RawMessage(j).MarshalJSON() +} + +func (j *JSONObj) MarshalJSON() ([]byte, error) { + ret := []byte(*j) + if len(ret) == 0 { + return []byte(`{}`), nil + } + // not valid json + if ret[0] == '"' { + return []byte(`{}`), nil + } + return ret, nil +} + +func (j *JSONObj) UnmarshalJSON(data []byte) error { + *j = JSONObj(data) + return nil +} + +// 实现 sql.Scanner 接口,Scan 将 value 扫描至 Jsonb +func (j *JSONArr) Scan(value interface{}) error { + bytes, ok := value.([]byte) + if !ok { + return errors.New(fmt.Sprint("Failed to unmarshal JSONB value:", value)) + } + + result := json.RawMessage{} + err := json.Unmarshal(bytes, &result) + *j = JSONArr(result) + return err +} + +// 实现 driver.Valuer 接口,Value 返回 json value +func (j JSONArr) Value() (driver.Value, error) { + if len(j) == 0 { + return nil, nil + } + return json.RawMessage(j).MarshalJSON() +} + +func (j *JSONArr) MarshalJSON() ([]byte, error) { + ret := []byte(*j) + if len(ret) == 0 { + return []byte(`[]`), nil + } + // not valid json + if ret[0] == '"' { + return []byte(`[]`), nil + } + return ret, nil +} + +func (j *JSONArr) UnmarshalJSON(data []byte) error { + *j = JSONArr(data) + return nil +} diff --git a/src/server/config/config.go b/src/server/config/config.go new file mode 100644 index 00000000..e1990f7d --- /dev/null +++ b/src/server/config/config.go @@ -0,0 +1,144 @@ +package config + +import ( + "fmt" + "net" + "os" + "strings" + "sync" + + "github.com/gin-gonic/gin" + "github.com/koding/multiconfig" + + "github.com/didi/nightingale/v5/src/pkg/httpx" + "github.com/didi/nightingale/v5/src/pkg/logx" + "github.com/didi/nightingale/v5/src/server/naming" + "github.com/didi/nightingale/v5/src/server/reader" + "github.com/didi/nightingale/v5/src/server/writer" + "github.com/didi/nightingale/v5/src/storage" +) + +var ( + C = new(Config) + once sync.Once +) + +func MustLoad(fpaths ...string) { + once.Do(func() { + loaders := []multiconfig.Loader{ + &multiconfig.TagLoader{}, + &multiconfig.EnvironmentLoader{}, + } + + for _, fpath := range fpaths { + handled := false + + if strings.HasSuffix(fpath, "toml") { + loaders = append(loaders, &multiconfig.TOMLLoader{Path: fpath}) + handled = true + } + if strings.HasSuffix(fpath, "conf") { + loaders = append(loaders, &multiconfig.TOMLLoader{Path: fpath}) + handled = true + } + if strings.HasSuffix(fpath, "json") { + loaders = append(loaders, &multiconfig.JSONLoader{Path: fpath}) + handled = true + } + if strings.HasSuffix(fpath, "yaml") { + loaders = append(loaders, &multiconfig.YAMLLoader{Path: fpath}) + handled = true + } + + if !handled { + fmt.Println("config file invalid, valid file exts: .conf,.yaml,.toml,.json") + os.Exit(1) + } + } + + m := multiconfig.DefaultLoader{ + Loader: multiconfig.MultiLoader(loaders...), + Validator: multiconfig.MultiValidator(&multiconfig.RequiredValidator{}), + } + m.MustLoad(C) + + if C.Heartbeat.IP == "" { + // auto detect + C.Heartbeat.IP = fmt.Sprint(GetOutboundIP()) + + if C.Heartbeat.IP == "" { + fmt.Println("heartbeat ip auto got is blank") + os.Exit(1) + } + } + + C.Heartbeat.Endpoint = fmt.Sprintf("%s:%d", C.Heartbeat.IP, C.HTTP.Port) + C.Heartbeat.Cluster = C.ClusterName + + C.Alerting.RedisPub.ChannelKey = C.Alerting.RedisPub.ChannelPrefix + C.ClusterName + + fmt.Println("heartbeat.ip:", C.Heartbeat.IP) + 
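// Illustrative sketch (not part of the patch): ormx.JSONObj / JSONArr above
// let a JSON column round-trip as raw bytes (sql.Scanner / driver.Valuer)
// while still marshaling sensibly to the API ("{}" / "[]" instead of null when
// empty). The User model earlier stores Contacts this way as a map[string]string.
// The map contents below are examples only; no database is touched here.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/didi/nightingale/v5/src/pkg/ormx"
)

func main() {
	// write side: encode a contacts map into the column value
	contacts := map[string]string{"wecom": "tok-123", "dingtalk": "tok-456"}
	bs, _ := json.Marshal(contacts)
	col := ormx.JSONObj(bs)

	// read side: decode the column back into a map
	var got map[string]string
	_ = json.Unmarshal(col, &got)
	fmt.Println(got["wecom"])

	// an empty JSONObj still serializes as {} for the frontend
	var empty ormx.JSONObj
	out, _ := empty.MarshalJSON()
	fmt.Println(string(out)) // {}
}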
fmt.Printf("heartbeat.interval: %dms\n", C.Heartbeat.Interval) + }) +} + +type Config struct { + RunMode string + ClusterName string + Log logx.Config + HTTP httpx.Config + BasicAuth gin.Accounts + Heartbeat naming.HeartbeatConfig + Alerting Alerting + NoData NoData + Redis storage.RedisConfig + Gorm storage.Gorm + MySQL storage.MySQL + Postgres storage.Postgres + WriterOpt writer.GlobalOpt + Writers []writer.Options + Reader reader.Options + Ibex Ibex +} + +type Alerting struct { + NotifyScriptPath string + NotifyConcurrency int + RedisPub RedisPub +} + +type RedisPub struct { + Enable bool + ChannelPrefix string + ChannelKey string +} + +type NoData struct { + Metric string + Interval int64 +} + +type Ibex struct { + Address string + BasicAuthUser string + BasicAuthPass string + Timeout int64 +} + +func (c *Config) IsDebugMode() bool { + return c.RunMode == "debug" +} + +// Get preferred outbound ip of this machine +func GetOutboundIP() net.IP { + conn, err := net.Dial("udp", "8.8.8.8:80") + if err != nil { + fmt.Println("auto get outbound ip fail:", err) + os.Exit(1) + } + defer conn.Close() + + localAddr := conn.LocalAddr().(*net.UDPAddr) + + return localAddr.IP +} diff --git a/src/server/engine/callback.go b/src/server/engine/callback.go new file mode 100644 index 00000000..292acbca --- /dev/null +++ b/src/server/engine/callback.go @@ -0,0 +1,192 @@ +package engine + +import ( + "strconv" + "strings" + "time" + + "github.com/toolkits/pkg/logger" + "github.com/toolkits/pkg/net/httplib" + + "github.com/didi/nightingale/v5/src/models" + "github.com/didi/nightingale/v5/src/pkg/ibex" + "github.com/didi/nightingale/v5/src/server/config" + "github.com/didi/nightingale/v5/src/server/memsto" +) + +func callback(event *models.AlertCurEvent) { + urls := strings.Fields(event.Callbacks) + for _, url := range urls { + if url == "" { + continue + } + + if strings.HasPrefix(url, "${ibex}") && !event.IsRecovered { + handleIbex(url, event) + continue + } + + if !(strings.HasPrefix(url, "http://") || strings.HasPrefix(url, "https://")) { + url = "http://" + url + } + + resp, code, err := httplib.PostJSON(url, 5*time.Second, event, map[string]string{}) + if err != nil { + logger.Errorf("event_callback(rule_id=%d url=%s) fail, resp: %s, err: %v, code: %d", event.RuleId, url, string(resp), err, code) + } else { + logger.Infof("event_callback(rule_id=%d url=%s) succ, resp: %s, code: %d", event.RuleId, url, string(resp), code) + } + } +} + +type TaskForm struct { + Title string `json:"title"` + Account string `json:"account"` + Batch int `json:"batch"` + Tolerance int `json:"tolerance"` + Timeout int `json:"timeout"` + Pause string `json:"pause"` + Script string `json:"script"` + Args string `json:"args"` + Action string `json:"action"` + Creator string `json:"creator"` + Hosts []string `json:"hosts"` +} + +type TaskCreateReply struct { + Err string `json:"err"` + Dat int64 `json:"dat"` // task.id +} + +func handleIbex(url string, event *models.AlertCurEvent) { + arr := strings.Split(url, "/") + + var idstr string + var host string + + if len(arr) > 1 { + idstr = arr[1] + } + + if len(arr) > 2 { + host = arr[2] + } + + id, err := strconv.ParseInt(idstr, 10, 64) + if err != nil { + logger.Errorf("event_callback_ibex: failed to parse url: %s", url) + return + } + + if host == "" { + // 用户在callback url中没有传入host,就从event中解析 + host = event.TargetIdent + } + + if host == "" { + logger.Error("event_callback_ibex: failed to get host") + return + } + + tpl, err := models.TaskTplGet("id = ?", id) + if err != nil { + 
logger.Errorf("event_callback_ibex: failed to get tpl: %v", err) + return + } + + if tpl == nil { + logger.Errorf("event_callback_ibex: no such tpl(%d)", id) + return + } + + // check perm + // tpl.GroupId - host - account 三元组校验权限 + can, err := canDoIbex(tpl.UpdateBy, tpl, host) + if err != nil { + logger.Errorf("event_callback_ibex: check perm fail: %v", err) + return + } + + if !can { + logger.Errorf("event_callback_ibex: user(%s) no permission", tpl.UpdateBy) + return + } + + // call ibex + in := TaskForm{ + Title: tpl.Title + " FH: " + host, + Account: tpl.Account, + Batch: tpl.Batch, + Tolerance: tpl.Tolerance, + Timeout: tpl.Timeout, + Pause: tpl.Pause, + Script: tpl.Script, + Args: tpl.Args, + Action: "start", + Creator: tpl.UpdateBy, + Hosts: []string{host}, + } + + var res TaskCreateReply + err = ibex.New( + config.C.Ibex.Address, + config.C.Ibex.BasicAuthUser, + config.C.Ibex.BasicAuthPass, + config.C.Ibex.Timeout, + ). + Path("/ibex/v1/tasks"). + In(in). + Out(&res). + POST() + + if err != nil { + logger.Errorf("event_callback_ibex: call ibex fail: %v", err) + return + } + + if res.Err != "" { + logger.Errorf("event_callback_ibex: call ibex response error: %v", res.Err) + return + } + + // write db + record := models.TaskRecord{ + Id: res.Dat, + GroupId: tpl.GroupId, + IbexAddress: config.C.Ibex.Address, + IbexAuthUser: config.C.Ibex.BasicAuthUser, + IbexAuthPass: config.C.Ibex.BasicAuthPass, + Title: in.Title, + Account: in.Account, + Batch: in.Batch, + Tolerance: in.Tolerance, + Timeout: in.Timeout, + Pause: in.Pause, + Script: in.Script, + Args: in.Args, + CreateAt: time.Now().Unix(), + CreateBy: in.Creator, + } + + if err = record.Add(); err != nil { + logger.Errorf("event_callback_ibex: persist task_record fail: %v", err) + } +} + +func canDoIbex(username string, tpl *models.TaskTpl, host string) (bool, error) { + user, err := models.UserGetByUsername(username) + if err != nil { + return false, err + } + + if user != nil && user.IsAdmin() { + return true, nil + } + + target, has := memsto.TargetCache.Get(host) + if !has { + return false, nil + } + + return target.GroupId == tpl.GroupId, nil +} diff --git a/src/server/engine/consume.go b/src/server/engine/consume.go new file mode 100644 index 00000000..2388f5be --- /dev/null +++ b/src/server/engine/consume.go @@ -0,0 +1,151 @@ +package engine + +import ( + "context" + "strconv" + "time" + + "github.com/toolkits/pkg/concurrent/semaphore" + "github.com/toolkits/pkg/logger" + + "github.com/didi/nightingale/v5/src/models" + "github.com/didi/nightingale/v5/src/server/config" + "github.com/didi/nightingale/v5/src/server/memsto" +) + +func loopConsume(ctx context.Context) { + sema := semaphore.NewSemaphore(config.C.Alerting.NotifyConcurrency) + duration := time.Duration(100) * time.Millisecond + for { + events := EventQueue.PopBackBy(100) + if len(events) == 0 { + time.Sleep(duration) + continue + } + consume(events, sema) + } +} + +func consume(events []interface{}, sema *semaphore.Semaphore) { + for i := range events { + if events[i] == nil { + continue + } + + event := events[i].(*models.AlertCurEvent) + sema.Acquire() + go func(event *models.AlertCurEvent) { + defer sema.Release() + consumeOne(event) + }(event) + } +} + +func consumeOne(event *models.AlertCurEvent) { + logEvent(event, "consume") + persist(event) + fillUsers(event) + callback(event) + notify(event) +} + +func persist(event *models.AlertCurEvent) { + has, err := models.AlertCurEventExists("hash=?", event.Hash) + if err != nil { + 
logger.Errorf("event_persist_check_exists_fail: %v rule_id=%d hash=%s", err, event.RuleId, event.Hash) + return + } + + his := event.ToHis() + + if has { + // 数据库里有这个事件,说明之前触发过了 + if event.IsRecovered { + // 本次恢复了,把未恢复的事件删除,在全量告警里添加记录 + err := models.AlertCurEventDelByHash(event.Hash) + if err != nil { + logger.Errorf("event_del_cur_fail: %v hash=%s", err, event.Hash) + } + + if err := his.Add(); err != nil { + logger.Errorf( + "event_persist_his_fail: %v rule_id=%d hash=%s tags=%v timestamp=%d value=%s", + err, + event.RuleId, + event.Hash, + event.TagsJSON, + event.TriggerTime, + event.TriggerValue, + ) + } + } + return + } + + if event.IsRecovered { + // alert_cur_event表里没有数据,表示之前没告警,结果现在报了恢复,神奇....理论上不应该出现的 + return + } + + // 本次是告警,alert_cur_event表里也没有数据 + if err := his.Add(); err != nil { + logger.Errorf( + "event_persist_his_fail: %v rule_id=%d hash=%s tags=%v timestamp=%d value=%s", + err, + event.RuleId, + event.Hash, + event.TagsJSON, + event.TriggerTime, + event.TriggerValue, + ) + } + + // use his id as cur id + event.Id = his.Id + if event.Id > 0 { + if err := event.Add(); err != nil { + logger.Errorf( + "event_persist_cur_fail: %v rule_id=%d hash=%s tags=%v timestamp=%d value=%s", + err, + event.RuleId, + event.Hash, + event.TagsJSON, + event.TriggerTime, + event.TriggerValue, + ) + } + } +} + +// for alerting +func fillUsers(e *models.AlertCurEvent) { + gids := make([]int64, 0, len(e.NotifyGroupsJSON)) + for i := 0; i < len(e.NotifyGroupsJSON); i++ { + gid, err := strconv.ParseInt(e.NotifyGroupsJSON[i], 10, 64) + if err != nil { + continue + } + + gids = append(gids, gid) + } + + e.NotifyGroupsObj = memsto.UserGroupCache.GetByUserGroupIds(gids) + + uids := make(map[int64]struct{}) + for i := 0; i < len(e.NotifyGroupsObj); i++ { + ug := e.NotifyGroupsObj[i] + for j := 0; j < len(ug.UserIds); j++ { + uids[ug.UserIds[j]] = struct{}{} + } + } + + e.NotifyUsersObj = memsto.UserCache.GetByUserIds(mapKeys(uids)) +} + +func mapKeys(m map[int64]struct{}) []int64 { + lst := make([]int64, 0, len(m)) + for k := range m { + lst = append(lst, k) + } + return lst +} diff --git a/src/server/engine/effective.go b/src/server/engine/effective.go new file mode 100644 index 00000000..70499867 --- /dev/null +++ b/src/server/engine/effective.go @@ -0,0 +1,37 @@ +package engine + +import ( + "strconv" + "strings" + "time" + + "github.com/didi/nightingale/v5/src/models" +) + +func isNoneffective(timestamp int64, alertRule *models.AlertRule) bool { + if alertRule.Disabled == 1 { + return true + } + + tm := time.Unix(timestamp, 0) + triggerTime := tm.Format("15:04") + triggerWeek := strconv.Itoa(int(tm.Weekday())) + + if alertRule.EnableStime <= alertRule.EnableEtime { + if triggerTime < alertRule.EnableStime || triggerTime > alertRule.EnableEtime { + return true + } + } else { + if triggerTime < alertRule.EnableStime && triggerTime > alertRule.EnableEtime { + return true + } + } + + alertRule.EnableDaysOfWeek = strings.Replace(alertRule.EnableDaysOfWeek, "7", "0", 1) + + if !strings.Contains(alertRule.EnableDaysOfWeek, triggerWeek) { + return true + } + + return false +} diff --git a/src/server/engine/engine.go b/src/server/engine/engine.go new file mode 100644 index 00000000..c092f7ed --- /dev/null +++ b/src/server/engine/engine.go @@ -0,0 +1,36 @@ +package engine + +import ( + "context" + "time" + + "github.com/didi/nightingale/v5/src/server/config" + promstat "github.com/didi/nightingale/v5/src/server/stat" +) + +func Start(ctx context.Context) error { + err := initTpls() + if err != nil { + 
return err + } + + // start loop consumer + go loopConsume(ctx) + + // filter my rules and start worker + go loopFilterRules(ctx) + + // repeat notifier + go loopRepeat(ctx) + + go reportQueueSize() + + return nil +} + +func reportQueueSize() { + for { + time.Sleep(time.Second) + promstat.GaugeAlertQueueSize.WithLabelValues(config.C.ClusterName).Set(float64(EventQueue.Len())) + } +} diff --git a/src/server/engine/logger.go b/src/server/engine/logger.go new file mode 100644 index 00000000..9ee48936 --- /dev/null +++ b/src/server/engine/logger.go @@ -0,0 +1,30 @@ +package engine + +import ( + "github.com/didi/nightingale/v5/src/models" + "github.com/toolkits/pkg/logger" +) + +func logEvent(event *models.AlertCurEvent, location string, err ...error) { + status := "triggered" + if event.IsRecovered { + status = "recovered" + } + + message := "" + if len(err) > 0 && err[0] != nil { + message = "error_message: " + err[0].Error() + } + + logger.Infof( + "event(%s %s) %s: rule_id=%d %v%s@%d %s", + event.Hash, + status, + location, + event.RuleId, + event.TagsJSON, + event.TriggerValue, + event.TriggerTime, + message, + ) +} diff --git a/src/server/engine/mute.go b/src/server/engine/mute.go new file mode 100644 index 00000000..40e5a095 --- /dev/null +++ b/src/server/engine/mute.go @@ -0,0 +1,58 @@ +package engine + +import ( + "github.com/didi/nightingale/v5/src/models" + "github.com/didi/nightingale/v5/src/server/memsto" +) + +func isMuted(event *models.AlertCurEvent) bool { + mutes, has := memsto.AlertMuteCache.Gets(event.GroupId) + if !has || len(mutes) == 0 { + return false + } + + for i := 0; i < len(mutes); i++ { + if matchMute(event, mutes[i]) { + return true + } + } + + return false +} + +func matchMute(event *models.AlertCurEvent, mute *models.AlertMute) bool { + if event.TriggerTime < mute.Btime || event.TriggerTime > mute.Etime { + return false + } + + return matchTags(event.TagsMap, mute.ITags) +} + +func matchTags(eventTagsMap map[string]string, itags []models.TagFilter) bool { + for i := 0; i < len(itags); i++ { + filter := itags[i] + value, exists := eventTagsMap[filter.Key] + if !exists { + return false + } + + if filter.Func == "==" { + // == + if filter.Value != value { + return false + } + } else if filter.Func == "in" { + // in + if _, has := filter.Vset[value]; !has { + return false + } + } else { + // =~ + if !filter.Regexp.MatchString(value) { + return false + } + } + } + + return true +} diff --git a/src/server/engine/notify.go b/src/server/engine/notify.go new file mode 100644 index 00000000..19cf992e --- /dev/null +++ b/src/server/engine/notify.go @@ -0,0 +1,214 @@ +package engine + +import ( + "bytes" + "context" + "encoding/json" + "html/template" + "os/exec" + "path" + "strings" + "time" + + "github.com/pkg/errors" + "github.com/toolkits/pkg/file" + "github.com/toolkits/pkg/logger" + "github.com/toolkits/pkg/runner" + "github.com/toolkits/pkg/sys" + + "github.com/didi/nightingale/v5/src/models" + "github.com/didi/nightingale/v5/src/server/config" + "github.com/didi/nightingale/v5/src/server/memsto" + "github.com/didi/nightingale/v5/src/storage" +) + +var tpls = make(map[string]*template.Template) + +var fns = template.FuncMap{ + "unescaped": func(str string) interface{} { return template.HTML(str) }, + "urlconvert": func(str string) interface{} { return template.URL(str) }, + "timeformat": func(ts int64, pattern ...string) string { + defp := "2006-01-02 15:04:05" + if pattern != nil && len(pattern) > 0 { + defp = pattern[0] + } + return time.Unix(ts, 0).Format(defp) + 
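// Illustrative sketch (not part of the patch): the matching semantics of
// matchTags above, which both mutes and subscriptions use. Every filter must
// match (logical AND); a tag key missing from the event means "no match";
// Func "==" is an exact compare, "in" checks a value set, anything else falls
// through to a regular expression. The local tagFilter type below only mirrors
// the shape of models.TagFilter for this demo.
package main

import (
	"fmt"
	"regexp"
)

type tagFilter struct {
	Key    string
	Func   string // "==", "in", or "=~"
	Value  string
	Vset   map[string]struct{}
	Regexp *regexp.Regexp
}

func match(eventTags map[string]string, filters []tagFilter) bool {
	for _, f := range filters {
		v, ok := eventTags[f.Key]
		if !ok {
			return false
		}
		switch f.Func {
		case "==":
			if f.Value != v {
				return false
			}
		case "in":
			if _, has := f.Vset[v]; !has {
				return false
			}
		default: // treated as =~
			if !f.Regexp.MatchString(v) {
				return false
			}
		}
	}
	return true
}

func main() {
	tags := map[string]string{"service": "mysql", "idc": "bj"}
	filters := []tagFilter{
		{Key: "service", Func: "==", Value: "mysql"},
		{Key: "idc", Func: "=~", Regexp: regexp.MustCompile("^(bj|sh)$")},
	}
	fmt.Println(match(tags, filters)) // true: both filters hit, the event is muted
}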
}, + "timestamp": func(pattern ...string) string { + defp := "2006-01-02 15:04:05" + if pattern != nil && len(pattern) > 0 { + defp = pattern[0] + } + return time.Now().Format(defp) + }, +} + +func initTpls() error { + tplDir := path.Join(runner.Cwd, "etc", "template") + + filenames, err := file.FilesUnder(tplDir) + if err != nil { + return errors.WithMessage(err, "failed to exec FilesUnder") + } + + if len(filenames) == 0 { + return errors.New("no tpl files under " + tplDir) + } + + tplFiles := make([]string, 0, len(filenames)) + for i := 0; i < len(filenames); i++ { + if strings.HasSuffix(filenames[i], ".tpl") { + tplFiles = append(tplFiles, filenames[i]) + } + } + + if len(tplFiles) == 0 { + return errors.New("no tpl files under " + tplDir) + } + + for i := 0; i < len(tplFiles); i++ { + tplpath := path.Join(tplDir, tplFiles[i]) + + tpl, err := template.New(tplFiles[i]).Funcs(fns).ParseFiles(tplpath) + if err != nil { + return errors.WithMessage(err, "failed to parse tpl: "+tplpath) + } + + tpls[tplFiles[i]] = tpl + } + + return nil +} + +type Notice struct { + Event *models.AlertCurEvent `json:"event"` + Tpls map[string]string `json:"tpls"` +} + +func buildStdin(event *models.AlertCurEvent) ([]byte, error) { + // build notice body with templates + ntpls := make(map[string]string) + for filename, tpl := range tpls { + var body bytes.Buffer + if err := tpl.Execute(&body, event); err != nil { + ntpls[filename] = err.Error() + } else { + ntpls[filename] = body.String() + } + } + + return json.Marshal(Notice{Event: event, Tpls: ntpls}) +} + +func notify(event *models.AlertCurEvent) { + logEvent(event, "notify") + + stdin, err := buildStdin(event) + if err != nil { + logger.Errorf("event_notify: build stdin failed: %v", err) + return + } + + // pub all alerts to redis + if config.C.Alerting.RedisPub.Enable { + err = storage.Redis.Publish(context.Background(), config.C.Alerting.RedisPub.ChannelKey, stdin).Err() + if err != nil { + logger.Errorf("event_notify: redis publish %s err: %v", config.C.Alerting.RedisPub.ChannelKey, err) + } + } + + // no notify.py? 
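// Illustrative sketch (not part of the patch): what a notification template
// under etc/template/*.tpl can rely on, given the FuncMap above: "timeformat"
// renders the trigger timestamp, "unescaped" injects raw HTML, and the
// execution context is the event itself. The inline template, trimmed helper
// signatures and event fields below are examples only.
package main

import (
	"html/template"
	"os"
	"time"
)

var demoFns = template.FuncMap{
	"unescaped":  func(str string) interface{} { return template.HTML(str) },
	"timeformat": func(ts int64) string { return time.Unix(ts, 0).Format("2006-01-02 15:04:05") },
}

const body = `Rule: {{.RuleName}}
Value: {{.TriggerValue}}
Time: {{timeformat .TriggerTime}}
`

func main() {
	tpl := template.Must(template.New("demo.tpl").Funcs(demoFns).Parse(body))

	event := struct {
		RuleName     string
		TriggerValue string
		TriggerTime  int64
	}{"cpu_usage_high", "93.5", 1650000000}

	_ = tpl.Execute(os.Stdout, event)
}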
do nothing + if config.C.Alerting.NotifyScriptPath == "" { + return + } + + callScript(stdin) + + // handle alert subscribes + subs, has := memsto.AlertSubscribeCache.Get(event.RuleId) + if has { + handleSubscribes(*event, subs) + } + + subs, has = memsto.AlertSubscribeCache.Get(0) + if has { + handleSubscribes(*event, subs) + } +} + +func handleSubscribes(event models.AlertCurEvent, subs []*models.AlertSubscribe) { + for i := 0; i < len(subs); i++ { + handleSubscribe(event, subs[i]) + } +} + +func handleSubscribe(event models.AlertCurEvent, sub *models.AlertSubscribe) { + if !matchTags(event.TagsMap, sub.ITags) { + return + } + + if sub.RedefineSeverity == 1 { + event.Severity = sub.NewSeverity + } + + if sub.RedefineChannels == 1 { + event.NotifyChannels = sub.NewChannels + event.NotifyChannelsJSON = strings.Fields(sub.NewChannels) + } + + event.NotifyGroups = sub.UserGroupIds + event.NotifyGroupsJSON = strings.Fields(sub.UserGroupIds) + if len(event.NotifyGroupsJSON) == 0 { + return + } + + logEvent(&event, "subscribe") + + fillUsers(&event) + + stdin, err := buildStdin(&event) + if err != nil { + logger.Errorf("event_notify: build stdin failed when handle subscribe: %v", err) + return + } + + callScript(stdin) +} + +func callScript(stdinBytes []byte) { + fpath := config.C.Alerting.NotifyScriptPath + cmd := exec.Command(fpath) + cmd.Stdin = bytes.NewReader(stdinBytes) + + // combine stdout and stderr + var buf bytes.Buffer + cmd.Stdout = &buf + cmd.Stderr = &buf + + err := cmd.Start() + if err != nil { + logger.Errorf("event_notify: run cmd err: %v", err) + return + } + + err, isTimeout := sys.WrapTimeout(cmd, time.Duration(30)*time.Second) + + if isTimeout { + if err == nil { + logger.Errorf("event_notify: timeout and killed process %s", fpath) + } + + if err != nil { + logger.Errorf("event_notify: kill process %s occur error %v", fpath, err) + } + + return + } + + if err != nil { + logger.Errorf("event_notify: exec script %s occur error: %v, output: %s", fpath, err, buf.String()) + return + } + + logger.Infof("event_notify: exec %s output: %s", fpath, buf.String()) +} diff --git a/src/server/engine/queue.go b/src/server/engine/queue.go new file mode 100644 index 00000000..84efc674 --- /dev/null +++ b/src/server/engine/queue.go @@ -0,0 +1,5 @@ +package engine + +import "github.com/toolkits/pkg/container/list" + +var EventQueue = list.NewSafeListLimited(10000000) diff --git a/src/server/engine/repeat.go b/src/server/engine/repeat.go new file mode 100644 index 00000000..61d2bdce --- /dev/null +++ b/src/server/engine/repeat.go @@ -0,0 +1,66 @@ +package engine + +import ( + "context" + "time" + + "github.com/didi/nightingale/v5/src/models" + "github.com/didi/nightingale/v5/src/server/config" + "github.com/didi/nightingale/v5/src/server/memsto" + "github.com/toolkits/pkg/logger" +) + +func loopRepeat(ctx context.Context) { + duration := time.Duration(9000) * time.Millisecond + for { + select { + case <-ctx.Done(): + return + case <-time.After(duration): + repeat() + } + } +} + +// 拉取未恢复的告警表中需要重复通知的数据 +func repeat() { + events, err := models.AlertCurEventNeedRepeat(config.C.ClusterName) + if err != nil { + logger.Errorf("repeat: AlertCurEventNeedRepeat: %v", err) + return + } + + if len(events) == 0 { + return + } + + for i := 0; i < len(events); i++ { + event := events[i] + rule := memsto.AlertRuleCache.Get(event.RuleId) + if rule == nil { + continue + } + + if rule.NotifyRepeatStep == 0 { + // 用户后来调整了这个字段,不让继续发送了 + continue + } + + event.DB2Mem() + + if isNoneffective(event.TriggerTime, 
rule) { + continue + } + + if isMuted(event) { + continue + } + + fillUsers(event) + notify(event) + + if err = event.IncRepeatStep(int64(rule.NotifyRepeatStep * 60)); err != nil { + logger.Errorf("repeat: IncRepeatStep: %v", err) + } + } +} diff --git a/src/server/engine/vector.go b/src/server/engine/vector.go new file mode 100644 index 00000000..4b6ef5eb --- /dev/null +++ b/src/server/engine/vector.go @@ -0,0 +1,81 @@ +package engine + +import ( + "math" + + "github.com/prometheus/common/model" +) + +type Vector struct { + Key string `json:"key"` + Labels model.Metric `json:"labels"` + Timestamp int64 `json:"timestamp"` + Value float64 `json:"value"` +} + +func ConvertVectors(value model.Value) (lst []Vector) { + switch value.Type() { + case model.ValVector: + items, ok := value.(model.Vector) + if !ok { + return + } + + for _, item := range items { + if math.IsNaN(float64(item.Value)) { + continue + } + + lst = append(lst, Vector{ + Key: item.Metric.String(), + Timestamp: item.Timestamp.Unix(), + Value: float64(item.Value), + Labels: item.Metric, + }) + } + case model.ValMatrix: + items, ok := value.(model.Matrix) + if !ok { + return + } + + for _, item := range items { + if len(item.Values) == 0 { + return + } + + last := item.Values[len(item.Values)-1] + + if math.IsNaN(float64(last.Value)) { + continue + } + + lst = append(lst, Vector{ + Key: item.Metric.String(), + Labels: item.Metric, + Timestamp: last.Timestamp.Unix(), + Value: float64(last.Value), + }) + } + case model.ValScalar: + item, ok := value.(*model.Scalar) + if !ok { + return + } + + if math.IsNaN(float64(item.Value)) { + return + } + + lst = append(lst, Vector{ + Key: "{}", + Timestamp: item.Timestamp.Unix(), + Value: float64(item.Value), + Labels: model.Metric{}, + }) + default: + return + } + + return +} diff --git a/src/server/engine/worker.go b/src/server/engine/worker.go new file mode 100644 index 00000000..361ecf5d --- /dev/null +++ b/src/server/engine/worker.go @@ -0,0 +1,330 @@ +package engine + +import ( + "context" + "fmt" + "sort" + "strings" + "time" + + "github.com/toolkits/pkg/logger" + "github.com/toolkits/pkg/str" + + "github.com/didi/nightingale/v5/src/models" + "github.com/didi/nightingale/v5/src/server/config" + "github.com/didi/nightingale/v5/src/server/memsto" + "github.com/didi/nightingale/v5/src/server/naming" + "github.com/didi/nightingale/v5/src/server/reader" + promstat "github.com/didi/nightingale/v5/src/server/stat" +) + +func loopFilterRules(ctx context.Context) { + duration := time.Duration(9000) * time.Millisecond + for { + select { + case <-ctx.Done(): + return + case <-time.After(duration): + filterRules() + } + } +} + +func filterRules() { + ids := memsto.AlertRuleCache.GetRuleIds() + + count := len(ids) + mines := make([]int64, 0, count) + + for i := 0; i < count; i++ { + node, err := naming.HashRing.GetNode(fmt.Sprint(ids[i])) + if err != nil { + logger.Warning("failed to get node from hashring:", err) + continue + } + + if node == config.C.Heartbeat.Endpoint { + mines = append(mines, ids[i]) + } + } + + Workers.Build(mines) +} + +type RuleEval struct { + rule *models.AlertRule + fires map[string]*models.AlertCurEvent + pendings map[string]*models.AlertCurEvent + quit chan struct{} +} + +func (r RuleEval) Stop() { + logger.Infof("rule_eval:%d stopping", r.RuleID()) + close(r.quit) +} + +func (r RuleEval) RuleID() int64 { + return r.rule.Id +} + +func (r RuleEval) Start() { + logger.Infof("rule_eval:%d started", r.RuleID()) + for { + select { + case <-r.quit: + // 
logger.Infof("rule_eval:%d stopped", r.RuleID()) + return + default: + r.Work() + interval := r.rule.PromEvalInterval + if interval <= 0 { + interval = 10 + } + time.Sleep(time.Duration(interval) * time.Second) + } + } +} + +func (r RuleEval) Work() { + promql := strings.TrimSpace(r.rule.PromQl) + if promql == "" { + logger.Errorf("rule_eval:%d promql is blank", r.RuleID()) + return + } + + value, warnings, err := reader.Reader.Client.Query(context.Background(), promql, time.Now()) + if err != nil { + logger.Errorf("rule_eval:%d promql:%s, error:%v", r.RuleID(), promql, err) + return + } + + if len(warnings) > 0 { + logger.Errorf("rule_eval:%d promql:%s, warnings:%v", r.RuleID(), promql, warnings) + return + } + + r.judge(ConvertVectors(value)) +} + +type WorkersType struct { + rules map[string]RuleEval +} + +var Workers = &WorkersType{rules: make(map[string]RuleEval)} + +func (ws *WorkersType) Build(rids []int64) { + rules := make(map[string]*models.AlertRule) + + for i := 0; i < len(rids); i++ { + rule := memsto.AlertRuleCache.Get(rids[i]) + if rule == nil { + continue + } + + hash := str.MD5(fmt.Sprintf("%d_%d_%s", + rule.Id, + rule.PromEvalInterval, + rule.PromQl, + )) + + rules[hash] = rule + } + + // stop old + for hash := range Workers.rules { + if _, has := rules[hash]; !has { + Workers.rules[hash].Stop() + delete(Workers.rules, hash) + } + } + + // start new + for hash := range rules { + if _, has := Workers.rules[hash]; has { + // already exists + continue + } + + elst, err := models.AlertCurEventGetByRule(rules[hash].Id) + if err != nil { + logger.Errorf("worker_build: AlertCurEventGetByRule failed: %v", err) + continue + } + + firemap := make(map[string]*models.AlertCurEvent) + for i := 0; i < len(elst); i++ { + elst[i].DB2Mem() + firemap[elst[i].Hash] = elst[i] + } + + re := RuleEval{ + rule: rules[hash], + quit: make(chan struct{}), + fires: firemap, + pendings: make(map[string]*models.AlertCurEvent), + } + + go re.Start() + Workers.rules[hash] = re + } +} + +func (r RuleEval) judge(vectors []Vector) { + count := len(vectors) + alertingKeys := make(map[string]struct{}) + now := time.Now().Unix() + for i := 0; i < count; i++ { + // rule disabled in this time span? 
+ if isNoneffective(vectors[i].Timestamp, r.rule) { + continue + } + + // handle series tags + tagsMap := make(map[string]string) + for label, value := range vectors[i].Labels { + tagsMap[string(label)] = string(value) + } + + // handle target note and target_tags + targetIdent, has := vectors[i].Labels["ident"] + targetNote := "" + if has { + target, exists := memsto.TargetCache.Get(string(targetIdent)) + if exists { + targetNote = target.Note + for label, value := range target.TagsMap { + tagsMap[label] = value + } + } + } + + // handle rule tags + for _, tag := range r.rule.AppendTagsJSON { + arr := strings.SplitN(tag, "=", 2) + tagsMap[arr[0]] = arr[1] + } + + event := &models.AlertCurEvent{ + TriggerTime: vectors[i].Timestamp, + TagsMap: tagsMap, + } + + // isMuted only need TriggerTime and TagsMap + if isMuted(event) { + logger.Infof("event_muted: rule_id=%d %s", r.rule.Id, vectors[i].Key) + continue + } + + // compute hash + hash := str.MD5(fmt.Sprintf("%d_%s", r.rule.Id, vectors[i].Key)) + alertingKeys[hash] = struct{}{} + + tagsArr := labelMapToArr(tagsMap) + sort.Strings(tagsArr) + + event.Cluster = r.rule.Cluster + event.GroupId = r.rule.GroupId + event.Hash = hash + event.RuleId = r.rule.Id + event.RuleName = r.rule.Name + event.RuleNote = r.rule.Note + event.Severity = r.rule.Severity + event.PromForDuration = r.rule.PromForDuration + event.PromQl = r.rule.PromQl + event.PromEvalInterval = r.rule.PromEvalInterval + event.Callbacks = r.rule.Callbacks + event.CallbacksJSON = r.rule.CallbacksJSON + event.RunbookUrl = r.rule.RunbookUrl + event.NotifyRecovered = r.rule.NotifyRecovered + event.NotifyChannels = r.rule.NotifyChannels + event.NotifyChannelsJSON = r.rule.NotifyChannelsJSON + event.NotifyGroups = r.rule.NotifyGroups + event.NotifyGroupsJSON = r.rule.NotifyGroupsJSON + event.NotifyRepeatNext = now + int64(r.rule.NotifyRepeatStep*60) + event.TargetIdent = string(targetIdent) + event.TargetNote = targetNote + event.TriggerValue = readableValue(vectors[i].Value) + event.TagsJSON = tagsArr + event.Tags = strings.Join(tagsArr, ",,") + event.IsRecovered = false + event.LastEvalTime = now + + r.handleNewEvent(event) + } + + // handle recovered events + r.recoverRule(alertingKeys, now) +} + +func readableValue(value float64) string { + ret := fmt.Sprintf("%.5f", value) + ret = strings.TrimRight(ret, "0") + return strings.TrimRight(ret, ".") +} + +func labelMapToArr(m map[string]string) []string { + numLabels := len(m) + + labelStrings := make([]string, 0, numLabels) + for label, value := range m { + labelStrings = append(labelStrings, fmt.Sprintf("%s=%s", label, value)) + } + + if numLabels > 1 { + sort.Strings(labelStrings) + } + + return labelStrings +} + +func (r RuleEval) handleNewEvent(event *models.AlertCurEvent) { + if _, has := r.fires[event.Hash]; has { + // fired before, nothing to do + return + } + + if event.PromForDuration == 0 { + r.fires[event.Hash] = event + pushEventToQueue(event) + return + } + + _, has := r.pendings[event.Hash] + if has { + r.pendings[event.Hash].LastEvalTime = event.TriggerTime + } else { + r.pendings[event.Hash] = event + } + + if r.pendings[event.Hash].LastEvalTime-r.pendings[event.Hash].TriggerTime > int64(event.PromForDuration) { + r.fires[event.Hash] = event + pushEventToQueue(event) + } +} + +func (r RuleEval) recoverRule(alertingKeys map[string]struct{}, now int64) { + for hash, event := range r.fires { + if _, has := alertingKeys[hash]; has { + continue + } + + // 没查到触发阈值的vector,姑且就认为这个vector的值恢复了 + // 
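// Illustrative sketch (not part of the patch): the "pending for duration"
// behaviour of handleNewEvent above, which plays a role similar to a
// Prometheus `for:` duration. With PromForDuration = 60 the series must keep
// matching across evaluations until last_eval - first_trigger exceeds 60s
// before the event is pushed to the queue; if it stops matching before that
// point, the event never reaches the queue. The timeline below is an example.
package main

import "fmt"

func main() {
	const promForDuration = 60 // seconds
	firstTrigger := int64(1000)

	for _, lastEval := range []int64{1000, 1015, 1045, 1075} {
		fired := lastEval-firstTrigger > promForDuration
		fmt.Printf("eval at t=%d: fired=%v\n", lastEval, fired)
	}
	// only the evaluation at t=1075 fires
}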
我确实无法分辨,是prom中有值但是未满足阈值所以没返回,还是prom中确实丢了一些点导致没有数据可以返回,尴尬 + delete(r.fires, hash) + delete(r.pendings, hash) + + if r.rule.NotifyRecovered == 1 { + event.IsRecovered = true + event.LastEvalTime = now + pushEventToQueue(event) + } + } +} + +func pushEventToQueue(event *models.AlertCurEvent) { + promstat.CounterAlertsTotal.WithLabelValues(config.C.ClusterName).Inc() + logEvent(event, "push_queue") + if !EventQueue.PushFront(event) { + logger.Warningf("event_push_queue: queue is full") + } +} diff --git a/src/server/idents/idents.go b/src/server/idents/idents.go new file mode 100644 index 00000000..a3ab67f5 --- /dev/null +++ b/src/server/idents/idents.go @@ -0,0 +1,222 @@ +package idents + +import ( + "context" + "fmt" + "sort" + "strconv" + "time" + + cmap "github.com/orcaman/concurrent-map" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/prompb" + "github.com/toolkits/pkg/logger" + + "github.com/didi/nightingale/v5/src/models" + "github.com/didi/nightingale/v5/src/server/config" + "github.com/didi/nightingale/v5/src/server/memsto" + "github.com/didi/nightingale/v5/src/server/naming" + "github.com/didi/nightingale/v5/src/server/writer" + "github.com/didi/nightingale/v5/src/storage" +) + +// ident -> timestamp +var Idents = cmap.New() + +func loopToRedis(ctx context.Context) { + duration := time.Duration(4) * time.Second + for { + select { + case <-ctx.Done(): + return + case <-time.After(duration): + toRedis() + } + } +} + +func toRedis() { + items := Idents.Items() + if len(items) == 0 { + return + } + + now := time.Now().Unix() + + // clean old idents + for key, at := range items { + if at.(int64) < now-10 { + Idents.Remove(key) + } else { + // use now as timestamp to redis + err := storage.Redis.HSet(context.Background(), redisKey(config.C.ClusterName), key, now).Err() + if err != nil { + logger.Errorf("redis hset idents failed: %v", err) + } + } + } +} + +// hash struct: +// /idents/Default -> { +// $ident => $timestamp +// $ident => $timestamp +// } +func redisKey(cluster string) string { + return fmt.Sprintf("/idents/%s", cluster) +} + +func clearDeadIdent(ctx context.Context, cluster, ident string) { + key := redisKey(cluster) + err := storage.Redis.HDel(ctx, key, ident).Err() + if err != nil { + logger.Warningf("failed to hdel %s %s, error: %v", key, ident, err) + } +} + +func Handle(ctx context.Context) { + go loopToRedis(ctx) + go loopPushMetrics(ctx) +} + +func loopPushMetrics(ctx context.Context) { + duration := time.Duration(10) * time.Second + for { + select { + case <-ctx.Done(): + return + case <-time.After(duration): + pushMetrics() + } + } +} + +func pushMetrics() { + servers, err := naming.ActiveServers(context.Background(), config.C.ClusterName) + if err != nil { + logger.Errorf("handle_idents: failed to get active servers: %v", err) + return + } + + if len(servers) == 0 { + logger.Errorf("handle_idents: active servers empty") + return + } + + sort.Strings(servers) + + if config.C.Heartbeat.Endpoint != servers[0] { + logger.Info("handle_idents: i am not leader") + return + } + + // get all the target heartbeat timestamp + ret, err := storage.Redis.HGetAll(context.Background(), redisKey(config.C.ClusterName)).Result() + if err != nil { + logger.Errorf("handle_idents: redis hgetall fail: %v", err) + return + } + + now := time.Now().Unix() + dur := config.C.NoData.Interval + + actives := make(map[string]struct{}) + for ident, clockstr := range ret { + clock, err := strconv.ParseInt(clockstr, 10, 64) + if err != nil { + continue + } + + if now-clock 
+ clearDeadIdent(context.Background(), config.C.ClusterName, ident)
+ } else {
+ actives[ident] = struct{}{}
+ }
+ }
+
+ var series []interface{}
+
+ // idents with a heartbeat report target_up = 1;
+ // if the target is found in the cache, its tags are appended to the series,
+ // otherwise the target is created in the database
+ for active := range actives {
+ // build metrics
+ pt := &prompb.TimeSeries{}
+ pt.Samples = append(pt.Samples, prompb.Sample{
+ // use ms
+ Timestamp: now * 1000,
+ Value: 1,
+ })
+
+ pt.Labels = append(pt.Labels, &prompb.Label{
+ Name: model.MetricNameLabel,
+ Value: config.C.NoData.Metric,
+ })
+
+ pt.Labels = append(pt.Labels, &prompb.Label{
+ Name: "ident",
+ Value: active,
+ })
+
+ target, has := memsto.TargetCache.Get(active)
+ if !has {
+ // target does not exist yet
+ target = &models.Target{
+ Cluster: config.C.ClusterName,
+ Ident: active,
+ Tags: "",
+ TagsJSON: []string{},
+ TagsMap: make(map[string]string),
+ UpdateAt: now,
+ }
+
+ if err := target.Add(); err != nil {
+ logger.Errorf("handle_idents: insert target(%s) fail: %v", active, err)
+ }
+ } else {
+ // target already exists
+ for label, value := range target.TagsMap {
+ pt.Labels = append(pt.Labels, &prompb.Label{
+ Name: label,
+ Value: value,
+ })
+ }
+ }
+
+ series = append(series, pt)
+ }
+
+ // ask TargetCache for targets that are not in the actives set; those are considered dead and get target_up = 0
+ deads := memsto.TargetCache.GetDeads(actives)
+ for ident, dead := range deads {
+ // build metrics
+ pt := &prompb.TimeSeries{}
+ pt.Samples = append(pt.Samples, prompb.Sample{
+ // use ms
+ Timestamp: now * 1000,
+ Value: 0,
+ })
+
+ pt.Labels = append(pt.Labels, &prompb.Label{
+ Name: model.MetricNameLabel,
+ Value: config.C.NoData.Metric,
+ })
+
+ pt.Labels = append(pt.Labels, &prompb.Label{
+ Name: "ident",
+ Value: ident,
+ })
+
+ for label, value := range dead.TagsMap {
+ pt.Labels = append(pt.Labels, &prompb.Label{
+ Name: label,
+ Value: value,
+ })
+ }
+ series = append(series, pt)
+ }
+
+ if !writer.Writers.PushQueue(series) {
+ logger.Errorf("handle_idents: writer queue full")
+ }
+}
diff --git a/src/server/memsto/alert_mute_cache.go b/src/server/memsto/alert_mute_cache.go
new file mode 100644
index 00000000..9efa17de
--- /dev/null
+++ b/src/server/memsto/alert_mute_cache.go
@@ -0,0 +1,131 @@
+package memsto
+
+import (
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/pkg/errors"
+ "github.com/toolkits/pkg/logger"
+
+ "github.com/didi/nightingale/v5/src/models"
+ "github.com/didi/nightingale/v5/src/server/config"
+ promstat "github.com/didi/nightingale/v5/src/server/stat"
+)
+
+type AlertMuteCacheType struct {
+ statTotal int64
+ statLastUpdated int64
+
+ sync.RWMutex
+ mutes map[int64][]*models.AlertMute // key: busi_group_id
+}
+
+var AlertMuteCache = AlertMuteCacheType{
+ statTotal: -1,
+ statLastUpdated: -1,
+ mutes: make(map[int64][]*models.AlertMute),
+}
+
+func (amc *AlertMuteCacheType) StatChanged(total, lastUpdated int64) bool {
+ if amc.statTotal == total && amc.statLastUpdated == lastUpdated {
+ return false
+ }
+
+ return true
+}
+
+func (amc *AlertMuteCacheType) Set(ms map[int64][]*models.AlertMute, total, lastUpdated int64) {
+ amc.Lock()
+ amc.mutes = ms
+ amc.Unlock()
+
+ // only one goroutine used, so no need lock
+ amc.statTotal = total
+ amc.statLastUpdated = lastUpdated
+}
+
+func (amc *AlertMuteCacheType) Gets(bgid int64) ([]*models.AlertMute, bool) {
+ amc.RLock()
+ defer amc.RUnlock()
+ lst, has := amc.mutes[bgid]
+ return lst, has
+}
+
+func (amc *AlertMuteCacheType) GetAllStructs() map[int64][]models.AlertMute {
+ amc.RLock()
+ defer amc.RUnlock()
+
+ ret :=
make(map[int64][]models.AlertMute) + for bgid := range amc.mutes { + lst := amc.mutes[bgid] + for i := 0; i < len(lst); i++ { + ret[bgid] = append(ret[bgid], *lst[i]) + } + } + + return ret +} + +func SyncAlertMutes() { + err := syncAlertMutes() + if err != nil { + fmt.Println("failed to sync alert mutes:", err) + exit(1) + } + + go loopSyncAlertMutes() +} + +func loopSyncAlertMutes() { + duration := time.Duration(9000) * time.Millisecond + for { + time.Sleep(duration) + if err := syncAlertMutes(); err != nil { + logger.Warning("failed to sync alert mutes:", err) + } + } +} + +func syncAlertMutes() error { + start := time.Now() + btime := start.Unix() - int64(30) + + stat, err := models.AlertMuteStatistics(config.C.ClusterName, btime) + if err != nil { + return errors.WithMessage(err, "failed to exec AlertMuteStatistics") + } + + if !AlertMuteCache.StatChanged(stat.Total, stat.LastUpdated) { + promstat.GaugeCronDuration.WithLabelValues(config.C.ClusterName, "sync_alert_mutes").Set(0) + promstat.GaugeSyncNumber.WithLabelValues(config.C.ClusterName, "sync_alert_mutes").Set(0) + logger.Debug("alert mutes not changed") + return nil + } + + lst, err := models.AlertMuteGetsByCluster(config.C.ClusterName, btime) + if err != nil { + return errors.WithMessage(err, "failed to exec AlertMuteGetsByCluster") + } + + oks := make(map[int64][]*models.AlertMute) + + for i := 0; i < len(lst); i++ { + err = lst[i].Parse() + if err != nil { + logger.Warningf("failed to parse alert_mute, id: %d", lst[i].Id) + continue + } + + oks[lst[i].GroupId] = append(oks[lst[i].GroupId], lst[i]) + } + + AlertMuteCache.Set(oks, stat.Total, stat.LastUpdated) + + ms := time.Since(start).Milliseconds() + promstat.GaugeCronDuration.WithLabelValues(config.C.ClusterName, "sync_alert_mutes").Set(float64(ms)) + promstat.GaugeSyncNumber.WithLabelValues(config.C.ClusterName, "sync_alert_mutes").Set(float64(len(lst))) + logger.Infof("timer: sync mutes done, cost: %dms, number: %d", ms, len(lst)) + + return nil +} diff --git a/src/server/memsto/alert_rule_cache.go b/src/server/memsto/alert_rule_cache.go new file mode 100644 index 00000000..d412d985 --- /dev/null +++ b/src/server/memsto/alert_rule_cache.go @@ -0,0 +1,120 @@ +package memsto + +import ( + "fmt" + "sync" + "time" + + "github.com/pkg/errors" + "github.com/toolkits/pkg/logger" + + "github.com/didi/nightingale/v5/src/models" + "github.com/didi/nightingale/v5/src/server/config" + promstat "github.com/didi/nightingale/v5/src/server/stat" +) + +type AlertRuleCacheType struct { + statTotal int64 + statLastUpdated int64 + + sync.RWMutex + rules map[int64]*models.AlertRule // key: rule id +} + +var AlertRuleCache = AlertRuleCacheType{ + statTotal: -1, + statLastUpdated: -1, + rules: make(map[int64]*models.AlertRule), +} + +func (arc *AlertRuleCacheType) StatChanged(total, lastUpdated int64) bool { + if arc.statTotal == total && arc.statLastUpdated == lastUpdated { + return false + } + + return true +} + +func (arc *AlertRuleCacheType) Set(m map[int64]*models.AlertRule, total, lastUpdated int64) { + arc.Lock() + arc.rules = m + arc.Unlock() + + // only one goroutine used, so no need lock + arc.statTotal = total + arc.statLastUpdated = lastUpdated +} + +func (arc *AlertRuleCacheType) Get(ruleId int64) *models.AlertRule { + arc.RLock() + defer arc.RUnlock() + return arc.rules[ruleId] +} + +func (arc *AlertRuleCacheType) GetRuleIds() []int64 { + arc.RLock() + defer arc.RUnlock() + + count := len(arc.rules) + list := make([]int64, 0, count) + for ruleId := range arc.rules { + list = 
append(list, ruleId) + } + + return list +} + +func SyncAlertRules() { + err := syncAlertRules() + if err != nil { + fmt.Println("failed to sync alert rules:", err) + exit(1) + } + + go loopSyncAlertRules() +} + +func loopSyncAlertRules() { + duration := time.Duration(9000) * time.Millisecond + for { + time.Sleep(duration) + if err := syncAlertRules(); err != nil { + logger.Warning("failed to sync alert rules:", err) + } + } +} + +func syncAlertRules() error { + start := time.Now() + + stat, err := models.AlertRuleStatistics(config.C.ClusterName) + if err != nil { + return errors.WithMessage(err, "failed to exec AlertRuleStatistics") + } + + if !AlertRuleCache.StatChanged(stat.Total, stat.LastUpdated) { + promstat.GaugeCronDuration.WithLabelValues(config.C.ClusterName, "sync_alert_rules").Set(0) + promstat.GaugeSyncNumber.WithLabelValues(config.C.ClusterName, "sync_alert_rules").Set(0) + logger.Debug("alert rules not changed") + return nil + } + + lst, err := models.AlertRuleGetsByCluster(config.C.ClusterName) + if err != nil { + return errors.WithMessage(err, "failed to exec AlertRuleGetsByCluster") + } + + m := make(map[int64]*models.AlertRule) + for i := 0; i < len(lst); i++ { + m[lst[i].Id] = lst[i] + } + + AlertRuleCache.Set(m, stat.Total, stat.LastUpdated) + + ms := time.Since(start).Milliseconds() + promstat.GaugeCronDuration.WithLabelValues(config.C.ClusterName, "sync_alert_rules").Set(float64(ms)) + promstat.GaugeSyncNumber.WithLabelValues(config.C.ClusterName, "sync_alert_rules").Set(float64(len(m))) + logger.Infof("timer: sync rules done, cost: %dms, number: %d", ms, len(m)) + + return nil +} diff --git a/src/server/memsto/alert_subsribe_cache.go b/src/server/memsto/alert_subsribe_cache.go new file mode 100644 index 00000000..9f319e14 --- /dev/null +++ b/src/server/memsto/alert_subsribe_cache.go @@ -0,0 +1,133 @@ +package memsto + +import ( + "fmt" + "sync" + "time" + + "github.com/pkg/errors" + "github.com/toolkits/pkg/logger" + + "github.com/didi/nightingale/v5/src/models" + "github.com/didi/nightingale/v5/src/server/config" + promstat "github.com/didi/nightingale/v5/src/server/stat" +) + +type AlertSubscribeCacheType struct { + statTotal int64 + statLastUpdated int64 + + sync.RWMutex + subs map[int64][]*models.AlertSubscribe +} + +var AlertSubscribeCache = AlertSubscribeCacheType{ + statTotal: -1, + statLastUpdated: -1, + subs: make(map[int64][]*models.AlertSubscribe), +} + +func (c *AlertSubscribeCacheType) StatChanged(total, lastUpdated int64) bool { + if c.statTotal == total && c.statLastUpdated == lastUpdated { + return false + } + + return true +} + +func (c *AlertSubscribeCacheType) Set(m map[int64][]*models.AlertSubscribe, total, lastUpdated int64) { + c.Lock() + c.subs = m + c.Unlock() + + // only one goroutine used, so no need lock + c.statTotal = total + c.statLastUpdated = lastUpdated +} + +func (c *AlertSubscribeCacheType) Get(ruleId int64) ([]*models.AlertSubscribe, bool) { + c.RLock() + defer c.RUnlock() + + lst, has := c.subs[ruleId] + return lst, has +} + +func (c *AlertSubscribeCacheType) GetStructs(ruleId int64) []models.AlertSubscribe { + c.RLock() + defer c.RUnlock() + + lst, has := c.subs[ruleId] + if !has { + return []models.AlertSubscribe{} + } + + ret := make([]models.AlertSubscribe, len(lst)) + for i := 0; i < len(lst); i++ { + ret[i] = *lst[i] + } + + return ret +} + +func SyncAlertSubscribes() { + err := syncAlertSubscribes() + if err != nil { + fmt.Println("failed to sync alert subscribes:", err) + exit(1) + } + + go loopSyncAlertSubscribes() +} + 
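+// loopSyncAlertSubscribes re-reads alert subscribes from the database roughly every 9 seconds; a failed sync is logged and retried on the next tick.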
+func loopSyncAlertSubscribes() { + duration := time.Duration(9000) * time.Millisecond + for { + time.Sleep(duration) + if err := syncAlertSubscribes(); err != nil { + logger.Warning("failed to sync alert subscribes:", err) + } + } +} + +func syncAlertSubscribes() error { + start := time.Now() + + stat, err := models.AlertSubscribeStatistics(config.C.ClusterName) + if err != nil { + return errors.WithMessage(err, "failed to exec AlertSubscribeStatistics") + } + + if !AlertSubscribeCache.StatChanged(stat.Total, stat.LastUpdated) { + promstat.GaugeCronDuration.WithLabelValues(config.C.ClusterName, "sync_alert_subscribes").Set(0) + promstat.GaugeSyncNumber.WithLabelValues(config.C.ClusterName, "sync_alert_subscribes").Set(0) + logger.Debug("alert subscribes not changed") + return nil + } + + lst, err := models.AlertSubscribeGetsByCluster(config.C.ClusterName) + if err != nil { + return errors.WithMessage(err, "failed to exec AlertSubscribeGetsByCluster") + } + + subs := make(map[int64][]*models.AlertSubscribe) + + for i := 0; i < len(lst); i++ { + err = lst[i].Parse() + if err != nil { + logger.Warningf("failed to parse alert subscribe, id: %d", lst[i].Id) + continue + } + + subs[lst[i].RuleId] = append(subs[lst[i].RuleId], lst[i]) + } + + AlertSubscribeCache.Set(subs, stat.Total, stat.LastUpdated) + + ms := time.Since(start).Milliseconds() + promstat.GaugeCronDuration.WithLabelValues(config.C.ClusterName, "sync_alert_subscribes").Set(float64(ms)) + promstat.GaugeSyncNumber.WithLabelValues(config.C.ClusterName, "sync_alert_subscribes").Set(float64(len(lst))) + logger.Infof("timer: sync subscribes done, cost: %dms, number: %d", ms, len(lst)) + + return nil +} diff --git a/src/server/memsto/memsto.go b/src/server/memsto/memsto.go new file mode 100644 index 00000000..22eaf97c --- /dev/null +++ b/src/server/memsto/memsto.go @@ -0,0 +1,21 @@ +package memsto + +import ( + "os" + + "github.com/toolkits/pkg/logger" +) + +func exit(code int) { + logger.Close() + os.Exit(code) +} + +func Sync() { + SyncTargets() + SyncUsers() + SyncUserGroups() + SyncAlertMutes() + SyncAlertSubscribes() + SyncAlertRules() +} diff --git a/src/server/memsto/target_cache.go b/src/server/memsto/target_cache.go new file mode 100644 index 00000000..7b915e7e --- /dev/null +++ b/src/server/memsto/target_cache.go @@ -0,0 +1,136 @@ +package memsto + +import ( + "fmt" + "strings" + "sync" + "time" + + "github.com/pkg/errors" + "github.com/toolkits/pkg/logger" + + "github.com/didi/nightingale/v5/src/models" + "github.com/didi/nightingale/v5/src/server/config" + promstat "github.com/didi/nightingale/v5/src/server/stat" +) + +// 1. append note to alert_event +// 2. 
append tags to series +type TargetCacheType struct { + statTotal int64 + statLastUpdated int64 + + sync.RWMutex + targets map[string]*models.Target // key: ident +} + +// init TargetCache +var TargetCache = TargetCacheType{ + statTotal: -1, + statLastUpdated: -1, + targets: make(map[string]*models.Target), +} + +func (tc *TargetCacheType) StatChanged(total, lastUpdated int64) bool { + if tc.statTotal == total && tc.statLastUpdated == lastUpdated { + return false + } + + return true +} + +func (tc *TargetCacheType) Set(m map[string]*models.Target, total, lastUpdated int64) { + tc.Lock() + tc.targets = m + tc.Unlock() + + // only one goroutine used, so no need lock + tc.statTotal = total + tc.statLastUpdated = lastUpdated +} + +func (tc *TargetCacheType) Get(ident string) (*models.Target, bool) { + tc.RLock() + defer tc.RUnlock() + val, has := tc.targets[ident] + return val, has +} + +func (tc *TargetCacheType) GetDeads(actives map[string]struct{}) map[string]*models.Target { + ret := make(map[string]*models.Target) + + tc.RLock() + defer tc.RUnlock() + + for ident, target := range tc.targets { + if _, has := actives[ident]; !has { + ret[ident] = target + } + } + + return ret +} + +func SyncTargets() { + err := syncTargets() + if err != nil { + fmt.Println("failed to sync targets:", err) + exit(1) + } + + go loopSyncTargets() +} + +func loopSyncTargets() { + duration := time.Duration(9000) * time.Millisecond + for { + time.Sleep(duration) + if err := syncTargets(); err != nil { + logger.Warning("failed to sync targets:", err) + } + } +} + +func syncTargets() error { + start := time.Now() + + stat, err := models.TargetStatistics(config.C.ClusterName) + if err != nil { + return errors.WithMessage(err, "failed to exec TargetStatistics") + } + + if !TargetCache.StatChanged(stat.Total, stat.LastUpdated) { + promstat.GaugeCronDuration.WithLabelValues(config.C.ClusterName, "sync_targets").Set(0) + promstat.GaugeSyncNumber.WithLabelValues(config.C.ClusterName, "sync_targets").Set(0) + logger.Debug("targets not changed") + return nil + } + + lst, err := models.TargetGetsByCluster(config.C.ClusterName) + if err != nil { + return errors.WithMessage(err, "failed to exec TargetGetsByCluster") + } + + m := make(map[string]*models.Target) + for i := 0; i < len(lst); i++ { + lst[i].TagsJSON = strings.Fields(lst[i].Tags) + lst[i].TagsMap = make(map[string]string) + for _, item := range lst[i].TagsJSON { + arr := strings.Split(item, "=") + if len(arr) != 2 { + continue + } + lst[i].TagsMap[arr[0]] = arr[1] + } + m[lst[i].Ident] = lst[i] + } + + TargetCache.Set(m, stat.Total, stat.LastUpdated) + + ms := time.Since(start).Milliseconds() + promstat.GaugeCronDuration.WithLabelValues(config.C.ClusterName, "sync_targets").Set(float64(ms)) + promstat.GaugeSyncNumber.WithLabelValues(config.C.ClusterName, "sync_targets").Set(float64(len(lst))) + logger.Infof("timer: sync targets done, cost: %dms, number: %d", ms, len(lst)) + + return nil +} diff --git a/src/server/memsto/user_cache.go b/src/server/memsto/user_cache.go new file mode 100644 index 00000000..21d89c7f --- /dev/null +++ b/src/server/memsto/user_cache.go @@ -0,0 +1,134 @@ +package memsto + +import ( + "fmt" + "sync" + "time" + + "github.com/pkg/errors" + "github.com/toolkits/pkg/logger" + + "github.com/didi/nightingale/v5/src/models" + "github.com/didi/nightingale/v5/src/server/config" + promstat "github.com/didi/nightingale/v5/src/server/stat" +) + +type UserCacheType struct { + statTotal int64 + statLastUpdated int64 + + sync.RWMutex + users 
map[int64]*models.User // key: id +} + +var UserCache = UserCacheType{ + statTotal: -1, + statLastUpdated: -1, + users: make(map[int64]*models.User), +} + +func (uc *UserCacheType) StatChanged(total, lastUpdated int64) bool { + if uc.statTotal == total && uc.statLastUpdated == lastUpdated { + return false + } + + return true +} + +func (uc *UserCacheType) Set(m map[int64]*models.User, total, lastUpdated int64) { + uc.Lock() + uc.users = m + uc.Unlock() + + // only one goroutine used, so no need lock + uc.statTotal = total + uc.statLastUpdated = lastUpdated +} + +func (uc *UserCacheType) GetByUserId(id int64) *models.User { + uc.RLock() + defer uc.RUnlock() + return uc.users[id] +} + +func (uc *UserCacheType) GetByUserIds(ids []int64) []*models.User { + set := make(map[int64]struct{}) + + uc.RLock() + defer uc.RUnlock() + + var users []*models.User + for _, id := range ids { + if uc.users[id] == nil { + continue + } + + if _, has := set[id]; has { + continue + } + + users = append(users, uc.users[id]) + set[id] = struct{}{} + } + + if users == nil { + users = []*models.User{} + } + + return users +} + +func SyncUsers() { + err := syncUsers() + if err != nil { + fmt.Println("failed to sync users:", err) + exit(1) + } + + go loopSyncUsers() +} + +func loopSyncUsers() { + duration := time.Duration(9000) * time.Millisecond + for { + time.Sleep(duration) + if err := syncUsers(); err != nil { + logger.Warning("failed to sync users:", err) + } + } +} + +func syncUsers() error { + start := time.Now() + + stat, err := models.UserStatistics() + if err != nil { + return errors.WithMessage(err, "failed to exec UserStatistics") + } + + if !UserCache.StatChanged(stat.Total, stat.LastUpdated) { + promstat.GaugeCronDuration.WithLabelValues(config.C.ClusterName, "sync_users").Set(0) + promstat.GaugeSyncNumber.WithLabelValues(config.C.ClusterName, "sync_users").Set(0) + logger.Debug("users not changed") + return nil + } + + lst, err := models.UserGetAll() + if err != nil { + return errors.WithMessage(err, "failed to exec UserGetAll") + } + + m := make(map[int64]*models.User) + for i := 0; i < len(lst); i++ { + m[lst[i].Id] = lst[i] + } + + UserCache.Set(m, stat.Total, stat.LastUpdated) + + ms := time.Since(start).Milliseconds() + promstat.GaugeCronDuration.WithLabelValues(config.C.ClusterName, "sync_users").Set(float64(ms)) + promstat.GaugeSyncNumber.WithLabelValues(config.C.ClusterName, "sync_users").Set(float64(len(m))) + logger.Infof("timer: sync users done, cost: %dms, number: %d", ms, len(m)) + + return nil +} diff --git a/src/server/memsto/user_group_cache.go b/src/server/memsto/user_group_cache.go new file mode 100644 index 00000000..13ca4843 --- /dev/null +++ b/src/server/memsto/user_group_cache.go @@ -0,0 +1,153 @@ +package memsto + +import ( + "fmt" + "sync" + "time" + + "github.com/pkg/errors" + "github.com/toolkits/pkg/logger" + + "github.com/didi/nightingale/v5/src/models" + "github.com/didi/nightingale/v5/src/server/config" + promstat "github.com/didi/nightingale/v5/src/server/stat" +) + +type UserGroupCacheType struct { + statTotal int64 + statLastUpdated int64 + + sync.RWMutex + ugs map[int64]*models.UserGroup // key: id +} + +var UserGroupCache = UserGroupCacheType{ + statTotal: -1, + statLastUpdated: -1, + ugs: make(map[int64]*models.UserGroup), +} + +func (ugc *UserGroupCacheType) StatChanged(total, lastUpdated int64) bool { + if ugc.statTotal == total && ugc.statLastUpdated == lastUpdated { + return false + } + + return true +} + +func (ugc *UserGroupCacheType) Set(ugs 
map[int64]*models.UserGroup, total, lastUpdated int64) { + ugc.Lock() + ugc.ugs = ugs + ugc.Unlock() + + // only one goroutine used, so no need lock + ugc.statTotal = total + ugc.statLastUpdated = lastUpdated +} + +func (ugc *UserGroupCacheType) GetByUserGroupId(id int64) *models.UserGroup { + ugc.RLock() + defer ugc.RUnlock() + return ugc.ugs[id] +} + +func (ugc *UserGroupCacheType) GetByUserGroupIds(ids []int64) []*models.UserGroup { + set := make(map[int64]struct{}) + + ugc.RLock() + defer ugc.RUnlock() + + var ugs []*models.UserGroup + for _, id := range ids { + if ugc.ugs[id] == nil { + continue + } + + if _, has := set[id]; has { + continue + } + + ugs = append(ugs, ugc.ugs[id]) + set[id] = struct{}{} + } + + if ugs == nil { + return []*models.UserGroup{} + } + + return ugs +} + +func SyncUserGroups() { + err := syncUserGroups() + if err != nil { + fmt.Println("failed to sync user groups:", err) + exit(1) + } + + go loopSyncUserGroups() +} + +func loopSyncUserGroups() { + duration := time.Duration(9000) * time.Millisecond + for { + time.Sleep(duration) + if err := syncUserGroups(); err != nil { + logger.Warning("failed to sync user groups:", err) + } + } +} + +func syncUserGroups() error { + start := time.Now() + + stat, err := models.UserGroupStatistics() + if err != nil { + return errors.WithMessage(err, "failed to exec UserGroupStatistics") + } + + if !UserGroupCache.StatChanged(stat.Total, stat.LastUpdated) { + promstat.GaugeCronDuration.WithLabelValues(config.C.ClusterName, "sync_user_groups").Set(0) + promstat.GaugeSyncNumber.WithLabelValues(config.C.ClusterName, "sync_user_groups").Set(0) + logger.Debug("user_group not changed") + return nil + } + + lst, err := models.UserGroupGetAll() + if err != nil { + return errors.WithMessage(err, "failed to exec UserGroupGetAll") + } + + m := make(map[int64]*models.UserGroup) + for i := 0; i < len(lst); i++ { + m[lst[i].Id] = lst[i] + } + + // fill user ids + members, err := models.UserGroupMemberGetAll() + if err != nil { + return errors.WithMessage(err, "failed to exec UserGroupMemberGetAll") + } + + for i := 0; i < len(members); i++ { + ug, has := m[members[i].GroupId] + if !has { + continue + } + + if ug == nil { + continue + } + + ug.UserIds = append(ug.UserIds, members[i].UserId) + } + + UserGroupCache.Set(m, stat.Total, stat.LastUpdated) + + ms := time.Since(start).Milliseconds() + promstat.GaugeCronDuration.WithLabelValues(config.C.ClusterName, "sync_user_groups").Set(float64(ms)) + promstat.GaugeSyncNumber.WithLabelValues(config.C.ClusterName, "sync_user_groups").Set(float64(len(m))) + logger.Infof("timer: sync user groups done, cost: %dms, number: %d", ms, len(m)) + + return nil +} diff --git a/naming/hash_ring.go b/src/server/naming/hashring.go similarity index 86% rename from naming/hash_ring.go rename to src/server/naming/hashring.go index c1944625..468b2ad4 100644 --- a/naming/hash_ring.go +++ b/src/server/naming/hashring.go @@ -14,9 +14,7 @@ type ConsistentHashRing struct { ring *consistent.Consistent } -// 哈希环是活着的judge实例(因为模块合并,即server实例)组成的 -// trans利用哈希环做数据分片计算 -// judge利用哈希环做PULL型策略分片计算 +// for alert_rule sharding var HashRing = NewConsistentHashRing(int32(NodeReplicas), []string{}) func (chr *ConsistentHashRing) GetNode(pk string) (string, error) { diff --git a/src/server/naming/heartbeat.go b/src/server/naming/heartbeat.go new file mode 100644 index 00000000..ab4dff0a --- /dev/null +++ b/src/server/naming/heartbeat.go @@ -0,0 +1,111 @@ +package naming + +import ( + "context" + "fmt" + "sort" + "strconv" + "strings" + 
"time" + + "github.com/toolkits/pkg/logger" + + "github.com/didi/nightingale/v5/src/storage" +) + +// local servers +var localss string + +type HeartbeatConfig struct { + IP string + Interval int64 + Endpoint string + Cluster string +} + +func Heartbeat(ctx context.Context, cfg HeartbeatConfig) error { + if err := heartbeat(ctx, cfg); err != nil { + fmt.Println("failed to heartbeat:", err) + return err + } + + go loopHeartbeat(ctx, cfg) + return nil +} + +func loopHeartbeat(ctx context.Context, cfg HeartbeatConfig) { + interval := time.Duration(cfg.Interval) * time.Millisecond + for { + time.Sleep(interval) + if err := heartbeat(ctx, cfg); err != nil { + logger.Warning(err) + } + } +} + +// hash struct: +// /server/heartbeat/Default -> { +// 10.2.3.4:19000 => $timestamp +// 10.2.3.5:19000 => $timestamp +// } +func redisKey(cluster string) string { + return fmt.Sprintf("/server/heartbeat/%s", cluster) +} + +func heartbeat(ctx context.Context, cfg HeartbeatConfig) error { + now := time.Now().Unix() + key := redisKey(cfg.Cluster) + err := storage.Redis.HSet(ctx, key, cfg.Endpoint, now).Err() + if err != nil { + return err + } + + servers, err := ActiveServers(ctx, cfg.Cluster) + if err != nil { + return err + } + + sort.Strings(servers) + newss := strings.Join(servers, " ") + if newss != localss { + RebuildConsistentHashRing(servers) + localss = newss + } + + return nil +} + +func clearDeadServer(ctx context.Context, cluster, endpoint string) { + key := redisKey(cluster) + err := storage.Redis.HDel(ctx, key, endpoint).Err() + if err != nil { + logger.Warningf("failed to hdel %s %s, error: %v", key, endpoint, err) + } +} + +func ActiveServers(ctx context.Context, cluster string) ([]string, error) { + ret, err := storage.Redis.HGetAll(ctx, redisKey(cluster)).Result() + if err != nil { + return nil, err + } + + now := time.Now().Unix() + dur := int64(20) + + actives := make([]string, 0, len(ret)) + for endpoint, clockstr := range ret { + clock, err := strconv.ParseInt(clockstr, 10, 64) + if err != nil { + continue + } + + if now-clock > dur { + clearDeadServer(ctx, cluster, endpoint) + continue + } + + actives = append(actives, endpoint) + } + + return actives, nil +} diff --git a/src/server/reader/api.go b/src/server/reader/api.go new file mode 100644 index 00000000..60589692 --- /dev/null +++ b/src/server/reader/api.go @@ -0,0 +1,996 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Package v1 provides bindings to the Prometheus HTTP API v1: +// http://prometheus.io/docs/querying/api/ +package reader + +import ( + "context" + "errors" + "fmt" + "math" + "net/http" + "net/url" + "strconv" + "strings" + "time" + "unsafe" + + json "github.com/json-iterator/go" + + "github.com/prometheus/common/model" + + "github.com/prometheus/client_golang/api" +) + +func init() { + json.RegisterTypeEncoderFunc("model.SamplePair", marshalPointJSON, marshalPointJSONIsEmpty) + json.RegisterTypeDecoderFunc("model.SamplePair", unMarshalPointJSON) +} + +func unMarshalPointJSON(ptr unsafe.Pointer, iter *json.Iterator) { + p := (*model.SamplePair)(ptr) + if !iter.ReadArray() { + iter.ReportError("unmarshal model.SamplePair", "SamplePair must be [timestamp, value]") + return + } + t := iter.ReadNumber() + if err := p.Timestamp.UnmarshalJSON([]byte(t)); err != nil { + iter.ReportError("unmarshal model.SamplePair", err.Error()) + return + } + if !iter.ReadArray() { + iter.ReportError("unmarshal model.SamplePair", "SamplePair missing value") + return + } + + f, err := strconv.ParseFloat(iter.ReadString(), 64) + if err != nil { + iter.ReportError("unmarshal model.SamplePair", err.Error()) + return + } + p.Value = model.SampleValue(f) + + if iter.ReadArray() { + iter.ReportError("unmarshal model.SamplePair", "SamplePair has too many values, must be [timestamp, value]") + return + } +} + +func marshalPointJSON(ptr unsafe.Pointer, stream *json.Stream) { + p := *((*model.SamplePair)(ptr)) + stream.WriteArrayStart() + // Write out the timestamp as a float divided by 1000. + // This is ~3x faster than converting to a float. + t := int64(p.Timestamp) + if t < 0 { + stream.WriteRaw(`-`) + t = -t + } + stream.WriteInt64(t / 1000) + fraction := t % 1000 + if fraction != 0 { + stream.WriteRaw(`.`) + if fraction < 100 { + stream.WriteRaw(`0`) + } + if fraction < 10 { + stream.WriteRaw(`0`) + } + stream.WriteInt64(fraction) + } + stream.WriteMore() + stream.WriteRaw(`"`) + + // Taken from https://github.com/json-iterator/go/blob/master/stream_float.go#L71 as a workaround + // to https://github.com/json-iterator/go/issues/365 (jsoniter, to follow json standard, doesn't allow inf/nan) + buf := stream.Buffer() + abs := math.Abs(float64(p.Value)) + fmt := byte('f') + // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. + if abs != 0 { + if abs < 1e-6 || abs >= 1e21 { + fmt = 'e' + } + } + buf = strconv.AppendFloat(buf, float64(p.Value), fmt, -1, 64) + stream.SetBuffer(buf) + + stream.WriteRaw(`"`) + stream.WriteArrayEnd() + +} + +func marshalPointJSONIsEmpty(ptr unsafe.Pointer) bool { + return false +} + +const ( + statusAPIError = 422 + + apiPrefix = "/api/v1" + + epAlerts = apiPrefix + "/alerts" + epAlertManagers = apiPrefix + "/alertmanagers" + epQuery = apiPrefix + "/query" + epQueryRange = apiPrefix + "/query_range" + epLabels = apiPrefix + "/labels" + epLabelValues = apiPrefix + "/label/:name/values" + epSeries = apiPrefix + "/series" + epTargets = apiPrefix + "/targets" + epTargetsMetadata = apiPrefix + "/targets/metadata" + epMetadata = apiPrefix + "/metadata" + epRules = apiPrefix + "/rules" + epSnapshot = apiPrefix + "/admin/tsdb/snapshot" + epDeleteSeries = apiPrefix + "/admin/tsdb/delete_series" + epCleanTombstones = apiPrefix + "/admin/tsdb/clean_tombstones" + epConfig = apiPrefix + "/status/config" + epFlags = apiPrefix + "/status/flags" +) + +// AlertState models the state of an alert. 
+type AlertState string + +// ErrorType models the different API error types. +type ErrorType string + +// HealthStatus models the health status of a scrape target. +type HealthStatus string + +// RuleType models the type of a rule. +type RuleType string + +// RuleHealth models the health status of a rule. +type RuleHealth string + +// MetricType models the type of a metric. +type MetricType string + +const ( + // Possible values for AlertState. + AlertStateFiring AlertState = "firing" + AlertStateInactive AlertState = "inactive" + AlertStatePending AlertState = "pending" + + // Possible values for ErrorType. + ErrBadData ErrorType = "bad_data" + ErrTimeout ErrorType = "timeout" + ErrCanceled ErrorType = "canceled" + ErrExec ErrorType = "execution" + ErrBadResponse ErrorType = "bad_response" + ErrServer ErrorType = "server_error" + ErrClient ErrorType = "client_error" + + // Possible values for HealthStatus. + HealthGood HealthStatus = "up" + HealthUnknown HealthStatus = "unknown" + HealthBad HealthStatus = "down" + + // Possible values for RuleType. + RuleTypeRecording RuleType = "recording" + RuleTypeAlerting RuleType = "alerting" + + // Possible values for RuleHealth. + RuleHealthGood = "ok" + RuleHealthUnknown = "unknown" + RuleHealthBad = "err" + + // Possible values for MetricType + MetricTypeCounter MetricType = "counter" + MetricTypeGauge MetricType = "gauge" + MetricTypeHistogram MetricType = "histogram" + MetricTypeGaugeHistogram MetricType = "gaugehistogram" + MetricTypeSummary MetricType = "summary" + MetricTypeInfo MetricType = "info" + MetricTypeStateset MetricType = "stateset" + MetricTypeUnknown MetricType = "unknown" +) + +// Error is an error returned by the API. +type Error struct { + Type ErrorType + Msg string + Detail string +} + +func (e *Error) Error() string { + return fmt.Sprintf("%s: %s", e.Type, e.Msg) +} + +// Range represents a sliced time range. +type Range struct { + // The boundaries of the time range. + Start, End time.Time + // The maximum time between two slices within the boundaries. + Step time.Duration +} + +const ( + DefaultStep = 30 * time.Second + MaxPoints = 30000 +) + +func (r *Range) Validate() bool { + if r.Step <= 0 { + r.Step = DefaultStep + } + + if !r.End.After(r.Start) { + return false + } + + dur := r.End.Sub(r.Start) + + for dur/r.Step > MaxPoints { + if r.Step < time.Second { + r.Step *= 10 + continue + } + if r.Step < time.Hour { + r.Step *= 60 + continue + } + + r.Step *= 2 + } + + return true +} + +// API provides bindings for Prometheus's v1 API. +type API interface { + // Alerts returns a list of all active alerts. + Alerts(ctx context.Context) (AlertsResult, error) + // AlertManagers returns an overview of the current state of the Prometheus alert manager discovery. + AlertManagers(ctx context.Context) (AlertManagersResult, error) + // CleanTombstones removes the deleted data from disk and cleans up the existing tombstones. + CleanTombstones(ctx context.Context) error + // Config returns the current Prometheus configuration. + Config(ctx context.Context) (ConfigResult, error) + // DeleteSeries deletes data for a selection of series in a time range. + DeleteSeries(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) error + // Flags returns the flag values that Prometheus was launched with. + Flags(ctx context.Context) (FlagsResult, error) + // LabelNames returns all the unique label names present in the block in sorted order. 
+ LabelNames(ctx context.Context) ([]string, Warnings, error) + // LabelValues performs a query for the values of the given label. + LabelValues(ctx context.Context, label string, matchs []string) (model.LabelValues, Warnings, error) + // Query performs a query for the given time. + Query(ctx context.Context, query string, ts time.Time) (model.Value, Warnings, error) + // QueryRange performs a query for the given range. + QueryRange(ctx context.Context, query string, r Range) (model.Value, Warnings, error) + // Series finds series by label matchers. + Series(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) ([]model.LabelSet, Warnings, error) + // Snapshot creates a snapshot of all current data into snapshots/- + // under the TSDB's data directory and returns the directory as response. + Snapshot(ctx context.Context, skipHead bool) (SnapshotResult, error) + // Rules returns a list of alerting and recording rules that are currently loaded. + Rules(ctx context.Context) (RulesResult, error) + // Targets returns an overview of the current state of the Prometheus target discovery. + Targets(ctx context.Context) (TargetsResult, error) + // TargetsMetadata returns metadata about metrics currently scraped by the target. + TargetsMetadata(ctx context.Context, matchTarget string, metric string, limit string) ([]MetricMetadata, error) + // Metadata returns metadata about metrics currently scraped by the metric name. + Metadata(ctx context.Context, metric string, limit string) (map[string][]Metadata, error) +} + +// AlertsResult contains the result from querying the alerts endpoint. +type AlertsResult struct { + Alerts []Alert `json:"alerts"` +} + +// AlertManagersResult contains the result from querying the alertmanagers endpoint. +type AlertManagersResult struct { + Active []AlertManager `json:"activeAlertManagers"` + Dropped []AlertManager `json:"droppedAlertManagers"` +} + +// AlertManager models a configured Alert Manager. +type AlertManager struct { + URL string `json:"url"` +} + +// ConfigResult contains the result from querying the config endpoint. +type ConfigResult struct { + YAML string `json:"yaml"` +} + +// FlagsResult contains the result from querying the flag endpoint. +type FlagsResult map[string]string + +// SnapshotResult contains the result from querying the snapshot endpoint. +type SnapshotResult struct { + Name string `json:"name"` +} + +// RulesResult contains the result from querying the rules endpoint. +type RulesResult struct { + Groups []RuleGroup `json:"groups"` +} + +// RuleGroup models a rule group that contains a set of recording and alerting rules. +type RuleGroup struct { + Name string `json:"name"` + File string `json:"file"` + Interval float64 `json:"interval"` + Rules Rules `json:"rules"` +} + +// Recording and alerting rules are stored in the same slice to preserve the order +// that rules are returned in by the API. +// +// Rule types can be determined using a type switch: +// switch v := rule.(type) { +// case RecordingRule: +// fmt.Print("got a recording rule") +// case AlertingRule: +// fmt.Print("got a alerting rule") +// default: +// fmt.Printf("unknown rule type %s", v) +// } +type Rules []interface{} + +// AlertingRule models a alerting rule. 
+type AlertingRule struct { + Name string `json:"name"` + Query string `json:"query"` + Duration float64 `json:"duration"` + Labels model.LabelSet `json:"labels"` + Annotations model.LabelSet `json:"annotations"` + Alerts []*Alert `json:"alerts"` + Health RuleHealth `json:"health"` + LastError string `json:"lastError,omitempty"` +} + +// RecordingRule models a recording rule. +type RecordingRule struct { + Name string `json:"name"` + Query string `json:"query"` + Labels model.LabelSet `json:"labels,omitempty"` + Health RuleHealth `json:"health"` + LastError string `json:"lastError,omitempty"` +} + +// Alert models an active alert. +type Alert struct { + ActiveAt time.Time `json:"activeAt"` + Annotations model.LabelSet + Labels model.LabelSet + State AlertState + Value string +} + +// TargetsResult contains the result from querying the targets endpoint. +type TargetsResult struct { + Active []ActiveTarget `json:"activeTargets"` + Dropped []DroppedTarget `json:"droppedTargets"` +} + +// ActiveTarget models an active Prometheus scrape target. +type ActiveTarget struct { + DiscoveredLabels map[string]string `json:"discoveredLabels"` + Labels model.LabelSet `json:"labels"` + ScrapeURL string `json:"scrapeUrl"` + LastError string `json:"lastError"` + LastScrape time.Time `json:"lastScrape"` + Health HealthStatus `json:"health"` +} + +// DroppedTarget models a dropped Prometheus scrape target. +type DroppedTarget struct { + DiscoveredLabels map[string]string `json:"discoveredLabels"` +} + +// MetricMetadata models the metadata of a metric with its scrape target and name. +type MetricMetadata struct { + Target map[string]string `json:"target"` + Metric string `json:"metric,omitempty"` + Type MetricType `json:"type"` + Help string `json:"help"` + Unit string `json:"unit"` +} + +// Metadata models the metadata of a metric. +type Metadata struct { + Type MetricType `json:"type"` + Help string `json:"help"` + Unit string `json:"unit"` +} + +// queryResult contains result data for a query. +type queryResult struct { + Type model.ValueType `json:"resultType"` + Result interface{} `json:"result"` + + // The decoded value. 
+ v model.Value +} + +func (rg *RuleGroup) UnmarshalJSON(b []byte) error { + v := struct { + Name string `json:"name"` + File string `json:"file"` + Interval float64 `json:"interval"` + Rules []json.RawMessage `json:"rules"` + }{} + + if err := json.Unmarshal(b, &v); err != nil { + return err + } + + rg.Name = v.Name + rg.File = v.File + rg.Interval = v.Interval + + for _, rule := range v.Rules { + alertingRule := AlertingRule{} + if err := json.Unmarshal(rule, &alertingRule); err == nil { + rg.Rules = append(rg.Rules, alertingRule) + continue + } + recordingRule := RecordingRule{} + if err := json.Unmarshal(rule, &recordingRule); err == nil { + rg.Rules = append(rg.Rules, recordingRule) + continue + } + return errors.New("failed to decode JSON into an alerting or recording rule") + } + + return nil +} + +func (r *AlertingRule) UnmarshalJSON(b []byte) error { + v := struct { + Type string `json:"type"` + }{} + if err := json.Unmarshal(b, &v); err != nil { + return err + } + if v.Type == "" { + return errors.New("type field not present in rule") + } + if v.Type != string(RuleTypeAlerting) { + return fmt.Errorf("expected rule of type %s but got %s", string(RuleTypeAlerting), v.Type) + } + + rule := struct { + Name string `json:"name"` + Query string `json:"query"` + Duration float64 `json:"duration"` + Labels model.LabelSet `json:"labels"` + Annotations model.LabelSet `json:"annotations"` + Alerts []*Alert `json:"alerts"` + Health RuleHealth `json:"health"` + LastError string `json:"lastError,omitempty"` + }{} + if err := json.Unmarshal(b, &rule); err != nil { + return err + } + r.Health = rule.Health + r.Annotations = rule.Annotations + r.Name = rule.Name + r.Query = rule.Query + r.Alerts = rule.Alerts + r.Duration = rule.Duration + r.Labels = rule.Labels + r.LastError = rule.LastError + + return nil +} + +func (r *RecordingRule) UnmarshalJSON(b []byte) error { + v := struct { + Type string `json:"type"` + }{} + if err := json.Unmarshal(b, &v); err != nil { + return err + } + if v.Type == "" { + return errors.New("type field not present in rule") + } + if v.Type != string(RuleTypeRecording) { + return fmt.Errorf("expected rule of type %s but got %s", string(RuleTypeRecording), v.Type) + } + + rule := struct { + Name string `json:"name"` + Query string `json:"query"` + Labels model.LabelSet `json:"labels,omitempty"` + Health RuleHealth `json:"health"` + LastError string `json:"lastError,omitempty"` + }{} + if err := json.Unmarshal(b, &rule); err != nil { + return err + } + r.Health = rule.Health + r.Labels = rule.Labels + r.Name = rule.Name + r.LastError = rule.LastError + r.Query = rule.Query + + return nil +} + +func (qr *queryResult) UnmarshalJSON(b []byte) error { + v := struct { + Type model.ValueType `json:"resultType"` + Result json.RawMessage `json:"result"` + }{} + + err := json.Unmarshal(b, &v) + if err != nil { + return err + } + + switch v.Type { + case model.ValScalar: + var sv model.Scalar + err = json.Unmarshal(v.Result, &sv) + qr.v = &sv + + case model.ValVector: + var vv model.Vector + err = json.Unmarshal(v.Result, &vv) + qr.v = vv + + case model.ValMatrix: + var mv model.Matrix + err = json.Unmarshal(v.Result, &mv) + qr.v = mv + + default: + err = fmt.Errorf("unexpected value type %q", v.Type) + } + return err +} + +// NewAPI returns a new API for the client. +// +// It is safe to use the returned API from multiple goroutines. 
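+//
+// A minimal usage sketch (the address below is illustrative only, not part of this change):
+//
+//   cli, err := api.NewClient(api.Config{Address: "http://127.0.0.1:9090"})
+//   if err != nil {
+//       // handle error
+//   }
+//   value, warnings, err := NewAPI(cli).Query(context.Background(), "up", time.Now())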
+func NewAPI(c api.Client) API { + return &httpAPI{ + client: &apiClientImpl{ + client: c, + }, + } +} + +type httpAPI struct { + client apiClient +} + +func (h *httpAPI) Alerts(ctx context.Context) (AlertsResult, error) { + u := h.client.URL(epAlerts, nil) + + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return AlertsResult{}, err + } + + _, body, _, err := h.client.Do(ctx, req) + if err != nil { + return AlertsResult{}, err + } + + var res AlertsResult + return res, json.Unmarshal(body, &res) +} + +func (h *httpAPI) AlertManagers(ctx context.Context) (AlertManagersResult, error) { + u := h.client.URL(epAlertManagers, nil) + + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return AlertManagersResult{}, err + } + + _, body, _, err := h.client.Do(ctx, req) + if err != nil { + return AlertManagersResult{}, err + } + + var res AlertManagersResult + return res, json.Unmarshal(body, &res) +} + +func (h *httpAPI) CleanTombstones(ctx context.Context) error { + u := h.client.URL(epCleanTombstones, nil) + + req, err := http.NewRequest(http.MethodPost, u.String(), nil) + if err != nil { + return err + } + + _, _, _, err = h.client.Do(ctx, req) + return err +} + +func (h *httpAPI) Config(ctx context.Context) (ConfigResult, error) { + u := h.client.URL(epConfig, nil) + + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return ConfigResult{}, err + } + + _, body, _, err := h.client.Do(ctx, req) + if err != nil { + return ConfigResult{}, err + } + + var res ConfigResult + return res, json.Unmarshal(body, &res) +} + +func (h *httpAPI) DeleteSeries(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) error { + u := h.client.URL(epDeleteSeries, nil) + q := u.Query() + + for _, m := range matches { + q.Add("match[]", m) + } + + q.Set("start", formatTime(startTime)) + q.Set("end", formatTime(endTime)) + + u.RawQuery = q.Encode() + + req, err := http.NewRequest(http.MethodPost, u.String(), nil) + if err != nil { + return err + } + + _, _, _, err = h.client.Do(ctx, req) + return err +} + +func (h *httpAPI) Flags(ctx context.Context) (FlagsResult, error) { + u := h.client.URL(epFlags, nil) + + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return FlagsResult{}, err + } + + _, body, _, err := h.client.Do(ctx, req) + if err != nil { + return FlagsResult{}, err + } + + var res FlagsResult + return res, json.Unmarshal(body, &res) +} + +func (h *httpAPI) LabelNames(ctx context.Context) ([]string, Warnings, error) { + u := h.client.URL(epLabels, nil) + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return nil, nil, err + } + _, body, w, err := h.client.Do(ctx, req) + if err != nil { + return nil, w, err + } + var labelNames []string + return labelNames, w, json.Unmarshal(body, &labelNames) +} + +func (h *httpAPI) LabelValues(ctx context.Context, label string, matchs []string) (model.LabelValues, Warnings, error) { + u := h.client.URL(epLabelValues, map[string]string{"name": label}) + q := u.Query() + + for _, m := range matchs { + q.Add("match[]", m) + } + u.RawQuery = q.Encode() + + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return nil, nil, err + } + _, body, w, err := h.client.Do(ctx, req) + if err != nil { + return nil, w, err + } + var labelValues model.LabelValues + return labelValues, w, json.Unmarshal(body, &labelValues) +} + +func (h *httpAPI) Query(ctx context.Context, query 
string, ts time.Time) (model.Value, Warnings, error) { + u := h.client.URL(epQuery, nil) + q := u.Query() + + q.Set("query", query) + if !ts.IsZero() { + q.Set("time", formatTime(ts)) + } + + resp, body, warnings, err := h.client.DoGetFallback(ctx, u, q) + if err != nil { + return nil, warnings, err + } + + if resp.StatusCode > 200 { + fmt.Println("status code:", resp.StatusCode) + } + + var qres queryResult + return model.Value(qres.v), warnings, json.Unmarshal(body, &qres) +} + +func (h *httpAPI) QueryRange(ctx context.Context, query string, r Range) (model.Value, Warnings, error) { + u := h.client.URL(epQueryRange, nil) + q := u.Query() + + q.Set("query", query) + q.Set("start", formatTime(r.Start)) + q.Set("end", formatTime(r.End)) + q.Set("step", strconv.FormatFloat(r.Step.Seconds(), 'f', -1, 64)) + + _, body, warnings, err := h.client.DoGetFallback(ctx, u, q) + if err != nil { + return nil, warnings, err + } + + var qres queryResult + + return model.Value(qres.v), warnings, json.Unmarshal(body, &qres) +} + +func (h *httpAPI) Series(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) ([]model.LabelSet, Warnings, error) { + u := h.client.URL(epSeries, nil) + q := u.Query() + + for _, m := range matches { + q.Add("match[]", m) + } + + q.Set("start", formatTime(startTime)) + q.Set("end", formatTime(endTime)) + + u.RawQuery = q.Encode() + + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return nil, nil, err + } + + _, body, warnings, err := h.client.Do(ctx, req) + if err != nil { + return nil, warnings, err + } + + var mset []model.LabelSet + return mset, warnings, json.Unmarshal(body, &mset) +} + +func (h *httpAPI) Snapshot(ctx context.Context, skipHead bool) (SnapshotResult, error) { + u := h.client.URL(epSnapshot, nil) + q := u.Query() + + q.Set("skip_head", strconv.FormatBool(skipHead)) + + u.RawQuery = q.Encode() + + req, err := http.NewRequest(http.MethodPost, u.String(), nil) + if err != nil { + return SnapshotResult{}, err + } + + _, body, _, err := h.client.Do(ctx, req) + if err != nil { + return SnapshotResult{}, err + } + + var res SnapshotResult + return res, json.Unmarshal(body, &res) +} + +func (h *httpAPI) Rules(ctx context.Context) (RulesResult, error) { + u := h.client.URL(epRules, nil) + + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return RulesResult{}, err + } + + _, body, _, err := h.client.Do(ctx, req) + if err != nil { + return RulesResult{}, err + } + + var res RulesResult + return res, json.Unmarshal(body, &res) +} + +func (h *httpAPI) Targets(ctx context.Context) (TargetsResult, error) { + u := h.client.URL(epTargets, nil) + + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return TargetsResult{}, err + } + + _, body, _, err := h.client.Do(ctx, req) + if err != nil { + return TargetsResult{}, err + } + + var res TargetsResult + return res, json.Unmarshal(body, &res) +} + +func (h *httpAPI) TargetsMetadata(ctx context.Context, matchTarget string, metric string, limit string) ([]MetricMetadata, error) { + u := h.client.URL(epTargetsMetadata, nil) + q := u.Query() + + q.Set("match_target", matchTarget) + q.Set("metric", metric) + q.Set("limit", limit) + + u.RawQuery = q.Encode() + + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return nil, err + } + + _, body, _, err := h.client.Do(ctx, req) + if err != nil { + return nil, err + } + + var res []MetricMetadata + return res, json.Unmarshal(body, &res) +} 
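+
+// (Metadata, below, queries the /api/v1/metadata endpoint; the result maps each metric name to its metadata entries.)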
+ +func (h *httpAPI) Metadata(ctx context.Context, metric string, limit string) (map[string][]Metadata, error) { + u := h.client.URL(epMetadata, nil) + q := u.Query() + + q.Set("metric", metric) + q.Set("limit", limit) + + u.RawQuery = q.Encode() + + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return nil, err + } + + _, body, _, err := h.client.Do(ctx, req) + if err != nil { + return nil, err + } + + var res map[string][]Metadata + return res, json.Unmarshal(body, &res) +} + +// Warnings is an array of non critical errors +type Warnings []string + +// apiClient wraps a regular client and processes successful API responses. +// Successful also includes responses that errored at the API level. +type apiClient interface { + URL(ep string, args map[string]string) *url.URL + Do(context.Context, *http.Request) (*http.Response, []byte, Warnings, error) + DoGetFallback(ctx context.Context, u *url.URL, args url.Values) (*http.Response, []byte, Warnings, error) +} + +type apiClientImpl struct { + client api.Client +} + +type apiResponse struct { + Status string `json:"status"` + Data json.RawMessage `json:"data"` + ErrorType ErrorType `json:"errorType"` + Error string `json:"error"` + Warnings []string `json:"warnings,omitempty"` +} + +func apiError(code int) bool { + // These are the codes that Prometheus sends when it returns an error. + return code == statusAPIError || code == http.StatusBadRequest +} + +func errorTypeAndMsgFor(resp *http.Response) (ErrorType, string) { + switch resp.StatusCode / 100 { + case 4: + return ErrClient, fmt.Sprintf("client error: %d", resp.StatusCode) + case 5: + return ErrServer, fmt.Sprintf("server error: %d", resp.StatusCode) + } + return ErrBadResponse, fmt.Sprintf("bad response code %d", resp.StatusCode) +} + +func (h *apiClientImpl) URL(ep string, args map[string]string) *url.URL { + return h.client.URL(ep, args) +} + +func (h *apiClientImpl) Do(ctx context.Context, req *http.Request) (*http.Response, []byte, Warnings, error) { + resp, body, err := h.client.Do(ctx, req) + if err != nil { + return resp, body, nil, err + } + + code := resp.StatusCode + + if code/100 != 2 && !apiError(code) { + errorType, errorMsg := errorTypeAndMsgFor(resp) + return resp, body, nil, &Error{ + Type: errorType, + Msg: errorMsg, + Detail: string(body), + } + } + + var result apiResponse + + if http.StatusNoContent != code { + if jsonErr := json.Unmarshal(body, &result); jsonErr != nil { + return resp, body, nil, &Error{ + Type: ErrBadResponse, + Msg: jsonErr.Error(), + } + } + } + + if apiError(code) != (result.Status == "error") { + err = &Error{ + Type: ErrBadResponse, + Msg: "inconsistent body for response code", + } + } + + if apiError(code) && result.Status == "error" { + err = &Error{ + Type: result.ErrorType, + Msg: result.Error, + } + } + + return resp, []byte(result.Data), result.Warnings, err + +} + +// DoGetFallback will attempt to do the request as-is, and on a 405 it will fallback to a GET request. 
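+// In that case the form parameters are re-encoded into the URL query string before the request is retried as a GET.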
+func (h *apiClientImpl) DoGetFallback(ctx context.Context, u *url.URL, args url.Values) (*http.Response, []byte, Warnings, error) { + req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(args.Encode())) + if err != nil { + return nil, nil, nil, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + + resp, body, warnings, err := h.Do(ctx, req) + if resp != nil && resp.StatusCode == http.StatusMethodNotAllowed { + u.RawQuery = args.Encode() + req, err = http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return nil, nil, warnings, err + } + + } else { + if err != nil { + return resp, body, warnings, err + } + return resp, body, warnings, nil + } + return h.Do(ctx, req) +} + +func formatTime(t time.Time) string { + return strconv.FormatFloat(float64(t.Unix())+float64(t.Nanosecond())/1e9, 'f', -1, 64) +} diff --git a/src/server/reader/reader.go b/src/server/reader/reader.go new file mode 100644 index 00000000..45212a66 --- /dev/null +++ b/src/server/reader/reader.go @@ -0,0 +1,65 @@ +package reader + +import ( + "net" + "net/http" + "time" + + "github.com/prometheus/client_golang/api" +) + +type Options struct { + Url string + BasicAuthUser string + BasicAuthPass string + + Timeout int64 + DialTimeout int64 + TLSHandshakeTimeout int64 + ExpectContinueTimeout int64 + IdleConnTimeout int64 + KeepAlive int64 + + MaxConnsPerHost int + MaxIdleConns int + MaxIdleConnsPerHost int +} + +type ReaderType struct { + Opts Options + Client API +} + +var Reader ReaderType + +func Init(opts Options) error { + cli, err := api.NewClient(api.Config{ + Address: opts.Url, + RoundTripper: &http.Transport{ + // TLSClientConfig: tlsConfig, + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: time.Duration(opts.DialTimeout) * time.Millisecond, + KeepAlive: time.Duration(opts.KeepAlive) * time.Millisecond, + }).DialContext, + ResponseHeaderTimeout: time.Duration(opts.Timeout) * time.Millisecond, + TLSHandshakeTimeout: time.Duration(opts.TLSHandshakeTimeout) * time.Millisecond, + ExpectContinueTimeout: time.Duration(opts.ExpectContinueTimeout) * time.Millisecond, + MaxConnsPerHost: opts.MaxConnsPerHost, + MaxIdleConns: opts.MaxIdleConns, + MaxIdleConnsPerHost: opts.MaxIdleConnsPerHost, + IdleConnTimeout: time.Duration(opts.IdleConnTimeout) * time.Millisecond, + }, + }) + + if err != nil { + return err + } + + Reader = ReaderType{ + Opts: opts, + Client: NewAPI(cli), + } + + return nil +} diff --git a/src/server/router/router.go b/src/server/router/router.go new file mode 100644 index 00000000..8eed8e95 --- /dev/null +++ b/src/server/router/router.go @@ -0,0 +1,86 @@ +package router + +import ( + "fmt" + "os" + "strings" + + "github.com/gin-contrib/pprof" + "github.com/gin-gonic/gin" + "github.com/prometheus/client_golang/prometheus/promhttp" + "github.com/toolkits/pkg/ginx" + + "github.com/didi/nightingale/v5/src/pkg/aop" + "github.com/didi/nightingale/v5/src/server/config" + "github.com/didi/nightingale/v5/src/server/naming" +) + +func New(version string) *gin.Engine { + gin.SetMode(config.C.RunMode) + + loggerMid := aop.Logger() + recoveryMid := aop.Recovery() + + if strings.ToLower(config.C.RunMode) == "release" { + aop.DisableConsoleColor() + } + + r := gin.New() + + r.Use(recoveryMid) + + // whether print access log + if config.C.HTTP.PrintAccessLog { + r.Use(loggerMid) + } + + configRoute(r, version) + + return r +} + +func configRoute(r *gin.Engine, version string) { + if config.C.HTTP.PProf { + pprof.Register(r, 
"/api/debug/pprof") + } + + r.GET("/ping", func(c *gin.Context) { + c.String(200, "pong") + }) + + r.GET("/pid", func(c *gin.Context) { + c.String(200, fmt.Sprintf("%d", os.Getpid())) + }) + + r.GET("/addr", func(c *gin.Context) { + c.String(200, c.Request.RemoteAddr) + }) + + r.GET("/version", func(c *gin.Context) { + c.String(200, version) + }) + + r.GET("/servers/active", func(c *gin.Context) { + lst, err := naming.ActiveServers(c.Request.Context(), config.C.ClusterName) + ginx.NewRender(c).Data(lst, err) + }) + + if len(config.C.BasicAuth) > 0 { + auth := gin.BasicAuth(config.C.BasicAuth) + r.Use(auth) + } + + r.POST("/opentsdb/put", handleOpenTSDB) + + r.GET("/memory/alert-rule", alertRuleGet) + r.GET("/memory/idents", identsGets) + r.GET("/memory/alert-mutes", mutesGets) + r.GET("/memory/alert-subscribes", subscribesGets) + r.GET("/memory/target", targetGet) + r.GET("/memory/user", userGet) + r.GET("/memory/user-group", userGroupGet) + + r.POST("/prom/vectors", vectorsPost) + + r.GET("/metrics", gin.WrapH(promhttp.Handler())) +} diff --git a/src/server/router/router_memsto.go b/src/server/router/router_memsto.go new file mode 100644 index 00000000..daac7b83 --- /dev/null +++ b/src/server/router/router_memsto.go @@ -0,0 +1,45 @@ +package router + +import ( + "github.com/gin-gonic/gin" + "github.com/toolkits/pkg/ginx" + + "github.com/didi/nightingale/v5/src/server/idents" + "github.com/didi/nightingale/v5/src/server/memsto" +) + +func alertRuleGet(c *gin.Context) { + id := ginx.QueryInt64(c, "id") + rule := memsto.AlertRuleCache.Get(id) + c.JSON(200, gin.H{"id": id, "rule": rule}) +} + +func identsGets(c *gin.Context) { + c.JSON(200, idents.Idents.Items()) +} + +func mutesGets(c *gin.Context) { + c.JSON(200, memsto.AlertMuteCache.GetAllStructs()) +} + +func subscribesGets(c *gin.Context) { + c.JSON(200, memsto.AlertSubscribeCache.GetStructs(ginx.QueryInt64(c, "id"))) +} + +func targetGet(c *gin.Context) { + ident := ginx.QueryStr(c, "ident") + target, _ := memsto.TargetCache.Get(ident) + c.JSON(200, gin.H{"ident": ident, "target": target}) +} + +func userGet(c *gin.Context) { + id := ginx.QueryInt64(c, "id") + user := memsto.UserCache.GetByUserId(id) + c.JSON(200, gin.H{"id": id, "user": user}) +} + +func userGroupGet(c *gin.Context) { + id := ginx.QueryInt64(c, "id") + ug := memsto.UserGroupCache.GetByUserGroupId(id) + c.JSON(200, gin.H{"id": id, "user_group": ug}) +} diff --git a/src/server/router/router_opentsdb.go b/src/server/router/router_opentsdb.go new file mode 100644 index 00000000..f4731235 --- /dev/null +++ b/src/server/router/router_opentsdb.go @@ -0,0 +1,207 @@ +package router + +import ( + "compress/gzip" + "encoding/json" + "fmt" + "io/ioutil" + "strconv" + "strings" + "time" + + "github.com/gin-gonic/gin" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/prompb" + + "github.com/didi/nightingale/v5/src/server/config" + "github.com/didi/nightingale/v5/src/server/idents" + "github.com/didi/nightingale/v5/src/server/memsto" + promstat "github.com/didi/nightingale/v5/src/server/stat" + "github.com/didi/nightingale/v5/src/server/writer" +) + +type HTTPMetric struct { + Metric string `json:"metric"` + Timestamp int64 `json:"timestamp"` + ValueUnTyped interface{} `json:"value"` + Value float64 `json:"-"` + Tags map[string]string `json:"tags"` +} + +func (m *HTTPMetric) Clean() error { + if m.Metric == "" { + return fmt.Errorf("metric is blank") + } + + switch v := m.ValueUnTyped.(type) { + case string: + if f, err := strconv.ParseFloat(v, 64); err == 
nil { + m.Value = f + } else { + return fmt.Errorf("unparseable value %v", v) + } + case float64: + m.Value = v + case uint64: + m.Value = float64(v) + case int64: + m.Value = float64(v) + case int: + m.Value = float64(v) + default: + return fmt.Errorf("unparseable value %v", v) + } + + // if timestamp bigger than 32 bits, likely in milliseconds + if m.Timestamp > 0xffffffff { + m.Timestamp /= 1000 + } + + return nil +} + +func (m *HTTPMetric) ToProm() (*prompb.TimeSeries, error) { + pt := &prompb.TimeSeries{} + pt.Samples = append(pt.Samples, prompb.Sample{ + // use ms + Timestamp: m.Timestamp * 1000, + Value: m.Value, + }) + + if strings.IndexByte(m.Metric, '.') != -1 { + m.Metric = strings.ReplaceAll(m.Metric, ".", "_") + } + + if strings.IndexByte(m.Metric, '-') != -1 { + m.Metric = strings.ReplaceAll(m.Metric, "-", "_") + } + + if !model.MetricNameRE.MatchString(m.Metric) { + return nil, fmt.Errorf("invalid metric name: %s", m.Metric) + } + + pt.Labels = append(pt.Labels, &prompb.Label{ + Name: model.MetricNameLabel, + Value: m.Metric, + }) + + if _, exists := m.Tags["ident"]; !exists { + // rename tag key + host, has := m.Tags["host"] + if has { + delete(m.Tags, "host") + m.Tags["ident"] = host + } + } + + for key, value := range m.Tags { + if strings.IndexByte(key, '.') != -1 { + key = strings.ReplaceAll(key, ".", "_") + } + + if strings.IndexByte(key, '-') != -1 { + key = strings.ReplaceAll(key, "-", "_") + } + + if !model.LabelNameRE.MatchString(key) { + return nil, fmt.Errorf("invalid tag name: %s", key) + } + + pt.Labels = append(pt.Labels, &prompb.Label{ + Name: key, + Value: value, + }) + } + + return pt, nil +} + +func handleOpenTSDB(c *gin.Context) { + var bs []byte + var err error + + if c.GetHeader("Content-Encoding") == "gzip" { + r, err := gzip.NewReader(c.Request.Body) + if err != nil { + c.String(400, err.Error()) + return + } + defer r.Close() + bs, err = ioutil.ReadAll(r) + } else { + defer c.Request.Body.Close() + bs, err = ioutil.ReadAll(c.Request.Body) + } + + if err != nil { + c.String(400, err.Error()) + return + } + + var arr []HTTPMetric + + if bs[0] == '[' { + err = json.Unmarshal(bs, &arr) + } else { + var one HTTPMetric + err = json.Unmarshal(bs, &one) + arr = []HTTPMetric{one} + } + + var ( + succ int + fail int + msg = "data pushed to queue" + list = make([]interface{}, 0, len(arr)) + ts = time.Now().Unix() + ids = make(map[string]interface{}) + ) + + for i := 0; i < len(arr); i++ { + if err := arr[i].Clean(); err != nil { + fail++ + continue + } + + pt, err := arr[i].ToProm() + if err != nil { + fail++ + continue + } + + host, has := arr[i].Tags["ident"] + if has { + // register host + ids[host] = ts + + // fill tags + target, has := memsto.TargetCache.Get(host) + if has { + for key, value := range target.TagsMap { + pt.Labels = append(pt.Labels, &prompb.Label{ + Name: key, + Value: value, + }) + } + } + } + + list = append(list, pt) + succ++ + } + + if len(list) > 0 { + promstat.CounterSampleTotal.WithLabelValues(config.C.ClusterName, "opentsdb").Add(float64(len(list))) + if !writer.Writers.PushQueue(list) { + msg = "wirter queue full" + } + + idents.Idents.MSet(ids) + } + + c.JSON(200, gin.H{ + "succ": succ, + "fail": fail, + "msg": msg, + }) +} diff --git a/src/server/router/router_prom.go b/src/server/router/router_prom.go new file mode 100644 index 00000000..afe78f41 --- /dev/null +++ b/src/server/router/router_prom.go @@ -0,0 +1,33 @@ +package router + +import ( + "time" + + "github.com/gin-gonic/gin" + "github.com/toolkits/pkg/ginx" + + 
"github.com/didi/nightingale/v5/src/server/engine" + "github.com/didi/nightingale/v5/src/server/reader" +) + +type vectorsForm struct { + PromQL string `json:"promql"` +} + +func vectorsPost(c *gin.Context) { + var f vectorsForm + ginx.BindJSON(c, &f) + + value, warnings, err := reader.Reader.Client.Query(c.Request.Context(), f.PromQL, time.Now()) + if err != nil { + c.String(500, "promql:%s error:%v", f.PromQL, err) + return + } + + if len(warnings) > 0 { + c.String(500, "promql:%s warnings:%v", f.PromQL, warnings) + return + } + + c.JSON(200, engine.ConvertVectors(value)) +} diff --git a/src/server/server.go b/src/server/server.go new file mode 100644 index 00000000..57c55d94 --- /dev/null +++ b/src/server/server.go @@ -0,0 +1,174 @@ +package server + +import ( + "context" + "fmt" + "os" + "os/signal" + "path/filepath" + "syscall" + + "github.com/toolkits/pkg/i18n" + + "github.com/didi/nightingale/v5/src/pkg/httpx" + "github.com/didi/nightingale/v5/src/pkg/logx" + "github.com/didi/nightingale/v5/src/server/config" + "github.com/didi/nightingale/v5/src/server/engine" + "github.com/didi/nightingale/v5/src/server/idents" + "github.com/didi/nightingale/v5/src/server/memsto" + "github.com/didi/nightingale/v5/src/server/naming" + "github.com/didi/nightingale/v5/src/server/reader" + "github.com/didi/nightingale/v5/src/server/router" + "github.com/didi/nightingale/v5/src/server/stat" + "github.com/didi/nightingale/v5/src/server/writer" + "github.com/didi/nightingale/v5/src/storage" +) + +type Server struct { + ConfigFile string + Version string +} + +type ServerOption func(*Server) + +func SetConfigFile(f string) ServerOption { + return func(s *Server) { + s.ConfigFile = f + } +} + +func SetVersion(v string) ServerOption { + return func(s *Server) { + s.Version = v + } +} + +// Run run server +func Run(opts ...ServerOption) { + code := 1 + sc := make(chan os.Signal, 1) + signal.Notify(sc, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) + + server := Server{ + ConfigFile: filepath.Join("etc", "server.conf"), + Version: "not specified", + } + + for _, opt := range opts { + opt(&server) + } + + cleanFunc, err := server.initialize() + if err != nil { + fmt.Println("server init fail:", err) + os.Exit(code) + } + +EXIT: + for { + sig := <-sc + fmt.Println("received signal:", sig.String()) + switch sig { + case syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGINT: + code = 0 + break EXIT + case syscall.SIGHUP: + // reload configuration? 
+ default: + break EXIT + } + } + + cleanFunc() + fmt.Println("server exited") + os.Exit(code) +} + +func (s Server) initialize() (func(), error) { + fns := Functions{} + ctx, cancel := context.WithCancel(context.Background()) + fns.Add(cancel) + + // parse config file + config.MustLoad(s.ConfigFile) + + // init i18n + i18n.Init() + + // init logger + loggerClean, err := logx.Init(config.C.Log) + if err != nil { + return fns.Ret(), err + } else { + fns.Add(loggerClean) + } + + // init database + if err = storage.InitDB(storage.DBConfig{ + Gorm: config.C.Gorm, + MySQL: config.C.MySQL, + Postgres: config.C.Postgres, + }); err != nil { + return fns.Ret(), err + } + + // init redis + redisClean, err := storage.InitRedis(config.C.Redis) + if err != nil { + return fns.Ret(), err + } else { + fns.Add(redisClean) + } + + // init prometheus remote writers + if err = writer.Init(config.C.Writers, config.C.WriterOpt); err != nil { + return fns.Ret(), err + } + + // init prometheus remote reader + if err = reader.Init(config.C.Reader); err != nil { + return fns.Ret(), err + } + + // sync rules/users/mutes/targets to memory cache + memsto.Sync() + + // start heartbeat + if err = naming.Heartbeat(ctx, config.C.Heartbeat); err != nil { + return fns.Ret(), err + } + + // start judge engine + if err = engine.Start(ctx); err != nil { + return fns.Ret(), err + } + + stat.Init() + + // init http server + r := router.New(s.Version) + httpClean := httpx.Init(config.C.HTTP, r) + fns.Add(httpClean) + + // register ident and nodata logic + idents.Handle(ctx) + + // release all the resources + return fns.Ret(), nil +} + +type Functions struct { + List []func() +} + +func (fs *Functions) Add(f func()) { + fs.List = append(fs.List, f) +} + +func (fs *Functions) Ret() func() { + return func() { + for i := 0; i < len(fs.List); i++ { + fs.List[i]() + } + } +} diff --git a/src/server/stat/stat.go b/src/server/stat/stat.go new file mode 100644 index 00000000..9c55f2e5 --- /dev/null +++ b/src/server/stat/stat.go @@ -0,0 +1,63 @@ +package stat + +import ( + "github.com/prometheus/client_golang/prometheus" +) + +const ( + namespace = "n9e" + subsystem = "server" +) + +var ( + // 各个周期性任务的执行耗时 + GaugeCronDuration = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "cron_duration", + Help: "Cron method use duration, unit: ms.", + }, []string{"cluster", "name"}) + + // 从数据库同步数据的时候,同步的条数 + GaugeSyncNumber = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "cron_sync_number", + Help: "Cron sync number.", + }, []string{"cluster", "name"}) + + // 从各个接收接口接收到的监控数据总量 + CounterSampleTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "samples_received_total", + Help: "Total number samples received.", + }, []string{"cluster", "channel"}) + + // 产生的告警总量 + CounterAlertsTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "alerts_total", + Help: "Total number alert events.", + }, []string{"cluster"}) + + // 内存中的告警事件队列的长度 + GaugeAlertQueueSize = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "alert_queue_size", + Help: "The size of alert queue.", + }, []string{"cluster"}) +) + +func Init() { + // Register the summary and the histogram with Prometheus's default registry. 
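+	// All collectors above (gauges and counters) are registered with the default
+	// registry and exposed on the /metrics route (gin.WrapH(promhttp.Handler())
+	// in router.go). Typical usage, as in router_opentsdb.go:
+	//   promstat.CounterSampleTotal.WithLabelValues(config.C.ClusterName, "opentsdb").Add(float64(len(list)))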
+ prometheus.MustRegister( + GaugeCronDuration, + GaugeSyncNumber, + CounterSampleTotal, + CounterAlertsTotal, + GaugeAlertQueueSize, + ) +} diff --git a/src/server/writer/writer.go b/src/server/writer/writer.go new file mode 100644 index 00000000..89ce8a76 --- /dev/null +++ b/src/server/writer/writer.go @@ -0,0 +1,190 @@ +package writer + +import ( + "bytes" + "context" + "net" + "net/http" + "time" + + "github.com/golang/protobuf/proto" + "github.com/golang/snappy" + "github.com/prometheus/client_golang/api" + "github.com/prometheus/prometheus/prompb" + "github.com/toolkits/pkg/container/list" + "github.com/toolkits/pkg/logger" +) + +type Options struct { + Name string + Url string + BasicAuthUser string + BasicAuthPass string + + Timeout int64 + DialTimeout int64 + TLSHandshakeTimeout int64 + ExpectContinueTimeout int64 + IdleConnTimeout int64 + KeepAlive int64 + + MaxConnsPerHost int + MaxIdleConns int + MaxIdleConnsPerHost int +} + +type GlobalOpt struct { + QueueMaxSize int + QueuePopSize int + SleepInterval int64 +} + +type WriterType struct { + Opts Options + Client api.Client +} + +func (w WriterType) Write(items []*prompb.TimeSeries) { + req := &prompb.WriteRequest{ + Timeseries: items, + } + + data, err := proto.Marshal(req) + if err != nil { + logger.Warningf("marshal prom data to proto got error: %v, data: %+v", err, items) + return + } + + if err := w.Post(snappy.Encode(nil, data)); err != nil { + logger.Warningf("post to %s got error: %v", w.Opts.Url, err) + } +} + +func (w WriterType) Post(req []byte) error { + httpReq, err := http.NewRequest("POST", w.Opts.Url, bytes.NewReader(req)) + if err != nil { + logger.Warningf("create remote write request got error: %s", err.Error()) + return err + } + + httpReq.Header.Add("Content-Encoding", "snappy") + httpReq.Header.Set("Content-Type", "application/x-protobuf") + httpReq.Header.Set("User-Agent", "n9e") + httpReq.Header.Set("X-Prometheus-Remote-Write-Version", "0.1.0") + + if w.Opts.BasicAuthUser != "" { + httpReq.SetBasicAuth(w.Opts.BasicAuthUser, w.Opts.BasicAuthPass) + } + + resp, body, err := w.Client.Do(context.Background(), httpReq) + if err != nil { + logger.Warningf("push data with remote write request got error: %v, response body: %s", err, string(body)) + return err + } + + if resp.StatusCode >= 400 { + logger.Warningf("push data with remote write request got status code: %v, response body: %s", resp.StatusCode, string(body)) + return err + } + + return nil +} + +type WritersType struct { + globalOpt GlobalOpt + m map[string]WriterType + queue *list.SafeListLimited +} + +func (ws *WritersType) Put(name string, writer WriterType) { + ws.m[name] = writer +} + +func (ws *WritersType) PushQueue(vs []interface{}) bool { + return ws.queue.PushFrontBatch(vs) +} + +func (ws *WritersType) Writes() { + batch := ws.globalOpt.QueuePopSize + if batch <= 0 { + batch = 2000 + } + + duration := time.Duration(ws.globalOpt.SleepInterval) * time.Millisecond + + for { + items := ws.queue.PopBackBy(batch) + count := len(items) + if count == 0 { + time.Sleep(duration) + continue + } + + series := make([]*prompb.TimeSeries, 0, count) + for i := 0; i < count; i++ { + item, ok := items[i].(*prompb.TimeSeries) + if !ok { + // in theory, it can be converted successfully + continue + } + series = append(series, item) + } + + if len(series) == 0 { + continue + } + + for key := range ws.m { + go ws.m[key].Write(series) + } + } +} + +func NewWriters() WritersType { + return WritersType{ + m: make(map[string]WriterType), + } +} + +var Writers = 
NewWriters() + +func Init(opts []Options, globalOpt GlobalOpt) error { + Writers.globalOpt = globalOpt + Writers.queue = list.NewSafeListLimited(globalOpt.QueueMaxSize) + + for i := 0; i < len(opts); i++ { + cli, err := api.NewClient(api.Config{ + Address: opts[i].Url, + RoundTripper: &http.Transport{ + // TLSClientConfig: tlsConfig, + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: time.Duration(opts[i].DialTimeout) * time.Millisecond, + KeepAlive: time.Duration(opts[i].KeepAlive) * time.Millisecond, + }).DialContext, + ResponseHeaderTimeout: time.Duration(opts[i].Timeout) * time.Millisecond, + TLSHandshakeTimeout: time.Duration(opts[i].TLSHandshakeTimeout) * time.Millisecond, + ExpectContinueTimeout: time.Duration(opts[i].ExpectContinueTimeout) * time.Millisecond, + MaxConnsPerHost: opts[i].MaxConnsPerHost, + MaxIdleConns: opts[i].MaxIdleConns, + MaxIdleConnsPerHost: opts[i].MaxIdleConnsPerHost, + IdleConnTimeout: time.Duration(opts[i].IdleConnTimeout) * time.Millisecond, + }, + }) + + if err != nil { + return err + } + + writer := WriterType{ + Opts: opts[i], + Client: cli, + } + + Writers.Put(opts[i].Name, writer) + } + + go Writers.Writes() + + return nil +} diff --git a/src/storage/storage.go b/src/storage/storage.go new file mode 100644 index 00000000..85595a2e --- /dev/null +++ b/src/storage/storage.go @@ -0,0 +1,120 @@ +package storage + +import ( + "context" + "errors" + "fmt" + "os" + "strings" + + "github.com/go-redis/redis/v8" + "gorm.io/gorm" + + "github.com/didi/nightingale/v5/src/pkg/ormx" +) + +type RedisConfig struct { + Address string + Password string + DB int +} + +type DBConfig struct { + Gorm Gorm + MySQL MySQL + Postgres Postgres +} + +type Gorm struct { + Debug bool + DBType string + MaxLifetime int + MaxOpenConns int + MaxIdleConns int + TablePrefix string + EnableAutoMigrate bool +} + +type MySQL struct { + Address string + User string + Password string + DBName string + Parameters string +} + +func (a MySQL) DSN() string { + return fmt.Sprintf("%s:%s@tcp(%s)/%s?%s", + a.User, a.Password, a.Address, a.DBName, a.Parameters) +} + +type Postgres struct { + Address string + User string + Password string + DBName string + SSLMode string +} + +func (a Postgres) DSN() string { + arr := strings.Split(a.Address, ":") + if len(arr) != 2 { + panic("pg address(" + a.Address + ") invalid") + } + + return fmt.Sprintf("host=%s port=%s user=%s dbname=%s password=%s sslmode=%s", + arr[0], arr[1], a.User, a.DBName, a.Password, a.SSLMode) +} + +var DB *gorm.DB + +func InitDB(cfg DBConfig) error { + db, err := newGormDB(cfg) + if err == nil { + DB = db + } + return err +} + +func newGormDB(cfg DBConfig) (*gorm.DB, error) { + var dsn string + switch cfg.Gorm.DBType { + case "mysql": + dsn = cfg.MySQL.DSN() + case "postgres": + dsn = cfg.Postgres.DSN() + default: + return nil, errors.New("unknown DBType") + } + + return ormx.New(ormx.Config{ + Debug: cfg.Gorm.Debug, + DBType: cfg.Gorm.DBType, + DSN: dsn, + MaxIdleConns: cfg.Gorm.MaxIdleConns, + MaxLifetime: cfg.Gorm.MaxLifetime, + MaxOpenConns: cfg.Gorm.MaxOpenConns, + TablePrefix: cfg.Gorm.TablePrefix, + }) +} + +var Redis *redis.Client + +func InitRedis(cfg RedisConfig) (func(), error) { + Redis = redis.NewClient(&redis.Options{ + Addr: cfg.Address, + Password: cfg.Password, + DB: cfg.DB, + }) + + err := Redis.Ping(context.Background()).Err() + if err != nil { + fmt.Println("ping redis failed:", err) + os.Exit(1) + } + + return func() { + fmt.Println("redis exiting") + Redis.Close() + }, nil +} diff --git 
a/src/webapi/config/config.go b/src/webapi/config/config.go new file mode 100644 index 00000000..8d4b2622 --- /dev/null +++ b/src/webapi/config/config.go @@ -0,0 +1,116 @@ +package config + +import ( + "fmt" + "os" + "strings" + "sync" + + "github.com/gin-gonic/gin" + "github.com/koding/multiconfig" + + "github.com/didi/nightingale/v5/src/pkg/httpx" + "github.com/didi/nightingale/v5/src/pkg/ldapx" + "github.com/didi/nightingale/v5/src/pkg/logx" + "github.com/didi/nightingale/v5/src/storage" + "github.com/didi/nightingale/v5/src/webapi/prom" +) + +var ( + C = new(Config) + once sync.Once +) + +func MustLoad(fpaths ...string) { + once.Do(func() { + loaders := []multiconfig.Loader{ + &multiconfig.TagLoader{}, + &multiconfig.EnvironmentLoader{}, + } + + for _, fpath := range fpaths { + handled := false + + if strings.HasSuffix(fpath, "toml") { + loaders = append(loaders, &multiconfig.TOMLLoader{Path: fpath}) + handled = true + } + if strings.HasSuffix(fpath, "conf") { + loaders = append(loaders, &multiconfig.TOMLLoader{Path: fpath}) + handled = true + } + if strings.HasSuffix(fpath, "json") { + loaders = append(loaders, &multiconfig.JSONLoader{Path: fpath}) + handled = true + } + if strings.HasSuffix(fpath, "yaml") { + loaders = append(loaders, &multiconfig.YAMLLoader{Path: fpath}) + handled = true + } + + if !handled { + fmt.Println("config file invalid, valid file exts: .conf,.yaml,.toml,.json") + os.Exit(1) + } + } + + m := multiconfig.DefaultLoader{ + Loader: multiconfig.MultiLoader(loaders...), + Validator: multiconfig.MultiValidator(&multiconfig.RequiredValidator{}), + } + + m.MustLoad(C) + + if !strings.HasPrefix(C.Ibex.Address, "http") { + C.Ibex.Address = "http://" + C.Ibex.Address + } + + err := loadMetricsYaml() + if err != nil { + fmt.Println("failed to load metrics.yaml:", err) + os.Exit(1) + } + }) +} + +type Config struct { + RunMode string + I18N string + AdminRole string + ContactKeys []ContactKey + NotifyChannels []string + Log logx.Config + HTTP httpx.Config + JWTAuth JWTAuth + BasicAuth gin.Accounts + LDAP ldapx.LdapSection + Redis storage.RedisConfig + Gorm storage.Gorm + MySQL storage.MySQL + Postgres storage.Postgres + Clusters []prom.Options + Ibex Ibex +} + +type ContactKey struct { + Label string `json:"label"` + Key string `json:"key"` +} + +type JWTAuth struct { + SigningKey string + AccessExpired int64 + RefreshExpired int64 + RedisKeyPrefix string +} + +type Ibex struct { + Address string + BasicAuthUser string + BasicAuthPass string + Timeout int64 +} + +func (c *Config) IsDebugMode() bool { + return c.RunMode == "debug" +} diff --git a/src/webapi/config/i18n.go b/src/webapi/config/i18n.go new file mode 100644 index 00000000..7f49d303 --- /dev/null +++ b/src/webapi/config/i18n.go @@ -0,0 +1,46 @@ +package config + +var ( + dict = map[string]string{ + "just a test": "这只是一个测试", + "just a test: %s": "这只是一个测试: %s", + "InternalServerError": "系统内部错误,请联系管理员", + "Username or password invalid": "登录失败,请检查用户名和密码", + "Username is blank": "用户名不能为空", + "Username has invalid characters": "用户名含有非法字符", + "Nickname has invalid characters": "昵称含有非法字符", + "Phone invalid": "手机号格式非法", + "Email invalid": "邮箱格式非法", + "Incorrect old password": "旧密码错误", + "Username already exists": "用户名已存在", + "No such user": "用户不存在", + "Note has invalid characters": "备注含有非法字符", + "UserGroup already exists": "用户组已存在,不能重复创建", + "No such UserGroup": "用户组不存在", + "No such BusiGroup": "业务组不存在", + "BusiGroup already exists": "业务分组已存在,不能重复创建", + "Some UserGroup id not exists": "有些用户组ID不存在", + "Some alert mutes 
still in the BusiGroup": "业务组下仍然存在告警屏蔽配置,不能删除", + "Some dashboards still in the BusiGroup": "业务组下仍然存在监控大盘配置,不能删除", + "Some collect rules still in the BusiGroup": "业务组下仍然存在采集规则配置,不能删除", + "Some alert rules still in the BusiGroup": "业务组下仍然存在告警规则配置,不能删除", + "Some alert subscribes still in the BusiGroup": "业务组下仍然存在订阅规则配置,不能删除", + "Some targets still in the BusiGroup": "业务组下仍然存在监控对象,不能删除", + "Some recovery scripts still in the BusiGroup": "业务组下仍然存在自愈脚本,不能删除", + "Name is blank": "名称不能为空", + "Name has invalid characters": "名称含有非法字符", + "Dashboard already exists": "监控大盘已存在", + "No such dashboard": "监控大盘不存在", + "AlertRule already exists": "告警规则已存在,不能重复创建", + "No such AlertRule": "告警规则不存在", + "CollectRule already exists": "采集规则已存在,不能重复创建", + "No such metric description": "该指标释义不存在,可能已被删除", + "No such TargetQuery": "查询条件不存在,可能已被删除", + "No permission. Only admins can assign BG": "没有权限!只有管理员才能分配业务组", + "No permission to operate the targets: %s": "没有权限操作这些监控对象:%s", + "No permission. You are not admin of BG(%s)": "没有权限操作,您并非业务组(%s)的管理员", + } + langDict = map[string]map[string]string{ + "zh": dict, + } +) diff --git a/src/webapi/config/init.go b/src/webapi/config/init.go new file mode 100644 index 00000000..74793cf9 --- /dev/null +++ b/src/webapi/config/init.go @@ -0,0 +1,7 @@ +package config + +import "github.com/toolkits/pkg/i18n" + +func init() { + i18n.DictRegister(langDict) +} diff --git a/src/webapi/config/metrics.go b/src/webapi/config/metrics.go new file mode 100644 index 00000000..97a4c1b0 --- /dev/null +++ b/src/webapi/config/metrics.go @@ -0,0 +1,30 @@ +package config + +import ( + "path" + + cmap "github.com/orcaman/concurrent-map" + "github.com/toolkits/pkg/file" + "github.com/toolkits/pkg/runner" +) + +var Metrics = cmap.New() + +func loadMetricsYaml() error { + fp := path.Join(runner.Cwd, "etc", "metrics.yaml") + if !file.IsExist(fp) { + return nil + } + + nmap := make(map[string]string) + err := file.ReadYaml(fp, &nmap) + if err != nil { + return err + } + + for key, val := range nmap { + Metrics.Set(key, val) + } + + return nil +} diff --git a/src/webapi/prom/prom.go b/src/webapi/prom/prom.go new file mode 100644 index 00000000..23b8c39d --- /dev/null +++ b/src/webapi/prom/prom.go @@ -0,0 +1,78 @@ +package prom + +import ( + "net" + "net/http" + "time" +) + +type Options struct { + Name string + Prom string + + BasicAuthUser string + BasicAuthPass string + + Timeout int64 + DialTimeout int64 + TLSHandshakeTimeout int64 + ExpectContinueTimeout int64 + IdleConnTimeout int64 + KeepAlive int64 + + MaxConnsPerHost int + MaxIdleConns int + MaxIdleConnsPerHost int +} + +type ClusterType struct { + Opts Options + Transport *http.Transport +} + +type ClustersType struct { + M map[string]ClusterType +} + +func NewClusters() ClustersType { + return ClustersType{ + M: make(map[string]ClusterType), + } +} + +func (cs *ClustersType) Put(name string, cluster ClusterType) { + cs.M[name] = cluster +} + +func (cs *ClustersType) Get(name string) (ClusterType, bool) { + c, has := cs.M[name] + return c, has +} + +var Clusters = NewClusters() + +func Init(opts []Options) error { + for i := 0; i < len(opts); i++ { + cluster := ClusterType{ + Opts: opts[i], + Transport: &http.Transport{ + // TLSClientConfig: tlsConfig, + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: time.Duration(opts[i].DialTimeout) * time.Millisecond, + KeepAlive: time.Duration(opts[i].KeepAlive) * time.Millisecond, + }).DialContext, + ResponseHeaderTimeout: time.Duration(opts[i].Timeout) * time.Millisecond, + 
TLSHandshakeTimeout: time.Duration(opts[i].TLSHandshakeTimeout) * time.Millisecond, + ExpectContinueTimeout: time.Duration(opts[i].ExpectContinueTimeout) * time.Millisecond, + MaxConnsPerHost: opts[i].MaxConnsPerHost, + MaxIdleConns: opts[i].MaxIdleConns, + MaxIdleConnsPerHost: opts[i].MaxIdleConnsPerHost, + IdleConnTimeout: time.Duration(opts[i].IdleConnTimeout) * time.Millisecond, + }, + } + Clusters.Put(opts[i].Name, cluster) + } + + return nil +} diff --git a/src/webapi/router/router.go b/src/webapi/router/router.go new file mode 100644 index 00000000..5e8ca715 --- /dev/null +++ b/src/webapi/router/router.go @@ -0,0 +1,223 @@ +package router + +import ( + "fmt" + "os" + "path" + "strings" + "time" + + "github.com/gin-contrib/pprof" + "github.com/gin-gonic/gin" + "github.com/prometheus/client_golang/prometheus/promhttp" + "github.com/toolkits/pkg/ginx" + + "github.com/didi/nightingale/v5/src/pkg/aop" + "github.com/didi/nightingale/v5/src/webapi/config" + promstat "github.com/didi/nightingale/v5/src/webapi/stat" +) + +var InternalServerError = "InternalServerError" + +func stat() gin.HandlerFunc { + return func(c *gin.Context) { + start := time.Now() + c.Next() + + code := fmt.Sprintf("%d", c.Writer.Status()) + method := c.Request.Method + labels := []string{promstat.Service, code, c.FullPath(), method} + + promstat.RequestCounter.WithLabelValues(labels...).Inc() + promstat.RequestDuration.WithLabelValues(labels...).Observe(float64(time.Since(start).Seconds())) + } +} + +func New(version string) *gin.Engine { + gin.SetMode(config.C.RunMode) + + if strings.ToLower(config.C.RunMode) == "release" { + aop.DisableConsoleColor() + } + + r := gin.New() + + r.Use(stat()) + r.Use(aop.Recovery()) + + // whether print access log + if config.C.HTTP.PrintAccessLog { + r.Use(aop.Logger()) + } + + configRoute(r, version) + configNoRoute(r) + + return r +} + +func configNoRoute(r *gin.Engine) { + r.NoRoute(func(c *gin.Context) { + arr := strings.Split(c.Request.URL.Path, ".") + suffix := arr[len(arr)-1] + switch suffix { + case "png", "jpeg", "jpg", "svg", "ico", "gif", "css", "js", "html", "htm", "gz", "zip", "map": + c.File(path.Join(strings.Split("pub/"+c.Request.URL.Path, "/")...)) + default: + c.File(path.Join("pub", "index.html")) + } + }) +} + +func configRoute(r *gin.Engine, version string) { + if config.C.HTTP.PProf { + pprof.Register(r, "/api/debug/pprof") + } + + r.GET("/ping", func(c *gin.Context) { + c.String(200, "pong") + }) + + r.GET("/pid", func(c *gin.Context) { + c.String(200, fmt.Sprintf("%d", os.Getpid())) + }) + + r.GET("/addr", func(c *gin.Context) { + c.String(200, c.Request.RemoteAddr) + }) + + r.GET("/version", func(c *gin.Context) { + c.String(200, version) + }) + + r.GET("/i18n", func(c *gin.Context) { + ginx.NewRender(c).Message("just a test: %s", "by ulric") + }) + + r.GET("/metrics", gin.WrapH(promhttp.Handler())) + + pagesPrefix := "/api/n9e" + + pages := r.Group(pagesPrefix) + { + pages.Any("/prometheus/*url", jwtAuth(), prometheusProxy) + + pages.POST("/auth/login", loginPost) + pages.POST("/auth/logout", logoutPost) + pages.POST("/auth/refresh", refreshPost) + + pages.GET("/metrics/desc", metricsDescGetFile) + pages.POST("/metrics/desc", metricsDescGetMap) + + pages.GET("/roles", rolesGets) + pages.GET("/notify-channels", notifyChannelsGets) + pages.GET("/contact-keys", contactKeysGets) + pages.GET("/clusters", clustersGets) + + pages.GET("/self/perms", jwtAuth(), user(), permsGets) + pages.GET("/self/profile", jwtAuth(), user(), selfProfileGet) + 
pages.PUT("/self/profile", jwtAuth(), user(), selfProfilePut) + pages.PUT("/self/password", jwtAuth(), user(), selfPasswordPut) + + pages.GET("/users", jwtAuth(), user(), userGets) + pages.POST("/users", jwtAuth(), admin(), userAddPost) + pages.GET("/user/:id/profile", jwtAuth(), userProfileGet) + pages.PUT("/user/:id/profile", jwtAuth(), admin(), userProfilePut) + pages.PUT("/user/:id/password", jwtAuth(), admin(), userPasswordPut) + pages.DELETE("/user/:id", jwtAuth(), admin(), userDel) + + pages.GET("/user-groups", jwtAuth(), user(), userGroupGets) + pages.POST("/user-groups", jwtAuth(), user(), userGroupAdd) + pages.GET("/user-group/:id", jwtAuth(), user(), userGroupGet) + pages.PUT("/user-group/:id", jwtAuth(), user(), userGroupWrite(), userGroupPut) + pages.DELETE("/user-group/:id", jwtAuth(), user(), userGroupWrite(), userGroupDel) + pages.POST("/user-group/:id/members", jwtAuth(), user(), userGroupWrite(), userGroupMemberAdd) + pages.DELETE("/user-group/:id/members", jwtAuth(), user(), userGroupWrite(), userGroupMemberDel) + + pages.POST("/busi-groups", jwtAuth(), user(), busiGroupAdd) + pages.GET("/busi-groups", jwtAuth(), user(), busiGroupGets) + pages.GET("/busi-groups/alertings", jwtAuth(), busiGroupAlertingsGets) + pages.GET("/busi-group/:id", jwtAuth(), user(), bgro(), busiGroupGet) + pages.PUT("/busi-group/:id", jwtAuth(), user(), bgrw(), busiGroupPut) + pages.POST("/busi-group/:id/members", jwtAuth(), user(), bgrw(), busiGroupMemberAdd) + pages.DELETE("/busi-group/:id/members", jwtAuth(), user(), bgrw(), busiGroupMemberDel) + pages.DELETE("/busi-group/:id", jwtAuth(), user(), bgrw(), busiGroupDel) + + pages.GET("/targets", jwtAuth(), user(), targetGets) + pages.DELETE("/targets", jwtAuth(), user(), targetDel) + pages.GET("/targets/tags", jwtAuth(), user(), targetGetTags) + pages.POST("/targets/tags", jwtAuth(), user(), targetBindTags) + pages.DELETE("/targets/tags", jwtAuth(), user(), targetUnbindTags) + pages.PUT("/targets/note", jwtAuth(), user(), targetUpdateNote) + pages.PUT("/targets/bgid", jwtAuth(), user(), targetUpdateBgid) + + pages.GET("/busi-group/:id/dashboards", jwtAuth(), user(), bgro(), dashboardGets) + pages.POST("/busi-group/:id/dashboards", jwtAuth(), user(), bgrw(), dashboardAdd) + pages.POST("/busi-group/:id/dashboards/export", jwtAuth(), user(), bgro(), dashboardExport) + pages.POST("/busi-group/:id/dashboards/import", jwtAuth(), user(), bgrw(), dashboardImport) + pages.POST("/busi-group/:id/dashboard/:did/clone", jwtAuth(), user(), bgrw(), dashboardClone) + pages.GET("/busi-group/:id/dashboard/:did", jwtAuth(), user(), bgro(), dashboardGet) + pages.PUT("/busi-group/:id/dashboard/:did", jwtAuth(), user(), bgrw(), dashboardPut) + pages.DELETE("/busi-group/:id/dashboard/:did", jwtAuth(), user(), bgrw(), dashboardDel) + + pages.GET("/busi-group/:id/chart-groups", jwtAuth(), user(), bgro(), chartGroupGets) + pages.POST("/busi-group/:id/chart-groups", jwtAuth(), user(), bgrw(), chartGroupAdd) + pages.PUT("/busi-group/:id/chart-groups", jwtAuth(), user(), bgrw(), chartGroupPut) + pages.DELETE("/busi-group/:id/chart-groups", jwtAuth(), user(), bgrw(), chartGroupDel) + + pages.GET("/busi-group/:id/charts", jwtAuth(), user(), bgro(), chartGets) + pages.POST("/busi-group/:id/charts", jwtAuth(), user(), bgrw(), chartAdd) + pages.PUT("/busi-group/:id/charts", jwtAuth(), user(), bgrw(), chartPut) + pages.DELETE("/busi-group/:id/charts", jwtAuth(), user(), bgrw(), chartDel) + + pages.GET("/share-charts", chartShareGets) + pages.POST("/share-charts", jwtAuth(), 
chartShareAdd) + + pages.GET("/busi-group/:id/alert-rules", jwtAuth(), user(), alertRuleGets) + pages.POST("/busi-group/:id/alert-rules", jwtAuth(), user(), bgrw(), alertRuleAdd) + pages.DELETE("/busi-group/:id/alert-rules", jwtAuth(), user(), bgrw(), alertRuleDel) + pages.PUT("/busi-group/:id/alert-rules/fields", jwtAuth(), user(), bgrw(), alertRulePutFields) + pages.PUT("/busi-group/:id/alert-rule/:arid", jwtAuth(), user(), bgrw(), alertRulePut) + pages.GET("/alert-rule/:arid", jwtAuth(), user(), alertRuleGet) + + pages.GET("/busi-group/:id/alert-mutes", jwtAuth(), user(), bgro(), alertMuteGets) + pages.POST("/busi-group/:id/alert-mutes", jwtAuth(), user(), bgrw(), alertMuteAdd) + pages.DELETE("/busi-group/:id/alert-mutes", jwtAuth(), user(), bgrw(), alertMuteDel) + + pages.GET("/busi-group/:id/alert-subscribes", jwtAuth(), user(), bgro(), alertSubscribeGets) + pages.POST("/busi-group/:id/alert-subscribes", jwtAuth(), user(), bgrw(), alertSubscribeAdd) + pages.PUT("/busi-group/:id/alert-subscribes", jwtAuth(), user(), bgrw(), alertSubscribePut) + pages.DELETE("/busi-group/:id/alert-subscribes", jwtAuth(), user(), bgrw(), alertSubscribeDel) + + // pages.GET("/busi-group/:id/collect-rules", jwtAuth(), user(), bgro(), collectRuleGets) + // pages.POST("/busi-group/:id/collect-rules", jwtAuth(), user(), bgrw(), collectRuleAdd) + // pages.DELETE("/busi-group/:id/collect-rules", jwtAuth(), user(), bgrw(), collectRuleDel) + // pages.GET("/busi-group/:id/collect-rule/:crid", jwtAuth(), user(), bgro(), collectRuleGet) + // pages.PUT("/busi-group/:id/collect-rule/:crid", jwtAuth(), user(), bgrw(), collectRulePut) + + pages.GET("/busi-group/:id/alert-cur-events", jwtAuth(), user(), bgro(), alertCurEventGets) + pages.DELETE("/busi-group/:id/alert-cur-events", jwtAuth(), user(), bgrw(), alertCurEventDel) + pages.GET("/busi-group/:id/alert-cur-event/:eid", jwtAuth(), user(), bgro(), alertCurEventGet) + + pages.GET("/busi-group/:id/alert-his-events", jwtAuth(), user(), bgro(), alertHisEventGets) + pages.GET("/busi-group/:id/alert-his-event/:eid", jwtAuth(), user(), bgro(), alertHisEventGet) + + pages.GET("/busi-group/:id/task-tpls", jwtAuth(), user(), bgro(), taskTplGets) + pages.POST("/busi-group/:id/task-tpls", jwtAuth(), user(), bgrw(), taskTplAdd) + pages.DELETE("/busi-group/:id/task-tpl/:tid", jwtAuth(), user(), bgrw(), taskTplDel) + pages.POST("/busi-group/:id/task-tpls/tags", jwtAuth(), user(), bgrw(), taskTplBindTags) + pages.DELETE("/busi-group/:id/task-tpls/tags", jwtAuth(), user(), bgrw(), taskTplUnbindTags) + pages.GET("/busi-group/:id/task-tpl/:tid", jwtAuth(), user(), bgro(), taskTplGet) + pages.PUT("/busi-group/:id/task-tpl/:tid", jwtAuth(), user(), bgrw(), taskTplPut) + + pages.GET("/busi-group/:id/tasks", jwtAuth(), user(), bgro(), taskGets) + pages.POST("/busi-group/:id/tasks", jwtAuth(), user(), bgrw(), taskAdd) + pages.GET("/busi-group/:id/task/*url", jwtAuth(), user(), bgro(), taskProxy) + pages.PUT("/busi-group/:id/task/*url", jwtAuth(), user(), bgrw(), taskProxy) + } + + service := r.Group("/v1/n9e", gin.BasicAuth(config.C.BasicAuth)) + { + service.Any("/prometheus/*url", prometheusProxy) + } +} diff --git a/src/webapi/router/router_alert_cur_event.go b/src/webapi/router/router_alert_cur_event.go new file mode 100644 index 00000000..5c8d9d20 --- /dev/null +++ b/src/webapi/router/router_alert_cur_event.go @@ -0,0 +1,67 @@ +package router + +import ( + "time" + + "github.com/gin-gonic/gin" + "github.com/toolkits/pkg/ginx" + + "github.com/didi/nightingale/v5/src/models" +) + +func 
alertCurEventGets(c *gin.Context) { + stime := ginx.QueryInt64(c, "stime", 0) + etime := ginx.QueryInt64(c, "etime", 0) + hours := ginx.QueryInt64(c, "hours", 0) + now := time.Now().Unix() + if hours != 0 { + stime = now - 3600*hours + etime = now + 3600*24 + } + + if stime != 0 && etime == 0 { + etime = now + 3600*24 + } + + severity := ginx.QueryInt(c, "severity", -1) + query := ginx.QueryStr(c, "query", "") + limit := ginx.QueryInt(c, "limit", 20) + busiGroupId := ginx.UrlParamInt64(c, "id") + clusters := queryClusters(c) + + total, err := models.AlertCurEventTotal(busiGroupId, stime, etime, severity, clusters, query) + ginx.Dangerous(err) + + list, err := models.AlertCurEventGets(busiGroupId, stime, etime, severity, clusters, query, limit, ginx.Offset(c, limit)) + ginx.Dangerous(err) + + cache := make(map[int64]*models.UserGroup) + for i := 0; i < len(list); i++ { + list[i].FillNotifyGroups(cache) + } + + ginx.NewRender(c).Data(gin.H{ + "list": list, + "total": total, + }, nil) +} + +func alertCurEventDel(c *gin.Context) { + var f idsForm + ginx.BindJSON(c, &f) + f.Verify() + + ginx.NewRender(c).Message(models.AlertCurEventDel(f.Ids)) +} + +func alertCurEventGet(c *gin.Context) { + eid := ginx.UrlParamInt64(c, "eid") + event, err := models.AlertCurEventGetById(eid) + ginx.Dangerous(err) + + if event == nil { + ginx.Bomb(404, "No such active event") + } + + ginx.NewRender(c).Data(event, nil) +} diff --git a/src/webapi/router/router_alert_his_event.go b/src/webapi/router/router_alert_his_event.go new file mode 100644 index 00000000..2758ec7a --- /dev/null +++ b/src/webapi/router/router_alert_his_event.go @@ -0,0 +1,60 @@ +package router + +import ( + "time" + + "github.com/gin-gonic/gin" + "github.com/toolkits/pkg/ginx" + + "github.com/didi/nightingale/v5/src/models" +) + +func alertHisEventGets(c *gin.Context) { + stime := ginx.QueryInt64(c, "stime", 0) + etime := ginx.QueryInt64(c, "etime", 0) + hours := ginx.QueryInt64(c, "hours", 0) + now := time.Now().Unix() + if hours != 0 { + stime = now - 3600*hours + etime = now + 3600*24 + } + + if stime != 0 && etime == 0 { + etime = now + 3600*24 + } + + severity := ginx.QueryInt(c, "severity", -1) + recovered := ginx.QueryInt(c, "is_recovered", -1) + query := ginx.QueryStr(c, "query", "") + limit := ginx.QueryInt(c, "limit", 20) + busiGroupId := ginx.UrlParamInt64(c, "id") + clusters := queryClusters(c) + + total, err := models.AlertHisEventTotal(busiGroupId, stime, etime, severity, recovered, clusters, query) + ginx.Dangerous(err) + + list, err := models.AlertHisEventGets(busiGroupId, stime, etime, severity, recovered, clusters, query, limit, ginx.Offset(c, limit)) + ginx.Dangerous(err) + + cache := make(map[int64]*models.UserGroup) + for i := 0; i < len(list); i++ { + list[i].FillNotifyGroups(cache) + } + + ginx.NewRender(c).Data(gin.H{ + "list": list, + "total": total, + }, nil) +} + +func alertHisEventGet(c *gin.Context) { + eid := ginx.UrlParamInt64(c, "eid") + event, err := models.AlertHisEventGetById(eid) + ginx.Dangerous(err) + + if event == nil { + ginx.Bomb(404, "No such alert event") + } + + ginx.NewRender(c).Data(event, err) +} diff --git a/src/webapi/router/router_alert_rule.go b/src/webapi/router/router_alert_rule.go new file mode 100644 index 00000000..6d6556d6 --- /dev/null +++ b/src/webapi/router/router_alert_rule.go @@ -0,0 +1,128 @@ +package router + +import ( + "net/http" + "time" + + "github.com/gin-gonic/gin" + "github.com/toolkits/pkg/ginx" + + "github.com/didi/nightingale/v5/src/models" +) + +// Return all, front-end 
search and paging +func alertRuleGets(c *gin.Context) { + busiGroupId := ginx.UrlParamInt64(c, "id") + ars, err := models.AlertRuleGets(busiGroupId) + if err == nil { + cache := make(map[int64]*models.UserGroup) + for i := 0; i < len(ars); i++ { + ars[i].FillNotifyGroups(cache) + } + } + ginx.NewRender(c).Data(ars, err) +} + +// single or import +func alertRuleAdd(c *gin.Context) { + var lst []models.AlertRule + ginx.BindJSON(c, &lst) + + count := len(lst) + if count == 0 { + ginx.Bomb(http.StatusBadRequest, "input json is empty") + } + + username := c.MustGet("username").(string) + bgid := ginx.UrlParamInt64(c, "id") + + // alert rule name -> error string + reterr := make(map[string]string) + for i := 0; i < count; i++ { + lst[i].Id = 0 + lst[i].GroupId = bgid + lst[i].CreateBy = username + lst[i].UpdateBy = username + lst[i].FE2DB() + + if err := lst[i].Add(); err != nil { + reterr[lst[i].Name] = err.Error() + } else { + reterr[lst[i].Name] = "" + } + } + + ginx.NewRender(c).Data(reterr, nil) +} + +func alertRuleDel(c *gin.Context) { + var f idsForm + ginx.BindJSON(c, &f) + f.Verify() + + // param(busiGroupId) for protect + ginx.NewRender(c).Message(models.AlertRuleDels(f.Ids, ginx.UrlParamInt64(c, "id"))) +} + +func alertRulePut(c *gin.Context) { + var f models.AlertRule + ginx.BindJSON(c, &f) + + arid := ginx.UrlParamInt64(c, "arid") + ar, err := models.AlertRuleGetById(arid) + ginx.Dangerous(err) + + if ar == nil { + ginx.NewRender(c, http.StatusNotFound).Message("No such AlertRule") + return + } + + f.UpdateBy = c.MustGet("username").(string) + ginx.NewRender(c).Message(ar.Update(f)) +} + +type alertRuleFieldForm struct { + Ids []int64 `json:"ids"` + Fields map[string]interface{} `json:"fields"` +} + +// update one field: cluster note severity disabled prom_eval_interval prom_for_duration notify_channels notify_groups notify_recovered notify_repeat_step callbacks runbook_url append_tags +func alertRulePutFields(c *gin.Context) { + var f alertRuleFieldForm + ginx.BindJSON(c, &f) + + if len(f.Fields) == 0 { + ginx.Bomb(http.StatusBadRequest, "fields empty") + } + + f.Fields["update_by"] = c.MustGet("username").(string) + f.Fields["update_at"] = time.Now().Unix() + + for i := 0; i < len(f.Ids); i++ { + ar, err := models.AlertRuleGetById(f.Ids[i]) + ginx.Dangerous(err) + + if ar == nil { + continue + } + + ginx.Dangerous(ar.UpdateFieldsMap(f.Fields)) + } + + ginx.NewRender(c).Message(nil) +} + +func alertRuleGet(c *gin.Context) { + arid := ginx.UrlParamInt64(c, "arid") + + ar, err := models.AlertRuleGetById(arid) + ginx.Dangerous(err) + + if ar == nil { + ginx.NewRender(c, http.StatusNotFound).Message("No such AlertRule") + return + } + + err = ar.FillNotifyGroups(make(map[int64]*models.UserGroup)) + ginx.NewRender(c).Data(ar, err) +} diff --git a/src/webapi/router/router_alert_subscribe.go b/src/webapi/router/router_alert_subscribe.go new file mode 100644 index 00000000..a578ddcb --- /dev/null +++ b/src/webapi/router/router_alert_subscribe.go @@ -0,0 +1,78 @@ +package router + +import ( + "net/http" + "time" + + "github.com/gin-gonic/gin" + "github.com/toolkits/pkg/ginx" + + "github.com/didi/nightingale/v5/src/models" +) + +// Return all, front-end search and paging +func alertSubscribeGets(c *gin.Context) { + bgid := ginx.UrlParamInt64(c, "id") + lst, err := models.AlertSubscribeGets(bgid) + if err == nil { + ugcache := make(map[int64]*models.UserGroup) + for i := 0; i < len(lst); i++ { + lst[i].FillUserGroups(ugcache) + } + + rulecache := make(map[int64]string) + for i := 0; i < 
len(lst); i++ { + lst[i].FillRuleName(rulecache) + } + } + ginx.NewRender(c).Data(lst, err) +} + +func alertSubscribeAdd(c *gin.Context) { + var f models.AlertSubscribe + ginx.BindJSON(c, &f) + + if f.GroupId <= 0 { + ginx.Bomb(http.StatusBadRequest, "group_id invalid") + } + + username := c.MustGet("username").(string) + f.CreateBy = username + f.UpdateBy = username + f.GroupId = ginx.UrlParamInt64(c, "id") + + ginx.NewRender(c).Message(f.Add()) +} + +func alertSubscribePut(c *gin.Context) { + var fs []models.AlertSubscribe + ginx.BindJSON(c, &fs) + + timestamp := time.Now().Unix() + username := c.MustGet("username").(string) + for i := 0; i < len(fs); i++ { + fs[i].UpdateBy = username + fs[i].UpdateAt = timestamp + ginx.Dangerous(fs[i].Update( + "rule_id", + "tags", + "redefine_severity", + "new_severity", + "redefine_channels", + "new_channels", + "user_group_ids", + "update_at", + "update_by", + )) + } + + ginx.NewRender(c).Message(nil) +} + +func alertSubscribeDel(c *gin.Context) { + var f idsForm + ginx.BindJSON(c, &f) + f.Verify() + + ginx.NewRender(c).Message(models.AlertSubscribeDel(f.Ids)) +} diff --git a/src/webapi/router/router_busi_group.go b/src/webapi/router/router_busi_group.go new file mode 100644 index 00000000..36fa19b6 --- /dev/null +++ b/src/webapi/router/router_busi_group.go @@ -0,0 +1,96 @@ +package router + +import ( + "net/http" + + "github.com/gin-gonic/gin" + "github.com/toolkits/pkg/ginx" + "github.com/toolkits/pkg/logger" + "github.com/toolkits/pkg/str" + + "github.com/didi/nightingale/v5/src/models" +) + +type busiGroupForm struct { + Name string `json:"name" binding:"required"` + Members []models.BusiGroupMember `json:"members"` +} + +func busiGroupAdd(c *gin.Context) { + var f busiGroupForm + ginx.BindJSON(c, &f) + + if len(f.Members) == 0 { + ginx.Bomb(http.StatusBadRequest, "members empty") + } + + username := c.MustGet("username").(string) + ginx.NewRender(c).Message(models.BusiGroupAdd(f.Name, f.Members, username)) +} + +func busiGroupPut(c *gin.Context) { + var f busiGroupForm + ginx.BindJSON(c, &f) + + username := c.MustGet("username").(string) + targetbg := c.MustGet("busi_group").(*models.BusiGroup) + ginx.NewRender(c).Message(targetbg.Update(f.Name, username)) +} + +func busiGroupMemberAdd(c *gin.Context) { + var members []models.BusiGroupMember + ginx.BindJSON(c, &members) + + username := c.MustGet("username").(string) + targetbg := c.MustGet("busi_group").(*models.BusiGroup) + + ginx.NewRender(c).Message(targetbg.AddMembers(members, username)) +} + +func busiGroupMemberDel(c *gin.Context) { + var members []models.BusiGroupMember + ginx.BindJSON(c, &members) + + username := c.MustGet("username").(string) + targetbg := c.MustGet("busi_group").(*models.BusiGroup) + + ginx.NewRender(c).Message(targetbg.DelMembers(members, username)) +} + +func busiGroupDel(c *gin.Context) { + username := c.MustGet("username").(string) + targetbg := c.MustGet("busi_group").(*models.BusiGroup) + + err := targetbg.Del() + if err != nil { + logger.Infof("busi_group_delete fail: operator=%s, group_name=%s error=%v", username, targetbg.Name, err) + } else { + logger.Infof("busi_group_delete succ: operator=%s, group_name=%s", username, targetbg.Name) + } + + ginx.NewRender(c).Message(err) +} + +// 我是超管、或者我是业务组成员 +func busiGroupGets(c *gin.Context) { + limit := ginx.QueryInt(c, "limit", defaultLimit) + query := ginx.QueryStr(c, "query", "") + + me := c.MustGet("user").(*models.User) + lst, err := me.BusiGroups(limit, query) + + ginx.NewRender(c).Data(lst, err) +} + +// 
这个接口只有在活跃告警页面才调用,获取各个BG的活跃告警数量 +func busiGroupAlertingsGets(c *gin.Context) { + ids := ginx.QueryStr(c, "ids", "") + ret, err := models.AlertNumbers(str.IdsInt64(ids)) + ginx.NewRender(c).Data(ret, err) +} + +func busiGroupGet(c *gin.Context) { + bg := BusiGroup(ginx.UrlParamInt64(c, "id")) + ginx.Dangerous(bg.FillUserGroups()) + ginx.NewRender(c).Data(bg, nil) +} diff --git a/src/webapi/router/router_chart.go b/src/webapi/router/router_chart.go new file mode 100644 index 00000000..cb8573b0 --- /dev/null +++ b/src/webapi/router/router_chart.go @@ -0,0 +1,46 @@ +package router + +import ( + "github.com/gin-gonic/gin" + "github.com/toolkits/pkg/ginx" + + "github.com/didi/nightingale/v5/src/models" +) + +func chartGets(c *gin.Context) { + lst, err := models.ChartsOf(ginx.QueryInt64(c, "cgid")) + ginx.NewRender(c).Data(lst, err) +} + +func chartAdd(c *gin.Context) { + var chart models.Chart + ginx.BindJSON(c, &chart) + + // group_id / configs / weight + chart.Id = 0 + err := chart.Add() + ginx.NewRender(c).Data(chart, err) +} + +func chartPut(c *gin.Context) { + var arr []models.Chart + ginx.BindJSON(c, &arr) + + for i := 0; i < len(arr); i++ { + ginx.Dangerous(arr[i].Update("configs", "weight", "group_id")) + } + + ginx.NewRender(c).Message(nil) +} + +func chartDel(c *gin.Context) { + var f idsForm + ginx.BindJSON(c, &f) + + for i := 0; i < len(f.Ids); i++ { + cg := models.Chart{Id: f.Ids[i]} + ginx.Dangerous(cg.Del()) + } + + ginx.NewRender(c).Message(nil) +} diff --git a/src/webapi/router/router_chart_group.go b/src/webapi/router/router_chart_group.go new file mode 100644 index 00000000..522f6997 --- /dev/null +++ b/src/webapi/router/router_chart_group.go @@ -0,0 +1,46 @@ +package router + +import ( + "github.com/gin-gonic/gin" + "github.com/toolkits/pkg/ginx" + + "github.com/didi/nightingale/v5/src/models" +) + +func chartGroupGets(c *gin.Context) { + objs, err := models.ChartGroupsOf(ginx.QueryInt64(c, "did")) + ginx.NewRender(c).Data(objs, err) +} + +func chartGroupAdd(c *gin.Context) { + var cg models.ChartGroup + ginx.BindJSON(c, &cg) + + // dashboard_id / name / weight + cg.Id = 0 + err := cg.Add() + ginx.NewRender(c).Data(cg, err) +} + +func chartGroupPut(c *gin.Context) { + var arr []models.ChartGroup + ginx.BindJSON(c, &arr) + + for i := 0; i < len(arr); i++ { + ginx.Dangerous(arr[i].Update("name", "weight", "dashboard_id")) + } + + ginx.NewRender(c).Message(nil) +} + +func chartGroupDel(c *gin.Context) { + var f idsForm + ginx.BindJSON(c, &f) + + for i := 0; i < len(f.Ids); i++ { + cg := models.ChartGroup{Id: f.Ids[i]} + ginx.Dangerous(cg.Del()) + } + + ginx.NewRender(c).Message(nil) +} diff --git a/src/webapi/router/router_chart_share.go b/src/webapi/router/router_chart_share.go new file mode 100644 index 00000000..f874677a --- /dev/null +++ b/src/webapi/router/router_chart_share.go @@ -0,0 +1,45 @@ +package router + +import ( + "time" + + "github.com/gin-gonic/gin" + "github.com/toolkits/pkg/ginx" + "github.com/toolkits/pkg/str" + + "github.com/didi/nightingale/v5/src/models" +) + +func chartShareGets(c *gin.Context) { + ids := ginx.QueryStr(c, "ids", "") + lst, err := models.ChartShareGetsByIds(str.IdsInt64(ids, ",")) + ginx.NewRender(c).Data(lst, err) +} + +type chartShareForm struct { + Configs string `json:"configs"` +} + +func chartShareAdd(c *gin.Context) { + username := c.MustGet("username").(string) + cluster := MustGetCluster(c) + + var forms []chartShareForm + ginx.BindJSON(c, &forms) + + ids := []int64{} + now := time.Now().Unix() + + for _, f := range forms { + chart 
:= models.ChartShare{ + Cluster: cluster, + Configs: f.Configs, + CreateBy: username, + CreateAt: now, + } + ginx.Dangerous(chart.Add()) + ids = append(ids, chart.Id) + } + + ginx.NewRender(c).Data(ids, nil) +} diff --git a/src/webapi/router/router_collect_rule.go b/src/webapi/router/router_collect_rule.go new file mode 100644 index 00000000..7912612f --- /dev/null +++ b/src/webapi/router/router_collect_rule.go @@ -0,0 +1,79 @@ +package router + +import ( + "net/http" + + "github.com/gin-gonic/gin" + "github.com/toolkits/pkg/ginx" + + "github.com/didi/nightingale/v5/src/models" +) + +func collectRuleGets(c *gin.Context) { + busiGroupId := ginx.UrlParamInt64(c, "id") + crs, err := models.CollectRuleGets(busiGroupId, ginx.QueryStr(c, "type", "")) + ginx.NewRender(c).Data(crs, err) +} + +func collectRuleAdd(c *gin.Context) { + var lst []models.CollectRule + ginx.BindJSON(c, &lst) + + count := len(lst) + if count == 0 { + ginx.Bomb(http.StatusBadRequest, "input json is empty") + } + + username := c.MustGet("username").(string) + bgid := ginx.UrlParamInt64(c, "id") + + // collect rule name -> error string + reterr := make(map[string]string) + for i := 0; i < count; i++ { + lst[i].Id = 0 + lst[i].GroupId = bgid + lst[i].CreateBy = username + lst[i].UpdateBy = username + lst[i].FE2DB() + + if err := lst[i].Add(); err != nil { + reterr[lst[i].Name] = err.Error() + } else { + reterr[lst[i].Name] = "" + } + } + + ginx.NewRender(c).Data(reterr, nil) +} + +func collectRuleDel(c *gin.Context) { + var f idsForm + ginx.BindJSON(c, &f) + f.Verify() + + // param(busiGroupId) for protect + ginx.NewRender(c).Message(models.CollectRuleDels(f.Ids, ginx.UrlParamInt64(c, "id"))) +} + +func collectRuleGet(c *gin.Context) { + crid := ginx.UrlParamInt64(c, "crid") + cr, err := models.CollectRuleGetById(crid) + ginx.NewRender(c).Data(cr, err) +} + +func collectRulePut(c *gin.Context) { + var f models.CollectRule + ginx.BindJSON(c, &f) + + crid := ginx.UrlParamInt64(c, "crid") + cr, err := models.CollectRuleGetById(crid) + ginx.Dangerous(err) + + if cr == nil { + ginx.NewRender(c, http.StatusNotFound).Message("No such CollectRule") + return + } + + f.UpdateBy = c.MustGet("username").(string) + ginx.NewRender(c).Message(cr.Update(f)) +} diff --git a/src/webapi/router/router_config.go b/src/webapi/router/router_config.go new file mode 100644 index 00000000..f0cd45ae --- /dev/null +++ b/src/webapi/router/router_config.go @@ -0,0 +1,16 @@ +package router + +import ( + "github.com/gin-gonic/gin" + "github.com/toolkits/pkg/ginx" + + "github.com/didi/nightingale/v5/src/webapi/config" +) + +func notifyChannelsGets(c *gin.Context) { + ginx.NewRender(c).Data(config.C.NotifyChannels, nil) +} + +func contactKeysGets(c *gin.Context) { + ginx.NewRender(c).Data(config.C.ContactKeys, nil) +} diff --git a/src/webapi/router/router_dashboard.go b/src/webapi/router/router_dashboard.go new file mode 100644 index 00000000..77d73a0a --- /dev/null +++ b/src/webapi/router/router_dashboard.go @@ -0,0 +1,262 @@ +package router + +import ( + "net/http" + "strings" + "time" + + "github.com/gin-gonic/gin" + "github.com/toolkits/pkg/ginx" + + "github.com/didi/nightingale/v5/src/models" +) + +// Return all, front-end search and paging +func dashboardGets(c *gin.Context) { + busiGroupId := ginx.UrlParamInt64(c, "id") + query := ginx.QueryStr(c, "query", "") + dashboards, err := models.DashboardGets(busiGroupId, query) + ginx.NewRender(c).Data(dashboards, err) +} + +type dashboardForm struct { + Name string `json:"name"` + Tags []string `json:"tags"` 
+ Configs string `json:"configs"` + Pure bool `json:"pure"` // 更新的时候,如果pure=true,就不更新configs了 +} + +func dashboardAdd(c *gin.Context) { + var f dashboardForm + ginx.BindJSON(c, &f) + + me := c.MustGet("user").(*models.User) + + dash := &models.Dashboard{ + GroupId: ginx.UrlParamInt64(c, "id"), + Name: f.Name, + Tags: strings.Join(f.Tags, " "), + Configs: f.Configs, + CreateBy: me.Username, + UpdateBy: me.Username, + } + + err := dash.Add() + if err == nil { + models.NewDefaultChartGroup(dash.Id) + } + + ginx.NewRender(c).Message(err) +} + +func dashboardGet(c *gin.Context) { + dash := Dashboard(ginx.UrlParamInt64(c, "did")) + ginx.NewRender(c).Data(dash, nil) +} + +func dashboardPut(c *gin.Context) { + var f dashboardForm + ginx.BindJSON(c, &f) + + me := c.MustGet("user").(*models.User) + dash := Dashboard(ginx.UrlParamInt64(c, "did")) + + if dash.Name != f.Name { + exists, err := models.DashboardExists("name = ? and id <> ?", f.Name, dash.Id) + ginx.Dangerous(err) + + if exists { + ginx.Bomb(200, "Dashboard already exists") + } + } + + dash.Name = f.Name + dash.Tags = strings.Join(f.Tags, " ") + dash.TagsLst = f.Tags + dash.UpdateBy = me.Username + dash.UpdateAt = time.Now().Unix() + + var err error + if !f.Pure { + dash.Configs = f.Configs + err = dash.Update("name", "tags", "configs", "update_by", "update_at") + } else { + err = dash.Update("name", "tags", "update_by", "update_at") + } + + ginx.NewRender(c).Data(dash, err) +} + +func dashboardDel(c *gin.Context) { + dash := Dashboard(ginx.UrlParamInt64(c, "did")) + if dash.GroupId != ginx.UrlParamInt64(c, "id") { + ginx.Bomb(http.StatusForbidden, "Oops...bad boy...") + } + ginx.NewRender(c).Message(dash.Del()) +} + +type ChartPure struct { + Configs string `json:"configs"` + Weight int `json:"weight"` +} + +type ChartGroupPure struct { + Name string `json:"name"` + Weight int `json:"weight"` + Charts []ChartPure `json:"charts"` +} + +type DashboardPure struct { + Name string `json:"name"` + Tags string `json:"tags"` + Configs string `json:"configs"` + ChartGroups []ChartGroupPure `json:"chart_groups"` +} + +func dashboardExport(c *gin.Context) { + var f idsForm + ginx.BindJSON(c, &f) + + dashboards, err := models.DashboardGetsByIds(f.Ids) + ginx.Dangerous(err) + + dashPures := []DashboardPure{} + + for i := range dashboards { + // convert dashboard + dashPure := DashboardPure{ + Name: dashboards[i].Name, + Tags: dashboards[i].Tags, + Configs: dashboards[i].Configs, + } + + cgs, err := models.ChartGroupsOf(dashboards[i].Id) + ginx.Dangerous(err) + + cgPures := []ChartGroupPure{} + for j := range cgs { + cgPure := ChartGroupPure{ + Name: cgs[j].Name, + Weight: cgs[j].Weight, + } + + charts, err := models.ChartsOf(cgs[j].Id) + ginx.Dangerous(err) + + chartPures := []ChartPure{} + for k := range charts { + chartPure := ChartPure{ + Configs: charts[k].Configs, + Weight: charts[k].Weight, + } + chartPures = append(chartPures, chartPure) + } + + cgPure.Charts = chartPures + cgPures = append(cgPures, cgPure) + } + + dashPure.ChartGroups = cgPures + dashPures = append(dashPures, dashPure) + } + + ginx.NewRender(c).Data(dashPures, nil) +} + +func dashboardImport(c *gin.Context) { + var dashPures []DashboardPure + ginx.BindJSON(c, &dashPures) + + me := c.MustGet("user").(*models.User) + bg := c.MustGet("busi_group").(*models.BusiGroup) + + ret := make(map[string]string) + + for _, dashPure := range dashPures { + dash := &models.Dashboard{ + Name: dashPure.Name, + Tags: dashPure.Tags, + Configs: dashPure.Configs, + GroupId: bg.Id, + CreateBy: 
me.Username, + UpdateBy: me.Username, + } + + ret[dash.Name] = "" + + err := dash.Add() + if err != nil { + ret[dash.Name] = err.Error() + continue + } + + for _, cgPure := range dashPure.ChartGroups { + cg := &models.ChartGroup{ + Name: cgPure.Name, + Weight: cgPure.Weight, + DashboardId: dash.Id, + } + + err := cg.Add() + if err != nil { + ret[dash.Name] = err.Error() + continue + } + + for _, chartPure := range cgPure.Charts { + chart := &models.Chart{ + Configs: chartPure.Configs, + Weight: chartPure.Weight, + GroupId: cg.Id, + } + + err := chart.Add() + if err != nil { + ret[dash.Name] = err.Error() + continue + } + } + } + } + + ginx.NewRender(c).Data(ret, nil) +} + +type idForm struct { + Id int64 `json:"id" binding:"required"` +} + +func dashboardClone(c *gin.Context) { + dash := Dashboard(ginx.UrlParamInt64(c, "did")) + user := c.MustGet("user").(*models.User) + + newDash := &models.Dashboard{ + Name: dash.Name + " Copy at " + time.Now().Format("2006-01-02 15:04:05"), + Tags: dash.Tags, + Configs: dash.Configs, + GroupId: dash.GroupId, + CreateBy: user.Username, + UpdateBy: user.Username, + } + ginx.Dangerous(newDash.Add()) + + chartGroups, err := models.ChartGroupsOf(dash.Id) + ginx.Dangerous(err) + + for _, chartGroup := range chartGroups { + charts, err := models.ChartsOf(chartGroup.Id) + ginx.Dangerous(err) + + chartGroup.DashboardId = newDash.Id + chartGroup.Id = 0 + ginx.Dangerous(chartGroup.Add()) + + for _, chart := range charts { + chart.Id = 0 + chart.GroupId = chartGroup.Id + ginx.Dangerous(chart.Add()) + } + } + + ginx.NewRender(c).Message(nil) +} diff --git a/src/webapi/router/router_funcs.go b/src/webapi/router/router_funcs.go new file mode 100644 index 00000000..58498e0e --- /dev/null +++ b/src/webapi/router/router_funcs.go @@ -0,0 +1,150 @@ +package router + +import ( + "fmt" + "net/http" + "strings" + + "github.com/gin-gonic/gin" + "github.com/toolkits/pkg/ginx" + "github.com/toolkits/pkg/str" + + "github.com/didi/nightingale/v5/src/models" + "github.com/didi/nightingale/v5/src/pkg/ibex" + "github.com/didi/nightingale/v5/src/webapi/config" +) + +const defaultLimit = 300 + +func queryClusters(c *gin.Context) []string { + clusters := ginx.QueryStr(c, "clusters", "") + clusters = strings.ReplaceAll(clusters, ",", " ") + return strings.Fields(clusters) +} + +func Cluster(c *gin.Context) string { + return c.GetHeader("X-Cluster") +} + +func MustGetCluster(c *gin.Context) string { + cluster := Cluster(c) + if cluster == "" { + ginx.Bomb(http.StatusBadRequest, "Header(X-Cluster) missed") + } + return cluster +} + +type idsForm struct { + Ids []int64 `json:"ids"` +} + +func (f idsForm) Verify() { + if len(f.Ids) == 0 { + ginx.Bomb(http.StatusBadRequest, "ids empty") + } +} + +func User(id int64) *models.User { + obj, err := models.UserGetById(id) + ginx.Dangerous(err) + + if obj == nil { + ginx.Bomb(http.StatusNotFound, "No such user") + } + + return obj +} + +func UserGroup(id int64) *models.UserGroup { + obj, err := models.UserGroupGetById(id) + ginx.Dangerous(err) + + if obj == nil { + ginx.Bomb(http.StatusNotFound, "No such UserGroup") + } + + return obj +} + +func BusiGroup(id int64) *models.BusiGroup { + obj, err := models.BusiGroupGetById(id) + ginx.Dangerous(err) + + if obj == nil { + ginx.Bomb(http.StatusNotFound, "No such BusiGroup") + } + + return obj +} + +func Dashboard(id int64) *models.Dashboard { + obj, err := models.DashboardGet("id=?", id) + ginx.Dangerous(err) + + if obj == nil { + ginx.Bomb(http.StatusNotFound, "No such dashboard") + } + + return obj 
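// NOTE (editor): User, UserGroup, BusiGroup and Dashboard above share one
// fetch-or-abort pattern: ginx.Dangerous aborts the request on a DB error and
// ginx.Bomb(http.StatusNotFound, ...) aborts with a 404 when the record is
// missing, so handlers can use the returned pointer without nil checks.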
+} + +type DoneIdsReply struct { + Err string `json:"err"` + Dat struct { + List []int64 `json:"list"` + } `json:"dat"` +} + +func TaskDoneIds(ids []int64) ([]int64, error) { + var res DoneIdsReply + err := ibex.New( + config.C.Ibex.Address, + config.C.Ibex.BasicAuthUser, + config.C.Ibex.BasicAuthPass, + config.C.Ibex.Timeout, + ). + Path("/ibex/v1/tasks/done-ids"). + QueryString("ids", str.IdsString(ids, ",")). + Out(&res). + GET() + + if err != nil { + return nil, err + } + + if res.Err != "" { + return nil, fmt.Errorf("response.err: %v", res.Err) + } + + return res.Dat.List, nil +} + +type TaskCreateReply struct { + Err string `json:"err"` + Dat int64 `json:"dat"` // task.id +} + +// return task.id, error +func TaskCreate(v interface{}) (int64, error) { + var res TaskCreateReply + err := ibex.New( + config.C.Ibex.Address, + config.C.Ibex.BasicAuthUser, + config.C.Ibex.BasicAuthPass, + config.C.Ibex.Timeout, + ). + Path("/ibex/v1/tasks"). + In(v). + Out(&res). + POST() + + if err != nil { + return 0, err + } + + if res.Err != "" { + return 0, fmt.Errorf("response.err: %v", res.Err) + } + + return res.Dat, nil +} diff --git a/src/webapi/router/router_login.go b/src/webapi/router/router_login.go new file mode 100644 index 00000000..13d41576 --- /dev/null +++ b/src/webapi/router/router_login.go @@ -0,0 +1,138 @@ +package router + +import ( + "fmt" + "net/http" + "strings" + + "github.com/dgrijalva/jwt-go" + "github.com/gin-gonic/gin" + "github.com/toolkits/pkg/ginx" + + "github.com/didi/nightingale/v5/src/models" + "github.com/didi/nightingale/v5/src/webapi/config" +) + +type loginForm struct { + Username string `json:"username" binding:"required"` + Password string `json:"password" binding:"required"` +} + +func loginPost(c *gin.Context) { + var f loginForm + ginx.BindJSON(c, &f) + + user, err := models.PassLogin(f.Username, f.Password) + if err != nil { + // pass validate fail, try ldap + if config.C.LDAP.Enable { + user, err = models.LdapLogin(f.Username, f.Password) + if err != nil { + ginx.NewRender(c).Message(err) + return + } + } else { + ginx.NewRender(c).Message(err) + return + } + } + + if user == nil { + // Theoretically impossible + ginx.NewRender(c).Message("Username or password invalid") + return + } + + userIdentity := fmt.Sprintf("%d-%s", user.Id, user.Username) + + ts, err := createTokens(config.C.JWTAuth.SigningKey, userIdentity) + ginx.Dangerous(err) + ginx.Dangerous(createAuth(c.Request.Context(), userIdentity, ts)) + + ginx.NewRender(c).Data(gin.H{ + "user": user, + "access_token": ts.AccessToken, + "refresh_token": ts.RefreshToken, + }, nil) +} + +func logoutPost(c *gin.Context) { + metadata, err := extractTokenMetadata(c.Request) + if err != nil { + ginx.NewRender(c, http.StatusBadRequest).Message("failed to parse jwt token") + return + } + + delErr := deleteTokens(c.Request.Context(), metadata) + if delErr != nil { + ginx.NewRender(c).Message(InternalServerError) + return + } + + ginx.NewRender(c).Message("") +} + +type refreshForm struct { + RefreshToken string `json:"refresh_token" binding:"required"` +} + +func refreshPost(c *gin.Context) { + var f refreshForm + ginx.BindJSON(c, &f) + + // verify the token + token, err := jwt.Parse(f.RefreshToken, func(token *jwt.Token) (interface{}, error) { + if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok { + return nil, fmt.Errorf("unexpected jwt signing method: %v", token.Header["alg"]) + } + return []byte(config.C.JWTAuth.SigningKey), nil + }) + + // if there is an error, the token must have expired + if err != nil { 
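// NOTE (editor): with this jwt-go library, jwt.Parse fails both for malformed
// tokens and for tokens whose "exp" claim has passed, so any parse error here is
// treated as an expired refresh token and the client is asked to log in again.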
+ // redirect to login page + ginx.NewRender(c).Message("refresh token expired") + return + } + + // Since token is valid, get the uuid: + claims, ok := token.Claims.(jwt.MapClaims) //the token claims should conform to MapClaims + if ok && token.Valid { + refreshUuid, ok := claims["refresh_uuid"].(string) //convert the interface to string + if !ok { + // Theoretically impossible + ginx.NewRender(c).Message("failed to parse refresh_uuid from jwt") + return + } + + userIdentity, ok := claims["user_identity"].(string) + if !ok { + // Theoretically impossible + ginx.NewRender(c).Message("failed to parse user_identity from jwt") + return + } + + // Delete the previous Refresh Token + err = deleteAuth(c.Request.Context(), refreshUuid) + if err != nil { + ginx.NewRender(c).Message(InternalServerError) + return + } + + // Delete previous Access Token + deleteAuth(c.Request.Context(), strings.Split(refreshUuid, "++")[0]) + + // Create new pairs of refresh and access tokens + ts, err := createTokens(config.C.JWTAuth.SigningKey, userIdentity) + ginx.Dangerous(err) + ginx.Dangerous(createAuth(c.Request.Context(), userIdentity, ts)) + + ginx.NewRender(c).Data(gin.H{ + "access_token": ts.AccessToken, + "refresh_token": ts.RefreshToken, + }, nil) + } else { + // redirect to login page + ginx.NewRender(c).Message("refresh token expired") + } +} diff --git a/src/webapi/router/router_metric_desc.go b/src/webapi/router/router_metric_desc.go new file mode 100644 index 00000000..f46726d5 --- /dev/null +++ b/src/webapi/router/router_metric_desc.go @@ -0,0 +1,119 @@ +package router + +import ( + "path" + + "github.com/gin-gonic/gin" + "github.com/toolkits/pkg/file" + "github.com/toolkits/pkg/ginx" + "github.com/toolkits/pkg/runner" + + "github.com/didi/nightingale/v5/src/webapi/config" +) + +func metricsDescGetFile(c *gin.Context) { + fp := path.Join(runner.Cwd, "etc", "metrics.yaml") + if !file.IsExist(fp) { + c.String(404, "%s not found", fp) + return + } + + ret := make(map[string]string) + err := file.ReadYaml(fp, &ret) + if err != nil { + c.String(500, err.Error()) + return + } + + c.JSON(200, ret) +} + +// 前端传过来一个metric数组,后端去查询有没有对应的释义,返回map +func metricsDescGetMap(c *gin.Context) { + var arr []string + ginx.BindJSON(c, &arr) + + ret := make(map[string]string) + for i := 0; i < len(arr); i++ { + desc, has := config.Metrics.Get(arr[i]) + if !has { + ret[arr[i]] = "" + } else { + ret[arr[i]] = desc.(string) + } + } + + ginx.NewRender(c).Data(ret, nil) +} + +// 页面功能暂时先不要了,直接通过配置文件来维护 +// func metricDescriptionGets(c *gin.Context) { +// limit := ginx.QueryInt(c, "limit", 20) +// query := ginx.QueryStr(c, "query", "") + +// total, err := models.MetricDescriptionTotal(query) +// ginx.Dangerous(err) + +// list, err := models.MetricDescriptionGets(query, limit, ginx.Offset(c, limit)) +// ginx.Dangerous(err) + +// ginx.NewRender(c).Data(gin.H{ +// "list": list, +// "total": total, +// }, nil) +// } + +// type metricDescriptionAddForm struct { +// Data string `json:"data"` +// } + +// func metricDescriptionAdd(c *gin.Context) { +// var f metricDescriptionAddForm +// ginx.BindJSON(c, &f) + +// var metricDescriptions []models.MetricDescription + +// lines := strings.Split(f.Data, "\n") +// for _, md := range lines { +// arr := strings.SplitN(md, ":", 2) +// if len(arr) != 2 { +// ginx.Bomb(200, "metric description %s is illegal", md) +// } +// m := models.MetricDescription{ +// Metric: arr[0], +// Description: arr[1], +// } +// metricDescriptions = append(metricDescriptions, m) +// } + +// if len(metricDescriptions) 
== 0 { +// ginx.Bomb(http.StatusBadRequest, "Decoded metric description empty") +// } + +// ginx.NewRender(c).Message(models.MetricDescriptionUpdate(metricDescriptions)) +// } + +// func metricDescriptionDel(c *gin.Context) { +// var f idsForm +// ginx.BindJSON(c, &f) +// f.Verify() +// ginx.NewRender(c).Message(models.MetricDescriptionDel(f.Ids)) +// } + +// type metricDescriptionForm struct { +// Description string `json:"description"` +// } + +// func metricDescriptionPut(c *gin.Context) { +// var f metricDescriptionForm +// ginx.BindJSON(c, &f) + +// md, err := models.MetricDescriptionGet("id=?", ginx.UrlParamInt64(c, "id")) +// ginx.Dangerous(err) + +// if md == nil { +// ginx.Bomb(200, "No such metric description") +// } + +// ginx.NewRender(c).Message(md.Update(f.Description, time.Now().Unix())) +// } diff --git a/src/webapi/router/router_mute.go b/src/webapi/router/router_mute.go new file mode 100644 index 00000000..13599e7d --- /dev/null +++ b/src/webapi/router/router_mute.go @@ -0,0 +1,34 @@ +package router + +import ( + "github.com/gin-gonic/gin" + "github.com/toolkits/pkg/ginx" + + "github.com/didi/nightingale/v5/src/models" +) + +// Return all, front-end search and paging +func alertMuteGets(c *gin.Context) { + bgid := ginx.UrlParamInt64(c, "id") + lst, err := models.AlertMuteGets(bgid) + ginx.NewRender(c).Data(lst, err) +} + +func alertMuteAdd(c *gin.Context) { + var f models.AlertMute + ginx.BindJSON(c, &f) + + username := c.MustGet("username").(string) + f.CreateBy = username + f.GroupId = ginx.UrlParamInt64(c, "id") + + ginx.NewRender(c).Message(f.Add()) +} + +func alertMuteDel(c *gin.Context) { + var f idsForm + ginx.BindJSON(c, &f) + f.Verify() + + ginx.NewRender(c).Message(models.AlertMuteDel(f.Ids)) +} diff --git a/src/webapi/router/router_mw.go b/src/webapi/router/router_mw.go new file mode 100644 index 00000000..108d8c2e --- /dev/null +++ b/src/webapi/router/router_mw.go @@ -0,0 +1,311 @@ +package router + +import ( + "context" + "fmt" + "net/http" + "strconv" + "strings" + "time" + + "github.com/dgrijalva/jwt-go" + "github.com/gin-gonic/gin" + "github.com/google/uuid" + "github.com/toolkits/pkg/ginx" + + "github.com/didi/nightingale/v5/src/models" + "github.com/didi/nightingale/v5/src/storage" + "github.com/didi/nightingale/v5/src/webapi/config" +) + +type AccessDetails struct { + AccessUuid string + UserIdentity string +} + +func jwtAuth() gin.HandlerFunc { + return func(c *gin.Context) { + metadata, err := extractTokenMetadata(c.Request) + if err != nil { + ginx.Bomb(http.StatusUnauthorized, "unauthorized") + } + + userIdentity, err := fetchAuth(c.Request.Context(), metadata.AccessUuid) + if err != nil { + ginx.Bomb(http.StatusUnauthorized, "unauthorized") + } + + // ${userid}-${username} + arr := strings.SplitN(userIdentity, "-", 2) + if len(arr) != 2 { + ginx.Bomb(http.StatusUnauthorized, "unauthorized") + } + + userid, err := strconv.ParseInt(arr[0], 10, 64) + if err != nil { + ginx.Bomb(http.StatusUnauthorized, "unauthorized") + } + + c.Set("userid", userid) + c.Set("username", arr[1]) + + c.Next() + } +} + +func user() gin.HandlerFunc { + return func(c *gin.Context) { + userid := c.MustGet("userid").(int64) + + user, err := models.UserGetById(userid) + if err != nil { + ginx.Bomb(http.StatusUnauthorized, "unauthorized") + } + + if user == nil { + ginx.Bomb(http.StatusUnauthorized, "unauthorized") + } + + c.Set("user", user) + c.Next() + } +} + +func userGroupWrite() gin.HandlerFunc { + return func(c *gin.Context) { + me := c.MustGet("user").(*models.User) + 
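// NOTE (editor): userGroupWrite (like bgro, bgrw, perm and admin below) relies on
// jwtAuth() and user() having run earlier in the handler chain: jwtAuth() stores
// "userid"/"username" parsed from the access token, user() loads the *models.User,
// and c.MustGet would panic here if that context value were missing.
// Hypothetical wiring, not taken from this patch:
//   r.PUT("/user-group/:id", jwtAuth(), user(), userGroupWrite(), userGroupPut)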
ug := UserGroup(ginx.UrlParamInt64(c, "id")) + + can, err := me.CanModifyUserGroup(ug) + ginx.Dangerous(err) + + if !can { + ginx.Bomb(http.StatusForbidden, "forbidden") + } + + c.Set("user_group", ug) + c.Next() + } +} + +func bgro() gin.HandlerFunc { + return func(c *gin.Context) { + me := c.MustGet("user").(*models.User) + bg := BusiGroup(ginx.UrlParamInt64(c, "id")) + + can, err := me.CanDoBusiGroup(bg) + ginx.Dangerous(err) + + if !can { + ginx.Bomb(http.StatusForbidden, "forbidden") + } + + c.Set("busi_group", bg) + c.Next() + } +} + +func bgrw() gin.HandlerFunc { + return func(c *gin.Context) { + me := c.MustGet("user").(*models.User) + bg := BusiGroup(ginx.UrlParamInt64(c, "id")) + + can, err := me.CanDoBusiGroup(bg, "rw") + ginx.Dangerous(err) + + if !can { + ginx.Bomb(http.StatusForbidden, "forbidden") + } + + c.Set("busi_group", bg) + c.Next() + } +} + +func perm(operation string) gin.HandlerFunc { + return func(c *gin.Context) { + me := c.MustGet("user").(*models.User) + + can, err := me.CheckPerm(operation) + ginx.Dangerous(err) + + if !can { + ginx.Bomb(http.StatusForbidden, "forbidden") + } + + c.Next() + } +} + +func admin() gin.HandlerFunc { + return func(c *gin.Context) { + userid := c.MustGet("userid").(int64) + + user, err := models.UserGetById(userid) + if err != nil { + ginx.Bomb(http.StatusUnauthorized, "unauthorized") + } + + if user == nil { + ginx.Bomb(http.StatusUnauthorized, "unauthorized") + } + + roles := strings.Fields(user.Roles) + found := false + for i := 0; i < len(roles); i++ { + if roles[i] == config.C.AdminRole { + found = true + break + } + } + + if !found { + ginx.Bomb(http.StatusForbidden, "forbidden") + } + + c.Set("user", user) + c.Next() + } +} + +func extractTokenMetadata(r *http.Request) (*AccessDetails, error) { + token, err := verifyToken(config.C.JWTAuth.SigningKey, extractToken(r)) + if err != nil { + return nil, err + } + + claims, ok := token.Claims.(jwt.MapClaims) + if ok && token.Valid { + accessUuid, ok := claims["access_uuid"].(string) + if !ok { + return nil, err + } + + return &AccessDetails{ + AccessUuid: accessUuid, + UserIdentity: claims["user_identity"].(string), + }, nil + } + + return nil, err +} + +func extractToken(r *http.Request) string { + tok := r.Header.Get("Authorization") + + if len(tok) > 6 && strings.ToUpper(tok[0:7]) == "BEARER " { + return tok[7:] + } + + return "" +} + +func createAuth(ctx context.Context, userIdentity string, td *TokenDetails) error { + at := time.Unix(td.AtExpires, 0) + rt := time.Unix(td.RtExpires, 0) + now := time.Now() + + errAccess := storage.Redis.Set(ctx, wrapJwtKey(td.AccessUuid), userIdentity, at.Sub(now)).Err() + if errAccess != nil { + return errAccess + } + + errRefresh := storage.Redis.Set(ctx, wrapJwtKey(td.RefreshUuid), userIdentity, rt.Sub(now)).Err() + if errRefresh != nil { + return errRefresh + } + + return nil +} + +func fetchAuth(ctx context.Context, givenUuid string) (string, error) { + return storage.Redis.Get(ctx, wrapJwtKey(givenUuid)).Result() +} + +func deleteAuth(ctx context.Context, givenUuid string) error { + return storage.Redis.Del(ctx, wrapJwtKey(givenUuid)).Err() +} + +func deleteTokens(ctx context.Context, authD *AccessDetails) error { + // get the refresh uuid + refreshUuid := authD.AccessUuid + "++" + authD.UserIdentity + + // delete access token + err := storage.Redis.Del(ctx, wrapJwtKey(authD.AccessUuid)).Err() + if err != nil { + return err + } + + // delete refresh token + err = storage.Redis.Del(ctx, wrapJwtKey(refreshUuid)).Err() + if err != nil { + 
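// NOTE (editor): the refresh key is derived from the access uuid via the
// "<access_uuid>++<user_identity>" convention set up in createTokens below;
// refreshPost relies on the same convention when it recovers the access uuid
// with strings.Split(refreshUuid, "++")[0].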
return err + } + + return nil +} + +func wrapJwtKey(key string) string { + return config.C.JWTAuth.RedisKeyPrefix + key +} + +type TokenDetails struct { + AccessToken string + RefreshToken string + AccessUuid string + RefreshUuid string + AtExpires int64 + RtExpires int64 +} + +func createTokens(signingKey, userIdentity string) (*TokenDetails, error) { + td := &TokenDetails{} + td.AtExpires = time.Now().Add(time.Minute * time.Duration(config.C.JWTAuth.AccessExpired)).Unix() + td.AccessUuid = uuid.NewString() + + td.RtExpires = time.Now().Add(time.Minute * time.Duration(config.C.JWTAuth.RefreshExpired)).Unix() + td.RefreshUuid = td.AccessUuid + "++" + userIdentity + + var err error + // Creating Access Token + atClaims := jwt.MapClaims{} + atClaims["authorized"] = true + atClaims["access_uuid"] = td.AccessUuid + atClaims["user_identity"] = userIdentity + atClaims["exp"] = td.AtExpires + at := jwt.NewWithClaims(jwt.SigningMethodHS256, atClaims) + td.AccessToken, err = at.SignedString([]byte(signingKey)) + if err != nil { + return nil, err + } + + // Creating Refresh Token + rtClaims := jwt.MapClaims{} + rtClaims["refresh_uuid"] = td.RefreshUuid + rtClaims["user_identity"] = userIdentity + rtClaims["exp"] = td.RtExpires + rt := jwt.NewWithClaims(jwt.SigningMethodHS256, rtClaims) + td.RefreshToken, err = rt.SignedString([]byte(signingKey)) + if err != nil { + return nil, err + } + + return td, nil +} + +func verifyToken(signingKey, tokenString string) (*jwt.Token, error) { + if tokenString == "" { + return nil, fmt.Errorf("Bearer token not found") + } + + token, err := jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) { + if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok { + return nil, fmt.Errorf("unexpected jwt signing method: %v", token.Header["alg"]) + } + return []byte(signingKey), nil + }) + if err != nil { + return nil, err + } + return token, nil +} diff --git a/src/webapi/router/router_prometheus.go b/src/webapi/router/router_prometheus.go new file mode 100644 index 00000000..36fbce6c --- /dev/null +++ b/src/webapi/router/router_prometheus.go @@ -0,0 +1,82 @@ +package router + +import ( + "net/http" + "net/http/httputil" + "net/url" + "strings" + + "github.com/gin-gonic/gin" + "github.com/toolkits/pkg/ginx" + + "github.com/didi/nightingale/v5/src/webapi/config" + "github.com/didi/nightingale/v5/src/webapi/prom" +) + +func prometheusProxy(c *gin.Context) { + xcluster := c.GetHeader("X-Cluster") + if xcluster == "" { + c.String(http.StatusBadRequest, "X-Cluster missed") + return + } + + cluster, exists := prom.Clusters.Get(xcluster) + if !exists { + c.String(http.StatusBadRequest, "No such cluster: %s", xcluster) + return + } + + target, err := url.Parse(cluster.Opts.Prom) + if err != nil { + c.String(http.StatusInternalServerError, "invalid prometheus url: %s", cluster.Opts.Prom) + return + } + + director := func(req *http.Request) { + req.URL.Scheme = target.Scheme + req.URL.Host = target.Host + + // fe request e.g. 
/api/n9e/prometheus/api/v1/query + index := strings.Index(req.URL.Path, "/prometheus") + if index == -1 { + panic("url path invalid") + } + + req.URL.Path = strings.TrimRight(target.Path, "/") + req.URL.Path[index+11:] + + if target.RawQuery == "" || req.URL.RawQuery == "" { + req.URL.RawQuery = target.RawQuery + req.URL.RawQuery + } else { + req.URL.RawQuery = target.RawQuery + "&" + req.URL.RawQuery + } + + if _, ok := req.Header["User-Agent"]; !ok { + req.Header.Set("User-Agent", "") + } + + if cluster.Opts.BasicAuthUser != "" { + req.SetBasicAuth(cluster.Opts.BasicAuthUser, cluster.Opts.BasicAuthPass) + } + } + + errFunc := func(w http.ResponseWriter, r *http.Request, err error) { + http.Error(w, err.Error(), http.StatusBadGateway) + } + + proxy := &httputil.ReverseProxy{ + Director: director, + Transport: cluster.Transport, + ErrorHandler: errFunc, + } + + proxy.ServeHTTP(c.Writer, c.Request) +} + +func clustersGets(c *gin.Context) { + count := len(config.C.Clusters) + names := make([]string, 0, count) + for i := 0; i < count; i++ { + names = append(names, config.C.Clusters[i].Name) + } + ginx.NewRender(c).Data(names, nil) +} diff --git a/src/webapi/router/router_role.go b/src/webapi/router/router_role.go new file mode 100644 index 00000000..dae2cdd3 --- /dev/null +++ b/src/webapi/router/router_role.go @@ -0,0 +1,21 @@ +package router + +import ( + "strings" + + "github.com/gin-gonic/gin" + "github.com/toolkits/pkg/ginx" + + "github.com/didi/nightingale/v5/src/models" +) + +func rolesGets(c *gin.Context) { + lst, err := models.RoleGetsAll() + ginx.NewRender(c).Data(lst, err) +} + +func permsGets(c *gin.Context) { + user := c.MustGet("user").(*models.User) + lst, err := models.OperationsOfRole(strings.Fields(user.Roles)) + ginx.NewRender(c).Data(lst, err) +} diff --git a/src/webapi/router/router_self.go b/src/webapi/router/router_self.go new file mode 100644 index 00000000..22fb9e1c --- /dev/null +++ b/src/webapi/router/router_self.go @@ -0,0 +1,52 @@ +package router + +import ( + "github.com/gin-gonic/gin" + "github.com/toolkits/pkg/ginx" + + "github.com/didi/nightingale/v5/src/models" + "github.com/didi/nightingale/v5/src/pkg/ormx" +) + +func selfProfileGet(c *gin.Context) { + user := c.MustGet("user").(*models.User) + if user.IsAdmin() { + user.Admin = true + } + ginx.NewRender(c).Data(user, nil) +} + +type selfProfileForm struct { + Nickname string `json:"nickname"` + Phone string `json:"phone"` + Email string `json:"email"` + Portrait string `json:"portrait"` + Contacts ormx.JSONObj `json:"contacts"` +} + +func selfProfilePut(c *gin.Context) { + var f selfProfileForm + ginx.BindJSON(c, &f) + + user := c.MustGet("user").(*models.User) + user.Nickname = f.Nickname + user.Phone = f.Phone + user.Email = f.Email + user.Portrait = f.Portrait + user.Contacts = f.Contacts + user.UpdateBy = user.Username + + ginx.NewRender(c).Message(user.UpdateAllFields()) +} + +type selfPasswordForm struct { + OldPass string `json:"oldpass" binding:"required"` + NewPass string `json:"newpass" binding:"required"` +} + +func selfPasswordPut(c *gin.Context) { + var f selfPasswordForm + ginx.BindJSON(c, &f) + user := c.MustGet("user").(*models.User) + ginx.NewRender(c).Message(user.ChangePassword(f.OldPass, f.NewPass)) +} diff --git a/src/webapi/router/router_target.go b/src/webapi/router/router_target.go new file mode 100644 index 00000000..02408140 --- /dev/null +++ b/src/webapi/router/router_target.go @@ -0,0 +1,222 @@ +package router + +import ( + "net/http" + "strings" + + "github.com/gin-gonic/gin" + 
"github.com/prometheus/common/model" + "github.com/toolkits/pkg/ginx" + + "github.com/didi/nightingale/v5/src/models" +) + +func targetGets(c *gin.Context) { + bgid := ginx.QueryInt64(c, "bgid", -1) + query := ginx.QueryStr(c, "query", "") + limit := ginx.QueryInt(c, "limit", 30) + clusters := queryClusters(c) + + total, err := models.TargetTotal(bgid, clusters, query) + ginx.Dangerous(err) + + list, err := models.TargetGets(bgid, clusters, query, limit, ginx.Offset(c, limit)) + ginx.Dangerous(err) + + if err == nil { + cache := make(map[int64]*models.BusiGroup) + for i := 0; i < len(list); i++ { + ginx.Dangerous(list[i].FillGroup(cache)) + } + } + + ginx.NewRender(c).Data(gin.H{ + "list": list, + "total": total, + }, nil) +} + +func targetGetTags(c *gin.Context) { + idents := ginx.QueryStr(c, "idents") + idents = strings.ReplaceAll(idents, ",", " ") + lst, err := models.TargetGetTags(strings.Fields(idents)) + ginx.NewRender(c).Data(lst, err) +} + +type targetTagsForm struct { + Idents []string `json:"idents" binding:"required"` + Tags []string `json:"tags" binding:"required"` +} + +func targetBindTags(c *gin.Context) { + var f targetTagsForm + ginx.BindJSON(c, &f) + + if len(f.Idents) == 0 { + ginx.Bomb(http.StatusBadRequest, "idents empty") + } + + checkTargetPerm(c, f.Idents) + + // verify + for i := 0; i < len(f.Tags); i++ { + arr := strings.Split(f.Tags[i], "=") + if len(arr) != 2 { + ginx.Bomb(200, "invalid tag(%s)", f.Tags[i]) + } + + if strings.TrimSpace(arr[0]) == "" || strings.TrimSpace(arr[1]) == "" { + ginx.Bomb(200, "invalid tag(%s)", f.Tags[i]) + } + + if strings.IndexByte(arr[0], '.') != -1 { + ginx.Bomb(200, "invalid tagkey(%s): cannot contains .", arr[0]) + } + + if strings.IndexByte(arr[0], '-') != -1 { + ginx.Bomb(200, "invalid tagkey(%s): cannot contains -", arr[0]) + } + + if !model.LabelNameRE.MatchString(arr[0]) { + ginx.Bomb(200, "invalid tagkey(%s)", arr[0]) + } + } + + for i := 0; i < len(f.Idents); i++ { + target, err := models.TargetGetByIdent(f.Idents[i]) + ginx.Dangerous(err) + + if target == nil { + continue + } + + ginx.Dangerous(target.AddTags(f.Tags)) + } + + ginx.NewRender(c).Message(nil) +} + +func targetUnbindTags(c *gin.Context) { + var f targetTagsForm + ginx.BindJSON(c, &f) + + if len(f.Idents) == 0 { + ginx.Bomb(http.StatusBadRequest, "idents empty") + } + + checkTargetPerm(c, f.Idents) + + for i := 0; i < len(f.Idents); i++ { + target, err := models.TargetGetByIdent(f.Idents[i]) + ginx.Dangerous(err) + + if target == nil { + continue + } + + ginx.Dangerous(target.DelTags(f.Tags)) + } + + ginx.NewRender(c).Message(nil) +} + +type targetNoteForm struct { + Idents []string `json:"idents" binding:"required"` + Note string `json:"note"` +} + +func targetUpdateNote(c *gin.Context) { + var f targetNoteForm + ginx.BindJSON(c, &f) + + if len(f.Idents) == 0 { + ginx.Bomb(http.StatusBadRequest, "idents empty") + } + + checkTargetPerm(c, f.Idents) + + ginx.NewRender(c).Message(models.TargetUpdateNote(f.Idents, f.Note)) +} + +type targetBgidForm struct { + Idents []string `json:"idents" binding:"required"` + Bgid int64 `json:"bgid"` +} + +func targetUpdateBgid(c *gin.Context) { + var f targetBgidForm + ginx.BindJSON(c, &f) + + if len(f.Idents) == 0 { + ginx.Bomb(http.StatusBadRequest, "idents empty") + } + + user := c.MustGet("user").(*models.User) + if user.IsAdmin() { + ginx.NewRender(c).Message(models.TargetUpdateBgid(f.Idents, f.Bgid, false)) + return + } + + if f.Bgid > 0 { + // 把要操作的机器分成两部分,一部分是bgid为0,需要管理员分配,另一部分bgid>0,说明是业务组内部想调整 + // 
比如原来分配给didiyun的机器,didiyun的管理员想把部分机器调整到didiyun-ceph下 + // 对于调整的这种情况,当前登录用户要对这批机器有操作权限,同时还要对目标BG有操作权限 + orphans, err := models.IdentsFilter(f.Idents, "group_id = ?", 0) + ginx.Dangerous(err) + + // 机器里边存在未归组的,登录用户就需要是admin + if len(orphans) > 0 && !user.IsAdmin() { + ginx.Bomb(http.StatusForbidden, "No permission. Only admin can assign BG") + } + + reBelongs, err := models.IdentsFilter(f.Idents, "group_id > ?", 0) + ginx.Dangerous(err) + + if len(reBelongs) > 0 { + // 对于这些要重新分配的机器,操作者要对这些机器本身有权限,同时要对目标bgid有权限 + checkTargetPerm(c, f.Idents) + + bg := BusiGroup(f.Bgid) + can, err := user.CanDoBusiGroup(bg, "rw") + ginx.Dangerous(err) + + if !can { + ginx.Bomb(http.StatusForbidden, "No permission. You are not admin of BG(%s)", bg.Name) + } + } + } else if f.Bgid == 0 { + // 退还机器 + checkTargetPerm(c, f.Idents) + } else { + ginx.Bomb(http.StatusBadRequest, "invalid bgid") + } + + ginx.NewRender(c).Message(models.TargetUpdateBgid(f.Idents, f.Bgid, false)) +} + +type identsForm struct { + Idents []string `json:"idents" binding:"required"` +} + +func targetDel(c *gin.Context) { + var f identsForm + ginx.BindJSON(c, &f) + + if len(f.Idents) == 0 { + ginx.Bomb(http.StatusBadRequest, "idents empty") + } + + checkTargetPerm(c, f.Idents) + + ginx.NewRender(c).Message(models.TargetDel(f.Idents)) +} + +func checkTargetPerm(c *gin.Context, idents []string) { + user := c.MustGet("user").(*models.User) + nopri, err := user.NopriIdents(idents) + ginx.Dangerous(err) + + if len(nopri) > 0 { + ginx.Bomb(http.StatusForbidden, "No permission to operate the targets: %s", strings.Join(nopri, ", ")) + } +} diff --git a/src/webapi/router/router_task.go b/src/webapi/router/router_task.go new file mode 100644 index 00000000..643bc9b3 --- /dev/null +++ b/src/webapi/router/router_task.go @@ -0,0 +1,211 @@ +package router + +import ( + "fmt" + "net/http" + "net/http/httputil" + "net/url" + "strings" + "time" + + "github.com/gin-gonic/gin" + "github.com/toolkits/pkg/ginx" + "github.com/toolkits/pkg/str" + + "github.com/didi/nightingale/v5/src/models" + "github.com/didi/nightingale/v5/src/webapi/config" +) + +func taskGets(c *gin.Context) { + bgid := ginx.UrlParamInt64(c, "id") + mine := ginx.QueryBool(c, "mine", false) + hours := ginx.QueryInt64(c, "hours", 24*7) + limit := ginx.QueryInt(c, "limit", 20) + query := ginx.QueryStr(c, "query", "") + user := c.MustGet("user").(*models.User) + + creator := "" + if mine { + creator = user.Username + } + + beginTime := time.Now().Unix() - hours*3600 + + total, err := models.TaskRecordTotal(bgid, beginTime, creator, query) + ginx.Dangerous(err) + + list, err := models.TaskRecordGets(bgid, beginTime, creator, query, limit, ginx.Offset(c, limit)) + ginx.Dangerous(err) + + ginx.NewRender(c).Data(gin.H{ + "total": total, + "list": list, + }, nil) +} + +type taskForm struct { + Title string `json:"title" binding:"required"` + Account string `json:"account" binding:"required"` + Batch int `json:"batch"` + Tolerance int `json:"tolerance"` + Timeout int `json:"timeout"` + Pause string `json:"pause"` + Script string `json:"script" binding:"required"` + Args string `json:"args"` + Action string `json:"action" binding:"required"` + Creator string `json:"creator"` + Hosts []string `json:"hosts" binding:"required"` +} + +func (f *taskForm) Verify() error { + if f.Batch < 0 { + return fmt.Errorf("arg(batch) should be nonnegative") + } + + if f.Tolerance < 0 { + return fmt.Errorf("arg(tolerance) should be nonnegative") + } + + if f.Timeout < 0 { + return fmt.Errorf("arg(timeout) should be 
nonnegative") + } + + if f.Timeout > 3600*24 { + return fmt.Errorf("arg(timeout) longer than one day") + } + + if f.Timeout == 0 { + f.Timeout = 30 + } + + f.Pause = strings.Replace(f.Pause, ",", ",", -1) + f.Pause = strings.Replace(f.Pause, " ", "", -1) + f.Args = strings.Replace(f.Args, ",", ",", -1) + + if f.Title == "" { + return fmt.Errorf("arg(title) is required") + } + + if str.Dangerous(f.Title) { + return fmt.Errorf("arg(title) is dangerous") + } + + if f.Script == "" { + return fmt.Errorf("arg(script) is required") + } + + if str.Dangerous(f.Args) { + return fmt.Errorf("arg(args) is dangerous") + } + + if str.Dangerous(f.Pause) { + return fmt.Errorf("arg(pause) is dangerous") + } + + if len(f.Hosts) == 0 { + return fmt.Errorf("arg(hosts) empty") + } + + if f.Action != "start" && f.Action != "pause" { + return fmt.Errorf("arg(action) invalid") + } + + return nil +} + +func (f *taskForm) HandleFH(fh string) { + i := strings.Index(f.Title, " FH: ") + if i > 0 { + f.Title = f.Title[:i] + } + f.Title = f.Title + " FH: " + fh +} + +func taskAdd(c *gin.Context) { + var f taskForm + ginx.BindJSON(c, &f) + + bgid := ginx.UrlParamInt64(c, "id") + user := c.MustGet("user").(*models.User) + f.Creator = user.Username + + err := f.Verify() + ginx.Dangerous(err) + + f.HandleFH(f.Hosts[0]) + + // check permission + checkTargetPerm(c, f.Hosts) + + // call ibex + taskId, err := TaskCreate(f) + ginx.Dangerous(err) + + if taskId <= 0 { + ginx.Dangerous("created task.id is zero") + } + + // write db + record := models.TaskRecord{ + Id: taskId, + GroupId: bgid, + IbexAddress: config.C.Ibex.Address, + IbexAuthUser: config.C.Ibex.BasicAuthUser, + IbexAuthPass: config.C.Ibex.BasicAuthPass, + Title: f.Title, + Account: f.Account, + Batch: f.Batch, + Tolerance: f.Tolerance, + Timeout: f.Timeout, + Pause: f.Pause, + Script: f.Script, + Args: f.Args, + CreateAt: time.Now().Unix(), + CreateBy: f.Creator, + } + + err = record.Add() + ginx.NewRender(c).Data(taskId, err) +} + +func taskProxy(c *gin.Context) { + target, err := url.Parse(config.C.Ibex.Address) + if err != nil { + ginx.NewRender(c).Message("invalid ibex address: %s", config.C.Ibex.Address) + return + } + + director := func(req *http.Request) { + req.URL.Scheme = target.Scheme + req.URL.Host = target.Host + + // fe request e.g. 
/api/n9e/busi-group/:id/task/*url + index := strings.Index(req.URL.Path, "/task/") + if index == -1 { + panic("url path invalid") + } + + req.URL.Path = "/ibex/v1" + req.URL.Path[index:] + + if target.RawQuery == "" || req.URL.RawQuery == "" { + req.URL.RawQuery = target.RawQuery + req.URL.RawQuery + } else { + req.URL.RawQuery = target.RawQuery + "&" + req.URL.RawQuery + } + + if config.C.Ibex.BasicAuthUser != "" { + req.SetBasicAuth(config.C.Ibex.BasicAuthUser, config.C.Ibex.BasicAuthPass) + } + } + + errFunc := func(w http.ResponseWriter, r *http.Request, err error) { + ginx.NewRender(c, http.StatusBadGateway).Message(err) + } + + proxy := &httputil.ReverseProxy{ + Director: director, + ErrorHandler: errFunc, + } + + proxy.ServeHTTP(c.Writer, c.Request) +} diff --git a/src/webapi/router/router_task_tpl.go b/src/webapi/router/router_task_tpl.go new file mode 100644 index 00000000..5a48afdc --- /dev/null +++ b/src/webapi/router/router_task_tpl.go @@ -0,0 +1,211 @@ +package router + +import ( + "net/http" + "sort" + "strings" + "time" + + "github.com/gin-gonic/gin" + "github.com/toolkits/pkg/ginx" + "github.com/toolkits/pkg/str" + + "github.com/didi/nightingale/v5/src/models" +) + +func taskTplGets(c *gin.Context) { + query := ginx.QueryStr(c, "query", "") + limit := ginx.QueryInt(c, "limit", 20) + groupId := ginx.UrlParamInt64(c, "id") + + total, err := models.TaskTplTotal(groupId, query) + ginx.Dangerous(err) + + list, err := models.TaskTplGets(groupId, query, limit, ginx.Offset(c, limit)) + ginx.Dangerous(err) + + ginx.NewRender(c).Data(gin.H{ + "total": total, + "list": list, + }, nil) +} + +func taskTplGet(c *gin.Context) { + tid := ginx.UrlParamInt64(c, "tid") + + tpl, err := models.TaskTplGet("id = ?", tid) + ginx.Dangerous(err) + + if tpl == nil { + ginx.Bomb(404, "no such task template") + } + + hosts, err := tpl.Hosts() + + ginx.NewRender(c).Data(gin.H{ + "tpl": tpl, + "hosts": hosts, + }, nil) +} + +type taskTplForm struct { + Title string `json:"title" binding:"required"` + Batch int `json:"batch"` + Tolerance int `json:"tolerance"` + Timeout int `json:"timeout"` + Pause string `json:"pause"` + Script string `json:"script"` + Args string `json:"args"` + Tags []string `json:"tags"` + Account string `json:"account"` + Hosts []string `json:"hosts"` +} + +func taskTplAdd(c *gin.Context) { + var f taskTplForm + ginx.BindJSON(c, &f) + + user := c.MustGet("user").(*models.User) + now := time.Now().Unix() + + sort.Strings(f.Tags) + + tpl := &models.TaskTpl{ + GroupId: ginx.UrlParamInt64(c, "id"), + Title: f.Title, + Batch: f.Batch, + Tolerance: f.Tolerance, + Timeout: f.Timeout, + Pause: f.Pause, + Script: f.Script, + Args: f.Args, + Tags: strings.Join(f.Tags, " ") + " ", + Account: f.Account, + CreateBy: user.Username, + UpdateBy: user.Username, + CreateAt: now, + UpdateAt: now, + } + + ginx.NewRender(c).Message(tpl.Save(f.Hosts)) +} + +func taskTplPut(c *gin.Context) { + tid := ginx.UrlParamInt64(c, "tid") + + tpl, err := models.TaskTplGet("id = ?", tid) + ginx.Dangerous(err) + + if tpl == nil { + ginx.Dangerous("no such task template") + } + + user := c.MustGet("user").(*models.User) + + var f taskTplForm + ginx.BindJSON(c, &f) + + sort.Strings(f.Tags) + + tpl.Title = f.Title + tpl.Batch = f.Batch + tpl.Tolerance = f.Tolerance + tpl.Timeout = f.Timeout + tpl.Pause = f.Pause + tpl.Script = f.Script + tpl.Args = f.Args + tpl.Tags = strings.Join(f.Tags, " ") + " " + tpl.Account = f.Account + tpl.UpdateBy = user.Username + tpl.UpdateAt = time.Now().Unix() + + 
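// NOTE (editor): task template tags are stored as one space-joined string with a
// trailing blank ("tag1 tag2 "), both in taskTplAdd and here; presumably that lets
// membership be checked with a simple SQL LIKE on "tag " (the models package is not
// shown in this hunk, so treat that as an assumption).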
ginx.NewRender(c).Message(tpl.Update(f.Hosts)) +} + +func taskTplDel(c *gin.Context) { + tid := ginx.UrlParamInt64(c, "tid") + + tpl, err := models.TaskTplGet("id = ?", tid) + ginx.Dangerous(err) + + if tpl == nil { + ginx.NewRender(c).Message(nil) + return + } + + ginx.NewRender(c).Message(tpl.Del()) +} + +type tplTagsForm struct { + Ids []int64 `json:"ids" binding:"required"` + Tags []string `json:"tags" binding:"required"` +} + +func (f *tplTagsForm) Verify() { + if len(f.Ids) == 0 { + ginx.Bomb(http.StatusBadRequest, "arg(ids) empty") + } + + if len(f.Tags) == 0 { + ginx.Bomb(http.StatusBadRequest, "arg(tags) empty") + } + + newTags := make([]string, 0, len(f.Tags)) + for i := 0; i < len(f.Tags); i++ { + tag := strings.TrimSpace(f.Tags[i]) + if tag == "" { + continue + } + + if str.Dangerous(tag) { + ginx.Bomb(http.StatusBadRequest, "arg(tags) invalid") + } + + newTags = append(newTags, tag) + } + + f.Tags = newTags + if len(f.Tags) == 0 { + ginx.Bomb(http.StatusBadRequest, "arg(tags) empty") + } +} + +func taskTplBindTags(c *gin.Context) { + var f tplTagsForm + ginx.BindJSON(c, &f) + + username := c.MustGet("username").(string) + + for i := 0; i < len(f.Ids); i++ { + tpl, err := models.TaskTplGet("id = ?", f.Ids[i]) + ginx.Dangerous(err) + + if tpl == nil { + continue + } + + ginx.Dangerous(tpl.AddTags(f.Tags, username)) + } + + ginx.NewRender(c).Message(nil) +} + +func taskTplUnbindTags(c *gin.Context) { + var f tplTagsForm + ginx.BindJSON(c, &f) + + username := c.MustGet("username").(string) + + for i := 0; i < len(f.Ids); i++ { + tpl, err := models.TaskTplGet("id = ?", f.Ids[i]) + ginx.Dangerous(err) + + if tpl == nil { + continue + } + + ginx.Dangerous(tpl.DelTags(f.Tags, username)) + } + + ginx.NewRender(c).Message(nil) +} diff --git a/src/webapi/router/router_user.go b/src/webapi/router/router_user.go new file mode 100644 index 00000000..59bbfcdd --- /dev/null +++ b/src/webapi/router/router_user.go @@ -0,0 +1,132 @@ +package router + +import ( + "net/http" + "strings" + + "github.com/gin-gonic/gin" + "github.com/toolkits/pkg/ginx" + + "github.com/didi/nightingale/v5/src/models" + "github.com/didi/nightingale/v5/src/pkg/ormx" +) + +func userGets(c *gin.Context) { + limit := ginx.QueryInt(c, "limit", 20) + query := ginx.QueryStr(c, "query", "") + + total, err := models.UserTotal(query) + ginx.Dangerous(err) + + list, err := models.UserGets(query, limit, ginx.Offset(c, limit)) + ginx.Dangerous(err) + + user := c.MustGet("user").(*models.User) + + ginx.NewRender(c).Data(gin.H{ + "list": list, + "total": total, + "admin": user.IsAdmin(), + }, nil) +} + +type userAddForm struct { + Username string `json:"username" binding:"required"` + Password string `json:"password" binding:"required"` + Nickname string `json:"nickname"` + Phone string `json:"phone"` + Email string `json:"email"` + Portrait string `json:"portrait"` + Roles []string `json:"roles" binding:"required"` + Contacts ormx.JSONObj `json:"contacts"` +} + +func userAddPost(c *gin.Context) { + var f userAddForm + ginx.BindJSON(c, &f) + + password, err := models.CryptoPass(f.Password) + ginx.Dangerous(err) + + if len(f.Roles) == 0 { + ginx.Bomb(http.StatusBadRequest, "roles empty") + } + + user := c.MustGet("user").(*models.User) + + u := models.User{ + Username: f.Username, + Password: password, + Nickname: f.Nickname, + Phone: f.Phone, + Email: f.Email, + Portrait: f.Portrait, + Roles: strings.Join(f.Roles, " "), + Contacts: f.Contacts, + CreateBy: user.Username, + UpdateBy: user.Username, + } + + 
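// NOTE (editor): the plaintext password never reaches the model: it is hashed by
// models.CryptoPass above (implementation not shown in this hunk), roles are
// persisted as a space-joined string, and CreateBy/UpdateBy record the
// authenticated operator rather than the new user.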
ginx.NewRender(c).Message(u.Add()) +} + +func userProfileGet(c *gin.Context) { + user := User(ginx.UrlParamInt64(c, "id")) + ginx.NewRender(c).Data(user, nil) +} + +type userProfileForm struct { + Nickname string `json:"nickname"` + Phone string `json:"phone"` + Email string `json:"email"` + Roles []string `json:"roles"` + Contacts ormx.JSONObj `json:"contacts"` +} + +func userProfilePut(c *gin.Context) { + var f userProfileForm + ginx.BindJSON(c, &f) + + if len(f.Roles) == 0 { + ginx.Bomb(http.StatusBadRequest, "roles empty") + } + + target := User(ginx.UrlParamInt64(c, "id")) + target.Nickname = f.Nickname + target.Phone = f.Phone + target.Email = f.Email + target.Roles = strings.Join(f.Roles, " ") + target.Contacts = f.Contacts + target.UpdateBy = c.MustGet("username").(string) + + ginx.NewRender(c).Message(target.UpdateAllFields()) +} + +type userPasswordForm struct { + Password string `json:"password" binding:"required"` +} + +func userPasswordPut(c *gin.Context) { + var f userPasswordForm + ginx.BindJSON(c, &f) + + target := User(ginx.UrlParamInt64(c, "id")) + + cryptoPass, err := models.CryptoPass(f.Password) + ginx.Dangerous(err) + + ginx.NewRender(c).Message(target.UpdatePassword(cryptoPass, c.MustGet("username").(string))) +} + +func userDel(c *gin.Context) { + id := ginx.UrlParamInt64(c, "id") + target, err := models.UserGetById(id) + ginx.Dangerous(err) + + if target == nil { + ginx.NewRender(c).Message(nil) + return + } + + ginx.NewRender(c).Message(target.Del()) +} diff --git a/src/webapi/router/router_user_group.go b/src/webapi/router/router_user_group.go new file mode 100644 index 00000000..bc2ec58f --- /dev/null +++ b/src/webapi/router/router_user_group.go @@ -0,0 +1,127 @@ +package router + +import ( + "net/http" + "time" + + "github.com/gin-gonic/gin" + "github.com/toolkits/pkg/ginx" + + "github.com/didi/nightingale/v5/src/models" +) + +// Return all, front-end search and paging +// I'm creator or member +func userGroupGets(c *gin.Context) { + me := c.MustGet("user").(*models.User) + lst, err := models.GroupsOf(me) + ginx.NewRender(c).Data(lst, err) +} + +type userGroupForm struct { + Name string `json:"name" binding:"required"` + Note string `json:"note"` +} + +func userGroupAdd(c *gin.Context) { + var f userGroupForm + ginx.BindJSON(c, &f) + + me := c.MustGet("user").(*models.User) + + ug := models.UserGroup{ + Name: f.Name, + Note: f.Note, + CreateBy: me.Username, + UpdateBy: me.Username, + } + + err := ug.Add() + if err == nil { + // Even failure is not a big deal + models.UserGroupMemberAdd(ug.Id, me.Id) + } + + ginx.NewRender(c).Data(ug.Id, err) +} + +func userGroupPut(c *gin.Context) { + var f userGroupForm + ginx.BindJSON(c, &f) + + me := c.MustGet("user").(*models.User) + ug := c.MustGet("user_group").(*models.UserGroup) + + if ug.Name != f.Name { + // name changed, check duplication + num, err := models.UserGroupCount("name=? 
and id<>?", f.Name, ug.Id) + ginx.Dangerous(err) + + if num > 0 { + ginx.Bomb(http.StatusOK, "UserGroup already exists") + } + } + + ug.Name = f.Name + ug.Note = f.Note + ug.UpdateBy = me.Username + ug.UpdateAt = time.Now().Unix() + + ginx.NewRender(c).Message(ug.Update("Name", "Note", "UpdateAt", "UpdateBy")) +} + +// Return all members, front-end search and paging +func userGroupGet(c *gin.Context) { + ug := UserGroup(ginx.UrlParamInt64(c, "id")) + + ids, err := models.MemberIds(ug.Id) + ginx.Dangerous(err) + + users, err := models.UserGetsByIds(ids) + + ginx.NewRender(c).Data(gin.H{ + "users": users, + "user_group": ug, + }, err) +} + +func userGroupDel(c *gin.Context) { + ug := c.MustGet("user_group").(*models.UserGroup) + ginx.NewRender(c).Message(ug.Del()) +} + +func userGroupMemberAdd(c *gin.Context) { + var f idsForm + ginx.BindJSON(c, &f) + f.Verify() + + me := c.MustGet("user").(*models.User) + ug := c.MustGet("user_group").(*models.UserGroup) + + err := ug.AddMembers(f.Ids) + if err == nil { + ug.UpdateAt = time.Now().Unix() + ug.UpdateBy = me.Username + ug.Update("UpdateAt", "UpdateBy") + } + + ginx.NewRender(c).Message(err) +} + +func userGroupMemberDel(c *gin.Context) { + var f idsForm + ginx.BindJSON(c, &f) + f.Verify() + + me := c.MustGet("user").(*models.User) + ug := c.MustGet("user_group").(*models.UserGroup) + + err := ug.DelMembers(f.Ids) + if err == nil { + ug.UpdateAt = time.Now().Unix() + ug.UpdateBy = me.Username + ug.Update("UpdateAt", "UpdateBy") + } + + ginx.NewRender(c).Message(err) +} diff --git a/src/webapi/stat/stat.go b/src/webapi/stat/stat.go new file mode 100644 index 00000000..52d539c6 --- /dev/null +++ b/src/webapi/stat/stat.go @@ -0,0 +1,53 @@ +package stat + +import ( + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +const Service = "n9e-webapi" + +var ( + labels = []string{"service", "code", "path", "method"} + + uptime = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "uptime", + Help: "HTTP service uptime.", + }, []string{"service"}, + ) + + RequestCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "http_request_count_total", + Help: "Total number of HTTP requests made.", + }, labels, + ) + + RequestDuration = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Buckets: []float64{.01, .1, 1, 10}, + Name: "http_request_duration_seconds", + Help: "HTTP request latencies in seconds.", + }, labels, + ) +) + +func Init() { + // Register the summary and the histogram with Prometheus's default registry. + prometheus.MustRegister( + uptime, + RequestCounter, + RequestDuration, + ) + + go recordUptime() +} + +// recordUptime increases service uptime per second. 
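// NOTE (editor): these collectors are registered with the Prometheus default
// registry via prometheus.MustRegister in Init(); the HTTP middleware that fills
// RequestCounter/RequestDuration and the /metrics handler are assumed to live
// elsewhere in the webapi router and are not part of this hunk. A typical
// observation would look like (hypothetical sketch, label order "service, code,
// path, method"):
//   RequestDuration.WithLabelValues(Service, code, path, method).Observe(seconds)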
+func recordUptime() { + for range time.Tick(time.Second) { + uptime.WithLabelValues(Service).Inc() + } +} diff --git a/src/webapi/webapi.go b/src/webapi/webapi.go new file mode 100644 index 00000000..765278d2 --- /dev/null +++ b/src/webapi/webapi.go @@ -0,0 +1,134 @@ +package webapi + +import ( + "fmt" + "os" + "os/signal" + "path/filepath" + "syscall" + + "github.com/toolkits/pkg/i18n" + + "github.com/didi/nightingale/v5/src/models" + "github.com/didi/nightingale/v5/src/pkg/httpx" + "github.com/didi/nightingale/v5/src/pkg/ldapx" + "github.com/didi/nightingale/v5/src/pkg/logx" + "github.com/didi/nightingale/v5/src/storage" + "github.com/didi/nightingale/v5/src/webapi/config" + "github.com/didi/nightingale/v5/src/webapi/prom" + "github.com/didi/nightingale/v5/src/webapi/router" + "github.com/didi/nightingale/v5/src/webapi/stat" +) + +type Webapi struct { + ConfigFile string + Version string +} + +type WebapiOption func(*Webapi) + +func SetConfigFile(f string) WebapiOption { + return func(s *Webapi) { + s.ConfigFile = f + } +} + +func SetVersion(v string) WebapiOption { + return func(s *Webapi) { + s.Version = v + } +} + +// Run run webapi +func Run(opts ...WebapiOption) { + code := 1 + sc := make(chan os.Signal, 1) + signal.Notify(sc, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) + + webapi := Webapi{ + ConfigFile: filepath.Join("etc", "webapi.conf"), + Version: "not specified", + } + + for _, opt := range opts { + opt(&webapi) + } + + cleanFunc, err := webapi.initialize() + if err != nil { + fmt.Println("webapi init fail:", err) + os.Exit(code) + } + +EXIT: + for { + sig := <-sc + fmt.Println("received signal:", sig.String()) + switch sig { + case syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGINT: + code = 0 + break EXIT + case syscall.SIGHUP: + // reload configuration? 
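// NOTE (editor): signal semantics here: SIGTERM/SIGINT/SIGQUIT exit with status 0,
// SIGHUP is currently a no-op (config reload is not implemented, the loop just
// waits for the next signal), and any other delivered signal falls through to
// default and exits with the initial status 1.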
+ default: + break EXIT + } + } + + cleanFunc() + fmt.Println("webapi exited") + os.Exit(code) +} + +func (a Webapi) initialize() (func(), error) { + // parse config file + config.MustLoad(a.ConfigFile) + + // init i18n + i18n.Init(config.C.I18N) + + // init ldap + ldapx.Init(config.C.LDAP) + + // init logger + loggerClean, err := logx.Init(config.C.Log) + if err != nil { + return nil, err + } + + // init database + if err = storage.InitDB(storage.DBConfig{ + Gorm: config.C.Gorm, + MySQL: config.C.MySQL, + Postgres: config.C.Postgres, + }); err != nil { + return nil, err + } + + // init redis + redisClean, err := storage.InitRedis(config.C.Redis) + if err != nil { + return nil, err + } + + models.InitSalt() + models.InitRoot() + + // init prometheus proxy config + if err = prom.Init(config.C.Clusters); err != nil { + return nil, err + } + + stat.Init() + + // init http server + r := router.New(a.Version) + httpClean := httpx.Init(config.C.HTTP, r) + + // release all the resources + return func() { + loggerClean() + httpClean() + redisClean() + }, nil +} diff --git a/timer/alert_mute.go b/timer/alert_mute.go deleted file mode 100644 index 9098e956..00000000 --- a/timer/alert_mute.go +++ /dev/null @@ -1,73 +0,0 @@ -package timer - -import ( - "fmt" - "math/rand" - "time" - - "github.com/didi/nightingale/v5/cache" - "github.com/didi/nightingale/v5/models" - - "github.com/toolkits/pkg/logger" -) - -func SyncAlertMutes() { - if err := syncAlertMutes(); err != nil { - fmt.Println("timer: sync alert mutes fail:", err) - exit(1) - } - - go loopSyncAlertMutes() -} - -func loopSyncAlertMutes() { - randtime := rand.Intn(9000) - fmt.Printf("timer: sync alert mutes: random sleep %dms\n", randtime) - time.Sleep(time.Duration(randtime) * time.Millisecond) - - for { - time.Sleep(time.Second * time.Duration(9)) - if err := syncAlertMutes(); err != nil { - logger.Warning("timer: sync alert mutes fail:", err) - } - } -} - -func syncAlertMutes() error { - start := time.Now() - - err := models.MuteCleanExpire() - if err != nil { - logger.Errorf("clean expire mute fail, err: %v", err) - return err - } - - mutes, err := models.MuteGetsAll() - if err != nil { - logger.Errorf("get AlertMute fail, err: %v", err) - return err - } - - // key: metric - // value: ResFilters#TagsFilters - muteMap := make(map[string][]cache.Filter) - for i := 0; i < len(mutes); i++ { - if err := mutes[i].Parse(); err != nil { - logger.Warning("parse mute fail:", err) - continue - } - - filter := cache.Filter{ - ResReg: mutes[i].ResRegexp, - TagsMap: mutes[i].TagsMap, - ClasspathPrefix: mutes[i].ClasspathPrefix, - } - - muteMap[mutes[i].Metric] = append(muteMap[mutes[i].Metric], filter) - } - - cache.AlertMute.SetAll(muteMap) - logger.Debugf("timer: sync alert mutes done, cost: %dms", time.Since(start).Milliseconds()) - - return nil -} diff --git a/timer/alert_rule.go b/timer/alert_rule.go deleted file mode 100644 index 7002c0c7..00000000 --- a/timer/alert_rule.go +++ /dev/null @@ -1,90 +0,0 @@ -package timer - -import ( - "fmt" - "math/rand" - "time" - - "github.com/didi/nightingale/v5/cache" - "github.com/didi/nightingale/v5/models" - - "github.com/toolkits/pkg/logger" -) - -func SyncAlertRules() { - if err := syncAlertRules(); err != nil { - fmt.Println(err) - exit(1) - } - - go loopSyncAlertRules() -} - -func loopSyncAlertRules() { - randtime := rand.Intn(9000) - fmt.Printf("timer: sync alert rules: random sleep %dms\n", randtime) - time.Sleep(time.Duration(randtime) * time.Millisecond) - - interval := time.Duration(9) * time.Second - 
- for { - time.Sleep(interval) - if err := syncAlertRules(); err != nil { - logger.Warning(err) - } - } -} - -func syncAlertRules() error { - start := time.Now() - - // 上次同步的时候同步了多少条rule,数据库中最近的更新时间,如果这俩信息都没变,说明DB中数据没变 - // 数据库中数据没变,那就不用再做操作了 - lastMaxUpdateTs := cache.AlertRulesByMetric.MaxUpdateTs - ruleNum := cache.AlertRulesByMetric.RuleNum - - statistic, err := models.GetAlertRuleStatistic() - if err != nil { - return fmt.Errorf("sync alertRules getAlertRuleStatistics err: %v", err) - } - - if statistic.Count == ruleNum && statistic.MaxUpdateAt == lastMaxUpdateTs { - lastMaxUpdateStr := time.Unix(lastMaxUpdateTs, 0).Format("2006-01-02 15:04:05") - logger.Debugf("[no_change_not_sync][LastUpdateAt:%+v][ruleNum:%+v]:", lastMaxUpdateStr, ruleNum) - return nil - } - - // 数据库中的记录和上次拉取的数据相比,发生变化,重新从数据库拉取最新数据 - logger.Debugf("[alert_rule_change_start_sync][last_num:%d this_num:%d][last_max_update_ts:%d this_max_update_ts:%d]:", - ruleNum, - statistic.Count, - lastMaxUpdateTs, - statistic.MaxUpdateAt) - - alertRules, err := models.AllAlertRules() - alertRulesMap := make(map[int64]*models.AlertRule) - - if err != nil { - return fmt.Errorf("sync alertRules [type=all] err: %v", err) - } - - metricAlertRulesMap := make(map[string][]*models.AlertRule) - for i := range alertRules { - if err := alertRules[i].Decode(); err != nil { - // 单个rule无法decode,直接忽略继续处理别的,等后面用户修复好了,数据库last_update信息变化,这里自然能感知 - logger.Warningf("syncAlertRule %v err:%v", alertRules[i], err) - continue - } - - alertRulesMap[alertRules[i].Id] = alertRules[i] - if alertRules[i].Type == models.PUSH { - metricAlertRulesMap[alertRules[i].FirstMetric] = append(metricAlertRulesMap[alertRules[i].FirstMetric], alertRules[i]) - } - } - - cache.AlertRules.SetAll(alertRulesMap) - cache.AlertRulesByMetric.SetAll(metricAlertRulesMap, statistic.MaxUpdateAt, statistic.Count, start.UnixNano()) - logger.Infof("[timer] sync alert rules done, found %d records, cost: %dms", statistic.Count, time.Since(start).Milliseconds()) - - return nil -} diff --git a/timer/cleaner.go b/timer/cleaner.go deleted file mode 100644 index a9d0afbb..00000000 --- a/timer/cleaner.go +++ /dev/null @@ -1,62 +0,0 @@ -package timer - -import ( - "fmt" - "math/rand" - "time" - - "github.com/toolkits/pkg/logger" - - "github.com/didi/nightingale/v5/models" -) - -// CleanExpireMute 清理过期的告警屏蔽 -// 1. mute表:如果屏蔽结束时间小于当前时间,说明已经过了屏蔽时间了,这条屏蔽记录就可以被干掉 -// 2. 
resource表:也有个屏蔽结束时间,需要和mute表做相同的判断和清理逻辑 -func CleanExpireMute() { - go loopCleanExpireMute() -} - -func loopCleanExpireMute() { - randtime := rand.Intn(2000) - fmt.Printf("timer: clean expire mute: random sleep %dms\n", randtime) - time.Sleep(time.Duration(randtime) * time.Millisecond) - - interval := time.Duration(10) * time.Second - - for { - time.Sleep(interval) - cleanExpireMute() - } -} - -func cleanExpireMute() { - err := models.MuteCleanExpire() - if err != nil { - logger.Warningf("MuteCleanExpire fail: %v", err) - } -} - -func CleanExpireResource() { - go loopCleanExpireResource() -} - -func loopCleanExpireResource() { - randtime := rand.Intn(2000) - fmt.Printf("timer: clean expire resource: random sleep %dms\n", randtime) - time.Sleep(time.Duration(randtime) * time.Millisecond) - - interval := time.Duration(10) * time.Second - - for { - time.Sleep(interval) - cleanExpireResource() - } -} - -func cleanExpireResource() { - err := models.ResourceCleanExpire() - if err != nil { - logger.Warningf("ResourceCleanExpire fail: %v", err) - } -} diff --git a/timer/collect_rule.go b/timer/collect_rule.go deleted file mode 100644 index 26485567..00000000 --- a/timer/collect_rule.go +++ /dev/null @@ -1,299 +0,0 @@ -package timer - -import ( - "encoding/json" - "fmt" - "math/rand" - "strings" - "time" - - "github.com/didi/nightingale/v5/cache" - "github.com/didi/nightingale/v5/models" - - "github.com/toolkits/pkg/logger" -) - -func SyncCollectRules() { - err := syncCollectRules() - if err != nil { - fmt.Println("timer: sync collect rules fail:", err) - exit(1) - } - - go loopSyncCollectRules() -} - -func loopSyncCollectRules() { - randtime := rand.Intn(10000) - fmt.Printf("timer: sync collect rules: random sleep %dms\n", randtime) - time.Sleep(time.Duration(randtime) * time.Millisecond) - - interval := time.Duration(60) * time.Second - - for { - time.Sleep(interval) - err := syncCollectRules() - if err != nil { - logger.Warning("timer: sync collect rules fail:", err) - } - } -} - -func syncCollectRules() error { - start := time.Now() - - collectRules, err := models.CollectRuleGetAll() - if err != nil { - return err - } - - // ident -> collect_rule1, collect_rule2 ... 
- collectRulesMap := make(map[string][]*models.CollectRule) - // classpath prefix -> classpaths - prefixClasspath := make(map[string][]models.Classpath) - - for i := range collectRules { - classpathAndRes, exists := cache.ClasspathRes.Get(collectRules[i].ClasspathId) - if !exists { - continue - } - - err := changeCollectRule(collectRules[i]) - if err != nil { - logger.Errorf("change collect:%+v err:%v", collectRules[i], err) - continue - } - - if collectRules[i].PrefixMatch == 0 { - // 我这个采集规则所关联的节点下面直接挂载的那些资源,都关联本采集规则 - for _, ident := range classpathAndRes.Res { - if _, exists := collectRulesMap[ident]; !exists { - collectRulesMap[ident] = []*models.CollectRule{collectRules[i]} - } else { - collectRulesMap[ident] = append(collectRulesMap[ident], collectRules[i]) - } - } - } else { - // 我这个采集规则关联的节点下面的所有的子节点,这个计算量有点大,可能是个问题 - cps, exists := prefixClasspath[classpathAndRes.Classpath.Path] - if !exists { - cps, err = models.ClasspathGetsByPrefix(classpathAndRes.Classpath.Path) - if err != nil { - logger.Errorf("collectRule %+v get classpath err:%v", collectRules[i], err) - continue - } - prefixClasspath[classpathAndRes.Classpath.Path] = cps - } - - for j := range cps { - classpathAndRes, exists := cache.ClasspathRes.Get(cps[j].Id) - if !exists { - continue - } - - for _, ident := range classpathAndRes.Res { - if _, exists := collectRulesMap[ident]; !exists { - collectRulesMap[ident] = []*models.CollectRule{collectRules[i]} - } else { - collectRulesMap[ident] = append(collectRulesMap[ident], collectRules[i]) - } - } - } - } - - } - - cache.CollectRulesOfIdent.SetAll(collectRulesMap) - logger.Debugf("timer: sync collect rules done, cost: %dms", time.Since(start).Milliseconds()) - - return nil -} - -// 将服务端collect rule转换为agent需要的格式 -func changeCollectRule(rule *models.CollectRule) error { - switch rule.Type { - case "port": - var conf models.PortConfig - err := json.Unmarshal([]byte(rule.Data), &conf) - if err != nil { - return err - } - - tags := strings.Fields(rule.AppendTags) - for i := 0; i < len(tags); i++ { - tags[i] = strings.Replace(tags[i], "=", ":", 1) - } - - config := PortCollectFormat{ - Instances: []struct { - MinCollectionInterval int `json:"min_collection_interval,omitempty"` - Tags []string `json:"tags,omitempty"` - Protocol string `json:"protocol" description:"udp or tcp"` - Port int `json:"port"` - Timeout int `json:"timeout"` - }{{ - MinCollectionInterval: rule.Step, - Tags: tags, - Protocol: conf.Protocol, - Port: conf.Port, - Timeout: conf.Timeout, - }}, - } - - data, err := json.Marshal(config) - if err != nil { - return err - } - rule.Data = string(data) - - case "script": - var conf models.ScriptConfig - err := json.Unmarshal([]byte(rule.Data), &conf) - if err != nil { - return err - } - - tags := strings.Fields(rule.AppendTags) - for i := 0; i < len(tags); i++ { - tags[i] = strings.Replace(tags[i], "=", ":", 1) - } - - config := ScriptCollectFormat{ - Instances: []struct { - MinCollectionInterval int `json:"min_collection_interval,omitempty"` - FilePath string `json:"file_path"` - Root string `json:"root"` - Params string `json:"params"` - Env map[string]string `json:"env"` - Stdin string `json:"stdin"` - Timeout int `json:"timeout"` - Tags []string `json:"tags,omitempty"` - }{{ - MinCollectionInterval: rule.Step, - FilePath: conf.Path, - Params: conf.Params, - Env: conf.Env, - Stdin: conf.Stdin, - Timeout: conf.Timeout, - Tags: tags, - }}, - } - - data, err := json.Marshal(config) - if err != nil { - return err - } - rule.Data = string(data) - case "log": - var conf 
models.LogConfig - err := json.Unmarshal([]byte(rule.Data), &conf) - if err != nil { - return err - } - - tags := strings.Fields(rule.AppendTags) - for i := 0; i < len(tags); i++ { - tags[i] = strings.Replace(tags[i], "=", ":", 1) - } - - config := LogCollectFormat{ - Instances: []struct { - MetricName string `json:"metric_name"` // - FilePath string `json:"file_path"` - Pattern string `json:"pattern"` - TagsPattern map[string]string `json:"tags_pattern"` - Func string `json:"func"` - Tags []string `json:"tags,omitempty"` - }{{ - MetricName: rule.Name, - FilePath: conf.FilePath, - Pattern: conf.Pattern, - TagsPattern: conf.TagsPattern, - Func: conf.Func, - Tags: tags, - }}, - } - - data, err := json.Marshal(config) - if err != nil { - return err - } - rule.Data = string(data) - case "process": - var conf models.ProcConfig - err := json.Unmarshal([]byte(rule.Data), &conf) - if err != nil { - return err - } - - tags := strings.Fields(rule.AppendTags) - for i := 0; i < len(tags); i++ { - tags[i] = strings.Replace(tags[i], "=", ":", 1) - } - - config := ProcCollectFormat{ - Instances: []struct { - MinCollectionInterval int `json:"min_collection_interval,omitempty"` - Tags []string `json:"tags,omitempty"` - Target string `json:"target"` - CollectMethod string `json:"collect_method" description:"name or cmdline"` - }{{ - MinCollectionInterval: rule.Step, - Tags: tags, - Target: conf.Param, - CollectMethod: conf.Method, - }}, - } - - data, err := json.Marshal(config) - if err != nil { - return err - } - rule.Data = string(data) - } - - return nil -} - -type ScriptCollectFormat struct { - Instances []struct { - MinCollectionInterval int `json:"min_collection_interval,omitempty"` - FilePath string `json:"file_path"` - Root string `json:"root"` - Params string `json:"params"` - Env map[string]string `json:"env"` - Stdin string `json:"stdin"` - Timeout int `json:"timeout"` - Tags []string `json:"tags,omitempty"` - } `json:"instances"` -} - -type PortCollectFormat struct { - Instances []struct { - MinCollectionInterval int `json:"min_collection_interval,omitempty"` - Tags []string `json:"tags,omitempty"` - Protocol string `json:"protocol" description:"udp or tcp"` - Port int `json:"port"` - Timeout int `json:"timeout"` - } `json:"instances"` -} - -type LogCollectFormat struct { - Instances []struct { - MetricName string `json:"metric_name"` // - FilePath string `json:"file_path"` // - Pattern string `json:"pattern"` // - TagsPattern map[string]string `json:"tags_pattern"` // - Func string `json:"func"` // count(c), histogram(h) - Tags []string `json:"tags,omitempty"` - } `json:"instances"` -} - -type ProcCollectFormat struct { - Instances []struct { - MinCollectionInterval int `json:"min_collection_interval,omitempty"` - Tags []string `json:"tags,omitempty"` - Target string `json:"target"` - CollectMethod string `json:"collect_method" description:"name or cmdline"` - } `json:"instances"` -} diff --git a/timer/metric_alias.go b/timer/metric_alias.go deleted file mode 100644 index cff2886c..00000000 --- a/timer/metric_alias.go +++ /dev/null @@ -1,53 +0,0 @@ -package timer - -import ( - "fmt" - "math/rand" - "time" - - "github.com/didi/nightingale/v5/cache" - "github.com/didi/nightingale/v5/models" - "github.com/toolkits/pkg/logger" -) - -func SyncMetricDesc() { - if err := syncMetricDesc(); err != nil { - fmt.Println("timer: sync metric desc fail:", err) - exit(1) - } - - go loopSyncMetricDesc() -} - -func loopSyncMetricDesc() { - randtime := rand.Intn(30000) - fmt.Printf("timer: sync metric desc: 
random sleep %dms\n", randtime) - time.Sleep(time.Duration(randtime) * time.Millisecond) - - for { - time.Sleep(time.Second * time.Duration(30)) - if err := syncMetricDesc(); err != nil { - logger.Warning("timer: sync metric desc fail:", err) - } - } -} - -func syncMetricDesc() error { - start := time.Now() - - metricDescs, err := models.MetricDescriptionGetAll() - if err != nil { - logger.Error("MetricDescriptionGetAll err:", err) - return err - } - - metricDescMap := make(map[string]interface{}) - for _, m := range metricDescs { - metricDescMap[m.Metric] = m.Description - } - - cache.MetricDescMapper.Clear() - cache.MetricDescMapper.MSet(metricDescMap) - logger.Debugf("timer: sync metric desc done, cost: %dms", time.Since(start).Milliseconds()) - return nil -} diff --git a/timer/orphan_res.go b/timer/orphan_res.go deleted file mode 100644 index 765fe909..00000000 --- a/timer/orphan_res.go +++ /dev/null @@ -1,28 +0,0 @@ -package timer - -import ( - "fmt" - "math/rand" - "time" - - "github.com/didi/nightingale/v5/models" -) - -// 是个兜底扫描器,担心有些resource脱离id为1的preset的classpath -// 如果有发现,就把resource重新bind回来 -func BindOrphanRes() { - go loopBindOrphanRes() -} - -func loopBindOrphanRes() { - randtime := rand.Intn(10000) - fmt.Printf("timer: bind orphan res: random sleep %dms\n", randtime) - time.Sleep(time.Duration(randtime) * time.Millisecond) - - interval := time.Duration(10) * time.Second - - for { - time.Sleep(interval) - models.BindOrphanToPresetClasspath() - } -} diff --git a/timer/res_classpath.go b/timer/res_classpath.go deleted file mode 100644 index 9565680e..00000000 --- a/timer/res_classpath.go +++ /dev/null @@ -1,97 +0,0 @@ -package timer - -import ( - "fmt" - "math/rand" - "time" - - "github.com/didi/nightingale/v5/cache" - "github.com/didi/nightingale/v5/models" - "github.com/toolkits/pkg/logger" -) - -func SyncClasspathReses() { - err := syncClasspathReses() - if err != nil { - fmt.Println("timer: sync classpath resources fail:", err) - exit(1) - } - - go loopSyncResClasspath() -} - -func loopSyncResClasspath() { - randtime := rand.Intn(10000) - fmt.Printf("timer: sync classpath resources: random sleep %dms\n", randtime) - time.Sleep(time.Duration(randtime) * time.Millisecond) - - interval := time.Duration(60) * time.Second - - for { - time.Sleep(interval) - - err := syncClasspathReses() - if err != nil { - logger.Warning("timer: sync classpath resources fail:", err) - } - } -} - -func syncClasspathReses() error { - start := time.Now() - - classpaths, err := models.ClasspathGetAll() - if err != nil { - return err - } - - // classpath_id -> classpath - classpathMap := make(map[int64]*models.Classpath) - for i := range classpaths { - classpathMap[classpaths[i].Id] = &classpaths[i] - } - - classpathResource, err := models.ClasspathResourceGetAll() - if err != nil { - return err - } - - // classpath_id -> ident1, ident2 ... - classpathRes := make(map[int64]*cache.ClasspathAndRes) - - // ident -> classpath1, classpath2 ... 
- resClasspath := make(map[string]map[string]struct{}) - - for _, cr := range classpathResource { - c, has := classpathMap[cr.ClasspathId] - if !has { - // 理论上不会走到这里,只是做个防御 - continue - } - - classpathAndRes, exists := classpathRes[cr.ClasspathId] - if !exists { - classpathRes[cr.ClasspathId] = &cache.ClasspathAndRes{ - Res: []string{cr.ResIdent}, - Classpath: c, - } - } else { - classpathAndRes.Res = append(classpathAndRes.Res, cr.ResIdent) - } - - cset, exists := resClasspath[cr.ResIdent] - if !exists { - resClasspath[cr.ResIdent] = map[string]struct{}{ - c.Path: {}, - } - } else { - cset[c.Path] = struct{}{} - } - } - - cache.ClasspathRes.SetAll(classpathRes) - cache.ResClasspath.SetAll(resClasspath) - logger.Debugf("timer: sync classpath resources done, cost: %dms", time.Since(start).Milliseconds()) - - return nil -} diff --git a/timer/res_tags.go b/timer/res_tags.go deleted file mode 100644 index 795797d2..00000000 --- a/timer/res_tags.go +++ /dev/null @@ -1,76 +0,0 @@ -package timer - -import ( - "fmt" - "math/rand" - "strings" - "time" - - "github.com/didi/nightingale/v5/cache" - "github.com/didi/nightingale/v5/models" - - "github.com/toolkits/pkg/logger" -) - -// 从数据库同步资源表的信息,组成res_ident->res_tags结构, -// 监控数据上报时,会根据ident找到资源标签,附到监控数据的标签里 -func SyncResourceTags() { - err := syncResourceTags() - if err != nil { - fmt.Println("timer: sync res tags fail:", err) - exit(1) - } - - go loopSyncResourceTags() -} - -func loopSyncResourceTags() { - randtime := rand.Intn(9000) - fmt.Printf("timer: sync res tags: random sleep %dms\n", randtime) - time.Sleep(time.Duration(randtime) * time.Millisecond) - - for { - time.Sleep(time.Second * time.Duration(9)) - err := syncResourceTags() - if err != nil { - logger.Warning("timer: sync res tags fail:", err) - } - } -} - -func syncResourceTags() error { - start := time.Now() - - resources, err := models.ResourceGetAll() - if err != nil { - return err - } - - resTagsMap := make(map[string]cache.ResourceAndTags) - for i := 0; i < len(resources); i++ { - resAndTags := cache.ResourceAndTags{ - Resource: resources[i], - } - - tagslst := strings.Fields(resources[i].Tags) - count := len(tagslst) - if count != 0 { - tagsmap := make(map[string]string, count) - for i := 0; i < count; i++ { - arr := strings.Split(tagslst[i], "=") - if len(arr) != 2 { - continue - } - tagsmap[arr[0]] = arr[1] - } - resAndTags.Tags = tagsmap - } - - resTagsMap[resources[i].Ident] = resAndTags - } - - cache.ResTags.SetAll(resTagsMap) - logger.Debugf("timer: sync res tags done, cost: %dms", time.Since(start).Milliseconds()) - - return nil -} diff --git a/timer/timer.go b/timer/timer.go deleted file mode 100644 index 56c02ae7..00000000 --- a/timer/timer.go +++ /dev/null @@ -1,12 +0,0 @@ -package timer - -import ( - "os" - - "github.com/toolkits/pkg/logger" -) - -func exit(code int) { - logger.Close() - os.Exit(code) -} diff --git a/timer/updater.go b/timer/updater.go deleted file mode 100644 index 32483697..00000000 --- a/timer/updater.go +++ /dev/null @@ -1,43 +0,0 @@ -package timer - -import ( - "fmt" - "math/rand" - "time" - - "github.com/toolkits/pkg/logger" - - "github.com/didi/nightingale/v5/models" -) - -// UpdateAlias 对于上报的监控数据,会缓存在内存里,然后周期性更新其alias -// 主要是性能考虑,要不然每秒上报千万条监控指标,每条都去更新alias耗时太久 -// server是无状态的,对于某个ident,如果刚开始上报alias1到server1,后来上报alias2到server2 -// 如果server1和server2同时去更新数据库,可能会造成混乱,一会是alias1,一会是alias2 -// 所以,models.UpdateAlias中做了一个逻辑,先清空了15s之前的数据,这样就可以保证只需要更新新数据即可 -// go进程中的AliasMapper这个变量,在进程运行时间久了之后不知道是否会让内存持续增长而不释放 -// 如果真的出现这个问题,可能要考虑把这个变量的存储放到redis之类的KV中 
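The comment above boils down to a cache-and-flush pattern: aliases reported alongside metric points are staged in memory and periodically reconciled with the database, and only entries touched within the last 15 seconds are written back, so two stateless servers cannot fight over a stale value. A minimal standalone sketch of that idea follows; the type and function names are illustrative only, not the real cache/models API.

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// aliasEntry mirrors the idea of models.AliasTime: the alias plus the
// unix time it was last reported.
type aliasEntry struct {
	Alias string
	Time  int64
}

// aliasCache is a hypothetical stand-in for the in-memory alias mapper.
type aliasCache struct {
	sync.Mutex
	data map[string]aliasEntry
}

func (c *aliasCache) set(ident, alias string, now int64) {
	c.Lock()
	c.data[ident] = aliasEntry{Alias: alias, Time: now}
	c.Unlock()
}

// flush emulates the "only keep the last 15s" rule: stale entries are
// dropped instead of being written back, so an old alias reported to
// another server cannot overwrite a newer one, and memory is released.
func (c *aliasCache) flush(now int64, persist func(ident, alias string)) {
	c.Lock()
	defer c.Unlock()
	for ident, e := range c.data {
		if now-e.Time > 15 {
			delete(c.data, ident) // stale: skip and free the entry
			continue
		}
		persist(ident, e.Alias)
	}
}

func main() {
	cache := &aliasCache{data: make(map[string]aliasEntry)}
	now := time.Now().Unix()
	cache.set("host-a", "web-01", now)
	cache.set("host-b", "db-01", now-60) // too old, will be skipped

	cache.flush(now, func(ident, alias string) {
		fmt.Printf("would UPDATE resource SET alias=%q WHERE ident=%q\n", alias, ident)
	})
}
```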
-func UpdateAlias() { - go loopUpdateAlias() -} - -func loopUpdateAlias() { - randtime := rand.Intn(2000) - fmt.Printf("timer: update alias: random sleep %dms\n", randtime) - time.Sleep(time.Duration(randtime) * time.Millisecond) - - // 5s跑一次,只会使用最近15s有过更新的数据,在models.UpdateAlias有15s的清理逻辑 - interval := time.Duration(5) * time.Second - - for { - time.Sleep(interval) - updateAlias() - } -} - -func updateAlias() { - err := models.UpdateAlias() - if err != nil { - logger.Warningf("UpdateAlias fail: %v", err) - } -} diff --git a/timer/user.go b/timer/user.go deleted file mode 100644 index 379f2930..00000000 --- a/timer/user.go +++ /dev/null @@ -1,57 +0,0 @@ -package timer - -import ( - "fmt" - "math/rand" - "time" - - "github.com/didi/nightingale/v5/cache" - "github.com/didi/nightingale/v5/models" - - "github.com/toolkits/pkg/logger" -) - -// userid->user 将数据库中的用户信息缓存在内存里, -// 在生成告警事件的时候,根据用户ID快速找到用户的详情 -func SyncUsers() { - err := syncUsers() - if err != nil { - fmt.Println("timer: sync users fail:", err) - exit(1) - } - - go loopSyncUsers() -} - -func loopSyncUsers() { - randtime := rand.Intn(9000) - fmt.Printf("timer: sync users: random sleep %dms\n", randtime) - time.Sleep(time.Duration(randtime) * time.Millisecond) - - for { - time.Sleep(time.Second * time.Duration(9)) - err := syncUsers() - if err != nil { - logger.Warning("timer: sync users fail:", err) - } - } -} - -func syncUsers() error { - start := time.Now() - - users, err := models.UserGetAll() - if err != nil { - return err - } - - usersMap := make(map[int64]*models.User) - for i := range users { - usersMap[users[i].Id] = &users[i] - } - - cache.UserCache.SetAll(usersMap) - logger.Debugf("timer: sync users done, cost: %dms", time.Since(start).Milliseconds()) - - return nil -} diff --git a/timer/user_group.go b/timer/user_group.go deleted file mode 100644 index 89614f94..00000000 --- a/timer/user_group.go +++ /dev/null @@ -1,57 +0,0 @@ -package timer - -import ( - "fmt" - "math/rand" - "time" - - "github.com/didi/nightingale/v5/cache" - "github.com/didi/nightingale/v5/models" - - "github.com/toolkits/pkg/logger" -) - -// user_group_id->user_group 将数据库中的用户信息缓存在内存里, -// 在生成告警事件的时候,根据用户ID快速找到用户的详情 -func SyncUserGroups() { - err := syncUserGroups() - if err != nil { - fmt.Println("timer: sync users fail:", err) - exit(1) - } - - go loopSyncUserGroups() -} - -func loopSyncUserGroups() { - randtime := rand.Intn(9000) - fmt.Printf("timer: sync users: random sleep %dms\n", randtime) - time.Sleep(time.Duration(randtime) * time.Millisecond) - - for { - time.Sleep(time.Second * time.Duration(9)) - err := syncUserGroups() - if err != nil { - logger.Warning("timer: sync users fail:", err) - } - } -} - -func syncUserGroups() error { - start := time.Now() - - userGroups, err := models.UserGroupGetAll() - if err != nil { - return err - } - - userGroupsMap := make(map[int64]*models.UserGroup) - for i := range userGroups { - userGroupsMap[userGroups[i].Id] = &userGroups[i] - } - - cache.UserGroupCache.SetAll(userGroupsMap) - logger.Debugf("timer: sync userGroups done, cost: %dms", time.Since(start).Milliseconds()) - - return nil -} diff --git a/timer/user_group_member.go b/timer/user_group_member.go deleted file mode 100644 index 0d38ae7c..00000000 --- a/timer/user_group_member.go +++ /dev/null @@ -1,59 +0,0 @@ -package timer - -import ( - "fmt" - "math/rand" - "time" - - "github.com/didi/nightingale/v5/cache" - "github.com/didi/nightingale/v5/models" - - "github.com/toolkits/pkg/logger" -) - -func SyncUserGroupMember() { - if err := 
syncUserGroupMember(); err != nil { - fmt.Println(err) - exit(1) - } - - go loopSyncUserGroupMember() -} - -func loopSyncUserGroupMember() { - randtime := rand.Intn(60000) - fmt.Printf("timer: sync group users: random sleep %dms\n", randtime) - time.Sleep(time.Duration(randtime) * time.Millisecond) - - interval := time.Duration(60) * time.Second - - for { - time.Sleep(interval) - if err := syncUserGroupMember(); err != nil { - logger.Warning(err) - } - } -} - -func syncUserGroupMember() error { - start := time.Now() - - members, err := models.UserGroupMemberGetAll() - if err != nil { - return fmt.Errorf("UserGroupMemberGetAll error: %v", err) - } - - memberMap := make(map[int64]map[int64]struct{}) - for _, m := range members { - if _, exists := memberMap[m.GroupId]; !exists { - memberMap[m.GroupId] = make(map[int64]struct{}) - } - memberMap[m.GroupId][m.UserId] = struct{}{} - } - - cache.UserGroupMember.SetAll(memberMap) - - logger.Debugf("timer: sync group users done, cost: %dms", time.Since(start).Milliseconds()) - - return nil -} diff --git a/trans/push.go b/trans/push.go deleted file mode 100644 index 286cea05..00000000 --- a/trans/push.go +++ /dev/null @@ -1,143 +0,0 @@ -package trans - -import ( - "bytes" - "fmt" - "sort" - "sync" - "time" - - "github.com/toolkits/pkg/logger" - "github.com/toolkits/pkg/str" - - "github.com/didi/nightingale/v5/backend" - "github.com/didi/nightingale/v5/cache" - "github.com/didi/nightingale/v5/models" - "github.com/didi/nightingale/v5/naming" - "github.com/didi/nightingale/v5/vos" -) - -func Push(points []*vos.MetricPoint) error { - if points == nil { - return fmt.Errorf("param(points) is nil") - } - - count := len(points) - if count == 0 { - return fmt.Errorf("param(points) is empty") - } - - var reterr error - - // 把ident->alias做成map,放内存里,后续要周期性与DB中的数据对比,更新resource表 - aliasMapper := make(map[string]interface{}) - - now := time.Now().Unix() - validPoints := make([]*vos.MetricPoint, 0, count) - for i := 0; i < count; i++ { - logger.Debugf("recv %+v", points[i]) - // 如果tags中发现有__ident__和__alias__就提到外层,这个逻辑是为了应对snmp之类的场景 - if val, has := points[i].TagsMap["__ident__"]; has { - points[i].Ident = val - // 如果后面没有发现__alias__,那alias就给改成空 - points[i].Alias = "" - delete(points[i].TagsMap, "__ident__") - } - - if val, has := points[i].TagsMap["__alias__"]; has { - points[i].Alias = val - delete(points[i].TagsMap, "__alias__") - } - - if err := points[i].Tidy(now); err != nil { - // 如果有部分point校验失败,没关系,把error返回即可,正常的可以继续往下走 - logger.Warningf("point %+v is invalid, err:%v ", points[i], err) - reterr = err - } else { - if points[i].Ident != "" { - // 把当前时间也带上,处理的时候只处理最近的数据,避免alias发生变化且数据分散在多个server造成的alias不一致的问题 - aliasMapper[points[i].Ident] = &models.AliasTime{Alias: points[i].Alias, Time: now} - } - // 将resource的tag追加到曲线的tag中,根据tagsmap生成tagslst,排序,生成primarykey - enrich(points[i]) - validPoints = append(validPoints, points[i]) - } - } - - models.AliasMapper.MSet(aliasMapper) - - // 路由数据,做转发的逻辑可以做成异步,这个过程如果有错,都是系统内部错误,不需要暴露给client侧 - go DispatchPoints(validPoints) - - return reterr -} - -func DispatchPoints(points []*vos.MetricPoint) { - // send to push endpoints - pushEndpoints, err := backend.GetPushEndpoints() - if err != nil { - logger.Errorf("could not find pushendpoint:%v", err) - } else { - for _, pushendpoint := range pushEndpoints { - go pushendpoint.Push2Queue(points) - } - } - - // send to judge queue - for i := range points { - node, err := naming.HashRing.GetNode(points[i].PK) - if err != nil { - logger.Errorf("could not find node:%v", err) - continue 
- } - - q, exists := queues.Get(node) - if !exists { - logger.Errorf("could not find queue by %s", node) - continue - } - - q.PushFront(points[i]) - } -} - -var bufferPool = sync.Pool{New: func() interface{} { return new(bytes.Buffer) }} - -func enrich(point *vos.MetricPoint) { - // 把res的tags附到point上 - resAndTags, exists := cache.ResTags.Get(point.Ident) - if exists { - for k, v := range resAndTags.Tags { - point.TagsMap[k] = v - } - } - - var tagsLst []string - - // 根据tagsmap生成tagslst,sort - count := len(point.TagsMap) - if count == 0 { - tagsLst = []string{} - } else { - lst := make([]string, 0, count) - for k, v := range point.TagsMap { - lst = append(lst, k+"="+v) - } - sort.Strings(lst) - tagsLst = lst - } - - // ident metric tagslst 生成 pk - ret := bufferPool.Get().(*bytes.Buffer) - ret.Reset() - defer bufferPool.Put(ret) - - ret.WriteString(point.Ident) - ret.WriteString(point.Metric) - - for i := 0; i < len(tagsLst); i++ { - ret.WriteString(tagsLst[i]) - } - - point.PK = str.MD5(ret.String()) -} diff --git a/trans/queue.go b/trans/queue.go deleted file mode 100644 index 6860fd41..00000000 --- a/trans/queue.go +++ /dev/null @@ -1,82 +0,0 @@ -package trans - -import ( - "sync" - - "github.com/toolkits/pkg/container/list" - "github.com/toolkits/pkg/logger" - "github.com/toolkits/pkg/slice" -) - -type SafeJudgeQueue struct { - sync.RWMutex - Data map[string]*list.SafeListLimited - QueueMaxSize int -} - -var queues = NewJudgeQueue() - -func NewJudgeQueue() SafeJudgeQueue { - return SafeJudgeQueue{ - Data: make(map[string]*list.SafeListLimited), - QueueMaxSize: 10240000, - } -} - -func (s *SafeJudgeQueue) Del(instance string) { - s.Lock() - delete(s.Data, instance) - s.Unlock() -} - -func (s *SafeJudgeQueue) Set(instance string, q *list.SafeListLimited) { - s.Lock() - s.Data[instance] = q - s.Unlock() -} - -func (s *SafeJudgeQueue) Get(instance string) (*list.SafeListLimited, bool) { - s.RLock() - defer s.RUnlock() - q, exists := s.Data[instance] - return q, exists -} - -func (s *SafeJudgeQueue) Exists(instance string) bool { - s.RLock() - defer s.RUnlock() - _, exists := s.Data[instance] - return exists -} - -func (s *SafeJudgeQueue) GetAll() map[string]*list.SafeListLimited { - s.RLock() - defer s.RUnlock() - return s.Data -} - -func (s *SafeJudgeQueue) Update(instances []string) { - for _, instance := range instances { - if !s.Exists(instance) { - q := list.NewSafeListLimited(s.QueueMaxSize) - s.Set(instance, q) - go send2JudgeTask(q, instance) - } - } - - toDel := make(map[string]struct{}) - all := s.GetAll() - for key := range all { - if !slice.ContainsString(instances, key) { - toDel[key] = struct{}{} - } - } - - for key := range toDel { - if queue, ok := s.Get(key); ok { - queue.RemoveAll() - } - s.Del(key) - logger.Infof("server instance %s dead, so remove from judge queues", key) - } -} diff --git a/trans/sender.go b/trans/sender.go deleted file mode 100644 index 85c9cb35..00000000 --- a/trans/sender.go +++ /dev/null @@ -1,88 +0,0 @@ -package trans - -import ( - "time" - - "github.com/didi/nightingale/v5/config" - "github.com/didi/nightingale/v5/judge" - "github.com/didi/nightingale/v5/vos" - "github.com/toolkits/pkg/concurrent/semaphore" - "github.com/toolkits/pkg/container/list" - "github.com/toolkits/pkg/logger" -) - -// 多个judge实例,如果对端地址等于本地地址走内存 -func send2JudgeTask(q *list.SafeListLimited, addr string) { - if config.Config.Heartbeat.LocalAddr == addr { - send2LocalJudge(q) - } else { - send2RemoteJudge(q, addr) - } -} - -func send2LocalJudge(q *list.SafeListLimited) { - for 
{ - items := q.PopBackBy(config.Config.Judge.ReadBatch) - - count := len(items) - if count == 0 { - time.Sleep(time.Millisecond * 100) - continue - } - - points := make([]*vos.MetricPoint, count) - for i := 0; i < count; i++ { - points[i] = items[i].(*vos.MetricPoint) - } - - judge.Send(points) - } - -} - -func send2RemoteJudge(q *list.SafeListLimited, addr string) { - sema := semaphore.NewSemaphore(config.Config.Judge.WriterNum) - - for { - items := q.PopBackBy(config.Config.Judge.ReadBatch) - count := len(items) - if count == 0 { - time.Sleep(time.Millisecond * 50) - if !queues.Exists(addr) { - // 对端实例已挂,我已经没有存在的必要了 - logger.Infof("server instance %s dead, queue reader exiting...", addr) - return - } - continue - } - - judgeItems := make([]*vos.MetricPoint, count) - for i := 0; i < count; i++ { - judgeItems[i] = items[i].(*vos.MetricPoint) - } - - sema.Acquire() - go func(addr string, judgeItems []*vos.MetricPoint, count int) { - defer sema.Release() - - var res string - var err error - sendOk := false - for i := 0; i < 15; i++ { - err = connPools.Call(addr, "Server.PushToJudge", judgeItems, &res) - if err == nil { - sendOk = true - break - } - time.Sleep(time.Second) - } - - if !sendOk { - for _, item := range judgeItems { - logger.Errorf("send %v to judge %s fail: %v", item, addr, err) - } - } - - }(addr, judgeItems, count) - } -} diff --git a/trans/trans.go b/trans/trans.go deleted file mode 100644 index eac93472..00000000 --- a/trans/trans.go +++ /dev/null @@ -1,70 +0,0 @@ -package trans - -import ( - "context" - "fmt" - "os" - "sort" - "strings" - "time" - - "github.com/didi/nightingale/v5/config" - "github.com/didi/nightingale/v5/models" - "github.com/didi/nightingale/v5/naming" - "github.com/didi/nightingale/v5/pkg/ipool" - - "github.com/toolkits/pkg/logger" -) - -var connPools *ipool.ConnPools -var svcsCache string - -func Start(ctx context.Context) { - // 初始化本包的数据结构,然后启动一个goroutine,周期性获取活着的judge实例,更新相应的pool、queue等 - judgeConf := config.Config.Judge - connPools = ipool.NewConnPools(judgeConf.ConnMax, judgeConf.ConnIdle, judgeConf.ConnTimeout, judgeConf.CallTimeout, []string{}) - - if err := syncInstances(); err != nil { - fmt.Println("syncInstances fail:", err) - logger.Close() - os.Exit(1) - } - - go loopSyncInstances() -} - -func loopSyncInstances() { - interval := time.Duration(config.Config.Heartbeat.Interval) * time.Millisecond - for { - time.Sleep(interval) - if err := syncInstances(); err != nil { - logger.Warning("syncInstances fail:", err) - } - } -} - -func syncInstances() error { - // 获取当前活着的所有实例 - instances, err := models.InstanceGetAlive(config.EndpointName) - if err != nil { - logger.Warningf("mysql.error: get alive server instances fail: %v", err) - return err - } - - // 排序,便于与内存中的实例列表做差别判断 - sort.Strings(instances) - - // 如果列表变化,就去处理,并且要更新内存变量serverStr - newSvcs := strings.Join(instances, ",") - if newSvcs != svcsCache { - // 如果有新实例,创建对应的连接池,如果实例少了,删掉没用的连接池 - connPools.UpdatePools(instances) - // 如果有新实例,创建对应的Queue,如果实例少了,删掉对应的Queue - queues.Update(instances) - // 重建哈希环 - naming.RebuildConsistentHashRing(instances) - svcsCache = newSvcs - } - - return nil -} diff --git a/vos/event.go b/vos/event.go deleted file mode 100644 index 2d98e994..00000000 --- a/vos/event.go +++ /dev/null @@ -1,18 +0,0 @@ -package vos - -type HistoryPoints struct { - Metric string `json:"metric"` - Tags map[string]string `json:"tags"` - Points []*HPoint `json:"points"` -} - -type HPoint struct { - Timestamp int64 `json:"t"` - Value JsonFloat `json:"v"` -} - -type HistoryDataS []*HPoint - 
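The three methods that follow give HistoryDataS the standard sort.Interface, so a history series can be ordered by timestamp with sort.Sort. A small self-contained usage sketch, with made-up values and local copies of the types:

```go
package main

import (
	"fmt"
	"sort"
)

// Local copies of the types, just for the example.
type JsonFloat float64

type HPoint struct {
	Timestamp int64
	Value     JsonFloat
}

type HistoryDataS []*HPoint

func (r HistoryDataS) Len() int           { return len(r) }
func (r HistoryDataS) Swap(i, j int)      { r[i], r[j] = r[j], r[i] }
func (r HistoryDataS) Less(i, j int) bool { return r[i].Timestamp < r[j].Timestamp }

func main() {
	series := HistoryDataS{
		{Timestamp: 1700000060, Value: 2},
		{Timestamp: 1700000000, Value: 1},
	}
	sort.Sort(series) // oldest point first
	fmt.Println(series[0].Timestamp, series[1].Timestamp)
}
```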
-func (r HistoryDataS) Len() int { return len(r) } -func (r HistoryDataS) Swap(i, j int) { r[i], r[j] = r[j], r[i] } -func (r HistoryDataS) Less(i, j int) bool { return r[i].Timestamp < r[j].Timestamp } diff --git a/vos/metric.go b/vos/metric.go deleted file mode 100644 index da9ceeca..00000000 --- a/vos/metric.go +++ /dev/null @@ -1,197 +0,0 @@ -package vos - -import ( - "bytes" - "fmt" - "sort" - "strconv" - "strings" - "sync" - - "github.com/didi/nightingale/v5/pkg/istr" -) - -const ( - SPLIT = "/" -) - -type MetricPoint struct { - PK string `json:"pk"` // 内部字段,ident、metric、sorted(tags)拼接之后算md5 - Ident string `json:"ident"` // 资源标识,跟资源无关的监控数据,该字段为空 - Alias string `json:"alias"` // 资源名称,跟资源无关的监控数据,该字段为空 - Metric string `json:"metric"` // 监控指标名称 - TagsMap map[string]string `json:"tags"` // 监控数据标签 - Time int64 `json:"time"` // 时间戳,单位是秒 - ValueUntyped interface{} `json:"value"` // 监控数据数值,可以是int float string,但最终要能转换为float64 - Value float64 `json:"-"` // 内部字段,最终转换之后的float64数值 -} - -func (m *MetricPoint) Tidy(now int64) error { - if m == nil { - return fmt.Errorf("point is nil") - } - - // 时间超前5分钟则报错 - if m.Time-now > 300 { - return fmt.Errorf("point_time(%d) - server_time(%d) = %d. use ntp to calibrate host time?", m.Time, now, m.Time-now) - } - - // 时间延迟30分钟则报错 - if m.Time-now < -1800 { - return fmt.Errorf("point_time(%d) - server_time(%d) = %d. use ntp to calibrate host time?", m.Time, now, m.Time-now) - } - - if m.Time <= 0 { - m.Time = now - } - - if m.Metric == "" { - return fmt.Errorf("metric is blank") - } - - if istr.SampleKeyInvalid(m.Metric) { - return fmt.Errorf("metric:%s contains reserved words", m.Metric) - } - - if istr.SampleKeyInvalid(m.Ident) { - return fmt.Errorf("ident:%s contains reserved words", m.Ident) - } - - if m.ValueUntyped == nil { - return fmt.Errorf("value is nil") - } - - safemap := make(map[string]string) - for k, v := range m.TagsMap { - if istr.SampleKeyInvalid(k) { - return fmt.Errorf("tag key: %s contains reserved words", k) - } - - if len(k) == 0 { - return fmt.Errorf("tag key is blank, metric: %s", m.Metric) - } - - v = strings.Map(func(r rune) rune { - if r == '\t' || - r == '\r' || - r == '\n' || - r == ',' { - return '_' - } - return r - }, v) - - if len(v) == 0 { - safemap[k] = "nil" - } else { - safemap[k] = v - } - } - - m.TagsMap = safemap - - valid := true - var vv float64 - var err error - - switch cv := m.ValueUntyped.(type) { - case string: - vv, err = strconv.ParseFloat(cv, 64) - if err != nil { - valid = false - } - case float64: - vv = cv - case uint64: - vv = float64(cv) - case int64: - vv = float64(cv) - case int: - vv = float64(cv) - default: - valid = false - } - - if !valid { - return fmt.Errorf("value(%v) is illegal", m.Value) - } - - m.Value = vv - - return nil -} - -func DictedTagList(tags []string) map[string]string { - rmap := make(map[string]string) - if len(tags) == 0 { - return rmap - } - - for _, tag := range tags { - pair := strings.SplitN(tag, "=", 2) - if len(pair) != 2 { - continue - } - - if pair[0] == "" { - continue - } - - if pair[1] == "" { - rmap[pair[0]] = "nil" - } else { - rmap[pair[0]] = pair[1] - } - } - - return rmap -} - -var bufferPool = sync.Pool{ - New: func() interface{} { - return new(bytes.Buffer) - }, -} - -func SortedTags(tags map[string]string) string { - if tags == nil { - return "" - } - - size := len(tags) - if size == 0 { - return "" - } - - ret := bufferPool.Get().(*bytes.Buffer) - ret.Reset() - defer bufferPool.Put(ret) - - if size == 1 { - for k, v := range tags { - ret.WriteString(k) - 
ret.WriteString("=") - ret.WriteString(v) - } - return ret.String() - } - - keys := make([]string, size) - i := 0 - for k := range tags { - keys[i] = k - i++ - } - sort.Strings(keys) - - for j, key := range keys { - ret.WriteString(key) - ret.WriteString("=") - ret.WriteString(tags[key]) - if j != size-1 { - ret.WriteString(",") - } - } - - return ret.String() -} diff --git a/vos/query.go b/vos/query.go deleted file mode 100644 index 92948e20..00000000 --- a/vos/query.go +++ /dev/null @@ -1,238 +0,0 @@ -package vos - -import ( - "fmt" - "math" - "time" -) - -type JsonFloat float64 - -func (v JsonFloat) MarshalJSON() ([]byte, error) { - f := float64(v) - if math.IsNaN(f) || math.IsInf(f, 0) { - return []byte("null"), nil - } else { - return []byte(fmt.Sprintf("%f", f)), nil - } -} - -type Point struct { - Timestamp int64 `json:"t"` - Value JsonFloat `json:"v"` -} - -func NewPoint(ts int64, val float64) *Point { - return &Point{Timestamp: ts, Value: JsonFloat(val)} -} - -type DataQueryParam struct { - Params []DataQueryParamOne `json:"params"` - Limit int `json:"limit"` - Start int64 `json:"start"` - End int64 `json:"end"` - Step int64 `json:"step"` -} - -type DataQueryInstantParam struct { - PromeQl string `json:"prome_ql"` -} - -type DataQueryParamOne struct { - PromeQl string `json:"prome_ql"` - Idents []string `json:"idents"` - ClasspathId int64 `json:"classpath_id"` - ClasspathPrefix int `json:"classpath_prefix"` - Metric string `json:"metric"` - TagPairs []*TagPair `json:"tags"` - DownSamplingFunc string `json:"down_sampling_func"` - Aggr AggrConf `json:"aggr"` - Comparisons []int64 `json:"comparisons"` //环比多少时间 -} - -type AggrConf struct { - GroupKey []string `json:"group_key"` //聚合维度 - Func string `json:"func" description:"sum,avg,max,min"` //聚合计算 -} - -type DataQueryResp struct { - Ident string `json:"ident"` - Metric string `json:"metric"` - Tags string `json:"tags"` - Values []*Point `json:"values"` - Resolution int64 `json:"resolution"` - PNum int `json:"pNum"` -} - -type DataQueryInstanceResp struct { - Metric map[string]interface{} `json:"metric"` - Value []float64 `json:"value"` -} - -type DataQL struct { - Start int64 `json:"start"` - End int64 `json:"end"` - QL string `json:"ql"` - Step int64 `json:"step"` -} - -type TagKeyQueryParam struct { - Idents []string `json:"idents"` - TagKey string `json:"tagkey"` - TagPairs []*TagPair `json:"tags"` - Metric string `json:"metric"` - Start int64 `json:"start" description:"inclusive"` - End int64 `json:"end" description:"exclusive"` - StartInclusive time.Time `json:"-"` - EndExclusive time.Time `json:"-"` -} - -func (p *TagKeyQueryParam) Validate() (err error) { - p.StartInclusive, p.EndExclusive, err = timeRangeValidate(p.Start, p.End) - return -} - -type TagKeyQueryResp struct { - Keys []string `json:"keys"` -} - -type TagValueQueryParam struct { - TagKey string `json:"tagkey"` - TagValue string `json:"value"` - Metric string `json:"metric"` - Idents []string `json:"idents"` - Tags []string `json:"tags"` - Start int64 `json:"start" description:"inclusive"` - End int64 `json:"end" description:"exclusive"` - StartInclusive time.Time `json:"-"` - EndExclusive time.Time `json:"-"` -} - -func (p *TagValueQueryParam) Validate() (err error) { - p.StartInclusive, p.EndExclusive, err = timeRangeValidate(p.Start, p.End) - return -} - -type PromQlCheckResp struct { - ParseError string `json:"parse_error"` - QlCorrect bool `json:"ql_correct"` -} - -type TagValueQueryResp struct { - Values []string `json:"values"` -} - -type 
TagPairQueryParamOne struct { - Idents []string `json:"idents"` - Metric string `json:"metric"` -} - -type TagPairQueryParam struct { - Params []TagPairQueryParamOne `json:"params"` - TagPairs []*TagPair `json:"tags"` - Start int64 `json:"start" description:"inclusive"` - End int64 `json:"end" description:"exclusive"` - StartInclusive time.Time `json:"-"` - EndExclusive time.Time `json:"-"` - Limit int `json:"limit"` -} - -type CommonTagQueryParam struct { - Params []TagPairQueryParamOne `json:"params"` - TagPairs []*TagPair `json:"tags"` - TagKey string `json:"tag_key"` // 查询目标key,或者模糊查询 - TagValue string `json:"tag_value"` // 根据标签key查询value,或者模糊查询 - Start int64 `json:"start" description:"inclusive"` - End int64 `json:"end" description:"exclusive"` - StartInclusive time.Time `json:"-"` - EndExclusive time.Time `json:"-"` - Search string `json:"search"` // 查询标签组的时候的搜索 str,可以是key 也可以是value - Limit int `json:"limit"` -} - -func (p *CommonTagQueryParam) Validate() (err error) { - p.StartInclusive, p.EndExclusive, err = timeRangeValidate(p.Start, p.End) - return -} - -type TagPairQueryResp struct { - Idents []string `json:"idents"` - Metric string `json:"metric"` - TagPairs []string `json:"tags"` -} - -type MetricQueryParam struct { - Idents []string `json:"idents"` - Metric string `json:"metric"` - TagPairs []*TagPair `json:"tags"` - Start int64 `json:"start" description:"inclusive"` - End int64 `json:"end" description:"exclusive"` - StartInclusive time.Time `json:"-"` - EndExclusive time.Time `json:"-"` - Limit int `json:"limit"` -} - -func (p *MetricQueryParam) Validate() (err error) { - p.StartInclusive, p.EndExclusive, err = timeRangeValidate(p.Start, p.End) - return -} - -type MetricQueryResp struct { - Metrics []string `json:"metrics"` -} - -type MetricDesQueryResp struct { - Metrics []MetricsWithDescription `json:"metrics"` -} - -type MetricsWithDescription struct { - Name string `json:"name"` - Description string `json:"description"` -} - -type TagPair struct { - Key string `json:"key"` - Value string `json:"value"` -} - -//type IndexQueryParam struct { -// Metric string `json:"metric"` -// Idents []string `json:"idents"` -// Include []*TagPair `json:"include"` -// Exclude []*TagPair `json:"exclude"` -// Start int64 `json:"start" description:"inclusive"` -// End int64 `json:"end" description:"exclusive"` -// StartInclusive time.Time `json:"-"` -// EndExclusive time.Time `json:"-"` -//} -// -//func (p *IndexQueryParam) Validate() (err error) { -// p.StartInclusive, p.EndExclusive, err = timeRangeValidate(p.Start, p.End) -// return -//} -// -//type IndexQueryResp struct { -// Metric string `json:"metric"` -// Ident string `json:"ident"` -// Tags map[string]string `json:"tags"` -//} - -func timeRangeValidate(start, end int64) (startInclusive, endExclusive time.Time, err error) { - if end == 0 { - endExclusive = time.Now() - } else { - endExclusive = time.Unix(end, 0) - } - - if start == 0 { - startInclusive = endExclusive.Add(-time.Hour * 25) - } else { - startInclusive = time.Unix(start, 0) - } - - if startInclusive.After(endExclusive) { - err = fmt.Errorf("start is after end") - } - - return -}
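The timeRangeValidate helper at the end of vos/query.go defaults a zero end to the current time and a zero start to 25 hours before the end, and rejects ranges where start falls after end. A standalone sketch of the same defaulting rules, independent of the package:

```go
package main

import (
	"fmt"
	"time"
)

// timeRange mirrors the defaulting rules of timeRangeValidate:
// end == 0 means "now", start == 0 means "25 hours before end",
// and a start that falls after end is rejected.
func timeRange(start, end int64) (time.Time, time.Time, error) {
	var s, e time.Time
	if end == 0 {
		e = time.Now()
	} else {
		e = time.Unix(end, 0)
	}
	if start == 0 {
		s = e.Add(-25 * time.Hour)
	} else {
		s = time.Unix(start, 0)
	}
	if s.After(e) {
		return s, e, fmt.Errorf("start is after end")
	}
	return s, e, nil
}

func main() {
	s, e, err := timeRange(0, 0) // both defaulted
	fmt.Println(s.Before(e), err)
}
```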