go mod vendor

This commit is contained in:
parent 37868777e7
commit b5becda6fc

go.mod | 18
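The change below is the kind produced by re-running the Go module vendoring workflow. A likely command sequence is sketched here (an assumption based only on the commit title; the exact invocation is not recorded in the diff). If run, go mod tidy is what adds the // indirect markers seen in go.mod, and go mod vendor rewrites the vendor/ directory, which removes the vendored files shown further down:

    # mark requirements that are only needed transitively as // indirect
    go mod tidy
    # rebuild vendor/ to match go.mod, dropping packages no longer required
    go mod vendor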
go.mod

@@ -8,9 +8,9 @@ require (
 	github.com/codegangsta/negroni v1.0.0
 	github.com/coreos/go-oidc v2.2.1+incompatible
 	github.com/dgryski/go-tsz v0.0.0-20180227144327-03b7d791f4fe
-	github.com/ericchiang/k8s v1.2.0
+	github.com/ericchiang/k8s v1.2.0 // indirect
 	github.com/garyburd/redigo v1.6.2
-	github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32
+	github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 // indirect
 	github.com/gin-contrib/pprof v1.3.0
 	github.com/gin-gonic/gin v1.6.3
 	github.com/go-sql-driver/mysql v1.5.0
@@ -20,20 +20,20 @@ require (
 	github.com/hpcloud/tail v1.0.0
 	github.com/influxdata/influxdb v1.8.0
 	github.com/influxdata/telegraf v1.16.2
-	github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65
-	github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8
+	github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65 // indirect
+	github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8 // indirect
 	github.com/m3db/m3 v0.15.17
 	github.com/mattn/go-isatty v0.0.12
 	github.com/mattn/go-sqlite3 v1.14.0 // indirect
-	github.com/matttproud/golang_protobuf_extensions v1.0.1
+	github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
 	github.com/mojocn/base64Captcha v1.3.1
 	github.com/open-falcon/rrdlite v0.0.0-20200214140804-bf5829f786ad
 	github.com/pquerna/cachecontrol v0.0.0-20200819021114-67c6ae64274f // indirect
-	github.com/prometheus/client_model v0.2.0
-	github.com/prometheus/common v0.9.1
+	github.com/prometheus/client_model v0.2.0 // indirect
+	github.com/prometheus/common v0.9.1 // indirect
 	github.com/robfig/go-cache v0.0.0-20130306151617-9fc39e0dbf62 // indirect
 	github.com/shirou/gopsutil v3.20.11+incompatible // indirect
-	github.com/soniah/gosnmp v1.25.0
+	github.com/soniah/gosnmp v1.25.0 // indirect
 	github.com/spaolacci/murmur3 v1.1.0
 	github.com/spf13/viper v1.7.1
 	github.com/streadway/amqp v1.0.0
@@ -47,7 +47,7 @@ require (
 	gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect
 	gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df
 	gopkg.in/ldap.v3 v3.1.0
-	gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce
+	gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce // indirect
 	gopkg.in/square/go-jose.v2 v2.5.1 // indirect
 	gopkg.in/yaml.v2 v2.3.0
 	xorm.io/core v0.7.3

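Most of the go.mod edits above only append an // indirect comment. In a go.mod file that marker flags a requirement that no package of the main module imports directly, and go mod tidy maintains it automatically. A minimal sketch of such a file (hypothetical module path; versions copied from the diff above):

    module example.com/hypothetical

    require (
    	// imported directly by code in this module
    	github.com/gin-gonic/gin v1.6.3
    	// needed only by another dependency, so it is marked indirect
    	github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 // indirect
    )

The deleted files that follow are whole vendored source files removed by the re-vendoring.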
@@ -1,27 +0,0 @@
Copyright (c) 2017 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

@@ -1,156 +0,0 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.

// Package cmpopts provides common options for the cmp package.
package cmpopts

import (
	"math"
	"reflect"
	"time"

	"github.com/google/go-cmp/cmp"
	"golang.org/x/xerrors"
)

func equateAlways(_, _ interface{}) bool { return true }

// EquateEmpty returns a Comparer option that determines all maps and slices
// with a length of zero to be equal, regardless of whether they are nil.
//
// EquateEmpty can be used in conjunction with SortSlices and SortMaps.
func EquateEmpty() cmp.Option {
	return cmp.FilterValues(isEmpty, cmp.Comparer(equateAlways))
}

func isEmpty(x, y interface{}) bool {
	vx, vy := reflect.ValueOf(x), reflect.ValueOf(y)
	return (x != nil && y != nil && vx.Type() == vy.Type()) &&
		(vx.Kind() == reflect.Slice || vx.Kind() == reflect.Map) &&
		(vx.Len() == 0 && vy.Len() == 0)
}

// EquateApprox returns a Comparer option that determines float32 or float64
// values to be equal if they are within a relative fraction or absolute margin.
// This option is not used when either x or y is NaN or infinite.
//
// The fraction determines that the difference of two values must be within the
// smaller fraction of the two values, while the margin determines that the two
// values must be within some absolute margin.
// To express only a fraction or only a margin, use 0 for the other parameter.
// The fraction and margin must be non-negative.
//
// The mathematical expression used is equivalent to:
//	|x-y| ≤ max(fraction*min(|x|, |y|), margin)
//
// EquateApprox can be used in conjunction with EquateNaNs.
func EquateApprox(fraction, margin float64) cmp.Option {
	if margin < 0 || fraction < 0 || math.IsNaN(margin) || math.IsNaN(fraction) {
		panic("margin or fraction must be a non-negative number")
	}
	a := approximator{fraction, margin}
	return cmp.Options{
		cmp.FilterValues(areRealF64s, cmp.Comparer(a.compareF64)),
		cmp.FilterValues(areRealF32s, cmp.Comparer(a.compareF32)),
	}
}

type approximator struct{ frac, marg float64 }

func areRealF64s(x, y float64) bool {
	return !math.IsNaN(x) && !math.IsNaN(y) && !math.IsInf(x, 0) && !math.IsInf(y, 0)
}
func areRealF32s(x, y float32) bool {
	return areRealF64s(float64(x), float64(y))
}
func (a approximator) compareF64(x, y float64) bool {
	relMarg := a.frac * math.Min(math.Abs(x), math.Abs(y))
	return math.Abs(x-y) <= math.Max(a.marg, relMarg)
}
func (a approximator) compareF32(x, y float32) bool {
	return a.compareF64(float64(x), float64(y))
}

// EquateNaNs returns a Comparer option that determines float32 and float64
// NaN values to be equal.
//
// EquateNaNs can be used in conjunction with EquateApprox.
func EquateNaNs() cmp.Option {
	return cmp.Options{
		cmp.FilterValues(areNaNsF64s, cmp.Comparer(equateAlways)),
		cmp.FilterValues(areNaNsF32s, cmp.Comparer(equateAlways)),
	}
}

func areNaNsF64s(x, y float64) bool {
	return math.IsNaN(x) && math.IsNaN(y)
}
func areNaNsF32s(x, y float32) bool {
	return areNaNsF64s(float64(x), float64(y))
}

// EquateApproxTime returns a Comparer option that determines two non-zero
// time.Time values to be equal if they are within some margin of one another.
// If both times have a monotonic clock reading, then the monotonic time
// difference will be used. The margin must be non-negative.
func EquateApproxTime(margin time.Duration) cmp.Option {
	if margin < 0 {
		panic("margin must be a non-negative number")
	}
	a := timeApproximator{margin}
	return cmp.FilterValues(areNonZeroTimes, cmp.Comparer(a.compare))
}

func areNonZeroTimes(x, y time.Time) bool {
	return !x.IsZero() && !y.IsZero()
}

type timeApproximator struct {
	margin time.Duration
}

func (a timeApproximator) compare(x, y time.Time) bool {
	// Avoid subtracting times to avoid overflow when the
	// difference is larger than the largest representible duration.
	if x.After(y) {
		// Ensure x is always before y
		x, y = y, x
	}
	// We're within the margin if x+margin >= y.
	// Note: time.Time doesn't have AfterOrEqual method hence the negation.
	return !x.Add(a.margin).Before(y)
}

// AnyError is an error that matches any non-nil error.
var AnyError anyError

type anyError struct{}

func (anyError) Error() string     { return "any error" }
func (anyError) Is(err error) bool { return err != nil }

// EquateErrors returns a Comparer option that determines errors to be equal
// if errors.Is reports them to match. The AnyError error can be used to
// match any non-nil error.
func EquateErrors() cmp.Option {
	return cmp.FilterValues(areConcreteErrors, cmp.Comparer(compareErrors))
}

// areConcreteErrors reports whether x and y are types that implement error.
// The input types are deliberately of the interface{} type rather than the
// error type so that we can handle situations where the current type is an
// interface{}, but the underlying concrete types both happen to implement
// the error interface.
func areConcreteErrors(x, y interface{}) bool {
	_, ok1 := x.(error)
	_, ok2 := y.(error)
	return ok1 && ok2
}

func compareErrors(x, y interface{}) bool {
	xe := x.(error)
	ye := y.(error)
	// TODO(≥go1.13): Use standard definition of errors.Is.
	return xerrors.Is(xe, ye) || xerrors.Is(ye, xe)
}

@@ -1,206 +0,0 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.

package cmpopts

import (
	"fmt"
	"reflect"
	"unicode"
	"unicode/utf8"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/internal/function"
)

// IgnoreFields returns an Option that ignores fields of the
// given names on a single struct type. It respects the names of exported fields
// that are forwarded due to struct embedding.
// The struct type is specified by passing in a value of that type.
//
// The name may be a dot-delimited string (e.g., "Foo.Bar") to ignore a
// specific sub-field that is embedded or nested within the parent struct.
func IgnoreFields(typ interface{}, names ...string) cmp.Option {
	sf := newStructFilter(typ, names...)
	return cmp.FilterPath(sf.filter, cmp.Ignore())
}

// IgnoreTypes returns an Option that ignores all values assignable to
// certain types, which are specified by passing in a value of each type.
func IgnoreTypes(typs ...interface{}) cmp.Option {
	tf := newTypeFilter(typs...)
	return cmp.FilterPath(tf.filter, cmp.Ignore())
}

type typeFilter []reflect.Type

func newTypeFilter(typs ...interface{}) (tf typeFilter) {
	for _, typ := range typs {
		t := reflect.TypeOf(typ)
		if t == nil {
			// This occurs if someone tries to pass in sync.Locker(nil)
			panic("cannot determine type; consider using IgnoreInterfaces")
		}
		tf = append(tf, t)
	}
	return tf
}
func (tf typeFilter) filter(p cmp.Path) bool {
	if len(p) < 1 {
		return false
	}
	t := p.Last().Type()
	for _, ti := range tf {
		if t.AssignableTo(ti) {
			return true
		}
	}
	return false
}

// IgnoreInterfaces returns an Option that ignores all values or references of
// values assignable to certain interface types. These interfaces are specified
// by passing in an anonymous struct with the interface types embedded in it.
// For example, to ignore sync.Locker, pass in struct{sync.Locker}{}.
func IgnoreInterfaces(ifaces interface{}) cmp.Option {
	tf := newIfaceFilter(ifaces)
	return cmp.FilterPath(tf.filter, cmp.Ignore())
}

type ifaceFilter []reflect.Type

func newIfaceFilter(ifaces interface{}) (tf ifaceFilter) {
	t := reflect.TypeOf(ifaces)
	if ifaces == nil || t.Name() != "" || t.Kind() != reflect.Struct {
		panic("input must be an anonymous struct")
	}
	for i := 0; i < t.NumField(); i++ {
		fi := t.Field(i)
		switch {
		case !fi.Anonymous:
			panic("struct cannot have named fields")
		case fi.Type.Kind() != reflect.Interface:
			panic("embedded field must be an interface type")
		case fi.Type.NumMethod() == 0:
			// This matches everything; why would you ever want this?
			panic("cannot ignore empty interface")
		default:
			tf = append(tf, fi.Type)
		}
	}
	return tf
}
func (tf ifaceFilter) filter(p cmp.Path) bool {
	if len(p) < 1 {
		return false
	}
	t := p.Last().Type()
	for _, ti := range tf {
		if t.AssignableTo(ti) {
			return true
		}
		if t.Kind() != reflect.Ptr && reflect.PtrTo(t).AssignableTo(ti) {
			return true
		}
	}
	return false
}

// IgnoreUnexported returns an Option that only ignores the immediate unexported
// fields of a struct, including anonymous fields of unexported types.
// In particular, unexported fields within the struct's exported fields
// of struct types, including anonymous fields, will not be ignored unless the
// type of the field itself is also passed to IgnoreUnexported.
//
// Avoid ignoring unexported fields of a type which you do not control (i.e. a
// type from another repository), as changes to the implementation of such types
// may change how the comparison behaves. Prefer a custom Comparer instead.
func IgnoreUnexported(typs ...interface{}) cmp.Option {
	ux := newUnexportedFilter(typs...)
	return cmp.FilterPath(ux.filter, cmp.Ignore())
}

type unexportedFilter struct{ m map[reflect.Type]bool }

func newUnexportedFilter(typs ...interface{}) unexportedFilter {
	ux := unexportedFilter{m: make(map[reflect.Type]bool)}
	for _, typ := range typs {
		t := reflect.TypeOf(typ)
		if t == nil || t.Kind() != reflect.Struct {
			panic(fmt.Sprintf("%T must be a non-pointer struct", typ))
		}
		ux.m[t] = true
	}
	return ux
}
func (xf unexportedFilter) filter(p cmp.Path) bool {
	sf, ok := p.Index(-1).(cmp.StructField)
	if !ok {
		return false
	}
	return xf.m[p.Index(-2).Type()] && !isExported(sf.Name())
}

// isExported reports whether the identifier is exported.
func isExported(id string) bool {
	r, _ := utf8.DecodeRuneInString(id)
	return unicode.IsUpper(r)
}

// IgnoreSliceElements returns an Option that ignores elements of []V.
// The discard function must be of the form "func(T) bool" which is used to
// ignore slice elements of type V, where V is assignable to T.
// Elements are ignored if the function reports true.
func IgnoreSliceElements(discardFunc interface{}) cmp.Option {
	vf := reflect.ValueOf(discardFunc)
	if !function.IsType(vf.Type(), function.ValuePredicate) || vf.IsNil() {
		panic(fmt.Sprintf("invalid discard function: %T", discardFunc))
	}
	return cmp.FilterPath(func(p cmp.Path) bool {
		si, ok := p.Index(-1).(cmp.SliceIndex)
		if !ok {
			return false
		}
		if !si.Type().AssignableTo(vf.Type().In(0)) {
			return false
		}
		vx, vy := si.Values()
		if vx.IsValid() && vf.Call([]reflect.Value{vx})[0].Bool() {
			return true
		}
		if vy.IsValid() && vf.Call([]reflect.Value{vy})[0].Bool() {
			return true
		}
		return false
	}, cmp.Ignore())
}

// IgnoreMapEntries returns an Option that ignores entries of map[K]V.
// The discard function must be of the form "func(T, R) bool" which is used to
// ignore map entries of type K and V, where K and V are assignable to T and R.
// Entries are ignored if the function reports true.
func IgnoreMapEntries(discardFunc interface{}) cmp.Option {
	vf := reflect.ValueOf(discardFunc)
	if !function.IsType(vf.Type(), function.KeyValuePredicate) || vf.IsNil() {
		panic(fmt.Sprintf("invalid discard function: %T", discardFunc))
	}
	return cmp.FilterPath(func(p cmp.Path) bool {
		mi, ok := p.Index(-1).(cmp.MapIndex)
		if !ok {
			return false
		}
		if !mi.Key().Type().AssignableTo(vf.Type().In(0)) || !mi.Type().AssignableTo(vf.Type().In(1)) {
			return false
		}
		k := mi.Key()
		vx, vy := mi.Values()
		if vx.IsValid() && vf.Call([]reflect.Value{k, vx})[0].Bool() {
			return true
		}
		if vy.IsValid() && vf.Call([]reflect.Value{k, vy})[0].Bool() {
			return true
		}
		return false
	}, cmp.Ignore())
}

@@ -1,147 +0,0 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.

package cmpopts

import (
	"fmt"
	"reflect"
	"sort"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/internal/function"
)

// SortSlices returns a Transformer option that sorts all []V.
// The less function must be of the form "func(T, T) bool" which is used to
// sort any slice with element type V that is assignable to T.
//
// The less function must be:
//	• Deterministic: less(x, y) == less(x, y)
//	• Irreflexive: !less(x, x)
//	• Transitive: if !less(x, y) and !less(y, z), then !less(x, z)
//
// The less function does not have to be "total". That is, if !less(x, y) and
// !less(y, x) for two elements x and y, their relative order is maintained.
//
// SortSlices can be used in conjunction with EquateEmpty.
func SortSlices(lessFunc interface{}) cmp.Option {
	vf := reflect.ValueOf(lessFunc)
	if !function.IsType(vf.Type(), function.Less) || vf.IsNil() {
		panic(fmt.Sprintf("invalid less function: %T", lessFunc))
	}
	ss := sliceSorter{vf.Type().In(0), vf}
	return cmp.FilterValues(ss.filter, cmp.Transformer("cmpopts.SortSlices", ss.sort))
}

type sliceSorter struct {
	in  reflect.Type  // T
	fnc reflect.Value // func(T, T) bool
}

func (ss sliceSorter) filter(x, y interface{}) bool {
	vx, vy := reflect.ValueOf(x), reflect.ValueOf(y)
	if !(x != nil && y != nil && vx.Type() == vy.Type()) ||
		!(vx.Kind() == reflect.Slice && vx.Type().Elem().AssignableTo(ss.in)) ||
		(vx.Len() <= 1 && vy.Len() <= 1) {
		return false
	}
	// Check whether the slices are already sorted to avoid an infinite
	// recursion cycle applying the same transform to itself.
	ok1 := sort.SliceIsSorted(x, func(i, j int) bool { return ss.less(vx, i, j) })
	ok2 := sort.SliceIsSorted(y, func(i, j int) bool { return ss.less(vy, i, j) })
	return !ok1 || !ok2
}
func (ss sliceSorter) sort(x interface{}) interface{} {
	src := reflect.ValueOf(x)
	dst := reflect.MakeSlice(src.Type(), src.Len(), src.Len())
	for i := 0; i < src.Len(); i++ {
		dst.Index(i).Set(src.Index(i))
	}
	sort.SliceStable(dst.Interface(), func(i, j int) bool { return ss.less(dst, i, j) })
	ss.checkSort(dst)
	return dst.Interface()
}
func (ss sliceSorter) checkSort(v reflect.Value) {
	start := -1 // Start of a sequence of equal elements.
	for i := 1; i < v.Len(); i++ {
		if ss.less(v, i-1, i) {
			// Check that first and last elements in v[start:i] are equal.
			if start >= 0 && (ss.less(v, start, i-1) || ss.less(v, i-1, start)) {
				panic(fmt.Sprintf("incomparable values detected: want equal elements: %v", v.Slice(start, i)))
			}
			start = -1
		} else if start == -1 {
			start = i
		}
	}
}
func (ss sliceSorter) less(v reflect.Value, i, j int) bool {
	vx, vy := v.Index(i), v.Index(j)
	return ss.fnc.Call([]reflect.Value{vx, vy})[0].Bool()
}

// SortMaps returns a Transformer option that flattens map[K]V types to be a
// sorted []struct{K, V}. The less function must be of the form
// "func(T, T) bool" which is used to sort any map with key K that is
// assignable to T.
//
// Flattening the map into a slice has the property that cmp.Equal is able to
// use Comparers on K or the K.Equal method if it exists.
//
// The less function must be:
//	• Deterministic: less(x, y) == less(x, y)
//	• Irreflexive: !less(x, x)
//	• Transitive: if !less(x, y) and !less(y, z), then !less(x, z)
//	• Total: if x != y, then either less(x, y) or less(y, x)
//
// SortMaps can be used in conjunction with EquateEmpty.
func SortMaps(lessFunc interface{}) cmp.Option {
	vf := reflect.ValueOf(lessFunc)
	if !function.IsType(vf.Type(), function.Less) || vf.IsNil() {
		panic(fmt.Sprintf("invalid less function: %T", lessFunc))
	}
	ms := mapSorter{vf.Type().In(0), vf}
	return cmp.FilterValues(ms.filter, cmp.Transformer("cmpopts.SortMaps", ms.sort))
}

type mapSorter struct {
	in  reflect.Type  // T
	fnc reflect.Value // func(T, T) bool
}

func (ms mapSorter) filter(x, y interface{}) bool {
	vx, vy := reflect.ValueOf(x), reflect.ValueOf(y)
	return (x != nil && y != nil && vx.Type() == vy.Type()) &&
		(vx.Kind() == reflect.Map && vx.Type().Key().AssignableTo(ms.in)) &&
		(vx.Len() != 0 || vy.Len() != 0)
}
func (ms mapSorter) sort(x interface{}) interface{} {
	src := reflect.ValueOf(x)
	outType := reflect.StructOf([]reflect.StructField{
		{Name: "K", Type: src.Type().Key()},
		{Name: "V", Type: src.Type().Elem()},
	})
	dst := reflect.MakeSlice(reflect.SliceOf(outType), src.Len(), src.Len())
	for i, k := range src.MapKeys() {
		v := reflect.New(outType).Elem()
		v.Field(0).Set(k)
		v.Field(1).Set(src.MapIndex(k))
		dst.Index(i).Set(v)
	}
	sort.Slice(dst.Interface(), func(i, j int) bool { return ms.less(dst, i, j) })
	ms.checkSort(dst)
	return dst.Interface()
}
func (ms mapSorter) checkSort(v reflect.Value) {
	for i := 1; i < v.Len(); i++ {
		if !ms.less(v, i-1, i) {
			panic(fmt.Sprintf("partial order detected: want %v < %v", v.Index(i-1), v.Index(i)))
		}
	}
}
func (ms mapSorter) less(v reflect.Value, i, j int) bool {
	vx, vy := v.Index(i).Field(0), v.Index(j).Field(0)
	return ms.fnc.Call([]reflect.Value{vx, vy})[0].Bool()
}

@@ -1,187 +0,0 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.

package cmpopts

import (
	"fmt"
	"reflect"
	"strings"

	"github.com/google/go-cmp/cmp"
)

// filterField returns a new Option where opt is only evaluated on paths that
// include a specific exported field on a single struct type.
// The struct type is specified by passing in a value of that type.
//
// The name may be a dot-delimited string (e.g., "Foo.Bar") to select a
// specific sub-field that is embedded or nested within the parent struct.
func filterField(typ interface{}, name string, opt cmp.Option) cmp.Option {
	// TODO: This is currently unexported over concerns of how helper filters
	// can be composed together easily.
	// TODO: Add tests for FilterField.

	sf := newStructFilter(typ, name)
	return cmp.FilterPath(sf.filter, opt)
}

type structFilter struct {
	t  reflect.Type // The root struct type to match on
	ft fieldTree    // Tree of fields to match on
}

func newStructFilter(typ interface{}, names ...string) structFilter {
	// TODO: Perhaps allow * as a special identifier to allow ignoring any
	// number of path steps until the next field match?
	// This could be useful when a concrete struct gets transformed into
	// an anonymous struct where it is not possible to specify that by type,
	// but the transformer happens to provide guarantees about the names of
	// the transformed fields.

	t := reflect.TypeOf(typ)
	if t == nil || t.Kind() != reflect.Struct {
		panic(fmt.Sprintf("%T must be a non-pointer struct", typ))
	}
	var ft fieldTree
	for _, name := range names {
		cname, err := canonicalName(t, name)
		if err != nil {
			panic(fmt.Sprintf("%s: %v", strings.Join(cname, "."), err))
		}
		ft.insert(cname)
	}
	return structFilter{t, ft}
}

func (sf structFilter) filter(p cmp.Path) bool {
	for i, ps := range p {
		if ps.Type().AssignableTo(sf.t) && sf.ft.matchPrefix(p[i+1:]) {
			return true
		}
	}
	return false
}

// fieldTree represents a set of dot-separated identifiers.
//
// For example, inserting the following selectors:
//	Foo
//	Foo.Bar.Baz
//	Foo.Buzz
//	Nuka.Cola.Quantum
//
// Results in a tree of the form:
//	{sub: {
//		"Foo": {ok: true, sub: {
//			"Bar": {sub: {
//				"Baz": {ok: true},
//			}},
//			"Buzz": {ok: true},
//		}},
//		"Nuka": {sub: {
//			"Cola": {sub: {
//				"Quantum": {ok: true},
//			}},
//		}},
//	}}
type fieldTree struct {
	ok  bool                 // Whether this is a specified node
	sub map[string]fieldTree // The sub-tree of fields under this node
}

// insert inserts a sequence of field accesses into the tree.
func (ft *fieldTree) insert(cname []string) {
	if ft.sub == nil {
		ft.sub = make(map[string]fieldTree)
	}
	if len(cname) == 0 {
		ft.ok = true
		return
	}
	sub := ft.sub[cname[0]]
	sub.insert(cname[1:])
	ft.sub[cname[0]] = sub
}

// matchPrefix reports whether any selector in the fieldTree matches
// the start of path p.
func (ft fieldTree) matchPrefix(p cmp.Path) bool {
	for _, ps := range p {
		switch ps := ps.(type) {
		case cmp.StructField:
			ft = ft.sub[ps.Name()]
			if ft.ok {
				return true
			}
			if len(ft.sub) == 0 {
				return false
			}
		case cmp.Indirect:
		default:
			return false
		}
	}
	return false
}

// canonicalName returns a list of identifiers where any struct field access
// through an embedded field is expanded to include the names of the embedded
// types themselves.
//
// For example, suppose field "Foo" is not directly in the parent struct,
// but actually from an embedded struct of type "Bar". Then, the canonical name
// of "Foo" is actually "Bar.Foo".
//
// Suppose field "Foo" is not directly in the parent struct, but actually
// a field in two different embedded structs of types "Bar" and "Baz".
// Then the selector "Foo" causes a panic since it is ambiguous which one it
// refers to. The user must specify either "Bar.Foo" or "Baz.Foo".
func canonicalName(t reflect.Type, sel string) ([]string, error) {
	var name string
	sel = strings.TrimPrefix(sel, ".")
	if sel == "" {
		return nil, fmt.Errorf("name must not be empty")
	}
	if i := strings.IndexByte(sel, '.'); i < 0 {
		name, sel = sel, ""
	} else {
		name, sel = sel[:i], sel[i:]
	}

	// Type must be a struct or pointer to struct.
	if t.Kind() == reflect.Ptr {
		t = t.Elem()
	}
	if t.Kind() != reflect.Struct {
		return nil, fmt.Errorf("%v must be a struct", t)
	}

	// Find the canonical name for this current field name.
	// If the field exists in an embedded struct, then it will be expanded.
	sf, _ := t.FieldByName(name)
	if !isExported(name) {
		// Avoid using reflect.Type.FieldByName for unexported fields due to
		// buggy behavior with regard to embeddeding and unexported fields.
		// See https://golang.org/issue/4876 for details.
		sf = reflect.StructField{}
		for i := 0; i < t.NumField() && sf.Name == ""; i++ {
			if t.Field(i).Name == name {
				sf = t.Field(i)
			}
		}
	}
	if sf.Name == "" {
		return []string{name}, fmt.Errorf("does not exist")
	}
	var ss []string
	for i := range sf.Index {
		ss = append(ss, t.FieldByIndex(sf.Index[:i+1]).Name)
	}
	if sel == "" {
		return ss, nil
	}
	ssPost, err := canonicalName(sf.Type, sel)
	return append(ss, ssPost...), err
}

@@ -1,35 +0,0 @@
// Copyright 2018, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.

package cmpopts

import (
	"github.com/google/go-cmp/cmp"
)

type xformFilter struct{ xform cmp.Option }

func (xf xformFilter) filter(p cmp.Path) bool {
	for _, ps := range p {
		if t, ok := ps.(cmp.Transform); ok && t.Option() == xf.xform {
			return false
		}
	}
	return true
}

// AcyclicTransformer returns a Transformer with a filter applied that ensures
// that the transformer cannot be recursively applied upon its own output.
//
// An example use case is a transformer that splits a string by lines:
//	AcyclicTransformer("SplitLines", func(s string) []string{
//		return strings.Split(s, "\n")
//	})
//
// Had this been an unfiltered Transformer instead, this would result in an
// infinite cycle converting a string to []string to [][]string and so on.
func AcyclicTransformer(name string, xformFunc interface{}) cmp.Option {
	xf := xformFilter{cmp.Transformer(name, xformFunc)}
	return cmp.FilterPath(xf.filter, xf.xform)
}

@ -1,682 +0,0 @@
|
||||||
// Copyright 2017, The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE.md file.
|
|
||||||
|
|
||||||
// Package cmp determines equality of values.
|
|
||||||
//
|
|
||||||
// This package is intended to be a more powerful and safer alternative to
|
|
||||||
// reflect.DeepEqual for comparing whether two values are semantically equal.
|
|
||||||
// It is intended to only be used in tests, as performance is not a goal and
|
|
||||||
// it may panic if it cannot compare the values. Its propensity towards
|
|
||||||
// panicking means that its unsuitable for production environments where a
|
|
||||||
// spurious panic may be fatal.
|
|
||||||
//
|
|
||||||
// The primary features of cmp are:
|
|
||||||
//
|
|
||||||
// • When the default behavior of equality does not suit the needs of the test,
|
|
||||||
// custom equality functions can override the equality operation.
|
|
||||||
// For example, an equality function may report floats as equal so long as they
|
|
||||||
// are within some tolerance of each other.
|
|
||||||
//
|
|
||||||
// • Types that have an Equal method may use that method to determine equality.
|
|
||||||
// This allows package authors to determine the equality operation for the types
|
|
||||||
// that they define.
|
|
||||||
//
|
|
||||||
// • If no custom equality functions are used and no Equal method is defined,
|
|
||||||
// equality is determined by recursively comparing the primitive kinds on both
|
|
||||||
// values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, unexported
|
|
||||||
// fields are not compared by default; they result in panics unless suppressed
|
|
||||||
// by using an Ignore option (see cmpopts.IgnoreUnexported) or explicitly
|
|
||||||
// compared using the Exporter option.
|
|
||||||
package cmp
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"reflect"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/google/go-cmp/cmp/internal/diff"
|
|
||||||
"github.com/google/go-cmp/cmp/internal/flags"
|
|
||||||
"github.com/google/go-cmp/cmp/internal/function"
|
|
||||||
"github.com/google/go-cmp/cmp/internal/value"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Equal reports whether x and y are equal by recursively applying the
|
|
||||||
// following rules in the given order to x and y and all of their sub-values:
|
|
||||||
//
|
|
||||||
// • Let S be the set of all Ignore, Transformer, and Comparer options that
|
|
||||||
// remain after applying all path filters, value filters, and type filters.
|
|
||||||
// If at least one Ignore exists in S, then the comparison is ignored.
|
|
||||||
// If the number of Transformer and Comparer options in S is greater than one,
|
|
||||||
// then Equal panics because it is ambiguous which option to use.
|
|
||||||
// If S contains a single Transformer, then use that to transform the current
|
|
||||||
// values and recursively call Equal on the output values.
|
|
||||||
// If S contains a single Comparer, then use that to compare the current values.
|
|
||||||
// Otherwise, evaluation proceeds to the next rule.
|
|
||||||
//
|
|
||||||
// • If the values have an Equal method of the form "(T) Equal(T) bool" or
|
|
||||||
// "(T) Equal(I) bool" where T is assignable to I, then use the result of
|
|
||||||
// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and
|
|
||||||
// evaluation proceeds to the next rule.
|
|
||||||
//
|
|
||||||
// • Lastly, try to compare x and y based on their basic kinds.
|
|
||||||
// Simple kinds like booleans, integers, floats, complex numbers, strings, and
|
|
||||||
// channels are compared using the equivalent of the == operator in Go.
|
|
||||||
// Functions are only equal if they are both nil, otherwise they are unequal.
|
|
||||||
//
|
|
||||||
// Structs are equal if recursively calling Equal on all fields report equal.
|
|
||||||
// If a struct contains unexported fields, Equal panics unless an Ignore option
|
|
||||||
// (e.g., cmpopts.IgnoreUnexported) ignores that field or the Exporter option
|
|
||||||
// explicitly permits comparing the unexported field.
|
|
||||||
//
|
|
||||||
// Slices are equal if they are both nil or both non-nil, where recursively
|
|
||||||
// calling Equal on all non-ignored slice or array elements report equal.
|
|
||||||
// Empty non-nil slices and nil slices are not equal; to equate empty slices,
|
|
||||||
// consider using cmpopts.EquateEmpty.
|
|
||||||
//
|
|
||||||
// Maps are equal if they are both nil or both non-nil, where recursively
|
|
||||||
// calling Equal on all non-ignored map entries report equal.
|
|
||||||
// Map keys are equal according to the == operator.
|
|
||||||
// To use custom comparisons for map keys, consider using cmpopts.SortMaps.
|
|
||||||
// Empty non-nil maps and nil maps are not equal; to equate empty maps,
|
|
||||||
// consider using cmpopts.EquateEmpty.
|
|
||||||
//
|
|
||||||
// Pointers and interfaces are equal if they are both nil or both non-nil,
|
|
||||||
// where they have the same underlying concrete type and recursively
|
|
||||||
// calling Equal on the underlying values reports equal.
|
|
||||||
//
|
|
||||||
// Before recursing into a pointer, slice element, or map, the current path
|
|
||||||
// is checked to detect whether the address has already been visited.
|
|
||||||
// If there is a cycle, then the pointed at values are considered equal
|
|
||||||
// only if both addresses were previously visited in the same path step.
|
|
||||||
func Equal(x, y interface{}, opts ...Option) bool {
|
|
||||||
s := newState(opts)
|
|
||||||
s.compareAny(rootStep(x, y))
|
|
||||||
return s.result.Equal()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Diff returns a human-readable report of the differences between two values:
|
|
||||||
// y - x. It returns an empty string if and only if Equal returns true for the
|
|
||||||
// same input values and options.
|
|
||||||
//
|
|
||||||
// The output is displayed as a literal in pseudo-Go syntax.
|
|
||||||
// At the start of each line, a "-" prefix indicates an element removed from y,
|
|
||||||
// a "+" prefix to indicates an element added to y, and the lack of a prefix
|
|
||||||
// indicates an element common to both x and y. If possible, the output
|
|
||||||
// uses fmt.Stringer.String or error.Error methods to produce more humanly
|
|
||||||
// readable outputs. In such cases, the string is prefixed with either an
|
|
||||||
// 's' or 'e' character, respectively, to indicate that the method was called.
|
|
||||||
//
|
|
||||||
// Do not depend on this output being stable. If you need the ability to
|
|
||||||
// programmatically interpret the difference, consider using a custom Reporter.
|
|
||||||
func Diff(x, y interface{}, opts ...Option) string {
|
|
||||||
s := newState(opts)
|
|
||||||
|
|
||||||
// Optimization: If there are no other reporters, we can optimize for the
|
|
||||||
// common case where the result is equal (and thus no reported difference).
|
|
||||||
// This avoids the expensive construction of a difference tree.
|
|
||||||
if len(s.reporters) == 0 {
|
|
||||||
s.compareAny(rootStep(x, y))
|
|
||||||
if s.result.Equal() {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
s.result = diff.Result{} // Reset results
|
|
||||||
}
|
|
||||||
|
|
||||||
r := new(defaultReporter)
|
|
||||||
s.reporters = append(s.reporters, reporter{r})
|
|
||||||
s.compareAny(rootStep(x, y))
|
|
||||||
d := r.String()
|
|
||||||
if (d == "") != s.result.Equal() {
|
|
||||||
panic("inconsistent difference and equality results")
|
|
||||||
}
|
|
||||||
return d
|
|
||||||
}
|
|
||||||
|
|
||||||
// rootStep constructs the first path step. If x and y have differing types,
|
|
||||||
// then they are stored within an empty interface type.
|
|
||||||
func rootStep(x, y interface{}) PathStep {
|
|
||||||
vx := reflect.ValueOf(x)
|
|
||||||
vy := reflect.ValueOf(y)
|
|
||||||
|
|
||||||
// If the inputs are different types, auto-wrap them in an empty interface
|
|
||||||
// so that they have the same parent type.
|
|
||||||
var t reflect.Type
|
|
||||||
if !vx.IsValid() || !vy.IsValid() || vx.Type() != vy.Type() {
|
|
||||||
t = reflect.TypeOf((*interface{})(nil)).Elem()
|
|
||||||
if vx.IsValid() {
|
|
||||||
vvx := reflect.New(t).Elem()
|
|
||||||
vvx.Set(vx)
|
|
||||||
vx = vvx
|
|
||||||
}
|
|
||||||
if vy.IsValid() {
|
|
||||||
vvy := reflect.New(t).Elem()
|
|
||||||
vvy.Set(vy)
|
|
||||||
vy = vvy
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
t = vx.Type()
|
|
||||||
}
|
|
||||||
|
|
||||||
return &pathStep{t, vx, vy}
|
|
||||||
}
|
|
||||||
|
|
||||||
type state struct {
|
|
||||||
// These fields represent the "comparison state".
|
|
||||||
// Calling statelessCompare must not result in observable changes to these.
|
|
||||||
result diff.Result // The current result of comparison
|
|
||||||
curPath Path // The current path in the value tree
|
|
||||||
curPtrs pointerPath // The current set of visited pointers
|
|
||||||
reporters []reporter // Optional reporters
|
|
||||||
|
|
||||||
// recChecker checks for infinite cycles applying the same set of
|
|
||||||
// transformers upon the output of itself.
|
|
||||||
recChecker recChecker
|
|
||||||
|
|
||||||
// dynChecker triggers pseudo-random checks for option correctness.
|
|
||||||
// It is safe for statelessCompare to mutate this value.
|
|
||||||
dynChecker dynChecker
|
|
||||||
|
|
||||||
// These fields, once set by processOption, will not change.
|
|
||||||
exporters []exporter // List of exporters for structs with unexported fields
|
|
||||||
opts Options // List of all fundamental and filter options
|
|
||||||
}
|
|
||||||
|
|
||||||
func newState(opts []Option) *state {
|
|
||||||
// Always ensure a validator option exists to validate the inputs.
|
|
||||||
s := &state{opts: Options{validator{}}}
|
|
||||||
s.curPtrs.Init()
|
|
||||||
s.processOption(Options(opts))
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *state) processOption(opt Option) {
|
|
||||||
switch opt := opt.(type) {
|
|
||||||
case nil:
|
|
||||||
case Options:
|
|
||||||
for _, o := range opt {
|
|
||||||
s.processOption(o)
|
|
||||||
}
|
|
||||||
case coreOption:
|
|
||||||
type filtered interface {
|
|
||||||
isFiltered() bool
|
|
||||||
}
|
|
||||||
if fopt, ok := opt.(filtered); ok && !fopt.isFiltered() {
|
|
||||||
panic(fmt.Sprintf("cannot use an unfiltered option: %v", opt))
|
|
||||||
}
|
|
||||||
s.opts = append(s.opts, opt)
|
|
||||||
case exporter:
|
|
||||||
s.exporters = append(s.exporters, opt)
|
|
||||||
case reporter:
|
|
||||||
s.reporters = append(s.reporters, opt)
|
|
||||||
default:
|
|
||||||
panic(fmt.Sprintf("unknown option %T", opt))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// statelessCompare compares two values and returns the result.
|
|
||||||
// This function is stateless in that it does not alter the current result,
|
|
||||||
// or output to any registered reporters.
|
|
||||||
func (s *state) statelessCompare(step PathStep) diff.Result {
|
|
||||||
// We do not save and restore curPath and curPtrs because all of the
|
|
||||||
// compareX methods should properly push and pop from them.
|
|
||||||
// It is an implementation bug if the contents of the paths differ from
|
|
||||||
// when calling this function to when returning from it.
|
|
||||||
|
|
||||||
oldResult, oldReporters := s.result, s.reporters
|
|
||||||
s.result = diff.Result{} // Reset result
|
|
||||||
s.reporters = nil // Remove reporters to avoid spurious printouts
|
|
||||||
s.compareAny(step)
|
|
||||||
res := s.result
|
|
||||||
s.result, s.reporters = oldResult, oldReporters
|
|
||||||
return res
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *state) compareAny(step PathStep) {
|
|
||||||
// Update the path stack.
|
|
||||||
s.curPath.push(step)
|
|
||||||
defer s.curPath.pop()
|
|
||||||
for _, r := range s.reporters {
|
|
||||||
r.PushStep(step)
|
|
||||||
defer r.PopStep()
|
|
||||||
}
|
|
||||||
s.recChecker.Check(s.curPath)
|
|
||||||
|
|
||||||
// Cycle-detection for slice elements (see NOTE in compareSlice).
|
|
||||||
t := step.Type()
|
|
||||||
vx, vy := step.Values()
|
|
||||||
if si, ok := step.(SliceIndex); ok && si.isSlice && vx.IsValid() && vy.IsValid() {
|
|
||||||
px, py := vx.Addr(), vy.Addr()
|
|
||||||
if eq, visited := s.curPtrs.Push(px, py); visited {
|
|
||||||
s.report(eq, reportByCycle)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
defer s.curPtrs.Pop(px, py)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Rule 1: Check whether an option applies on this node in the value tree.
|
|
||||||
if s.tryOptions(t, vx, vy) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Rule 2: Check whether the type has a valid Equal method.
|
|
||||||
if s.tryMethod(t, vx, vy) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Rule 3: Compare based on the underlying kind.
|
|
||||||
switch t.Kind() {
|
|
||||||
case reflect.Bool:
|
|
||||||
s.report(vx.Bool() == vy.Bool(), 0)
|
|
||||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
|
||||||
s.report(vx.Int() == vy.Int(), 0)
|
|
||||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
|
||||||
s.report(vx.Uint() == vy.Uint(), 0)
|
|
||||||
case reflect.Float32, reflect.Float64:
|
|
||||||
s.report(vx.Float() == vy.Float(), 0)
|
|
||||||
case reflect.Complex64, reflect.Complex128:
|
|
||||||
s.report(vx.Complex() == vy.Complex(), 0)
|
|
||||||
case reflect.String:
|
|
||||||
s.report(vx.String() == vy.String(), 0)
|
|
||||||
case reflect.Chan, reflect.UnsafePointer:
|
|
||||||
s.report(vx.Pointer() == vy.Pointer(), 0)
|
|
||||||
case reflect.Func:
|
|
||||||
s.report(vx.IsNil() && vy.IsNil(), 0)
|
|
||||||
case reflect.Struct:
|
|
||||||
s.compareStruct(t, vx, vy)
|
|
||||||
case reflect.Slice, reflect.Array:
|
|
||||||
s.compareSlice(t, vx, vy)
|
|
||||||
case reflect.Map:
|
|
||||||
s.compareMap(t, vx, vy)
|
|
||||||
case reflect.Ptr:
|
|
||||||
s.comparePtr(t, vx, vy)
|
|
||||||
case reflect.Interface:
|
|
||||||
s.compareInterface(t, vx, vy)
|
|
||||||
default:
|
|
||||||
panic(fmt.Sprintf("%v kind not handled", t.Kind()))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *state) tryOptions(t reflect.Type, vx, vy reflect.Value) bool {
|
|
||||||
// Evaluate all filters and apply the remaining options.
|
|
||||||
if opt := s.opts.filter(s, t, vx, vy); opt != nil {
|
|
||||||
opt.apply(s, vx, vy)
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *state) tryMethod(t reflect.Type, vx, vy reflect.Value) bool {
|
|
||||||
// Check if this type even has an Equal method.
|
|
||||||
m, ok := t.MethodByName("Equal")
|
|
||||||
if !ok || !function.IsType(m.Type, function.EqualAssignable) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
eq := s.callTTBFunc(m.Func, vx, vy)
|
|
||||||
s.report(eq, reportByMethod)
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value {
|
|
||||||
v = sanitizeValue(v, f.Type().In(0))
|
|
||||||
if !s.dynChecker.Next() {
|
|
||||||
return f.Call([]reflect.Value{v})[0]
|
|
||||||
}
|
|
||||||
|
|
||||||
// Run the function twice and ensure that we get the same results back.
|
|
||||||
// We run in goroutines so that the race detector (if enabled) can detect
|
|
||||||
// unsafe mutations to the input.
|
|
||||||
c := make(chan reflect.Value)
|
|
||||||
go detectRaces(c, f, v)
|
|
||||||
got := <-c
|
|
||||||
want := f.Call([]reflect.Value{v})[0]
|
|
||||||
if step.vx, step.vy = got, want; !s.statelessCompare(step).Equal() {
|
|
||||||
// To avoid false-positives with non-reflexive equality operations,
|
|
||||||
// we sanity check whether a value is equal to itself.
|
|
||||||
if step.vx, step.vy = want, want; !s.statelessCompare(step).Equal() {
|
|
||||||
return want
|
|
||||||
}
|
|
||||||
panic(fmt.Sprintf("non-deterministic function detected: %s", function.NameOf(f)))
|
|
||||||
}
|
|
||||||
return want
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *state) callTTBFunc(f, x, y reflect.Value) bool {
|
|
||||||
x = sanitizeValue(x, f.Type().In(0))
|
|
||||||
y = sanitizeValue(y, f.Type().In(1))
|
|
||||||
if !s.dynChecker.Next() {
|
|
||||||
return f.Call([]reflect.Value{x, y})[0].Bool()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Swapping the input arguments is sufficient to check that
|
|
||||||
// f is symmetric and deterministic.
|
|
||||||
// We run in goroutines so that the race detector (if enabled) can detect
|
|
||||||
// unsafe mutations to the input.
|
|
||||||
c := make(chan reflect.Value)
|
|
||||||
go detectRaces(c, f, y, x)
|
|
||||||
got := <-c
|
|
||||||
want := f.Call([]reflect.Value{x, y})[0].Bool()
|
|
||||||
if !got.IsValid() || got.Bool() != want {
|
|
||||||
panic(fmt.Sprintf("non-deterministic or non-symmetric function detected: %s", function.NameOf(f)))
|
|
||||||
}
|
|
||||||
return want
|
|
||||||
}
|
|
||||||
|
|
||||||
func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) {
|
|
||||||
var ret reflect.Value
|
|
||||||
defer func() {
|
|
||||||
recover() // Ignore panics, let the other call to f panic instead
|
|
||||||
c <- ret
|
|
||||||
}()
|
|
||||||
ret = f.Call(vs)[0]
|
|
||||||
}
|
|
||||||
|
|
||||||
// sanitizeValue converts nil interfaces of type T to those of type R,
|
|
||||||
// assuming that T is assignable to R.
|
|
||||||
// Otherwise, it returns the input value as is.
|
|
||||||
func sanitizeValue(v reflect.Value, t reflect.Type) reflect.Value {
|
|
||||||
// TODO(≥go1.10): Workaround for reflect bug (https://golang.org/issue/22143).
|
|
||||||
if !flags.AtLeastGo110 {
|
|
||||||
if v.Kind() == reflect.Interface && v.IsNil() && v.Type() != t {
|
|
||||||
return reflect.New(t).Elem()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) {
|
|
||||||
var addr bool
|
|
||||||
var vax, vay reflect.Value // Addressable versions of vx and vy
|
|
||||||
|
|
||||||
var mayForce, mayForceInit bool
|
|
||||||
step := StructField{&structField{}}
|
|
||||||
for i := 0; i < t.NumField(); i++ {
|
|
||||||
step.typ = t.Field(i).Type
|
|
||||||
step.vx = vx.Field(i)
|
|
||||||
step.vy = vy.Field(i)
|
|
||||||
step.name = t.Field(i).Name
|
|
||||||
step.idx = i
|
|
||||||
step.unexported = !isExported(step.name)
|
|
||||||
if step.unexported {
|
|
||||||
if step.name == "_" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
// Defer checking of unexported fields until later to give an
|
|
||||||
// Ignore a chance to ignore the field.
|
|
||||||
if !vax.IsValid() || !vay.IsValid() {
|
|
||||||
// For retrieveUnexportedField to work, the parent struct must
|
|
||||||
// be addressable. Create a new copy of the values if
|
|
||||||
// necessary to make them addressable.
|
|
||||||
addr = vx.CanAddr() || vy.CanAddr()
|
|
||||||
vax = makeAddressable(vx)
|
|
||||||
vay = makeAddressable(vy)
|
|
||||||
}
|
|
||||||
if !mayForceInit {
|
|
||||||
for _, xf := range s.exporters {
|
|
||||||
mayForce = mayForce || xf(t)
|
|
||||||
}
|
|
||||||
mayForceInit = true
|
|
||||||
}
|
|
||||||
step.mayForce = mayForce
|
|
||||||
step.paddr = addr
|
|
||||||
step.pvx = vax
|
|
||||||
step.pvy = vay
|
|
||||||
step.field = t.Field(i)
|
|
||||||
}
|
|
||||||
s.compareAny(step)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *state) compareSlice(t reflect.Type, vx, vy reflect.Value) {
|
|
||||||
isSlice := t.Kind() == reflect.Slice
|
|
||||||
if isSlice && (vx.IsNil() || vy.IsNil()) {
|
|
||||||
s.report(vx.IsNil() && vy.IsNil(), 0)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// NOTE: It is incorrect to call curPtrs.Push on the slice header pointer
|
|
||||||
// since slices represents a list of pointers, rather than a single pointer.
|
|
||||||
// The pointer checking logic must be handled on a per-element basis
|
|
||||||
// in compareAny.
|
|
||||||
//
|
|
||||||
// A slice header (see reflect.SliceHeader) in Go is a tuple of a starting
|
|
||||||
// pointer P, a length N, and a capacity C. Supposing each slice element has
|
|
||||||
// a memory size of M, then the slice is equivalent to the list of pointers:
|
|
||||||
// [P+i*M for i in range(N)]
|
|
||||||
//
|
|
||||||
// For example, v[:0] and v[:1] are slices with the same starting pointer,
|
|
||||||
// but they are clearly different values. Using the slice pointer alone
|
|
||||||
// violates the assumption that equal pointers implies equal values.
|
|
||||||
|
|
||||||
step := SliceIndex{&sliceIndex{pathStep: pathStep{typ: t.Elem()}, isSlice: isSlice}}
|
|
||||||
withIndexes := func(ix, iy int) SliceIndex {
|
|
||||||
if ix >= 0 {
|
|
||||||
step.vx, step.xkey = vx.Index(ix), ix
|
|
||||||
} else {
|
|
||||||
step.vx, step.xkey = reflect.Value{}, -1
|
|
||||||
}
|
|
||||||
if iy >= 0 {
|
|
||||||
step.vy, step.ykey = vy.Index(iy), iy
|
|
||||||
} else {
|
|
||||||
step.vy, step.ykey = reflect.Value{}, -1
|
|
||||||
}
|
|
||||||
return step
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ignore options are able to ignore missing elements in a slice.
|
|
||||||
// However, detecting these reliably requires an optimal differencing
// algorithm, which diff.Difference is not.
|
|
||||||
//
|
|
||||||
// Instead, we first iterate through both slices to detect which elements
|
|
||||||
// would be ignored if standing alone. The indexes of non-discarded elements
// are stored in a separate slice, on which diffing is then performed.
|
|
||||||
var indexesX, indexesY []int
|
|
||||||
var ignoredX, ignoredY []bool
|
|
||||||
for ix := 0; ix < vx.Len(); ix++ {
|
|
||||||
ignored := s.statelessCompare(withIndexes(ix, -1)).NumDiff == 0
|
|
||||||
if !ignored {
|
|
||||||
indexesX = append(indexesX, ix)
|
|
||||||
}
|
|
||||||
ignoredX = append(ignoredX, ignored)
|
|
||||||
}
|
|
||||||
for iy := 0; iy < vy.Len(); iy++ {
|
|
||||||
ignored := s.statelessCompare(withIndexes(-1, iy)).NumDiff == 0
|
|
||||||
if !ignored {
|
|
||||||
indexesY = append(indexesY, iy)
|
|
||||||
}
|
|
||||||
ignoredY = append(ignoredY, ignored)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Compute an edit-script for slices vx and vy (excluding ignored elements).
|
|
||||||
edits := diff.Difference(len(indexesX), len(indexesY), func(ix, iy int) diff.Result {
|
|
||||||
return s.statelessCompare(withIndexes(indexesX[ix], indexesY[iy]))
|
|
||||||
})
|
|
||||||
|
|
||||||
// Replay the ignore-scripts and the edit-script.
|
|
||||||
var ix, iy int
|
|
||||||
for ix < vx.Len() || iy < vy.Len() {
|
|
||||||
var e diff.EditType
|
|
||||||
switch {
|
|
||||||
case ix < len(ignoredX) && ignoredX[ix]:
|
|
||||||
e = diff.UniqueX
|
|
||||||
case iy < len(ignoredY) && ignoredY[iy]:
|
|
||||||
e = diff.UniqueY
|
|
||||||
default:
|
|
||||||
e, edits = edits[0], edits[1:]
|
|
||||||
}
|
|
||||||
switch e {
|
|
||||||
case diff.UniqueX:
|
|
||||||
s.compareAny(withIndexes(ix, -1))
|
|
||||||
ix++
|
|
||||||
case diff.UniqueY:
|
|
||||||
s.compareAny(withIndexes(-1, iy))
|
|
||||||
iy++
|
|
||||||
default:
|
|
||||||
s.compareAny(withIndexes(ix, iy))
|
|
||||||
ix++
|
|
||||||
iy++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *state) compareMap(t reflect.Type, vx, vy reflect.Value) {
|
|
||||||
if vx.IsNil() || vy.IsNil() {
|
|
||||||
s.report(vx.IsNil() && vy.IsNil(), 0)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Cycle-detection for maps.
|
|
||||||
if eq, visited := s.curPtrs.Push(vx, vy); visited {
|
|
||||||
s.report(eq, reportByCycle)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
defer s.curPtrs.Pop(vx, vy)
|
|
||||||
|
|
||||||
// We combine and sort the two map keys so that we can perform the
|
|
||||||
// comparisons in a deterministic order.
|
|
||||||
step := MapIndex{&mapIndex{pathStep: pathStep{typ: t.Elem()}}}
|
|
||||||
for _, k := range value.SortKeys(append(vx.MapKeys(), vy.MapKeys()...)) {
|
|
||||||
step.vx = vx.MapIndex(k)
|
|
||||||
step.vy = vy.MapIndex(k)
|
|
||||||
step.key = k
|
|
||||||
if !step.vx.IsValid() && !step.vy.IsValid() {
|
|
||||||
// It is possible for both vx and vy to be invalid if the
|
|
||||||
// key contained a NaN value.
|
|
||||||
//
|
|
||||||
// Even with the ability to retrieve NaN keys in Go 1.12,
|
|
||||||
// there still isn't a sensible way to compare the values since
|
|
||||||
// a NaN key may map to multiple unordered values.
|
|
||||||
// The most reasonable way to compare NaNs would be to compare the
|
|
||||||
// set of values. However, this is impossible to do efficiently
|
|
||||||
// since set equality is provably an O(n^2) operation given only
|
|
||||||
// an Equal function. If we had a Less function or Hash function,
|
|
||||||
// this could be done in O(n*log(n)) or O(n), respectively.
|
|
||||||
//
|
|
||||||
// Rather than adding complex logic to deal with NaNs, make it
|
|
||||||
// the user's responsibility to compare such obscure maps.
|
|
||||||
const help = "consider providing a Comparer to compare the map"
|
|
||||||
panic(fmt.Sprintf("%#v has map key with NaNs\n%s", s.curPath, help))
|
|
||||||
}
|
|
||||||
s.compareAny(step)
|
|
||||||
}
|
|
||||||
}
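// Illustrative sketch (hypothetical caller code, not part of the original
// file): as the panic above suggests, supplying a Comparer for the whole map
// keeps Equal from descending into NaN-keyed entries. Assumes the caller
// imports "math", "reflect", "sort", and the public cmp package; mx and my
// are placeholder map[float64]string values.
//
//	opt := cmp.Comparer(func(x, y map[float64]string) bool {
//		if len(x) != len(y) {
//			return false
//		}
//		var nx, ny []string // values stored under NaN keys
//		for k, v := range x {
//			if math.IsNaN(k) {
//				nx = append(nx, v)
//			} else if yv, ok := y[k]; !ok || yv != v {
//				return false
//			}
//		}
//		for k, v := range y {
//			if math.IsNaN(k) {
//				ny = append(ny, v)
//			}
//		}
//		sort.Strings(nx)
//		sort.Strings(ny)
//		return reflect.DeepEqual(nx, ny) // NaN-keyed values compared as a multiset
//	})
//	equal := cmp.Equal(mx, my, opt)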
|
|
||||||
|
|
||||||
func (s *state) comparePtr(t reflect.Type, vx, vy reflect.Value) {
|
|
||||||
if vx.IsNil() || vy.IsNil() {
|
|
||||||
s.report(vx.IsNil() && vy.IsNil(), 0)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Cycle-detection for pointers.
|
|
||||||
if eq, visited := s.curPtrs.Push(vx, vy); visited {
|
|
||||||
s.report(eq, reportByCycle)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
defer s.curPtrs.Pop(vx, vy)
|
|
||||||
|
|
||||||
vx, vy = vx.Elem(), vy.Elem()
|
|
||||||
s.compareAny(Indirect{&indirect{pathStep{t.Elem(), vx, vy}}})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *state) compareInterface(t reflect.Type, vx, vy reflect.Value) {
|
|
||||||
if vx.IsNil() || vy.IsNil() {
|
|
||||||
s.report(vx.IsNil() && vy.IsNil(), 0)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
vx, vy = vx.Elem(), vy.Elem()
|
|
||||||
if vx.Type() != vy.Type() {
|
|
||||||
s.report(false, 0)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
s.compareAny(TypeAssertion{&typeAssertion{pathStep{vx.Type(), vx, vy}}})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *state) report(eq bool, rf resultFlags) {
|
|
||||||
if rf&reportByIgnore == 0 {
|
|
||||||
if eq {
|
|
||||||
s.result.NumSame++
|
|
||||||
rf |= reportEqual
|
|
||||||
} else {
|
|
||||||
s.result.NumDiff++
|
|
||||||
rf |= reportUnequal
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for _, r := range s.reporters {
|
|
||||||
r.Report(Result{flags: rf})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// recChecker tracks the state needed to periodically perform checks that
|
|
||||||
// user provided transformers are not stuck in an infinitely recursive cycle.
|
|
||||||
type recChecker struct{ next int }
|
|
||||||
|
|
||||||
// Check scans the Path for any recursive transformers and panics when any
|
|
||||||
// recursive transformers are detected. Note that the presence of a
|
|
||||||
// recursive Transformer does not necessarily imply an infinite cycle.
|
|
||||||
// As such, this check only activates after some minimal number of path steps.
|
|
||||||
func (rc *recChecker) Check(p Path) {
|
|
||||||
const minLen = 1 << 16
|
|
||||||
if rc.next == 0 {
|
|
||||||
rc.next = minLen
|
|
||||||
}
|
|
||||||
if len(p) < rc.next {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
rc.next <<= 1
|
|
||||||
|
|
||||||
// Check whether the same transformer has appeared at least twice.
|
|
||||||
var ss []string
|
|
||||||
m := map[Option]int{}
|
|
||||||
for _, ps := range p {
|
|
||||||
if t, ok := ps.(Transform); ok {
|
|
||||||
t := t.Option()
|
|
||||||
if m[t] == 1 { // Transformer was used exactly once before
|
|
||||||
tf := t.(*transformer).fnc.Type()
|
|
||||||
ss = append(ss, fmt.Sprintf("%v: %v => %v", t, tf.In(0), tf.Out(0)))
|
|
||||||
}
|
|
||||||
m[t]++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if len(ss) > 0 {
|
|
||||||
const warning = "recursive set of Transformers detected"
|
|
||||||
const help = "consider using cmpopts.AcyclicTransformer"
|
|
||||||
set := strings.Join(ss, "\n\t")
|
|
||||||
panic(fmt.Sprintf("%s:\n\t%s\n%s", warning, set, help))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// dynChecker tracks the state needed to periodically perform checks that
|
|
||||||
// user provided functions are symmetric and deterministic.
|
|
||||||
// The zero value is safe for immediate use.
|
|
||||||
type dynChecker struct{ curr, next int }
|
|
||||||
|
|
||||||
// Next increments the state and reports whether a check should be performed.
|
|
||||||
//
|
|
||||||
// Checks occur every Nth function call, where N is a triangular number:
|
|
||||||
// 0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 120 136 153 171 190 ...
|
|
||||||
// See https://en.wikipedia.org/wiki/Triangular_number
|
|
||||||
//
|
|
||||||
// This sequence ensures that the cost of checks drops significantly as
|
|
||||||
// the number of functions calls grows larger.
|
|
||||||
func (dc *dynChecker) Next() bool {
|
|
||||||
ok := dc.curr == dc.next
|
|
||||||
if ok {
|
|
||||||
dc.curr = 0
|
|
||||||
dc.next++
|
|
||||||
}
|
|
||||||
dc.curr++
|
|
||||||
return ok
|
|
||||||
}
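// Illustrative sketch (hypothetical helper, not part of the original file):
// exercising the triangular cadence documented above. Starting from the zero
// value, Next reports true on calls 1, 2, 4, 7, 11, 16, 22, 29, 37, 46, ...
// so the gap between consecutive checks grows by one each time.
func exampleDynCheckerCadence() (checked []int) {
	var dc dynChecker
	for call := 1; call <= 46; call++ {
		if dc.Next() {
			checked = append(checked, call)
		}
	}
	return checked // [1 2 4 7 11 16 22 29 37 46]
}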
|
|
||||||
|
|
||||||
// makeAddressable returns a value that is always addressable.
|
|
||||||
// It returns the input verbatim if it is already addressable,
|
|
||||||
// otherwise it creates a new value and returns an addressable copy.
|
|
||||||
func makeAddressable(v reflect.Value) reflect.Value {
|
|
||||||
if v.CanAddr() {
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
vc := reflect.New(v.Type()).Elem()
|
|
||||||
vc.Set(v)
|
|
||||||
return vc
|
|
||||||
}
|
|
|
@@ -1,15 +0,0 @@
|
||||||
// Copyright 2017, The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE.md file.
|
|
||||||
|
|
||||||
// +build purego
|
|
||||||
|
|
||||||
package cmp
|
|
||||||
|
|
||||||
import "reflect"
|
|
||||||
|
|
||||||
const supportExporters = false
|
|
||||||
|
|
||||||
func retrieveUnexportedField(reflect.Value, reflect.StructField, bool) reflect.Value {
|
|
||||||
panic("no support for forcibly accessing unexported fields")
|
|
||||||
}
|
|
|
@@ -1,35 +0,0 @@
|
||||||
// Copyright 2017, The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE.md file.
|
|
||||||
|
|
||||||
// +build !purego
|
|
||||||
|
|
||||||
package cmp
|
|
||||||
|
|
||||||
import (
|
|
||||||
"reflect"
|
|
||||||
"unsafe"
|
|
||||||
)
|
|
||||||
|
|
||||||
const supportExporters = true
|
|
||||||
|
|
||||||
// retrieveUnexportedField uses unsafe to forcibly retrieve any field from
|
|
||||||
// a struct such that the value has read-write permissions.
|
|
||||||
//
|
|
||||||
// The parent struct, v, must be addressable, while f must be a StructField
|
|
||||||
// describing the field to retrieve. If addr is false,
|
|
||||||
// then the returned value will be shallow copied to be non-addressable.
|
|
||||||
func retrieveUnexportedField(v reflect.Value, f reflect.StructField, addr bool) reflect.Value {
|
|
||||||
ve := reflect.NewAt(f.Type, unsafe.Pointer(uintptr(unsafe.Pointer(v.UnsafeAddr()))+f.Offset)).Elem()
|
|
||||||
if !addr {
|
|
||||||
// A field is addressable if and only if the struct is addressable.
|
|
||||||
// If the original parent value was not addressable, shallow copy the
|
|
||||||
// value to make it non-addressable to avoid leaking an implementation
|
|
||||||
// detail of how forcibly exporting a field works.
|
|
||||||
if ve.Kind() == reflect.Interface && ve.IsNil() {
|
|
||||||
return reflect.Zero(f.Type)
|
|
||||||
}
|
|
||||||
return reflect.ValueOf(ve.Interface()).Convert(f.Type)
|
|
||||||
}
|
|
||||||
return ve
|
|
||||||
}
|
|
|
@@ -1,17 +0,0 @@
|
||||||
// Copyright 2017, The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE.md file.
|
|
||||||
|
|
||||||
// +build !cmp_debug
|
|
||||||
|
|
||||||
package diff
|
|
||||||
|
|
||||||
var debug debugger
|
|
||||||
|
|
||||||
type debugger struct{}
|
|
||||||
|
|
||||||
func (debugger) Begin(_, _ int, f EqualFunc, _, _ *EditScript) EqualFunc {
|
|
||||||
return f
|
|
||||||
}
|
|
||||||
func (debugger) Update() {}
|
|
||||||
func (debugger) Finish() {}
|
|
|
@@ -1,122 +0,0 @@
|
||||||
// Copyright 2017, The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE.md file.
|
|
||||||
|
|
||||||
// +build cmp_debug
|
|
||||||
|
|
||||||
package diff
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// The algorithm can be seen running in real-time by enabling debugging:
|
|
||||||
// go test -tags=cmp_debug -v
|
|
||||||
//
|
|
||||||
// Example output:
|
|
||||||
// === RUN TestDifference/#34
|
|
||||||
// ┌───────────────────────────────┐
|
|
||||||
// │ \ · · · · · · · · · · · · · · │
|
|
||||||
// │ · # · · · · · · · · · · · · · │
|
|
||||||
// │ · \ · · · · · · · · · · · · · │
|
|
||||||
// │ · · \ · · · · · · · · · · · · │
|
|
||||||
// │ · · · X # · · · · · · · · · · │
|
|
||||||
// │ · · · # \ · · · · · · · · · · │
|
|
||||||
// │ · · · · · # # · · · · · · · · │
|
|
||||||
// │ · · · · · # \ · · · · · · · · │
|
|
||||||
// │ · · · · · · · \ · · · · · · · │
|
|
||||||
// │ · · · · · · · · \ · · · · · · │
|
|
||||||
// │ · · · · · · · · · \ · · · · · │
|
|
||||||
// │ · · · · · · · · · · \ · · # · │
|
|
||||||
// │ · · · · · · · · · · · \ # # · │
|
|
||||||
// │ · · · · · · · · · · · # # # · │
|
|
||||||
// │ · · · · · · · · · · # # # # · │
|
|
||||||
// │ · · · · · · · · · # # # # # · │
|
|
||||||
// │ · · · · · · · · · · · · · · \ │
|
|
||||||
// └───────────────────────────────┘
|
|
||||||
// [.Y..M.XY......YXYXY.|]
|
|
||||||
//
|
|
||||||
// The grid represents the edit-graph where the horizontal axis represents
|
|
||||||
// list X and the vertical axis represents list Y. The start of the two lists
|
|
||||||
// is the top-left, while the ends are the bottom-right. The '·' represents
|
|
||||||
// an unexplored node in the graph. The '\' indicates that the two symbols
|
|
||||||
// from list X and Y are equal. The 'X' indicates that two symbols are similar
|
|
||||||
// (but not exactly equal) to each other. The '#' indicates that the two symbols
|
|
||||||
// are different (and not similar). The algorithm traverses this graph trying to
|
|
||||||
// make the paths starting in the top-left and the bottom-right connect.
|
|
||||||
//
|
|
||||||
// The series of '.', 'X', 'Y', and 'M' characters at the bottom represents
|
|
||||||
// the currently established path from the forward and reverse searches,
|
|
||||||
// separated by a '|' character.
|
|
||||||
|
|
||||||
const (
|
|
||||||
updateDelay = 100 * time.Millisecond
|
|
||||||
finishDelay = 500 * time.Millisecond
|
|
||||||
ansiTerminal = true // ANSI escape codes used to move terminal cursor
|
|
||||||
)
|
|
||||||
|
|
||||||
var debug debugger
|
|
||||||
|
|
||||||
type debugger struct {
|
|
||||||
sync.Mutex
|
|
||||||
p1, p2 EditScript
|
|
||||||
fwdPath, revPath *EditScript
|
|
||||||
grid []byte
|
|
||||||
lines int
|
|
||||||
}
|
|
||||||
|
|
||||||
func (dbg *debugger) Begin(nx, ny int, f EqualFunc, p1, p2 *EditScript) EqualFunc {
|
|
||||||
dbg.Lock()
|
|
||||||
dbg.fwdPath, dbg.revPath = p1, p2
|
|
||||||
top := "┌─" + strings.Repeat("──", nx) + "┐\n"
|
|
||||||
row := "│ " + strings.Repeat("· ", nx) + "│\n"
|
|
||||||
btm := "└─" + strings.Repeat("──", nx) + "┘\n"
|
|
||||||
dbg.grid = []byte(top + strings.Repeat(row, ny) + btm)
|
|
||||||
dbg.lines = strings.Count(dbg.String(), "\n")
|
|
||||||
fmt.Print(dbg)
|
|
||||||
|
|
||||||
// Wrap the EqualFunc so that we can intercept each result.
|
|
||||||
return func(ix, iy int) (r Result) {
|
|
||||||
cell := dbg.grid[len(top)+iy*len(row):][len("│ ")+len("· ")*ix:][:len("·")]
|
|
||||||
for i := range cell {
|
|
||||||
cell[i] = 0 // Zero out the multiple bytes of UTF-8 middle-dot
|
|
||||||
}
|
|
||||||
switch r = f(ix, iy); {
|
|
||||||
case r.Equal():
|
|
||||||
cell[0] = '\\'
|
|
||||||
case r.Similar():
|
|
||||||
cell[0] = 'X'
|
|
||||||
default:
|
|
||||||
cell[0] = '#'
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (dbg *debugger) Update() {
|
|
||||||
dbg.print(updateDelay)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (dbg *debugger) Finish() {
|
|
||||||
dbg.print(finishDelay)
|
|
||||||
dbg.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (dbg *debugger) String() string {
|
|
||||||
dbg.p1, dbg.p2 = *dbg.fwdPath, dbg.p2[:0]
|
|
||||||
for i := len(*dbg.revPath) - 1; i >= 0; i-- {
|
|
||||||
dbg.p2 = append(dbg.p2, (*dbg.revPath)[i])
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("%s[%v|%v]\n\n", dbg.grid, dbg.p1, dbg.p2)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (dbg *debugger) print(d time.Duration) {
|
|
||||||
if ansiTerminal {
|
|
||||||
fmt.Printf("\x1b[%dA", dbg.lines) // Reset terminal cursor
|
|
||||||
}
|
|
||||||
fmt.Print(dbg)
|
|
||||||
time.Sleep(d)
|
|
||||||
}
|
|
|
@@ -1,392 +0,0 @@
|
||||||
// Copyright 2017, The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE.md file.
|
|
||||||
|
|
||||||
// Package diff implements an algorithm for producing edit-scripts.
|
|
||||||
// The edit-script is a sequence of operations needed to transform one list
|
|
||||||
// of symbols into another (or vice-versa). The edits allowed are insertions,
|
|
||||||
// deletions, and modifications. The summation of all edits is called the
|
|
||||||
// Levenshtein distance as this problem is well-known in computer science.
|
|
||||||
//
|
|
||||||
// This package prioritizes performance over accuracy. That is, the run time
|
|
||||||
// is more important than obtaining a minimal Levenshtein distance.
|
|
||||||
package diff
|
|
||||||
|
|
||||||
import (
|
|
||||||
"math/rand"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/google/go-cmp/cmp/internal/flags"
|
|
||||||
)
|
|
||||||
|
|
||||||
// EditType represents a single operation within an edit-script.
|
|
||||||
type EditType uint8
|
|
||||||
|
|
||||||
const (
|
|
||||||
// Identity indicates that a symbol pair is identical in both list X and Y.
|
|
||||||
Identity EditType = iota
|
|
||||||
// UniqueX indicates that a symbol only exists in X and not Y.
|
|
||||||
UniqueX
|
|
||||||
// UniqueY indicates that a symbol only exists in Y and not X.
|
|
||||||
UniqueY
|
|
||||||
// Modified indicates that a symbol pair is a modification of each other.
|
|
||||||
Modified
|
|
||||||
)
|
|
||||||
|
|
||||||
// EditScript represents the series of differences between two lists.
|
|
||||||
type EditScript []EditType
|
|
||||||
|
|
||||||
// String returns a human-readable string representing the edit-script where
|
|
||||||
// Identity, UniqueX, UniqueY, and Modified are represented by the
|
|
||||||
// '.', 'X', 'Y', and 'M' characters, respectively.
|
|
||||||
func (es EditScript) String() string {
|
|
||||||
b := make([]byte, len(es))
|
|
||||||
for i, e := range es {
|
|
||||||
switch e {
|
|
||||||
case Identity:
|
|
||||||
b[i] = '.'
|
|
||||||
case UniqueX:
|
|
||||||
b[i] = 'X'
|
|
||||||
case UniqueY:
|
|
||||||
b[i] = 'Y'
|
|
||||||
case Modified:
|
|
||||||
b[i] = 'M'
|
|
||||||
default:
|
|
||||||
panic("invalid edit-type")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return string(b)
|
|
||||||
}
|
|
||||||
|
|
||||||
// stats returns a histogram of the number of each type of edit operation.
|
|
||||||
func (es EditScript) stats() (s struct{ NI, NX, NY, NM int }) {
|
|
||||||
for _, e := range es {
|
|
||||||
switch e {
|
|
||||||
case Identity:
|
|
||||||
s.NI++
|
|
||||||
case UniqueX:
|
|
||||||
s.NX++
|
|
||||||
case UniqueY:
|
|
||||||
s.NY++
|
|
||||||
case Modified:
|
|
||||||
s.NM++
|
|
||||||
default:
|
|
||||||
panic("invalid edit-type")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Dist is the Levenshtein distance and is guaranteed to be 0 if and only if
|
|
||||||
// lists X and Y are equal.
|
|
||||||
func (es EditScript) Dist() int { return len(es) - es.stats().NI }
|
|
||||||
|
|
||||||
// LenX is the length of the X list.
|
|
||||||
func (es EditScript) LenX() int { return len(es) - es.stats().NY }
|
|
||||||
|
|
||||||
// LenY is the length of the Y list.
|
|
||||||
func (es EditScript) LenY() int { return len(es) - es.stats().NX }
|
|
||||||
|
|
||||||
// EqualFunc reports whether the symbols at indexes ix and iy are equal.
|
|
||||||
// When called by Difference, the index is guaranteed to be within nx and ny.
|
|
||||||
type EqualFunc func(ix int, iy int) Result
|
|
||||||
|
|
||||||
// Result is the result of comparison.
|
|
||||||
// NumSame is the number of sub-elements that are equal.
|
|
||||||
// NumDiff is the number of sub-elements that are not equal.
|
|
||||||
type Result struct{ NumSame, NumDiff int }
|
|
||||||
|
|
||||||
// BoolResult returns a Result that is either Equal or not Equal.
|
|
||||||
func BoolResult(b bool) Result {
|
|
||||||
if b {
|
|
||||||
return Result{NumSame: 1} // Equal, Similar
|
|
||||||
} else {
|
|
||||||
return Result{NumDiff: 2} // Not Equal, not Similar
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Equal indicates whether the symbols are equal. Two symbols are equal
|
|
||||||
// if and only if NumDiff == 0. If Equal, then they are also Similar.
|
|
||||||
func (r Result) Equal() bool { return r.NumDiff == 0 }
|
|
||||||
|
|
||||||
// Similar indicates whether two symbols are similar and may be represented
|
|
||||||
// by using the Modified type. As a special case, we consider binary comparisons
|
|
||||||
// (i.e., those that return Result{1, 0} or Result{0, 1}) to be similar.
|
|
||||||
//
|
|
||||||
// The exact ratio of NumSame to NumDiff to determine similarity may change.
|
|
||||||
func (r Result) Similar() bool {
|
|
||||||
// Use NumSame+1 to offset NumSame so that binary comparisons are similar.
|
|
||||||
return r.NumSame+1 >= r.NumDiff
|
|
||||||
}
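// Illustrative values (not part of the original file) showing how the
// NumSame+1 >= NumDiff ratio plays out:
//	Result{NumSame: 1, NumDiff: 0}.Similar() == true  // binary "equal"
//	Result{NumSame: 0, NumDiff: 1}.Similar() == true  // binary "not equal"
//	Result{NumSame: 0, NumDiff: 2}.Similar() == false
//	Result{NumSame: 2, NumDiff: 3}.Similar() == true  // 2+1 >= 3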
|
|
||||||
|
|
||||||
var randInt = rand.New(rand.NewSource(time.Now().Unix())).Intn(2)
|
|
||||||
|
|
||||||
// Difference reports whether two lists of lengths nx and ny are equal
|
|
||||||
// given the definition of equality provided as f.
|
|
||||||
//
|
|
||||||
// This function returns an edit-script, which is a sequence of operations
|
|
||||||
// needed to convert one list into the other. The following invariants for
|
|
||||||
// the edit-script are maintained:
|
|
||||||
// • eq == (es.Dist()==0)
|
|
||||||
// • nx == es.LenX()
|
|
||||||
// • ny == es.LenY()
|
|
||||||
//
|
|
||||||
// This algorithm is not guaranteed to be an optimal solution (i.e., one that
|
|
||||||
// produces an edit-script with a minimal Levenshtein distance). This algorithm
|
|
||||||
// favors performance over optimality. The exact output is not guaranteed to
|
|
||||||
// be stable and may change over time.
|
|
||||||
func Difference(nx, ny int, f EqualFunc) (es EditScript) {
|
|
||||||
// This algorithm is based on traversing what is known as an "edit-graph".
|
|
||||||
// See Figure 1 from "An O(ND) Difference Algorithm and Its Variations"
|
|
||||||
// by Eugene W. Myers. Since D can be as large as N itself, this is
|
|
||||||
// effectively O(N^2). Unlike the algorithm from that paper, we are not
|
|
||||||
// interested in the optimal path, but at least some "decent" path.
|
|
||||||
//
|
|
||||||
// For example, let X and Y be lists of symbols:
|
|
||||||
// X = [A B C A B B A]
|
|
||||||
// Y = [C B A B A C]
|
|
||||||
//
|
|
||||||
// The edit-graph can be drawn as the following:
|
|
||||||
// A B C A B B A
|
|
||||||
// ┌─────────────┐
|
|
||||||
// C │_|_|\|_|_|_|_│ 0
|
|
||||||
// B │_|\|_|_|\|\|_│ 1
|
|
||||||
// A │\|_|_|\|_|_|\│ 2
|
|
||||||
// B │_|\|_|_|\|\|_│ 3
|
|
||||||
// A │\|_|_|\|_|_|\│ 4
|
|
||||||
// C │ | |\| | | | │ 5
|
|
||||||
// └─────────────┘ 6
|
|
||||||
// 0 1 2 3 4 5 6 7
|
|
||||||
//
|
|
||||||
// List X is written along the horizontal axis, while list Y is written
|
|
||||||
// along the vertical axis. At any point on this grid, if the symbol in
|
|
||||||
// list X matches the corresponding symbol in list Y, then a '\' is drawn.
|
|
||||||
// The goal of any minimal edit-script algorithm is to find a path from the
|
|
||||||
// top-left corner to the bottom-right corner, while traveling through the
|
|
||||||
// fewest horizontal or vertical edges.
|
|
||||||
// A horizontal edge is equivalent to inserting a symbol from list X.
|
|
||||||
// A vertical edge is equivalent to inserting a symbol from list Y.
|
|
||||||
// A diagonal edge is equivalent to a matching symbol between both X and Y.
|
|
||||||
|
|
||||||
// To ensure flexibility in changing the algorithm in the future,
|
|
||||||
// introduce some degree of deliberate instability.
|
|
||||||
// This is achieved by fiddling the zigzag iterator to start searching
|
|
||||||
// the graph from the bottom-right rather than the top-left.
|
|
||||||
// The result may differ depending on the starting search location,
|
|
||||||
// but still produces a valid edit script.
|
|
||||||
zigzagInit := randInt // either 0 or 1
|
|
||||||
if flags.Deterministic {
|
|
||||||
zigzagInit = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// Invariants:
|
|
||||||
// • 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx
|
|
||||||
// • 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny
|
|
||||||
//
|
|
||||||
// In general:
|
|
||||||
// • fwdFrontier.X < revFrontier.X
|
|
||||||
// • fwdFrontier.Y < revFrontier.Y
|
|
||||||
// Unless, it is time for the algorithm to terminate.
|
|
||||||
fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)}
|
|
||||||
revPath := path{-1, point{nx, ny}, make(EditScript, 0)}
|
|
||||||
fwdFrontier := fwdPath.point // Forward search frontier
|
|
||||||
revFrontier := revPath.point // Reverse search frontier
|
|
||||||
|
|
||||||
// Search budget bounds the cost of searching for better paths.
|
|
||||||
// The longest sequence of non-matching symbols that can be tolerated is
|
|
||||||
// approximately the square-root of the search budget.
|
|
||||||
searchBudget := 4 * (nx + ny) // O(n)
|
|
||||||
|
|
||||||
// The algorithm below is a greedy, meet-in-the-middle algorithm for
|
|
||||||
// computing sub-optimal edit-scripts between two lists.
|
|
||||||
//
|
|
||||||
// The algorithm is approximately as follows:
|
|
||||||
// • Searching for differences switches back-and-forth between
|
|
||||||
// a search that starts at the beginning (the top-left corner), and
|
|
||||||
// a search that starts at the end (the bottom-right corner). The goal of
|
|
||||||
// the search is to connect with the search from the opposite corner.
|
|
||||||
// • As we search, we build a path in a greedy manner, where the first
|
|
||||||
// match seen is added to the path (this is sub-optimal, but provides a
|
|
||||||
// decent result in practice). When matches are found, we try the next pair
|
|
||||||
// of symbols in the lists and follow all matches as far as possible.
|
|
||||||
// • When searching for matches, we search along a diagonal going through
|
|
||||||
// through the "frontier" point. If no matches are found, we advance the
|
|
||||||
// frontier towards the opposite corner.
|
|
||||||
// • This algorithm terminates when either the X coordinates or the
|
|
||||||
// Y coordinates of the forward and reverse frontier points intersect.
|
|
||||||
//
|
|
||||||
// This algorithm is correct even if searching only in the forward direction
|
|
||||||
// or in the reverse direction. We do both because it is commonly observed
|
|
||||||
// that two lists differ because elements were added to the front
|
|
||||||
// or end of the other list.
|
|
||||||
//
|
|
||||||
// Running the tests with the "cmp_debug" build tag prints a visualization
|
|
||||||
// of the algorithm running in real-time. This is educational for
|
|
||||||
// understanding how the algorithm works. See debug_enable.go.
|
|
||||||
f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es)
|
|
||||||
for {
|
|
||||||
// Forward search from the beginning.
|
|
||||||
if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
for stop1, stop2, i := false, false, zigzagInit; !(stop1 && stop2) && searchBudget > 0; i++ {
|
|
||||||
// Search in a diagonal pattern for a match.
|
|
||||||
z := zigzag(i)
|
|
||||||
p := point{fwdFrontier.X + z, fwdFrontier.Y - z}
|
|
||||||
switch {
|
|
||||||
case p.X >= revPath.X || p.Y < fwdPath.Y:
|
|
||||||
stop1 = true // Hit top-right corner
|
|
||||||
case p.Y >= revPath.Y || p.X < fwdPath.X:
|
|
||||||
stop2 = true // Hit bottom-left corner
|
|
||||||
case f(p.X, p.Y).Equal():
|
|
||||||
// Match found, so connect the path to this point.
|
|
||||||
fwdPath.connect(p, f)
|
|
||||||
fwdPath.append(Identity)
|
|
||||||
// Follow sequence of matches as far as possible.
|
|
||||||
for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
|
|
||||||
if !f(fwdPath.X, fwdPath.Y).Equal() {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
fwdPath.append(Identity)
|
|
||||||
}
|
|
||||||
fwdFrontier = fwdPath.point
|
|
||||||
stop1, stop2 = true, true
|
|
||||||
default:
|
|
||||||
searchBudget-- // Match not found
|
|
||||||
}
|
|
||||||
debug.Update()
|
|
||||||
}
|
|
||||||
// Advance the frontier towards reverse point.
|
|
||||||
if revPath.X-fwdFrontier.X >= revPath.Y-fwdFrontier.Y {
|
|
||||||
fwdFrontier.X++
|
|
||||||
} else {
|
|
||||||
fwdFrontier.Y++
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reverse search from the end.
|
|
||||||
if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
|
|
||||||
// Search in a diagonal pattern for a match.
|
|
||||||
z := zigzag(i)
|
|
||||||
p := point{revFrontier.X - z, revFrontier.Y + z}
|
|
||||||
switch {
|
|
||||||
case fwdPath.X >= p.X || revPath.Y < p.Y:
|
|
||||||
stop1 = true // Hit bottom-left corner
|
|
||||||
case fwdPath.Y >= p.Y || revPath.X < p.X:
|
|
||||||
stop2 = true // Hit top-right corner
|
|
||||||
case f(p.X-1, p.Y-1).Equal():
|
|
||||||
// Match found, so connect the path to this point.
|
|
||||||
revPath.connect(p, f)
|
|
||||||
revPath.append(Identity)
|
|
||||||
// Follow sequence of matches as far as possible.
|
|
||||||
for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
|
|
||||||
if !f(revPath.X-1, revPath.Y-1).Equal() {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
revPath.append(Identity)
|
|
||||||
}
|
|
||||||
revFrontier = revPath.point
|
|
||||||
stop1, stop2 = true, true
|
|
||||||
default:
|
|
||||||
searchBudget-- // Match not found
|
|
||||||
}
|
|
||||||
debug.Update()
|
|
||||||
}
|
|
||||||
// Advance the frontier towards forward point.
|
|
||||||
if revFrontier.X-fwdPath.X >= revFrontier.Y-fwdPath.Y {
|
|
||||||
revFrontier.X--
|
|
||||||
} else {
|
|
||||||
revFrontier.Y--
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Join the forward and reverse paths and then append the reverse path.
|
|
||||||
fwdPath.connect(revPath.point, f)
|
|
||||||
for i := len(revPath.es) - 1; i >= 0; i-- {
|
|
||||||
t := revPath.es[i]
|
|
||||||
revPath.es = revPath.es[:i]
|
|
||||||
fwdPath.append(t)
|
|
||||||
}
|
|
||||||
debug.Finish()
|
|
||||||
return fwdPath.es
|
|
||||||
}
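// Illustrative sketch (hypothetical helper, not part of the original file):
// computing an edit-script for the two example lists from the comment above,
// using BoolResult to adapt a plain equality test into an EqualFunc.
func exampleDifference() EditScript {
	x := []string{"A", "B", "C", "A", "B", "B", "A"}
	y := []string{"C", "B", "A", "B", "A", "C"}
	es := Difference(len(x), len(y), func(ix, iy int) Result {
		return BoolResult(x[ix] == y[iy])
	})
	// The documented invariants always hold:
	//	es.LenX() == len(x) && es.LenY() == len(y)
	// but the exact sequence of '.', 'X', 'Y', 'M' symbols may vary between
	// runs because the starting search corner is deliberately randomized.
	return es
}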
|
|
||||||
|
|
||||||
type path struct {
|
|
||||||
dir int // +1 if forward, -1 if reverse
|
|
||||||
point // Leading point of the EditScript path
|
|
||||||
es EditScript
|
|
||||||
}
|
|
||||||
|
|
||||||
// connect appends any necessary Identity, Modified, UniqueX, or UniqueY types
|
|
||||||
// to the edit-script to connect p.point to dst.
|
|
||||||
func (p *path) connect(dst point, f EqualFunc) {
|
|
||||||
if p.dir > 0 {
|
|
||||||
// Connect in forward direction.
|
|
||||||
for dst.X > p.X && dst.Y > p.Y {
|
|
||||||
switch r := f(p.X, p.Y); {
|
|
||||||
case r.Equal():
|
|
||||||
p.append(Identity)
|
|
||||||
case r.Similar():
|
|
||||||
p.append(Modified)
|
|
||||||
case dst.X-p.X >= dst.Y-p.Y:
|
|
||||||
p.append(UniqueX)
|
|
||||||
default:
|
|
||||||
p.append(UniqueY)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for dst.X > p.X {
|
|
||||||
p.append(UniqueX)
|
|
||||||
}
|
|
||||||
for dst.Y > p.Y {
|
|
||||||
p.append(UniqueY)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// Connect in reverse direction.
|
|
||||||
for p.X > dst.X && p.Y > dst.Y {
|
|
||||||
switch r := f(p.X-1, p.Y-1); {
|
|
||||||
case r.Equal():
|
|
||||||
p.append(Identity)
|
|
||||||
case r.Similar():
|
|
||||||
p.append(Modified)
|
|
||||||
case p.Y-dst.Y >= p.X-dst.X:
|
|
||||||
p.append(UniqueY)
|
|
||||||
default:
|
|
||||||
p.append(UniqueX)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for p.X > dst.X {
|
|
||||||
p.append(UniqueX)
|
|
||||||
}
|
|
||||||
for p.Y > dst.Y {
|
|
||||||
p.append(UniqueY)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *path) append(t EditType) {
|
|
||||||
p.es = append(p.es, t)
|
|
||||||
switch t {
|
|
||||||
case Identity, Modified:
|
|
||||||
p.add(p.dir, p.dir)
|
|
||||||
case UniqueX:
|
|
||||||
p.add(p.dir, 0)
|
|
||||||
case UniqueY:
|
|
||||||
p.add(0, p.dir)
|
|
||||||
}
|
|
||||||
debug.Update()
|
|
||||||
}
|
|
||||||
|
|
||||||
type point struct{ X, Y int }
|
|
||||||
|
|
||||||
func (p *point) add(dx, dy int) { p.X += dx; p.Y += dy }
|
|
||||||
|
|
||||||
// zigzag maps a consecutive sequence of integers to a zig-zag sequence.
|
|
||||||
// [0 1 2 3 4 5 ...] => [0 -1 +1 -2 +2 ...]
|
|
||||||
func zigzag(x int) int {
|
|
||||||
if x&1 != 0 {
|
|
||||||
x = ^x
|
|
||||||
}
|
|
||||||
return x >> 1
|
|
||||||
}
|
|
|
@@ -1,9 +0,0 @@
|
||||||
// Copyright 2019, The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE.md file.
|
|
||||||
|
|
||||||
package flags
|
|
||||||
|
|
||||||
// Deterministic controls whether the output of Diff should be deterministic.
|
|
||||||
// This is only used for testing.
|
|
||||||
var Deterministic bool
|
|
|
@@ -1,10 +0,0 @@
|
||||||
// Copyright 2019, The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE.md file.
|
|
||||||
|
|
||||||
// +build !go1.10
|
|
||||||
|
|
||||||
package flags
|
|
||||||
|
|
||||||
// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10.
|
|
||||||
const AtLeastGo110 = false
|
|
|
@@ -1,10 +0,0 @@
|
||||||
// Copyright 2019, The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE.md file.
|
|
||||||
|
|
||||||
// +build go1.10
|
|
||||||
|
|
||||||
package flags
|
|
||||||
|
|
||||||
// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10.
|
|
||||||
const AtLeastGo110 = true
|
|
|
@@ -1,99 +0,0 @@
|
||||||
// Copyright 2017, The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE.md file.
|
|
||||||
|
|
||||||
// Package function provides functionality for identifying function types.
|
|
||||||
package function
|
|
||||||
|
|
||||||
import (
|
|
||||||
"reflect"
|
|
||||||
"regexp"
|
|
||||||
"runtime"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
type funcType int
|
|
||||||
|
|
||||||
const (
|
|
||||||
_ funcType = iota
|
|
||||||
|
|
||||||
tbFunc // func(T) bool
|
|
||||||
ttbFunc // func(T, T) bool
|
|
||||||
trbFunc // func(T, R) bool
|
|
||||||
tibFunc // func(T, I) bool
|
|
||||||
trFunc // func(T) R
|
|
||||||
|
|
||||||
Equal = ttbFunc // func(T, T) bool
|
|
||||||
EqualAssignable = tibFunc // func(T, I) bool; encapsulates func(T, T) bool
|
|
||||||
Transformer = trFunc // func(T) R
|
|
||||||
ValueFilter = ttbFunc // func(T, T) bool
|
|
||||||
Less = ttbFunc // func(T, T) bool
|
|
||||||
ValuePredicate = tbFunc // func(T) bool
|
|
||||||
KeyValuePredicate = trbFunc // func(T, R) bool
|
|
||||||
)
|
|
||||||
|
|
||||||
var boolType = reflect.TypeOf(true)
|
|
||||||
|
|
||||||
// IsType reports whether the reflect.Type is of the specified function type.
|
|
||||||
func IsType(t reflect.Type, ft funcType) bool {
|
|
||||||
if t == nil || t.Kind() != reflect.Func || t.IsVariadic() {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
ni, no := t.NumIn(), t.NumOut()
|
|
||||||
switch ft {
|
|
||||||
case tbFunc: // func(T) bool
|
|
||||||
if ni == 1 && no == 1 && t.Out(0) == boolType {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
case ttbFunc: // func(T, T) bool
|
|
||||||
if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
case trbFunc: // func(T, R) bool
|
|
||||||
if ni == 2 && no == 1 && t.Out(0) == boolType {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
case tibFunc: // func(T, I) bool
|
|
||||||
if ni == 2 && no == 1 && t.In(0).AssignableTo(t.In(1)) && t.Out(0) == boolType {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
case trFunc: // func(T) R
|
|
||||||
if ni == 1 && no == 1 {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
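// Illustrative sketch (hypothetical helper, not part of the original file):
func exampleIsType() {
	_ = IsType(reflect.TypeOf(func(x, y int) bool { return x < y }), Equal)        // true: func(T, T) bool
	_ = IsType(reflect.TypeOf(func(x int) string { return "" }), Transformer)      // true: func(T) R
	_ = IsType(reflect.TypeOf(func(xs ...int) bool { return len(xs) > 0 }), Equal) // false: variadic
}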
|
|
||||||
|
|
||||||
var lastIdentRx = regexp.MustCompile(`[_\p{L}][_\p{L}\p{N}]*$`)
|
|
||||||
|
|
||||||
// NameOf returns the name of the function value.
|
|
||||||
func NameOf(v reflect.Value) string {
|
|
||||||
fnc := runtime.FuncForPC(v.Pointer())
|
|
||||||
if fnc == nil {
|
|
||||||
return "<unknown>"
|
|
||||||
}
|
|
||||||
fullName := fnc.Name() // e.g., "long/path/name/mypkg.(*MyType).(long/path/name/mypkg.myMethod)-fm"
|
|
||||||
|
|
||||||
// Method closures have a "-fm" suffix.
|
|
||||||
fullName = strings.TrimSuffix(fullName, "-fm")
|
|
||||||
|
|
||||||
var name string
|
|
||||||
for len(fullName) > 0 {
|
|
||||||
inParen := strings.HasSuffix(fullName, ")")
|
|
||||||
fullName = strings.TrimSuffix(fullName, ")")
|
|
||||||
|
|
||||||
s := lastIdentRx.FindString(fullName)
|
|
||||||
if s == "" {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
name = s + "." + name
|
|
||||||
fullName = strings.TrimSuffix(fullName, s)
|
|
||||||
|
|
||||||
if i := strings.LastIndexByte(fullName, '('); inParen && i >= 0 {
|
|
||||||
fullName = fullName[:i]
|
|
||||||
}
|
|
||||||
fullName = strings.TrimSuffix(fullName, ".")
|
|
||||||
}
|
|
||||||
return strings.TrimSuffix(name, ".")
|
|
||||||
}
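// Illustrative sketch (hypothetical helper, not part of the original file):
// for an ordinary top-level function the reconstructed name is the package
// name plus the identifier.
func exampleNameOf() string {
	return NameOf(reflect.ValueOf(strings.HasPrefix)) // "strings.HasPrefix"
}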
|
|
|
@@ -1,157 +0,0 @@
|
||||||
// Copyright 2020, The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE.md file.
|
|
||||||
|
|
||||||
package value
|
|
||||||
|
|
||||||
import (
|
|
||||||
"reflect"
|
|
||||||
"strconv"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TypeString is nearly identical to reflect.Type.String,
|
|
||||||
// but has an additional option to specify that full type names be used.
|
|
||||||
func TypeString(t reflect.Type, qualified bool) string {
|
|
||||||
return string(appendTypeName(nil, t, qualified, false))
|
|
||||||
}
|
|
||||||
|
|
||||||
func appendTypeName(b []byte, t reflect.Type, qualified, elideFunc bool) []byte {
|
|
||||||
// BUG: Go reflection provides no way to disambiguate two named types
|
|
||||||
// of the same name and within the same package,
|
|
||||||
// but declared within the namespace of different functions.
|
|
||||||
|
|
||||||
// Named type.
|
|
||||||
if t.Name() != "" {
|
|
||||||
if qualified && t.PkgPath() != "" {
|
|
||||||
b = append(b, '"')
|
|
||||||
b = append(b, t.PkgPath()...)
|
|
||||||
b = append(b, '"')
|
|
||||||
b = append(b, '.')
|
|
||||||
b = append(b, t.Name()...)
|
|
||||||
} else {
|
|
||||||
b = append(b, t.String()...)
|
|
||||||
}
|
|
||||||
return b
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unnamed type.
|
|
||||||
switch k := t.Kind(); k {
|
|
||||||
case reflect.Bool, reflect.String, reflect.UnsafePointer,
|
|
||||||
reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
|
|
||||||
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
|
|
||||||
reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
|
|
||||||
b = append(b, k.String()...)
|
|
||||||
case reflect.Chan:
|
|
||||||
if t.ChanDir() == reflect.RecvDir {
|
|
||||||
b = append(b, "<-"...)
|
|
||||||
}
|
|
||||||
b = append(b, "chan"...)
|
|
||||||
if t.ChanDir() == reflect.SendDir {
|
|
||||||
b = append(b, "<-"...)
|
|
||||||
}
|
|
||||||
b = append(b, ' ')
|
|
||||||
b = appendTypeName(b, t.Elem(), qualified, false)
|
|
||||||
case reflect.Func:
|
|
||||||
if !elideFunc {
|
|
||||||
b = append(b, "func"...)
|
|
||||||
}
|
|
||||||
b = append(b, '(')
|
|
||||||
for i := 0; i < t.NumIn(); i++ {
|
|
||||||
if i > 0 {
|
|
||||||
b = append(b, ", "...)
|
|
||||||
}
|
|
||||||
if i == t.NumIn()-1 && t.IsVariadic() {
|
|
||||||
b = append(b, "..."...)
|
|
||||||
b = appendTypeName(b, t.In(i).Elem(), qualified, false)
|
|
||||||
} else {
|
|
||||||
b = appendTypeName(b, t.In(i), qualified, false)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
b = append(b, ')')
|
|
||||||
switch t.NumOut() {
|
|
||||||
case 0:
|
|
||||||
// Do nothing
|
|
||||||
case 1:
|
|
||||||
b = append(b, ' ')
|
|
||||||
b = appendTypeName(b, t.Out(0), qualified, false)
|
|
||||||
default:
|
|
||||||
b = append(b, " ("...)
|
|
||||||
for i := 0; i < t.NumOut(); i++ {
|
|
||||||
if i > 0 {
|
|
||||||
b = append(b, ", "...)
|
|
||||||
}
|
|
||||||
b = appendTypeName(b, t.Out(i), qualified, false)
|
|
||||||
}
|
|
||||||
b = append(b, ')')
|
|
||||||
}
|
|
||||||
case reflect.Struct:
|
|
||||||
b = append(b, "struct{ "...)
|
|
||||||
for i := 0; i < t.NumField(); i++ {
|
|
||||||
if i > 0 {
|
|
||||||
b = append(b, "; "...)
|
|
||||||
}
|
|
||||||
sf := t.Field(i)
|
|
||||||
if !sf.Anonymous {
|
|
||||||
if qualified && sf.PkgPath != "" {
|
|
||||||
b = append(b, '"')
|
|
||||||
b = append(b, sf.PkgPath...)
|
|
||||||
b = append(b, '"')
|
|
||||||
b = append(b, '.')
|
|
||||||
}
|
|
||||||
b = append(b, sf.Name...)
|
|
||||||
b = append(b, ' ')
|
|
||||||
}
|
|
||||||
b = appendTypeName(b, sf.Type, qualified, false)
|
|
||||||
if sf.Tag != "" {
|
|
||||||
b = append(b, ' ')
|
|
||||||
b = strconv.AppendQuote(b, string(sf.Tag))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if b[len(b)-1] == ' ' {
|
|
||||||
b = b[:len(b)-1]
|
|
||||||
} else {
|
|
||||||
b = append(b, ' ')
|
|
||||||
}
|
|
||||||
b = append(b, '}')
|
|
||||||
case reflect.Slice, reflect.Array:
|
|
||||||
b = append(b, '[')
|
|
||||||
if k == reflect.Array {
|
|
||||||
b = strconv.AppendUint(b, uint64(t.Len()), 10)
|
|
||||||
}
|
|
||||||
b = append(b, ']')
|
|
||||||
b = appendTypeName(b, t.Elem(), qualified, false)
|
|
||||||
case reflect.Map:
|
|
||||||
b = append(b, "map["...)
|
|
||||||
b = appendTypeName(b, t.Key(), qualified, false)
|
|
||||||
b = append(b, ']')
|
|
||||||
b = appendTypeName(b, t.Elem(), qualified, false)
|
|
||||||
case reflect.Ptr:
|
|
||||||
b = append(b, '*')
|
|
||||||
b = appendTypeName(b, t.Elem(), qualified, false)
|
|
||||||
case reflect.Interface:
|
|
||||||
b = append(b, "interface{ "...)
|
|
||||||
for i := 0; i < t.NumMethod(); i++ {
|
|
||||||
if i > 0 {
|
|
||||||
b = append(b, "; "...)
|
|
||||||
}
|
|
||||||
m := t.Method(i)
|
|
||||||
if qualified && m.PkgPath != "" {
|
|
||||||
b = append(b, '"')
|
|
||||||
b = append(b, m.PkgPath...)
|
|
||||||
b = append(b, '"')
|
|
||||||
b = append(b, '.')
|
|
||||||
}
|
|
||||||
b = append(b, m.Name...)
|
|
||||||
b = appendTypeName(b, m.Type, qualified, true)
|
|
||||||
}
|
|
||||||
if b[len(b)-1] == ' ' {
|
|
||||||
b = b[:len(b)-1]
|
|
||||||
} else {
|
|
||||||
b = append(b, ' ')
|
|
||||||
}
|
|
||||||
b = append(b, '}')
|
|
||||||
default:
|
|
||||||
panic("invalid kind: " + k.String())
|
|
||||||
}
|
|
||||||
return b
|
|
||||||
}
|
|
|
@@ -1,33 +0,0 @@
|
||||||
// Copyright 2018, The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE.md file.
|
|
||||||
|
|
||||||
// +build purego
|
|
||||||
|
|
||||||
package value
|
|
||||||
|
|
||||||
import "reflect"
|
|
||||||
|
|
||||||
// Pointer is an opaque typed pointer and is guaranteed to be comparable.
|
|
||||||
type Pointer struct {
|
|
||||||
p uintptr
|
|
||||||
t reflect.Type
|
|
||||||
}
|
|
||||||
|
|
||||||
// PointerOf returns a Pointer from v, which must be a
|
|
||||||
// reflect.Ptr, reflect.Slice, or reflect.Map.
|
|
||||||
func PointerOf(v reflect.Value) Pointer {
|
|
||||||
// NOTE: Storing a pointer as an uintptr is technically incorrect as it
|
|
||||||
// assumes that the GC implementation does not use a moving collector.
|
|
||||||
return Pointer{v.Pointer(), v.Type()}
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsNil reports whether the pointer is nil.
|
|
||||||
func (p Pointer) IsNil() bool {
|
|
||||||
return p.p == 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// Uintptr returns the pointer as a uintptr.
|
|
||||||
func (p Pointer) Uintptr() uintptr {
|
|
||||||
return p.p
|
|
||||||
}
|
|
|
@@ -1,36 +0,0 @@
|
||||||
// Copyright 2018, The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE.md file.
|
|
||||||
|
|
||||||
// +build !purego
|
|
||||||
|
|
||||||
package value
|
|
||||||
|
|
||||||
import (
|
|
||||||
"reflect"
|
|
||||||
"unsafe"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Pointer is an opaque typed pointer and is guaranteed to be comparable.
|
|
||||||
type Pointer struct {
|
|
||||||
p unsafe.Pointer
|
|
||||||
t reflect.Type
|
|
||||||
}
|
|
||||||
|
|
||||||
// PointerOf returns a Pointer from v, which must be a
|
|
||||||
// reflect.Ptr, reflect.Slice, or reflect.Map.
|
|
||||||
func PointerOf(v reflect.Value) Pointer {
|
|
||||||
// The proper representation of a pointer is unsafe.Pointer,
|
|
||||||
// which is necessary if the GC ever uses a moving collector.
|
|
||||||
return Pointer{unsafe.Pointer(v.Pointer()), v.Type()}
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsNil reports whether the pointer is nil.
|
|
||||||
func (p Pointer) IsNil() bool {
|
|
||||||
return p.p == nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Uintptr returns the pointer as a uintptr.
|
|
||||||
func (p Pointer) Uintptr() uintptr {
|
|
||||||
return uintptr(p.p)
|
|
||||||
}
|
|
|
@@ -1,106 +0,0 @@
|
||||||
// Copyright 2017, The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE.md file.
|
|
||||||
|
|
||||||
package value
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"math"
|
|
||||||
"reflect"
|
|
||||||
"sort"
|
|
||||||
)
|
|
||||||
|
|
||||||
// SortKeys sorts a list of map keys, deduplicating keys if necessary.
|
|
||||||
// The type of each value must be comparable.
|
|
||||||
func SortKeys(vs []reflect.Value) []reflect.Value {
|
|
||||||
if len(vs) == 0 {
|
|
||||||
return vs
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sort the map keys.
|
|
||||||
sort.SliceStable(vs, func(i, j int) bool { return isLess(vs[i], vs[j]) })
|
|
||||||
|
|
||||||
// Deduplicate keys (fails for NaNs).
|
|
||||||
vs2 := vs[:1]
|
|
||||||
for _, v := range vs[1:] {
|
|
||||||
if isLess(vs2[len(vs2)-1], v) {
|
|
||||||
vs2 = append(vs2, v)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return vs2
|
|
||||||
}
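// Illustrative sketch (hypothetical helper, not part of the original file):
// sorting map keys so that iteration over a map becomes deterministic.
func exampleSortKeys() []reflect.Value {
	m := map[string]int{"b": 2, "a": 1, "c": 3}
	keys := SortKeys(reflect.ValueOf(m).MapKeys())
	// keys now contains "a", "b", "c" in that order.
	return keys
}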
|
|
||||||
|
|
||||||
// isLess is a generic function for sorting arbitrary map keys.
|
|
||||||
// The inputs must be of the same type and must be comparable.
|
|
||||||
func isLess(x, y reflect.Value) bool {
|
|
||||||
switch x.Type().Kind() {
|
|
||||||
case reflect.Bool:
|
|
||||||
return !x.Bool() && y.Bool()
|
|
||||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
|
||||||
return x.Int() < y.Int()
|
|
||||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
|
||||||
return x.Uint() < y.Uint()
|
|
||||||
case reflect.Float32, reflect.Float64:
|
|
||||||
// NOTE: This does not sort -0 as less than +0
|
|
||||||
// since Go maps treat -0 and +0 as equal keys.
|
|
||||||
fx, fy := x.Float(), y.Float()
|
|
||||||
return fx < fy || math.IsNaN(fx) && !math.IsNaN(fy)
|
|
||||||
case reflect.Complex64, reflect.Complex128:
|
|
||||||
cx, cy := x.Complex(), y.Complex()
|
|
||||||
rx, ix, ry, iy := real(cx), imag(cx), real(cy), imag(cy)
|
|
||||||
if rx == ry || (math.IsNaN(rx) && math.IsNaN(ry)) {
|
|
||||||
return ix < iy || math.IsNaN(ix) && !math.IsNaN(iy)
|
|
||||||
}
|
|
||||||
return rx < ry || math.IsNaN(rx) && !math.IsNaN(ry)
|
|
||||||
case reflect.Ptr, reflect.UnsafePointer, reflect.Chan:
|
|
||||||
return x.Pointer() < y.Pointer()
|
|
||||||
case reflect.String:
|
|
||||||
return x.String() < y.String()
|
|
||||||
case reflect.Array:
|
|
||||||
for i := 0; i < x.Len(); i++ {
|
|
||||||
if isLess(x.Index(i), y.Index(i)) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if isLess(y.Index(i), x.Index(i)) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
case reflect.Struct:
|
|
||||||
for i := 0; i < x.NumField(); i++ {
|
|
||||||
if isLess(x.Field(i), y.Field(i)) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if isLess(y.Field(i), x.Field(i)) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
case reflect.Interface:
|
|
||||||
vx, vy := x.Elem(), y.Elem()
|
|
||||||
if !vx.IsValid() || !vy.IsValid() {
|
|
||||||
return !vx.IsValid() && vy.IsValid()
|
|
||||||
}
|
|
||||||
tx, ty := vx.Type(), vy.Type()
|
|
||||||
if tx == ty {
|
|
||||||
return isLess(x.Elem(), y.Elem())
|
|
||||||
}
|
|
||||||
if tx.Kind() != ty.Kind() {
|
|
||||||
return vx.Kind() < vy.Kind()
|
|
||||||
}
|
|
||||||
if tx.String() != ty.String() {
|
|
||||||
return tx.String() < ty.String()
|
|
||||||
}
|
|
||||||
if tx.PkgPath() != ty.PkgPath() {
|
|
||||||
return tx.PkgPath() < ty.PkgPath()
|
|
||||||
}
|
|
||||||
// This can happen in rare situations, so we fall back to just comparing
|
|
||||||
// the unique pointer for a reflect.Type. This guarantees deterministic
|
|
||||||
// ordering within a program, but it is obviously not stable.
|
|
||||||
return reflect.ValueOf(vx.Type()).Pointer() < reflect.ValueOf(vy.Type()).Pointer()
|
|
||||||
default:
|
|
||||||
// Must be Func, Map, or Slice; which are not comparable.
|
|
||||||
panic(fmt.Sprintf("%T is not comparable", x.Type()))
|
|
||||||
}
|
|
||||||
}
|
|
|
@@ -1,48 +0,0 @@
|
||||||
// Copyright 2017, The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE.md file.
|
|
||||||
|
|
||||||
package value
|
|
||||||
|
|
||||||
import (
|
|
||||||
"math"
|
|
||||||
"reflect"
|
|
||||||
)
|
|
||||||
|
|
||||||
// IsZero reports whether v is the zero value.
|
|
||||||
// This does not rely on Interface and so can be used on unexported fields.
|
|
||||||
func IsZero(v reflect.Value) bool {
|
|
||||||
switch v.Kind() {
|
|
||||||
case reflect.Bool:
|
|
||||||
return !v.Bool()
|
|
||||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
|
||||||
return v.Int() == 0
|
|
||||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
|
||||||
return v.Uint() == 0
|
|
||||||
case reflect.Float32, reflect.Float64:
|
|
||||||
return math.Float64bits(v.Float()) == 0
|
|
||||||
case reflect.Complex64, reflect.Complex128:
|
|
||||||
return math.Float64bits(real(v.Complex())) == 0 && math.Float64bits(imag(v.Complex())) == 0
|
|
||||||
case reflect.String:
|
|
||||||
return v.String() == ""
|
|
||||||
case reflect.UnsafePointer:
|
|
||||||
return v.Pointer() == 0
|
|
||||||
case reflect.Chan, reflect.Func, reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
|
|
||||||
return v.IsNil()
|
|
||||||
case reflect.Array:
|
|
||||||
for i := 0; i < v.Len(); i++ {
|
|
||||||
if !IsZero(v.Index(i)) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
case reflect.Struct:
|
|
||||||
for i := 0; i < v.NumField(); i++ {
|
|
||||||
if !IsZero(v.Field(i)) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
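// Illustrative sketch (hypothetical helper, not part of the original file):
func exampleIsZero() (zero, nonZero bool) {
	type T struct {
		S []int
		M map[string]bool
	}
	zero = IsZero(reflect.ValueOf(T{}))               // true: every field is the zero value
	nonZero = !IsZero(reflect.ValueOf(T{S: []int{}})) // true: S is non-nil, so T is not zero
	return zero, nonZero
}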
|
|
|
@@ -1,552 +0,0 @@
|
||||||
// Copyright 2017, The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE.md file.
|
|
||||||
|
|
||||||
package cmp
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"reflect"
|
|
||||||
"regexp"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/google/go-cmp/cmp/internal/function"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Option configures for specific behavior of Equal and Diff. In particular,
|
|
||||||
// the fundamental Option functions (Ignore, Transformer, and Comparer),
|
|
||||||
// configure how equality is determined.
|
|
||||||
//
|
|
||||||
// The fundamental options may be composed with filters (FilterPath and
|
|
||||||
// FilterValues) to control the scope over which they are applied.
|
|
||||||
//
|
|
||||||
// The cmp/cmpopts package provides helper functions for creating options that
|
|
||||||
// may be used with Equal and Diff.
|
|
||||||
type Option interface {
|
|
||||||
// filter applies all filters and returns the option that remains.
|
|
||||||
// Each option may only read s.curPath and call s.callTTBFunc.
|
|
||||||
//
|
|
||||||
// An Options is returned only if multiple comparers or transformers
|
|
||||||
// can apply simultaneously and will only contain values of those types
|
|
||||||
// or sub-Options containing values of those types.
|
|
||||||
filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption
|
|
||||||
}
|
|
||||||
|
|
||||||
// applicableOption represents the following types:
|
|
||||||
// Fundamental: ignore | validator | *comparer | *transformer
|
|
||||||
// Grouping: Options
|
|
||||||
type applicableOption interface {
|
|
||||||
Option
|
|
||||||
|
|
||||||
// apply executes the option, which may mutate s or panic.
|
|
||||||
apply(s *state, vx, vy reflect.Value)
|
|
||||||
}
|
|
||||||
|
|
||||||
// coreOption represents the following types:
|
|
||||||
// Fundamental: ignore | validator | *comparer | *transformer
|
|
||||||
// Filters: *pathFilter | *valuesFilter
|
|
||||||
type coreOption interface {
|
|
||||||
Option
|
|
||||||
isCore()
|
|
||||||
}
|
|
||||||
|
|
||||||
type core struct{}
|
|
||||||
|
|
||||||
func (core) isCore() {}
|
|
||||||
|
|
||||||
// Options is a list of Option values that also satisfies the Option interface.
|
|
||||||
// Helper comparison packages may return an Options value when packing multiple
|
|
||||||
// Option values into a single Option. When this package processes an Options,
|
|
||||||
// it will be implicitly expanded into a flat list.
|
|
||||||
//
|
|
||||||
// Applying a filter on an Options is equivalent to applying that same filter
|
|
||||||
// on all individual options held within.
|
|
||||||
type Options []Option
|
|
||||||
|
|
||||||
func (opts Options) filter(s *state, t reflect.Type, vx, vy reflect.Value) (out applicableOption) {
|
|
||||||
for _, opt := range opts {
|
|
||||||
switch opt := opt.filter(s, t, vx, vy); opt.(type) {
|
|
||||||
case ignore:
|
|
||||||
return ignore{} // Only ignore can short-circuit evaluation
|
|
||||||
case validator:
|
|
||||||
out = validator{} // Takes precedence over comparer or transformer
|
|
||||||
case *comparer, *transformer, Options:
|
|
||||||
switch out.(type) {
|
|
||||||
case nil:
|
|
||||||
out = opt
|
|
||||||
case validator:
|
|
||||||
// Keep validator
|
|
||||||
case *comparer, *transformer, Options:
|
|
||||||
out = Options{out, opt} // Conflicting comparers or transformers
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
func (opts Options) apply(s *state, _, _ reflect.Value) {
|
|
||||||
const warning = "ambiguous set of applicable options"
|
|
||||||
const help = "consider using filters to ensure at most one Comparer or Transformer may apply"
|
|
||||||
var ss []string
|
|
||||||
for _, opt := range flattenOptions(nil, opts) {
|
|
||||||
ss = append(ss, fmt.Sprint(opt))
|
|
||||||
}
|
|
||||||
set := strings.Join(ss, "\n\t")
|
|
||||||
panic(fmt.Sprintf("%s at %#v:\n\t%s\n%s", warning, s.curPath, set, help))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (opts Options) String() string {
|
|
||||||
var ss []string
|
|
||||||
for _, opt := range opts {
|
|
||||||
ss = append(ss, fmt.Sprint(opt))
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("Options{%s}", strings.Join(ss, ", "))
|
|
||||||
}
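// Illustrative sketch, not part of the original source: helper packages can
// bundle related options into a single Options value, which Equal implicitly
// flattens. The two comparers below apply to different types, so they never
// conflict.
func exampleOptionBundle() Option {
	return Options{
		// Compare strings case-insensitively.
		Comparer(strings.EqualFold),
		// Treat two NaN float64 values as equal (x != x is true only for NaN).
		Comparer(func(x, y float64) bool { return x == y || (x != x && y != y) }),
	}
}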
|
|
||||||
// FilterPath returns a new Option where opt is only evaluated if filter f
|
|
||||||
// returns true for the current Path in the value tree.
|
|
||||||
//
|
|
||||||
// This filter is called even if a slice element or map entry is missing and
|
|
||||||
// provides an opportunity to ignore such cases. The filter function must be
|
|
||||||
// symmetric such that the filter result is identical regardless of whether the
|
|
||||||
// missing value is from x or y.
|
|
||||||
//
|
|
||||||
// The option passed in may be an Ignore, Transformer, Comparer, Options, or
|
|
||||||
// a previously filtered Option.
|
|
||||||
func FilterPath(f func(Path) bool, opt Option) Option {
|
|
||||||
if f == nil {
|
|
||||||
panic("invalid path filter function")
|
|
||||||
}
|
|
||||||
if opt := normalizeOption(opt); opt != nil {
|
|
||||||
return &pathFilter{fnc: f, opt: opt}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
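// Illustrative sketch, not part of the original source: an unfiltered Ignore
// may not be passed to Equal, so it is typically scoped with FilterPath.
// The field name "Signature" is hypothetical.
func exampleIgnoreSignatureField() Option {
	return FilterPath(func(p Path) bool {
		sf, ok := p.Last().(StructField)
		return ok && sf.Name() == "Signature"
	}, Ignore())
}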
|
|
||||||
type pathFilter struct {
|
|
||||||
core
|
|
||||||
fnc func(Path) bool
|
|
||||||
opt Option
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f pathFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption {
|
|
||||||
if f.fnc(s.curPath) {
|
|
||||||
return f.opt.filter(s, t, vx, vy)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f pathFilter) String() string {
|
|
||||||
return fmt.Sprintf("FilterPath(%s, %v)", function.NameOf(reflect.ValueOf(f.fnc)), f.opt)
|
|
||||||
}
|
|
||||||
|
|
||||||
// FilterValues returns a new Option where opt is only evaluated if filter f,
|
|
||||||
// which is a function of the form "func(T, T) bool", returns true for the
|
|
||||||
// current pair of values being compared. If either value is invalid or
|
|
||||||
// the type of the values is not assignable to T, then this filter implicitly
|
|
||||||
// returns false.
|
|
||||||
//
|
|
||||||
// The filter function must be
|
|
||||||
// symmetric (i.e., agnostic to the order of the inputs) and
|
|
||||||
// deterministic (i.e., produces the same result when given the same inputs).
|
|
||||||
// If T is an interface, it is possible that f is called with two values with
|
|
||||||
// different concrete types that both implement T.
|
|
||||||
//
|
|
||||||
// The option passed in may be an Ignore, Transformer, Comparer, Options, or
|
|
||||||
// a previously filtered Option.
|
|
||||||
func FilterValues(f interface{}, opt Option) Option {
|
|
||||||
v := reflect.ValueOf(f)
|
|
||||||
if !function.IsType(v.Type(), function.ValueFilter) || v.IsNil() {
|
|
||||||
panic(fmt.Sprintf("invalid values filter function: %T", f))
|
|
||||||
}
|
|
||||||
if opt := normalizeOption(opt); opt != nil {
|
|
||||||
vf := &valuesFilter{fnc: v, opt: opt}
|
|
||||||
if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
|
|
||||||
vf.typ = ti
|
|
||||||
}
|
|
||||||
return vf
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
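// Illustrative sketch, not part of the original source: FilterValues scopes a
// Comparer to the pairs of values accepted by the filter; here, strings are
// compared case-insensitively only when both values are non-empty.
func exampleFoldNonEmpty() Option {
	return FilterValues(
		func(x, y string) bool { return x != "" && y != "" },
		Comparer(strings.EqualFold),
	)
}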
|
|
||||||
type valuesFilter struct {
|
|
||||||
core
|
|
||||||
typ reflect.Type // T
|
|
||||||
fnc reflect.Value // func(T, T) bool
|
|
||||||
opt Option
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f valuesFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption {
|
|
||||||
if !vx.IsValid() || !vx.CanInterface() || !vy.IsValid() || !vy.CanInterface() {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if (f.typ == nil || t.AssignableTo(f.typ)) && s.callTTBFunc(f.fnc, vx, vy) {
|
|
||||||
return f.opt.filter(s, t, vx, vy)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f valuesFilter) String() string {
|
|
||||||
return fmt.Sprintf("FilterValues(%s, %v)", function.NameOf(f.fnc), f.opt)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ignore is an Option that causes all comparisons to be ignored.
|
|
||||||
// This value is intended to be combined with FilterPath or FilterValues.
|
|
||||||
// It is an error to pass an unfiltered Ignore option to Equal.
|
|
||||||
func Ignore() Option { return ignore{} }
|
|
||||||
|
|
||||||
type ignore struct{ core }
|
|
||||||
|
|
||||||
func (ignore) isFiltered() bool { return false }
|
|
||||||
func (ignore) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { return ignore{} }
|
|
||||||
func (ignore) apply(s *state, _, _ reflect.Value) { s.report(true, reportByIgnore) }
|
|
||||||
func (ignore) String() string { return "Ignore()" }
|
|
||||||
|
|
||||||
// validator is a sentinel Option type to indicate that some options could not
|
|
||||||
// be evaluated due to unexported fields, missing slice elements, or
|
|
||||||
// missing map entries. Both values are validator only for unexported fields.
|
|
||||||
type validator struct{ core }
|
|
||||||
|
|
||||||
func (validator) filter(_ *state, _ reflect.Type, vx, vy reflect.Value) applicableOption {
|
|
||||||
if !vx.IsValid() || !vy.IsValid() {
|
|
||||||
return validator{}
|
|
||||||
}
|
|
||||||
if !vx.CanInterface() || !vy.CanInterface() {
|
|
||||||
return validator{}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
func (validator) apply(s *state, vx, vy reflect.Value) {
|
|
||||||
// Implies missing slice element or map entry.
|
|
||||||
if !vx.IsValid() || !vy.IsValid() {
|
|
||||||
s.report(vx.IsValid() == vy.IsValid(), 0)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unable to Interface implies unexported field without visibility access.
|
|
||||||
if !vx.CanInterface() || !vy.CanInterface() {
|
|
||||||
help := "consider using a custom Comparer; if you control the implementation of type, you can also consider using an Exporter, AllowUnexported, or cmpopts.IgnoreUnexported"
|
|
||||||
var name string
|
|
||||||
if t := s.curPath.Index(-2).Type(); t.Name() != "" {
|
|
||||||
// Named type with unexported fields.
|
|
||||||
name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType
|
|
||||||
if _, ok := reflect.New(t).Interface().(error); ok {
|
|
||||||
help = "consider using cmpopts.EquateErrors to compare error values"
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// Unnamed type with unexported fields. Derive PkgPath from field.
|
|
||||||
var pkgPath string
|
|
||||||
for i := 0; i < t.NumField() && pkgPath == ""; i++ {
|
|
||||||
pkgPath = t.Field(i).PkgPath
|
|
||||||
}
|
|
||||||
name = fmt.Sprintf("%q.(%v)", pkgPath, t.String()) // e.g., "path/to/package".(struct { a int })
|
|
||||||
}
|
|
||||||
panic(fmt.Sprintf("cannot handle unexported field at %#v:\n\t%v\n%s", s.curPath, name, help))
|
|
||||||
}
|
|
||||||
|
|
||||||
panic("not reachable")
|
|
||||||
}
|
|
||||||
|
|
||||||
// identRx represents a valid identifier according to the Go specification.
|
|
||||||
const identRx = `[_\p{L}][_\p{L}\p{N}]*`
|
|
||||||
|
|
||||||
var identsRx = regexp.MustCompile(`^` + identRx + `(\.` + identRx + `)*$`)
|
|
||||||
|
|
||||||
// Transformer returns an Option that applies a transformation function that
|
|
||||||
// converts values of a certain type into that of another.
|
|
||||||
//
|
|
||||||
// The transformer f must be a function "func(T) R" that converts values of
|
|
||||||
// type T to those of type R and is implicitly filtered to input values
|
|
||||||
// assignable to T. The transformer must not mutate T in any way.
|
|
||||||
//
|
|
||||||
// To help prevent some cases of infinite recursive cycles applying the
|
|
||||||
// same transform to the output of itself (e.g., in the case where the
|
|
||||||
// input and output types are the same), an implicit filter is added such that
|
|
||||||
// a transformer is applicable only if that exact transformer is not already
|
|
||||||
// in the tail of the Path since the last non-Transform step.
|
|
||||||
// For situations where the implicit filter is still insufficient,
|
|
||||||
// consider using cmpopts.AcyclicTransformer, which adds a filter
|
|
||||||
// to prevent the transformer from being recursively applied upon itself.
|
|
||||||
//
|
|
||||||
// The name is a user-provided label that is used as the Transform.Name in the
|
|
||||||
// transformation PathStep (and eventually shown in the Diff output).
|
|
||||||
// The name must be a valid identifier or qualified identifier in Go syntax.
|
|
||||||
// If empty, an arbitrary name is used.
|
|
||||||
func Transformer(name string, f interface{}) Option {
|
|
||||||
v := reflect.ValueOf(f)
|
|
||||||
if !function.IsType(v.Type(), function.Transformer) || v.IsNil() {
|
|
||||||
panic(fmt.Sprintf("invalid transformer function: %T", f))
|
|
||||||
}
|
|
||||||
if name == "" {
|
|
||||||
name = function.NameOf(v)
|
|
||||||
if !identsRx.MatchString(name) {
|
|
||||||
name = "λ" // Lambda-symbol as placeholder name
|
|
||||||
}
|
|
||||||
} else if !identsRx.MatchString(name) {
|
|
||||||
panic(fmt.Sprintf("invalid name: %q", name))
|
|
||||||
}
|
|
||||||
tr := &transformer{name: name, fnc: reflect.ValueOf(f)}
|
|
||||||
if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
|
|
||||||
tr.typ = ti
|
|
||||||
}
|
|
||||||
return tr
|
|
||||||
}
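// Illustrative sketch, not part of the original source: a Transformer that
// compares []int values irrespective of order by transforming each slice into
// a sorted copy. The input is copied first because the transformer must not
// mutate T. Assumes the standard library "sort" package is imported.
func exampleSortedInts() Option {
	return Transformer("Sort", func(in []int) []int {
		out := append([]int(nil), in...) // copy to avoid mutating the input
		sort.Ints(out)
		return out
	})
}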
|
|
||||||
type transformer struct {
|
|
||||||
core
|
|
||||||
name string
|
|
||||||
typ reflect.Type // T
|
|
||||||
fnc reflect.Value // func(T) R
|
|
||||||
}
|
|
||||||
|
|
||||||
func (tr *transformer) isFiltered() bool { return tr.typ != nil }
|
|
||||||
|
|
||||||
func (tr *transformer) filter(s *state, t reflect.Type, _, _ reflect.Value) applicableOption {
|
|
||||||
for i := len(s.curPath) - 1; i >= 0; i-- {
|
|
||||||
if t, ok := s.curPath[i].(Transform); !ok {
|
|
||||||
break // Hit most recent non-Transform step
|
|
||||||
} else if tr == t.trans {
|
|
||||||
return nil // Cannot directly use same Transform
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if tr.typ == nil || t.AssignableTo(tr.typ) {
|
|
||||||
return tr
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (tr *transformer) apply(s *state, vx, vy reflect.Value) {
|
|
||||||
step := Transform{&transform{pathStep{typ: tr.fnc.Type().Out(0)}, tr}}
|
|
||||||
vvx := s.callTRFunc(tr.fnc, vx, step)
|
|
||||||
vvy := s.callTRFunc(tr.fnc, vy, step)
|
|
||||||
step.vx, step.vy = vvx, vvy
|
|
||||||
s.compareAny(step)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (tr transformer) String() string {
|
|
||||||
return fmt.Sprintf("Transformer(%s, %s)", tr.name, function.NameOf(tr.fnc))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Comparer returns an Option that determines whether two values are equal
|
|
||||||
// to each other.
|
|
||||||
//
|
|
||||||
// The comparer f must be a function "func(T, T) bool" and is implicitly
|
|
||||||
// filtered to input values assignable to T. If T is an interface, it is
|
|
||||||
// possible that f is called with two values of different concrete types that
|
|
||||||
// both implement T.
|
|
||||||
//
|
|
||||||
// The equality function must be:
|
|
||||||
// • Symmetric: equal(x, y) == equal(y, x)
|
|
||||||
// • Deterministic: equal(x, y) == equal(x, y)
|
|
||||||
// • Pure: equal(x, y) does not modify x or y
|
|
||||||
func Comparer(f interface{}) Option {
|
|
||||||
v := reflect.ValueOf(f)
|
|
||||||
if !function.IsType(v.Type(), function.Equal) || v.IsNil() {
|
|
||||||
panic(fmt.Sprintf("invalid comparer function: %T", f))
|
|
||||||
}
|
|
||||||
cm := &comparer{fnc: v}
|
|
||||||
if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
|
|
||||||
cm.typ = ti
|
|
||||||
}
|
|
||||||
return cm
|
|
||||||
}
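// Illustrative sketch, not part of the original source: a Comparer that treats
// two float64 values as equal when they differ by less than a fixed tolerance.
// It is symmetric, deterministic, and pure, as required above.
func exampleApproxFloat64() Option {
	return Comparer(func(x, y float64) bool {
		d := x - y
		if d < 0 {
			d = -d
		}
		return d < 1e-9
	})
}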
|
|
||||||
type comparer struct {
|
|
||||||
core
|
|
||||||
typ reflect.Type // T
|
|
||||||
fnc reflect.Value // func(T, T) bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func (cm *comparer) isFiltered() bool { return cm.typ != nil }
|
|
||||||
|
|
||||||
func (cm *comparer) filter(_ *state, t reflect.Type, _, _ reflect.Value) applicableOption {
|
|
||||||
if cm.typ == nil || t.AssignableTo(cm.typ) {
|
|
||||||
return cm
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (cm *comparer) apply(s *state, vx, vy reflect.Value) {
|
|
||||||
eq := s.callTTBFunc(cm.fnc, vx, vy)
|
|
||||||
s.report(eq, reportByFunc)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (cm comparer) String() string {
|
|
||||||
return fmt.Sprintf("Comparer(%s)", function.NameOf(cm.fnc))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Exporter returns an Option that specifies whether Equal is allowed to
|
|
||||||
// introspect into the unexported fields of certain struct types.
|
|
||||||
//
|
|
||||||
// Users of this option must understand that comparing on unexported fields
|
|
||||||
// from external packages is not safe since changes in the internal
|
|
||||||
// implementation of some external package may cause the result of Equal
|
|
||||||
// to unexpectedly change. However, it may be valid to use this option on types
|
|
||||||
// defined in an internal package where the semantic meaning of an unexported
|
|
||||||
// field is in the control of the user.
|
|
||||||
//
|
|
||||||
// In many cases, a custom Comparer should be used instead that defines
|
|
||||||
// equality as a function of the public API of a type rather than the underlying
|
|
||||||
// unexported implementation.
|
|
||||||
//
|
|
||||||
// For example, the reflect.Type documentation defines equality to be determined
|
|
||||||
// by the == operator on the interface (essentially performing a shallow pointer
|
|
||||||
// comparison) and most attempts to compare *regexp.Regexp types are interested
|
|
||||||
// in only checking that the regular expression strings are equal.
|
|
||||||
// Both of these are accomplished using Comparers:
|
|
||||||
//
|
|
||||||
// Comparer(func(x, y reflect.Type) bool { return x == y })
|
|
||||||
// Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() })
|
|
||||||
//
|
|
||||||
// In other cases, the cmpopts.IgnoreUnexported option can be used to ignore
|
|
||||||
// all unexported fields on specified struct types.
|
|
||||||
func Exporter(f func(reflect.Type) bool) Option {
|
|
||||||
if !supportExporters {
|
|
||||||
panic("Exporter is not supported on purego builds")
|
|
||||||
}
|
|
||||||
return exporter(f)
|
|
||||||
}
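// Illustrative sketch, not part of the original source: an Exporter that
// permits unexported-field access for every type defined under a hypothetical
// internal package controlled by the caller.
func exampleInternalExporter() Option {
	return Exporter(func(t reflect.Type) bool {
		return strings.HasPrefix(t.PkgPath(), "example.com/mymodule/internal/")
	})
}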
|
|
||||||
type exporter func(reflect.Type) bool
|
|
||||||
|
|
||||||
func (exporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption {
|
|
||||||
panic("not implemented")
|
|
||||||
}
|
|
||||||
|
|
||||||
// AllowUnexported returns an Option that allows Equal to forcibly introspect
|
|
||||||
// unexported fields of the specified struct types.
|
|
||||||
//
|
|
||||||
// See Exporter for the proper use of this option.
|
|
||||||
func AllowUnexported(types ...interface{}) Option {
|
|
||||||
m := make(map[reflect.Type]bool)
|
|
||||||
for _, typ := range types {
|
|
||||||
t := reflect.TypeOf(typ)
|
|
||||||
if t.Kind() != reflect.Struct {
|
|
||||||
panic(fmt.Sprintf("invalid struct type: %T", typ))
|
|
||||||
}
|
|
||||||
m[t] = true
|
|
||||||
}
|
|
||||||
return exporter(func(t reflect.Type) bool { return m[t] })
|
|
||||||
}
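// Illustrative sketch, not part of the original source: the hypothetical type
// myPoint has only unexported fields, so Equal needs AllowUnexported (or an
// Exporter) to descend into them; this is only advisable for types that the
// caller controls.
type myPoint struct{ x, y int }

func exampleCompareUnexported() bool {
	return Equal(myPoint{1, 2}, myPoint{1, 2}, AllowUnexported(myPoint{}))
}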
|
|
||||||
// Result represents the comparison result for a single node and
|
|
||||||
// is provided by cmp when calling Report (see Reporter).
|
|
||||||
type Result struct {
|
|
||||||
_ [0]func() // Make Result incomparable
|
|
||||||
flags resultFlags
|
|
||||||
}
|
|
||||||
|
|
||||||
// Equal reports whether the node was determined to be equal or not.
|
|
||||||
// As a special case, ignored nodes are considered equal.
|
|
||||||
func (r Result) Equal() bool {
|
|
||||||
return r.flags&(reportEqual|reportByIgnore) != 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// ByIgnore reports whether the node is equal because it was ignored.
|
|
||||||
// This never reports true if Equal reports false.
|
|
||||||
func (r Result) ByIgnore() bool {
|
|
||||||
return r.flags&reportByIgnore != 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// ByMethod reports whether the Equal method determined equality.
|
|
||||||
func (r Result) ByMethod() bool {
|
|
||||||
return r.flags&reportByMethod != 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// ByFunc reports whether a Comparer function determined equality.
|
|
||||||
func (r Result) ByFunc() bool {
|
|
||||||
return r.flags&reportByFunc != 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// ByCycle reports whether a reference cycle was detected.
|
|
||||||
func (r Result) ByCycle() bool {
|
|
||||||
return r.flags&reportByCycle != 0
|
|
||||||
}
|
|
||||||
|
|
||||||
type resultFlags uint
|
|
||||||
|
|
||||||
const (
|
|
||||||
_ resultFlags = (1 << iota) / 2
|
|
||||||
|
|
||||||
reportEqual
|
|
||||||
reportUnequal
|
|
||||||
reportByIgnore
|
|
||||||
reportByMethod
|
|
||||||
reportByFunc
|
|
||||||
reportByCycle
|
|
||||||
)
|
|
||||||
|
|
||||||
// Reporter is an Option that can be passed to Equal. When Equal traverses
// the value trees, it calls PushStep as it descends into each node in the
// tree and PopStep as it ascends out of the node. The leaves of the tree are
// either compared (determined to be equal or not equal) or ignored and reported
// as such by calling the Report method.
|
|
||||||
func Reporter(r interface {
|
|
||||||
// PushStep is called when a tree-traversal operation is performed.
|
|
||||||
// The PathStep itself is only valid until the step is popped.
|
|
||||||
// The PathStep.Values are valid for the duration of the entire traversal
|
|
||||||
// and must not be mutated.
|
|
||||||
//
|
|
||||||
// Equal always calls PushStep at the start to provide an operation-less
|
|
||||||
// PathStep used to report the root values.
|
|
||||||
//
|
|
||||||
// Within a slice, the exact set of inserted, removed, or modified elements
|
|
||||||
// is unspecified and may change in future implementations.
|
|
||||||
// The entries of a map are iterated through in an unspecified order.
|
|
||||||
PushStep(PathStep)
|
|
||||||
|
|
||||||
// Report is called exactly once on leaf nodes to report whether the
|
|
||||||
// comparison identified the node as equal, unequal, or ignored.
|
|
||||||
// A leaf node is one that is immediately preceded by and followed by
|
|
||||||
// a pair of PushStep and PopStep calls.
|
|
||||||
Report(Result)
|
|
||||||
|
|
||||||
// PopStep ascends back up the value tree.
|
|
||||||
// There is always a matching pop call for every push call.
|
|
||||||
PopStep()
|
|
||||||
}) Option {
|
|
||||||
return reporter{r}
|
|
||||||
}
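// Illustrative sketch, not part of the original source: a minimal reporter
// that records the path of every leaf node reported as unequal. It would be
// passed to Equal as Reporter(&pathRecorder{}).
type pathRecorder struct {
	path  Path
	diffs []string
}

func (r *pathRecorder) PushStep(ps PathStep) { r.path = append(r.path, ps) }
func (r *pathRecorder) PopStep()             { r.path = r.path[:len(r.path)-1] }
func (r *pathRecorder) Report(rs Result) {
	if !rs.Equal() {
		r.diffs = append(r.diffs, fmt.Sprintf("%#v", r.path))
	}
}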
|
|
||||||
type reporter struct{ reporterIface }
|
|
||||||
type reporterIface interface {
|
|
||||||
PushStep(PathStep)
|
|
||||||
Report(Result)
|
|
||||||
PopStep()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (reporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption {
|
|
||||||
panic("not implemented")
|
|
||||||
}
|
|
||||||
|
|
||||||
// normalizeOption normalizes the input options such that all Options groups
|
|
||||||
// are flattened and groups with a single element are reduced to that element.
|
|
||||||
// Only coreOptions and Options containing coreOptions are allowed.
|
|
||||||
func normalizeOption(src Option) Option {
|
|
||||||
switch opts := flattenOptions(nil, Options{src}); len(opts) {
|
|
||||||
case 0:
|
|
||||||
return nil
|
|
||||||
case 1:
|
|
||||||
return opts[0]
|
|
||||||
default:
|
|
||||||
return opts
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// flattenOptions copies all options in src to dst as a flat list.
|
|
||||||
// Only coreOptions and Options containing coreOptions are allowed.
|
|
||||||
func flattenOptions(dst, src Options) Options {
|
|
||||||
for _, opt := range src {
|
|
||||||
switch opt := opt.(type) {
|
|
||||||
case nil:
|
|
||||||
continue
|
|
||||||
case Options:
|
|
||||||
dst = flattenOptions(dst, opt)
|
|
||||||
case coreOption:
|
|
||||||
dst = append(dst, opt)
|
|
||||||
default:
|
|
||||||
panic(fmt.Sprintf("invalid option type: %T", opt))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return dst
|
|
||||||
}
|
|
|
@@ -1,378 +0,0 @@
|
||||||
// Copyright 2017, The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE.md file.
|
|
||||||
|
|
||||||
package cmp
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"reflect"
|
|
||||||
"strings"
|
|
||||||
"unicode"
|
|
||||||
"unicode/utf8"
|
|
||||||
|
|
||||||
"github.com/google/go-cmp/cmp/internal/value"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Path is a list of PathSteps describing the sequence of operations to get
|
|
||||||
// from some root type to the current position in the value tree.
|
|
||||||
// The first Path element is always an operation-less PathStep that exists
|
|
||||||
// simply to identify the initial type.
|
|
||||||
//
|
|
||||||
// When traversing structs with embedded structs, the embedded struct will
|
|
||||||
// always be accessed as a field before traversing the fields of the
|
|
||||||
// embedded struct themselves. That is, an exported field from the
|
|
||||||
// embedded struct will never be accessed directly from the parent struct.
|
|
||||||
type Path []PathStep
|
|
||||||
|
|
||||||
// PathStep is a union-type for specific operations to traverse
|
|
||||||
// a value's tree structure. Users of this package never need to implement
|
|
||||||
// these types, since values of these types will be returned by this package.
|
|
||||||
//
|
|
||||||
// Implementations of this interface are
|
|
||||||
// StructField, SliceIndex, MapIndex, Indirect, TypeAssertion, and Transform.
|
|
||||||
type PathStep interface {
|
|
||||||
String() string
|
|
||||||
|
|
||||||
// Type is the resulting type after performing the path step.
|
|
||||||
Type() reflect.Type
|
|
||||||
|
|
||||||
// Values is the resulting values after performing the path step.
|
|
||||||
// The type of each valid value is guaranteed to be identical to Type.
|
|
||||||
//
|
|
||||||
// In some cases, one or both may be invalid or have restrictions:
|
|
||||||
// • For StructField, both are not interface-able if the current field
|
|
||||||
// is unexported and the struct type is not explicitly permitted by
|
|
||||||
// an Exporter to traverse unexported fields.
|
|
||||||
// • For SliceIndex, one may be invalid if an element is missing from
|
|
||||||
// either the x or y slice.
|
|
||||||
// • For MapIndex, one may be invalid if an entry is missing from
|
|
||||||
// either the x or y map.
|
|
||||||
//
|
|
||||||
// The provided values must not be mutated.
|
|
||||||
Values() (vx, vy reflect.Value)
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
_ PathStep = StructField{}
|
|
||||||
_ PathStep = SliceIndex{}
|
|
||||||
_ PathStep = MapIndex{}
|
|
||||||
_ PathStep = Indirect{}
|
|
||||||
_ PathStep = TypeAssertion{}
|
|
||||||
_ PathStep = Transform{}
|
|
||||||
)
|
|
||||||
|
|
||||||
func (pa *Path) push(s PathStep) {
|
|
||||||
*pa = append(*pa, s)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pa *Path) pop() {
|
|
||||||
*pa = (*pa)[:len(*pa)-1]
|
|
||||||
}
|
|
||||||
|
|
||||||
// Last returns the last PathStep in the Path.
|
|
||||||
// If the path is empty, this returns a non-nil PathStep that reports a nil Type.
|
|
||||||
func (pa Path) Last() PathStep {
|
|
||||||
return pa.Index(-1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Index returns the ith step in the Path and supports negative indexing.
|
|
||||||
// A negative index starts counting from the tail of the Path such that -1
|
|
||||||
// refers to the last step, -2 refers to the second-to-last step, and so on.
|
|
||||||
// If index is invalid, this returns a non-nil PathStep that reports a nil Type.
|
|
||||||
func (pa Path) Index(i int) PathStep {
|
|
||||||
if i < 0 {
|
|
||||||
i = len(pa) + i
|
|
||||||
}
|
|
||||||
if i < 0 || i >= len(pa) {
|
|
||||||
return pathStep{}
|
|
||||||
}
|
|
||||||
return pa[i]
|
|
||||||
}
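// Illustrative sketch, not part of the original source: negative indexing is
// convenient inside path filters, where Index(-1) is the step currently being
// considered and Index(-2) is its parent. The "Elapsed" field and the parent
// type name are hypothetical.
func exampleParentAwareFilter() Option {
	return FilterPath(func(p Path) bool {
		sf, ok := p.Index(-1).(StructField)
		parent := p.Index(-2).Type()
		return ok && sf.Name() == "Elapsed" &&
			parent != nil && parent.String() == "mypkg.Benchmark"
	}, Ignore())
}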
|
|
||||||
// String returns the simplified path to a node.
|
|
||||||
// The simplified path only contains struct field accesses.
|
|
||||||
//
|
|
||||||
// For example:
|
|
||||||
// MyMap.MySlices.MyField
|
|
||||||
func (pa Path) String() string {
|
|
||||||
var ss []string
|
|
||||||
for _, s := range pa {
|
|
||||||
if _, ok := s.(StructField); ok {
|
|
||||||
ss = append(ss, s.String())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return strings.TrimPrefix(strings.Join(ss, ""), ".")
|
|
||||||
}
|
|
||||||
|
|
||||||
// GoString returns the path to a specific node using Go syntax.
|
|
||||||
//
|
|
||||||
// For example:
|
|
||||||
// (*root.MyMap["key"].(*mypkg.MyStruct).MySlices)[2][3].MyField
|
|
||||||
func (pa Path) GoString() string {
|
|
||||||
var ssPre, ssPost []string
|
|
||||||
var numIndirect int
|
|
||||||
for i, s := range pa {
|
|
||||||
var nextStep PathStep
|
|
||||||
if i+1 < len(pa) {
|
|
||||||
nextStep = pa[i+1]
|
|
||||||
}
|
|
||||||
switch s := s.(type) {
|
|
||||||
case Indirect:
|
|
||||||
numIndirect++
|
|
||||||
pPre, pPost := "(", ")"
|
|
||||||
switch nextStep.(type) {
|
|
||||||
case Indirect:
|
|
||||||
continue // Next step is indirection, so let them batch up
|
|
||||||
case StructField:
|
|
||||||
numIndirect-- // Automatic indirection on struct fields
|
|
||||||
case nil:
|
|
||||||
pPre, pPost = "", "" // Last step; no need for parentheses
|
|
||||||
}
|
|
||||||
if numIndirect > 0 {
|
|
||||||
ssPre = append(ssPre, pPre+strings.Repeat("*", numIndirect))
|
|
||||||
ssPost = append(ssPost, pPost)
|
|
||||||
}
|
|
||||||
numIndirect = 0
|
|
||||||
continue
|
|
||||||
case Transform:
|
|
||||||
ssPre = append(ssPre, s.trans.name+"(")
|
|
||||||
ssPost = append(ssPost, ")")
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
ssPost = append(ssPost, s.String())
|
|
||||||
}
|
|
||||||
for i, j := 0, len(ssPre)-1; i < j; i, j = i+1, j-1 {
|
|
||||||
ssPre[i], ssPre[j] = ssPre[j], ssPre[i]
|
|
||||||
}
|
|
||||||
return strings.Join(ssPre, "") + strings.Join(ssPost, "")
|
|
||||||
}
|
|
||||||
|
|
||||||
type pathStep struct {
|
|
||||||
typ reflect.Type
|
|
||||||
vx, vy reflect.Value
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ps pathStep) Type() reflect.Type { return ps.typ }
|
|
||||||
func (ps pathStep) Values() (vx, vy reflect.Value) { return ps.vx, ps.vy }
|
|
||||||
func (ps pathStep) String() string {
|
|
||||||
if ps.typ == nil {
|
|
||||||
return "<nil>"
|
|
||||||
}
|
|
||||||
s := ps.typ.String()
|
|
||||||
if s == "" || strings.ContainsAny(s, "{}\n") {
|
|
||||||
return "root" // Type too simple or complex to print
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("{%s}", s)
|
|
||||||
}
|
|
||||||
|
|
||||||
// StructField represents a struct field access on a field called Name.
|
|
||||||
type StructField struct{ *structField }
|
|
||||||
type structField struct {
|
|
||||||
pathStep
|
|
||||||
name string
|
|
||||||
idx int
|
|
||||||
|
|
||||||
// These fields are used for forcibly accessing an unexported field.
|
|
||||||
// pvx, pvy, and field are only valid if unexported is true.
|
|
||||||
unexported bool
|
|
||||||
mayForce bool // Forcibly allow visibility
|
|
||||||
paddr bool // Was parent addressable?
|
|
||||||
pvx, pvy reflect.Value // Parent values (always addressable)
|
|
||||||
field reflect.StructField // Field information
|
|
||||||
}
|
|
||||||
|
|
||||||
func (sf StructField) Type() reflect.Type { return sf.typ }
|
|
||||||
func (sf StructField) Values() (vx, vy reflect.Value) {
|
|
||||||
if !sf.unexported {
|
|
||||||
return sf.vx, sf.vy // CanInterface reports true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Forcibly obtain read-write access to an unexported struct field.
|
|
||||||
if sf.mayForce {
|
|
||||||
vx = retrieveUnexportedField(sf.pvx, sf.field, sf.paddr)
|
|
||||||
vy = retrieveUnexportedField(sf.pvy, sf.field, sf.paddr)
|
|
||||||
return vx, vy // CanInterface reports true
|
|
||||||
}
|
|
||||||
return sf.vx, sf.vy // CanInterface reports false
|
|
||||||
}
|
|
||||||
func (sf StructField) String() string { return fmt.Sprintf(".%s", sf.name) }
|
|
||||||
|
|
||||||
// Name is the field name.
|
|
||||||
func (sf StructField) Name() string { return sf.name }
|
|
||||||
|
|
||||||
// Index is the index of the field in the parent struct type.
|
|
||||||
// See reflect.Type.Field.
|
|
||||||
func (sf StructField) Index() int { return sf.idx }
|
|
||||||
|
|
||||||
// SliceIndex is an index operation on a slice or array at some index Key.
|
|
||||||
type SliceIndex struct{ *sliceIndex }
|
|
||||||
type sliceIndex struct {
|
|
||||||
pathStep
|
|
||||||
xkey, ykey int
|
|
||||||
isSlice bool // False for reflect.Array
|
|
||||||
}
|
|
||||||
|
|
||||||
func (si SliceIndex) Type() reflect.Type { return si.typ }
|
|
||||||
func (si SliceIndex) Values() (vx, vy reflect.Value) { return si.vx, si.vy }
|
|
||||||
func (si SliceIndex) String() string {
|
|
||||||
switch {
|
|
||||||
case si.xkey == si.ykey:
|
|
||||||
return fmt.Sprintf("[%d]", si.xkey)
|
|
||||||
case si.ykey == -1:
|
|
||||||
// [5->?] means "I don't know where X[5] went"
|
|
||||||
return fmt.Sprintf("[%d->?]", si.xkey)
|
|
||||||
case si.xkey == -1:
|
|
||||||
// [?->3] means "I don't know where Y[3] came from"
|
|
||||||
return fmt.Sprintf("[?->%d]", si.ykey)
|
|
||||||
default:
|
|
||||||
// [5->3] means "X[5] moved to Y[3]"
|
|
||||||
return fmt.Sprintf("[%d->%d]", si.xkey, si.ykey)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Key is the index key; it may return -1 if in a split state.
|
|
||||||
func (si SliceIndex) Key() int {
|
|
||||||
if si.xkey != si.ykey {
|
|
||||||
return -1
|
|
||||||
}
|
|
||||||
return si.xkey
|
|
||||||
}
|
|
||||||
|
|
||||||
// SplitKeys are the indexes for indexing into slices in the
|
|
||||||
// x and y values, respectively. These indexes may differ due to the
|
|
||||||
// insertion or removal of an element in one of the slices, causing
|
|
||||||
// all of the indexes to be shifted. If an index is -1, then that
|
|
||||||
// indicates that the element does not exist in the associated slice.
|
|
||||||
//
|
|
||||||
// Key is guaranteed to return -1 if and only if the indexes returned
|
|
||||||
// by SplitKeys are not the same. SplitKeys will never return -1 for
|
|
||||||
// both indexes.
|
|
||||||
func (si SliceIndex) SplitKeys() (ix, iy int) { return si.xkey, si.ykey }
|
|
||||||
|
|
||||||
// MapIndex is an index operation on a map at some index Key.
|
|
||||||
type MapIndex struct{ *mapIndex }
|
|
||||||
type mapIndex struct {
|
|
||||||
pathStep
|
|
||||||
key reflect.Value
|
|
||||||
}
|
|
||||||
|
|
||||||
func (mi MapIndex) Type() reflect.Type { return mi.typ }
|
|
||||||
func (mi MapIndex) Values() (vx, vy reflect.Value) { return mi.vx, mi.vy }
|
|
||||||
func (mi MapIndex) String() string { return fmt.Sprintf("[%#v]", mi.key) }
|
|
||||||
|
|
||||||
// Key is the value of the map key.
|
|
||||||
func (mi MapIndex) Key() reflect.Value { return mi.key }
|
|
||||||
|
|
||||||
// Indirect represents pointer indirection on the parent type.
|
|
||||||
type Indirect struct{ *indirect }
|
|
||||||
type indirect struct {
|
|
||||||
pathStep
|
|
||||||
}
|
|
||||||
|
|
||||||
func (in Indirect) Type() reflect.Type { return in.typ }
|
|
||||||
func (in Indirect) Values() (vx, vy reflect.Value) { return in.vx, in.vy }
|
|
||||||
func (in Indirect) String() string { return "*" }
|
|
||||||
|
|
||||||
// TypeAssertion represents a type assertion on an interface.
|
|
||||||
type TypeAssertion struct{ *typeAssertion }
|
|
||||||
type typeAssertion struct {
|
|
||||||
pathStep
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ta TypeAssertion) Type() reflect.Type { return ta.typ }
|
|
||||||
func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy }
|
|
||||||
func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", ta.typ) }
|
|
||||||
|
|
||||||
// Transform is a transformation from the parent type to the current type.
|
|
||||||
type Transform struct{ *transform }
|
|
||||||
type transform struct {
|
|
||||||
pathStep
|
|
||||||
trans *transformer
|
|
||||||
}
|
|
||||||
|
|
||||||
func (tf Transform) Type() reflect.Type { return tf.typ }
|
|
||||||
func (tf Transform) Values() (vx, vy reflect.Value) { return tf.vx, tf.vy }
|
|
||||||
func (tf Transform) String() string { return fmt.Sprintf("%s()", tf.trans.name) }
|
|
||||||
|
|
||||||
// Name is the name of the Transformer.
|
|
||||||
func (tf Transform) Name() string { return tf.trans.name }
|
|
||||||
|
|
||||||
// Func is the function pointer to the transformer function.
|
|
||||||
func (tf Transform) Func() reflect.Value { return tf.trans.fnc }
|
|
||||||
|
|
||||||
// Option returns the originally constructed Transformer option.
|
|
||||||
// The == operator can be used to detect the exact option used.
|
|
||||||
func (tf Transform) Option() Option { return tf.trans }
|
|
||||||
|
|
||||||
// pointerPath represents a dual-stack of pointers encountered when
|
|
||||||
// recursively traversing the x and y values. This data structure supports
|
|
||||||
// detection of cycles and determining whether the cycles are equal.
|
|
||||||
// In Go, cycles can occur via pointers, slices, and maps.
|
|
||||||
//
|
|
||||||
// The pointerPath uses a map to represent a stack, where descending into a
// pointer pushes the address onto the stack and ascending from a pointer
// pops the address from the stack. Thus, when traversing into a pointer from
// reflect.Ptr, reflect.Slice element, or reflect.Map, we can detect cycles
// by checking whether the pointer has already been visited. The cycle detection
// uses a separate stack for the x and y values.
|
|
||||||
//
|
|
||||||
// If a cycle is detected we need to determine whether the two pointers
|
|
||||||
// should be considered equal. The definition of equality chosen by Equal
|
|
||||||
// requires two graphs to have the same structure. To determine this, both the
|
|
||||||
// x and y values must have a cycle where the previous pointers were also
|
|
||||||
// encountered together as a pair.
|
|
||||||
//
|
|
||||||
// Semantically, this is equivalent to augmenting Indirect, SliceIndex, and
|
|
||||||
// MapIndex with pointer information for the x and y values.
|
|
||||||
// Suppose px and py are two pointers to compare; we then search the
|
|
||||||
// Path for whether px was ever encountered in the Path history of x, and
|
|
||||||
// similarly so with py. If either side has a cycle, the comparison is only
|
|
||||||
// equal if both px and py have a cycle resulting from the same PathStep.
|
|
||||||
//
|
|
||||||
// Using a map as a stack is more performant as we can perform cycle detection
|
|
||||||
// in O(1) instead of O(N) where N is len(Path).
|
|
||||||
type pointerPath struct {
|
|
||||||
// mx is keyed by x pointers, where the value is the associated y pointer.
|
|
||||||
mx map[value.Pointer]value.Pointer
|
|
||||||
// my is keyed by y pointers, where the value is the associated x pointer.
|
|
||||||
my map[value.Pointer]value.Pointer
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *pointerPath) Init() {
|
|
||||||
p.mx = make(map[value.Pointer]value.Pointer)
|
|
||||||
p.my = make(map[value.Pointer]value.Pointer)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Push indicates intent to descend into pointers vx and vy where
|
|
||||||
// visited reports whether either has been seen before. If visited before,
|
|
||||||
// equal reports whether both pointers were encountered together.
|
|
||||||
// Pop must be called if and only if the pointers were never visited.
|
|
||||||
//
|
|
||||||
// The pointers vx and vy must be a reflect.Ptr, reflect.Slice, or reflect.Map
|
|
||||||
// and be non-nil.
|
|
||||||
func (p pointerPath) Push(vx, vy reflect.Value) (equal, visited bool) {
|
|
||||||
px := value.PointerOf(vx)
|
|
||||||
py := value.PointerOf(vy)
|
|
||||||
_, ok1 := p.mx[px]
|
|
||||||
_, ok2 := p.my[py]
|
|
||||||
if ok1 || ok2 {
|
|
||||||
equal = p.mx[px] == py && p.my[py] == px // Pointers paired together
|
|
||||||
return equal, true
|
|
||||||
}
|
|
||||||
p.mx[px] = py
|
|
||||||
p.my[py] = px
|
|
||||||
return false, false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Pop ascends from pointers vx and vy.
|
|
||||||
func (p pointerPath) Pop(vx, vy reflect.Value) {
|
|
||||||
delete(p.mx, value.PointerOf(vx))
|
|
||||||
delete(p.my, value.PointerOf(vy))
|
|
||||||
}
|
|
||||||
|
|
||||||
// isExported reports whether the identifier is exported.
|
|
||||||
func isExported(id string) bool {
|
|
||||||
r, _ := utf8.DecodeRuneInString(id)
|
|
||||||
return unicode.IsUpper(r)
|
|
||||||
}
|
|
|
@@ -1,54 +0,0 @@
|
||||||
// Copyright 2017, The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE.md file.
|
|
||||||
|
|
||||||
package cmp
|
|
||||||
|
|
||||||
// defaultReporter implements the reporter interface.
|
|
||||||
//
|
|
||||||
// As Equal serially calls the PushStep, Report, and PopStep methods, the
|
|
||||||
// defaultReporter constructs a tree-based representation of the compared value
|
|
||||||
// and the result of each comparison (see valueNode).
|
|
||||||
//
|
|
||||||
// When the String method is called, the FormatDiff method transforms the
|
|
||||||
// valueNode tree into a textNode tree, which is a tree-based representation
|
|
||||||
// of the textual output (see textNode).
|
|
||||||
//
|
|
||||||
// Lastly, the textNode.String method produces the final report as a string.
|
|
||||||
type defaultReporter struct {
|
|
||||||
root *valueNode
|
|
||||||
curr *valueNode
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *defaultReporter) PushStep(ps PathStep) {
|
|
||||||
r.curr = r.curr.PushStep(ps)
|
|
||||||
if r.root == nil {
|
|
||||||
r.root = r.curr
|
|
||||||
}
|
|
||||||
}
|
|
||||||
func (r *defaultReporter) Report(rs Result) {
|
|
||||||
r.curr.Report(rs)
|
|
||||||
}
|
|
||||||
func (r *defaultReporter) PopStep() {
|
|
||||||
r.curr = r.curr.PopStep()
|
|
||||||
}
|
|
||||||
|
|
||||||
// String provides a full report of the differences detected as a structured
|
|
||||||
// literal in pseudo-Go syntax. String may only be called after the entire tree
|
|
||||||
// has been traversed.
|
|
||||||
func (r *defaultReporter) String() string {
|
|
||||||
assert(r.root != nil && r.curr == nil)
|
|
||||||
if r.root.NumDiff == 0 {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
ptrs := new(pointerReferences)
|
|
||||||
text := formatOptions{}.FormatDiff(r.root, ptrs)
|
|
||||||
resolveReferences(text)
|
|
||||||
return text.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
func assert(ok bool) {
|
|
||||||
if !ok {
|
|
||||||
panic("assertion failure")
|
|
||||||
}
|
|
||||||
}
|
|
|
@@ -1,432 +0,0 @@
|
||||||
// Copyright 2019, The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE.md file.
|
|
||||||
|
|
||||||
package cmp
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"reflect"
|
|
||||||
|
|
||||||
"github.com/google/go-cmp/cmp/internal/value"
|
|
||||||
)
|
|
||||||
|
|
||||||
// numContextRecords is the number of surrounding equal records to print.
|
|
||||||
const numContextRecords = 2
|
|
||||||
|
|
||||||
type diffMode byte
|
|
||||||
|
|
||||||
const (
|
|
||||||
diffUnknown diffMode = 0
|
|
||||||
diffIdentical diffMode = ' '
|
|
||||||
diffRemoved diffMode = '-'
|
|
||||||
diffInserted diffMode = '+'
|
|
||||||
)
|
|
||||||
|
|
||||||
type typeMode int
|
|
||||||
|
|
||||||
const (
|
|
||||||
// emitType always prints the type.
|
|
||||||
emitType typeMode = iota
|
|
||||||
// elideType never prints the type.
|
|
||||||
elideType
|
|
||||||
// autoType prints the type only for composite kinds
|
|
||||||
// (i.e., structs, slices, arrays, and maps).
|
|
||||||
autoType
|
|
||||||
)
|
|
||||||
|
|
||||||
type formatOptions struct {
|
|
||||||
// DiffMode controls the output mode of FormatDiff.
|
|
||||||
//
|
|
||||||
// If diffUnknown, then produce a diff of the x and y values.
|
|
||||||
// If diffIdentical, then emit values as if they were equal.
|
|
||||||
// If diffRemoved, then only emit x values (ignoring y values).
|
|
||||||
// If diffInserted, then only emit y values (ignoring x values).
|
|
||||||
DiffMode diffMode
|
|
||||||
|
|
||||||
// TypeMode controls whether to print the type for the current node.
|
|
||||||
//
|
|
||||||
// As a general rule of thumb, we always print the type of the next node
|
|
||||||
// after an interface, and always elide the type of the next node after
|
|
||||||
// a slice or map node.
|
|
||||||
TypeMode typeMode
|
|
||||||
|
|
||||||
// formatValueOptions are options specific to printing reflect.Values.
|
|
||||||
formatValueOptions
|
|
||||||
}
|
|
||||||
|
|
||||||
func (opts formatOptions) WithDiffMode(d diffMode) formatOptions {
|
|
||||||
opts.DiffMode = d
|
|
||||||
return opts
|
|
||||||
}
|
|
||||||
func (opts formatOptions) WithTypeMode(t typeMode) formatOptions {
|
|
||||||
opts.TypeMode = t
|
|
||||||
return opts
|
|
||||||
}
|
|
||||||
func (opts formatOptions) WithVerbosity(level int) formatOptions {
|
|
||||||
opts.VerbosityLevel = level
|
|
||||||
opts.LimitVerbosity = true
|
|
||||||
return opts
|
|
||||||
}
|
|
||||||
func (opts formatOptions) verbosity() uint {
|
|
||||||
switch {
|
|
||||||
case opts.VerbosityLevel < 0:
|
|
||||||
return 0
|
|
||||||
case opts.VerbosityLevel > 16:
|
|
||||||
return 16 // some reasonable maximum to avoid shift overflow
|
|
||||||
default:
|
|
||||||
return uint(opts.VerbosityLevel)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
const maxVerbosityPreset = 3
|
|
||||||
|
|
||||||
// verbosityPreset modifies the verbosity settings given an index
|
|
||||||
// between 0 and maxVerbosityPreset, inclusive.
|
|
||||||
func verbosityPreset(opts formatOptions, i int) formatOptions {
|
|
||||||
opts.VerbosityLevel = int(opts.verbosity()) + 2*i
|
|
||||||
if i > 0 {
|
|
||||||
opts.AvoidStringer = true
|
|
||||||
}
|
|
||||||
if i >= maxVerbosityPreset {
|
|
||||||
opts.PrintAddresses = true
|
|
||||||
opts.QualifiedNames = true
|
|
||||||
}
|
|
||||||
return opts
|
|
||||||
}
|
|
||||||
|
|
||||||
// FormatDiff converts a valueNode tree into a textNode tree, where the latter
|
|
||||||
// is a textual representation of the differences detected in the former.
|
|
||||||
func (opts formatOptions) FormatDiff(v *valueNode, ptrs *pointerReferences) (out textNode) {
|
|
||||||
if opts.DiffMode == diffIdentical {
|
|
||||||
opts = opts.WithVerbosity(1)
|
|
||||||
} else {
|
|
||||||
opts = opts.WithVerbosity(3)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check whether we have specialized formatting for this node.
|
|
||||||
// This is not necessary, but helpful for producing more readable outputs.
|
|
||||||
if opts.CanFormatDiffSlice(v) {
|
|
||||||
return opts.FormatDiffSlice(v)
|
|
||||||
}
|
|
||||||
|
|
||||||
var parentKind reflect.Kind
|
|
||||||
if v.parent != nil && v.parent.TransformerName == "" {
|
|
||||||
parentKind = v.parent.Type.Kind()
|
|
||||||
}
|
|
||||||
|
|
||||||
// For leaf nodes, format the value based on the reflect.Values alone.
|
|
||||||
if v.MaxDepth == 0 {
|
|
||||||
switch opts.DiffMode {
|
|
||||||
case diffUnknown, diffIdentical:
|
|
||||||
// Format Equal.
|
|
||||||
if v.NumDiff == 0 {
|
|
||||||
outx := opts.FormatValue(v.ValueX, parentKind, ptrs)
|
|
||||||
outy := opts.FormatValue(v.ValueY, parentKind, ptrs)
|
|
||||||
if v.NumIgnored > 0 && v.NumSame == 0 {
|
|
||||||
return textEllipsis
|
|
||||||
} else if outx.Len() < outy.Len() {
|
|
||||||
return outx
|
|
||||||
} else {
|
|
||||||
return outy
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Format unequal.
|
|
||||||
assert(opts.DiffMode == diffUnknown)
|
|
||||||
var list textList
|
|
||||||
outx := opts.WithTypeMode(elideType).FormatValue(v.ValueX, parentKind, ptrs)
|
|
||||||
outy := opts.WithTypeMode(elideType).FormatValue(v.ValueY, parentKind, ptrs)
|
|
||||||
for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ {
|
|
||||||
opts2 := verbosityPreset(opts, i).WithTypeMode(elideType)
|
|
||||||
outx = opts2.FormatValue(v.ValueX, parentKind, ptrs)
|
|
||||||
outy = opts2.FormatValue(v.ValueY, parentKind, ptrs)
|
|
||||||
}
|
|
||||||
if outx != nil {
|
|
||||||
list = append(list, textRecord{Diff: '-', Value: outx})
|
|
||||||
}
|
|
||||||
if outy != nil {
|
|
||||||
list = append(list, textRecord{Diff: '+', Value: outy})
|
|
||||||
}
|
|
||||||
return opts.WithTypeMode(emitType).FormatType(v.Type, list)
|
|
||||||
case diffRemoved:
|
|
||||||
return opts.FormatValue(v.ValueX, parentKind, ptrs)
|
|
||||||
case diffInserted:
|
|
||||||
return opts.FormatValue(v.ValueY, parentKind, ptrs)
|
|
||||||
default:
|
|
||||||
panic("invalid diff mode")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Register slice element to support cycle detection.
|
|
||||||
if parentKind == reflect.Slice {
|
|
||||||
ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, true)
|
|
||||||
defer ptrs.Pop()
|
|
||||||
defer func() { out = wrapTrunkReferences(ptrRefs, out) }()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Descend into the child value node.
|
|
||||||
if v.TransformerName != "" {
|
|
||||||
out := opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs)
|
|
||||||
out = &textWrap{Prefix: "Inverse(" + v.TransformerName + ", ", Value: out, Suffix: ")"}
|
|
||||||
return opts.FormatType(v.Type, out)
|
|
||||||
} else {
|
|
||||||
switch k := v.Type.Kind(); k {
|
|
||||||
case reflect.Struct, reflect.Array, reflect.Slice:
|
|
||||||
out = opts.formatDiffList(v.Records, k, ptrs)
|
|
||||||
out = opts.FormatType(v.Type, out)
|
|
||||||
case reflect.Map:
|
|
||||||
// Register map to support cycle detection.
|
|
||||||
ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false)
|
|
||||||
defer ptrs.Pop()
|
|
||||||
|
|
||||||
out = opts.formatDiffList(v.Records, k, ptrs)
|
|
||||||
out = wrapTrunkReferences(ptrRefs, out)
|
|
||||||
out = opts.FormatType(v.Type, out)
|
|
||||||
case reflect.Ptr:
|
|
||||||
// Register pointer to support cycle detection.
|
|
||||||
ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false)
|
|
||||||
defer ptrs.Pop()
|
|
||||||
|
|
||||||
out = opts.FormatDiff(v.Value, ptrs)
|
|
||||||
out = wrapTrunkReferences(ptrRefs, out)
|
|
||||||
out = &textWrap{Prefix: "&", Value: out}
|
|
||||||
case reflect.Interface:
|
|
||||||
out = opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs)
|
|
||||||
default:
|
|
||||||
panic(fmt.Sprintf("%v cannot have children", k))
|
|
||||||
}
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind, ptrs *pointerReferences) textNode {
|
|
||||||
// Derive record name based on the data structure kind.
|
|
||||||
var name string
|
|
||||||
var formatKey func(reflect.Value) string
|
|
||||||
switch k {
|
|
||||||
case reflect.Struct:
|
|
||||||
name = "field"
|
|
||||||
opts = opts.WithTypeMode(autoType)
|
|
||||||
formatKey = func(v reflect.Value) string { return v.String() }
|
|
||||||
case reflect.Slice, reflect.Array:
|
|
||||||
name = "element"
|
|
||||||
opts = opts.WithTypeMode(elideType)
|
|
||||||
formatKey = func(reflect.Value) string { return "" }
|
|
||||||
case reflect.Map:
|
|
||||||
name = "entry"
|
|
||||||
opts = opts.WithTypeMode(elideType)
|
|
||||||
formatKey = func(v reflect.Value) string { return formatMapKey(v, false, ptrs) }
|
|
||||||
}
|
|
||||||
|
|
||||||
maxLen := -1
|
|
||||||
if opts.LimitVerbosity {
|
|
||||||
if opts.DiffMode == diffIdentical {
|
|
||||||
maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
|
|
||||||
} else {
|
|
||||||
maxLen = (1 << opts.verbosity()) << 1 // 2, 4, 8, 16, 32, 64, etc...
|
|
||||||
}
|
|
||||||
opts.VerbosityLevel--
|
|
||||||
}
|
|
||||||
|
|
||||||
// Handle unification.
|
|
||||||
switch opts.DiffMode {
|
|
||||||
case diffIdentical, diffRemoved, diffInserted:
|
|
||||||
var list textList
|
|
||||||
var deferredEllipsis bool // Add final "..." to indicate records were dropped
|
|
||||||
for _, r := range recs {
|
|
||||||
if len(list) == maxLen {
|
|
||||||
deferredEllipsis = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
// Elide struct fields that are zero value.
|
|
||||||
if k == reflect.Struct {
|
|
||||||
var isZero bool
|
|
||||||
switch opts.DiffMode {
|
|
||||||
case diffIdentical:
|
|
||||||
isZero = value.IsZero(r.Value.ValueX) || value.IsZero(r.Value.ValueY)
|
|
||||||
case diffRemoved:
|
|
||||||
isZero = value.IsZero(r.Value.ValueX)
|
|
||||||
case diffInserted:
|
|
||||||
isZero = value.IsZero(r.Value.ValueY)
|
|
||||||
}
|
|
||||||
if isZero {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Elide ignored nodes.
|
|
||||||
if r.Value.NumIgnored > 0 && r.Value.NumSame+r.Value.NumDiff == 0 {
|
|
||||||
deferredEllipsis = !(k == reflect.Slice || k == reflect.Array)
|
|
||||||
if !deferredEllipsis {
|
|
||||||
list.AppendEllipsis(diffStats{})
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if out := opts.FormatDiff(r.Value, ptrs); out != nil {
|
|
||||||
list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if deferredEllipsis {
|
|
||||||
list.AppendEllipsis(diffStats{})
|
|
||||||
}
|
|
||||||
return &textWrap{Prefix: "{", Value: list, Suffix: "}"}
|
|
||||||
case diffUnknown:
|
|
||||||
default:
|
|
||||||
panic("invalid diff mode")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Handle differencing.
|
|
||||||
var numDiffs int
|
|
||||||
var list textList
|
|
||||||
var keys []reflect.Value // invariant: len(list) == len(keys)
|
|
||||||
groups := coalesceAdjacentRecords(name, recs)
|
|
||||||
maxGroup := diffStats{Name: name}
|
|
||||||
for i, ds := range groups {
|
|
||||||
if maxLen >= 0 && numDiffs >= maxLen {
|
|
||||||
maxGroup = maxGroup.Append(ds)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Handle equal records.
|
|
||||||
if ds.NumDiff() == 0 {
|
|
||||||
// Compute the number of leading and trailing records to print.
|
|
||||||
var numLo, numHi int
|
|
||||||
numEqual := ds.NumIgnored + ds.NumIdentical
|
|
||||||
for numLo < numContextRecords && numLo+numHi < numEqual && i != 0 {
|
|
||||||
if r := recs[numLo].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
numLo++
|
|
||||||
}
|
|
||||||
for numHi < numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 {
|
|
||||||
if r := recs[numEqual-numHi-1].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
numHi++
|
|
||||||
}
|
|
||||||
if numEqual-(numLo+numHi) == 1 && ds.NumIgnored == 0 {
|
|
||||||
numHi++ // Avoid pointless coalescing of a single equal record
|
|
||||||
}
|
|
||||||
|
|
||||||
// Format the equal values.
|
|
||||||
for _, r := range recs[:numLo] {
|
|
||||||
out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs)
|
|
||||||
list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
|
|
||||||
keys = append(keys, r.Key)
|
|
||||||
}
|
|
||||||
if numEqual > numLo+numHi {
|
|
||||||
ds.NumIdentical -= numLo + numHi
|
|
||||||
list.AppendEllipsis(ds)
|
|
||||||
for len(keys) < len(list) {
|
|
||||||
keys = append(keys, reflect.Value{})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for _, r := range recs[numEqual-numHi : numEqual] {
|
|
||||||
out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs)
|
|
||||||
list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
|
|
||||||
keys = append(keys, r.Key)
|
|
||||||
}
|
|
||||||
recs = recs[numEqual:]
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Handle unequal records.
|
|
||||||
for _, r := range recs[:ds.NumDiff()] {
|
|
||||||
switch {
|
|
||||||
case opts.CanFormatDiffSlice(r.Value):
|
|
||||||
out := opts.FormatDiffSlice(r.Value)
|
|
||||||
list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
|
|
||||||
keys = append(keys, r.Key)
|
|
||||||
case r.Value.NumChildren == r.Value.MaxDepth:
|
|
||||||
outx := opts.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs)
|
|
||||||
outy := opts.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs)
|
|
||||||
for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ {
|
|
||||||
opts2 := verbosityPreset(opts, i)
|
|
||||||
outx = opts2.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs)
|
|
||||||
outy = opts2.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs)
|
|
||||||
}
|
|
||||||
if outx != nil {
|
|
||||||
list = append(list, textRecord{Diff: diffRemoved, Key: formatKey(r.Key), Value: outx})
|
|
||||||
keys = append(keys, r.Key)
|
|
||||||
}
|
|
||||||
if outy != nil {
|
|
||||||
list = append(list, textRecord{Diff: diffInserted, Key: formatKey(r.Key), Value: outy})
|
|
||||||
keys = append(keys, r.Key)
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
out := opts.FormatDiff(r.Value, ptrs)
|
|
||||||
list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
|
|
||||||
keys = append(keys, r.Key)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
recs = recs[ds.NumDiff():]
|
|
||||||
numDiffs += ds.NumDiff()
|
|
||||||
}
|
|
||||||
if maxGroup.IsZero() {
|
|
||||||
assert(len(recs) == 0)
|
|
||||||
} else {
|
|
||||||
list.AppendEllipsis(maxGroup)
|
|
||||||
for len(keys) < len(list) {
|
|
||||||
keys = append(keys, reflect.Value{})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
assert(len(list) == len(keys))
|
|
||||||
|
|
||||||
// For maps, the default formatting logic uses fmt.Stringer which may
|
|
||||||
// produce ambiguous output. Avoid calling String to disambiguate.
|
|
||||||
if k == reflect.Map {
|
|
||||||
var ambiguous bool
|
|
||||||
seenKeys := map[string]reflect.Value{}
|
|
||||||
for i, currKey := range keys {
|
|
||||||
if currKey.IsValid() {
|
|
||||||
strKey := list[i].Key
|
|
||||||
prevKey, seen := seenKeys[strKey]
|
|
||||||
if seen && prevKey.CanInterface() && currKey.CanInterface() {
|
|
||||||
ambiguous = prevKey.Interface() != currKey.Interface()
|
|
||||||
if ambiguous {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
seenKeys[strKey] = currKey
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if ambiguous {
|
|
||||||
for i, k := range keys {
|
|
||||||
if k.IsValid() {
|
|
||||||
list[i].Key = formatMapKey(k, true, ptrs)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return &textWrap{Prefix: "{", Value: list, Suffix: "}"}
|
|
||||||
}
|
|
||||||
|
|
||||||
// coalesceAdjacentRecords coalesces the list of records into groups of
|
|
||||||
// adjacent equal or unequal counts.
|
|
||||||
func coalesceAdjacentRecords(name string, recs []reportRecord) (groups []diffStats) {
|
|
||||||
var prevCase int // Arbitrary index into which case last occurred
|
|
||||||
lastStats := func(i int) *diffStats {
|
|
||||||
if prevCase != i {
|
|
||||||
groups = append(groups, diffStats{Name: name})
|
|
||||||
prevCase = i
|
|
||||||
}
|
|
||||||
return &groups[len(groups)-1]
|
|
||||||
}
|
|
||||||
for _, r := range recs {
|
|
||||||
switch rv := r.Value; {
|
|
||||||
case rv.NumIgnored > 0 && rv.NumSame+rv.NumDiff == 0:
|
|
||||||
lastStats(1).NumIgnored++
|
|
||||||
case rv.NumDiff == 0:
|
|
||||||
lastStats(1).NumIdentical++
|
|
||||||
case rv.NumDiff > 0 && !rv.ValueY.IsValid():
|
|
||||||
lastStats(2).NumRemoved++
|
|
||||||
case rv.NumDiff > 0 && !rv.ValueX.IsValid():
|
|
||||||
lastStats(2).NumInserted++
|
|
||||||
default:
|
|
||||||
lastStats(2).NumModified++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return groups
|
|
||||||
}
|
|
|
@@ -1,264 +0,0 @@
|
||||||
// Copyright 2020, The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE.md file.
|
|
||||||
|
|
||||||
package cmp
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"reflect"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/google/go-cmp/cmp/internal/flags"
|
|
||||||
"github.com/google/go-cmp/cmp/internal/value"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
pointerDelimPrefix = "⟪"
|
|
||||||
pointerDelimSuffix = "⟫"
|
|
||||||
)
|
|
||||||
|
|
||||||
// formatPointer prints the address of the pointer.
|
|
||||||
func formatPointer(p value.Pointer, withDelims bool) string {
|
|
||||||
v := p.Uintptr()
|
|
||||||
if flags.Deterministic {
|
|
||||||
v = 0xdeadf00f // Only used for stable testing purposes
|
|
||||||
}
|
|
||||||
if withDelims {
|
|
||||||
return pointerDelimPrefix + formatHex(uint64(v)) + pointerDelimSuffix
|
|
||||||
}
|
|
||||||
return formatHex(uint64(v))
|
|
||||||
}
|
|
||||||
|
|
||||||
// pointerReferences is a stack of pointers visited so far.
|
|
||||||
type pointerReferences [][2]value.Pointer
|
|
||||||
|
|
||||||
func (ps *pointerReferences) PushPair(vx, vy reflect.Value, d diffMode, deref bool) (pp [2]value.Pointer) {
|
|
||||||
if deref && vx.IsValid() {
|
|
||||||
vx = vx.Addr()
|
|
||||||
}
|
|
||||||
if deref && vy.IsValid() {
|
|
||||||
vy = vy.Addr()
|
|
||||||
}
|
|
||||||
switch d {
|
|
||||||
case diffUnknown, diffIdentical:
|
|
||||||
pp = [2]value.Pointer{value.PointerOf(vx), value.PointerOf(vy)}
|
|
||||||
case diffRemoved:
|
|
||||||
pp = [2]value.Pointer{value.PointerOf(vx), value.Pointer{}}
|
|
||||||
case diffInserted:
|
|
||||||
pp = [2]value.Pointer{value.Pointer{}, value.PointerOf(vy)}
|
|
||||||
}
|
|
||||||
*ps = append(*ps, pp)
|
|
||||||
return pp
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ps *pointerReferences) Push(v reflect.Value) (p value.Pointer, seen bool) {
|
|
||||||
p = value.PointerOf(v)
|
|
||||||
for _, pp := range *ps {
|
|
||||||
if p == pp[0] || p == pp[1] {
|
|
||||||
return p, true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
*ps = append(*ps, [2]value.Pointer{p, p})
|
|
||||||
return p, false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ps *pointerReferences) Pop() {
|
|
||||||
*ps = (*ps)[:len(*ps)-1]
|
|
||||||
}
|
|
||||||
|
|
||||||
// trunkReferences is metadata for a textNode indicating that the sub-tree
|
|
||||||
// represents the value for either pointer in a pair of references.
|
|
||||||
type trunkReferences struct{ pp [2]value.Pointer }
|
|
||||||
|
|
||||||
// trunkReference is metadata for a textNode indicating that the sub-tree
|
|
||||||
// represents the value for the given pointer reference.
|
|
||||||
type trunkReference struct{ p value.Pointer }
|
|
||||||
|
|
||||||
// leafReference is metadata for a textNode indicating that the value is
|
|
||||||
// truncated as it refers to another part of the tree (i.e., a trunk).
|
|
||||||
type leafReference struct{ p value.Pointer }
|
|
||||||
|
|
||||||
func wrapTrunkReferences(pp [2]value.Pointer, s textNode) textNode {
|
|
||||||
switch {
|
|
||||||
case pp[0].IsNil():
|
|
||||||
return &textWrap{Value: s, Metadata: trunkReference{pp[1]}}
|
|
||||||
case pp[1].IsNil():
|
|
||||||
return &textWrap{Value: s, Metadata: trunkReference{pp[0]}}
|
|
||||||
case pp[0] == pp[1]:
|
|
||||||
return &textWrap{Value: s, Metadata: trunkReference{pp[0]}}
|
|
||||||
default:
|
|
||||||
return &textWrap{Value: s, Metadata: trunkReferences{pp}}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
func wrapTrunkReference(p value.Pointer, printAddress bool, s textNode) textNode {
|
|
||||||
var prefix string
|
|
||||||
if printAddress {
|
|
||||||
prefix = formatPointer(p, true)
|
|
||||||
}
|
|
||||||
return &textWrap{Prefix: prefix, Value: s, Metadata: trunkReference{p}}
|
|
||||||
}
|
|
||||||
func makeLeafReference(p value.Pointer, printAddress bool) textNode {
|
|
||||||
out := &textWrap{Prefix: "(", Value: textEllipsis, Suffix: ")"}
|
|
||||||
var prefix string
|
|
||||||
if printAddress {
|
|
||||||
prefix = formatPointer(p, true)
|
|
||||||
}
|
|
||||||
return &textWrap{Prefix: prefix, Value: out, Metadata: leafReference{p}}
|
|
||||||
}
|
|
||||||
|
|
||||||
// resolveReferences walks the textNode tree searching for any leaf reference
|
|
||||||
// metadata and resolves each against the corresponding trunk references.
|
|
||||||
// Since pointer addresses in memory are not particularly readable to the user,
|
|
||||||
// it replaces each pointer value with an arbitrary and unique reference ID.
|
|
||||||
func resolveReferences(s textNode) {
|
|
||||||
var walkNodes func(textNode, func(textNode))
|
|
||||||
walkNodes = func(s textNode, f func(textNode)) {
|
|
||||||
f(s)
|
|
||||||
switch s := s.(type) {
|
|
||||||
case *textWrap:
|
|
||||||
walkNodes(s.Value, f)
|
|
||||||
case textList:
|
|
||||||
for _, r := range s {
|
|
||||||
walkNodes(r.Value, f)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Collect all trunks and leaves with reference metadata.
|
|
||||||
var trunks, leaves []*textWrap
|
|
||||||
walkNodes(s, func(s textNode) {
|
|
||||||
if s, ok := s.(*textWrap); ok {
|
|
||||||
switch s.Metadata.(type) {
|
|
||||||
case leafReference:
|
|
||||||
leaves = append(leaves, s)
|
|
||||||
case trunkReference, trunkReferences:
|
|
||||||
trunks = append(trunks, s)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
// No leaf references to resolve.
|
|
||||||
if len(leaves) == 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Collect the set of all leaf references to resolve.
|
|
||||||
leafPtrs := make(map[value.Pointer]bool)
|
|
||||||
for _, leaf := range leaves {
|
|
||||||
leafPtrs[leaf.Metadata.(leafReference).p] = true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Collect the set of trunk pointers that are always paired together.
|
|
||||||
// This allows us to assign a single ID to both pointers for brevity.
|
|
||||||
// If a pointer in a pair ever occurs by itself or as a different pair,
|
|
||||||
// then the pair is broken.
|
|
||||||
pairedTrunkPtrs := make(map[value.Pointer]value.Pointer)
|
|
||||||
unpair := func(p value.Pointer) {
|
|
||||||
if !pairedTrunkPtrs[p].IsNil() {
|
|
||||||
pairedTrunkPtrs[pairedTrunkPtrs[p]] = value.Pointer{} // invalidate other half
|
|
||||||
}
|
|
||||||
pairedTrunkPtrs[p] = value.Pointer{} // invalidate this half
|
|
||||||
}
|
|
||||||
for _, trunk := range trunks {
|
|
||||||
switch p := trunk.Metadata.(type) {
|
|
||||||
case trunkReference:
|
|
||||||
unpair(p.p) // standalone pointer cannot be part of a pair
|
|
||||||
case trunkReferences:
|
|
||||||
p0, ok0 := pairedTrunkPtrs[p.pp[0]]
|
|
||||||
p1, ok1 := pairedTrunkPtrs[p.pp[1]]
|
|
||||||
switch {
|
|
||||||
case !ok0 && !ok1:
|
|
||||||
// Register the newly seen pair.
|
|
||||||
pairedTrunkPtrs[p.pp[0]] = p.pp[1]
|
|
||||||
pairedTrunkPtrs[p.pp[1]] = p.pp[0]
|
|
||||||
case ok0 && ok1 && p0 == p.pp[1] && p1 == p.pp[0]:
|
|
||||||
// Exact pair already seen; do nothing.
|
|
||||||
default:
|
|
||||||
// Pair conflicts with some other pair; break all pairs.
|
|
||||||
unpair(p.pp[0])
|
|
||||||
unpair(p.pp[1])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Correlate each pointer referenced by leaves to a unique identifier,
|
|
||||||
// and print the IDs for each trunk that matches those pointers.
|
|
||||||
var nextID uint
|
|
||||||
ptrIDs := make(map[value.Pointer]uint)
|
|
||||||
newID := func() uint {
|
|
||||||
id := nextID
|
|
||||||
nextID++
|
|
||||||
return id
|
|
||||||
}
|
|
||||||
for _, trunk := range trunks {
|
|
||||||
switch p := trunk.Metadata.(type) {
|
|
||||||
case trunkReference:
|
|
||||||
if print := leafPtrs[p.p]; print {
|
|
||||||
id, ok := ptrIDs[p.p]
|
|
||||||
if !ok {
|
|
||||||
id = newID()
|
|
||||||
ptrIDs[p.p] = id
|
|
||||||
}
|
|
||||||
trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id))
|
|
||||||
}
|
|
||||||
case trunkReferences:
|
|
||||||
print0 := leafPtrs[p.pp[0]]
|
|
||||||
print1 := leafPtrs[p.pp[1]]
|
|
||||||
if print0 || print1 {
|
|
||||||
id0, ok0 := ptrIDs[p.pp[0]]
|
|
||||||
id1, ok1 := ptrIDs[p.pp[1]]
|
|
||||||
isPair := pairedTrunkPtrs[p.pp[0]] == p.pp[1] && pairedTrunkPtrs[p.pp[1]] == p.pp[0]
|
|
||||||
if isPair {
|
|
||||||
var id uint
|
|
||||||
assert(ok0 == ok1) // must be seen together or not at all
|
|
||||||
if ok0 {
|
|
||||||
assert(id0 == id1) // must have the same ID
|
|
||||||
id = id0
|
|
||||||
} else {
|
|
||||||
id = newID()
|
|
||||||
ptrIDs[p.pp[0]] = id
|
|
||||||
ptrIDs[p.pp[1]] = id
|
|
||||||
}
|
|
||||||
trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id))
|
|
||||||
} else {
|
|
||||||
if print0 && !ok0 {
|
|
||||||
id0 = newID()
|
|
||||||
ptrIDs[p.pp[0]] = id0
|
|
||||||
}
|
|
||||||
if print1 && !ok1 {
|
|
||||||
id1 = newID()
|
|
||||||
ptrIDs[p.pp[1]] = id1
|
|
||||||
}
|
|
||||||
switch {
|
|
||||||
case print0 && print1:
|
|
||||||
trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0)+","+formatReference(id1))
|
|
||||||
case print0:
|
|
||||||
trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0))
|
|
||||||
case print1:
|
|
||||||
trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id1))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update all leaf references with the unique identifier.
|
|
||||||
for _, leaf := range leaves {
|
|
||||||
if id, ok := ptrIDs[leaf.Metadata.(leafReference).p]; ok {
|
|
||||||
leaf.Prefix = updateReferencePrefix(leaf.Prefix, formatReference(id))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func formatReference(id uint) string {
|
|
||||||
return fmt.Sprintf("ref#%d", id)
|
|
||||||
}
|
|
||||||
|
|
||||||
func updateReferencePrefix(prefix, ref string) string {
|
|
||||||
if prefix == "" {
|
|
||||||
return pointerDelimPrefix + ref + pointerDelimSuffix
|
|
||||||
}
|
|
||||||
suffix := strings.TrimPrefix(prefix, pointerDelimPrefix)
|
|
||||||
return pointerDelimPrefix + ref + ": " + suffix
|
|
||||||
}
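For orientation, the two helpers above turn a pointer prefix such as ⟪0xdeadf00f⟫ into ⟪ref#0: 0xdeadf00f⟫, or produce ⟪ref#0⟫ when there was no address prefix. A minimal sketch of that string transformation, with the unexported helpers inlined for illustration:

package main

import (
	"fmt"
	"strings"
)

const (
	delimPrefix = "⟪"
	delimSuffix = "⟫"
)

// updateRefPrefix mirrors updateReferencePrefix: it injects a reference ID
// into an existing pointer prefix, or creates a new delimited prefix.
func updateRefPrefix(prefix, ref string) string {
	if prefix == "" {
		return delimPrefix + ref + delimSuffix
	}
	suffix := strings.TrimPrefix(prefix, delimPrefix)
	return delimPrefix + ref + ": " + suffix
}

func main() {
	ref := fmt.Sprintf("ref#%d", 0)
	fmt.Println(updateRefPrefix("", ref))             // ⟪ref#0⟫
	fmt.Println(updateRefPrefix("⟪0xdeadf00f⟫", ref)) // ⟪ref#0: 0xdeadf00f⟫
}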
@ -1,400 +0,0 @@
// Copyright 2019, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.

package cmp

import (
	"bytes"
	"fmt"
	"reflect"
	"strconv"
	"strings"
	"unicode"
	"unicode/utf8"

	"github.com/google/go-cmp/cmp/internal/value"
)

type formatValueOptions struct {
|
|
||||||
// AvoidStringer controls whether to avoid calling custom stringer
|
|
||||||
// methods like error.Error or fmt.Stringer.String.
|
|
||||||
AvoidStringer bool
|
|
||||||
|
|
||||||
// PrintAddresses controls whether to print the address of all pointers,
|
|
||||||
// slice elements, and maps.
|
|
||||||
PrintAddresses bool
|
|
||||||
|
|
||||||
// QualifiedNames controls whether FormatType uses the fully qualified name
|
|
||||||
// (including the full package path as opposed to just the package name).
|
|
||||||
QualifiedNames bool
|
|
||||||
|
|
||||||
// VerbosityLevel controls the amount of output to produce.
|
|
||||||
// A higher value produces more output. A value of zero or lower produces
|
|
||||||
// no output (represented using an ellipsis).
|
|
||||||
// If LimitVerbosity is false, then the level is treated as infinite.
|
|
||||||
VerbosityLevel int
|
|
||||||
|
|
||||||
// LimitVerbosity specifies that formatting should respect VerbosityLevel.
|
|
||||||
LimitVerbosity bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// FormatType prints the type as if it were wrapping s.
|
|
||||||
// This may return s as-is depending on the current type and TypeMode mode.
|
|
||||||
func (opts formatOptions) FormatType(t reflect.Type, s textNode) textNode {
|
|
||||||
// Check whether to emit the type or not.
|
|
||||||
switch opts.TypeMode {
|
|
||||||
case autoType:
|
|
||||||
switch t.Kind() {
|
|
||||||
case reflect.Struct, reflect.Slice, reflect.Array, reflect.Map:
|
|
||||||
if s.Equal(textNil) {
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
if opts.DiffMode == diffIdentical {
|
|
||||||
return s // elide type for identical nodes
|
|
||||||
}
|
|
||||||
case elideType:
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// Determine the type label, applying special handling for unnamed types.
|
|
||||||
typeName := value.TypeString(t, opts.QualifiedNames)
|
|
||||||
if t.Name() == "" {
|
|
||||||
// According to Go grammar, certain type literals contain symbols that
|
|
||||||
// do not strongly bind to the next lexicographical token (e.g., *T).
|
|
||||||
switch t.Kind() {
|
|
||||||
case reflect.Chan, reflect.Func, reflect.Ptr:
|
|
||||||
typeName = "(" + typeName + ")"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return &textWrap{Prefix: typeName, Value: wrapParens(s)}
|
|
||||||
}
|
|
||||||
|
|
||||||
// wrapParens wraps s with a set of parenthesis, but avoids it if the
|
|
||||||
// wrapped node itself is already surrounded by a pair of parenthesis or braces.
|
|
||||||
// It handles unwrapping one level of pointer-reference nodes.
|
|
||||||
func wrapParens(s textNode) textNode {
|
|
||||||
var refNode *textWrap
|
|
||||||
if s2, ok := s.(*textWrap); ok {
|
|
||||||
// Unwrap a single pointer reference node.
|
|
||||||
switch s2.Metadata.(type) {
|
|
||||||
case leafReference, trunkReference, trunkReferences:
|
|
||||||
refNode = s2
|
|
||||||
if s3, ok := refNode.Value.(*textWrap); ok {
|
|
||||||
s2 = s3
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Already has delimiters that make parenthesis unnecessary.
|
|
||||||
hasParens := strings.HasPrefix(s2.Prefix, "(") && strings.HasSuffix(s2.Suffix, ")")
|
|
||||||
hasBraces := strings.HasPrefix(s2.Prefix, "{") && strings.HasSuffix(s2.Suffix, "}")
|
|
||||||
if hasParens || hasBraces {
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if refNode != nil {
|
|
||||||
refNode.Value = &textWrap{Prefix: "(", Value: refNode.Value, Suffix: ")"}
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
return &textWrap{Prefix: "(", Value: s, Suffix: ")"}
|
|
||||||
}
|
|
||||||
|
|
||||||
// FormatValue prints the reflect.Value, taking extra care to avoid descending
|
|
||||||
// into pointers already in ptrs. As pointers are visited, ptrs is also updated.
|
|
||||||
func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, ptrs *pointerReferences) (out textNode) {
|
|
||||||
if !v.IsValid() {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
t := v.Type()
|
|
||||||
|
|
||||||
// Check slice element for cycles.
|
|
||||||
if parentKind == reflect.Slice {
|
|
||||||
ptrRef, visited := ptrs.Push(v.Addr())
|
|
||||||
if visited {
|
|
||||||
return makeLeafReference(ptrRef, false)
|
|
||||||
}
|
|
||||||
defer ptrs.Pop()
|
|
||||||
defer func() { out = wrapTrunkReference(ptrRef, false, out) }()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check whether there is an Error or String method to call.
|
|
||||||
if !opts.AvoidStringer && v.CanInterface() {
|
|
||||||
// Avoid calling Error or String methods on nil receivers since many
|
|
||||||
// implementations crash when doing so.
|
|
||||||
if (t.Kind() != reflect.Ptr && t.Kind() != reflect.Interface) || !v.IsNil() {
|
|
||||||
var prefix, strVal string
|
|
||||||
func() {
|
|
||||||
// Swallow and ignore any panics from String or Error.
|
|
||||||
defer func() { recover() }()
|
|
||||||
switch v := v.Interface().(type) {
|
|
||||||
case error:
|
|
||||||
strVal = v.Error()
|
|
||||||
prefix = "e"
|
|
||||||
case fmt.Stringer:
|
|
||||||
strVal = v.String()
|
|
||||||
prefix = "s"
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
if prefix != "" {
|
|
||||||
return opts.formatString(prefix, strVal)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check whether to explicitly wrap the result with the type.
|
|
||||||
var skipType bool
|
|
||||||
defer func() {
|
|
||||||
if !skipType {
|
|
||||||
out = opts.FormatType(t, out)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
switch t.Kind() {
|
|
||||||
case reflect.Bool:
|
|
||||||
return textLine(fmt.Sprint(v.Bool()))
|
|
||||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
|
||||||
return textLine(fmt.Sprint(v.Int()))
|
|
||||||
case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
|
||||||
return textLine(fmt.Sprint(v.Uint()))
|
|
||||||
case reflect.Uint8:
|
|
||||||
if parentKind == reflect.Slice || parentKind == reflect.Array {
|
|
||||||
return textLine(formatHex(v.Uint()))
|
|
||||||
}
|
|
||||||
return textLine(fmt.Sprint(v.Uint()))
|
|
||||||
case reflect.Uintptr:
|
|
||||||
return textLine(formatHex(v.Uint()))
|
|
||||||
case reflect.Float32, reflect.Float64:
|
|
||||||
return textLine(fmt.Sprint(v.Float()))
|
|
||||||
case reflect.Complex64, reflect.Complex128:
|
|
||||||
return textLine(fmt.Sprint(v.Complex()))
|
|
||||||
case reflect.String:
|
|
||||||
return opts.formatString("", v.String())
|
|
||||||
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
|
|
||||||
return textLine(formatPointer(value.PointerOf(v), true))
|
|
||||||
case reflect.Struct:
|
|
||||||
var list textList
|
|
||||||
v := makeAddressable(v) // needed for retrieveUnexportedField
|
|
||||||
maxLen := v.NumField()
|
|
||||||
if opts.LimitVerbosity {
|
|
||||||
maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
|
|
||||||
opts.VerbosityLevel--
|
|
||||||
}
|
|
||||||
for i := 0; i < v.NumField(); i++ {
|
|
||||||
vv := v.Field(i)
|
|
||||||
if value.IsZero(vv) {
|
|
||||||
continue // Elide fields with zero values
|
|
||||||
}
|
|
||||||
if len(list) == maxLen {
|
|
||||||
list.AppendEllipsis(diffStats{})
|
|
||||||
break
|
|
||||||
}
|
|
||||||
sf := t.Field(i)
|
|
||||||
if supportExporters && !isExported(sf.Name) {
|
|
||||||
vv = retrieveUnexportedField(v, sf, true)
|
|
||||||
}
|
|
||||||
s := opts.WithTypeMode(autoType).FormatValue(vv, t.Kind(), ptrs)
|
|
||||||
list = append(list, textRecord{Key: sf.Name, Value: s})
|
|
||||||
}
|
|
||||||
return &textWrap{Prefix: "{", Value: list, Suffix: "}"}
|
|
||||||
case reflect.Slice:
|
|
||||||
if v.IsNil() {
|
|
||||||
return textNil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check whether this is a []byte of text data.
|
|
||||||
if t.Elem() == reflect.TypeOf(byte(0)) {
|
|
||||||
b := v.Bytes()
|
|
||||||
isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) && unicode.IsSpace(r) }
|
|
||||||
if len(b) > 0 && utf8.Valid(b) && len(bytes.TrimFunc(b, isPrintSpace)) == 0 {
|
|
||||||
out = opts.formatString("", string(b))
|
|
||||||
return opts.WithTypeMode(emitType).FormatType(t, out)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fallthrough
|
|
||||||
case reflect.Array:
|
|
||||||
maxLen := v.Len()
|
|
||||||
if opts.LimitVerbosity {
|
|
||||||
maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
|
|
||||||
opts.VerbosityLevel--
|
|
||||||
}
|
|
||||||
var list textList
|
|
||||||
for i := 0; i < v.Len(); i++ {
|
|
||||||
if len(list) == maxLen {
|
|
||||||
list.AppendEllipsis(diffStats{})
|
|
||||||
break
|
|
||||||
}
|
|
||||||
s := opts.WithTypeMode(elideType).FormatValue(v.Index(i), t.Kind(), ptrs)
|
|
||||||
list = append(list, textRecord{Value: s})
|
|
||||||
}
|
|
||||||
|
|
||||||
out = &textWrap{Prefix: "{", Value: list, Suffix: "}"}
|
|
||||||
if t.Kind() == reflect.Slice && opts.PrintAddresses {
|
|
||||||
header := fmt.Sprintf("ptr:%v, len:%d, cap:%d", formatPointer(value.PointerOf(v), false), v.Len(), v.Cap())
|
|
||||||
out = &textWrap{Prefix: pointerDelimPrefix + header + pointerDelimSuffix, Value: out}
|
|
||||||
}
|
|
||||||
return out
|
|
||||||
case reflect.Map:
|
|
||||||
if v.IsNil() {
|
|
||||||
return textNil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check pointer for cycles.
|
|
||||||
ptrRef, visited := ptrs.Push(v)
|
|
||||||
if visited {
|
|
||||||
return makeLeafReference(ptrRef, opts.PrintAddresses)
|
|
||||||
}
|
|
||||||
defer ptrs.Pop()
|
|
||||||
|
|
||||||
maxLen := v.Len()
|
|
||||||
if opts.LimitVerbosity {
|
|
||||||
maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
|
|
||||||
opts.VerbosityLevel--
|
|
||||||
}
|
|
||||||
var list textList
|
|
||||||
for _, k := range value.SortKeys(v.MapKeys()) {
|
|
||||||
if len(list) == maxLen {
|
|
||||||
list.AppendEllipsis(diffStats{})
|
|
||||||
break
|
|
||||||
}
|
|
||||||
sk := formatMapKey(k, false, ptrs)
|
|
||||||
sv := opts.WithTypeMode(elideType).FormatValue(v.MapIndex(k), t.Kind(), ptrs)
|
|
||||||
list = append(list, textRecord{Key: sk, Value: sv})
|
|
||||||
}
|
|
||||||
|
|
||||||
out = &textWrap{Prefix: "{", Value: list, Suffix: "}"}
|
|
||||||
out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out)
|
|
||||||
return out
|
|
||||||
case reflect.Ptr:
|
|
||||||
if v.IsNil() {
|
|
||||||
return textNil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check pointer for cycles.
|
|
||||||
ptrRef, visited := ptrs.Push(v)
|
|
||||||
if visited {
|
|
||||||
out = makeLeafReference(ptrRef, opts.PrintAddresses)
|
|
||||||
return &textWrap{Prefix: "&", Value: out}
|
|
||||||
}
|
|
||||||
defer ptrs.Pop()
|
|
||||||
|
|
||||||
skipType = true // Let the underlying value print the type instead
|
|
||||||
out = opts.FormatValue(v.Elem(), t.Kind(), ptrs)
|
|
||||||
out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out)
|
|
||||||
out = &textWrap{Prefix: "&", Value: out}
|
|
||||||
return out
|
|
||||||
case reflect.Interface:
|
|
||||||
if v.IsNil() {
|
|
||||||
return textNil
|
|
||||||
}
|
|
||||||
// Interfaces accept different concrete types,
|
|
||||||
// so configure the underlying value to explicitly print the type.
|
|
||||||
skipType = true // Print the concrete type instead
|
|
||||||
return opts.WithTypeMode(emitType).FormatValue(v.Elem(), t.Kind(), ptrs)
|
|
||||||
default:
|
|
||||||
panic(fmt.Sprintf("%v kind not handled", v.Kind()))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (opts formatOptions) formatString(prefix, s string) textNode {
|
|
||||||
maxLen := len(s)
|
|
||||||
maxLines := strings.Count(s, "\n") + 1
|
|
||||||
if opts.LimitVerbosity {
|
|
||||||
maxLen = (1 << opts.verbosity()) << 5 // 32, 64, 128, 256, etc...
|
|
||||||
maxLines = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc...
|
|
||||||
}
|
|
||||||
|
|
||||||
// For multiline strings, use the triple-quote syntax,
|
|
||||||
// but only use it when printing removed or inserted nodes since
|
|
||||||
// we only want the extra verbosity for those cases.
|
|
||||||
lines := strings.Split(strings.TrimSuffix(s, "\n"), "\n")
|
|
||||||
isTripleQuoted := len(lines) >= 4 && (opts.DiffMode == '-' || opts.DiffMode == '+')
|
|
||||||
for i := 0; i < len(lines) && isTripleQuoted; i++ {
|
|
||||||
lines[i] = strings.TrimPrefix(strings.TrimSuffix(lines[i], "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support
|
|
||||||
isPrintable := func(r rune) bool {
|
|
||||||
return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable
|
|
||||||
}
|
|
||||||
line := lines[i]
|
|
||||||
isTripleQuoted = !strings.HasPrefix(strings.TrimPrefix(line, prefix), `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == "" && len(line) <= maxLen
|
|
||||||
}
|
|
||||||
if isTripleQuoted {
|
|
||||||
var list textList
|
|
||||||
list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true})
|
|
||||||
for i, line := range lines {
|
|
||||||
if numElided := len(lines) - i; i == maxLines-1 && numElided > 1 {
|
|
||||||
comment := commentString(fmt.Sprintf("%d elided lines", numElided))
|
|
||||||
list = append(list, textRecord{Diff: opts.DiffMode, Value: textEllipsis, ElideComma: true, Comment: comment})
|
|
||||||
break
|
|
||||||
}
|
|
||||||
list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(line), ElideComma: true})
|
|
||||||
}
|
|
||||||
list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true})
|
|
||||||
return &textWrap{Prefix: "(", Value: list, Suffix: ")"}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Format the string as a single-line quoted string.
|
|
||||||
if len(s) > maxLen+len(textEllipsis) {
|
|
||||||
return textLine(prefix + formatString(s[:maxLen]) + string(textEllipsis))
|
|
||||||
}
|
|
||||||
return textLine(prefix + formatString(s))
|
|
||||||
}
|
|
||||||
|
|
||||||
// formatMapKey formats v as if it were a map key.
|
|
||||||
// The result is guaranteed to be a single line.
|
|
||||||
func formatMapKey(v reflect.Value, disambiguate bool, ptrs *pointerReferences) string {
|
|
||||||
var opts formatOptions
|
|
||||||
opts.DiffMode = diffIdentical
|
|
||||||
opts.TypeMode = elideType
|
|
||||||
opts.PrintAddresses = disambiguate
|
|
||||||
opts.AvoidStringer = disambiguate
|
|
||||||
opts.QualifiedNames = disambiguate
|
|
||||||
s := opts.FormatValue(v, reflect.Map, ptrs).String()
|
|
||||||
return strings.TrimSpace(s)
|
|
||||||
}
|
|
||||||
|
|
||||||
// formatString prints s as a double-quoted or backtick-quoted string.
|
|
||||||
func formatString(s string) string {
|
|
||||||
// Use a quoted string if it is the same length as a raw string literal.
|
|
||||||
// Otherwise, attempt to use the raw string form.
|
|
||||||
qs := strconv.Quote(s)
|
|
||||||
if len(qs) == 1+len(s)+1 {
|
|
||||||
return qs
|
|
||||||
}
|
|
||||||
|
|
||||||
// Disallow newlines to ensure output is a single line.
|
|
||||||
// Only allow printable runes for readability purposes.
|
|
||||||
rawInvalid := func(r rune) bool {
|
|
||||||
return r == '`' || r == '\n' || !(unicode.IsPrint(r) || r == '\t')
|
|
||||||
}
|
|
||||||
if utf8.ValidString(s) && strings.IndexFunc(s, rawInvalid) < 0 {
|
|
||||||
return "`" + s + "`"
|
|
||||||
}
|
|
||||||
return qs
|
|
||||||
}
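To make the quoting rule above concrete, here is a standalone sketch of the same decision (a plain copy of the logic for illustration, not an exported API): strings that escape to exactly their own length stay double-quoted; otherwise printable strings without backticks or newlines fall back to a raw literal.

package main

import (
	"fmt"
	"strconv"
	"strings"
	"unicode"
	"unicode/utf8"
)

// quoteForReport copies the formatString logic above for illustration.
func quoteForReport(s string) string {
	qs := strconv.Quote(s)
	if len(qs) == 1+len(s)+1 {
		return qs // no characters needed escaping
	}
	rawInvalid := func(r rune) bool {
		return r == '`' || r == '\n' || !(unicode.IsPrint(r) || r == '\t')
	}
	if utf8.ValidString(s) && strings.IndexFunc(s, rawInvalid) < 0 {
		return "`" + s + "`" // raw literal keeps tabs and quotes readable
	}
	return qs
}

func main() {
	fmt.Println(quoteForReport("hello"))      // "hello"
	fmt.Println(quoteForReport("a\tb"))       // `a	b` (raw literal, tab preserved)
	fmt.Println(quoteForReport("line\n\x00")) // "line\n\x00" (falls back to quoting)
}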
// formatHex prints u as a hexadecimal integer in Go notation.
|
|
||||||
func formatHex(u uint64) string {
|
|
||||||
var f string
|
|
||||||
switch {
|
|
||||||
case u <= 0xff:
|
|
||||||
f = "0x%02x"
|
|
||||||
case u <= 0xffff:
|
|
||||||
f = "0x%04x"
|
|
||||||
case u <= 0xffffff:
|
|
||||||
f = "0x%06x"
|
|
||||||
case u <= 0xffffffff:
|
|
||||||
f = "0x%08x"
|
|
||||||
case u <= 0xffffffffff:
|
|
||||||
f = "0x%010x"
|
|
||||||
case u <= 0xffffffffffff:
|
|
||||||
f = "0x%012x"
|
|
||||||
case u <= 0xffffffffffffff:
|
|
||||||
f = "0x%014x"
|
|
||||||
case u <= 0xffffffffffffffff:
|
|
||||||
f = "0x%016x"
|
|
||||||
}
|
|
||||||
return fmt.Sprintf(f, u)
|
|
||||||
}
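The width buckets above pad hex output to an even number of digits. A quick illustrative run (only a few of the buckets are reproduced here):

package main

import "fmt"

func main() {
	// The %0Nx verbs above pad each value to an even digit count.
	for _, u := range []uint64{0x0, 0xab, 0xabc, 0xabcdef1} {
		switch {
		case u <= 0xff:
			fmt.Printf("0x%02x\n", u) // 0x00, 0xab
		case u <= 0xffff:
			fmt.Printf("0x%04x\n", u) // 0x0abc
		case u <= 0xffffffff:
			fmt.Printf("0x%08x\n", u) // 0x0abcdef1
		}
	}
}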
@ -1,448 +0,0 @@
// Copyright 2019, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.

package cmp

import (
	"bytes"
	"fmt"
	"reflect"
	"strconv"
	"strings"
	"unicode"
	"unicode/utf8"

	"github.com/google/go-cmp/cmp/internal/diff"
)

// CanFormatDiffSlice reports whether we support custom formatting for nodes
|
|
||||||
// that are slices of primitive kinds or strings.
|
|
||||||
func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool {
|
|
||||||
switch {
|
|
||||||
case opts.DiffMode != diffUnknown:
|
|
||||||
return false // Must be formatting in diff mode
|
|
||||||
case v.NumDiff == 0:
|
|
||||||
return false // No differences detected
|
|
||||||
case !v.ValueX.IsValid() || !v.ValueY.IsValid():
|
|
||||||
return false // Both values must be valid
|
|
||||||
case v.Type.Kind() == reflect.Slice && (v.ValueX.Len() == 0 || v.ValueY.Len() == 0):
|
|
||||||
return false // Both slice values have to be non-empty
|
|
||||||
case v.NumIgnored > 0:
|
|
||||||
return false // Some ignore option was used
|
|
||||||
case v.NumTransformed > 0:
|
|
||||||
return false // Some transform option was used
|
|
||||||
case v.NumCompared > 1:
|
|
||||||
return false // More than one comparison was used
|
|
||||||
case v.NumCompared == 1 && v.Type.Name() != "":
|
|
||||||
// The need for cmp to check applicability of options on every element
|
|
||||||
// in a slice is a significant performance detriment for large []byte.
|
|
||||||
// The workaround is to specify Comparer(bytes.Equal),
|
|
||||||
// which enables cmp to compare []byte more efficiently.
|
|
||||||
// If they differ, we still want to provide batched diffing.
|
|
||||||
// The logic disallows named types since they tend to have their own
|
|
||||||
// String method, with nicer formatting than what this provides.
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
switch t := v.Type; t.Kind() {
|
|
||||||
case reflect.String:
|
|
||||||
case reflect.Array, reflect.Slice:
|
|
||||||
// Only slices of primitive types have specialized handling.
|
|
||||||
switch t.Elem().Kind() {
|
|
||||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
|
|
||||||
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
|
|
||||||
reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
|
|
||||||
default:
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// If a sufficient number of elements already differ,
|
|
||||||
// use specialized formatting even if length requirement is not met.
|
|
||||||
if v.NumDiff > v.NumSame {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Use specialized string diffing for longer slices or strings.
|
|
||||||
const minLength = 64
|
|
||||||
return v.ValueX.Len() >= minLength && v.ValueY.Len() >= minLength
|
|
||||||
}
|
|
||||||
|
|
||||||
// FormatDiffSlice prints a diff for the slices (or strings) represented by v.
|
|
||||||
// This provides custom-tailored logic to make printing of differences in
|
|
||||||
// textual strings and slices of primitive kinds more readable.
|
|
||||||
func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
|
|
||||||
assert(opts.DiffMode == diffUnknown)
|
|
||||||
t, vx, vy := v.Type, v.ValueX, v.ValueY
|
|
||||||
|
|
||||||
// Auto-detect the type of the data.
|
|
||||||
var isLinedText, isText, isBinary bool
|
|
||||||
var sx, sy string
|
|
||||||
switch {
|
|
||||||
case t.Kind() == reflect.String:
|
|
||||||
sx, sy = vx.String(), vy.String()
|
|
||||||
isText = true // Initial estimate, verify later
|
|
||||||
case t.Kind() == reflect.Slice && t.Elem() == reflect.TypeOf(byte(0)):
|
|
||||||
sx, sy = string(vx.Bytes()), string(vy.Bytes())
|
|
||||||
isBinary = true // Initial estimate, verify later
|
|
||||||
case t.Kind() == reflect.Array:
|
|
||||||
// Arrays need to be addressable for slice operations to work.
|
|
||||||
vx2, vy2 := reflect.New(t).Elem(), reflect.New(t).Elem()
|
|
||||||
vx2.Set(vx)
|
|
||||||
vy2.Set(vy)
|
|
||||||
vx, vy = vx2, vy2
|
|
||||||
}
|
|
||||||
if isText || isBinary {
|
|
||||||
var numLines, lastLineIdx, maxLineLen int
|
|
||||||
isBinary = !utf8.ValidString(sx) || !utf8.ValidString(sy)
|
|
||||||
for i, r := range sx + sy {
|
|
||||||
if !(unicode.IsPrint(r) || unicode.IsSpace(r)) || r == utf8.RuneError {
|
|
||||||
isBinary = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if r == '\n' {
|
|
||||||
if maxLineLen < i-lastLineIdx {
|
|
||||||
maxLineLen = i - lastLineIdx
|
|
||||||
}
|
|
||||||
lastLineIdx = i + 1
|
|
||||||
numLines++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
isText = !isBinary
|
|
||||||
isLinedText = isText && numLines >= 4 && maxLineLen <= 1024
|
|
||||||
}
|
|
||||||
|
|
||||||
// Format the string into printable records.
|
|
||||||
var list textList
|
|
||||||
var delim string
|
|
||||||
switch {
|
|
||||||
// If the text appears to be multi-lined text,
|
|
||||||
// then perform differencing across individual lines.
|
|
||||||
case isLinedText:
|
|
||||||
ssx := strings.Split(sx, "\n")
|
|
||||||
ssy := strings.Split(sy, "\n")
|
|
||||||
list = opts.formatDiffSlice(
|
|
||||||
reflect.ValueOf(ssx), reflect.ValueOf(ssy), 1, "line",
|
|
||||||
func(v reflect.Value, d diffMode) textRecord {
|
|
||||||
s := formatString(v.Index(0).String())
|
|
||||||
return textRecord{Diff: d, Value: textLine(s)}
|
|
||||||
},
|
|
||||||
)
|
|
||||||
delim = "\n"
|
|
||||||
|
|
||||||
// If possible, use a custom triple-quote (""") syntax for printing
|
|
||||||
// differences in a string literal. This format is more readable,
|
|
||||||
// but has edge-cases where differences are visually indistinguishable.
|
|
||||||
// This format is avoided under the following conditions:
|
|
||||||
// • A line starts with `"""`
|
|
||||||
// • A line starts with "..."
|
|
||||||
// • A line contains non-printable characters
|
|
||||||
// • Adjacent different lines differ only by whitespace
|
|
||||||
//
|
|
||||||
// For example:
|
|
||||||
// """
|
|
||||||
// ... // 3 identical lines
|
|
||||||
// foo
|
|
||||||
// bar
|
|
||||||
// - baz
|
|
||||||
// + BAZ
|
|
||||||
// """
|
|
||||||
isTripleQuoted := true
|
|
||||||
prevRemoveLines := map[string]bool{}
|
|
||||||
prevInsertLines := map[string]bool{}
|
|
||||||
var list2 textList
|
|
||||||
list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true})
|
|
||||||
for _, r := range list {
|
|
||||||
if !r.Value.Equal(textEllipsis) {
|
|
||||||
line, _ := strconv.Unquote(string(r.Value.(textLine)))
|
|
||||||
line = strings.TrimPrefix(strings.TrimSuffix(line, "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support
|
|
||||||
normLine := strings.Map(func(r rune) rune {
|
|
||||||
if unicode.IsSpace(r) {
|
|
||||||
return -1 // drop whitespace to avoid visually indistinguishable output
|
|
||||||
}
|
|
||||||
return r
|
|
||||||
}, line)
|
|
||||||
isPrintable := func(r rune) bool {
|
|
||||||
return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable
|
|
||||||
}
|
|
||||||
isTripleQuoted = !strings.HasPrefix(line, `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == ""
|
|
||||||
switch r.Diff {
|
|
||||||
case diffRemoved:
|
|
||||||
isTripleQuoted = isTripleQuoted && !prevInsertLines[normLine]
|
|
||||||
prevRemoveLines[normLine] = true
|
|
||||||
case diffInserted:
|
|
||||||
isTripleQuoted = isTripleQuoted && !prevRemoveLines[normLine]
|
|
||||||
prevInsertLines[normLine] = true
|
|
||||||
}
|
|
||||||
if !isTripleQuoted {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
r.Value = textLine(line)
|
|
||||||
r.ElideComma = true
|
|
||||||
}
|
|
||||||
if !(r.Diff == diffRemoved || r.Diff == diffInserted) { // start a new non-adjacent difference group
|
|
||||||
prevRemoveLines = map[string]bool{}
|
|
||||||
prevInsertLines = map[string]bool{}
|
|
||||||
}
|
|
||||||
list2 = append(list2, r)
|
|
||||||
}
|
|
||||||
if r := list2[len(list2)-1]; r.Diff == diffIdentical && len(r.Value.(textLine)) == 0 {
|
|
||||||
list2 = list2[:len(list2)-1] // elide single empty line at the end
|
|
||||||
}
|
|
||||||
list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true})
|
|
||||||
if isTripleQuoted {
|
|
||||||
var out textNode = &textWrap{Prefix: "(", Value: list2, Suffix: ")"}
|
|
||||||
switch t.Kind() {
|
|
||||||
case reflect.String:
|
|
||||||
if t != reflect.TypeOf(string("")) {
|
|
||||||
out = opts.FormatType(t, out)
|
|
||||||
}
|
|
||||||
case reflect.Slice:
|
|
||||||
// Always emit type for slices since the triple-quote syntax
|
|
||||||
// looks like a string (not a slice).
|
|
||||||
opts = opts.WithTypeMode(emitType)
|
|
||||||
out = opts.FormatType(t, out)
|
|
||||||
}
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
// If the text appears to be single-lined text,
|
|
||||||
// then perform differencing in approximately fixed-sized chunks.
|
|
||||||
// The output is printed as quoted strings.
|
|
||||||
case isText:
|
|
||||||
list = opts.formatDiffSlice(
|
|
||||||
reflect.ValueOf(sx), reflect.ValueOf(sy), 64, "byte",
|
|
||||||
func(v reflect.Value, d diffMode) textRecord {
|
|
||||||
s := formatString(v.String())
|
|
||||||
return textRecord{Diff: d, Value: textLine(s)}
|
|
||||||
},
|
|
||||||
)
|
|
||||||
delim = ""
|
|
||||||
|
|
||||||
// If the text appears to be binary data,
|
|
||||||
// then perform differencing in approximately fixed-sized chunks.
|
|
||||||
// The output is inspired by hexdump.
|
|
||||||
case isBinary:
|
|
||||||
list = opts.formatDiffSlice(
|
|
||||||
reflect.ValueOf(sx), reflect.ValueOf(sy), 16, "byte",
|
|
||||||
func(v reflect.Value, d diffMode) textRecord {
|
|
||||||
var ss []string
|
|
||||||
for i := 0; i < v.Len(); i++ {
|
|
||||||
ss = append(ss, formatHex(v.Index(i).Uint()))
|
|
||||||
}
|
|
||||||
s := strings.Join(ss, ", ")
|
|
||||||
comment := commentString(fmt.Sprintf("%c|%v|", d, formatASCII(v.String())))
|
|
||||||
return textRecord{Diff: d, Value: textLine(s), Comment: comment}
|
|
||||||
},
|
|
||||||
)
|
|
||||||
|
|
||||||
// For all other slices of primitive types,
|
|
||||||
// then perform differencing in approximately fixed-sized chunks.
|
|
||||||
// The size of each chunk depends on the width of the element kind.
|
|
||||||
default:
|
|
||||||
var chunkSize int
|
|
||||||
if t.Elem().Kind() == reflect.Bool {
|
|
||||||
chunkSize = 16
|
|
||||||
} else {
|
|
||||||
switch t.Elem().Bits() {
|
|
||||||
case 8:
|
|
||||||
chunkSize = 16
|
|
||||||
case 16:
|
|
||||||
chunkSize = 12
|
|
||||||
case 32:
|
|
||||||
chunkSize = 8
|
|
||||||
default:
|
|
||||||
chunkSize = 8
|
|
||||||
}
|
|
||||||
}
|
|
||||||
list = opts.formatDiffSlice(
|
|
||||||
vx, vy, chunkSize, t.Elem().Kind().String(),
|
|
||||||
func(v reflect.Value, d diffMode) textRecord {
|
|
||||||
var ss []string
|
|
||||||
for i := 0; i < v.Len(); i++ {
|
|
||||||
switch t.Elem().Kind() {
|
|
||||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
|
||||||
ss = append(ss, fmt.Sprint(v.Index(i).Int()))
|
|
||||||
case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
|
||||||
ss = append(ss, fmt.Sprint(v.Index(i).Uint()))
|
|
||||||
case reflect.Uint8, reflect.Uintptr:
|
|
||||||
ss = append(ss, formatHex(v.Index(i).Uint()))
|
|
||||||
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
|
|
||||||
ss = append(ss, fmt.Sprint(v.Index(i).Interface()))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
s := strings.Join(ss, ", ")
|
|
||||||
return textRecord{Diff: d, Value: textLine(s)}
|
|
||||||
},
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wrap the output with appropriate type information.
|
|
||||||
var out textNode = &textWrap{Prefix: "{", Value: list, Suffix: "}"}
|
|
||||||
if !isText {
|
|
||||||
// The "{...}" byte-sequence literal is not valid Go syntax for strings.
|
|
||||||
// Emit the type for extra clarity (e.g. "string{...}").
|
|
||||||
if t.Kind() == reflect.String {
|
|
||||||
opts = opts.WithTypeMode(emitType)
|
|
||||||
}
|
|
||||||
return opts.FormatType(t, out)
|
|
||||||
}
|
|
||||||
switch t.Kind() {
|
|
||||||
case reflect.String:
|
|
||||||
out = &textWrap{Prefix: "strings.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)}
|
|
||||||
if t != reflect.TypeOf(string("")) {
|
|
||||||
out = opts.FormatType(t, out)
|
|
||||||
}
|
|
||||||
case reflect.Slice:
|
|
||||||
out = &textWrap{Prefix: "bytes.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)}
|
|
||||||
if t != reflect.TypeOf([]byte(nil)) {
|
|
||||||
out = opts.FormatType(t, out)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return out
|
|
||||||
}
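From the public API, FormatDiffSlice is the code path behind multi-line string diffs. A rough usage sketch under the assumption that the inputs are long enough (at least 64 bytes and four lines) to take this path; the exact output of cmp.Diff is intentionally unstable, so the comment only gestures at its shape:

package main

import (
	"fmt"
	"strings"

	"github.com/google/go-cmp/cmp"
)

func main() {
	x := strings.Repeat("the quick brown fox\n", 4) + "jumps over the lazy dog\n"
	y := strings.Repeat("the quick brown fox\n", 4) + "JUMPS over the lazy dog\n"
	// Prints a line-oriented diff, typically in the triple-quote ("""...""")
	// form described above, with -/+ markers on the differing lines.
	fmt.Println(cmp.Diff(x, y))
}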
// formatASCII formats s as an ASCII string.
|
|
||||||
// This is useful for printing binary strings in a semi-legible way.
|
|
||||||
func formatASCII(s string) string {
|
|
||||||
b := bytes.Repeat([]byte{'.'}, len(s))
|
|
||||||
for i := 0; i < len(s); i++ {
|
|
||||||
if ' ' <= s[i] && s[i] <= '~' {
|
|
||||||
b[i] = s[i]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return string(b)
|
|
||||||
}
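formatASCII supplies the hexdump-style comments shown next to binary diffs: printable ASCII bytes are kept and everything else becomes a dot. A short standalone illustration of the same logic:

package main

import (
	"bytes"
	"fmt"
)

// asciiPreview copies the formatASCII logic above: printable ASCII bytes are
// kept, everything else is replaced with '.'.
func asciiPreview(s string) string {
	b := bytes.Repeat([]byte{'.'}, len(s))
	for i := 0; i < len(s); i++ {
		if ' ' <= s[i] && s[i] <= '~' {
			b[i] = s[i]
		}
	}
	return string(b)
}

func main() {
	fmt.Println(asciiPreview("GIF89a\x00\x01\xff")) // GIF89a...
}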
func (opts formatOptions) formatDiffSlice(
|
|
||||||
vx, vy reflect.Value, chunkSize int, name string,
|
|
||||||
makeRec func(reflect.Value, diffMode) textRecord,
|
|
||||||
) (list textList) {
|
|
||||||
es := diff.Difference(vx.Len(), vy.Len(), func(ix int, iy int) diff.Result {
|
|
||||||
return diff.BoolResult(vx.Index(ix).Interface() == vy.Index(iy).Interface())
|
|
||||||
})
|
|
||||||
|
|
||||||
appendChunks := func(v reflect.Value, d diffMode) int {
|
|
||||||
n0 := v.Len()
|
|
||||||
for v.Len() > 0 {
|
|
||||||
n := chunkSize
|
|
||||||
if n > v.Len() {
|
|
||||||
n = v.Len()
|
|
||||||
}
|
|
||||||
list = append(list, makeRec(v.Slice(0, n), d))
|
|
||||||
v = v.Slice(n, v.Len())
|
|
||||||
}
|
|
||||||
return n0 - v.Len()
|
|
||||||
}
|
|
||||||
|
|
||||||
var numDiffs int
|
|
||||||
maxLen := -1
|
|
||||||
if opts.LimitVerbosity {
|
|
||||||
maxLen = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc...
|
|
||||||
opts.VerbosityLevel--
|
|
||||||
}
|
|
||||||
|
|
||||||
groups := coalesceAdjacentEdits(name, es)
|
|
||||||
groups = coalesceInterveningIdentical(groups, chunkSize/4)
|
|
||||||
maxGroup := diffStats{Name: name}
|
|
||||||
for i, ds := range groups {
|
|
||||||
if maxLen >= 0 && numDiffs >= maxLen {
|
|
||||||
maxGroup = maxGroup.Append(ds)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Print equal.
|
|
||||||
if ds.NumDiff() == 0 {
|
|
||||||
// Compute the number of leading and trailing equal bytes to print.
|
|
||||||
var numLo, numHi int
|
|
||||||
numEqual := ds.NumIgnored + ds.NumIdentical
|
|
||||||
for numLo < chunkSize*numContextRecords && numLo+numHi < numEqual && i != 0 {
|
|
||||||
numLo++
|
|
||||||
}
|
|
||||||
for numHi < chunkSize*numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 {
|
|
||||||
numHi++
|
|
||||||
}
|
|
||||||
if numEqual-(numLo+numHi) <= chunkSize && ds.NumIgnored == 0 {
|
|
||||||
numHi = numEqual - numLo // Avoid pointless coalescing of single equal row
|
|
||||||
}
|
|
||||||
|
|
||||||
// Print the equal bytes.
|
|
||||||
appendChunks(vx.Slice(0, numLo), diffIdentical)
|
|
||||||
if numEqual > numLo+numHi {
|
|
||||||
ds.NumIdentical -= numLo + numHi
|
|
||||||
list.AppendEllipsis(ds)
|
|
||||||
}
|
|
||||||
appendChunks(vx.Slice(numEqual-numHi, numEqual), diffIdentical)
|
|
||||||
vx = vx.Slice(numEqual, vx.Len())
|
|
||||||
vy = vy.Slice(numEqual, vy.Len())
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Print unequal.
|
|
||||||
len0 := len(list)
|
|
||||||
nx := appendChunks(vx.Slice(0, ds.NumIdentical+ds.NumRemoved+ds.NumModified), diffRemoved)
|
|
||||||
vx = vx.Slice(nx, vx.Len())
|
|
||||||
ny := appendChunks(vy.Slice(0, ds.NumIdentical+ds.NumInserted+ds.NumModified), diffInserted)
|
|
||||||
vy = vy.Slice(ny, vy.Len())
|
|
||||||
numDiffs += len(list) - len0
|
|
||||||
}
|
|
||||||
if maxGroup.IsZero() {
|
|
||||||
assert(vx.Len() == 0 && vy.Len() == 0)
|
|
||||||
} else {
|
|
||||||
list.AppendEllipsis(maxGroup)
|
|
||||||
}
|
|
||||||
return list
|
|
||||||
}
|
|
||||||
|
|
||||||
// coalesceAdjacentEdits coalesces the list of edits into groups of adjacent
|
|
||||||
// equal or unequal counts.
|
|
||||||
func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) {
|
|
||||||
var prevCase int // Arbitrary index into which case last occurred
|
|
||||||
lastStats := func(i int) *diffStats {
|
|
||||||
if prevCase != i {
|
|
||||||
groups = append(groups, diffStats{Name: name})
|
|
||||||
prevCase = i
|
|
||||||
}
|
|
||||||
return &groups[len(groups)-1]
|
|
||||||
}
|
|
||||||
for _, e := range es {
|
|
||||||
switch e {
|
|
||||||
case diff.Identity:
|
|
||||||
lastStats(1).NumIdentical++
|
|
||||||
case diff.UniqueX:
|
|
||||||
lastStats(2).NumRemoved++
|
|
||||||
case diff.UniqueY:
|
|
||||||
lastStats(2).NumInserted++
|
|
||||||
case diff.Modified:
|
|
||||||
lastStats(2).NumModified++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return groups
|
|
||||||
}
|
|
||||||
|
|
||||||
// coalesceInterveningIdentical coalesces sufficiently short (<= windowSize)
|
|
||||||
// equal groups into adjacent unequal groups that currently result in a
|
|
||||||
// dual inserted/removed printout. This acts as a high-pass filter to smooth
|
|
||||||
// out high-frequency changes within the windowSize.
|
|
||||||
func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats {
|
|
||||||
groups, groupsOrig := groups[:0], groups
|
|
||||||
for i, ds := range groupsOrig {
|
|
||||||
if len(groups) >= 2 && ds.NumDiff() > 0 {
|
|
||||||
prev := &groups[len(groups)-2] // Unequal group
|
|
||||||
curr := &groups[len(groups)-1] // Equal group
|
|
||||||
next := &groupsOrig[i] // Unequal group
|
|
||||||
hadX, hadY := prev.NumRemoved > 0, prev.NumInserted > 0
|
|
||||||
hasX, hasY := next.NumRemoved > 0, next.NumInserted > 0
|
|
||||||
if ((hadX || hasX) && (hadY || hasY)) && curr.NumIdentical <= windowSize {
|
|
||||||
*prev = prev.Append(*curr).Append(*next)
|
|
||||||
groups = groups[:len(groups)-1] // Truncate off equal group
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
groups = append(groups, ds)
|
|
||||||
}
|
|
||||||
return groups
|
|
||||||
}
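The windowed coalescing above keeps the report from flip-flopping between -/+ runs when only a handful of identical elements sit between two changed runs. A simplified sketch of the idea (it drops the removed/inserted pairing check that the real function performs, and uses a stand-in group type):

package main

import "fmt"

// group is a stand-in for diffStats with just the fields the window check needs.
type group struct{ identical, removed, inserted int }

func (g group) numDiff() int { return g.removed + g.inserted }

// mergeShortIdentical folds an identical run of at most windowSize elements
// sitting between two unequal runs into the surrounding group.
func mergeShortIdentical(groups []group, windowSize int) []group {
	out := groups[:0]
	for _, g := range groups {
		if len(out) >= 2 && g.numDiff() > 0 {
			prev, curr := &out[len(out)-2], &out[len(out)-1]
			if prev.numDiff() > 0 && curr.numDiff() == 0 && curr.identical <= windowSize {
				prev.identical += curr.identical + g.identical
				prev.removed += g.removed
				prev.inserted += g.inserted
				out = out[:len(out)-1] // drop the short identical group
				continue
			}
		}
		out = append(out, g)
	}
	return out
}

func main() {
	gs := []group{{removed: 3}, {identical: 2}, {inserted: 4}}
	fmt.Println(mergeShortIdentical(gs, 4)) // [{2 3 4}]
}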
@ -1,431 +0,0 @@
// Copyright 2019, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.

package cmp

import (
	"bytes"
	"fmt"
	"math/rand"
	"strings"
	"time"
	"unicode/utf8"

	"github.com/google/go-cmp/cmp/internal/flags"
)

var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0
|
|
||||||
|
|
||||||
const maxColumnLength = 80
|
|
||||||
|
|
||||||
type indentMode int
|
|
||||||
|
|
||||||
func (n indentMode) appendIndent(b []byte, d diffMode) []byte {
|
|
||||||
// The output of Diff is documented as being unstable to provide future
|
|
||||||
// flexibility in changing the output for more humanly readable reports.
|
|
||||||
// This logic intentionally introduces instability to the exact output
|
|
||||||
// so that users can detect accidental reliance on stability early on,
|
|
||||||
// rather than much later when an actual change to the format occurs.
|
|
||||||
if flags.Deterministic || randBool {
|
|
||||||
// Use regular spaces (U+0020).
|
|
||||||
switch d {
|
|
||||||
case diffUnknown, diffIdentical:
|
|
||||||
b = append(b, " "...)
|
|
||||||
case diffRemoved:
|
|
||||||
b = append(b, "- "...)
|
|
||||||
case diffInserted:
|
|
||||||
b = append(b, "+ "...)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// Use non-breaking spaces (U+00a0).
|
|
||||||
switch d {
|
|
||||||
case diffUnknown, diffIdentical:
|
|
||||||
b = append(b, " "...)
|
|
||||||
case diffRemoved:
|
|
||||||
b = append(b, "- "...)
|
|
||||||
case diffInserted:
|
|
||||||
b = append(b, "+ "...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return repeatCount(n).appendChar(b, '\t')
|
|
||||||
}
|
|
||||||
|
|
||||||
type repeatCount int
|
|
||||||
|
|
||||||
func (n repeatCount) appendChar(b []byte, c byte) []byte {
|
|
||||||
for ; n > 0; n-- {
|
|
||||||
b = append(b, c)
|
|
||||||
}
|
|
||||||
return b
|
|
||||||
}
|
|
||||||
|
|
||||||
// textNode is a simplified tree-based representation of structured text.
|
|
||||||
// Possible node types are textWrap, textList, or textLine.
|
|
||||||
type textNode interface {
|
|
||||||
// Len reports the length in bytes of a single-line version of the tree.
|
|
||||||
// Nested textRecord.Diff and textRecord.Comment fields are ignored.
|
|
||||||
Len() int
|
|
||||||
// Equal reports whether the two trees are structurally identical.
|
|
||||||
// Nested textRecord.Diff and textRecord.Comment fields are compared.
|
|
||||||
Equal(textNode) bool
|
|
||||||
// String returns the string representation of the text tree.
|
|
||||||
// It is not guaranteed that len(x.String()) == x.Len(),
|
|
||||||
// nor that x.String() == y.String() implies that x.Equal(y).
|
|
||||||
String() string
|
|
||||||
|
|
||||||
// formatCompactTo formats the contents of the tree as a single-line string
|
|
||||||
// to the provided buffer. Any nested textRecord.Diff and textRecord.Comment
|
|
||||||
// fields are ignored.
|
|
||||||
//
|
|
||||||
// However, not all nodes in the tree should be collapsed as a single-line.
|
|
||||||
// If a node can be collapsed as a single-line, it is replaced by a textLine
|
|
||||||
// node. Since the top-level node cannot replace itself, this also returns
|
|
||||||
// the current node itself.
|
|
||||||
//
|
|
||||||
// This does not mutate the receiver.
|
|
||||||
formatCompactTo([]byte, diffMode) ([]byte, textNode)
|
|
||||||
// formatExpandedTo formats the contents of the tree as a multi-line string
|
|
||||||
// to the provided buffer. In order for column alignment to operate well,
|
|
||||||
// formatCompactTo must be called before calling formatExpandedTo.
|
|
||||||
formatExpandedTo([]byte, diffMode, indentMode) []byte
|
|
||||||
}
|
|
||||||
|
|
||||||
// textWrap is a wrapper that concatenates a prefix and/or a suffix
|
|
||||||
// to the underlying node.
|
|
||||||
type textWrap struct {
|
|
||||||
Prefix string // e.g., "bytes.Buffer{"
|
|
||||||
Value textNode // textWrap | textList | textLine
|
|
||||||
Suffix string // e.g., "}"
|
|
||||||
Metadata interface{} // arbitrary metadata; has no effect on formatting
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *textWrap) Len() int {
|
|
||||||
return len(s.Prefix) + s.Value.Len() + len(s.Suffix)
|
|
||||||
}
|
|
||||||
func (s1 *textWrap) Equal(s2 textNode) bool {
|
|
||||||
if s2, ok := s2.(*textWrap); ok {
|
|
||||||
return s1.Prefix == s2.Prefix && s1.Value.Equal(s2.Value) && s1.Suffix == s2.Suffix
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
func (s *textWrap) String() string {
|
|
||||||
var d diffMode
|
|
||||||
var n indentMode
|
|
||||||
_, s2 := s.formatCompactTo(nil, d)
|
|
||||||
b := n.appendIndent(nil, d) // Leading indent
|
|
||||||
b = s2.formatExpandedTo(b, d, n) // Main body
|
|
||||||
b = append(b, '\n') // Trailing newline
|
|
||||||
return string(b)
|
|
||||||
}
|
|
||||||
func (s *textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
|
|
||||||
n0 := len(b) // Original buffer length
|
|
||||||
b = append(b, s.Prefix...)
|
|
||||||
b, s.Value = s.Value.formatCompactTo(b, d)
|
|
||||||
b = append(b, s.Suffix...)
|
|
||||||
if _, ok := s.Value.(textLine); ok {
|
|
||||||
return b, textLine(b[n0:])
|
|
||||||
}
|
|
||||||
return b, s
|
|
||||||
}
|
|
||||||
func (s *textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte {
|
|
||||||
b = append(b, s.Prefix...)
|
|
||||||
b = s.Value.formatExpandedTo(b, d, n)
|
|
||||||
b = append(b, s.Suffix...)
|
|
||||||
return b
|
|
||||||
}
|
|
||||||
|
|
||||||
// textList is a comma-separated list of textWrap or textLine nodes.
|
|
||||||
// The list may be formatted as multi-lines or single-line at the discretion
|
|
||||||
// of the textList.formatCompactTo method.
|
|
||||||
type textList []textRecord
|
|
||||||
type textRecord struct {
|
|
||||||
Diff diffMode // e.g., 0 or '-' or '+'
|
|
||||||
Key string // e.g., "MyField"
|
|
||||||
Value textNode // textWrap | textLine
|
|
||||||
ElideComma bool // avoid trailing comma
|
|
||||||
Comment fmt.Stringer // e.g., "6 identical fields"
|
|
||||||
}
|
|
||||||
|
|
||||||
// AppendEllipsis appends a new ellipsis node to the list if none already
|
|
||||||
// exists at the end. If cs is non-zero it coalesces the statistics with the
|
|
||||||
// previous diffStats.
|
|
||||||
func (s *textList) AppendEllipsis(ds diffStats) {
|
|
||||||
hasStats := !ds.IsZero()
|
|
||||||
if len(*s) == 0 || !(*s)[len(*s)-1].Value.Equal(textEllipsis) {
|
|
||||||
if hasStats {
|
|
||||||
*s = append(*s, textRecord{Value: textEllipsis, ElideComma: true, Comment: ds})
|
|
||||||
} else {
|
|
||||||
*s = append(*s, textRecord{Value: textEllipsis, ElideComma: true})
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if hasStats {
|
|
||||||
(*s)[len(*s)-1].Comment = (*s)[len(*s)-1].Comment.(diffStats).Append(ds)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s textList) Len() (n int) {
|
|
||||||
for i, r := range s {
|
|
||||||
n += len(r.Key)
|
|
||||||
if r.Key != "" {
|
|
||||||
n += len(": ")
|
|
||||||
}
|
|
||||||
n += r.Value.Len()
|
|
||||||
if i < len(s)-1 {
|
|
||||||
n += len(", ")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s1 textList) Equal(s2 textNode) bool {
|
|
||||||
if s2, ok := s2.(textList); ok {
|
|
||||||
if len(s1) != len(s2) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
for i := range s1 {
|
|
||||||
r1, r2 := s1[i], s2[i]
|
|
||||||
if !(r1.Diff == r2.Diff && r1.Key == r2.Key && r1.Value.Equal(r2.Value) && r1.Comment == r2.Comment) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s textList) String() string {
|
|
||||||
return (&textWrap{Prefix: "{", Value: s, Suffix: "}"}).String()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s textList) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
|
|
||||||
s = append(textList(nil), s...) // Avoid mutating original
|
|
||||||
|
|
||||||
// Determine whether we can collapse this list as a single line.
|
|
||||||
n0 := len(b) // Original buffer length
|
|
||||||
var multiLine bool
|
|
||||||
for i, r := range s {
|
|
||||||
if r.Diff == diffInserted || r.Diff == diffRemoved {
|
|
||||||
multiLine = true
|
|
||||||
}
|
|
||||||
b = append(b, r.Key...)
|
|
||||||
if r.Key != "" {
|
|
||||||
b = append(b, ": "...)
|
|
||||||
}
|
|
||||||
b, s[i].Value = r.Value.formatCompactTo(b, d|r.Diff)
|
|
||||||
if _, ok := s[i].Value.(textLine); !ok {
|
|
||||||
multiLine = true
|
|
||||||
}
|
|
||||||
if r.Comment != nil {
|
|
||||||
multiLine = true
|
|
||||||
}
|
|
||||||
if i < len(s)-1 {
|
|
||||||
b = append(b, ", "...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Force multi-lined output when printing a removed/inserted node that
|
|
||||||
// is sufficiently long.
|
|
||||||
if (d == diffInserted || d == diffRemoved) && len(b[n0:]) > maxColumnLength {
|
|
||||||
multiLine = true
|
|
||||||
}
|
|
||||||
if !multiLine {
|
|
||||||
return b, textLine(b[n0:])
|
	}
	return b, s
}

func (s textList) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte {
	alignKeyLens := s.alignLens(
		func(r textRecord) bool {
			_, isLine := r.Value.(textLine)
			return r.Key == "" || !isLine
		},
		func(r textRecord) int { return utf8.RuneCountInString(r.Key) },
	)
	alignValueLens := s.alignLens(
		func(r textRecord) bool {
			_, isLine := r.Value.(textLine)
			return !isLine || r.Value.Equal(textEllipsis) || r.Comment == nil
		},
		func(r textRecord) int { return utf8.RuneCount(r.Value.(textLine)) },
	)

	// Format lists of simple lists in a batched form.
	// If the list is sequence of only textLine values,
	// then batch multiple values on a single line.
	var isSimple bool
	for _, r := range s {
		_, isLine := r.Value.(textLine)
		isSimple = r.Diff == 0 && r.Key == "" && isLine && r.Comment == nil
		if !isSimple {
			break
		}
	}
	if isSimple {
		n++
		var batch []byte
		emitBatch := func() {
			if len(batch) > 0 {
				b = n.appendIndent(append(b, '\n'), d)
				b = append(b, bytes.TrimRight(batch, " ")...)
				batch = batch[:0]
			}
		}
		for _, r := range s {
			line := r.Value.(textLine)
			if len(batch)+len(line)+len(", ") > maxColumnLength {
				emitBatch()
			}
			batch = append(batch, line...)
			batch = append(batch, ", "...)
		}
		emitBatch()
		n--
		return n.appendIndent(append(b, '\n'), d)
	}

	// Format the list as a multi-lined output.
	n++
	for i, r := range s {
		b = n.appendIndent(append(b, '\n'), d|r.Diff)
		if r.Key != "" {
			b = append(b, r.Key+": "...)
		}
		b = alignKeyLens[i].appendChar(b, ' ')

		b = r.Value.formatExpandedTo(b, d|r.Diff, n)
		if !r.ElideComma {
			b = append(b, ',')
		}
		b = alignValueLens[i].appendChar(b, ' ')

		if r.Comment != nil {
			b = append(b, " // "+r.Comment.String()...)
		}
	}
	n--

	return n.appendIndent(append(b, '\n'), d)
}

func (s textList) alignLens(
	skipFunc func(textRecord) bool,
	lenFunc func(textRecord) int,
) []repeatCount {
	var startIdx, endIdx, maxLen int
	lens := make([]repeatCount, len(s))
	for i, r := range s {
		if skipFunc(r) {
			for j := startIdx; j < endIdx && j < len(s); j++ {
				lens[j] = repeatCount(maxLen - lenFunc(s[j]))
			}
			startIdx, endIdx, maxLen = i+1, i+1, 0
		} else {
			if maxLen < lenFunc(r) {
				maxLen = lenFunc(r)
			}
			endIdx = i + 1
		}
	}
	for j := startIdx; j < endIdx && j < len(s); j++ {
		lens[j] = repeatCount(maxLen - lenFunc(s[j]))
	}
	return lens
}

// textLine is a single-line segment of text and is always a leaf node
// in the textNode tree.
type textLine []byte

var (
	textNil      = textLine("nil")
	textEllipsis = textLine("...")
)

func (s textLine) Len() int {
	return len(s)
}
func (s1 textLine) Equal(s2 textNode) bool {
	if s2, ok := s2.(textLine); ok {
		return bytes.Equal([]byte(s1), []byte(s2))
	}
	return false
}
func (s textLine) String() string {
	return string(s)
}
func (s textLine) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
	return append(b, s...), s
}
func (s textLine) formatExpandedTo(b []byte, _ diffMode, _ indentMode) []byte {
	return append(b, s...)
}

type diffStats struct {
	Name         string
	NumIgnored   int
	NumIdentical int
	NumRemoved   int
	NumInserted  int
	NumModified  int
}

func (s diffStats) IsZero() bool {
	s.Name = ""
	return s == diffStats{}
}

func (s diffStats) NumDiff() int {
	return s.NumRemoved + s.NumInserted + s.NumModified
}

func (s diffStats) Append(ds diffStats) diffStats {
	assert(s.Name == ds.Name)
	s.NumIgnored += ds.NumIgnored
	s.NumIdentical += ds.NumIdentical
	s.NumRemoved += ds.NumRemoved
	s.NumInserted += ds.NumInserted
	s.NumModified += ds.NumModified
	return s
}

// String prints a humanly-readable summary of coalesced records.
//
// Example:
//	diffStats{Name: "Field", NumIgnored: 5}.String() => "5 ignored fields"
func (s diffStats) String() string {
	var ss []string
	var sum int
	labels := [...]string{"ignored", "identical", "removed", "inserted", "modified"}
	counts := [...]int{s.NumIgnored, s.NumIdentical, s.NumRemoved, s.NumInserted, s.NumModified}
	for i, n := range counts {
		if n > 0 {
			ss = append(ss, fmt.Sprintf("%d %v", n, labels[i]))
		}
		sum += n
	}

	// Pluralize the name (adjusting for some obscure English grammar rules).
	name := s.Name
	if sum > 1 {
		name += "s"
		if strings.HasSuffix(name, "ys") {
			name = name[:len(name)-2] + "ies" // e.g., "entrys" => "entries"
		}
	}

	// Format the list according to English grammar (with Oxford comma).
	switch n := len(ss); n {
	case 0:
		return ""
	case 1, 2:
		return strings.Join(ss, " and ") + " " + name
	default:
		return strings.Join(ss[:n-1], ", ") + ", and " + ss[n-1] + " " + name
	}
}

type commentString string

func (s commentString) String() string { return string(s) }
@ -1,121 +0,0 @@
// Copyright 2019, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.

package cmp

import "reflect"

// valueNode represents a single node within a report, which is a
// structured representation of the value tree, containing information
// regarding which nodes are equal or not.
type valueNode struct {
	parent *valueNode

	Type   reflect.Type
	ValueX reflect.Value
	ValueY reflect.Value

	// NumSame is the number of leaf nodes that are equal.
	// All descendants are equal only if NumDiff is 0.
	NumSame int
	// NumDiff is the number of leaf nodes that are not equal.
	NumDiff int
	// NumIgnored is the number of leaf nodes that are ignored.
	NumIgnored int
	// NumCompared is the number of leaf nodes that were compared
	// using an Equal method or Comparer function.
	NumCompared int
	// NumTransformed is the number of non-leaf nodes that were transformed.
	NumTransformed int
	// NumChildren is the number of transitive descendants of this node.
	// This counts from zero; thus, leaf nodes have no descendants.
	NumChildren int
	// MaxDepth is the maximum depth of the tree. This counts from zero;
	// thus, leaf nodes have a depth of zero.
	MaxDepth int

	// Records is a list of struct fields, slice elements, or map entries.
	Records []reportRecord // If populated, implies Value is not populated

	// Value is the result of a transformation, pointer indirect, of
	// type assertion.
	Value *valueNode // If populated, implies Records is not populated

	// TransformerName is the name of the transformer.
	TransformerName string // If non-empty, implies Value is populated
}
type reportRecord struct {
	Key   reflect.Value // Invalid for slice element
	Value *valueNode
}

func (parent *valueNode) PushStep(ps PathStep) (child *valueNode) {
	vx, vy := ps.Values()
	child = &valueNode{parent: parent, Type: ps.Type(), ValueX: vx, ValueY: vy}
	switch s := ps.(type) {
	case StructField:
		assert(parent.Value == nil)
		parent.Records = append(parent.Records, reportRecord{Key: reflect.ValueOf(s.Name()), Value: child})
	case SliceIndex:
		assert(parent.Value == nil)
		parent.Records = append(parent.Records, reportRecord{Value: child})
	case MapIndex:
		assert(parent.Value == nil)
		parent.Records = append(parent.Records, reportRecord{Key: s.Key(), Value: child})
	case Indirect:
		assert(parent.Value == nil && parent.Records == nil)
		parent.Value = child
	case TypeAssertion:
		assert(parent.Value == nil && parent.Records == nil)
		parent.Value = child
	case Transform:
		assert(parent.Value == nil && parent.Records == nil)
		parent.Value = child
		parent.TransformerName = s.Name()
		parent.NumTransformed++
	default:
		assert(parent == nil) // Must be the root step
	}
	return child
}

func (r *valueNode) Report(rs Result) {
	assert(r.MaxDepth == 0) // May only be called on leaf nodes

	if rs.ByIgnore() {
		r.NumIgnored++
	} else {
		if rs.Equal() {
			r.NumSame++
		} else {
			r.NumDiff++
		}
	}
	assert(r.NumSame+r.NumDiff+r.NumIgnored == 1)

	if rs.ByMethod() {
		r.NumCompared++
	}
	if rs.ByFunc() {
		r.NumCompared++
	}
	assert(r.NumCompared <= 1)
}

func (child *valueNode) PopStep() (parent *valueNode) {
	if child.parent == nil {
		return nil
	}
	parent = child.parent
	parent.NumSame += child.NumSame
	parent.NumDiff += child.NumDiff
	parent.NumIgnored += child.NumIgnored
	parent.NumCompared += child.NumCompared
	parent.NumTransformed += child.NumTransformed
	parent.NumChildren += child.NumChildren + 1
	if parent.MaxDepth < child.MaxDepth+1 {
		parent.MaxDepth = child.MaxDepth + 1
	}
	return parent
}
64 vendor/github.com/influxdata/telegraf/plugins/inputs/github/README.md generated vendored Normal file
@ -0,0 +1,64 @@
# GitHub Input Plugin

Gather repository information from [GitHub][] hosted repositories.

**Note:** Telegraf also contains the [webhook][] input which can be used as an
alternative method for collecting repository information.

### Configuration

```toml
[[inputs.github]]
  ## List of repositories to monitor
  repositories = [
    "influxdata/telegraf",
    "influxdata/influxdb"
  ]

  ## Github API access token. Unauthenticated requests are limited to 60 per hour.
  # access_token = ""

  ## Github API enterprise url. Github Enterprise accounts must specify their base url.
  # enterprise_base_url = ""

  ## Timeout for HTTP requests.
  # http_timeout = "5s"
```

### Metrics

- github_repository
  - tags:
    - name - The repository name
    - owner - The owner of the repository
    - language - The primary language of the repository
    - license - The license set for the repository
  - fields:
    - forks (int)
    - open_issues (int)
    - networks (int)
    - size (int)
    - subscribers (int)
    - stars (int)
    - watchers (int)

When the [internal][] input is enabled:

+ internal_github
  - tags:
    - access_token - An obfuscated reference to the configured access token or "Unauthenticated"
  - fields:
    - limit - How many requests you are limited to (per hour)
    - remaining - How many requests you have remaining (per hour)
    - blocks - How many requests have been blocked due to rate limit

### Example Output

```
github_repository,language=Go,license=MIT\ License,name=telegraf,owner=influxdata forks=2679i,networks=2679i,open_issues=794i,size=23263i,stars=7091i,subscribers=316i,watchers=7091i 1563901372000000000
internal_github,access_token=Unauthenticated rate_limit_remaining=59i,rate_limit_limit=60i,rate_limit_blocks=0i 1552653551000000000
```

[GitHub]: https://www.github.com
[internal]: /plugins/inputs/internal
[webhook]: /plugins/inputs/webhooks/github
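The `access_token` tag listed above is not the raw token but an obfuscated reference to it (or the literal string "Unauthenticated" when no token is configured). As an illustration only, the sketch below shows one way such a value can be derived; it mirrors the prefix-ellipsis-suffix form used in github.go further down, and the helper name `obfuscateToken` plus the short-token guard are hypothetical additions, not part of the vendored plugin.

```go
package main

import "fmt"

// obfuscateToken keeps only the first four and last three characters of a
// token so the value stays recognizable in tags without exposing the secret.
// An empty token maps to "Unauthenticated", matching the tag value above.
func obfuscateToken(token string) string {
	if token == "" {
		return "Unauthenticated"
	}
	if len(token) < 8 {
		return "..." // too short to reveal any part of it safely
	}
	return token[:4] + "..." + token[len(token)-3:]
}

func main() {
	fmt.Println(obfuscateToken(""))                 // Unauthenticated
	fmt.Println(obfuscateToken("ghp_exampletoken")) // ghp_...ken
}
```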
200 vendor/github.com/influxdata/telegraf/plugins/inputs/github/github.go generated vendored Normal file
@ -0,0 +1,200 @@
package github

import (
	"context"
	"fmt"
	"net/http"
	"strings"
	"sync"
	"time"

	"github.com/google/go-github/v32/github"
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/plugins/inputs"
	"github.com/influxdata/telegraf/selfstat"
	"golang.org/x/oauth2"
)

// GitHub - plugin main structure
type GitHub struct {
	Repositories      []string          `toml:"repositories"`
	AccessToken       string            `toml:"access_token"`
	EnterpriseBaseURL string            `toml:"enterprise_base_url"`
	HTTPTimeout       internal.Duration `toml:"http_timeout"`
	githubClient      *github.Client

	obfuscatedToken string

	RateLimit       selfstat.Stat
	RateLimitErrors selfstat.Stat
	RateRemaining   selfstat.Stat
}

const sampleConfig = `
  ## List of repositories to monitor.
  repositories = [
    "influxdata/telegraf",
    "influxdata/influxdb"
  ]

  ## Github API access token. Unauthenticated requests are limited to 60 per hour.
  # access_token = ""

  ## Github API enterprise url. Github Enterprise accounts must specify their base url.
  # enterprise_base_url = ""

  ## Timeout for HTTP requests.
  # http_timeout = "5s"
`

// SampleConfig returns sample configuration for this plugin.
func (g *GitHub) SampleConfig() string {
	return sampleConfig
}

// Description returns the plugin description.
func (g *GitHub) Description() string {
	return "Gather repository information from GitHub hosted repositories."
}

// Create GitHub Client
func (g *GitHub) createGitHubClient(ctx context.Context) (*github.Client, error) {
	httpClient := &http.Client{
		Transport: &http.Transport{
			Proxy: http.ProxyFromEnvironment,
		},
		Timeout: g.HTTPTimeout.Duration,
	}

	g.obfuscatedToken = "Unauthenticated"

	if g.AccessToken != "" {
		tokenSource := oauth2.StaticTokenSource(
			&oauth2.Token{AccessToken: g.AccessToken},
		)
		oauthClient := oauth2.NewClient(ctx, tokenSource)
		_ = context.WithValue(ctx, oauth2.HTTPClient, oauthClient)

		g.obfuscatedToken = g.AccessToken[0:4] + "..." + g.AccessToken[len(g.AccessToken)-3:]

		return g.newGithubClient(oauthClient)
	}

	return g.newGithubClient(httpClient)
}

func (g *GitHub) newGithubClient(httpClient *http.Client) (*github.Client, error) {
	if g.EnterpriseBaseURL != "" {
		return github.NewEnterpriseClient(g.EnterpriseBaseURL, "", httpClient)
	}
	return github.NewClient(httpClient), nil
}

// Gather GitHub Metrics
func (g *GitHub) Gather(acc telegraf.Accumulator) error {
	ctx := context.Background()

	if g.githubClient == nil {
		githubClient, err := g.createGitHubClient(ctx)

		if err != nil {
			return err
		}

		g.githubClient = githubClient

		tokenTags := map[string]string{
			"access_token": g.obfuscatedToken,
		}

		g.RateLimitErrors = selfstat.Register("github", "rate_limit_blocks", tokenTags)
		g.RateLimit = selfstat.Register("github", "rate_limit_limit", tokenTags)
		g.RateRemaining = selfstat.Register("github", "rate_limit_remaining", tokenTags)
	}

	var wg sync.WaitGroup
	wg.Add(len(g.Repositories))

	for _, repository := range g.Repositories {
		go func(repositoryName string, acc telegraf.Accumulator) {
			defer wg.Done()

			owner, repository, err := splitRepositoryName(repositoryName)
			if err != nil {
				acc.AddError(err)
				return
			}

			repositoryInfo, response, err := g.githubClient.Repositories.Get(ctx, owner, repository)

			if _, ok := err.(*github.RateLimitError); ok {
				g.RateLimitErrors.Incr(1)
			}

			if err != nil {
				acc.AddError(err)
				return
			}

			g.RateLimit.Set(int64(response.Rate.Limit))
			g.RateRemaining.Set(int64(response.Rate.Remaining))

			now := time.Now()
			tags := getTags(repositoryInfo)
			fields := getFields(repositoryInfo)

			acc.AddFields("github_repository", fields, tags, now)
		}(repository, acc)
	}

	wg.Wait()
	return nil
}

func splitRepositoryName(repositoryName string) (string, string, error) {
	splits := strings.SplitN(repositoryName, "/", 2)

	if len(splits) != 2 {
		return "", "", fmt.Errorf("%v is not of format 'owner/repository'", repositoryName)
	}

	return splits[0], splits[1], nil
}

func getLicense(rI *github.Repository) string {
	if licenseName := rI.GetLicense().GetName(); licenseName != "" {
		return licenseName
	}

	return "None"
}

func getTags(repositoryInfo *github.Repository) map[string]string {
	return map[string]string{
		"owner":    repositoryInfo.GetOwner().GetLogin(),
		"name":     repositoryInfo.GetName(),
		"language": repositoryInfo.GetLanguage(),
		"license":  getLicense(repositoryInfo),
	}
}

func getFields(repositoryInfo *github.Repository) map[string]interface{} {
	return map[string]interface{}{
		"stars":       repositoryInfo.GetStargazersCount(),
		"subscribers": repositoryInfo.GetSubscribersCount(),
		"watchers":    repositoryInfo.GetWatchersCount(),
		"networks":    repositoryInfo.GetNetworkCount(),
		"forks":       repositoryInfo.GetForksCount(),
		"open_issues": repositoryInfo.GetOpenIssuesCount(),
		"size":        repositoryInfo.GetSize(),
	}
}

func init() {
	inputs.Add("github", func() telegraf.Input {
		return &GitHub{
			HTTPTimeout: internal.Duration{Duration: time.Second * 5},
		}
	})
}
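For reference, `splitRepositoryName` above relies on `strings.SplitN` with a limit of 2, so only the first slash separates the owner from the repository and anything after a second slash stays in the repository part. A minimal standalone sketch of that behavior (not part of the vendored file, just an illustration of the stdlib call it uses):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	for _, name := range []string{"influxdata/telegraf", "not-a-repo", "owner/repo/extra"} {
		// SplitN with limit 2 returns at most two parts: owner and the rest.
		parts := strings.SplitN(name, "/", 2)
		if len(parts) != 2 {
			fmt.Printf("%q is not of format 'owner/repository'\n", name)
			continue
		}
		fmt.Printf("owner=%q repository=%q\n", parts[0], parts[1])
	}
}
```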
275 vendor/github.com/influxdata/telegraf/plugins/inputs/mongodb/README.md generated vendored Normal file
@ -0,0 +1,275 @@
|
||||||
|
# MongoDB Input Plugin
|
||||||
|
|
||||||
|
### Configuration:
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[[inputs.mongodb]]
|
||||||
|
## An array of URLs of the form:
|
||||||
|
## "mongodb://" [user ":" pass "@"] host [ ":" port]
|
||||||
|
## For example:
|
||||||
|
## mongodb://user:auth_key@10.10.3.30:27017,
|
||||||
|
## mongodb://10.10.3.33:18832,
|
||||||
|
servers = ["mongodb://127.0.0.1:27017"]
|
||||||
|
|
||||||
|
## When true, collect cluster status.
|
||||||
|
## Note that the query that counts jumbo chunks triggers a COLLSCAN, which
|
||||||
|
## may have an impact on performance.
|
||||||
|
# gather_cluster_status = true
|
||||||
|
|
||||||
|
## When true, collect per database stats
|
||||||
|
# gather_perdb_stats = false
|
||||||
|
|
||||||
|
## When true, collect per collection stats
|
||||||
|
# gather_col_stats = false
|
||||||
|
|
||||||
|
## List of db where collections stats are collected
|
||||||
|
## If empty, all db are concerned
|
||||||
|
# col_stats_dbs = ["local"]
|
||||||
|
|
||||||
|
## Optional TLS Config
|
||||||
|
# tls_ca = "/etc/telegraf/ca.pem"
|
||||||
|
# tls_cert = "/etc/telegraf/cert.pem"
|
||||||
|
# tls_key = "/etc/telegraf/key.pem"
|
||||||
|
## Use TLS but skip chain & host verification
|
||||||
|
# insecure_skip_verify = false
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Permissions:
|
||||||
|
|
||||||
|
If your MongoDB instance has access control enabled you will need to connect
|
||||||
|
as a user with sufficient rights.
|
||||||
|
|
||||||
|
With MongoDB 3.4 and higher, the `clusterMonitor` role can be used. In
|
||||||
|
version 3.2 you may also need these additional permissions:
|
||||||
|
```
|
||||||
|
> db.grantRolesToUser("user", [{role: "read", actions: "find", db: "local"}])
|
||||||
|
```
|
||||||
|
|
||||||
|
If the user is missing required privileges you may see an error in the
|
||||||
|
Telegraf logs similar to:
|
||||||
|
```
|
||||||
|
Error in input [mongodb]: not authorized on admin to execute command { serverStatus: 1, recordStats: 0 }
|
||||||
|
```
|
||||||
|
|
||||||
|
Some permission related errors are logged at debug level, you can check these
|
||||||
|
messages by setting `debug = true` in the agent section of the configuration or
|
||||||
|
by running Telegraf with the `--debug` argument.
|
||||||
|
|
||||||
|
### Metrics:
|
||||||
|
|
||||||
|
- mongodb
|
||||||
|
- tags:
|
||||||
|
- hostname
|
||||||
|
- node_type
|
||||||
|
- rs_name
|
||||||
|
- fields:
|
||||||
|
- active_reads (integer)
|
||||||
|
- active_writes (integer)
|
||||||
|
- aggregate_command_failed (integer)
|
||||||
|
- aggregate_command_total (integer)
|
||||||
|
- assert_msg (integer)
|
||||||
|
- assert_regular (integer)
|
||||||
|
- assert_rollovers (integer)
|
||||||
|
- assert_user (integer)
|
||||||
|
- assert_warning (integer)
|
||||||
|
- available_reads (integer)
|
||||||
|
- available_writes (integer)
|
||||||
|
- commands (integer)
|
||||||
|
- connections_available (integer)
|
||||||
|
- connections_current (integer)
|
||||||
|
- connections_total_created (integer)
|
||||||
|
- count_command_failed (integer)
|
||||||
|
- count_command_total (integer)
|
||||||
|
- cursor_no_timeout_count (integer)
|
||||||
|
- cursor_pinned_count (integer)
|
||||||
|
- cursor_timed_out_count (integer)
|
||||||
|
- cursor_total_count (integer)
|
||||||
|
- delete_command_failed (integer)
|
||||||
|
- delete_command_total (integer)
|
||||||
|
- deletes (integer)
|
||||||
|
- distinct_command_failed (integer)
|
||||||
|
- distinct_command_total (integer)
|
||||||
|
- document_deleted (integer)
|
||||||
|
- document_inserted (integer)
|
||||||
|
- document_returned (integer)
|
||||||
|
- document_updated (integer)
|
||||||
|
- find_and_modify_command_failed (integer)
|
||||||
|
- find_and_modify_command_total (integer)
|
||||||
|
- find_command_failed (integer)
|
||||||
|
- find_command_total (integer)
|
||||||
|
- flushes (integer)
|
||||||
|
- flushes_total_time_ns (integer)
|
||||||
|
- get_more_command_failed (integer)
|
||||||
|
- get_more_command_total (integer)
|
||||||
|
- getmores (integer)
|
||||||
|
- insert_command_failed (integer)
|
||||||
|
- insert_command_total (integer)
|
||||||
|
- inserts (integer)
|
||||||
|
- jumbo_chunks (integer)
|
||||||
|
- latency_commands_count (integer)
|
||||||
|
- latency_commands (integer)
|
||||||
|
- latency_reads_count (integer)
|
||||||
|
- latency_reads (integer)
|
||||||
|
- latency_writes_count (integer)
|
||||||
|
- latency_writes (integer)
|
||||||
|
- member_status (string)
|
||||||
|
- net_in_bytes_count (integer)
|
||||||
|
- net_out_bytes_count (integer)
|
||||||
|
- open_connections (integer)
|
||||||
|
- operation_scan_and_order (integer)
|
||||||
|
- operation_write_conflicts (integer)
|
||||||
|
- page_faults (integer)
|
||||||
|
- percent_cache_dirty (float)
|
||||||
|
- percent_cache_used (float)
|
||||||
|
- queries (integer)
|
||||||
|
- queued_reads (integer)
|
||||||
|
- queued_writes (integer)
|
||||||
|
- repl_apply_batches_num (integer)
|
||||||
|
- repl_apply_batches_total_millis (integer)
|
||||||
|
- repl_apply_ops (integer)
|
||||||
|
- repl_buffer_count (integer)
|
||||||
|
- repl_buffer_size_bytes (integer)
|
||||||
|
- repl_commands (integer)
|
||||||
|
- repl_deletes (integer)
|
||||||
|
- repl_executor_pool_in_progress_count (integer)
|
||||||
|
- repl_executor_queues_network_in_progress (integer)
|
||||||
|
- repl_executor_queues_sleepers (integer)
|
||||||
|
- repl_executor_unsignaled_events (integer)
|
||||||
|
- repl_getmores (integer)
|
||||||
|
- repl_inserts (integer)
|
||||||
|
- repl_lag (integer)
|
||||||
|
- repl_network_bytes (integer)
|
||||||
|
- repl_network_getmores_num (integer)
|
||||||
|
- repl_network_getmores_total_millis (integer)
|
||||||
|
- repl_network_ops (integer)
|
||||||
|
- repl_queries (integer)
|
||||||
|
- repl_updates (integer)
|
||||||
|
- repl_oplog_window_sec (integer)
|
||||||
|
- repl_state (integer)
|
||||||
|
- resident_megabytes (integer)
|
||||||
|
- state (string)
|
||||||
|
- storage_freelist_search_bucket_exhausted (integer)
|
||||||
|
- storage_freelist_search_requests (integer)
|
||||||
|
- storage_freelist_search_scanned (integer)
|
||||||
|
- tcmalloc_central_cache_free_bytes (integer)
|
||||||
|
- tcmalloc_current_allocated_bytes (integer)
|
||||||
|
- tcmalloc_current_total_thread_cache_bytes (integer)
|
||||||
|
- tcmalloc_heap_size (integer)
|
||||||
|
- tcmalloc_max_total_thread_cache_bytes (integer)
|
||||||
|
- tcmalloc_pageheap_commit_count (integer)
|
||||||
|
- tcmalloc_pageheap_committed_bytes (integer)
|
||||||
|
- tcmalloc_pageheap_decommit_count (integer)
|
||||||
|
- tcmalloc_pageheap_free_bytes (integer)
|
||||||
|
- tcmalloc_pageheap_reserve_count (integer)
|
||||||
|
- tcmalloc_pageheap_scavenge_count (integer)
|
||||||
|
- tcmalloc_pageheap_total_commit_bytes (integer)
|
||||||
|
- tcmalloc_pageheap_total_decommit_bytes (integer)
|
||||||
|
- tcmalloc_pageheap_total_reserve_bytes (integer)
|
||||||
|
- tcmalloc_pageheap_unmapped_bytes (integer)
|
||||||
|
- tcmalloc_spinlock_total_delay_ns (integer)
|
||||||
|
- tcmalloc_thread_cache_free_bytes (integer)
|
||||||
|
- tcmalloc_total_free_bytes (integer)
|
||||||
|
- tcmalloc_transfer_cache_free_bytes (integer)
|
||||||
|
- total_available (integer)
|
||||||
|
- total_created (integer)
|
||||||
|
- total_docs_scanned (integer)
|
||||||
|
- total_in_use (integer)
|
||||||
|
- total_keys_scanned (integer)
|
||||||
|
- total_refreshing (integer)
|
||||||
|
- total_tickets_reads (integer)
|
||||||
|
- total_tickets_writes (integer)
|
||||||
|
- ttl_deletes (integer)
|
||||||
|
- ttl_passes (integer)
|
||||||
|
- update_command_failed (integer)
|
||||||
|
- update_command_total (integer)
|
||||||
|
- updates (integer)
|
||||||
|
- uptime_ns (integer)
|
||||||
|
- version (string)
|
||||||
|
- vsize_megabytes (integer)
|
||||||
|
- wtcache_app_threads_page_read_count (integer)
|
||||||
|
- wtcache_app_threads_page_read_time (integer)
|
||||||
|
- wtcache_app_threads_page_write_count (integer)
|
||||||
|
- wtcache_bytes_read_into (integer)
|
||||||
|
- wtcache_bytes_written_from (integer)
|
||||||
|
- wtcache_pages_read_into (integer)
|
||||||
|
- wtcache_pages_requested_from (integer)
|
||||||
|
- wtcache_current_bytes (integer)
|
||||||
|
- wtcache_max_bytes_configured (integer)
|
||||||
|
- wtcache_internal_pages_evicted (integer)
|
||||||
|
- wtcache_modified_pages_evicted (integer)
|
||||||
|
- wtcache_unmodified_pages_evicted (integer)
|
||||||
|
- wtcache_pages_evicted_by_app_thread (integer)
|
||||||
|
- wtcache_pages_queued_for_eviction (integer)
|
||||||
|
- wtcache_server_evicting_pages (integer)
|
||||||
|
- wtcache_tracked_dirty_bytes (integer)
|
||||||
|
- wtcache_worker_thread_evictingpages (integer)
|
||||||
|
- commands_per_sec (integer, deprecated in 1.10; use `commands`))
|
||||||
|
- cursor_no_timeout (integer, opened/sec, deprecated in 1.10; use `cursor_no_timeout_count`))
|
||||||
|
- cursor_pinned (integer, opened/sec, deprecated in 1.10; use `cursor_pinned_count`))
|
||||||
|
- cursor_timed_out (integer, opened/sec, deprecated in 1.10; use `cursor_timed_out_count`))
|
||||||
|
- cursor_total (integer, opened/sec, deprecated in 1.10; use `cursor_total_count`))
|
||||||
|
- deletes_per_sec (integer, deprecated in 1.10; use `deletes`))
|
||||||
|
- flushes_per_sec (integer, deprecated in 1.10; use `flushes`))
|
||||||
|
- getmores_per_sec (integer, deprecated in 1.10; use `getmores`))
|
||||||
|
- inserts_per_sec (integer, deprecated in 1.10; use `inserts`))
|
||||||
|
- net_in_bytes (integer, bytes/sec, deprecated in 1.10; use `net_out_bytes_count`))
|
||||||
|
- net_out_bytes (integer, bytes/sec, deprecated in 1.10; use `net_out_bytes_count`))
|
||||||
|
- queries_per_sec (integer, deprecated in 1.10; use `queries`))
|
||||||
|
- repl_commands_per_sec (integer, deprecated in 1.10; use `repl_commands`))
|
||||||
|
- repl_deletes_per_sec (integer, deprecated in 1.10; use `repl_deletes`)
|
||||||
|
- repl_getmores_per_sec (integer, deprecated in 1.10; use `repl_getmores`)
|
||||||
|
- repl_inserts_per_sec (integer, deprecated in 1.10; use `repl_inserts`))
|
||||||
|
- repl_queries_per_sec (integer, deprecated in 1.10; use `repl_queries`))
|
||||||
|
- repl_updates_per_sec (integer, deprecated in 1.10; use `repl_updates`))
|
||||||
|
- ttl_deletes_per_sec (integer, deprecated in 1.10; use `ttl_deletes`))
|
||||||
|
- ttl_passes_per_sec (integer, deprecated in 1.10; use `ttl_passes`))
|
||||||
|
- updates_per_sec (integer, deprecated in 1.10; use `updates`))
|
||||||
|
|
||||||
|
+ mongodb_db_stats
|
||||||
|
- tags:
|
||||||
|
- db_name
|
||||||
|
- hostname
|
||||||
|
- fields:
|
||||||
|
- avg_obj_size (float)
|
||||||
|
- collections (integer)
|
||||||
|
- data_size (integer)
|
||||||
|
- index_size (integer)
|
||||||
|
- indexes (integer)
|
||||||
|
- num_extents (integer)
|
||||||
|
- objects (integer)
|
||||||
|
- ok (integer)
|
||||||
|
- storage_size (integer)
|
||||||
|
- type (string)
|
||||||
|
|
||||||
|
- mongodb_col_stats
|
||||||
|
- tags:
|
||||||
|
- hostname
|
||||||
|
- collection
|
||||||
|
- db_name
|
||||||
|
- fields:
|
||||||
|
- size (integer)
|
||||||
|
- avg_obj_size (integer)
|
||||||
|
- storage_size (integer)
|
||||||
|
- total_index_size (integer)
|
||||||
|
- ok (integer)
|
||||||
|
- count (integer)
|
||||||
|
- type (string)
|
||||||
|
|
||||||
|
- mongodb_shard_stats
|
||||||
|
- tags:
|
||||||
|
- hostname
|
||||||
|
- fields:
|
||||||
|
- in_use (integer)
|
||||||
|
- available (integer)
|
||||||
|
- created (integer)
|
||||||
|
- refreshing (integer)
|
||||||
|
|
||||||
|
### Example Output:
|
||||||
|
```
|
||||||
|
mongodb,hostname=127.0.0.1:27017 active_reads=3i,active_writes=0i,aggregate_command_failed=0i,aggregate_command_total=87210i,assert_msg=0i,assert_regular=0i,assert_rollovers=0i,assert_user=0i,assert_warning=0i,available_reads=125i,available_writes=128i,commands=218126i,commands_per_sec=1876i,connections_available=838853i,connections_current=7i,connections_total_created=8i,count_command_failed=0i,count_command_total=7i,cursor_no_timeout=0i,cursor_no_timeout_count=0i,cursor_pinned=0i,cursor_pinned_count=0i,cursor_timed_out=0i,cursor_timed_out_count=0i,cursor_total=0i,cursor_total_count=0i,delete_command_failed=0i,delete_command_total=0i,deletes=0i,deletes_per_sec=0i,distinct_command_failed=0i,distinct_command_total=87190i,document_deleted=0i,document_inserted=0i,document_returned=7i,document_updated=43595i,find_and_modify_command_failed=0i,find_and_modify_command_total=43595i,find_command_failed=0i,find_command_total=348819i,flushes=1i,flushes_per_sec=0i,flushes_total_time_ns=5000000i,get_more_command_failed=0i,get_more_command_total=0i,getmores=7i,getmores_per_sec=1i,insert_command_failed=0i,insert_command_total=0i,inserts=0i,inserts_per_sec=0i,jumbo_chunks=0i,latency_commands=44179i,latency_commands_count=122i,latency_reads=36662189i,latency_reads_count=523229i,latency_writes=6768713i,latency_writes_count=87190i,net_in_bytes=837378i,net_in_bytes_count=97692502i,net_out_bytes=690836i,net_out_bytes_count=75377383i,open_connections=7i,operation_scan_and_order=87193i,operation_write_conflicts=7i,page_faults=0i,percent_cache_dirty=0.9,percent_cache_used=1,queries=348816i,queries_per_sec=2988i,queued_reads=0i,queued_writes=0i,resident_megabytes=77i,storage_freelist_search_bucket_exhausted=0i,storage_freelist_search_requests=0i,storage_freelist_search_scanned=0i,tcmalloc_central_cache_free_bytes=280136i,tcmalloc_current_allocated_bytes=77677288i,tcmalloc_current_total_thread_cache_bytes=1222608i,tcmalloc_heap_size=142659584i,tcmalloc_max_total_thread_cache_bytes=260046848i,tcmalloc_pageheap_commit_count=1898i,tcmalloc_pageheap_committed_bytes=130084864i,tcmalloc_pageheap_decommit_count=889i,tcmalloc_pageheap_free_bytes=50610176i,tcmalloc_pageheap_reserve_count=50i,tcmalloc_pageheap_scavenge_count=884i,tcmalloc_pageheap_total_commit_bytes=13021937664i,tcmalloc_pageheap_total_decommit_bytes=12891852800i,tcmalloc_pageheap_total_reserve_bytes=142659584i,tcmalloc_pageheap_unmapped_bytes=12574720i,tcmalloc_spinlock_total_delay_ns=9767500i,tcmalloc_thread_cache_free_bytes=1222608i,tcmalloc_total_free_bytes=1797400i,tcmalloc_transfer_cache_free_bytes=294656i,total_available=0i,total_created=0i,total_docs_scanned=43595i,total_in_use=0i,total_keys_scanned=130805i,total_refreshing=0i,total_tickets_reads=128i,total_tickets_writes=128i,ttl_deletes=0i,ttl_deletes_per_sec=0i,ttl_passes=0i,ttl_passes_per_sec=0i,update_command_failed=0i,update_command_total=43595i,updates=43595i,updates_per_sec=372i,uptime_ns=60023000000i,version="3.6.17",vsize_megabytes=1048i,wtcache_app_threads_page_read_count=108i,wtcache_app_threads_page_read_time=25995i,wtcache_app_threads_page_write_count=0i,wtcache_bytes_read_into=2487250i,wtcache_bytes_written_from=74i,wtcache_current_bytes=5014530i,wtcache_internal_pages_evicted=0i,wtcache_max_bytes_configured=505413632i,wtcache_modified_pages_evicted=0i,wtcache_pages_evicted_by_app_thread=0i,wtcache_pages_queued_for_eviction=0i,wtcache_pages_read_into=139i,wtcache_pages_requested_from=699135i,wtcache_server_evicting_pages=0i,wtcache_tracked_dirty_bytes=4797426i,wtcache_unmodified_pages_e
victed=0i,wtcache_worker_thread_evictingpages=0i 1586379818000000000
|
||||||
|
mongodb,hostname=127.0.0.1:27017,node_type=SEC,rs_name=rs0 active_reads=1i,active_writes=0i,aggregate_command_failed=0i,aggregate_command_total=1i,assert_msg=0i,assert_regular=0i,assert_rollovers=0i,assert_user=79i,assert_warning=0i,available_reads=127i,available_writes=128i,commands=1121855i,commands_per_sec=10i,connections_available=51183i,connections_current=17i,connections_total_created=557i,count_command_failed=0i,count_command_total=46307i,cursor_no_timeout=0i,cursor_no_timeout_count=0i,cursor_pinned=0i,cursor_pinned_count=0i,cursor_timed_out=0i,cursor_timed_out_count=28i,cursor_total=0i,cursor_total_count=0i,delete_command_failed=0i,delete_command_total=0i,deletes=0i,deletes_per_sec=0i,distinct_command_failed=0i,distinct_command_total=0i,document_deleted=0i,document_inserted=0i,document_returned=2248129i,document_updated=0i,find_and_modify_command_failed=0i,find_and_modify_command_total=0i,find_command_failed=2i,find_command_total=8764i,flushes=7850i,flushes_per_sec=0i,flushes_total_time_ns=4535446000000i,get_more_command_failed=0i,get_more_command_total=1993i,getmores=2018i,getmores_per_sec=0i,insert_command_failed=0i,insert_command_total=0i,inserts=0i,inserts_per_sec=0i,jumbo_chunks=0i,latency_commands=112011949i,latency_commands_count=1072472i,latency_reads=1877142443i,latency_reads_count=57086i,latency_writes=0i,latency_writes_count=0i,member_status="SEC",net_in_bytes=1212i,net_in_bytes_count=263928689i,net_out_bytes=41051i,net_out_bytes_count=2475389483i,open_connections=17i,operation_scan_and_order=34i,operation_write_conflicts=0i,page_faults=317i,percent_cache_dirty=1.6,percent_cache_used=73,queries=8764i,queries_per_sec=0i,queued_reads=0i,queued_writes=0i,repl_apply_batches_num=17839419i,repl_apply_batches_total_millis=399929i,repl_apply_ops=23355263i,repl_buffer_count=0i,repl_buffer_size_bytes=0i,repl_commands=11i,repl_commands_per_sec=0i,repl_deletes=440608i,repl_deletes_per_sec=0i,repl_executor_pool_in_progress_count=0i,repl_executor_queues_network_in_progress=0i,repl_executor_queues_sleepers=4i,repl_executor_unsignaled_events=0i,repl_getmores=0i,repl_getmores_per_sec=0i,repl_inserts=1875729i,repl_inserts_per_sec=0i,repl_lag=0i,repl_network_bytes=39122199371i,repl_network_getmores_num=34908797i,repl_network_getmores_total_millis=434805356i,repl_network_ops=23199086i,repl_oplog_window_sec=619292i,repl_queries=0i,repl_queries_per_sec=0i,repl_updates=21034729i,repl_updates_per_sec=38i,repl_state=2,resident_megabytes=6721i,state="SECONDARY",storage_freelist_search_bucket_exhausted=0i,storage_freelist_search_requests=0i,storage_freelist_search_scanned=0i,tcmalloc_central_cache_free_bytes=358512400i,tcmalloc_current_allocated_bytes=5427379424i,tcmalloc_current_total_thread_cache_bytes=70349552i,tcmalloc_heap_size=10199310336i,tcmalloc_max_total_thread_cache_bytes=1073741824i,tcmalloc_pageheap_commit_count=790819i,tcmalloc_pageheap_committed_bytes=7064821760i,tcmalloc_pageheap_decommit_count=533347i,tcmalloc_pageheap_free_bytes=1207816192i,tcmalloc_pageheap_reserve_count=7706i,tcmalloc_pageheap_scavenge_count=426235i,tcmalloc_pageheap_total_commit_bytes=116127649792i,tcmalloc_pageheap_total_decommit_bytes=109062828032i,tcmalloc_pageheap_total_reserve_bytes=10199310336i,tcmalloc_pageheap_unmapped_bytes=3134488576i,tcmalloc_spinlock_total_delay_ns=2518474348i,tcmalloc_thread_cache_free_bytes=70349552i,tcmalloc_total_free_bytes=429626144i,tcmalloc_transfer_cache_free_bytes=764192i,total_available=0i,total_created=0i,total_docs_scanned=735004782i,total_in_use=0i,total_keys_scanned=61
88216i,total_refreshing=0i,total_tickets_reads=128i,total_tickets_writes=128i,ttl_deletes=0i,ttl_deletes_per_sec=0i,ttl_passes=7892i,ttl_passes_per_sec=0i,update_command_failed=0i,update_command_total=0i,updates=0i,updates_per_sec=0i,uptime_ns=473590288000000i,version="3.6.17",vsize_megabytes=11136i,wtcache_app_threads_page_read_count=11467625i,wtcache_app_threads_page_read_time=1700336840i,wtcache_app_threads_page_write_count=13268184i,wtcache_bytes_read_into=348022587843i,wtcache_bytes_written_from=322571702254i,wtcache_current_bytes=5509459274i,wtcache_internal_pages_evicted=109108i,wtcache_max_bytes_configured=7547650048i,wtcache_modified_pages_evicted=911196i,wtcache_pages_evicted_by_app_thread=17366i,wtcache_pages_queued_for_eviction=16572754i,wtcache_pages_read_into=11689764i,wtcache_pages_requested_from=499825861i,wtcache_server_evicting_pages=0i,wtcache_tracked_dirty_bytes=117487510i,wtcache_unmodified_pages_evicted=11058458i,wtcache_worker_thread_evictingpages=11907226i 1586379707000000000
|
||||||
|
mongodb_db_stats,db_name=admin,hostname=127.0.0.1:27017 avg_obj_size=241,collections=2i,data_size=723i,index_size=49152i,indexes=3i,num_extents=0i,objects=3i,ok=1i,storage_size=53248i,type="db_stat" 1547159491000000000
|
||||||
|
mongodb_db_stats,db_name=local,hostname=127.0.0.1:27017 avg_obj_size=813.9705882352941,collections=6i,data_size=55350i,index_size=102400i,indexes=5i,num_extents=0i,objects=68i,ok=1i,storage_size=204800i,type="db_stat" 1547159491000000000
|
||||||
|
mongodb_col_stats,collection=foo,db_name=local,hostname=127.0.0.1:27017 size=375005928i,avg_obj_size=5494,type="col_stat",storage_size=249307136i,total_index_size=2138112i,ok=1i,count=68251i 1547159491000000000
|
||||||
|
mongodb_shard_stats,hostname=127.0.0.1:27017,in_use=3i,available=3i,created=4i,refreshing=0i 1522799074000000000
|
||||||
|
```
|
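Before the vendored Go sources below, a short standalone sketch (not part of the vendored files) of how the `servers` URLs in the configuration above are interpreted: bare `host:port` entries get a `mongodb://` scheme prepended before `net/url` parsing, which matches the backwards-compatibility handling implemented in mongodb.go further down.

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	servers := []string{"mongodb://user:auth_key@10.10.3.30:27017", "10.10.3.33:18832"}
	for _, serv := range servers {
		// Bare host:port entries are still accepted; a scheme is prepended
		// before parsing, as the plugin does for older configurations.
		if !strings.HasPrefix(serv, "mongodb://") {
			serv = "mongodb://" + serv
		}
		u, err := url.Parse(serv)
		if err != nil || u.Host == "" {
			fmt.Printf("unable to parse address %q\n", serv)
			continue
		}
		fmt.Println("host:", u.Host)
	}
}
```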
199 vendor/github.com/influxdata/telegraf/plugins/inputs/mongodb/mongodb.go generated vendored Normal file
@ -0,0 +1,199 @@
|
||||||
|
package mongodb
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/tls"
|
||||||
|
"crypto/x509"
|
||||||
|
"fmt"
|
||||||
|
"net"
|
||||||
|
"net/url"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/influxdata/telegraf"
|
||||||
|
tlsint "github.com/influxdata/telegraf/plugins/common/tls"
|
||||||
|
"github.com/influxdata/telegraf/plugins/inputs"
|
||||||
|
"gopkg.in/mgo.v2"
|
||||||
|
)
|
||||||
|
|
||||||
|
type MongoDB struct {
|
||||||
|
Servers []string
|
||||||
|
Ssl Ssl
|
||||||
|
mongos map[string]*Server
|
||||||
|
GatherClusterStatus bool
|
||||||
|
GatherPerdbStats bool
|
||||||
|
GatherColStats bool
|
||||||
|
ColStatsDbs []string
|
||||||
|
tlsint.ClientConfig
|
||||||
|
|
||||||
|
Log telegraf.Logger
|
||||||
|
}
|
||||||
|
|
||||||
|
type Ssl struct {
|
||||||
|
Enabled bool
|
||||||
|
CaCerts []string `toml:"cacerts"`
|
||||||
|
}
|
||||||
|
|
||||||
|
var sampleConfig = `
|
||||||
|
## An array of URLs of the form:
|
||||||
|
## "mongodb://" [user ":" pass "@"] host [ ":" port]
|
||||||
|
## For example:
|
||||||
|
## mongodb://user:auth_key@10.10.3.30:27017,
|
||||||
|
## mongodb://10.10.3.33:18832,
|
||||||
|
servers = ["mongodb://127.0.0.1:27017"]
|
||||||
|
|
||||||
|
## When true, collect cluster status
|
||||||
|
## Note that the query that counts jumbo chunks triggers a COLLSCAN, which
|
||||||
|
## may have an impact on performance.
|
||||||
|
# gather_cluster_status = true
|
||||||
|
|
||||||
|
## When true, collect per database stats
|
||||||
|
# gather_perdb_stats = false
|
||||||
|
|
||||||
|
## When true, collect per collection stats
|
||||||
|
# gather_col_stats = false
|
||||||
|
|
||||||
|
## List of db where collections stats are collected
|
||||||
|
## If empty, all db are concerned
|
||||||
|
# col_stats_dbs = ["local"]
|
||||||
|
|
||||||
|
## Optional TLS Config
|
||||||
|
# tls_ca = "/etc/telegraf/ca.pem"
|
||||||
|
# tls_cert = "/etc/telegraf/cert.pem"
|
||||||
|
# tls_key = "/etc/telegraf/key.pem"
|
||||||
|
## Use TLS but skip chain & host verification
|
||||||
|
# insecure_skip_verify = false
|
||||||
|
`
|
||||||
|
|
||||||
|
func (m *MongoDB) SampleConfig() string {
|
||||||
|
return sampleConfig
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*MongoDB) Description() string {
|
||||||
|
return "Read metrics from one or many MongoDB servers"
|
||||||
|
}
|
||||||
|
|
||||||
|
var localhost = &url.URL{Host: "mongodb://127.0.0.1:27017"}
|
||||||
|
|
||||||
|
// Reads stats from all configured servers accumulates stats.
|
||||||
|
// Returns one of the errors encountered while gather stats (if any).
|
||||||
|
func (m *MongoDB) Gather(acc telegraf.Accumulator) error {
|
||||||
|
if len(m.Servers) == 0 {
|
||||||
|
m.gatherServer(m.getMongoServer(localhost), acc)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
for i, serv := range m.Servers {
|
||||||
|
if !strings.HasPrefix(serv, "mongodb://") {
|
||||||
|
// Preserve backwards compatibility for hostnames without a
|
||||||
|
// scheme, broken in go 1.8. Remove in Telegraf 2.0
|
||||||
|
serv = "mongodb://" + serv
|
||||||
|
m.Log.Warnf("Using %q as connection URL; please update your configuration to use an URL", serv)
|
||||||
|
m.Servers[i] = serv
|
||||||
|
}
|
||||||
|
|
||||||
|
u, err := url.Parse(serv)
|
||||||
|
if err != nil {
|
||||||
|
m.Log.Errorf("Unable to parse address %q: %s", serv, err.Error())
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if u.Host == "" {
|
||||||
|
m.Log.Errorf("Unable to parse address %q", serv)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
wg.Add(1)
|
||||||
|
go func(srv *Server) {
|
||||||
|
defer wg.Done()
|
||||||
|
err := m.gatherServer(srv, acc)
|
||||||
|
if err != nil {
|
||||||
|
m.Log.Errorf("Error in plugin: %v", err)
|
||||||
|
}
|
||||||
|
}(m.getMongoServer(u))
|
||||||
|
}
|
||||||
|
|
||||||
|
wg.Wait()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MongoDB) getMongoServer(url *url.URL) *Server {
|
||||||
|
if _, ok := m.mongos[url.Host]; !ok {
|
||||||
|
m.mongos[url.Host] = &Server{
|
||||||
|
Log: m.Log,
|
||||||
|
Url: url,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return m.mongos[url.Host]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MongoDB) gatherServer(server *Server, acc telegraf.Accumulator) error {
|
||||||
|
if server.Session == nil {
|
||||||
|
var dialAddrs []string
|
||||||
|
if server.Url.User != nil {
|
||||||
|
dialAddrs = []string{server.Url.String()}
|
||||||
|
} else {
|
||||||
|
dialAddrs = []string{server.Url.Host}
|
||||||
|
}
|
||||||
|
dialInfo, err := mgo.ParseURL(dialAddrs[0])
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("unable to parse URL %q: %s", dialAddrs[0], err.Error())
|
||||||
|
}
|
||||||
|
dialInfo.Direct = true
|
||||||
|
dialInfo.Timeout = 5 * time.Second
|
||||||
|
|
||||||
|
var tlsConfig *tls.Config
|
||||||
|
|
||||||
|
if m.Ssl.Enabled {
|
||||||
|
// Deprecated TLS config
|
||||||
|
tlsConfig = &tls.Config{}
|
||||||
|
if len(m.Ssl.CaCerts) > 0 {
|
||||||
|
roots := x509.NewCertPool()
|
||||||
|
for _, caCert := range m.Ssl.CaCerts {
|
||||||
|
ok := roots.AppendCertsFromPEM([]byte(caCert))
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("failed to parse root certificate")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
tlsConfig.RootCAs = roots
|
||||||
|
} else {
|
||||||
|
tlsConfig.InsecureSkipVerify = true
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
tlsConfig, err = m.ClientConfig.TLSConfig()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If configured to use TLS, add a dial function
|
||||||
|
if tlsConfig != nil {
|
||||||
|
dialInfo.DialServer = func(addr *mgo.ServerAddr) (net.Conn, error) {
|
||||||
|
conn, err := tls.Dial("tcp", addr.String(), tlsConfig)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("error in Dial, %s\n", err.Error())
|
||||||
|
}
|
||||||
|
return conn, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
sess, err := mgo.DialWithInfo(dialInfo)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("unable to connect to MongoDB: %s", err.Error())
|
||||||
|
}
|
||||||
|
server.Session = sess
|
||||||
|
}
|
||||||
|
return server.gatherData(acc, m.GatherClusterStatus, m.GatherPerdbStats, m.GatherColStats, m.ColStatsDbs)
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
inputs.Add("mongodb", func() telegraf.Input {
|
||||||
|
return &MongoDB{
|
||||||
|
mongos: make(map[string]*Server),
|
||||||
|
GatherClusterStatus: true,
|
||||||
|
GatherPerdbStats: false,
|
||||||
|
GatherColStats: false,
|
||||||
|
ColStatsDbs: []string{"local"},
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
412 vendor/github.com/influxdata/telegraf/plugins/inputs/mongodb/mongodb_data.go generated vendored Normal file
@ -0,0 +1,412 @@
|
||||||
|
package mongodb
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"strconv"
|
||||||
|
|
||||||
|
"github.com/influxdata/telegraf"
|
||||||
|
)
|
||||||
|
|
||||||
|
type MongodbData struct {
|
||||||
|
StatLine *StatLine
|
||||||
|
Fields map[string]interface{}
|
||||||
|
Tags map[string]string
|
||||||
|
DbData []DbData
|
||||||
|
ColData []ColData
|
||||||
|
ShardHostData []DbData
|
||||||
|
}
|
||||||
|
|
||||||
|
type DbData struct {
|
||||||
|
Name string
|
||||||
|
Fields map[string]interface{}
|
||||||
|
}
|
||||||
|
|
||||||
|
type ColData struct {
|
||||||
|
Name string
|
||||||
|
DbName string
|
||||||
|
Fields map[string]interface{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewMongodbData(statLine *StatLine, tags map[string]string) *MongodbData {
|
||||||
|
return &MongodbData{
|
||||||
|
StatLine: statLine,
|
||||||
|
Tags: tags,
|
||||||
|
Fields: make(map[string]interface{}),
|
||||||
|
DbData: []DbData{},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var DefaultStats = map[string]string{
|
||||||
|
"uptime_ns": "UptimeNanos",
|
||||||
|
"inserts": "InsertCnt",
|
||||||
|
"inserts_per_sec": "Insert",
|
||||||
|
"queries": "QueryCnt",
|
||||||
|
"queries_per_sec": "Query",
|
||||||
|
"updates": "UpdateCnt",
|
||||||
|
"updates_per_sec": "Update",
|
||||||
|
"deletes": "DeleteCnt",
|
||||||
|
"deletes_per_sec": "Delete",
|
||||||
|
"getmores": "GetMoreCnt",
|
||||||
|
"getmores_per_sec": "GetMore",
|
||||||
|
"commands": "CommandCnt",
|
||||||
|
"commands_per_sec": "Command",
|
||||||
|
"flushes": "FlushesCnt",
|
||||||
|
"flushes_per_sec": "Flushes",
|
||||||
|
"flushes_total_time_ns": "FlushesTotalTime",
|
||||||
|
"vsize_megabytes": "Virtual",
|
||||||
|
"resident_megabytes": "Resident",
|
||||||
|
"queued_reads": "QueuedReaders",
|
||||||
|
"queued_writes": "QueuedWriters",
|
||||||
|
"active_reads": "ActiveReaders",
|
||||||
|
"active_writes": "ActiveWriters",
|
||||||
|
"available_reads": "AvailableReaders",
|
||||||
|
"available_writes": "AvailableWriters",
|
||||||
|
"total_tickets_reads": "TotalTicketsReaders",
|
||||||
|
"total_tickets_writes": "TotalTicketsWriters",
|
||||||
|
"net_in_bytes_count": "NetInCnt",
|
||||||
|
"net_in_bytes": "NetIn",
|
||||||
|
"net_out_bytes_count": "NetOutCnt",
|
||||||
|
"net_out_bytes": "NetOut",
|
||||||
|
"open_connections": "NumConnections",
|
||||||
|
"ttl_deletes": "DeletedDocumentsCnt",
|
||||||
|
"ttl_deletes_per_sec": "DeletedDocuments",
|
||||||
|
"ttl_passes": "PassesCnt",
|
||||||
|
"ttl_passes_per_sec": "Passes",
|
||||||
|
"cursor_timed_out": "TimedOutC",
|
||||||
|
"cursor_timed_out_count": "TimedOutCCnt",
|
||||||
|
"cursor_no_timeout": "NoTimeoutC",
|
||||||
|
"cursor_no_timeout_count": "NoTimeoutCCnt",
|
||||||
|
"cursor_pinned": "PinnedC",
|
||||||
|
"cursor_pinned_count": "PinnedCCnt",
|
||||||
|
"cursor_total": "TotalC",
|
||||||
|
"cursor_total_count": "TotalCCnt",
|
||||||
|
"document_deleted": "DeletedD",
|
||||||
|
"document_inserted": "InsertedD",
|
||||||
|
"document_returned": "ReturnedD",
|
||||||
|
"document_updated": "UpdatedD",
|
||||||
|
"connections_current": "CurrentC",
|
||||||
|
"connections_available": "AvailableC",
|
||||||
|
"connections_total_created": "TotalCreatedC",
|
||||||
|
"operation_scan_and_order": "ScanAndOrderOp",
|
||||||
|
"operation_write_conflicts": "WriteConflictsOp",
|
||||||
|
"total_keys_scanned": "TotalKeysScanned",
|
||||||
|
"total_docs_scanned": "TotalObjectsScanned",
|
||||||
|
}
|
||||||
|
|
||||||
|
var DefaultAssertsStats = map[string]string{
|
||||||
|
"assert_regular": "Regular",
|
||||||
|
"assert_warning": "Warning",
|
||||||
|
"assert_msg": "Msg",
|
||||||
|
"assert_user": "User",
|
||||||
|
"assert_rollovers": "Rollovers",
|
||||||
|
}
|
||||||
|
|
||||||
|
var DefaultCommandsStats = map[string]string{
|
||||||
|
"aggregate_command_total": "AggregateCommandTotal",
|
||||||
|
"aggregate_command_failed": "AggregateCommandFailed",
|
||||||
|
"count_command_total": "CountCommandTotal",
|
||||||
|
"count_command_failed": "CountCommandFailed",
|
||||||
|
"delete_command_total": "DeleteCommandTotal",
|
||||||
|
"delete_command_failed": "DeleteCommandFailed",
|
||||||
|
"distinct_command_total": "DistinctCommandTotal",
|
||||||
|
"distinct_command_failed": "DistinctCommandFailed",
|
||||||
|
"find_command_total": "FindCommandTotal",
|
||||||
|
"find_command_failed": "FindCommandFailed",
|
||||||
|
"find_and_modify_command_total": "FindAndModifyCommandTotal",
|
||||||
|
"find_and_modify_command_failed": "FindAndModifyCommandFailed",
|
||||||
|
"get_more_command_total": "GetMoreCommandTotal",
|
||||||
|
"get_more_command_failed": "GetMoreCommandFailed",
|
||||||
|
"insert_command_total": "InsertCommandTotal",
|
||||||
|
"insert_command_failed": "InsertCommandFailed",
|
||||||
|
"update_command_total": "UpdateCommandTotal",
|
||||||
|
"update_command_failed": "UpdateCommandFailed",
|
||||||
|
}
|
||||||
|
|
||||||
|
var DefaultLatencyStats = map[string]string{
|
||||||
|
"latency_writes_count": "WriteOpsCnt",
|
||||||
|
"latency_writes": "WriteLatency",
|
||||||
|
"latency_reads_count": "ReadOpsCnt",
|
||||||
|
"latency_reads": "ReadLatency",
|
||||||
|
"latency_commands_count": "CommandOpsCnt",
|
||||||
|
"latency_commands": "CommandLatency",
|
||||||
|
}
|
||||||
|
|
||||||
|
var DefaultReplStats = map[string]string{
|
||||||
|
"repl_inserts": "InsertRCnt",
|
||||||
|
"repl_inserts_per_sec": "InsertR",
|
||||||
|
"repl_queries": "QueryRCnt",
|
||||||
|
"repl_queries_per_sec": "QueryR",
|
||||||
|
"repl_updates": "UpdateRCnt",
|
||||||
|
"repl_updates_per_sec": "UpdateR",
|
||||||
|
"repl_deletes": "DeleteRCnt",
|
||||||
|
"repl_deletes_per_sec": "DeleteR",
|
||||||
|
"repl_getmores": "GetMoreRCnt",
|
||||||
|
"repl_getmores_per_sec": "GetMoreR",
|
||||||
|
"repl_commands": "CommandRCnt",
|
||||||
|
"repl_commands_per_sec": "CommandR",
|
||||||
|
"member_status": "NodeType",
|
||||||
|
"state": "NodeState",
|
||||||
|
"repl_state": "NodeStateInt",
|
||||||
|
"repl_lag": "ReplLag",
|
||||||
|
"repl_network_bytes": "ReplNetworkBytes",
|
||||||
|
"repl_network_getmores_num": "ReplNetworkGetmoresNum",
|
||||||
|
"repl_network_getmores_total_millis": "ReplNetworkGetmoresTotalMillis",
|
||||||
|
"repl_network_ops": "ReplNetworkOps",
|
||||||
|
"repl_buffer_count": "ReplBufferCount",
|
||||||
|
"repl_buffer_size_bytes": "ReplBufferSizeBytes",
|
||||||
|
"repl_apply_batches_num": "ReplApplyBatchesNum",
|
||||||
|
"repl_apply_batches_total_millis": "ReplApplyBatchesTotalMillis",
|
||||||
|
"repl_apply_ops": "ReplApplyOps",
|
||||||
|
"repl_executor_pool_in_progress_count": "ReplExecutorPoolInProgressCount",
|
||||||
|
"repl_executor_queues_network_in_progress": "ReplExecutorQueuesNetworkInProgress",
|
||||||
|
"repl_executor_queues_sleepers": "ReplExecutorQueuesSleepers",
|
||||||
|
"repl_executor_unsignaled_events": "ReplExecutorUnsignaledEvents",
|
||||||
|
}
|
||||||
|
|
||||||
|
var DefaultClusterStats = map[string]string{
|
||||||
|
"jumbo_chunks": "JumboChunksCount",
|
||||||
|
}
|
||||||
|
|
||||||
|
var DefaultShardStats = map[string]string{
|
||||||
|
"total_in_use": "TotalInUse",
|
||||||
|
"total_available": "TotalAvailable",
|
||||||
|
"total_created": "TotalCreated",
|
||||||
|
"total_refreshing": "TotalRefreshing",
|
}

var ShardHostStats = map[string]string{
	"in_use":     "InUse",
	"available":  "Available",
	"created":    "Created",
	"refreshing": "Refreshing",
}

var MmapStats = map[string]string{
	"mapped_megabytes":     "Mapped",
	"non-mapped_megabytes": "NonMapped",
	"page_faults":          "FaultsCnt",
	"page_faults_per_sec":  "Faults",
}

var WiredTigerStats = map[string]string{
	"percent_cache_dirty": "CacheDirtyPercent",
	"percent_cache_used":  "CacheUsedPercent",
}

var WiredTigerExtStats = map[string]string{
	"wtcache_tracked_dirty_bytes":           "TrackedDirtyBytes",
	"wtcache_current_bytes":                 "CurrentCachedBytes",
	"wtcache_max_bytes_configured":          "MaxBytesConfigured",
	"wtcache_app_threads_page_read_count":   "AppThreadsPageReadCount",
	"wtcache_app_threads_page_read_time":    "AppThreadsPageReadTime",
	"wtcache_app_threads_page_write_count":  "AppThreadsPageWriteCount",
	"wtcache_bytes_written_from":            "BytesWrittenFrom",
	"wtcache_bytes_read_into":               "BytesReadInto",
	"wtcache_pages_evicted_by_app_thread":   "PagesEvictedByAppThread",
	"wtcache_pages_queued_for_eviction":     "PagesQueuedForEviction",
	"wtcache_pages_read_into":               "PagesReadIntoCache",
	"wtcache_pages_written_from":            "PagesWrittenFromCache",
	"wtcache_pages_requested_from":          "PagesRequestedFromCache",
	"wtcache_server_evicting_pages":         "ServerEvictingPages",
	"wtcache_worker_thread_evictingpages":   "WorkerThreadEvictingPages",
	"wtcache_internal_pages_evicted":        "InternalPagesEvicted",
	"wtcache_modified_pages_evicted":        "ModifiedPagesEvicted",
	"wtcache_unmodified_pages_evicted":      "UnmodifiedPagesEvicted",
}

var DefaultTCMallocStats = map[string]string{
	"tcmalloc_current_allocated_bytes":           "TCMallocCurrentAllocatedBytes",
	"tcmalloc_heap_size":                         "TCMallocHeapSize",
	"tcmalloc_central_cache_free_bytes":          "TCMallocCentralCacheFreeBytes",
	"tcmalloc_current_total_thread_cache_bytes":  "TCMallocCurrentTotalThreadCacheBytes",
	"tcmalloc_max_total_thread_cache_bytes":      "TCMallocMaxTotalThreadCacheBytes",
	"tcmalloc_total_free_bytes":                  "TCMallocTotalFreeBytes",
	"tcmalloc_transfer_cache_free_bytes":         "TCMallocTransferCacheFreeBytes",
	"tcmalloc_thread_cache_free_bytes":           "TCMallocThreadCacheFreeBytes",
	"tcmalloc_spinlock_total_delay_ns":           "TCMallocSpinLockTotalDelayNanos",
	"tcmalloc_pageheap_free_bytes":               "TCMallocPageheapFreeBytes",
	"tcmalloc_pageheap_unmapped_bytes":           "TCMallocPageheapUnmappedBytes",
	"tcmalloc_pageheap_committed_bytes":          "TCMallocPageheapComittedBytes",
	"tcmalloc_pageheap_scavenge_count":           "TCMallocPageheapScavengeCount",
	"tcmalloc_pageheap_commit_count":             "TCMallocPageheapCommitCount",
	"tcmalloc_pageheap_total_commit_bytes":       "TCMallocPageheapTotalCommitBytes",
	"tcmalloc_pageheap_decommit_count":           "TCMallocPageheapDecommitCount",
	"tcmalloc_pageheap_total_decommit_bytes":     "TCMallocPageheapTotalDecommitBytes",
	"tcmalloc_pageheap_reserve_count":            "TCMallocPageheapReserveCount",
	"tcmalloc_pageheap_total_reserve_bytes":      "TCMallocPageheapTotalReserveBytes",
}

var DefaultStorageStats = map[string]string{
	"storage_freelist_search_bucket_exhausted": "StorageFreelistSearchBucketExhausted",
	"storage_freelist_search_requests":          "StorageFreelistSearchRequests",
	"storage_freelist_search_scanned":           "StorageFreelistSearchScanned",
}

var DbDataStats = map[string]string{
	"collections":  "Collections",
	"objects":      "Objects",
	"avg_obj_size": "AvgObjSize",
	"data_size":    "DataSize",
	"storage_size": "StorageSize",
	"num_extents":  "NumExtents",
	"indexes":      "Indexes",
	"index_size":   "IndexSize",
	"ok":           "Ok",
}

var ColDataStats = map[string]string{
	"count":            "Count",
	"size":             "Size",
	"avg_obj_size":     "AvgObjSize",
	"storage_size":     "StorageSize",
	"total_index_size": "TotalIndexSize",
	"ok":               "Ok",
}

func (d *MongodbData) AddDbStats() {
	for _, dbstat := range d.StatLine.DbStatsLines {
		dbStatLine := reflect.ValueOf(&dbstat).Elem()
		newDbData := &DbData{
			Name:   dbstat.Name,
			Fields: make(map[string]interface{}),
		}
		newDbData.Fields["type"] = "db_stat"
		for key, value := range DbDataStats {
			val := dbStatLine.FieldByName(value).Interface()
			newDbData.Fields[key] = val
		}
		d.DbData = append(d.DbData, *newDbData)
	}
}

func (d *MongodbData) AddColStats() {
	for _, colstat := range d.StatLine.ColStatsLines {
		colStatLine := reflect.ValueOf(&colstat).Elem()
		newColData := &ColData{
			Name:   colstat.Name,
			DbName: colstat.DbName,
			Fields: make(map[string]interface{}),
		}
		newColData.Fields["type"] = "col_stat"
		for key, value := range ColDataStats {
			val := colStatLine.FieldByName(value).Interface()
			newColData.Fields[key] = val
		}
		d.ColData = append(d.ColData, *newColData)
	}
}

func (d *MongodbData) AddShardHostStats() {
	for host, hostStat := range d.StatLine.ShardHostStatsLines {
		hostStatLine := reflect.ValueOf(&hostStat).Elem()
		newDbData := &DbData{
			Name:   host,
			Fields: make(map[string]interface{}),
		}
		newDbData.Fields["type"] = "shard_host_stat"
		for k, v := range ShardHostStats {
			val := hostStatLine.FieldByName(v).Interface()
			newDbData.Fields[k] = val
		}
		d.ShardHostData = append(d.ShardHostData, *newDbData)
	}
}

func (d *MongodbData) AddDefaultStats() {
	statLine := reflect.ValueOf(d.StatLine).Elem()
	d.addStat(statLine, DefaultStats)
	if d.StatLine.NodeType != "" {
		d.addStat(statLine, DefaultReplStats)
		d.Tags["node_type"] = d.StatLine.NodeType
	}

	if d.StatLine.ReadLatency > 0 {
		d.addStat(statLine, DefaultLatencyStats)
	}

	if d.StatLine.ReplSetName != "" {
		d.Tags["rs_name"] = d.StatLine.ReplSetName
	}

	if d.StatLine.OplogStats != nil {
		d.add("repl_oplog_window_sec", d.StatLine.OplogStats.TimeDiff)
	}

	if d.StatLine.Version != "" {
		d.add("version", d.StatLine.Version)
	}

	d.addStat(statLine, DefaultAssertsStats)
	d.addStat(statLine, DefaultClusterStats)
	d.addStat(statLine, DefaultCommandsStats)
	d.addStat(statLine, DefaultShardStats)
	d.addStat(statLine, DefaultStorageStats)
	d.addStat(statLine, DefaultTCMallocStats)

	if d.StatLine.StorageEngine == "mmapv1" || d.StatLine.StorageEngine == "rocksdb" {
		d.addStat(statLine, MmapStats)
	} else if d.StatLine.StorageEngine == "wiredTiger" {
		for key, value := range WiredTigerStats {
			val := statLine.FieldByName(value).Interface()
			percentVal := fmt.Sprintf("%.1f", val.(float64)*100)
			floatVal, _ := strconv.ParseFloat(percentVal, 64)
			d.add(key, floatVal)
		}
		d.addStat(statLine, WiredTigerExtStats)
		d.add("page_faults", d.StatLine.FaultsCnt)
	}
}

func (d *MongodbData) addStat(statLine reflect.Value, stats map[string]string) {
	for key, value := range stats {
		val := statLine.FieldByName(value).Interface()
		d.add(key, val)
	}
}

func (d *MongodbData) add(key string, val interface{}) {
	d.Fields[key] = val
}

func (d *MongodbData) flush(acc telegraf.Accumulator) {
	acc.AddFields(
		"mongodb",
		d.Fields,
		d.Tags,
		d.StatLine.Time,
	)
	d.Fields = make(map[string]interface{})

	for _, db := range d.DbData {
		d.Tags["db_name"] = db.Name
		acc.AddFields(
			"mongodb_db_stats",
			db.Fields,
			d.Tags,
			d.StatLine.Time,
		)
		db.Fields = make(map[string]interface{})
	}
	for _, col := range d.ColData {
		d.Tags["collection"] = col.Name
		d.Tags["db_name"] = col.DbName
		acc.AddFields(
			"mongodb_col_stats",
			col.Fields,
			d.Tags,
			d.StatLine.Time,
		)
		col.Fields = make(map[string]interface{})
	}
	for _, host := range d.ShardHostData {
		d.Tags["hostname"] = host.Name
		acc.AddFields(
			"mongodb_shard_stats",
			host.Fields,
			d.Tags,
			d.StatLine.Time,
		)
		host.Fields = make(map[string]interface{})
	}
}
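The maps above drive a reflection-based lookup: each metric key names an exported field on the corresponding stat-line struct, resolved at runtime with `FieldByName`. A minimal, self-contained sketch of that pattern, using a hypothetical `statLine` struct rather than the plugin's real one:

```go
package main

import (
	"fmt"
	"reflect"
)

// Hypothetical stat line with two exported fields; the plugin's real
// StatLine struct has many more.
type statLine struct {
	InUse     int64
	Available int64
}

// Metric key -> struct field name, same shape as ShardHostStats above.
var fieldMap = map[string]string{
	"in_use":    "InUse",
	"available": "Available",
}

func main() {
	line := statLine{InUse: 3, Available: 7}
	v := reflect.ValueOf(&line).Elem()
	fields := make(map[string]interface{})
	for key, name := range fieldMap {
		// Resolve the exported field by name and copy its value out.
		fields[key] = v.FieldByName(name).Interface()
	}
	fmt.Println(fields) // map[available:7 in_use:3]
}
```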
299 vendor/github.com/influxdata/telegraf/plugins/inputs/mongodb/mongodb_server.go (generated, vendored) Normal file
@@ -0,0 +1,299 @@
package mongodb

import (
	"fmt"
	"net/url"
	"strings"
	"time"

	"github.com/influxdata/telegraf"
	"gopkg.in/mgo.v2"
	"gopkg.in/mgo.v2/bson"
)

type Server struct {
	Url        *url.URL
	Session    *mgo.Session
	lastResult *MongoStatus

	Log telegraf.Logger
}

func (s *Server) getDefaultTags() map[string]string {
	tags := make(map[string]string)
	tags["hostname"] = s.Url.Host
	return tags
}

type oplogEntry struct {
	Timestamp bson.MongoTimestamp `bson:"ts"`
}

func IsAuthorization(err error) bool {
	return strings.Contains(err.Error(), "not authorized")
}

func (s *Server) authLog(err error) {
	if IsAuthorization(err) {
		s.Log.Debug(err.Error())
	} else {
		s.Log.Error(err.Error())
	}
}

func (s *Server) gatherServerStatus() (*ServerStatus, error) {
	serverStatus := &ServerStatus{}
	err := s.Session.DB("admin").Run(bson.D{
		{
			Name:  "serverStatus",
			Value: 1,
		},
		{
			Name:  "recordStats",
			Value: 0,
		},
	}, serverStatus)
	if err != nil {
		return nil, err
	}
	return serverStatus, nil
}

func (s *Server) gatherReplSetStatus() (*ReplSetStatus, error) {
	replSetStatus := &ReplSetStatus{}
	err := s.Session.DB("admin").Run(bson.D{
		{
			Name:  "replSetGetStatus",
			Value: 1,
		},
	}, replSetStatus)
	if err != nil {
		return nil, err
	}
	return replSetStatus, nil
}

func (s *Server) gatherClusterStatus() (*ClusterStatus, error) {
	chunkCount, err := s.Session.DB("config").C("chunks").Find(bson.M{"jumbo": true}).Count()
	if err != nil {
		return nil, err
	}

	return &ClusterStatus{
		JumboChunksCount: int64(chunkCount),
	}, nil
}

func (s *Server) gatherShardConnPoolStats() (*ShardStats, error) {
	shardStats := &ShardStats{}
	err := s.Session.DB("admin").Run(bson.D{
		{
			Name:  "shardConnPoolStats",
			Value: 1,
		},
	}, &shardStats)
	if err != nil {
		return nil, err
	}
	return shardStats, nil
}

func (s *Server) gatherDBStats(name string) (*Db, error) {
	stats := &DbStatsData{}
	err := s.Session.DB(name).Run(bson.D{
		{
			Name:  "dbStats",
			Value: 1,
		},
	}, stats)
	if err != nil {
		return nil, err
	}

	return &Db{
		Name:        name,
		DbStatsData: stats,
	}, nil
}

func (s *Server) getOplogReplLag(collection string) (*OplogStats, error) {
	query := bson.M{"ts": bson.M{"$exists": true}}

	var first oplogEntry
	err := s.Session.DB("local").C(collection).Find(query).Sort("$natural").Limit(1).One(&first)
	if err != nil {
		return nil, err
	}

	var last oplogEntry
	err = s.Session.DB("local").C(collection).Find(query).Sort("-$natural").Limit(1).One(&last)
	if err != nil {
		return nil, err
	}

	firstTime := time.Unix(int64(first.Timestamp>>32), 0)
	lastTime := time.Unix(int64(last.Timestamp>>32), 0)
	stats := &OplogStats{
		TimeDiff: int64(lastTime.Sub(firstTime).Seconds()),
	}
	return stats, nil
}

// The "oplog.rs" collection is stored on all replica set members.
//
// The "oplog.$main" collection is created on the master node of a
// master-slave replicated deployment. As of MongoDB 3.2, master-slave
// replication has been deprecated.
func (s *Server) gatherOplogStats() (*OplogStats, error) {
	stats, err := s.getOplogReplLag("oplog.rs")
	if err == nil {
		return stats, nil
	}

	return s.getOplogReplLag("oplog.$main")
}

func (s *Server) gatherCollectionStats(colStatsDbs []string) (*ColStats, error) {
	names, err := s.Session.DatabaseNames()
	if err != nil {
		return nil, err
	}

	results := &ColStats{}
	for _, dbName := range names {
		if stringInSlice(dbName, colStatsDbs) || len(colStatsDbs) == 0 {
			var colls []string
			colls, err = s.Session.DB(dbName).CollectionNames()
			if err != nil {
				s.Log.Errorf("Error getting collection names: %s", err.Error())
				continue
			}
			for _, colName := range colls {
				colStatLine := &ColStatsData{}
				err = s.Session.DB(dbName).Run(bson.D{
					{
						Name:  "collStats",
						Value: colName,
					},
				}, colStatLine)
				if err != nil {
					s.authLog(fmt.Errorf("error getting col stats from %q: %v", colName, err))
					continue
				}
				collection := &Collection{
					Name:         colName,
					DbName:       dbName,
					ColStatsData: colStatLine,
				}
				results.Collections = append(results.Collections, *collection)
			}
		}
	}
	return results, nil
}

func (s *Server) gatherData(acc telegraf.Accumulator, gatherClusterStatus bool, gatherDbStats bool, gatherColStats bool, colStatsDbs []string) error {
	s.Session.SetMode(mgo.Eventual, true)
	s.Session.SetSocketTimeout(0)

	serverStatus, err := s.gatherServerStatus()
	if err != nil {
		return err
	}

	// Get replica set status, an error indicates that the server is not a
	// member of a replica set.
	replSetStatus, err := s.gatherReplSetStatus()
	if err != nil {
		s.Log.Debugf("Unable to gather replica set status: %s", err.Error())
	}

	// Gather the oplog if we are a member of a replica set. Non-replica set
	// members do not have the oplog collections.
	var oplogStats *OplogStats
	if replSetStatus != nil {
		oplogStats, err = s.gatherOplogStats()
		if err != nil {
			s.authLog(fmt.Errorf("Unable to get oplog stats: %v", err))
		}
	}

	var clusterStatus *ClusterStatus
	if gatherClusterStatus {
		status, err := s.gatherClusterStatus()
		if err != nil {
			s.Log.Debugf("Unable to gather cluster status: %s", err.Error())
		}
		clusterStatus = status
	}

	shardStats, err := s.gatherShardConnPoolStats()
	if err != nil {
		s.authLog(fmt.Errorf("unable to gather shard connection pool stats: %s", err.Error()))
	}

	var collectionStats *ColStats
	if gatherColStats {
		stats, err := s.gatherCollectionStats(colStatsDbs)
		if err != nil {
			return err
		}
		collectionStats = stats
	}

	dbStats := &DbStats{}
	if gatherDbStats {
		names, err := s.Session.DatabaseNames()
		if err != nil {
			return err
		}

		for _, name := range names {
			db, err := s.gatherDBStats(name)
			if err != nil {
				s.Log.Debugf("Error getting db stats from %q: %s", name, err.Error())
			}
			dbStats.Dbs = append(dbStats.Dbs, *db)
		}
	}

	result := &MongoStatus{
		ServerStatus:  serverStatus,
		ReplSetStatus: replSetStatus,
		ClusterStatus: clusterStatus,
		DbStats:       dbStats,
		ColStats:      collectionStats,
		ShardStats:    shardStats,
		OplogStats:    oplogStats,
	}

	result.SampleTime = time.Now()
	if s.lastResult != nil && result != nil {
		duration := result.SampleTime.Sub(s.lastResult.SampleTime)
		durationInSeconds := int64(duration.Seconds())
		if durationInSeconds == 0 {
			durationInSeconds = 1
		}
		data := NewMongodbData(
			NewStatLine(*s.lastResult, *result, s.Url.Host, true, durationInSeconds),
			s.getDefaultTags(),
		)
		data.AddDefaultStats()
		data.AddDbStats()
		data.AddColStats()
		data.AddShardHostStats()
		data.flush(acc)
	}

	s.lastResult = result
	return nil
}

func stringInSlice(a string, list []string) bool {
	for _, b := range list {
		if b == a {
			return true
		}
	}
	return false
}
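getOplogReplLag above derives the replication window from the first and last oplog entries; MongoDB timestamps pack Unix seconds into the upper 32 bits, which is why the code shifts by 32. A small sketch of just that arithmetic, with made-up timestamp values:

```go
package main

import (
	"fmt"
	"time"
)

// oplogWindowSeconds mirrors the first.Timestamp>>32 extraction used in
// getOplogReplLag: seconds live in the upper 32 bits, an ordinal counter
// in the lower 32 bits.
func oplogWindowSeconds(first, last int64) int64 {
	firstTime := time.Unix(first>>32, 0)
	lastTime := time.Unix(last>>32, 0)
	return int64(lastTime.Sub(firstTime).Seconds())
}

func main() {
	// Hypothetical raw timestamps: one hour apart, counters 5 and 42.
	first := int64(1600000000)<<32 | 5
	last := int64(1600003600)<<32 | 42
	fmt.Println(oplogWindowSeconds(first, last)) // 3600
}
```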
1369 vendor/github.com/influxdata/telegraf/plugins/inputs/mongodb/mongostat.go (generated, vendored) Normal file
File diff suppressed because it is too large
171 vendor/github.com/influxdata/telegraf/plugins/inputs/prometheus/README.md (generated, vendored) Normal file
@@ -0,0 +1,171 @@
# Prometheus Input Plugin

The prometheus input plugin gathers metrics from HTTP servers exposing metrics
in Prometheus format.

### Configuration:

```toml
# Read metrics from one or many prometheus clients
[[inputs.prometheus]]
  ## An array of urls to scrape metrics from.
  urls = ["http://localhost:9100/metrics"]

  ## Metric version controls the mapping from Prometheus metrics into
  ## Telegraf metrics. When using the prometheus_client output, use the same
  ## value in both plugins to ensure metrics are round-tripped without
  ## modification.
  ##
  ##   example: metric_version = 1; deprecated in 1.13
  ##            metric_version = 2; recommended version
  # metric_version = 1

  ## An array of Kubernetes services to scrape metrics from.
  # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"]

  ## Kubernetes config file to create client from.
  # kube_config = "/path/to/kubernetes.config"

  ## Scrape Kubernetes pods for the following prometheus annotations:
  ## - prometheus.io/scrape: Enable scraping for this pod
  ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to
  ##     set this to `https` & most likely set the tls config.
  ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation.
  ## - prometheus.io/port: If port is not 9102 use this annotation
  # monitor_kubernetes_pods = true
  ## Restricts Kubernetes monitoring to a single namespace
  ##   ex: monitor_kubernetes_pods_namespace = "default"
  # monitor_kubernetes_pods_namespace = ""
  # label selector to target pods which have the label
  # kubernetes_label_selector = "env=dev,app=nginx"
  # field selector to target pods
  # eg. To scrape pods on a specific node
  # kubernetes_field_selector = "spec.nodeName=$HOSTNAME"

  ## Use bearer token for authorization. ('bearer_token' takes priority)
  # bearer_token = "/path/to/bearer/token"
  ## OR
  # bearer_token_string = "abc_123"

  ## HTTP Basic Authentication username and password. ('bearer_token' and
  ## 'bearer_token_string' take priority)
  # username = ""
  # password = ""

  ## Specify timeout duration for slower prometheus clients (default is 3s)
  # response_timeout = "3s"

  ## Optional TLS Config
  # tls_ca = /path/to/cafile
  # tls_cert = /path/to/certfile
  # tls_key = /path/to/keyfile
  ## Use TLS but skip chain & host verification
  # insecure_skip_verify = false
```

`urls` can contain a unix socket as well. If a different path is required (default is `/metrics` for both http[s] and unix) for a unix socket, add `path` as a query parameter as follows: `unix:///var/run/prometheus.sock?path=/custom/metrics`
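For example, a hypothetical socket path scraped at a custom metrics path could be configured as:

```toml
[[inputs.prometheus]]
  ## Hypothetical unix socket; the path query parameter overrides /metrics.
  urls = ["unix:///var/run/prometheus.sock?path=/custom/metrics"]
```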
#### Kubernetes Service Discovery

URLs listed in the `kubernetes_services` parameter will be expanded
by looking up all A records assigned to the hostname as described in
[Kubernetes DNS service discovery](https://kubernetes.io/docs/concepts/services-networking/service/#dns).

This method can be used to locate all
[Kubernetes headless services](https://kubernetes.io/docs/concepts/services-networking/service/#headless-services).

#### Kubernetes scraping

Enabling this option allows the plugin to scrape for prometheus annotations on Kubernetes
pods. The plugin can run inside your kubernetes cluster, or it can use a kubeconfig
file to determine where to monitor.
Currently the following annotations are supported:

* `prometheus.io/scrape` Enable scraping for this pod.
* `prometheus.io/scheme` If the metrics endpoint is secured then you will need to set this to `https` & most likely set the tls config. (default 'http')
* `prometheus.io/path` Override the path for the metrics endpoint on the service. (default '/metrics')
* `prometheus.io/port` Used to override the port. (default 9102)

Using the `monitor_kubernetes_pods_namespace` option allows you to limit which pods you are scraping.

#### Bearer Token

If set, the file specified by the `bearer_token` parameter will be read on
each interval and its contents will be appended to the Bearer string in the
Authorization header.

### Usage for Caddy HTTP server

If you want to monitor Caddy, you need to use Caddy with its Prometheus plugin:

* Download Caddy+Prometheus plugin [here](https://caddyserver.com/download/linux/amd64?plugins=http.prometheus)
* Add the `prometheus` directive in your `CaddyFile`
* Restart Caddy
* Configure Telegraf to fetch metrics on it:

```toml
[[inputs.prometheus]]
#  ## An array of urls to scrape metrics from.
  urls = ["http://localhost:9180/metrics"]
```

> This is the default URL where Caddy Prometheus plugin will send data.
> For more details, please read the [Caddy Prometheus documentation](https://github.com/miekg/caddy-prometheus/blob/master/README.md).

### Metrics:

Measurement names are based on the Metric Family and tags are created for each
label. The value is added to a field named based on the metric type.

All metrics receive the `url` tag indicating the related URL specified in the
Telegraf configuration. If using Kubernetes service discovery the `address`
tag is also added indicating the discovered ip address.

### Example Output:

**Source**
```
# HELP go_gc_duration_seconds A summary of the GC invocation durations.
# TYPE go_gc_duration_seconds summary
go_gc_duration_seconds{quantile="0"} 7.4545e-05
go_gc_duration_seconds{quantile="0.25"} 7.6999e-05
go_gc_duration_seconds{quantile="0.5"} 0.000277935
go_gc_duration_seconds{quantile="0.75"} 0.000706591
go_gc_duration_seconds{quantile="1"} 0.000706591
go_gc_duration_seconds_sum 0.00113607
go_gc_duration_seconds_count 4
# HELP go_goroutines Number of goroutines that currently exist.
# TYPE go_goroutines gauge
go_goroutines 15
# HELP cpu_usage_user Telegraf collected metric
# TYPE cpu_usage_user gauge
cpu_usage_user{cpu="cpu0"} 1.4112903225816156
cpu_usage_user{cpu="cpu1"} 0.702106318955865
cpu_usage_user{cpu="cpu2"} 2.0161290322588776
cpu_usage_user{cpu="cpu3"} 1.5045135406226022
```

**Output**
```
go_gc_duration_seconds,url=http://example.org:9273/metrics 1=0.001336611,count=14,sum=0.004527551,0=0.000057965,0.25=0.000083812,0.5=0.000286537,0.75=0.000365303 1505776733000000000
go_goroutines,url=http://example.org:9273/metrics gauge=21 1505776695000000000
cpu_usage_user,cpu=cpu0,url=http://example.org:9273/metrics gauge=1.513622603430151 1505776751000000000
cpu_usage_user,cpu=cpu1,url=http://example.org:9273/metrics gauge=5.829145728641773 1505776751000000000
cpu_usage_user,cpu=cpu2,url=http://example.org:9273/metrics gauge=2.119071644805144 1505776751000000000
cpu_usage_user,cpu=cpu3,url=http://example.org:9273/metrics gauge=1.5228426395944945 1505776751000000000
```

**Output (when metric_version = 2)**
```
prometheus,quantile=1,url=http://example.org:9273/metrics go_gc_duration_seconds=0.005574303 1556075100000000000
prometheus,quantile=0.75,url=http://example.org:9273/metrics go_gc_duration_seconds=0.0001046 1556075100000000000
prometheus,quantile=0.5,url=http://example.org:9273/metrics go_gc_duration_seconds=0.0000719 1556075100000000000
prometheus,quantile=0.25,url=http://example.org:9273/metrics go_gc_duration_seconds=0.0000579 1556075100000000000
prometheus,quantile=0,url=http://example.org:9273/metrics go_gc_duration_seconds=0.0000349 1556075100000000000
prometheus,url=http://example.org:9273/metrics go_gc_duration_seconds_count=324,go_gc_duration_seconds_sum=0.091340353 1556075100000000000
prometheus,url=http://example.org:9273/metrics go_goroutines=15 1556075100000000000
prometheus,cpu=cpu0,url=http://example.org:9273/metrics cpu_usage_user=1.513622603430151 1505776751000000000
prometheus,cpu=cpu1,url=http://example.org:9273/metrics cpu_usage_user=5.829145728641773 1505776751000000000
prometheus,cpu=cpu2,url=http://example.org:9273/metrics cpu_usage_user=2.119071644805144 1505776751000000000
prometheus,cpu=cpu3,url=http://example.org:9273/metrics cpu_usage_user=1.5228426395944945 1505776751000000000
```
241 vendor/github.com/influxdata/telegraf/plugins/inputs/prometheus/kubernetes.go (generated, vendored) Normal file
@@ -0,0 +1,241 @@
package prometheus

import (
	"context"
	"fmt"
	"io/ioutil"
	"log"
	"net"
	"net/url"
	"os/user"
	"path/filepath"
	"sync"
	"time"

	"github.com/ericchiang/k8s"
	corev1 "github.com/ericchiang/k8s/apis/core/v1"
	"github.com/ghodss/yaml"
)

type payload struct {
	eventype string
	pod      *corev1.Pod
}

// loadClient parses a kubeconfig from a file and returns a Kubernetes
// client. It does not support extensions or client auth providers.
func loadClient(kubeconfigPath string) (*k8s.Client, error) {
	data, err := ioutil.ReadFile(kubeconfigPath)
	if err != nil {
		return nil, fmt.Errorf("failed reading '%s': %v", kubeconfigPath, err)
	}

	// Unmarshal YAML into a Kubernetes config object.
	var config k8s.Config
	if err := yaml.Unmarshal(data, &config); err != nil {
		return nil, err
	}
	return k8s.NewClient(&config)
}

func (p *Prometheus) start(ctx context.Context) error {
	client, err := k8s.NewInClusterClient()
	if err != nil {
		u, err := user.Current()
		if err != nil {
			return fmt.Errorf("Failed to get current user - %v", err)
		}

		configLocation := filepath.Join(u.HomeDir, ".kube/config")
		if p.KubeConfig != "" {
			configLocation = p.KubeConfig
		}
		client, err = loadClient(configLocation)
		if err != nil {
			return err
		}
	}

	p.wg = sync.WaitGroup{}

	p.wg.Add(1)
	go func() {
		defer p.wg.Done()
		for {
			select {
			case <-ctx.Done():
				return
			case <-time.After(time.Second):
				err := p.watch(ctx, client)
				if err != nil {
					p.Log.Errorf("Unable to watch resources: %s", err.Error())
				}
			}
		}
	}()

	return nil
}

// An edge case exists if a pod goes offline at the same time a new pod is created
// (without the scrape annotations). K8s may re-assign the old pod ip to the non-scrape
// pod, causing errors in the logs. This is only true if the pod going offline is not
// directed to do so by K8s.
func (p *Prometheus) watch(ctx context.Context, client *k8s.Client) error {

	selectors := podSelector(p)

	pod := &corev1.Pod{}
	watcher, err := client.Watch(ctx, p.PodNamespace, &corev1.Pod{}, selectors...)
	if err != nil {
		return err
	}
	defer watcher.Close()

	for {
		select {
		case <-ctx.Done():
			return nil
		default:
			pod = &corev1.Pod{}
			// An error here means we need to reconnect the watcher.
			eventType, err := watcher.Next(pod)
			if err != nil {
				return err
			}

			// If the pod is not "ready", there will be no ip associated with it.
			if pod.GetMetadata().GetAnnotations()["prometheus.io/scrape"] != "true" ||
				!podReady(pod.Status.GetContainerStatuses()) {
				continue
			}

			switch eventType {
			case k8s.EventAdded:
				registerPod(pod, p)
			case k8s.EventModified:
				// To avoid multiple actions for each event, unregister on the first event
				// in the delete sequence, when the containers are still "ready".
				if pod.Metadata.GetDeletionTimestamp() != nil {
					unregisterPod(pod, p)
				} else {
					registerPod(pod, p)
				}
			}
		}
	}
}

func podReady(statuss []*corev1.ContainerStatus) bool {
	if len(statuss) == 0 {
		return false
	}
	for _, cs := range statuss {
		if !cs.GetReady() {
			return false
		}
	}
	return true
}

func podSelector(p *Prometheus) []k8s.Option {
	options := []k8s.Option{}

	if len(p.KubernetesLabelSelector) > 0 {
		options = append(options, k8s.QueryParam("labelSelector", p.KubernetesLabelSelector))
	}

	if len(p.KubernetesFieldSelector) > 0 {
		options = append(options, k8s.QueryParam("fieldSelector", p.KubernetesFieldSelector))
	}

	return options

}

func registerPod(pod *corev1.Pod, p *Prometheus) {
	if p.kubernetesPods == nil {
		p.kubernetesPods = map[string]URLAndAddress{}
	}
	targetURL := getScrapeURL(pod)
	if targetURL == nil {
		return
	}

	log.Printf("D! [inputs.prometheus] will scrape metrics from %q", *targetURL)
	// add annotation as metrics tags
	tags := pod.GetMetadata().GetAnnotations()
	if tags == nil {
		tags = map[string]string{}
	}
	tags["pod_name"] = pod.GetMetadata().GetName()
	tags["namespace"] = pod.GetMetadata().GetNamespace()
	// add labels as metrics tags
	for k, v := range pod.GetMetadata().GetLabels() {
		tags[k] = v
	}
	URL, err := url.Parse(*targetURL)
	if err != nil {
		log.Printf("E! [inputs.prometheus] could not parse URL %q: %s", *targetURL, err.Error())
		return
	}
	podURL := p.AddressToURL(URL, URL.Hostname())
	p.lock.Lock()
	p.kubernetesPods[podURL.String()] = URLAndAddress{
		URL:         podURL,
		Address:     URL.Hostname(),
		OriginalURL: URL,
		Tags:        tags,
	}
	p.lock.Unlock()
}

func getScrapeURL(pod *corev1.Pod) *string {
	ip := pod.Status.GetPodIP()
	if ip == "" {
		// return as if scrape was disabled, we will be notified again once the pod
		// has an IP
		return nil
	}

	scheme := pod.GetMetadata().GetAnnotations()["prometheus.io/scheme"]
	path := pod.GetMetadata().GetAnnotations()["prometheus.io/path"]
	port := pod.GetMetadata().GetAnnotations()["prometheus.io/port"]

	if scheme == "" {
		scheme = "http"
	}
	if port == "" {
		port = "9102"
	}
	if path == "" {
		path = "/metrics"
	}

	u := &url.URL{
		Scheme: scheme,
		Host:   net.JoinHostPort(ip, port),
		Path:   path,
	}

	x := u.String()

	return &x
}

func unregisterPod(pod *corev1.Pod, p *Prometheus) {
	url := getScrapeURL(pod)
	if url == nil {
		return
	}

	log.Printf("D! [inputs.prometheus] registered a delete request for %q in namespace %q",
		pod.GetMetadata().GetName(), pod.GetMetadata().GetNamespace())

	p.lock.Lock()
	defer p.lock.Unlock()
	if _, ok := p.kubernetesPods[*url]; ok {
		delete(p.kubernetesPods, *url)
		log.Printf("D! [inputs.prometheus] will stop scraping for %q", *url)
	}
}
320 vendor/github.com/influxdata/telegraf/plugins/inputs/prometheus/parser.go (generated, vendored) Normal file
@@ -0,0 +1,320 @@
package prometheus

// Parser inspired from
// https://github.com/prometheus/prom2json/blob/master/main.go

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"math"
	"mime"
	"net/http"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/metric"
	"github.com/matttproud/golang_protobuf_extensions/pbutil"
	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

// ParseV2 returns a slice of Metrics from a text representation of a
// metrics
func ParseV2(buf []byte, header http.Header) ([]telegraf.Metric, error) {
	var metrics []telegraf.Metric
	var parser expfmt.TextParser
	// parse even if the buffer begins with a newline
	buf = bytes.TrimPrefix(buf, []byte("\n"))
	// Read raw data
	buffer := bytes.NewBuffer(buf)
	reader := bufio.NewReader(buffer)

	mediatype, params, err := mime.ParseMediaType(header.Get("Content-Type"))
	// Prepare output
	metricFamilies := make(map[string]*dto.MetricFamily)

	if err == nil && mediatype == "application/vnd.google.protobuf" &&
		params["encoding"] == "delimited" &&
		params["proto"] == "io.prometheus.client.MetricFamily" {
		for {
			mf := &dto.MetricFamily{}
			if _, ierr := pbutil.ReadDelimited(reader, mf); ierr != nil {
				if ierr == io.EOF {
					break
				}
				return nil, fmt.Errorf("reading metric family protocol buffer failed: %s", ierr)
			}
			metricFamilies[mf.GetName()] = mf
		}
	} else {
		metricFamilies, err = parser.TextToMetricFamilies(reader)
		if err != nil {
			return nil, fmt.Errorf("reading text format failed: %s", err)
		}
	}

	// make sure all metrics have a consistent timestamp so that metrics don't straddle two different seconds
	now := time.Now()
	// read metrics
	for metricName, mf := range metricFamilies {
		for _, m := range mf.Metric {
			// reading tags
			tags := makeLabels(m)

			if mf.GetType() == dto.MetricType_SUMMARY {
				// summary metric
				telegrafMetrics := makeQuantilesV2(m, tags, metricName, mf.GetType(), now)
				metrics = append(metrics, telegrafMetrics...)
			} else if mf.GetType() == dto.MetricType_HISTOGRAM {
				// histogram metric
				telegrafMetrics := makeBucketsV2(m, tags, metricName, mf.GetType(), now)
				metrics = append(metrics, telegrafMetrics...)
			} else {
				// standard metric
				// reading fields
				fields := getNameAndValueV2(m, metricName)
				// converting to telegraf metric
				if len(fields) > 0 {
					var t time.Time
					if m.TimestampMs != nil && *m.TimestampMs > 0 {
						t = time.Unix(0, *m.TimestampMs*1000000)
					} else {
						t = now
					}
					metric, err := metric.New("prometheus", tags, fields, t, valueType(mf.GetType()))
					if err == nil {
						metrics = append(metrics, metric)
					}
				}
			}
		}
	}

	return metrics, err
}

// Get Quantiles for summary metric & Buckets for histogram
func makeQuantilesV2(m *dto.Metric, tags map[string]string, metricName string, metricType dto.MetricType, now time.Time) []telegraf.Metric {
	var metrics []telegraf.Metric
	fields := make(map[string]interface{})
	var t time.Time
	if m.TimestampMs != nil && *m.TimestampMs > 0 {
		t = time.Unix(0, *m.TimestampMs*1000000)
	} else {
		t = now
	}
	fields[metricName+"_count"] = float64(m.GetSummary().GetSampleCount())
	fields[metricName+"_sum"] = float64(m.GetSummary().GetSampleSum())
	met, err := metric.New("prometheus", tags, fields, t, valueType(metricType))
	if err == nil {
		metrics = append(metrics, met)
	}

	for _, q := range m.GetSummary().Quantile {
		newTags := tags
		fields = make(map[string]interface{})

		newTags["quantile"] = fmt.Sprint(q.GetQuantile())
		fields[metricName] = float64(q.GetValue())

		quantileMetric, err := metric.New("prometheus", newTags, fields, t, valueType(metricType))
		if err == nil {
			metrics = append(metrics, quantileMetric)
		}
	}
	return metrics
}

// Get Buckets from histogram metric
func makeBucketsV2(m *dto.Metric, tags map[string]string, metricName string, metricType dto.MetricType, now time.Time) []telegraf.Metric {
	var metrics []telegraf.Metric
	fields := make(map[string]interface{})
	var t time.Time
	if m.TimestampMs != nil && *m.TimestampMs > 0 {
		t = time.Unix(0, *m.TimestampMs*1000000)
	} else {
		t = now
	}
	fields[metricName+"_count"] = float64(m.GetHistogram().GetSampleCount())
	fields[metricName+"_sum"] = float64(m.GetHistogram().GetSampleSum())

	met, err := metric.New("prometheus", tags, fields, t, valueType(metricType))
	if err == nil {
		metrics = append(metrics, met)
	}

	for _, b := range m.GetHistogram().Bucket {
		newTags := tags
		fields = make(map[string]interface{})
		newTags["le"] = fmt.Sprint(b.GetUpperBound())
		fields[metricName+"_bucket"] = float64(b.GetCumulativeCount())

		histogramMetric, err := metric.New("prometheus", newTags, fields, t, valueType(metricType))
		if err == nil {
			metrics = append(metrics, histogramMetric)
		}
	}
	return metrics
}

// Parse returns a slice of Metrics from a text representation of a
// metrics
func Parse(buf []byte, header http.Header) ([]telegraf.Metric, error) {
	var metrics []telegraf.Metric
	var parser expfmt.TextParser
	// parse even if the buffer begins with a newline
	buf = bytes.TrimPrefix(buf, []byte("\n"))
	// Read raw data
	buffer := bytes.NewBuffer(buf)
	reader := bufio.NewReader(buffer)

	mediatype, params, err := mime.ParseMediaType(header.Get("Content-Type"))
	// Prepare output
	metricFamilies := make(map[string]*dto.MetricFamily)

	if err == nil && mediatype == "application/vnd.google.protobuf" &&
		params["encoding"] == "delimited" &&
		params["proto"] == "io.prometheus.client.MetricFamily" {
		for {
			mf := &dto.MetricFamily{}
			if _, ierr := pbutil.ReadDelimited(reader, mf); ierr != nil {
				if ierr == io.EOF {
					break
				}
				return nil, fmt.Errorf("reading metric family protocol buffer failed: %s", ierr)
			}
			metricFamilies[mf.GetName()] = mf
		}
	} else {
		metricFamilies, err = parser.TextToMetricFamilies(reader)
		if err != nil {
			return nil, fmt.Errorf("reading text format failed: %s", err)
		}
	}

	// make sure all metrics have a consistent timestamp so that metrics don't straddle two different seconds
	now := time.Now()
	// read metrics
	for metricName, mf := range metricFamilies {
		for _, m := range mf.Metric {
			// reading tags
			tags := makeLabels(m)
			// reading fields
			var fields map[string]interface{}
			if mf.GetType() == dto.MetricType_SUMMARY {
				// summary metric
				fields = makeQuantiles(m)
				fields["count"] = float64(m.GetSummary().GetSampleCount())
				fields["sum"] = float64(m.GetSummary().GetSampleSum())
			} else if mf.GetType() == dto.MetricType_HISTOGRAM {
				// histogram metric
				fields = makeBuckets(m)
				fields["count"] = float64(m.GetHistogram().GetSampleCount())
				fields["sum"] = float64(m.GetHistogram().GetSampleSum())

			} else {
				// standard metric
				fields = getNameAndValue(m)
			}
			// converting to telegraf metric
			if len(fields) > 0 {
				var t time.Time
				if m.TimestampMs != nil && *m.TimestampMs > 0 {
					t = time.Unix(0, *m.TimestampMs*1000000)
				} else {
					t = now
				}
				metric, err := metric.New(metricName, tags, fields, t, valueType(mf.GetType()))
				if err == nil {
					metrics = append(metrics, metric)
				}
			}
		}
	}

	return metrics, err
}

func valueType(mt dto.MetricType) telegraf.ValueType {
	switch mt {
	case dto.MetricType_COUNTER:
		return telegraf.Counter
	case dto.MetricType_GAUGE:
		return telegraf.Gauge
	case dto.MetricType_SUMMARY:
		return telegraf.Summary
	case dto.MetricType_HISTOGRAM:
		return telegraf.Histogram
	default:
		return telegraf.Untyped
	}
}

// Get Quantiles from summary metric
func makeQuantiles(m *dto.Metric) map[string]interface{} {
	fields := make(map[string]interface{})
	for _, q := range m.GetSummary().Quantile {
		if !math.IsNaN(q.GetValue()) {
			fields[fmt.Sprint(q.GetQuantile())] = float64(q.GetValue())
		}
	}
	return fields
}

// Get Buckets from histogram metric
func makeBuckets(m *dto.Metric) map[string]interface{} {
	fields := make(map[string]interface{})
	for _, b := range m.GetHistogram().Bucket {
		fields[fmt.Sprint(b.GetUpperBound())] = float64(b.GetCumulativeCount())
	}
	return fields
}

// Get labels from metric
func makeLabels(m *dto.Metric) map[string]string {
	result := map[string]string{}
	for _, lp := range m.Label {
		result[lp.GetName()] = lp.GetValue()
	}
	return result
}

// Get name and value from metric
func getNameAndValue(m *dto.Metric) map[string]interface{} {
	fields := make(map[string]interface{})
	if m.Gauge != nil {
		if !math.IsNaN(m.GetGauge().GetValue()) {
			fields["gauge"] = float64(m.GetGauge().GetValue())
		}
	} else if m.Counter != nil {
		if !math.IsNaN(m.GetCounter().GetValue()) {
			fields["counter"] = float64(m.GetCounter().GetValue())
		}
	} else if m.Untyped != nil {
		if !math.IsNaN(m.GetUntyped().GetValue()) {
			fields["value"] = float64(m.GetUntyped().GetValue())
		}
	}
	return fields
}

// Get name and value from metric
func getNameAndValueV2(m *dto.Metric, metricName string) map[string]interface{} {
	fields := make(map[string]interface{})
	if m.Gauge != nil {
		if !math.IsNaN(m.GetGauge().GetValue()) {
			fields[metricName] = float64(m.GetGauge().GetValue())
		}
	} else if m.Counter != nil {
		if !math.IsNaN(m.GetCounter().GetValue()) {
			fields[metricName] = float64(m.GetCounter().GetValue())
		}
	} else if m.Untyped != nil {
		if !math.IsNaN(m.GetUntyped().GetValue()) {
			fields[metricName] = float64(m.GetUntyped().GetValue())
		}
	}
	return fields
}
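Both Parse and ParseV2 above fall back to expfmt's text parser when the response is not protobuf-delimited. A minimal sketch of that text-parsing step on its own, run against a made-up two-line exposition:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/common/expfmt"
)

func main() {
	// Hypothetical text exposition, the same format the parser functions
	// handle when the Content-Type is not protobuf-delimited.
	payload := "# TYPE go_goroutines gauge\ngo_goroutines 15\n"

	var parser expfmt.TextParser
	families, err := parser.TextToMetricFamilies(strings.NewReader(payload))
	if err != nil {
		panic(err)
	}
	for name, mf := range families {
		// One family with one gauge sample in this payload.
		fmt.Println(name, mf.GetType(), mf.Metric[0].GetGauge().GetValue())
	}
}
```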
398 vendor/github.com/influxdata/telegraf/plugins/inputs/prometheus/prometheus.go (generated, vendored) Normal file
@@ -0,0 +1,398 @@
package prometheus

import (
	"context"
	"errors"
	"fmt"
	"io/ioutil"
	"net"
	"net/http"
	"net/url"
	"sync"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/plugins/common/tls"
	"github.com/influxdata/telegraf/plugins/inputs"
)

const acceptHeader = `application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.7,text/plain;version=0.0.4;q=0.3,*/*;q=0.1`

type Prometheus struct {
	// An array of urls to scrape metrics from.
	URLs []string `toml:"urls"`

	// An array of Kubernetes services to scrape metrics from.
	KubernetesServices []string

	// Location of kubernetes config file
	KubeConfig string

	// Label Selector/s for Kubernetes
	KubernetesLabelSelector string `toml:"kubernetes_label_selector"`

	// Field Selector/s for Kubernetes
	KubernetesFieldSelector string `toml:"kubernetes_field_selector"`

	// Bearer Token authorization file path
	BearerToken       string `toml:"bearer_token"`
	BearerTokenString string `toml:"bearer_token_string"`

	// Basic authentication credentials
	Username string `toml:"username"`
	Password string `toml:"password"`

	ResponseTimeout internal.Duration `toml:"response_timeout"`

	MetricVersion int `toml:"metric_version"`

	URLTag string `toml:"url_tag"`

	tls.ClientConfig

	Log telegraf.Logger

	client *http.Client

	// Should we scrape Kubernetes services for prometheus annotations
	MonitorPods    bool   `toml:"monitor_kubernetes_pods"`
	PodNamespace   string `toml:"monitor_kubernetes_pods_namespace"`
	lock           sync.Mutex
	kubernetesPods map[string]URLAndAddress
	cancel         context.CancelFunc
	wg             sync.WaitGroup
}

var sampleConfig = `
  ## An array of urls to scrape metrics from.
  urls = ["http://localhost:9100/metrics"]

  ## Metric version controls the mapping from Prometheus metrics into
  ## Telegraf metrics. When using the prometheus_client output, use the same
  ## value in both plugins to ensure metrics are round-tripped without
  ## modification.
  ##
  ##   example: metric_version = 1; deprecated in 1.13
  ##            metric_version = 2; recommended version
  # metric_version = 1

  ## Url tag name (tag containing scrapped url. optional, default is "url")
  # url_tag = "scrapeUrl"

  ## An array of Kubernetes services to scrape metrics from.
  # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"]

  ## Kubernetes config file to create client from.
  # kube_config = "/path/to/kubernetes.config"

  ## Scrape Kubernetes pods for the following prometheus annotations:
  ## - prometheus.io/scrape: Enable scraping for this pod
  ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to
  ##     set this to 'https' & most likely set the tls config.
  ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation.
  ## - prometheus.io/port: If port is not 9102 use this annotation
  # monitor_kubernetes_pods = true
  ## Restricts Kubernetes monitoring to a single namespace
  ##   ex: monitor_kubernetes_pods_namespace = "default"
  # monitor_kubernetes_pods_namespace = ""
  # label selector to target pods which have the label
  # kubernetes_label_selector = "env=dev,app=nginx"
  # field selector to target pods
  # eg. To scrape pods on a specific node
  # kubernetes_field_selector = "spec.nodeName=$HOSTNAME"

  ## Use bearer token for authorization. ('bearer_token' takes priority)
  # bearer_token = "/path/to/bearer/token"
  ## OR
  # bearer_token_string = "abc_123"

  ## HTTP Basic Authentication username and password. ('bearer_token' and
  ## 'bearer_token_string' take priority)
  # username = ""
  # password = ""

  ## Specify timeout duration for slower prometheus clients (default is 3s)
  # response_timeout = "3s"

  ## Optional TLS Config
  # tls_ca = /path/to/cafile
  # tls_cert = /path/to/certfile
  # tls_key = /path/to/keyfile
  ## Use TLS but skip chain & host verification
  # insecure_skip_verify = false
`

func (p *Prometheus) SampleConfig() string {
	return sampleConfig
}

func (p *Prometheus) Description() string {
	return "Read metrics from one or many prometheus clients"
}

func (p *Prometheus) Init() error {
	if p.MetricVersion != 2 {
		p.Log.Warnf("Use of deprecated configuration: 'metric_version = 1'; please update to 'metric_version = 2'")
	}

	return nil
}

var ErrProtocolError = errors.New("prometheus protocol error")

func (p *Prometheus) AddressToURL(u *url.URL, address string) *url.URL {
	host := address
	if u.Port() != "" {
		host = address + ":" + u.Port()
	}
	reconstructedURL := &url.URL{
		Scheme:     u.Scheme,
		Opaque:     u.Opaque,
		User:       u.User,
		Path:       u.Path,
		RawPath:    u.RawPath,
		ForceQuery: u.ForceQuery,
		RawQuery:   u.RawQuery,
		Fragment:   u.Fragment,
		Host:       host,
	}
	return reconstructedURL
}

type URLAndAddress struct {
	OriginalURL *url.URL
	URL         *url.URL
	Address     string
	Tags        map[string]string
}

func (p *Prometheus) GetAllURLs() (map[string]URLAndAddress, error) {
	allURLs := make(map[string]URLAndAddress, 0)
	for _, u := range p.URLs {
		URL, err := url.Parse(u)
		if err != nil {
			p.Log.Errorf("Could not parse %q, skipping it. Error: %s", u, err.Error())
			continue
		}
		allURLs[URL.String()] = URLAndAddress{URL: URL, OriginalURL: URL}
	}

	p.lock.Lock()
	defer p.lock.Unlock()
	// loop through all pods scraped via the prometheus annotation on the pods
	for k, v := range p.kubernetesPods {
		allURLs[k] = v
	}

	for _, service := range p.KubernetesServices {
		URL, err := url.Parse(service)
		if err != nil {
			return nil, err
		}

		resolvedAddresses, err := net.LookupHost(URL.Hostname())
		if err != nil {
			p.Log.Errorf("Could not resolve %q, skipping it. Error: %s", URL.Host, err.Error())
			continue
		}
		for _, resolved := range resolvedAddresses {
			serviceURL := p.AddressToURL(URL, resolved)
			allURLs[serviceURL.String()] = URLAndAddress{
				URL:         serviceURL,
				Address:     resolved,
				OriginalURL: URL,
			}
		}
	}
	return allURLs, nil
}

// Reads stats from all configured servers accumulates stats.
// Returns one of the errors encountered while gather stats (if any).
func (p *Prometheus) Gather(acc telegraf.Accumulator) error {
	if p.client == nil {
		client, err := p.createHTTPClient()
		if err != nil {
			return err
		}
		p.client = client
	}

	var wg sync.WaitGroup

	allURLs, err := p.GetAllURLs()
	if err != nil {
		return err
	}
	for _, URL := range allURLs {
		wg.Add(1)
		go func(serviceURL URLAndAddress) {
			defer wg.Done()
			acc.AddError(p.gatherURL(serviceURL, acc))
		}(URL)
	}

	wg.Wait()

	return nil
}

func (p *Prometheus) createHTTPClient() (*http.Client, error) {
	tlsCfg, err := p.ClientConfig.TLSConfig()
	if err != nil {
		return nil, err
	}

	client := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig:   tlsCfg,
			DisableKeepAlives: true,
		},
		Timeout: p.ResponseTimeout.Duration,
	}

	return client, nil
}

func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error {
	var req *http.Request
	var err error
	var uClient *http.Client
	var metrics []telegraf.Metric
	if u.URL.Scheme == "unix" {
		path := u.URL.Query().Get("path")
		if path == "" {
			path = "/metrics"
		}
		addr := "http://localhost" + path
		req, err = http.NewRequest("GET", addr, nil)
		if err != nil {
			return fmt.Errorf("unable to create new request '%s': %s", addr, err)
		}

		// ignore error because it's been handled before getting here
		tlsCfg, _ := p.ClientConfig.TLSConfig()
		uClient = &http.Client{
			Transport: &http.Transport{
				TLSClientConfig:   tlsCfg,
				DisableKeepAlives: true,
				Dial: func(network, addr string) (net.Conn, error) {
					c, err := net.Dial("unix", u.URL.Path)
					return c, err
				},
			},
			Timeout: p.ResponseTimeout.Duration,
		}
	} else {
		if u.URL.Path == "" {
			u.URL.Path = "/metrics"
		}
		req, err = http.NewRequest("GET", u.URL.String(), nil)
		if err != nil {
			return fmt.Errorf("unable to create new request '%s': %s", u.URL.String(), err)
		}
	}

	req.Header.Add("Accept", acceptHeader)

	if p.BearerToken != "" {
		token, err := ioutil.ReadFile(p.BearerToken)
		if err != nil {
			return err
		}
		req.Header.Set("Authorization", "Bearer "+string(token))
	} else if p.BearerTokenString != "" {
		req.Header.Set("Authorization", "Bearer "+p.BearerTokenString)
	} else if p.Username != "" || p.Password != "" {
		req.SetBasicAuth(p.Username, p.Password)
	}

	var resp *http.Response
	if u.URL.Scheme != "unix" {
		resp, err = p.client.Do(req)
	} else {
		resp, err = uClient.Do(req)
	}
	if err != nil {
		return fmt.Errorf("error making HTTP request to %s: %s", u.URL, err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("%s returned HTTP status %s", u.URL, resp.Status)
	}

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return fmt.Errorf("error reading body: %s", err)
	}

	if p.MetricVersion == 2 {
		metrics, err = ParseV2(body, resp.Header)
	} else {
		metrics, err = Parse(body, resp.Header)
	}

	if err != nil {
		return fmt.Errorf("error reading metrics for %s: %s",
			u.URL, err)
	}

	for _, metric := range metrics {
		tags := metric.Tags()
		// strip user and password from URL
		u.OriginalURL.User = nil
		if p.URLTag != "" {
			tags[p.URLTag] = u.OriginalURL.String()
		}
		if u.Address != "" {
			tags["address"] = u.Address
		}
		for k, v := range u.Tags {
			tags[k] = v
		}

		switch metric.Type() {
		case telegraf.Counter:
			acc.AddCounter(metric.Name(), metric.Fields(), tags, metric.Time())
		case telegraf.Gauge:
			acc.AddGauge(metric.Name(), metric.Fields(), tags, metric.Time())
		case telegraf.Summary:
			acc.AddSummary(metric.Name(), metric.Fields(), tags, metric.Time())
		case telegraf.Histogram:
|
||||||
|
acc.AddHistogram(metric.Name(), metric.Fields(), tags, metric.Time())
|
||||||
|
default:
|
||||||
|
acc.AddFields(metric.Name(), metric.Fields(), tags, metric.Time())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start will start the Kubernetes scraping if enabled in the configuration
|
||||||
|
func (p *Prometheus) Start(a telegraf.Accumulator) error {
|
||||||
|
if p.MonitorPods {
|
||||||
|
var ctx context.Context
|
||||||
|
ctx, p.cancel = context.WithCancel(context.Background())
|
||||||
|
return p.start(ctx)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Prometheus) Stop() {
|
||||||
|
if p.MonitorPods {
|
||||||
|
p.cancel()
|
||||||
|
}
|
||||||
|
p.wg.Wait()
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
inputs.Add("prometheus", func() telegraf.Input {
|
||||||
|
return &Prometheus{
|
||||||
|
ResponseTimeout: internal.Duration{Duration: time.Second * 3},
|
||||||
|
kubernetesPods: map[string]URLAndAddress{},
|
||||||
|
URLTag: "url",
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
|
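The hunk above closes the prometheus input plugin file. As a hedged illustration only (not part of the vendored file), the sketch below shows how Gather could be exercised against a throwaway HTTP server using the mock Accumulator from the testutil package that follows; the exported fields URLs, URLTag and Log mirror the fields referenced in the code above, while the expected measurement name "test_metric" is an assumption about the parser's output for an untyped sample.

// Hypothetical test sketch, not part of this commit.
package prometheus_test

import (
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/influxdata/telegraf/plugins/inputs/prometheus"
	"github.com/influxdata/telegraf/testutil"
)

func TestGatherLocalEndpoint(t *testing.T) {
	// Serve a single sample in the Prometheus text exposition format.
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("test_metric 42\n"))
	}))
	defer ts.Close()

	p := &prometheus.Prometheus{
		URLs:   []string{ts.URL},
		URLTag: "url",
		Log:    testutil.Logger{},
	}

	var acc testutil.Accumulator
	if err := acc.GatherError(p.Gather); err != nil {
		t.Fatal(err)
	}
	if !acc.HasMeasurement("test_metric") {
		t.Fatal("expected test_metric to be gathered")
	}
}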
@ -1,744 +0,0 @@
package testutil

import (
	"encoding/json"
	"fmt"
	"reflect"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/stretchr/testify/assert"
)

var (
	lastID uint64
)

func newTrackingID() telegraf.TrackingID {
	id := atomic.AddUint64(&lastID, 1)
	return telegraf.TrackingID(id)
}

// Metric defines a single point measurement
type Metric struct {
	Measurement string
	Tags        map[string]string
	Fields      map[string]interface{}
	Time        time.Time
	Type        telegraf.ValueType
}

func (p *Metric) String() string {
	return fmt.Sprintf("%s %v %v", p.Measurement, p.Tags, p.Fields)
}

// Accumulator defines a mocked out accumulator
type Accumulator struct {
	sync.Mutex
	*sync.Cond

	Metrics   []*Metric
	nMetrics  uint64
	Discard   bool
	Errors    []error
	debug     bool
	delivered chan telegraf.DeliveryInfo

	TimeFunc func() time.Time
}

func (a *Accumulator) NMetrics() uint64 {
	return atomic.LoadUint64(&a.nMetrics)
}

func (a *Accumulator) GetTelegrafMetrics() []telegraf.Metric {
	metrics := []telegraf.Metric{}
	for _, m := range a.Metrics {
		metrics = append(metrics, FromTestMetric(m))
	}
	return metrics
}

func (a *Accumulator) FirstError() error {
	if len(a.Errors) == 0 {
		return nil
	}
	return a.Errors[0]
}

func (a *Accumulator) ClearMetrics() {
	a.Lock()
	defer a.Unlock()
	atomic.StoreUint64(&a.nMetrics, 0)
	a.Metrics = make([]*Metric, 0)
}

func (a *Accumulator) addFields(
	measurement string,
	tags map[string]string,
	fields map[string]interface{},
	tp telegraf.ValueType,
	timestamp ...time.Time,
) {
	a.Lock()
	defer a.Unlock()
	atomic.AddUint64(&a.nMetrics, 1)
	if a.Cond != nil {
		a.Cond.Broadcast()
	}
	if a.Discard {
		return
	}

	if len(fields) == 0 {
		return
	}

	tagsCopy := map[string]string{}
	for k, v := range tags {
		tagsCopy[k] = v
	}

	fieldsCopy := map[string]interface{}{}
	for k, v := range fields {
		fieldsCopy[k] = v
	}

	var t time.Time
	if len(timestamp) > 0 {
		t = timestamp[0]
	} else {
		if a.TimeFunc == nil {
			t = time.Now()
		} else {
			t = a.TimeFunc()
		}
	}

	if a.debug {
		pretty, _ := json.MarshalIndent(fields, "", "  ")
		prettyTags, _ := json.MarshalIndent(tags, "", "  ")
		msg := fmt.Sprintf("Adding Measurement [%s]\nFields:%s\nTags:%s\n",
			measurement, string(pretty), string(prettyTags))
		fmt.Print(msg)
	}

	p := &Metric{
		Measurement: measurement,
		Fields:      fieldsCopy,
		Tags:        tagsCopy,
		Time:        t,
		Type:        tp,
	}

	a.Metrics = append(a.Metrics, p)
}

// AddFields adds a measurement point with a specified timestamp.
func (a *Accumulator) AddFields(
	measurement string,
	fields map[string]interface{},
	tags map[string]string,
	timestamp ...time.Time,
) {
	a.addFields(measurement, tags, fields, telegraf.Untyped, timestamp...)
}

func (a *Accumulator) AddCounter(
	measurement string,
	fields map[string]interface{},
	tags map[string]string,
	timestamp ...time.Time,
) {
	a.addFields(measurement, tags, fields, telegraf.Counter, timestamp...)
}

func (a *Accumulator) AddGauge(
	measurement string,
	fields map[string]interface{},
	tags map[string]string,
	timestamp ...time.Time,
) {
	a.addFields(measurement, tags, fields, telegraf.Gauge, timestamp...)
}

func (a *Accumulator) AddMetrics(metrics []telegraf.Metric) {
	for _, m := range metrics {
		a.addFields(m.Name(), m.Tags(), m.Fields(), m.Type(), m.Time())
	}
}

func (a *Accumulator) AddSummary(
	measurement string,
	fields map[string]interface{},
	tags map[string]string,
	timestamp ...time.Time,
) {
	a.addFields(measurement, tags, fields, telegraf.Summary, timestamp...)
}

func (a *Accumulator) AddHistogram(
	measurement string,
	fields map[string]interface{},
	tags map[string]string,
	timestamp ...time.Time,
) {
	a.addFields(measurement, tags, fields, telegraf.Histogram, timestamp...)
}

func (a *Accumulator) AddMetric(m telegraf.Metric) {
	a.addFields(m.Name(), m.Tags(), m.Fields(), m.Type(), m.Time())
}

func (a *Accumulator) WithTracking(maxTracked int) telegraf.TrackingAccumulator {
	return a
}

func (a *Accumulator) AddTrackingMetric(m telegraf.Metric) telegraf.TrackingID {
	a.AddMetric(m)
	return newTrackingID()
}

func (a *Accumulator) AddTrackingMetricGroup(group []telegraf.Metric) telegraf.TrackingID {
	for _, m := range group {
		a.AddMetric(m)
	}
	return newTrackingID()
}

func (a *Accumulator) Delivered() <-chan telegraf.DeliveryInfo {
	a.Lock()
	if a.delivered == nil {
		a.delivered = make(chan telegraf.DeliveryInfo)
	}
	a.Unlock()
	return a.delivered
}

// AddError appends the given error to Accumulator.Errors.
func (a *Accumulator) AddError(err error) {
	if err == nil {
		return
	}
	a.Lock()
	a.Errors = append(a.Errors, err)
	if a.Cond != nil {
		a.Cond.Broadcast()
	}
	a.Unlock()
}

func (a *Accumulator) SetPrecision(precision time.Duration) {
	return
}

func (a *Accumulator) DisablePrecision() {
	return
}

func (a *Accumulator) Debug() bool {
	// stub for implementing Accumulator interface.
	return a.debug
}

func (a *Accumulator) SetDebug(debug bool) {
	// stub for implementing Accumulator interface.
	a.debug = debug
}

// Get gets the specified measurement point from the accumulator
func (a *Accumulator) Get(measurement string) (*Metric, bool) {
	for _, p := range a.Metrics {
		if p.Measurement == measurement {
			return p, true
		}
	}

	return nil, false
}

func (a *Accumulator) HasTag(measurement string, key string) bool {
	for _, p := range a.Metrics {
		if p.Measurement == measurement {
			_, ok := p.Tags[key]
			return ok
		}
	}
	return false
}

func (a *Accumulator) TagSetValue(measurement string, key string) string {
	for _, p := range a.Metrics {
		if p.Measurement == measurement {
			v, ok := p.Tags[key]
			if ok {
				return v
			}
		}
	}
	return ""
}

func (a *Accumulator) TagValue(measurement string, key string) string {
	for _, p := range a.Metrics {
		if p.Measurement == measurement {
			v, ok := p.Tags[key]
			if !ok {
				return ""
			}
			return v
		}
	}
	return ""
}

// GatherError calls the given Gather function and returns the first error found.
func (a *Accumulator) GatherError(gf func(telegraf.Accumulator) error) error {
	if err := gf(a); err != nil {
		return err
	}
	if len(a.Errors) > 0 {
		return a.Errors[0]
	}
	return nil
}

// NFields returns the total number of fields in the accumulator, across all
// measurements
func (a *Accumulator) NFields() int {
	a.Lock()
	defer a.Unlock()
	counter := 0
	for _, pt := range a.Metrics {
		for range pt.Fields {
			counter++
		}
	}
	return counter
}

// Wait waits for the given number of metrics to be added to the accumulator.
func (a *Accumulator) Wait(n int) {
	a.Lock()
	defer a.Unlock()
	if a.Cond == nil {
		a.Cond = sync.NewCond(&a.Mutex)
	}
	for int(a.NMetrics()) < n {
		a.Cond.Wait()
	}
}

// WaitError waits for the given number of errors to be added to the accumulator.
func (a *Accumulator) WaitError(n int) {
	a.Lock()
	if a.Cond == nil {
		a.Cond = sync.NewCond(&a.Mutex)
	}
	for len(a.Errors) < n {
		a.Cond.Wait()
	}
	a.Unlock()
}

func (a *Accumulator) AssertContainsTaggedFields(
	t *testing.T,
	measurement string,
	fields map[string]interface{},
	tags map[string]string,
) {
	a.Lock()
	defer a.Unlock()
	for _, p := range a.Metrics {
		if !reflect.DeepEqual(tags, p.Tags) {
			continue
		}

		if p.Measurement == measurement && reflect.DeepEqual(fields, p.Fields) {
			return
		}
	}
	// We've failed. Spit out some debug logging.
	for _, p := range a.Metrics {
		if p.Measurement == measurement {
			t.Log("measurement", p.Measurement, "tags", p.Tags, "fields", p.Fields)
		}
	}
	msg := fmt.Sprintf("unknown measurement %q with tags %v", measurement, tags)
	assert.Fail(t, msg)
}

func (a *Accumulator) AssertDoesNotContainsTaggedFields(
	t *testing.T,
	measurement string,
	fields map[string]interface{},
	tags map[string]string,
) {
	a.Lock()
	defer a.Unlock()
	for _, p := range a.Metrics {
		if !reflect.DeepEqual(tags, p.Tags) {
			continue
		}

		if p.Measurement == measurement && reflect.DeepEqual(fields, p.Fields) {
			msg := fmt.Sprintf(
				"found measurement %s with tagged fields (tags %v) which should not be there",
				measurement, tags)
			assert.Fail(t, msg)
		}
	}
	return
}

func (a *Accumulator) AssertContainsFields(
	t *testing.T,
	measurement string,
	fields map[string]interface{},
) {
	a.Lock()
	defer a.Unlock()
	for _, p := range a.Metrics {
		if p.Measurement == measurement {
			assert.Equal(t, fields, p.Fields)
			return
		}
	}
	msg := fmt.Sprintf("unknown measurement %q", measurement)
	assert.Fail(t, msg)
}

func (a *Accumulator) HasPoint(
	measurement string,
	tags map[string]string,
	fieldKey string,
	fieldValue interface{},
) bool {
	a.Lock()
	defer a.Unlock()
	for _, p := range a.Metrics {
		if p.Measurement != measurement {
			continue
		}

		if !reflect.DeepEqual(tags, p.Tags) {
			continue
		}

		v, ok := p.Fields[fieldKey]
		if ok && reflect.DeepEqual(v, fieldValue) {
			return true
		}
	}
	return false
}

func (a *Accumulator) AssertDoesNotContainMeasurement(t *testing.T, measurement string) {
	a.Lock()
	defer a.Unlock()
	for _, p := range a.Metrics {
		if p.Measurement == measurement {
			msg := fmt.Sprintf("found unexpected measurement %s", measurement)
			assert.Fail(t, msg)
		}
	}
}

// HasTimestamp returns true if the measurement has a matching Time value
func (a *Accumulator) HasTimestamp(measurement string, timestamp time.Time) bool {
	a.Lock()
	defer a.Unlock()
	for _, p := range a.Metrics {
		if p.Measurement == measurement {
			return timestamp.Equal(p.Time)
		}
	}

	return false
}

// HasField returns true if the given measurement has a field with the given
// name
func (a *Accumulator) HasField(measurement string, field string) bool {
	a.Lock()
	defer a.Unlock()
	for _, p := range a.Metrics {
		if p.Measurement == measurement {
			if _, ok := p.Fields[field]; ok {
				return true
			}
		}
	}

	return false
}

// HasIntField returns true if the measurement has an Int value
func (a *Accumulator) HasIntField(measurement string, field string) bool {
	a.Lock()
	defer a.Unlock()
	for _, p := range a.Metrics {
		if p.Measurement == measurement {
			for fieldname, value := range p.Fields {
				if fieldname == field {
					_, ok := value.(int)
					return ok
				}
			}
		}
	}

	return false
}

// HasInt64Field returns true if the measurement has an Int64 value
func (a *Accumulator) HasInt64Field(measurement string, field string) bool {
	a.Lock()
	defer a.Unlock()
	for _, p := range a.Metrics {
		if p.Measurement == measurement {
			for fieldname, value := range p.Fields {
				if fieldname == field {
					_, ok := value.(int64)
					return ok
				}
			}
		}
	}

	return false
}

// HasInt32Field returns true if the measurement has an Int32 value
func (a *Accumulator) HasInt32Field(measurement string, field string) bool {
	a.Lock()
	defer a.Unlock()
	for _, p := range a.Metrics {
		if p.Measurement == measurement {
			for fieldname, value := range p.Fields {
				if fieldname == field {
					_, ok := value.(int32)
					return ok
				}
			}
		}
	}

	return false
}

// HasStringField returns true if the measurement has a String value
func (a *Accumulator) HasStringField(measurement string, field string) bool {
	a.Lock()
	defer a.Unlock()
	for _, p := range a.Metrics {
		if p.Measurement == measurement {
			for fieldname, value := range p.Fields {
				if fieldname == field {
					_, ok := value.(string)
					return ok
				}
			}
		}
	}

	return false
}

// HasUIntField returns true if the measurement has a UInt value
func (a *Accumulator) HasUIntField(measurement string, field string) bool {
	a.Lock()
	defer a.Unlock()
	for _, p := range a.Metrics {
		if p.Measurement == measurement {
			for fieldname, value := range p.Fields {
				if fieldname == field {
					_, ok := value.(uint64)
					return ok
				}
			}
		}
	}

	return false
}

// HasFloatField returns true if the given measurement has a float value
func (a *Accumulator) HasFloatField(measurement string, field string) bool {
	a.Lock()
	defer a.Unlock()
	for _, p := range a.Metrics {
		if p.Measurement == measurement {
			for fieldname, value := range p.Fields {
				if fieldname == field {
					_, ok := value.(float64)
					return ok
				}
			}
		}
	}

	return false
}

// HasMeasurement returns true if the accumulator has a measurement with the
// given name
func (a *Accumulator) HasMeasurement(measurement string) bool {
	a.Lock()
	defer a.Unlock()
	for _, p := range a.Metrics {
		if p.Measurement == measurement {
			return true
		}
	}
	return false
}

// IntField returns the int value of the given measurement and field or false.
func (a *Accumulator) IntField(measurement string, field string) (int, bool) {
	a.Lock()
	defer a.Unlock()
	for _, p := range a.Metrics {
		if p.Measurement == measurement {
			for fieldname, value := range p.Fields {
				if fieldname == field {
					v, ok := value.(int)
					return v, ok
				}
			}
		}
	}

	return 0, false
}

// Int64Field returns the int64 value of the given measurement and field or false.
func (a *Accumulator) Int64Field(measurement string, field string) (int64, bool) {
	a.Lock()
	defer a.Unlock()
	for _, p := range a.Metrics {
		if p.Measurement == measurement {
			for fieldname, value := range p.Fields {
				if fieldname == field {
					v, ok := value.(int64)
					return v, ok
				}
			}
		}
	}

	return 0, false
}

// Uint64Field returns the uint64 value of the given measurement and field or false.
func (a *Accumulator) Uint64Field(measurement string, field string) (uint64, bool) {
	a.Lock()
	defer a.Unlock()
	for _, p := range a.Metrics {
		if p.Measurement == measurement {
			for fieldname, value := range p.Fields {
				if fieldname == field {
					v, ok := value.(uint64)
					return v, ok
				}
			}
		}
	}

	return 0, false
}

// Int32Field returns the int32 value of the given measurement and field or false.
func (a *Accumulator) Int32Field(measurement string, field string) (int32, bool) {
	a.Lock()
	defer a.Unlock()
	for _, p := range a.Metrics {
		if p.Measurement == measurement {
			for fieldname, value := range p.Fields {
				if fieldname == field {
					v, ok := value.(int32)
					return v, ok
				}
			}
		}
	}

	return 0, false
}

// FloatField returns the float64 value of the given measurement and field or false.
func (a *Accumulator) FloatField(measurement string, field string) (float64, bool) {
	a.Lock()
	defer a.Unlock()
	for _, p := range a.Metrics {
		if p.Measurement == measurement {
			for fieldname, value := range p.Fields {
				if fieldname == field {
					v, ok := value.(float64)
					return v, ok
				}
			}
		}
	}

	return 0.0, false
}

// StringField returns the string value of the given measurement and field or false.
func (a *Accumulator) StringField(measurement string, field string) (string, bool) {
	a.Lock()
	defer a.Unlock()
	for _, p := range a.Metrics {
		if p.Measurement == measurement {
			for fieldname, value := range p.Fields {
				if fieldname == field {
					v, ok := value.(string)
					return v, ok
				}
			}
		}
	}
	return "", false
}

// BoolField returns the bool value of the given measurement and field or false.
func (a *Accumulator) BoolField(measurement string, field string) (bool, bool) {
	a.Lock()
	defer a.Unlock()
	for _, p := range a.Metrics {
		if p.Measurement == measurement {
			for fieldname, value := range p.Fields {
				if fieldname == field {
					v, ok := value.(bool)
					return v, ok
				}
			}
		}
	}

	return false, false
}

// NopAccumulator is used for benchmarking to isolate the plugin from the internal
// telegraf accumulator machinery.
type NopAccumulator struct{}

func (n *NopAccumulator) AddFields(measurement string, fields map[string]interface{}, tags map[string]string, t ...time.Time) {
}
func (n *NopAccumulator) AddGauge(measurement string, fields map[string]interface{}, tags map[string]string, t ...time.Time) {
}
func (n *NopAccumulator) AddCounter(measurement string, fields map[string]interface{}, tags map[string]string, t ...time.Time) {
}
func (n *NopAccumulator) AddSummary(measurement string, fields map[string]interface{}, tags map[string]string, t ...time.Time) {
}
func (n *NopAccumulator) AddHistogram(measurement string, fields map[string]interface{}, tags map[string]string, t ...time.Time) {
}
func (n *NopAccumulator) AddMetric(telegraf.Metric)                                {}
func (n *NopAccumulator) SetPrecision(precision time.Duration)                     {}
func (n *NopAccumulator) AddError(err error)                                       {}
func (n *NopAccumulator) WithTracking(maxTracked int) telegraf.TrackingAccumulator { return nil }
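A brief, hypothetical sketch of how this mock Accumulator is typically driven from a plugin test; the plugin argument and the "cpu"/"usage_idle" names are placeholders for illustration, not anything defined in the file above.

// Hypothetical helper in package testutil context; names are illustrative only.
func gatherAndCheck(t *testing.T, plugin telegraf.Input) {
	var acc Accumulator
	// Run the plugin once and surface the first gather error, if any.
	if err := acc.GatherError(plugin.Gather); err != nil {
		t.Fatal(err)
	}
	if !acc.HasMeasurement("cpu") {
		t.Fatal("expected a cpu measurement")
	}
	if v, ok := acc.FloatField("cpu", "usage_idle"); ok {
		t.Logf("usage_idle = %v", v)
	}
}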
@ -1,54 +0,0 @@
package testutil

import (
	"log"

	"github.com/influxdata/telegraf"
)

var _ telegraf.Logger = &Logger{}

// Logger defines a logging structure for plugins.
type Logger struct {
	Name string // Name is the plugin name, will be printed in the `[]`.
}

// Errorf logs an error message, patterned after log.Printf.
func (l Logger) Errorf(format string, args ...interface{}) {
	log.Printf("E! ["+l.Name+"] "+format, args...)
}

// Error logs an error message, patterned after log.Print.
func (l Logger) Error(args ...interface{}) {
	log.Print(append([]interface{}{"E! [" + l.Name + "] "}, args...)...)
}

// Debugf logs a debug message, patterned after log.Printf.
func (l Logger) Debugf(format string, args ...interface{}) {
	log.Printf("D! ["+l.Name+"] "+format, args...)
}

// Debug logs a debug message, patterned after log.Print.
func (l Logger) Debug(args ...interface{}) {
	log.Print(append([]interface{}{"D! [" + l.Name + "] "}, args...)...)
}

// Warnf logs a warning message, patterned after log.Printf.
func (l Logger) Warnf(format string, args ...interface{}) {
	log.Printf("W! ["+l.Name+"] "+format, args...)
}

// Warn logs a warning message, patterned after log.Print.
func (l Logger) Warn(args ...interface{}) {
	log.Print(append([]interface{}{"W! [" + l.Name + "] "}, args...)...)
}

// Infof logs an information message, patterned after log.Printf.
func (l Logger) Infof(format string, args ...interface{}) {
	log.Printf("I! ["+l.Name+"] "+format, args...)
}

// Info logs an information message, patterned after log.Print.
func (l Logger) Info(args ...interface{}) {
	log.Print(append([]interface{}{"I! [" + l.Name + "] "}, args...)...)
}
@ -1,205 +0,0 @@
package testutil

import (
	"reflect"
	"sort"
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/metric"
)

type metricDiff struct {
	Measurement string
	Tags        []*telegraf.Tag
	Fields      []*telegraf.Field
	Type        telegraf.ValueType
	Time        time.Time
}

func lessFunc(lhs, rhs *metricDiff) bool {
	if lhs.Measurement != rhs.Measurement {
		return lhs.Measurement < rhs.Measurement
	}

	for i := 0; ; i++ {
		if i >= len(lhs.Tags) && i >= len(rhs.Tags) {
			break
		} else if i >= len(lhs.Tags) {
			return true
		} else if i >= len(rhs.Tags) {
			return false
		}

		if lhs.Tags[i].Key != rhs.Tags[i].Key {
			return lhs.Tags[i].Key < rhs.Tags[i].Key
		}
		if lhs.Tags[i].Value != rhs.Tags[i].Value {
			return lhs.Tags[i].Value < rhs.Tags[i].Value
		}
	}

	for i := 0; ; i++ {
		if i >= len(lhs.Fields) && i >= len(rhs.Fields) {
			break
		} else if i >= len(lhs.Fields) {
			return true
		} else if i >= len(rhs.Fields) {
			return false
		}

		if lhs.Fields[i].Key != rhs.Fields[i].Key {
			return lhs.Fields[i].Key < rhs.Fields[i].Key
		}

		if lhs.Fields[i].Value != rhs.Fields[i].Value {
			ltype := reflect.TypeOf(lhs.Fields[i].Value)
			rtype := reflect.TypeOf(rhs.Fields[i].Value)

			if ltype.Kind() != rtype.Kind() {
				return ltype.Kind() < rtype.Kind()
			}

			switch v := lhs.Fields[i].Value.(type) {
			case int64:
				return v < rhs.Fields[i].Value.(int64)
			case uint64:
				return v < rhs.Fields[i].Value.(uint64)
			case float64:
				return v < rhs.Fields[i].Value.(float64)
			case string:
				return v < rhs.Fields[i].Value.(string)
			case bool:
				return !v
			default:
				panic("unknown type")
			}
		}
	}

	if lhs.Type != rhs.Type {
		return lhs.Type < rhs.Type
	}

	if lhs.Time.UnixNano() != rhs.Time.UnixNano() {
		return lhs.Time.UnixNano() < rhs.Time.UnixNano()
	}

	return false
}

func newMetricDiff(metric telegraf.Metric) *metricDiff {
	if metric == nil {
		return nil
	}

	m := &metricDiff{}
	m.Measurement = metric.Name()

	for _, tag := range metric.TagList() {
		m.Tags = append(m.Tags, tag)
	}
	sort.Slice(m.Tags, func(i, j int) bool {
		return m.Tags[i].Key < m.Tags[j].Key
	})

	for _, field := range metric.FieldList() {
		m.Fields = append(m.Fields, field)
	}
	sort.Slice(m.Fields, func(i, j int) bool {
		return m.Fields[i].Key < m.Fields[j].Key
	})

	m.Type = metric.Type()
	m.Time = metric.Time()
	return m
}

// SortMetrics enables sorting metrics before comparison.
func SortMetrics() cmp.Option {
	return cmpopts.SortSlices(lessFunc)
}

// IgnoreTime disables comparison of timestamps.
func IgnoreTime() cmp.Option {
	return cmpopts.IgnoreFields(metricDiff{}, "Time")
}

// MetricEqual returns true if the metrics are equal.
func MetricEqual(expected, actual telegraf.Metric, opts ...cmp.Option) bool {
	var lhs, rhs *metricDiff
	if expected != nil {
		lhs = newMetricDiff(expected)
	}
	if actual != nil {
		rhs = newMetricDiff(actual)
	}

	opts = append(opts, cmpopts.EquateNaNs())
	return cmp.Equal(lhs, rhs, opts...)
}

// RequireMetricEqual halts the test with an error if the metrics are not
// equal.
func RequireMetricEqual(t *testing.T, expected, actual telegraf.Metric, opts ...cmp.Option) {
	t.Helper()

	var lhs, rhs *metricDiff
	if expected != nil {
		lhs = newMetricDiff(expected)
	}
	if actual != nil {
		rhs = newMetricDiff(actual)
	}

	opts = append(opts, cmpopts.EquateNaNs())
	if diff := cmp.Diff(lhs, rhs, opts...); diff != "" {
		t.Fatalf("telegraf.Metric\n--- expected\n+++ actual\n%s", diff)
	}
}

// RequireMetricsEqual halts the test with an error if the arrays of metrics
// are not equal.
func RequireMetricsEqual(t *testing.T, expected, actual []telegraf.Metric, opts ...cmp.Option) {
	t.Helper()

	lhs := make([]*metricDiff, 0, len(expected))
	for _, m := range expected {
		lhs = append(lhs, newMetricDiff(m))
	}
	rhs := make([]*metricDiff, 0, len(actual))
	for _, m := range actual {
		rhs = append(rhs, newMetricDiff(m))
	}

	opts = append(opts, cmpopts.EquateNaNs())
	if diff := cmp.Diff(lhs, rhs, opts...); diff != "" {
		t.Fatalf("[]telegraf.Metric\n--- expected\n+++ actual\n%s", diff)
	}
}

// MustMetric creates a new metric or panics on error.
func MustMetric(
	name string,
	tags map[string]string,
	fields map[string]interface{},
	tm time.Time,
	tp ...telegraf.ValueType,
) telegraf.Metric {
	m, err := metric.New(name, tags, fields, tm, tp...)
	if err != nil {
		panic("MustMetric")
	}
	return m
}

func FromTestMetric(met *Metric) telegraf.Metric {
	m, err := metric.New(met.Measurement, met.Tags, met.Fields, met.Time, met.Type)
	if err != nil {
		panic("MustMetric")
	}
	return m
}
@ -1,65 +0,0 @@
package testutil

import (
	"net"
	"net/url"
	"os"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/metric"
)

var localhost = "localhost"

// GetLocalHost returns the DOCKER_HOST environment variable, parsing
// out any scheme or ports so that only the IP address is returned.
func GetLocalHost() string {
	if dockerHostVar := os.Getenv("DOCKER_HOST"); dockerHostVar != "" {
		u, err := url.Parse(dockerHostVar)
		if err != nil {
			return dockerHostVar
		}

		// split out the ip addr from the port
		host, _, err := net.SplitHostPort(u.Host)
		if err != nil {
			return dockerHostVar
		}

		return host
	}
	return localhost
}

// MockMetrics returns a mock []telegraf.Metric object for use in unit tests
// of telegraf output sinks.
func MockMetrics() []telegraf.Metric {
	metrics := make([]telegraf.Metric, 0)
	// Create a new point batch
	metrics = append(metrics, TestMetric(1.0))
	return metrics
}

// TestMetric returns a simple test point:
//   measurement -> "test1" or name
//   tags -> "tag1":"value1"
//   value -> value
//   time -> time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)
func TestMetric(value interface{}, name ...string) telegraf.Metric {
	if value == nil {
		panic("Cannot use a nil value")
	}
	measurement := "test1"
	if len(name) > 0 {
		measurement = name[0]
	}
	tags := map[string]string{"tag1": "value1"}
	pt, _ := metric.New(
		measurement,
		tags,
		map[string]interface{}{"value": value},
		time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
	)
	return pt
}
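A hedged sketch tying these helpers together: MockMetrics above produces the "test1" point, and RequireMetricsEqual, SortMetrics and IgnoreTime from the comparison helpers earlier compare it against an expected metric built with MustMetric. The zero time passed to MustMetric only works here because timestamps are ignored; the testutil_example package name is hypothetical.

// Hypothetical test sketch, not part of this commit.
package testutil_example

import (
	"testing"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/testutil"
)

func TestMockMetricsShape(t *testing.T) {
	expected := []telegraf.Metric{
		testutil.MustMetric(
			"test1",
			map[string]string{"tag1": "value1"},
			map[string]interface{}{"value": 1.0},
			time.Unix(0, 0), // ignored below
		),
	}
	testutil.RequireMetricsEqual(t, expected, testutil.MockMetrics(),
		testutil.SortMetrics(), testutil.IgnoreTime())
}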
@ -1,101 +0,0 @@
package testutil

import (
	"fmt"
	"io/ioutil"
	"os"
	"path"

	"github.com/influxdata/telegraf/plugins/common/tls"
)

type pki struct {
	path string
}

func NewPKI(path string) *pki {
	return &pki{path: path}
}

func (p *pki) TLSClientConfig() *tls.ClientConfig {
	return &tls.ClientConfig{
		TLSCA:   p.CACertPath(),
		TLSCert: p.ClientCertPath(),
		TLSKey:  p.ClientKeyPath(),
	}
}

func (p *pki) TLSServerConfig() *tls.ServerConfig {
	return &tls.ServerConfig{
		TLSAllowedCACerts: []string{p.CACertPath()},
		TLSCert:           p.ServerCertPath(),
		TLSKey:            p.ServerKeyPath(),
		TLSCipherSuites:   []string{p.CipherSuite()},
		TLSMinVersion:     p.TLSMinVersion(),
		TLSMaxVersion:     p.TLSMaxVersion(),
	}
}

func (p *pki) ReadCACert() string {
	return readCertificate(p.CACertPath())
}

func (p *pki) CACertPath() string {
	return path.Join(p.path, "cacert.pem")
}

func (p *pki) CipherSuite() string {
	return "TLS_RSA_WITH_3DES_EDE_CBC_SHA"
}

func (p *pki) TLSMinVersion() string {
	return "TLS11"
}

func (p *pki) TLSMaxVersion() string {
	return "TLS12"
}

func (p *pki) ReadClientCert() string {
	return readCertificate(p.ClientCertPath())
}

func (p *pki) ClientCertPath() string {
	return path.Join(p.path, "clientcert.pem")
}

func (p *pki) ReadClientKey() string {
	return readCertificate(p.ClientKeyPath())
}

func (p *pki) ClientKeyPath() string {
	return path.Join(p.path, "clientkey.pem")
}

func (p *pki) ReadServerCert() string {
	return readCertificate(p.ServerCertPath())
}

func (p *pki) ServerCertPath() string {
	return path.Join(p.path, "servercert.pem")
}

func (p *pki) ReadServerKey() string {
	return readCertificate(p.ServerKeyPath())
}

func (p *pki) ServerKeyPath() string {
	return path.Join(p.path, "serverkey.pem")
}

func readCertificate(filename string) string {
	file, err := os.Open(filename)
	if err != nil {
		panic(fmt.Sprintf("opening %q: %v", filename, err))
	}
	octets, err := ioutil.ReadAll(file)
	if err != nil {
		panic(fmt.Sprintf("reading %q: %v", filename, err))
	}
	return string(octets)
}
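A short hedged sketch of how the pki helper above is typically used in a plugin test: point it at a directory of PEM files and hand the resulting config to the TLS-enabled side of the test. The "./testdata" path is an assumption, not something defined in this file.

// Hypothetical usage sketch in package testutil context; assumes cacert.pem,
// clientcert.pem, clientkey.pem, servercert.pem and serverkey.pem exist
// under ./testdata.
func exampleTLSConfigs() {
	pki := NewPKI("./testdata")

	clientCfg := pki.TLSClientConfig() // CA, client cert and key paths
	serverCfg := pki.TLSServerConfig() // server cert, key, allowed CAs, cipher and versions

	_ = clientCfg
	_ = serverCfg
}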
@ -116,13 +116,6 @@ github.com/golang/protobuf/ptypes/timestamp
 github.com/golang/protobuf/ptypes/wrappers
 # github.com/golang/snappy v0.0.1
 github.com/golang/snappy
-# github.com/google/go-cmp v0.5.2
-github.com/google/go-cmp/cmp
-github.com/google/go-cmp/cmp/cmpopts
-github.com/google/go-cmp/cmp/internal/diff
-github.com/google/go-cmp/cmp/internal/flags
-github.com/google/go-cmp/cmp/internal/function
-github.com/google/go-cmp/cmp/internal/value
 # github.com/google/go-github/v32 v32.1.0
 github.com/google/go-github/v32/github
 # github.com/google/go-querystring v1.0.0

@ -167,14 +160,16 @@ github.com/influxdata/telegraf/metric
 github.com/influxdata/telegraf/plugins/common/tls
 github.com/influxdata/telegraf/plugins/inputs
 github.com/influxdata/telegraf/plugins/inputs/elasticsearch
+github.com/influxdata/telegraf/plugins/inputs/github
+github.com/influxdata/telegraf/plugins/inputs/mongodb
 github.com/influxdata/telegraf/plugins/inputs/mysql
 github.com/influxdata/telegraf/plugins/inputs/mysql/v1
 github.com/influxdata/telegraf/plugins/inputs/mysql/v2
 github.com/influxdata/telegraf/plugins/inputs/nginx
+github.com/influxdata/telegraf/plugins/inputs/prometheus
 github.com/influxdata/telegraf/plugins/inputs/redis
 github.com/influxdata/telegraf/plugins/parsers/json
 github.com/influxdata/telegraf/selfstat
-github.com/influxdata/telegraf/testutil
 # github.com/jcmturner/gofork v1.0.0
 github.com/jcmturner/gofork/encoding/asn1
 github.com/jcmturner/gofork/x/crypto/pbkdf2