Add zookeeper and tengine collection (#574)

* add prober plugin for elasticsearch

* Add zookeeper and tengine plugins, and fill in the missing tests for the prober collection plugins

* Add a description for the zookeeper plugin

Co-authored-by: lynxcat <lynxcatdeng@gmail.com>
lynxcat 2021-02-03 14:43:39 +08:00 committed by GitHub
parent c48d8b93dd
commit 2d9bc50401
129 changed files with 4249 additions and 2900 deletions

1
etc/plugins/tengine.yml Normal file
View File

@ -0,0 +1 @@
mode: all # whitelist(default),all

View File

@ -0,0 +1 @@
mode: all # whitelist(default),all

7
go.mod
View File

@ -3,7 +3,7 @@ module github.com/didi/nightingale
go 1.12 go 1.12
require ( require (
github.com/Shopify/sarama v1.27.1 github.com/Shopify/sarama v1.27.2
github.com/cespare/xxhash v1.1.0 github.com/cespare/xxhash v1.1.0
github.com/codegangsta/negroni v1.0.0 github.com/codegangsta/negroni v1.0.0
github.com/coreos/go-oidc v2.2.1+incompatible github.com/coreos/go-oidc v2.2.1+incompatible
@ -13,13 +13,14 @@ require (
github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 // indirect github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 // indirect
github.com/gin-contrib/pprof v1.3.0 github.com/gin-contrib/pprof v1.3.0
github.com/gin-gonic/gin v1.6.3 github.com/gin-gonic/gin v1.6.3
github.com/glinton/ping v0.1.4-0.20200311211934-5ac87da8cd96 // indirect
github.com/go-sql-driver/mysql v1.5.0 github.com/go-sql-driver/mysql v1.5.0
github.com/google/uuid v1.1.2-0.20190416172445-c2e93f3ae59f github.com/google/uuid v1.1.2
github.com/gorilla/mux v1.7.3 github.com/gorilla/mux v1.7.3
github.com/hashicorp/golang-lru v0.5.4 github.com/hashicorp/golang-lru v0.5.4
github.com/hpcloud/tail v1.0.0 github.com/hpcloud/tail v1.0.0
github.com/influxdata/influxdb v1.8.0 github.com/influxdata/influxdb v1.8.0
github.com/influxdata/telegraf v1.16.2 github.com/influxdata/telegraf v1.17.2
github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65 // indirect github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65 // indirect
github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8 // indirect github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8 // indirect
github.com/m3db/m3 v0.15.17 github.com/m3db/m3 v0.15.17

25
go.sum
View File

@ -90,6 +90,8 @@ github.com/RoaringBitmap/roaring v0.4.21/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/sarama v1.27.1 h1:iUlzHymqWsITyttu6KxazcAz8WEj5FqcwFK/oEi7rE8= github.com/Shopify/sarama v1.27.1 h1:iUlzHymqWsITyttu6KxazcAz8WEj5FqcwFK/oEi7rE8=
github.com/Shopify/sarama v1.27.1/go.mod h1:g5s5osgELxgM+Md9Qni9rzo7Rbt+vvFQI4bt/Mc93II= github.com/Shopify/sarama v1.27.1/go.mod h1:g5s5osgELxgM+Md9Qni9rzo7Rbt+vvFQI4bt/Mc93II=
github.com/Shopify/sarama v1.27.2 h1:1EyY1dsxNDUQEv0O/4TsjosHI2CgB1uo9H/v56xzTxc=
github.com/Shopify/sarama v1.27.2/go.mod h1:g5s5osgELxgM+Md9Qni9rzo7Rbt+vvFQI4bt/Mc93II=
github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8= github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8=
@ -115,6 +117,7 @@ github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo
github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y= github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0= github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0=
github.com/apex/log v1.3.0 h1:1fyfbPvUwD10nMoh3hY6MXzvZShJQn9/ck7ATgAt5pA= github.com/apex/log v1.3.0 h1:1fyfbPvUwD10nMoh3hY6MXzvZShJQn9/ck7ATgAt5pA=
github.com/apex/log v1.3.0/go.mod h1:jd8Vpsr46WAe3EZSQ/IUMs2qQD/GOycT5rPWCO1yGcs= github.com/apex/log v1.3.0/go.mod h1:jd8Vpsr46WAe3EZSQ/IUMs2qQD/GOycT5rPWCO1yGcs=
@ -351,6 +354,7 @@ github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4=
github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA=
github.com/go-ping/ping v0.0.0-20201115131931-3300c582a663/go.mod h1:35JbSyV/BYqHwwRA6Zr1uVDm1637YlNOU61wI797NPI=
github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A= github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A=
github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q=
@ -398,6 +402,7 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt
github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc= github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc=
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v0.0.0-20170307001533-c9c7427a2a70/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@ -416,6 +421,7 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.1 h1:JFrFEBb2xKufg6XkJsJr+WbKb4FQlURi5RUcBveYu9k= github.com/google/go-cmp v0.5.1 h1:JFrFEBb2xKufg6XkJsJr+WbKb4FQlURi5RUcBveYu9k=
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM= github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM=
@ -439,6 +445,8 @@ github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2-0.20190416172445-c2e93f3ae59f h1:XXzyYlFbxK3kWfcmu3Wc+Tv8/QQl/VqwsWuSYF1Rj0s= github.com/google/uuid v1.1.2-0.20190416172445-c2e93f3ae59f h1:XXzyYlFbxK3kWfcmu3Wc+Tv8/QQl/VqwsWuSYF1Rj0s=
github.com/google/uuid v1.1.2-0.20190416172445-c2e93f3ae59f/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2-0.20190416172445-c2e93f3ae59f/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
@ -462,6 +470,7 @@ github.com/gorilla/sessions v1.2.0/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/z
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gosnmp/gosnmp v1.29.0/go.mod h1:Ux0YzU4nV5yDET7dNIijd0VST0BCy8ijBf+gTVFQeaM=
github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
@ -473,6 +482,7 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.14.1 h1:YuM9SXYy583fxvSOkzCDyBPCtY+/IMSHEG1dKFMLZsA= github.com/grpc-ecosystem/grpc-gateway v1.14.1 h1:YuM9SXYy583fxvSOkzCDyBPCtY+/IMSHEG1dKFMLZsA=
github.com/grpc-ecosystem/grpc-gateway v1.14.1/go.mod h1:6CwZWGDSPRJidgKAtJVvND6soZe6fT7iteq8wDPdhb0= github.com/grpc-ecosystem/grpc-gateway v1.14.1/go.mod h1:6CwZWGDSPRJidgKAtJVvND6soZe6fT7iteq8wDPdhb0=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0/go.mod h1:dk23l2BruuUzRP8wbybQbPn3J7sZga2QHICCeaEy5rQ= github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0/go.mod h1:dk23l2BruuUzRP8wbybQbPn3J7sZga2QHICCeaEy5rQ=
github.com/hashicorp/consul v1.2.1/go.mod h1:mFrjN1mfidgJfYP1xrJCF+AfRhr6Eaqhb2+sfyn/OOI= github.com/hashicorp/consul v1.2.1/go.mod h1:mFrjN1mfidgJfYP1xrJCF+AfRhr6Eaqhb2+sfyn/OOI=
@ -550,6 +560,8 @@ github.com/influxdata/tail v1.0.1-0.20200707181643-03a791b270e4/go.mod h1:VeiWgI
github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0= github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0=
github.com/influxdata/telegraf v1.16.2 h1:G988b0+CL2IVDft9V2ZUteKgnbp+eI7vtQ0liPhQKxw= github.com/influxdata/telegraf v1.16.2 h1:G988b0+CL2IVDft9V2ZUteKgnbp+eI7vtQ0liPhQKxw=
github.com/influxdata/telegraf v1.16.2/go.mod h1:LZ/6hlf60cwqGr8phfbRKf8x1HoAoqxoMpTp/iqcNXk= github.com/influxdata/telegraf v1.16.2/go.mod h1:LZ/6hlf60cwqGr8phfbRKf8x1HoAoqxoMpTp/iqcNXk=
github.com/influxdata/telegraf v1.17.2 h1:SfwhXtAbZeX4y56wLg3Ku5d88e2lmcBVIh7k2bg++84=
github.com/influxdata/telegraf v1.17.2/go.mod h1:lBWJl18yjThBjYAHnHeWFE3nI6mb4dTUUWB9HQFYOWs=
github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65 h1:vvyMtD5LTJc1W9sQKjDkAWdcg0478CszSdzlHtiAXCY= github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65 h1:vvyMtD5LTJc1W9sQKjDkAWdcg0478CszSdzlHtiAXCY=
github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65/go.mod h1:zApaNFpP/bTpQItGZNNUMISDMDAnTXu9UqJ4yT3ocz8= github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65/go.mod h1:zApaNFpP/bTpQItGZNNUMISDMDAnTXu9UqJ4yT3ocz8=
github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po= github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po=
@ -599,6 +611,7 @@ github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E
github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0= github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0=
github.com/kardianos/service v1.0.0/go.mod h1:8CzDhVuCuugtsHyZoTvsOBuvonN/UDBvl0kH+BUxvbo= github.com/kardianos/service v1.0.0/go.mod h1:8CzDhVuCuugtsHyZoTvsOBuvonN/UDBvl0kH+BUxvbo=
github.com/karrick/godirwalk v1.12.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= github.com/karrick/godirwalk v1.12.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
@ -702,6 +715,7 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0j
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mauricelam/genny v0.0.0-20180903214747-eb2c5232c885/go.mod h1:wRyVMWiOZeVj+MieWS5tIBBtJ3RtqqMbPsA5Z+t5b5U= github.com/mauricelam/genny v0.0.0-20180903214747-eb2c5232c885/go.mod h1:wRyVMWiOZeVj+MieWS5tIBBtJ3RtqqMbPsA5Z+t5b5U=
github.com/mdlayher/apcupsd v0.0.0-20190314144147-eb3dd99a75fe/go.mod h1:y3mw3VG+t0m20OMqpG8RQqw8cDXvShVb+L8Z8FEnebw= github.com/mdlayher/apcupsd v0.0.0-20190314144147-eb3dd99a75fe/go.mod h1:y3mw3VG+t0m20OMqpG8RQqw8cDXvShVb+L8Z8FEnebw=
github.com/mdlayher/apcupsd v0.0.0-20200608131503-2bf01da7bf1b/go.mod h1:WYK/Z/aXq9cbMFIL5ihcA4sX/r/3/WCas/Qvs/2fXcA=
github.com/mdlayher/genetlink v1.0.0/go.mod h1:0rJ0h4itni50A86M2kHcgS85ttZazNt7a8H2a2cw0Gc= github.com/mdlayher/genetlink v1.0.0/go.mod h1:0rJ0h4itni50A86M2kHcgS85ttZazNt7a8H2a2cw0Gc=
github.com/mdlayher/netlink v0.0.0-20190409211403-11939a169225/go.mod h1:eQB3mZE4aiYnlUsyGGCOpPETfdQq4Jhsgf1fk3cwQaA= github.com/mdlayher/netlink v0.0.0-20190409211403-11939a169225/go.mod h1:eQB3mZE4aiYnlUsyGGCOpPETfdQq4Jhsgf1fk3cwQaA=
github.com/mdlayher/netlink v1.0.0/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M= github.com/mdlayher/netlink v1.0.0/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M=
@ -753,9 +767,11 @@ github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxzi
github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/newrelic/newrelic-telemetry-sdk-go v0.2.0/go.mod h1:G9MqE/cHGv3Hx3qpYhfuyFUsGx2DpVcGi1iJIqTg+JQ= github.com/newrelic/newrelic-telemetry-sdk-go v0.2.0/go.mod h1:G9MqE/cHGv3Hx3qpYhfuyFUsGx2DpVcGi1iJIqTg+JQ=
github.com/newrelic/newrelic-telemetry-sdk-go v0.5.1/go.mod h1:2kY6OeOxrJ+RIQlVjWDc/pZlT3MIf30prs6drzMfJ6E=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nsqio/go-nsq v1.0.7/go.mod h1:XP5zaUs3pqf+Q71EqUJs3HYfBIqfK6G83WQMdNN+Ito= github.com/nsqio/go-nsq v1.0.7/go.mod h1:XP5zaUs3pqf+Q71EqUJs3HYfBIqfK6G83WQMdNN+Ito=
github.com/nsqio/go-nsq v1.0.8/go.mod h1:vKq36oyeVXgsS5Q8YEO7WghqidAVXQlcFxzQbQTuDEY=
github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU=
@ -863,6 +879,7 @@ github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDa
github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8= github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/prometheus v1.8.2-0.20200420081721-18254838fbe2/go.mod h1:ZnfuiMn3LNsry2q7ECmRe4WcscxmJSd2dIFpOi4w3lM= github.com/prometheus/prometheus v1.8.2-0.20200420081721-18254838fbe2/go.mod h1:ZnfuiMn3LNsry2q7ECmRe4WcscxmJSd2dIFpOi4w3lM=
github.com/prometheus/prometheus v2.5.0+incompatible/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rakyll/statik v0.1.6/go.mod h1:OEi9wJV/fMUAGx1eNjq75DKDsJVuEv1U0oYdX6GX8Zs= github.com/rakyll/statik v0.1.6/go.mod h1:OEi9wJV/fMUAGx1eNjq75DKDsJVuEv1U0oYdX6GX8Zs=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
@ -872,6 +889,7 @@ github.com/remeh/sizedwaitgroup v1.0.0/go.mod h1:3j2R4OIe/SeS6YDhICBy22RWjJC5eNC
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc= github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc=
github.com/rhysd/go-github-selfupdate v1.2.2/go.mod h1:khesvSyKcXDUxeySCedFh621iawCks0dS/QnHPcpCws= github.com/rhysd/go-github-selfupdate v1.2.2/go.mod h1:khesvSyKcXDUxeySCedFh621iawCks0dS/QnHPcpCws=
github.com/riemann/riemann-go-client v0.5.0/go.mod h1:FMiaOL8dgBnRfgwENzV0xlYJ2eCbV1o7yqVwOBLbShQ=
github.com/robfig/go-cache v0.0.0-20130306151617-9fc39e0dbf62 h1:pyecQtsPmlkCsMkYhT5iZ+sUXuwee+OvfuJjinEA3ko= github.com/robfig/go-cache v0.0.0-20130306151617-9fc39e0dbf62 h1:pyecQtsPmlkCsMkYhT5iZ+sUXuwee+OvfuJjinEA3ko=
github.com/robfig/go-cache v0.0.0-20130306151617-9fc39e0dbf62/go.mod h1:65XQgovT59RWatovFwnwocoUxiI/eENTnOY5GK3STuY= github.com/robfig/go-cache v0.0.0-20130306151617-9fc39e0dbf62/go.mod h1:65XQgovT59RWatovFwnwocoUxiI/eENTnOY5GK3STuY=
github.com/robskillington/gorename v0.0.0-20180424020013-52c7307cddd2/go.mod h1:CVTJ4xwzb/4H98jrd7NFgNoTAiL63scr2Pl7kqOcQAQ= github.com/robskillington/gorename v0.0.0-20180424020013-52c7307cddd2/go.mod h1:CVTJ4xwzb/4H98jrd7NFgNoTAiL63scr2Pl7kqOcQAQ=
@ -1019,6 +1037,7 @@ github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV
github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
github.com/vjeantet/grok v1.0.0/go.mod h1:/FWYEVYekkm+2VjcFmO9PufDU5FgXHUz9oy2EGqmQBo= github.com/vjeantet/grok v1.0.0/go.mod h1:/FWYEVYekkm+2VjcFmO9PufDU5FgXHUz9oy2EGqmQBo=
github.com/vjeantet/grok v1.0.1/go.mod h1:ax1aAchzC6/QMXMcyzHQGZWaW1l195+uMYIkCWPCNIo=
github.com/vmihailenco/msgpack v2.8.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= github.com/vmihailenco/msgpack v2.8.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk=
github.com/vmware/govmomi v0.19.0/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= github.com/vmware/govmomi v0.19.0/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU=
github.com/wavefronthq/wavefront-sdk-go v0.9.2/go.mod h1:hQI6y8M9OtTCtc0xdwh+dCER4osxXdEAeCpacjpDZEU= github.com/wavefronthq/wavefront-sdk-go v0.9.2/go.mod h1:hQI6y8M9OtTCtc0xdwh+dCER4osxXdEAeCpacjpDZEU=
@ -1150,6 +1169,7 @@ golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73r
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@ -1318,6 +1338,8 @@ google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfG
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200317114155-1f3552e48f24 h1:IGPykv426z7LZSVPlaPufOyphngM4at5uZ7x5alaFvE= google.golang.org/genproto v0.0.0-20200317114155-1f3552e48f24 h1:IGPykv426z7LZSVPlaPufOyphngM4at5uZ7x5alaFvE=
google.golang.org/genproto v0.0.0-20200317114155-1f3552e48f24/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200317114155-1f3552e48f24/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884 h1:fiNLklpBwWK1mth30Hlwk+fcdBmIALlgF5iy77O37Ig=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
@ -1336,6 +1358,8 @@ google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4= google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4=
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.33.1 h1:DGeFlSan2f+WEtCERJ4J9GJWk15TxUi8QGagfI87Xyc=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc h1:2gGKlE2+asNV9m7xrywl36YYNnBG5ZQ0r/BOOxqPpmk= gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc h1:2gGKlE2+asNV9m7xrywl36YYNnBG5ZQ0r/BOOxqPpmk=
@ -1393,6 +1417,7 @@ gopkg.in/src-d/go-git.v4 v4.13.1/go.mod h1:nx5NYcxdKxq5fpltdHnPa2Exj4Sx0EclMWZQb
gopkg.in/tomb.v1 v1.0.0-20140529071818-c131134a1947/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/tomb.v1 v1.0.0-20140529071818-c131134a1947/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637/go.mod h1:BHsqpu/nsuzkT5BpiH1EMZPLyqSMM8JbIavyFACoFNk=
gopkg.in/validator.v2 v2.0.0-20160201165114-3e4f037f12a1 h1:1IZMbdoz1SZAQ4HMRwAP0FPSyXt7ywsiJ4q7OPTEu4A= gopkg.in/validator.v2 v2.0.0-20160201165114-3e4f037f12a1 h1:1IZMbdoz1SZAQ4HMRwAP0FPSyXt7ywsiJ4q7OPTEu4A=
gopkg.in/validator.v2 v2.0.0-20160201165114-3e4f037f12a1/go.mod h1:o4V0GXN9/CAmCsvJ0oXYZvrZOe7syiDZSN1GWGZTGzc= gopkg.in/validator.v2 v2.0.0-20160201165114-3e4f037f12a1/go.mod h1:o4V0GXN9/CAmCsvJ0oXYZvrZOe7syiDZSN1GWGZTGzc=
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=

View File

@ -11,6 +11,8 @@ import (
_ "github.com/didi/nightingale/src/modules/monapi/plugins/redis" _ "github.com/didi/nightingale/src/modules/monapi/plugins/redis"
_ "github.com/didi/nightingale/src/modules/monapi/plugins/nginx" _ "github.com/didi/nightingale/src/modules/monapi/plugins/nginx"
_ "github.com/didi/nightingale/src/modules/monapi/plugins/elasticsearch" _ "github.com/didi/nightingale/src/modules/monapi/plugins/elasticsearch"
_ "github.com/didi/nightingale/src/modules/monapi/plugins/tengine"
_ "github.com/didi/nightingale/src/modules/monapi/plugins/zookeeper"
// local // local
_ "github.com/didi/nightingale/src/modules/monapi/plugins/log" _ "github.com/didi/nightingale/src/modules/monapi/plugins/log"

View File

@ -7,7 +7,6 @@ import (
"github.com/didi/nightingale/src/toolkits/i18n" "github.com/didi/nightingale/src/toolkits/i18n"
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs/elasticsearch" "github.com/influxdata/telegraf/plugins/inputs/elasticsearch"
"reflect"
"time" "time"
) )
@ -33,21 +32,21 @@ var (
"zh": map[string]string{ "zh": map[string]string{
"Servers": "服务", "Servers": "服务",
"specify a list of one or more Elasticsearch servers <br /> you can add username and password to your url to use basic authentication: <br /> servers = [http://user:pass@localhost:9200]": "通过URL设置指定服务器<br />可以配置用户名和密码,格式如[http://user:pass@localhost:9200]<br />配置详情可参考 https://github.com/influxdata/telegraf/tree/master/plugins/inputs/elasticsearch", "specify a list of one or more Elasticsearch servers <br /> you can add username and password to your url to use basic authentication: <br /> servers = [http://user:pass@localhost:9200]": "通过URL设置指定服务器<br />可以配置用户名和密码,格式如[http://user:pass@localhost:9200]<br />配置详情可参考 https://github.com/influxdata/telegraf/tree/master/plugins/inputs/elasticsearch",
"Local": "是否本地", "Local": "是否本地",
"When local is true (the default), the node will read only its own stats.<br /> Set local to false when you want to read the node stats from all nodes<br /> of the cluster.": "默认为true,如要监控整个集群需设置为false", "When local is true (the default), the node will read only its own stats.<br /> Set local to false when you want to read the node stats from all nodes<br /> of the cluster.": "默认为true,如要监控整个集群需设置为false",
"HTTP timeout": "请求超时时间", "HTTP timeout": "请求超时时间",
"Timeout for HTTP requests": "http请求超时时间, 单位: 秒", "Timeout for HTTP requests": "http请求超时时间, 单位: 秒",
"ClusterHealth": "集群健康状态", "ClusterHealth": "集群健康状态",
"Set cluster_health to true when you want to obtain cluster health stats": "是否获取集群健康状况统计信息", "Set cluster_health to true when you want to obtain cluster health stats": "是否获取集群健康状况统计信息",
"ClusterHealthLevel": "健康状况等级", "ClusterHealthLevel": "健康状况等级",
"Adjust cluster_health_level when you want to obtain detailed health stats <br />The options are<br /> - indices (default)<br /> - cluster":"统计健康状况等级。可选(indices, cluster)", "Adjust cluster_health_level when you want to obtain detailed health stats <br />The options are<br /> - indices (default)<br /> - cluster": "统计健康状况等级。可选(indices, cluster)",
"ClusterStats": "集群运行状态", "ClusterStats": "集群运行状态",
"Set cluster_stats to true when you want to obtain cluster stats.": "是否收集集群运行状态", "Set cluster_stats to true when you want to obtain cluster stats.": "是否收集集群运行状态",
"ClusterStatsOnlyFromMaster": "是否只收集主服务器", "ClusterStatsOnlyFromMaster": "是否只收集主服务器",
"Only gather cluster_stats from the master node. To work this require local = true": "当设置为ture时是否本地需要为ture才能生效", "Only gather cluster_stats from the master node. To work this require local = true": "当设置为ture时是否本地需要为ture才能生效",
"Indices to collect; can be one or more indices names or _all": "可以配置一个或多个指标,默认为全部(_all)", "Indices to collect; can be one or more indices names or _all": "可以配置一个或多个指标,默认为全部(_all)",
"NodeStats": "子指标", "NodeStats": "子指标",
"node_stats is a list of sub-stats that you want to have gathered. Valid options<br /> are \"indices\", \"os\", \"process\", \"jvm\", \"thread_pool\", \"fs\", \"transport\", \"http\", \"breaker\". <br />Per default, all stats are gathered.":"需要收集的子指标<br />可选项有:\"indices\", \"os\", \"process\", \"jvm\", \"thread_pool\", \"fs\", \"transport\", \"http\", \"breaker\"<br />不配置则全部收集", "node_stats is a list of sub-stats that you want to have gathered. Valid options<br /> are \"indices\", \"os\", \"process\", \"jvm\", \"thread_pool\", \"fs\", \"transport\", \"http\", \"breaker\". <br />Per default, all stats are gathered.": "需要收集的子指标<br />可选项有:\"indices\", \"os\", \"process\", \"jvm\", \"thread_pool\", \"fs\", \"transport\", \"http\", \"breaker\"<br />不配置则全部收集",
}, },
} }
) )
@ -55,7 +54,7 @@ var (
type Rule struct { type Rule struct {
Servers []string `label:"Servers" json:"servers,required" description:"specify a list of one or more Elasticsearch servers <br /> you can add username and password to your url to use basic authentication: <br /> servers = [http://user:pass@localhost:9200]" example:"http://user:pass@localhost:9200"` Servers []string `label:"Servers" json:"servers,required" description:"specify a list of one or more Elasticsearch servers <br /> you can add username and password to your url to use basic authentication: <br /> servers = [http://user:pass@localhost:9200]" example:"http://user:pass@localhost:9200"`
Local bool `label:"Local" json:"local,required" description:"When local is true (the default), the node will read only its own stats.<br /> Set local to false when you want to read the node stats from all nodes<br /> of the cluster." default:"true"` Local bool `label:"Local" json:"local,required" description:"When local is true (the default), the node will read only its own stats.<br /> Set local to false when you want to read the node stats from all nodes<br /> of the cluster." default:"true"`
HTTPTimeout int `label:"HTTP timeout" json:"http_timeout" default:"5" description:"Timeout for HTTP requests"` HTTPTimeout int `label:"HTTP timeout" json:"http_timeout" default:"5" description:"Timeout for HTTP requests"`
ClusterHealth bool `label:"ClusterHealth" json:"cluster_health,required" description:"Set cluster_health to true when you want to obtain cluster health stats" default:"false"` ClusterHealth bool `label:"ClusterHealth" json:"cluster_health,required" description:"Set cluster_health to true when you want to obtain cluster health stats" default:"false"`
ClusterHealthLevel string `label:"ClusterHealthLevel" json:"cluster_health_level,required" description:"Adjust cluster_health_level when you want to obtain detailed health stats <br />The options are<br /> - indices (default)<br /> - cluster" default:"\"indices\""` ClusterHealthLevel string `label:"ClusterHealthLevel" json:"cluster_health_level,required" description:"Adjust cluster_health_level when you want to obtain detailed health stats <br />The options are<br /> - indices (default)<br /> - cluster" default:"\"indices\""`
ClusterStats bool `label:"ClusterStats" json:"cluster_stats,required" description:"Set cluster_stats to true when you want to obtain cluster stats." default:"false"` ClusterStats bool `label:"ClusterStats" json:"cluster_stats,required" description:"Set cluster_stats to true when you want to obtain cluster stats." default:"false"`
@ -88,8 +87,8 @@ func (p *Rule) TelegrafInput() (telegraf.Input, error) {
ClientConfig: p.ClientConfig.TlsClientConfig(), ClientConfig: p.ClientConfig.TlsClientConfig(),
} }
v := reflect.ValueOf(&(es.HTTPTimeout.Duration)).Elem() if err := plugins.SetValue(&es.HTTPTimeout.Duration, time.Second*time.Duration(p.HTTPTimeout)); err != nil {
v.Set(reflect.ValueOf(time.Second * time.Duration(p.HTTPTimeout))) return nil, err
}
return es, nil return es, nil
} }

View File

@ -0,0 +1,16 @@
package elasticsearch
import (
"github.com/didi/nightingale/src/modules/monapi/plugins"
"testing"
"time"
)
func TestCollect(t *testing.T) {
input := plugins.PluginTest(t, &Rule{
Servers: []string{"http://localhost:9200"},
})
time.Sleep(time.Second)
plugins.PluginInputTest(t, input)
}
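The tests above rely on two shared helpers, plugins.PluginTest and plugins.PluginInputTest, whose implementations are not shown in this commit. A minimal sketch of what they might look like, assuming collector.TelegrafPlugin exposes TelegrafInput() and telegraf's testutil.Accumulator drives a single Gather call:

```go
package plugins

import (
	"testing"

	"github.com/didi/nightingale/src/modules/monapi/collector"
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/testutil"
)

// PluginTest builds the telegraf input described by the rule and fails the
// test if validation or construction returns an error.
func PluginTest(t *testing.T, rule collector.TelegrafPlugin) telegraf.Input {
	input, err := rule.TelegrafInput()
	if err != nil {
		t.Fatal(err)
	}
	return input
}

// PluginInputTest gathers once and logs whatever metrics were collected.
func PluginInputTest(t *testing.T, input telegraf.Input) {
	acc := &testutil.Accumulator{}
	if err := input.Gather(acc); err != nil {
		t.Fatal(err)
	}
	for _, m := range acc.Metrics {
		t.Logf("%s %v %v", m.Measurement, m.Tags, m.Fields)
	}
}
```

These tests presumably only pass against a locally running service (Elasticsearch on :9200, an nginx/tengine status endpoint, a ZooKeeper with four-letter commands enabled), which would explain the short sleep before gathering.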

View File

@ -3,9 +3,11 @@ package nginx
import ( import (
"fmt" "fmt"
"github.com/didi/nightingale/src/modules/monapi/collector" "github.com/didi/nightingale/src/modules/monapi/collector"
"github.com/didi/nightingale/src/modules/monapi/plugins"
"github.com/didi/nightingale/src/toolkits/i18n" "github.com/didi/nightingale/src/toolkits/i18n"
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs/nginx" "github.com/influxdata/telegraf/plugins/inputs/nginx"
"time"
) )
func init() { func init() {
@ -28,20 +30,24 @@ func NewCollector() *Collector {
var ( var (
langDict = map[string]map[string]string{ langDict = map[string]map[string]string{
"zh": map[string]string{ "zh": map[string]string{
"nginx status uri": "查看Nginx状态的地址", "Urls": "服务",
"An array of Nginx stub_status URI to gather stats.": "查看Nginx状态的地址",
"ResponseTimeout":"响应超时时间",
"HTTP response timeout (default: 5s)": "HTTP响应超时时间(单位: 秒)默认5秒",
}, },
} }
) )
type Rule struct { type Rule struct {
Urls []string `label:"nginx status uri" json:"url,required" example:"http://localhost/status"` Urls []string `label:"Urls" json:"urls,required" description:"An array of Nginx stub_status URI to gather stats." example:"http://localhost/status"`
ResponseTimeout int `label:"ResponseTimeout" json:"response_timeout" default:"5" description:"HTTP response timeout (default: 5s)"`
plugins.ClientConfig
} }
func (p *Rule) Validate() error { func (p *Rule) Validate() error {
if len(p.Urls) == 0 || p.Urls[0] == "" { if len(p.Urls) == 0 || p.Urls[0] == "" {
return fmt.Errorf("ningx.rule.urls must be set") return fmt.Errorf("ningx.rule.urls must be set")
} }
return nil return nil
} }
@ -49,7 +55,11 @@ func (p *Rule) TelegrafInput() (telegraf.Input, error) {
if err := p.Validate(); err != nil { if err := p.Validate(); err != nil {
return nil, err return nil, err
} }
return &nginx.Nginx{ input := &nginx.Nginx{
Urls: p.Urls, Urls: p.Urls,
}, nil }
if err := plugins.SetValue(&input.ResponseTimeout.Duration, time.Second*time.Duration(p.ResponseTimeout)); err != nil {
return nil, err
}
return input, nil
} }
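The nginx, tengine, and zookeeper rules now embed plugins.ClientConfig, which the elasticsearch rule converts earlier in this diff via TlsClientConfig(). That type is defined elsewhere in the repository; the sketch below is an assumption about its shape, mapping the usual TLS options onto telegraf's tls.ClientConfig.

```go
package plugins

import "github.com/influxdata/telegraf/plugins/common/tls"

// ClientConfig is assumed to expose the standard TLS knobs shared by the
// prober rules; field names and tags here are illustrative only.
type ClientConfig struct {
	TLSCA              string `label:"TLS CA" json:"tls_ca" description:"Optional TLS CA file"`
	TLSCert            string `label:"TLS Cert" json:"tls_cert" description:"Optional TLS certificate"`
	TLSKey             string `label:"TLS Key" json:"tls_key" description:"Optional TLS private key"`
	InsecureSkipVerify bool   `label:"InsecureSkipVerify" json:"insecure_skip_verify" description:"Skip TLS chain and host verification"`
}

// TlsClientConfig maps the rule-level options onto telegraf's TLS config.
func (c *ClientConfig) TlsClientConfig() tls.ClientConfig {
	return tls.ClientConfig{
		TLSCA:              c.TLSCA,
		TLSCert:            c.TLSCert,
		TLSKey:             c.TLSKey,
		InsecureSkipVerify: c.InsecureSkipVerify,
	}
}
```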

View File

@ -0,0 +1,16 @@
package nginx
import (
"github.com/didi/nightingale/src/modules/monapi/plugins"
"testing"
"time"
)
func TestCollect(t *testing.T) {
input := plugins.PluginTest(t, &Rule{
Urls: []string{"http://localhost/nginx-status"},
})
time.Sleep(time.Second)
plugins.PluginInputTest(t, input)
}

View File

@ -0,0 +1,68 @@
package tengine
import (
"fmt"
"github.com/didi/nightingale/src/modules/monapi/collector"
"github.com/didi/nightingale/src/modules/monapi/plugins"
"github.com/didi/nightingale/src/toolkits/i18n"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs/tengine"
"time"
)
func init() {
collector.CollectorRegister(NewCollector()) // for monapi
i18n.DictRegister(langDict)
}
type Collector struct {
*collector.BaseCollector
}
func NewCollector() *Collector {
return &Collector{BaseCollector: collector.NewBaseCollector(
"tengine",
collector.RemoteCategory,
func() collector.TelegrafPlugin { return &Rule{} },
)}
}
var (
langDict = map[string]map[string]string{
"zh": map[string]string{
"Urls": "服务",
"An array of Tengine reqstat module URI to gather stats.": "查看Tengine状态的地址",
"ResponseTimeout": "响应超时时间",
"HTTP response timeout (default: 5s)": "HTTP响应超时时间(单位: 秒)默认5秒",
},
}
)
type Rule struct {
Urls []string `label:"Urls" json:"urls,required" description:"An array of Tengine reqstat module URI to gather stats." example:"http://localhost/us"`
ResponseTimeout int `label:"ResponseTimeout" json:"response_timeout" default:"5" description:"HTTP response timeout (default: 5s)"`
plugins.ClientConfig
}
func (p *Rule) Validate() error {
if len(p.Urls) == 0 || p.Urls[0] == "" {
return fmt.Errorf("ningx.rule.urls must be set")
}
return nil
}
func (p *Rule) TelegrafInput() (telegraf.Input, error) {
if err := p.Validate(); err != nil {
return nil, err
}
input := &tengine.Tengine{
Urls: p.Urls,
}
if err := plugins.SetValue(&input.ResponseTimeout.Duration, time.Second*time.Duration(p.ResponseTimeout)); err != nil {
return nil, err
}
return input, nil
}

View File

@ -0,0 +1,16 @@
package tengine
import (
"github.com/didi/nightingale/src/modules/monapi/plugins"
"testing"
"time"
)
func TestCollect(t *testing.T) {
input := plugins.PluginTest(t, &Rule{
Urls: []string{"http://localhost/us"},
})
time.Sleep(time.Second)
plugins.PluginInputTest(t, input)
}

View File

@ -0,0 +1,68 @@
package zookeeper
import (
"fmt"
"github.com/didi/nightingale/src/modules/monapi/collector"
"github.com/didi/nightingale/src/modules/monapi/plugins"
"github.com/didi/nightingale/src/toolkits/i18n"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs/zookeeper"
"time"
)
func init() {
collector.CollectorRegister(NewCollector()) // for monapi
i18n.DictRegister(langDict)
}
type Collector struct {
*collector.BaseCollector
}
func NewCollector() *Collector {
return &Collector{BaseCollector: collector.NewBaseCollector(
"zookeeper",
collector.RemoteCategory,
func() collector.TelegrafPlugin { return &Rule{} },
)}
}
var (
langDict = map[string]map[string]string{
"zh": map[string]string{
"Servers": "服务",
"An array of address to gather stats about. Specify an ip or hostname <br /> with port. ie localhost:2181, 10.0.0.1:2181, etc.": "服务地址,格式[localhost:2182],服务需开启四字指令",
"Timeout": "请求超时时间",
"Timeout for metric collections from all servers. Minimum timeout is 1s": "获取监控指标的超时时间(单位: 秒最小值为1秒",
},
}
)
type Rule struct {
Servers []string `label:"Servers" json:"servers,required" description:"An array of address to gather stats about. Specify an ip or hostname <br /> with port. ie localhost:2181, 10.0.0.1:2181, etc." example:"localhost:2181"`
Timeout int `label:"Timeout" json:"http_timeout" default:"5" description:"Timeout for metric collections from all servers. Minimum timeout is 1s"`
plugins.ClientConfig
}
func (p *Rule) Validate() error {
if len(p.Servers) == 0 || p.Servers[0] == "" {
return fmt.Errorf("zookeeper.rule.Servers must be set")
}
return nil
}
func (p *Rule) TelegrafInput() (telegraf.Input, error) {
if err := p.Validate(); err != nil {
return nil, err
}
input := &zookeeper.Zookeeper{
Servers: p.Servers,
}
if err := plugins.SetValue(&input.Timeout.Duration, time.Second*time.Duration(p.Timeout)); err != nil {
return nil, err
}
return input, nil
}

View File

@ -0,0 +1,16 @@
package zookeeper
import (
"github.com/didi/nightingale/src/modules/monapi/plugins"
"testing"
"time"
)
func TestCollect(t *testing.T) {
input := plugins.PluginTest(t, &Rule{
Servers: []string{"localhost:2181"},
})
time.Sleep(time.Second)
plugins.PluginInputTest(t, input)
}

View File

@ -2,6 +2,16 @@
#### Unreleased #### Unreleased
#### Version 1.27.2 (2020-10-21)
# Improvements
#1750 - @krantideep95 Adds missing mock responses for mocking consumer group
# Fixes
#1817 - reverts #1785 - Add private method to Client interface to prevent implementation
#### Version 1.27.1 (2020-10-07) #### Version 1.27.1 (2020-10-07)
# Improvements # Improvements

View File

@ -93,9 +93,6 @@ type Client interface {
// Closed returns true if the client has already had Close called on it // Closed returns true if the client has already had Close called on it
Closed() bool Closed() bool
// A private method to prevent users implementing the interface for compatibility
private()
} }
const ( const (
@ -187,8 +184,6 @@ func NewClient(addrs []string, conf *Config) (Client, error) {
return client, nil return client, nil
} }
func (client *client) private() {}
func (client *client) Config() *Config { func (client *client) Config() *Config {
return client.conf return client.conf
} }

View File

@ -1078,6 +1078,154 @@ func (m *MockDeleteGroupsResponse) For(reqBody versionedDecoder) encoderWithHead
return resp return resp
} }
type MockJoinGroupResponse struct {
t TestReporter
ThrottleTime int32
Err KError
GenerationId int32
GroupProtocol string
LeaderId string
MemberId string
Members map[string][]byte
}
func NewMockJoinGroupResponse(t TestReporter) *MockJoinGroupResponse {
return &MockJoinGroupResponse{
t: t,
Members: make(map[string][]byte),
}
}
func (m *MockJoinGroupResponse) For(reqBody versionedDecoder) encoderWithHeader {
req := reqBody.(*JoinGroupRequest)
resp := &JoinGroupResponse{
Version: req.Version,
ThrottleTime: m.ThrottleTime,
Err: m.Err,
GenerationId: m.GenerationId,
GroupProtocol: m.GroupProtocol,
LeaderId: m.LeaderId,
MemberId: m.MemberId,
Members: m.Members,
}
return resp
}
func (m *MockJoinGroupResponse) SetThrottleTime(t int32) *MockJoinGroupResponse {
m.ThrottleTime = t
return m
}
func (m *MockJoinGroupResponse) SetError(kerr KError) *MockJoinGroupResponse {
m.Err = kerr
return m
}
func (m *MockJoinGroupResponse) SetGenerationId(id int32) *MockJoinGroupResponse {
m.GenerationId = id
return m
}
func (m *MockJoinGroupResponse) SetGroupProtocol(proto string) *MockJoinGroupResponse {
m.GroupProtocol = proto
return m
}
func (m *MockJoinGroupResponse) SetLeaderId(id string) *MockJoinGroupResponse {
m.LeaderId = id
return m
}
func (m *MockJoinGroupResponse) SetMemberId(id string) *MockJoinGroupResponse {
m.MemberId = id
return m
}
func (m *MockJoinGroupResponse) SetMember(id string, meta *ConsumerGroupMemberMetadata) *MockJoinGroupResponse {
bin, err := encode(meta, nil)
if err != nil {
panic(fmt.Sprintf("error encoding member metadata: %v", err))
}
m.Members[id] = bin
return m
}
type MockLeaveGroupResponse struct {
t TestReporter
Err KError
}
func NewMockLeaveGroupResponse(t TestReporter) *MockLeaveGroupResponse {
return &MockLeaveGroupResponse{t: t}
}
func (m *MockLeaveGroupResponse) For(reqBody versionedDecoder) encoderWithHeader {
resp := &LeaveGroupResponse{
Err: m.Err,
}
return resp
}
func (m *MockLeaveGroupResponse) SetError(kerr KError) *MockLeaveGroupResponse {
m.Err = kerr
return m
}
type MockSyncGroupResponse struct {
t TestReporter
Err KError
MemberAssignment []byte
}
func NewMockSyncGroupResponse(t TestReporter) *MockSyncGroupResponse {
return &MockSyncGroupResponse{t: t}
}
func (m *MockSyncGroupResponse) For(reqBody versionedDecoder) encoderWithHeader {
resp := &SyncGroupResponse{
Err: m.Err,
MemberAssignment: m.MemberAssignment,
}
return resp
}
func (m *MockSyncGroupResponse) SetError(kerr KError) *MockSyncGroupResponse {
m.Err = kerr
return m
}
func (m *MockSyncGroupResponse) SetMemberAssignment(assignment *ConsumerGroupMemberAssignment) *MockSyncGroupResponse {
bin, err := encode(assignment, nil)
if err != nil {
panic(fmt.Sprintf("error encoding member assignment: %v", err))
}
m.MemberAssignment = bin
return m
}
type MockHeartbeatResponse struct {
t TestReporter
Err KError
}
func NewMockHeartbeatResponse(t TestReporter) *MockHeartbeatResponse {
return &MockHeartbeatResponse{t: t}
}
func (m *MockHeartbeatResponse) For(reqBody versionedDecoder) encoderWithHeader {
resp := &HeartbeatResponse{}
return resp
}
func (m *MockHeartbeatResponse) SetError(kerr KError) *MockHeartbeatResponse {
m.Err = kerr
return m
}
type MockDescribeLogDirsResponse struct { type MockDescribeLogDirsResponse struct {
t TestReporter t TestReporter
logDirs []DescribeLogDirsResponseDirMetadata logDirs []DescribeLogDirsResponseDirMetadata
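For context on the vendored sarama 1.27.2 additions above: the new consumer-group mock responses plug into sarama's existing MockBroker. A hypothetical test snippet follows; the handler keys and builder calls use sarama's public API, but the test itself is illustrative only.

```go
package sarama_test

import (
	"testing"

	"github.com/Shopify/sarama"
)

// Illustrative wiring of the new mocks; a real test would go on to run a
// consumer group against broker.Addr().
func TestConsumerGroupMocks(t *testing.T) {
	broker := sarama.NewMockBroker(t, 1)
	defer broker.Close()

	broker.SetHandlerByMap(map[string]sarama.MockResponse{
		"JoinGroupRequest": sarama.NewMockJoinGroupResponse(t).
			SetGroupProtocol("range").
			SetLeaderId("member-1").
			SetMemberId("member-1"),
		"SyncGroupRequest":  sarama.NewMockSyncGroupResponse(t),
		"HeartbeatRequest":  sarama.NewMockHeartbeatResponse(t),
		"LeaveGroupRequest": sarama.NewMockLeaveGroupResponse(t),
	})
}
```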

View File

@ -16,4 +16,4 @@ change is the ability to represent an invalid UUID (vs a NIL UUID).
Full `go doc` style documentation for the package can be viewed online without Full `go doc` style documentation for the package can be viewed online without
installing this package by using the GoDoc site here: installing this package by using the GoDoc site here:
http://godoc.org/github.com/google/uuid http://pkg.go.dev/github.com/google/uuid

View File

@ -16,10 +16,11 @@ func (uuid UUID) MarshalText() ([]byte, error) {
// UnmarshalText implements encoding.TextUnmarshaler. // UnmarshalText implements encoding.TextUnmarshaler.
func (uuid *UUID) UnmarshalText(data []byte) error { func (uuid *UUID) UnmarshalText(data []byte) error {
id, err := ParseBytes(data) id, err := ParseBytes(data)
if err == nil { if err != nil {
*uuid = id return err
} }
return err *uuid = id
return nil
} }
// MarshalBinary implements encoding.BinaryMarshaler. // MarshalBinary implements encoding.BinaryMarshaler.

View File

@ -17,12 +17,6 @@ import (
// //
// In most cases, New should be used. // In most cases, New should be used.
func NewUUID() (UUID, error) { func NewUUID() (UUID, error) {
nodeMu.Lock()
if nodeID == zeroID {
setNodeInterface("")
}
nodeMu.Unlock()
var uuid UUID var uuid UUID
now, seq, err := GetTime() now, seq, err := GetTime()
if err != nil { if err != nil {
@ -38,7 +32,13 @@ func NewUUID() (UUID, error) {
binary.BigEndian.PutUint16(uuid[4:], timeMid) binary.BigEndian.PutUint16(uuid[4:], timeMid)
binary.BigEndian.PutUint16(uuid[6:], timeHi) binary.BigEndian.PutUint16(uuid[6:], timeHi)
binary.BigEndian.PutUint16(uuid[8:], seq) binary.BigEndian.PutUint16(uuid[8:], seq)
nodeMu.Lock()
if nodeID == zeroID {
setNodeInterface("")
}
copy(uuid[10:], nodeID[:]) copy(uuid[10:], nodeID[:])
nodeMu.Unlock()
return uuid, nil return uuid, nil
} }

View File

@ -30,6 +30,7 @@ func NewRandom() (UUID, error) {
return NewRandomFromReader(rander) return NewRandomFromReader(rander)
} }
// NewRandomFromReader returns a UUID based on bytes read from a given io.Reader.
func NewRandomFromReader(r io.Reader) (UUID, error) { func NewRandomFromReader(r io.Reader) (UUID, error) {
var uuid UUID var uuid UUID
_, err := io.ReadFull(r, uuid[:]) _, err := io.ReadFull(r, uuid[:])
@ -40,4 +41,3 @@ func NewRandomFromReader(r io.Reader) (UUID, error) {
uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
return uuid, nil return uuid, nil
} }

View File

@ -3,3 +3,4 @@ README.md merge=union
go.sum merge=union go.sum merge=union
plugins/inputs/all/all.go merge=union plugins/inputs/all/all.go merge=union
plugins/outputs/all/all.go merge=union plugins/outputs/all/all.go merge=union
**/testdata/** test eol=lf

View File

@ -5,3 +5,4 @@
/telegraf.gz /telegraf.gz
/vendor /vendor
.DS_Store .DS_Store
process.yml

View File

@ -1,3 +1,149 @@
## v1.17.2 [2021-01-28]
#### Bugfixes
- [#8770](https://github.com/influxdata/telegraf/pull/8770) `inputs.ping` Set interface for native
- [#8764](https://github.com/influxdata/telegraf/pull/8764) `inputs.ping` Resolve regression, re-add missing function
## v1.17.1 [2021-01-27]
#### Release Notes
Included a few more changes that add configuration options to plugins as it's been while since the last release
- [#8335](https://github.com/influxdata/telegraf/pull/8335) `inputs.ipmi_sensor` Add setting to enable caching in ipmitool
- [#8616](https://github.com/influxdata/telegraf/pull/8616) Add Event Log support for Windows
- [#8602](https://github.com/influxdata/telegraf/pull/8602) `inputs.postgresql_extensible` Add timestamp column support to postgresql_extensible
- [#8627](https://github.com/influxdata/telegraf/pull/8627) `parsers.csv` Added ability to define skip values in csv parser
- [#8646](https://github.com/influxdata/telegraf/pull/8646) link to Open Hardware Monitor
- [#8055](https://github.com/influxdata/telegraf/pull/8055) `outputs.http` outputs/http: add option to control idle connection timeout
- [#7897](https://github.com/influxdata/telegraf/pull/7897) `common.tls` common/tls: Allow specifying SNI hostnames
- [#8541](https://github.com/influxdata/telegraf/pull/8541) `inputs.snmp` Extended the internal snmp wrapper to support AES192, AES192C, AES256, and AES256C
- [#6165](https://github.com/influxdata/telegraf/pull/6165) `inputs.procstat` Provide method to include core count when reporting cpu_usage in procstat input
- [#8287](https://github.com/influxdata/telegraf/pull/8287) `inputs.jenkins` Add support for an inclusive job list in Jenkins plugin
- [#8524](https://github.com/influxdata/telegraf/pull/8524) `inputs.ipmi_sensor` Add hex_key parameter for IPMI input plugin connection
#### Bugfixes
- [#8662](https://github.com/influxdata/telegraf/pull/8662) `outputs.influxdb_v2` [outputs.influxdb_v2] add exponential backoff, and respect client error responses
- [#8748](https://github.com/influxdata/telegraf/pull/8748) `outputs.elasticsearch` Fix issue with elasticsearch output being really noisy about some errors
- [#7533](https://github.com/influxdata/telegraf/pull/7533) `inputs.zookeeper` improve mntr regex to match user specific keys.
- [#7967](https://github.com/influxdata/telegraf/pull/7967) `inputs.lustre2` Fix crash in lustre2 input plugin, when field name and value
- [#8673](https://github.com/influxdata/telegraf/pull/8673) Update grok-library to v1.0.1 with dots and dash-patterns fixed.
- [#8679](https://github.com/influxdata/telegraf/pull/8679) `inputs.ping` Use go-ping for "native" execution in Ping plugin
- [#8741](https://github.com/influxdata/telegraf/pull/8741) `inputs.x509_cert` fix x509 cert timeout issue
- [#8714](https://github.com/influxdata/telegraf/pull/8714) Bump github.com/nsqio/go-nsq from 1.0.7 to 1.0.8
- [#8715](https://github.com/influxdata/telegraf/pull/8715) Bump github.com/Shopify/sarama from 1.27.1 to 1.27.2
- [#8712](https://github.com/influxdata/telegraf/pull/8712) Bump github.com/newrelic/newrelic-telemetry-sdk-go from 0.2.0 to 0.5.1
- [#8659](https://github.com/influxdata/telegraf/pull/8659) `inputs.gnmi` GNMI plugin should not take off the first character of field keys when no 'alias path' exists.
- [#8609](https://github.com/influxdata/telegraf/pull/8609) `inputs.webhooks` Use the 'measurement' json field from the particle webhook as the measurment name, or if it's blank, use the 'name' field of the event's json.
- [#8658](https://github.com/influxdata/telegraf/pull/8658) `inputs.procstat` Procstat input plugin should use the same timestamp in all metrics in the same Gather() cycle.
- [#8391](https://github.com/influxdata/telegraf/pull/8391) `aggregators.merge` Optimize SeriesGrouper & aggregators.merge
- [#8545](https://github.com/influxdata/telegraf/pull/8545) `inputs.prometheus` Using mime-type in prometheus parser to handle protocol-buffer responses
- [#8588](https://github.com/influxdata/telegraf/pull/8588) `inputs.snmp` Input SNMP plugin - upgrade gosnmp library to version 1.29.0
- [#8502](https://github.com/influxdata/telegraf/pull/8502) `inputs.http_listener_v2` Fix Stop() bug when plugin fails to start
## v1.17.0 [2020-12-18]
#### Release Notes
- Starlark plugins can now store state between runs using a global state variable. This lets you make custom aggregators as well as custom processors that are state-aware.
- New input plugins: Riemann-Protobuff Listener, Intel PowerStat
- New output plugins: Yandex.Cloud monitoring, Logz.io
- New parser plugin: Prometheus
- New serializer: Prometheus remote write
#### Bugfixes
- [#8505](https://github.com/influxdata/telegraf/pull/8505) `inputs.vsphere` Fixed misspelled check for datacenter
- [#8499](https://github.com/influxdata/telegraf/pull/8499) `processors.execd` Adding support for new lines in influx line protocol fields.
- [#8254](https://github.com/influxdata/telegraf/pull/8254) `serializers.carbon2` Fix carbon2 tests
- [#8498](https://github.com/influxdata/telegraf/pull/8498) `inputs.http_response` fixed network test
- [#8414](https://github.com/influxdata/telegraf/pull/8414) `inputs.bcache` Fix tests for Windows - part 1
- [#8577](https://github.com/influxdata/telegraf/pull/8577) `inputs.ping` fix potential issue with race condition
- [#8562](https://github.com/influxdata/telegraf/pull/8562) `inputs.mqtt_consumer` fix issue with mqtt concurrent map write
- [#8574](https://github.com/influxdata/telegraf/pull/8574) `inputs.ecs` Remove duplicated field "revision" from ecs_task because it's already defined as a tag there
- [#8551](https://github.com/influxdata/telegraf/pull/8551) `inputs.socket_listener` fix crash when socket_listener receiving invalid data
- [#8564](https://github.com/influxdata/telegraf/pull/8564) `parsers.graphite` Graphite tags parser
- [#8472](https://github.com/influxdata/telegraf/pull/8472) `inputs.kube_inventory` Fixing issue with missing metrics when pod has only pending containers
- [#8542](https://github.com/influxdata/telegraf/pull/8542) `inputs.aerospike` fix edge case in aerospike plugin where an expected hex string was converted to an integer if it contained only digits
- [#8512](https://github.com/influxdata/telegraf/pull/8512) `inputs.kube_inventory` Update string parsing of allocatable cpu cores in kube_inventory
#### Features
- [#8038](https://github.com/influxdata/telegraf/pull/8038) `inputs.jenkins` feat: add build number field to jenkins_job measurement
- [#7345](https://github.com/influxdata/telegraf/pull/7345) `inputs.ping` Add percentiles to the ping plugin
- [#8369](https://github.com/influxdata/telegraf/pull/8369) `inputs.sqlserver` Added tags for monitoring readable secondaries for Azure SQL MI
- [#8379](https://github.com/influxdata/telegraf/pull/8379) `inputs.sqlserver` SQL Server HA/DR Availability Group queries
- [#8520](https://github.com/influxdata/telegraf/pull/8520) Add initialization example to mock-plugin.
- [#8426](https://github.com/influxdata/telegraf/pull/8426) `inputs.snmp` Add support to convert snmp hex strings to integers
- [#8509](https://github.com/influxdata/telegraf/pull/8509) `inputs.statsd` Add configurable Max TTL duration for statsd input plugin entries
- [#8508](https://github.com/influxdata/telegraf/pull/8508) `inputs.bind` Add configurable timeout to bind input plugin http call
- [#8368](https://github.com/influxdata/telegraf/pull/8368) `inputs.sqlserver` Added is_primary_replica for monitoring readable secondaries for Azure SQL DB
- [#8462](https://github.com/influxdata/telegraf/pull/8462) `inputs.sqlserver` sqlAzureMIRequests - remove duplicate column [session_db_name]
- [#8464](https://github.com/influxdata/telegraf/pull/8464) `inputs.sqlserver` Add column measurement_db_type to output of all queries if not empty
- [#8389](https://github.com/influxdata/telegraf/pull/8389) `inputs.opcua` Add node groups to opcua input plugin
- [#8432](https://github.com/influxdata/telegraf/pull/8432) add support for linux/ppc64le
- [#8474](https://github.com/influxdata/telegraf/pull/8474) `inputs.modbus` Add FLOAT64-IEEE support to inputs.modbus (#8361) (by @Nemecsek)
- [#8447](https://github.com/influxdata/telegraf/pull/8447) `processors.starlark` Add the shared state to the global scope to get previous data
- [#8383](https://github.com/influxdata/telegraf/pull/8383) `inputs.zfs` Add dataset metrics to zfs input
- [#8429](https://github.com/influxdata/telegraf/pull/8429) `outputs.nats` Added "name" parameter to NATS output plugin
- [#8477](https://github.com/influxdata/telegraf/pull/8477) `inputs.http` proxy support for http input
- [#8466](https://github.com/influxdata/telegraf/pull/8466) `inputs.snmp` Translate snmp field values
- [#8435](https://github.com/influxdata/telegraf/pull/8435) `common.kafka` Enable kafka zstd compression and idempotent writes (see the sketch after this list)
- [#8056](https://github.com/influxdata/telegraf/pull/8056) `inputs.monit` Add response_time to monit plugin
- [#8446](https://github.com/influxdata/telegraf/pull/8446) update to go 1.15.5
- [#8428](https://github.com/influxdata/telegraf/pull/8428) `aggregators.basicstats` Add rate and interval to the basicstats aggregator plugin
- [#8575](https://github.com/influxdata/telegraf/pull/8575) `inputs.win_services` Added Glob pattern matching for "Windows Services" plugin
- [#6132](https://github.com/influxdata/telegraf/pull/6132) `inputs.mysql` Add per user metrics to mysql input
- [#8500](https://github.com/influxdata/telegraf/pull/8500) `inputs.github` [inputs.github] Add query of pull-request statistics
- [#8598](https://github.com/influxdata/telegraf/pull/8598) `processors.enum` Allow globs (wildcards) in config for tags/fields in enum processor
- [#8590](https://github.com/influxdata/telegraf/pull/8590) `inputs.ethtool` [ethtool] interface_up field added
- [#8579](https://github.com/influxdata/telegraf/pull/8579) `parsers.json` Add wildcard tags json parser support
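The kafka change above (#8435) maps to two producer settings in the sarama client. Here is a hedged sketch of what enabling them can look like; the version and acks values are the usual prerequisites for zstd and idempotence, not taken from the telegraf source.

```go
package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_1_0_0 // zstd needs Kafka 2.1+
	cfg.Producer.Compression = sarama.CompressionZSTD
	cfg.Producer.Idempotent = true                // retries cannot duplicate writes
	cfg.Producer.RequiredAcks = sarama.WaitForAll // required when Idempotent is set
	cfg.Net.MaxOpenRequests = 1                   // also required for idempotence
	if err := cfg.Validate(); err != nil {
		fmt.Println("invalid kafka config:", err)
	}
}
```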
#### New Parser Plugins
- [#7778](https://github.com/influxdata/telegraf/pull/7778) `parsers.prometheus` Add a parser plugin for prometheus
#### New Serializer Plugins
- [#8360](https://github.com/influxdata/telegraf/pull/8360) `serializers.prometheusremotewrite` Add prometheus remote write serializer
#### New Input Plugins
- [#8163](https://github.com/influxdata/telegraf/pull/8163) `inputs.riemann` Support Riemann-Protobuf Listener
- [#8488](https://github.com/influxdata/telegraf/pull/8488) `inputs.intel_powerstat` New Intel PowerStat input plugin
#### New Output Plugins
- [#8296](https://github.com/influxdata/telegraf/pull/8296) `outputs.yandex_cloud_monitoring` #8295 Initial Yandex.Cloud monitoring
- [#8202](https://github.com/influxdata/telegraf/pull/8202) `outputs.logzio` A new Logz.io output plugin
## v1.16.3 [2020-12-01]
#### Bugfixes
- [#8483](https://github.com/influxdata/telegraf/pull/8483) `inputs.gnmi` Log SubscribeResponse_Error message and code. #8482
- [#7987](https://github.com/influxdata/telegraf/pull/7987) update godirwalk to v1.16.1
- [#8438](https://github.com/influxdata/telegraf/pull/8438) `processors.starlark` Starlark example dropbytype
- [#8468](https://github.com/influxdata/telegraf/pull/8468) `inputs.sqlserver` Fix typo in column name
- [#8461](https://github.com/influxdata/telegraf/pull/8461) `inputs.phpfpm` [php-fpm] Fix possible "index out of range"
- [#8444](https://github.com/influxdata/telegraf/pull/8444) `inputs.apcupsd` Update mdlayher/apcupsd dependency
- [#8439](https://github.com/influxdata/telegraf/pull/8439) `processors.starlark` Show how to return a custom error with the Starlark processor
- [#8440](https://github.com/influxdata/telegraf/pull/8440) `parsers.csv` keep field name as is for csv timestamp column
- [#8436](https://github.com/influxdata/telegraf/pull/8436) `inputs.nvidia_smi` Add DriverVersion and CUDA Version to output
- [#8423](https://github.com/influxdata/telegraf/pull/8423) `processors.starlark` Show how to return several metrics with the Starlark processor
- [#8408](https://github.com/influxdata/telegraf/pull/8408) `processors.starlark` Support logging in starlark
- [#8315](https://github.com/influxdata/telegraf/pull/8315) add kinesis output to external plugins list
- [#8406](https://github.com/influxdata/telegraf/pull/8406) `outputs.wavefront` #8405 add non-retryable debug logging
- [#8404](https://github.com/influxdata/telegraf/pull/8404) `outputs.wavefront` Wavefront output should distinguish between retryable and non-retryable errors
- [#8401](https://github.com/influxdata/telegraf/pull/8401) `processors.starlark` Allow to catch errors that occur in the apply function
## v1.16.2 [2020-11-13] ## v1.16.2 [2020-11-13]
#### Bugfixes #### Bugfixes
@ -71,9 +217,6 @@
- [#8220](https://github.com/influxdata/telegraf/pull/8220) `build` update to Go 1.15 - [#8220](https://github.com/influxdata/telegraf/pull/8220) `build` update to Go 1.15
- [#8032](https://github.com/influxdata/telegraf/pull/8032) `inputs.http_response` http_response: match on status code - [#8032](https://github.com/influxdata/telegraf/pull/8032) `inputs.http_response` http_response: match on status code
- [#8172](https://github.com/influxdata/telegraf/pull/8172) `inputs.sqlserver` New sql server queries (on-prem) - refactoring and formatting - [#8172](https://github.com/influxdata/telegraf/pull/8172) `inputs.sqlserver` New sql server queries (on-prem) - refactoring and formatting
- [#8216](https://github.com/influxdata/telegraf/pull/8216) `inputs.win_eventlog` Use TimeCreated timestamp from the event log as the metric timestamp (#8216)
- [#8157](https://github.com/influxdata/telegraf/pull/8157) `processors.port_name` Add support for fields and protocol lookups (#8157)
- [#8269](https://github.com/influxdata/telegraf/pull/8269) `outputs.influxdb` Default to gzip content encoding (#8269)
#### Bugfixes #### Bugfixes
@ -104,10 +247,6 @@
- [#8240](https://github.com/influxdata/telegraf/pull/8240) Fix bugs found by LGTM analysis platform - [#8240](https://github.com/influxdata/telegraf/pull/8240) Fix bugs found by LGTM analysis platform
- [#8251](https://github.com/influxdata/telegraf/pull/8251) `outputs.dynatrace` Dynatrace Output Plugin: Fixed behaviour when state map is cleared - [#8251](https://github.com/influxdata/telegraf/pull/8251) `outputs.dynatrace` Dynatrace Output Plugin: Fixed behaviour when state map is cleared
- [#8274](https://github.com/influxdata/telegraf/pull/8274) `common.shim` fix issue with loading processor config from execd - [#8274](https://github.com/influxdata/telegraf/pull/8274) `common.shim` fix issue with loading processor config from execd
- [#8282](https://github.com/influxdata/telegraf/pull/8282) `outputs.azure_monitor` Fix using empty string as the namespace prefix (#8282)
- [#7263](https://github.com/influxdata/telegraf/issues/7263) `inputs.kafka_consumer` Fix using old partition leader during Kafka cluster node failure (#7263)
- [#1906](https://github.com/influxdata/telegraf/issues/1906) `inputs.phpfpm` Fix handling URLs with question mark (#1906)
- [#8290](https://github.com/influxdata/telegraf/pull/8290) `inputs.proxmox` Fix wrong memory measurements of containers and vms (#8290)
#### New Input Plugins #### New Input Plugins
@ -134,6 +273,13 @@
- [octoprint](https://github.com/BattleBas/octoprint-telegraf-plugin) - Gather 3d print information from the octoprint API. - [octoprint](https://github.com/BattleBas/octoprint-telegraf-plugin) - Gather 3d print information from the octoprint API.
- [systemd-timings](https://github.com/pdmorrow/telegraf-execd-systemd-timings) - Gather systemd boot and unit timestamp metrics. - [systemd-timings](https://github.com/pdmorrow/telegraf-execd-systemd-timings) - Gather systemd boot and unit timestamp metrics.
## v1.15.4 [2020-10-20]
#### Bugfixes
- [#8274](https://github.com/influxdata/telegraf/pull/8274) `common.shim` fix issue with loading processor config from execd
- [#8176](https://github.com/influxdata/telegraf/pull/8176) `agent` fix panic on streaming processors using logging
## v1.15.3 [2020-09-11] ## v1.15.3 [2020-09-11]
#### Release Notes #### Release Notes

View File

@ -13,8 +13,8 @@
1. Ensure you have added proper unit tests and documentation. 1. Ensure you have added proper unit tests and documentation.
1. Open a new [pull request][]. 1. Open a new [pull request][].
#### Contributing an External Plugin *(experimental)* #### Contributing an External Plugin *(new)*
Input, output, and processor plugins written for internal Telegraf can be run as externally-compiled plugins through the [Execd Input](plugins/inputs/execd), [Execd Output](/plugins/inputs/execd), and [Execd Processor](plugins/processors/execd) Plugins without having to change the plugin code. Input, output, and processor plugins written for internal Telegraf can be run as externally-compiled plugins through the [Execd Input](plugins/inputs/execd), [Execd Output](/plugins/outputs/execd), and [Execd Processor](plugins/processors/execd) Plugins without having to change the plugin code.
Follow the guidelines of how to integrate your plugin with the [Execd Go Shim](/plugins/common/shim) to easily compile it as a separate app and run it with the respective `execd` plugin. Follow the guidelines of how to integrate your plugin with the [Execd Go Shim](/plugins/common/shim) to easily compile it as a separate app and run it with the respective `execd` plugin.
Check out our [guidelines](docs/EXTERNAL_PLUGINS.md#external-plugin-guidelines) on how to build and set up your external plugins to run with `execd`. Check out our [guidelines](docs/EXTERNAL_PLUGINS.md#external-plugin-guidelines) on how to build and set up your external plugins to run with `execd`.

View File

@ -5,11 +5,18 @@ Check out the [external plugin documentation](/docs/EXTERNAL_PLUGINS.md) for mor
Pull requests welcome. Pull requests welcome.
## Inputs ## Inputs
- [rand](https://github.com/ssoroka/rand) - Generate random numbers
- [twitter](https://github.com/inabagumi/twitter-telegraf-plugin) - Gather account information from Twitter accounts
- [youtube](https://github.com/inabagumi/youtube-telegraf-plugin) - Gather account information from YouTube channels
- [awsalarms](https://github.com/vipinvkmenon/awsalarms) - Simple plugin to gather/monitor alarms generated in AWS. - [awsalarms](https://github.com/vipinvkmenon/awsalarms) - Simple plugin to gather/monitor alarms generated in AWS.
- [octoprint](https://github.com/BattleBas/octoprint-telegraf-plugin) - Gather 3d print information from the octoprint API. - [octoprint](https://github.com/BattleBas/octoprint-telegraf-plugin) - Gather 3d print information from the octoprint API.
- [open-hardware-monitor](https://github.com/marianob85/open_hardware_monitor-telegraf-plugin) - Gather sensors data provided by [Open Hardware Monitor](http://openhardwaremonitor.org)
- [rand](https://github.com/ssoroka/rand) - Generate random numbers
- [systemd-timings](https://github.com/pdmorrow/telegraf-execd-systemd-timings) - Gather systemd boot and unit timestamp metrics. - [systemd-timings](https://github.com/pdmorrow/telegraf-execd-systemd-timings) - Gather systemd boot and unit timestamp metrics.
- [twitter](https://github.com/inabagumi/twitter-telegraf-plugin) - Gather account information from Twitter accounts
- [youtube](https://github.com/inabagumi/youtube-telegraf-plugin) - Gather account information from YouTube channels
## Outputs
- [kinesis](https://github.com/morfien101/telegraf-output-kinesis) - Aggregation and compression of metrics to send to Amazon Kinesis.
## Processors
- [geoip](https://github.com/a-bali/telegraf-geoip) - Add GeoIP information to IP addresses.

View File

@ -94,6 +94,8 @@ These builds are generated from the master branch:
- [telegraf-nightly_linux_s390x.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_s390x.tar.gz) - [telegraf-nightly_linux_s390x.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_s390x.tar.gz)
- [telegraf_nightly_s390x.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_s390x.deb) - [telegraf_nightly_s390x.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_s390x.deb)
- [telegraf-nightly.s390x.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.s390x.rpm) - [telegraf-nightly.s390x.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.s390x.rpm)
- [telegraf_nightly_ppc64el.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_ppc64el.deb)
- [telegraf-nightly.ppc64le.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.ppc64le.rpm)
- [telegraf-nightly_windows_amd64.zip](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_windows_amd64.zip) - [telegraf-nightly_windows_amd64.zip](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_windows_amd64.zip)
- [telegraf-nightly_windows_i386.zip](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_windows_i386.zip) - [telegraf-nightly_windows_i386.zip](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_windows_i386.zip)
- [telegraf-nightly.x86_64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.x86_64.rpm) - [telegraf-nightly.x86_64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.x86_64.rpm)
@ -212,6 +214,7 @@ For documentation on the latest development code see the [documentation index][d
* [influxdb](./plugins/inputs/influxdb) * [influxdb](./plugins/inputs/influxdb)
* [influxdb_listener](./plugins/inputs/influxdb_listener) * [influxdb_listener](./plugins/inputs/influxdb_listener)
* [influxdb_v2_listener](./plugins/inputs/influxdb_v2_listener) * [influxdb_v2_listener](./plugins/inputs/influxdb_v2_listener)
* [intel_powerstat](plugins/inputs/intel_powerstat)
* [intel_rdt](./plugins/inputs/intel_rdt) * [intel_rdt](./plugins/inputs/intel_rdt)
* [internal](./plugins/inputs/internal) * [internal](./plugins/inputs/internal)
* [interrupts](./plugins/inputs/interrupts) * [interrupts](./plugins/inputs/interrupts)
@ -430,6 +433,7 @@ For documentation on the latest development code see the [documentation index][d
* [instrumental](./plugins/outputs/instrumental) * [instrumental](./plugins/outputs/instrumental)
* [kafka](./plugins/outputs/kafka) * [kafka](./plugins/outputs/kafka)
* [librato](./plugins/outputs/librato) * [librato](./plugins/outputs/librato)
* [logz.io](./plugins/outputs/logzio)
* [mqtt](./plugins/outputs/mqtt) * [mqtt](./plugins/outputs/mqtt)
* [nats](./plugins/outputs/nats) * [nats](./plugins/outputs/nats)
* [newrelic](./plugins/outputs/newrelic) * [newrelic](./plugins/outputs/newrelic)
@ -446,3 +450,4 @@ For documentation on the latest development code see the [documentation index][d
* [warp10](./plugins/outputs/warp10) * [warp10](./plugins/outputs/warp10)
* [wavefront](./plugins/outputs/wavefront) * [wavefront](./plugins/outputs/wavefront)
* [sumologic](./plugins/outputs/sumologic) * [sumologic](./plugins/outputs/sumologic)
* [yandex_cloud_monitoring](./plugins/outputs/yandex_cloud_monitoring)

View File

@ -12,7 +12,7 @@ clone_folder: C:\gopath\src\github.com\influxdata\telegraf
environment: environment:
GOPATH: C:\gopath GOPATH: C:\gopath
stack: go 1.14 stack: go 1.15
platform: x64 platform: x64

View File

@ -1 +1 @@
1.16.2 1.17.2

View File

@ -53,7 +53,7 @@ services:
- "6432:6432" - "6432:6432"
postgres: postgres:
image: postgres:alpine image: postgres:alpine
environment: environment:
- POSTGRES_HOST_AUTH_METHOD=trust - POSTGRES_HOST_AUTH_METHOD=trust
ports: ports:
- "5432:5432" - "5432:5432"

View File

@ -17,7 +17,7 @@ require (
github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee
github.com/Microsoft/ApplicationInsights-Go v0.4.2 github.com/Microsoft/ApplicationInsights-Go v0.4.2
github.com/Microsoft/go-winio v0.4.9 // indirect github.com/Microsoft/go-winio v0.4.9 // indirect
github.com/Shopify/sarama v1.27.1 github.com/Shopify/sarama v1.27.2
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 // indirect github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 // indirect
github.com/aerospike/aerospike-client-go v1.27.0 github.com/aerospike/aerospike-client-go v1.27.0
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4
@ -49,9 +49,9 @@ require (
github.com/eclipse/paho.mqtt.golang v1.2.0 github.com/eclipse/paho.mqtt.golang v1.2.0
github.com/ericchiang/k8s v1.2.0 github.com/ericchiang/k8s v1.2.0
github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32
github.com/glinton/ping v0.1.4-0.20200311211934-5ac87da8cd96
github.com/go-logfmt/logfmt v0.4.0 github.com/go-logfmt/logfmt v0.4.0
github.com/go-ole/go-ole v1.2.1 // indirect github.com/go-ole/go-ole v1.2.1 // indirect
github.com/go-ping/ping v0.0.0-20201115131931-3300c582a663
github.com/go-redis/redis v6.15.9+incompatible github.com/go-redis/redis v6.15.9+incompatible
github.com/go-sql-driver/mysql v1.5.0 github.com/go-sql-driver/mysql v1.5.0
github.com/goburrow/modbus v0.1.0 github.com/goburrow/modbus v0.1.0
@ -61,10 +61,13 @@ require (
github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d
github.com/golang/geo v0.0.0-20190916061304-5b978397cfec github.com/golang/geo v0.0.0-20190916061304-5b978397cfec
github.com/golang/protobuf v1.3.5 github.com/golang/protobuf v1.3.5
github.com/golang/snappy v0.0.1
github.com/google/go-cmp v0.5.2 github.com/google/go-cmp v0.5.2
github.com/google/go-github/v32 v32.1.0 github.com/google/go-github/v32 v32.1.0
github.com/gopcua/opcua v0.1.12 github.com/gopcua/opcua v0.1.12
github.com/gorilla/mux v1.6.2 github.com/gorilla/mux v1.6.2
github.com/gosnmp/gosnmp v1.29.0
github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect
github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0 github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0
github.com/hashicorp/consul v1.2.1 github.com/hashicorp/consul v1.2.1
@ -79,7 +82,7 @@ require (
github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 // indirect github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 // indirect
github.com/jackc/pgx v3.6.0+incompatible github.com/jackc/pgx v3.6.0+incompatible
github.com/kardianos/service v1.0.0 github.com/kardianos/service v1.0.0
github.com/karrick/godirwalk v1.12.0 github.com/karrick/godirwalk v1.16.1
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51
github.com/kubernetes/apimachinery v0.0.0-20190119020841-d41becfba9ee github.com/kubernetes/apimachinery v0.0.0-20190119020841-d41becfba9ee
github.com/kylelemons/godebug v1.1.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect
@ -87,7 +90,7 @@ require (
github.com/lib/pq v1.3.0 // indirect github.com/lib/pq v1.3.0 // indirect
github.com/mailru/easyjson v0.0.0-20180717111219-efc7eb8984d6 // indirect github.com/mailru/easyjson v0.0.0-20180717111219-efc7eb8984d6 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.1 github.com/matttproud/golang_protobuf_extensions v1.0.1
github.com/mdlayher/apcupsd v0.0.0-20190314144147-eb3dd99a75fe github.com/mdlayher/apcupsd v0.0.0-20200608131503-2bf01da7bf1b
github.com/miekg/dns v1.0.14 github.com/miekg/dns v1.0.14
github.com/mitchellh/go-testing-interface v1.0.0 // indirect github.com/mitchellh/go-testing-interface v1.0.0 // indirect
github.com/morikuni/aec v1.0.0 // indirect github.com/morikuni/aec v1.0.0 // indirect
@ -95,8 +98,8 @@ require (
github.com/naoina/go-stringutil v0.1.0 // indirect github.com/naoina/go-stringutil v0.1.0 // indirect
github.com/nats-io/nats-server/v2 v2.1.4 github.com/nats-io/nats-server/v2 v2.1.4
github.com/nats-io/nats.go v1.9.1 github.com/nats-io/nats.go v1.9.1
github.com/newrelic/newrelic-telemetry-sdk-go v0.2.0 github.com/newrelic/newrelic-telemetry-sdk-go v0.5.1
github.com/nsqio/go-nsq v1.0.7 github.com/nsqio/go-nsq v1.0.8
github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029 github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029
github.com/opencontainers/go-digest v1.0.0-rc1 // indirect github.com/opencontainers/go-digest v1.0.0-rc1 // indirect
github.com/opencontainers/image-spec v1.0.1 // indirect github.com/opencontainers/image-spec v1.0.1 // indirect
@ -108,6 +111,8 @@ require (
github.com/prometheus/client_model v0.2.0 github.com/prometheus/client_model v0.2.0
github.com/prometheus/common v0.9.1 github.com/prometheus/common v0.9.1
github.com/prometheus/procfs v0.0.8 github.com/prometheus/procfs v0.0.8
github.com/prometheus/prometheus v2.5.0+incompatible
github.com/riemann/riemann-go-client v0.5.0
github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664 github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664
github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec // indirect github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec // indirect
github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b // indirect github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b // indirect
@ -122,7 +127,7 @@ require (
github.com/tidwall/gjson v1.6.0 github.com/tidwall/gjson v1.6.0
github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e // indirect github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e // indirect
github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc // indirect github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc // indirect
github.com/vjeantet/grok v1.0.0 github.com/vjeantet/grok v1.0.1
github.com/vmware/govmomi v0.19.0 github.com/vmware/govmomi v0.19.0
github.com/wavefronthq/wavefront-sdk-go v0.9.2 github.com/wavefronthq/wavefront-sdk-go v0.9.2
github.com/wvanbergen/kafka v0.0.0-20171203153745-e2edea948ddf github.com/wvanbergen/kafka v0.0.0-20171203153745-e2edea948ddf
@ -141,15 +146,15 @@ require (
golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4 golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4
gonum.org/v1/gonum v0.6.2 // indirect gonum.org/v1/gonum v0.6.2 // indirect
google.golang.org/api v0.20.0 google.golang.org/api v0.20.0
google.golang.org/genproto v0.0.0-20200317114155-1f3552e48f24 google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884
google.golang.org/grpc v1.28.0 google.golang.org/grpc v1.33.1
gopkg.in/fatih/pool.v2 v2.0.0 // indirect gopkg.in/fatih/pool.v2 v2.0.0 // indirect
gopkg.in/gorethink/gorethink.v3 v3.0.5 gopkg.in/gorethink/gorethink.v3 v3.0.5
gopkg.in/ldap.v3 v3.1.0 gopkg.in/ldap.v3 v3.1.0
gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce
gopkg.in/olivere/elastic.v5 v5.0.70 gopkg.in/olivere/elastic.v5 v5.0.70
gopkg.in/yaml.v2 v2.2.8 gopkg.in/yaml.v2 v2.2.8
gotest.tools v2.2.0+incompatible // indirect gotest.tools v2.2.0+incompatible
honnef.co/go/tools v0.0.1-2020.1.3 // indirect honnef.co/go/tools v0.0.1-2020.1.3 // indirect
k8s.io/apimachinery v0.17.1 // indirect k8s.io/apimachinery v0.17.1 // indirect
modernc.org/sqlite v1.7.4 modernc.org/sqlite v1.7.4

View File

@ -86,8 +86,8 @@ github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb0
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/sarama v1.27.1 h1:iUlzHymqWsITyttu6KxazcAz8WEj5FqcwFK/oEi7rE8= github.com/Shopify/sarama v1.27.2 h1:1EyY1dsxNDUQEv0O/4TsjosHI2CgB1uo9H/v56xzTxc=
github.com/Shopify/sarama v1.27.1/go.mod h1:g5s5osgELxgM+Md9Qni9rzo7Rbt+vvFQI4bt/Mc93II= github.com/Shopify/sarama v1.27.2/go.mod h1:g5s5osgELxgM+Md9Qni9rzo7Rbt+vvFQI4bt/Mc93II=
github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8= github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8=
@ -103,6 +103,7 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 h1:Hs82Z41s6SdL1C
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9 h1:FXrPTd8Rdlc94dKccl7KPmdmIbVh/OjelJ8/vgMRzcQ= github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9 h1:FXrPTd8Rdlc94dKccl7KPmdmIbVh/OjelJ8/vgMRzcQ=
github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9/go.mod h1:eliMa/PW+RDr2QLWRmLH1R1ZA4RInpmvOzDDXtaIZkc= github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9/go.mod h1:eliMa/PW+RDr2QLWRmLH1R1ZA4RInpmvOzDDXtaIZkc=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/apache/thrift v0.12.0 h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs= github.com/apache/thrift v0.12.0 h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs=
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3 h1:Bmjk+DjIi3tTAU0wxGaFbfjGUqlxxSXARq9A96Kgoos= github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3 h1:Bmjk+DjIi3tTAU0wxGaFbfjGUqlxxSXARq9A96Kgoos=
@ -204,10 +205,9 @@ github.com/frankban/quicktest v1.10.2/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew=
github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I=
github.com/glinton/ping v0.1.4-0.20200311211934-5ac87da8cd96 h1:YpooqMW354GG47PXNBiaCv6yCQizyP3MXD9NUPrCEQ8=
github.com/glinton/ping v0.1.4-0.20200311211934-5ac87da8cd96/go.mod h1:uY+1eqFUyotrQxF1wYFNtMeHp/swbYRsoGzfcPZ8x3o=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
@ -223,6 +223,8 @@ github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+
github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
github.com/go-ping/ping v0.0.0-20201115131931-3300c582a663 h1:jI2GiiRh+pPbey52EVmbU6kuLiXqwy4CXZ4gwUBj8Y0=
github.com/go-ping/ping v0.0.0-20201115131931-3300c582a663/go.mod h1:35JbSyV/BYqHwwRA6Zr1uVDm1637YlNOU61wI797NPI=
github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg= github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg=
github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs= github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs=
@ -256,7 +258,9 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v0.0.0-20170307001533-c9c7427a2a70/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
@ -280,6 +284,7 @@ github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM= github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-github/v32 v32.1.0 h1:GWkQOdXqviCPx7Q7Fj+KyPoGm4SwHRh8rheoPhd27II= github.com/google/go-github/v32 v32.1.0 h1:GWkQOdXqviCPx7Q7Fj+KyPoGm4SwHRh8rheoPhd27II=
@ -298,6 +303,8 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4 h1:hU4mGcQI4DaAYW+IbTun+2qEZVFxK0ySjQLTbS0VQKc= github.com/googleapis/gax-go/v2 v2.0.4 h1:hU4mGcQI4DaAYW+IbTun+2qEZVFxK0ySjQLTbS0VQKc=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
@ -309,6 +316,10 @@ github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk= github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk=
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gosnmp/gosnmp v1.29.0 h1:fEkud7oiYVzR64L+/BQA7uvp+7COI9+XkrUQi8JunYM=
github.com/gosnmp/gosnmp v1.29.0/go.mod h1:Ux0YzU4nV5yDET7dNIijd0VST0BCy8ijBf+gTVFQeaM=
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8=
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0 h1:U0KvGD9CJIl1nbgu9yLsfWxMT6WqL8fG0IBB7RvOZZQ= github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0 h1:U0KvGD9CJIl1nbgu9yLsfWxMT6WqL8fG0IBB7RvOZZQ=
@ -380,8 +391,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V
github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
github.com/kardianos/service v1.0.0 h1:HgQS3mFfOlyntWX8Oke98JcJLqt1DBcHR4kxShpYef0= github.com/kardianos/service v1.0.0 h1:HgQS3mFfOlyntWX8Oke98JcJLqt1DBcHR4kxShpYef0=
github.com/kardianos/service v1.0.0/go.mod h1:8CzDhVuCuugtsHyZoTvsOBuvonN/UDBvl0kH+BUxvbo= github.com/kardianos/service v1.0.0/go.mod h1:8CzDhVuCuugtsHyZoTvsOBuvonN/UDBvl0kH+BUxvbo=
github.com/karrick/godirwalk v1.12.0 h1:nkS4xxsjiZMvVlazd0mFyiwD4BR9f3m6LXGhM2TUx3Y= github.com/karrick/godirwalk v1.16.1 h1:DynhcF+bztK8gooS0+NDJFrdNZjJ3gzVzC545UNA9iw=
github.com/karrick/godirwalk v1.12.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
@ -418,8 +429,8 @@ github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHX
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mdlayher/apcupsd v0.0.0-20190314144147-eb3dd99a75fe h1:yMrL+YorbzaBpj/h3BbLMP+qeslPZYMbzcpHFBNy1Yk= github.com/mdlayher/apcupsd v0.0.0-20200608131503-2bf01da7bf1b h1:Kcr+kPbkWZHFHXwl87quXUAmavS4/IMgu2zck3aiE7k=
github.com/mdlayher/apcupsd v0.0.0-20190314144147-eb3dd99a75fe/go.mod h1:y3mw3VG+t0m20OMqpG8RQqw8cDXvShVb+L8Z8FEnebw= github.com/mdlayher/apcupsd v0.0.0-20200608131503-2bf01da7bf1b/go.mod h1:WYK/Z/aXq9cbMFIL5ihcA4sX/r/3/WCas/Qvs/2fXcA=
github.com/mdlayher/genetlink v1.0.0 h1:OoHN1OdyEIkScEmRgxLEe2M9U8ClMytqA5niynLtfj0= github.com/mdlayher/genetlink v1.0.0 h1:OoHN1OdyEIkScEmRgxLEe2M9U8ClMytqA5niynLtfj0=
github.com/mdlayher/genetlink v1.0.0/go.mod h1:0rJ0h4itni50A86M2kHcgS85ttZazNt7a8H2a2cw0Gc= github.com/mdlayher/genetlink v1.0.0/go.mod h1:0rJ0h4itni50A86M2kHcgS85ttZazNt7a8H2a2cw0Gc=
github.com/mdlayher/netlink v0.0.0-20190409211403-11939a169225/go.mod h1:eQB3mZE4aiYnlUsyGGCOpPETfdQq4Jhsgf1fk3cwQaA= github.com/mdlayher/netlink v0.0.0-20190409211403-11939a169225/go.mod h1:eQB3mZE4aiYnlUsyGGCOpPETfdQq4Jhsgf1fk3cwQaA=
@ -462,12 +473,12 @@ github.com/nats-io/nkeys v0.1.3 h1:6JrEfig+HzTH85yxzhSVbjHRJv9cn0p6n3IngIcM5/k=
github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/newrelic/newrelic-telemetry-sdk-go v0.2.0 h1:W8+lNIfAldCScGiikToSprbf3DCaMXk0VIM9l73BIpY= github.com/newrelic/newrelic-telemetry-sdk-go v0.5.1 h1:9YEHXplqlVkOltThchh+RxeODvTb1TBvQ1181aXg3pY=
github.com/newrelic/newrelic-telemetry-sdk-go v0.2.0/go.mod h1:G9MqE/cHGv3Hx3qpYhfuyFUsGx2DpVcGi1iJIqTg+JQ= github.com/newrelic/newrelic-telemetry-sdk-go v0.5.1/go.mod h1:2kY6OeOxrJ+RIQlVjWDc/pZlT3MIf30prs6drzMfJ6E=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nsqio/go-nsq v1.0.7 h1:O0pIZJYTf+x7cZBA0UMY8WxFG79lYTURmWzAAh48ljY= github.com/nsqio/go-nsq v1.0.8 h1:3L2F8tNLlwXXlp2slDUrUWSBn2O3nMh8R1/KEDFTHPk=
github.com/nsqio/go-nsq v1.0.7/go.mod h1:XP5zaUs3pqf+Q71EqUJs3HYfBIqfK6G83WQMdNN+Ito= github.com/nsqio/go-nsq v1.0.8/go.mod h1:vKq36oyeVXgsS5Q8YEO7WghqidAVXQlcFxzQbQTuDEY=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
@ -532,12 +543,17 @@ github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8= github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/prometheus v2.5.0+incompatible h1:7QPitgO2kOFG8ecuRn9O/4L9+10He72rVRJvMXrE9Hg=
github.com/prometheus/prometheus v2.5.0+incompatible/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ=
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/riemann/riemann-go-client v0.5.0 h1:yPP7tz1vSYJkSZvZFCsMiDsHHXX57x8/fEX3qyEXuAA=
github.com/riemann/riemann-go-client v0.5.0/go.mod h1:FMiaOL8dgBnRfgwENzV0xlYJ2eCbV1o7yqVwOBLbShQ=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664 h1:gvolwzuDhul9qK6/oHqxCHD5TEYfsWNBGidOeG6kvpk= github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664 h1:gvolwzuDhul9qK6/oHqxCHD5TEYfsWNBGidOeG6kvpk=
github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
@ -584,8 +600,8 @@ github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e h1:f1yevOHP+Su
github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc h1:R83G5ikgLMxrBvLh22JhdfI8K6YXEPHx5P03Uu3DRs4= github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc h1:R83G5ikgLMxrBvLh22JhdfI8K6YXEPHx5P03Uu3DRs4=
github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
github.com/vjeantet/grok v1.0.0 h1:uxMqatJP6MOFXsj6C1tZBnqqAThQEeqnizUZ48gSJQQ= github.com/vjeantet/grok v1.0.1 h1:2rhIR7J4gThTgcZ1m2JY4TrJZNgjn985U28kT2wQrJ4=
github.com/vjeantet/grok v1.0.0/go.mod h1:/FWYEVYekkm+2VjcFmO9PufDU5FgXHUz9oy2EGqmQBo= github.com/vjeantet/grok v1.0.1/go.mod h1:ax1aAchzC6/QMXMcyzHQGZWaW1l195+uMYIkCWPCNIo=
github.com/vmware/govmomi v0.19.0 h1:CR6tEByWCPOnRoRyhLzuHaU+6o2ybF3qufNRWS/MGrY= github.com/vmware/govmomi v0.19.0 h1:CR6tEByWCPOnRoRyhLzuHaU+6o2ybF3qufNRWS/MGrY=
github.com/vmware/govmomi v0.19.0/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= github.com/vmware/govmomi v0.19.0/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU=
github.com/wavefronthq/wavefront-sdk-go v0.9.2 h1:/LvWgZYNjHFUg+ZUX+qv+7e+M8sEMi0lM15zPp681Gk= github.com/wavefronthq/wavefront-sdk-go v0.9.2 h1:/LvWgZYNjHFUg+ZUX+qv+7e+M8sEMi0lM15zPp681Gk=
@ -673,6 +689,7 @@ golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73r
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@ -681,7 +698,6 @@ golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191003171128-d98b1b443823/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191003171128-d98b1b443823/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@ -693,6 +709,7 @@ golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI= golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200904194848-62affa334b73 h1:MXfv8rhZWmFeqX3GNZRsd6vOLoaCHjYEX3qkRo3YBUA= golang.org/x/net v0.0.0-20200904194848-62affa334b73 h1:MXfv8rhZWmFeqX3GNZRsd6vOLoaCHjYEX3qkRo3YBUA=
golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@ -851,8 +868,8 @@ google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvx
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200317114155-1f3552e48f24 h1:IGPykv426z7LZSVPlaPufOyphngM4at5uZ7x5alaFvE= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884 h1:fiNLklpBwWK1mth30Hlwk+fcdBmIALlgF5iy77O37Ig=
google.golang.org/genproto v0.0.0-20200317114155-1f3552e48f24/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
@ -863,8 +880,8 @@ google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQ
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.28.0 h1:bO/TA4OxCOummhSf10siHuG7vJOiwh7SpRpFZDkOgl4= google.golang.org/grpc v1.33.1 h1:DGeFlSan2f+WEtCERJ4J9GJWk15TxUi8QGagfI87Xyc=
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d h1:TxyelI5cVkbREznMhfzycHdkp5cLA7DpE+GKjSslYhM= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d h1:TxyelI5cVkbREznMhfzycHdkp5cLA7DpE+GKjSslYhM=
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw=
@ -905,8 +922,11 @@ gopkg.in/olivere/elastic.v5 v5.0.70/go.mod h1:FylZT6jQWtfHsicejzOm3jIMVPOAksa80i
gopkg.in/tomb.v1 v1.0.0-20140529071818-c131134a1947/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/tomb.v1 v1.0.0-20140529071818-c131134a1947/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 h1:yiW+nvdHb9LVqSHQBXfZCieqV4fzYhNBql77zY0ykqs=
gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637/go.mod h1:BHsqpu/nsuzkT5BpiH1EMZPLyqSMM8JbIavyFACoFNk=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c= gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c=


@ -1,10 +1,9 @@
package metric package metric
import ( import (
"hash/fnv" "encoding/binary"
"io" "hash/maphash"
"sort" "sort"
"strconv"
"time" "time"
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf"
@ -23,14 +22,17 @@ import (
// + cpu,host=localhost idle_time=42,usage_time=42 // + cpu,host=localhost idle_time=42,usage_time=42
func NewSeriesGrouper() *SeriesGrouper { func NewSeriesGrouper() *SeriesGrouper {
return &SeriesGrouper{ return &SeriesGrouper{
metrics: make(map[uint64]telegraf.Metric), metrics: make(map[uint64]telegraf.Metric),
ordered: []telegraf.Metric{}, ordered: []telegraf.Metric{},
hashSeed: maphash.MakeSeed(),
} }
} }
type SeriesGrouper struct { type SeriesGrouper struct {
metrics map[uint64]telegraf.Metric metrics map[uint64]telegraf.Metric
ordered []telegraf.Metric ordered []telegraf.Metric
hashSeed maphash.Seed
} }
// Add adds a field key and value to the series. // Add adds a field key and value to the series.
@ -41,8 +43,15 @@ func (g *SeriesGrouper) Add(
field string, field string,
fieldValue interface{}, fieldValue interface{},
) error { ) error {
taglist := make([]*telegraf.Tag, 0, len(tags))
for k, v := range tags {
taglist = append(taglist,
&telegraf.Tag{Key: k, Value: v})
}
sort.Slice(taglist, func(i, j int) bool { return taglist[i].Key < taglist[j].Key })
var err error var err error
id := groupID(measurement, tags, tm) id := groupID(g.hashSeed, measurement, taglist, tm)
metric := g.metrics[id] metric := g.metrics[id]
if metric == nil { if metric == nil {
metric, err = New(measurement, tags, map[string]interface{}{field: fieldValue}, tm) metric, err = New(measurement, tags, map[string]interface{}{field: fieldValue}, tm)
@ -57,30 +66,46 @@ func (g *SeriesGrouper) Add(
return nil return nil
} }
// AddMetric adds a metric to the series, merging with any previous matching metrics.
func (g *SeriesGrouper) AddMetric(
metric telegraf.Metric,
) {
id := groupID(g.hashSeed, metric.Name(), metric.TagList(), metric.Time())
m := g.metrics[id]
if m == nil {
m = metric.Copy()
g.metrics[id] = m
g.ordered = append(g.ordered, m)
} else {
for _, f := range metric.FieldList() {
m.AddField(f.Key, f.Value)
}
}
}
// Metrics returns the metrics grouped by series and time. // Metrics returns the metrics grouped by series and time.
func (g *SeriesGrouper) Metrics() []telegraf.Metric { func (g *SeriesGrouper) Metrics() []telegraf.Metric {
return g.ordered return g.ordered
} }
func groupID(measurement string, tags map[string]string, tm time.Time) uint64 { func groupID(seed maphash.Seed, measurement string, taglist []*telegraf.Tag, tm time.Time) uint64 {
h := fnv.New64a() var mh maphash.Hash
h.Write([]byte(measurement)) mh.SetSeed(seed)
h.Write([]byte("\n"))
mh.WriteString(measurement)
mh.WriteByte(0)
taglist := make([]*telegraf.Tag, 0, len(tags))
for k, v := range tags {
taglist = append(taglist,
&telegraf.Tag{Key: k, Value: v})
}
sort.Slice(taglist, func(i, j int) bool { return taglist[i].Key < taglist[j].Key })
for _, tag := range taglist { for _, tag := range taglist {
h.Write([]byte(tag.Key)) mh.WriteString(tag.Key)
h.Write([]byte("\n")) mh.WriteByte(0)
h.Write([]byte(tag.Value)) mh.WriteString(tag.Value)
h.Write([]byte("\n")) mh.WriteByte(0)
} }
h.Write([]byte("\n")) mh.WriteByte(0)
io.WriteString(h, strconv.FormatInt(tm.UnixNano(), 10)) var tsBuf [8]byte
return h.Sum64() binary.BigEndian.PutUint64(tsBuf[:], uint64(tm.UnixNano()))
mh.Write(tsBuf[:])
return mh.Sum64()
} }
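For orientation, a minimal usage sketch (not part of this commit) of what the grouper does with the new seeded hashing: fields added with the same measurement, tag set and timestamp merge into a single metric. It assumes the `Add` signature implied by the hunk above (measurement, tags, timestamp, field name, field value); the values are illustrative.

```go
// Minimal sketch, not part of this commit.
package main

import (
	"fmt"
	"time"

	"github.com/influxdata/telegraf/metric"
)

func main() {
	g := metric.NewSeriesGrouper()
	now := time.Now()
	tags := map[string]string{"host": "localhost"}

	// Same measurement, tags and timestamp: both fields merge into one metric.
	_ = g.Add("cpu", tags, now, "idle_time", 42)
	_ = g.Add("cpu", tags, now, "usage_time", 42)

	for _, m := range g.Metrics() {
		fmt.Println(m.Name(), len(m.FieldList())) // cpu 2
	}
}
```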


@ -14,6 +14,7 @@ type ClientConfig struct {
TLSCert string `toml:"tls_cert"` TLSCert string `toml:"tls_cert"`
TLSKey string `toml:"tls_key"` TLSKey string `toml:"tls_key"`
InsecureSkipVerify bool `toml:"insecure_skip_verify"` InsecureSkipVerify bool `toml:"insecure_skip_verify"`
ServerName string `toml:"tls_server_name"`
// Deprecated in 1.7; use TLS variables above // Deprecated in 1.7; use TLS variables above
SSLCA string `toml:"ssl_ca"` SSLCA string `toml:"ssl_ca"`
@ -45,11 +46,14 @@ func (c *ClientConfig) TLSConfig() (*tls.Config, error) {
c.TLSKey = c.SSLKey c.TLSKey = c.SSLKey
} }
// TODO: return default tls.Config; plugins should not call if they don't // This check returns a nil (aka, "use the default")
// want TLS, this will require using another option to determine. In the // tls.Config if no field is set that would have an effect on
// case of an HTTP plugin, you could use `https`. Other plugins may need // a TLS connection. That is, any of:
// the dedicated option `TLSEnable`. // * client certificate settings,
if c.TLSCA == "" && c.TLSKey == "" && c.TLSCert == "" && !c.InsecureSkipVerify { // * peer certificate authorities,
// * disabled security, or
// * an SNI server name.
if c.TLSCA == "" && c.TLSKey == "" && c.TLSCert == "" && !c.InsecureSkipVerify && c.ServerName == "" {
return nil, nil return nil, nil
} }
@ -73,6 +77,10 @@ func (c *ClientConfig) TLSConfig() (*tls.Config, error) {
} }
} }
if c.ServerName != "" {
tlsConfig.ServerName = c.ServerName
}
return tlsConfig, nil return tlsConfig, nil
} }
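A hedged sketch of the behavioural change: with only `tls_server_name` set, `TLSConfig()` now returns a non-nil config carrying the SNI name instead of nil ("use the default"). The hostname below is illustrative.

```go
// Hedged sketch; the server name is illustrative.
package main

import (
	"fmt"

	"github.com/influxdata/telegraf/plugins/common/tls"
)

func main() {
	cc := tls.ClientConfig{ServerName: "metrics.internal.example"}

	cfg, err := cc.TLSConfig()
	if err != nil {
		panic(err)
	}
	// Before this change cfg would have been nil here; now it carries the SNI name.
	if cfg != nil {
		fmt.Println("SNI server name:", cfg.ServerName)
	}
}
```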


@ -23,6 +23,14 @@ alternative method for collecting repository information.
## Timeout for HTTP requests. ## Timeout for HTTP requests.
# http_timeout = "5s" # http_timeout = "5s"
## List of additional fields to query.
## NOTE: Getting those fields might involve issuing additional API-calls, so please
## make sure you do not exceed the rate-limit of GitHub.
##
## Available fields are:
## - pull-requests -- number of open and closed pull requests (2 API-calls per repository)
# additional_fields = []
``` ```
### Metrics ### Metrics
@ -52,11 +60,21 @@ When the [internal][] input is enabled:
- remaining - How many requests you have remaining (per hour) - remaining - How many requests you have remaining (per hour)
- blocks - How many requests have been blocked due to rate limit - blocks - How many requests have been blocked due to rate limit
When specifying `additional_fields`, the plugin will collect the specified properties.
**NOTE:** Querying these additional fields might require issuing additional API-calls.
Please make sure you don't exceed the GitHub rate-limit by specifying too many additional fields.
The following lists the available options with the required API-calls and the resulting fields:
- "pull-requests" (2 API-calls per repository)
- fields:
- open_pull_requests (int)
- closed_pull_requests (int)
### Example Output ### Example Output
``` ```
github_repository,language=Go,license=MIT\ License,name=telegraf,owner=influxdata forks=2679i,networks=2679i,open_issues=794i,size=23263i,stars=7091i,subscribers=316i,watchers=7091i 1563901372000000000 github_repository,language=Go,license=MIT\ License,name=telegraf,owner=influxdata forks=2679i,networks=2679i,open_issues=794i,size=23263i,stars=7091i,subscribers=316i,watchers=7091i 1563901372000000000
internal_github,access_token=Unauthenticated rate_limit_remaining=59i,rate_limit_limit=60i,rate_limit_blocks=0i 1552653551000000000 internal_github,access_token=Unauthenticated closed_pull_requests=3522i,rate_limit_remaining=59i,rate_limit_limit=60i,rate_limit_blocks=0i,open_pull_requests=260i 1552653551000000000
``` ```
[GitHub]: https://www.github.com [GitHub]: https://www.github.com


@ -20,6 +20,7 @@ import (
type GitHub struct { type GitHub struct {
Repositories []string `toml:"repositories"` Repositories []string `toml:"repositories"`
AccessToken string `toml:"access_token"` AccessToken string `toml:"access_token"`
AdditionalFields []string `toml:"additional_fields"`
EnterpriseBaseURL string `toml:"enterprise_base_url"` EnterpriseBaseURL string `toml:"enterprise_base_url"`
HTTPTimeout internal.Duration `toml:"http_timeout"` HTTPTimeout internal.Duration `toml:"http_timeout"`
githubClient *github.Client githubClient *github.Client
@ -46,6 +47,14 @@ const sampleConfig = `
## Timeout for HTTP requests. ## Timeout for HTTP requests.
# http_timeout = "5s" # http_timeout = "5s"
## List of additional fields to query.
## NOTE: Getting those fields might involve issuing additional API-calls, so please
## make sure you do not exceed the rate-limit of GitHub.
##
## Available fields are:
## - pull-requests -- number of open and closed pull requests (2 API-calls per repository)
# additional_fields = []
` `
// SampleConfig returns sample configuration for this plugin. // SampleConfig returns sample configuration for this plugin.
@ -97,7 +106,6 @@ func (g *GitHub) Gather(acc telegraf.Accumulator) error {
if g.githubClient == nil { if g.githubClient == nil {
githubClient, err := g.createGitHubClient(ctx) githubClient, err := g.createGitHubClient(ctx)
if err != nil { if err != nil {
return err return err
} }
@ -127,23 +135,35 @@ func (g *GitHub) Gather(acc telegraf.Accumulator) error {
} }
repositoryInfo, response, err := g.githubClient.Repositories.Get(ctx, owner, repository) repositoryInfo, response, err := g.githubClient.Repositories.Get(ctx, owner, repository)
g.handleRateLimit(response, err)
if _, ok := err.(*github.RateLimitError); ok {
g.RateLimitErrors.Incr(1)
}
if err != nil { if err != nil {
acc.AddError(err) acc.AddError(err)
return return
} }
g.RateLimit.Set(int64(response.Rate.Limit))
g.RateRemaining.Set(int64(response.Rate.Remaining))
now := time.Now() now := time.Now()
tags := getTags(repositoryInfo) tags := getTags(repositoryInfo)
fields := getFields(repositoryInfo) fields := getFields(repositoryInfo)
for _, field := range g.AdditionalFields {
addFields := make(map[string]interface{})
switch field {
case "pull-requests":
// Pull request properties
addFields, err = g.getPullRequestFields(ctx, owner, repository)
if err != nil {
acc.AddError(err)
continue
}
default:
acc.AddError(fmt.Errorf("unknown additional field %q", field))
continue
}
for k, v := range addFields {
fields[k] = v
}
}
acc.AddFields("github_repository", fields, tags, now) acc.AddFields("github_repository", fields, tags, now)
}(repository, acc) }(repository, acc)
} }
@ -152,6 +172,15 @@ func (g *GitHub) Gather(acc telegraf.Accumulator) error {
return nil return nil
} }
func (g *GitHub) handleRateLimit(response *github.Response, err error) {
if err == nil {
g.RateLimit.Set(int64(response.Rate.Limit))
g.RateRemaining.Set(int64(response.Rate.Remaining))
} else if _, ok := err.(*github.RateLimitError); ok {
g.RateLimitErrors.Incr(1)
}
}
func splitRepositoryName(repositoryName string) (string, string, error) { func splitRepositoryName(repositoryName string) (string, string, error) {
splits := strings.SplitN(repositoryName, "/", 2) splits := strings.SplitN(repositoryName, "/", 2)
@ -191,6 +220,32 @@ func getFields(repositoryInfo *github.Repository) map[string]interface{} {
} }
} }
func (g *GitHub) getPullRequestFields(ctx context.Context, owner, repo string) (map[string]interface{}, error) {
options := github.SearchOptions{
TextMatch: false,
ListOptions: github.ListOptions{
PerPage: 100,
Page: 1,
},
}
classes := []string{"open", "closed"}
fields := make(map[string]interface{})
for _, class := range classes {
q := fmt.Sprintf("repo:%s/%s is:pr is:%s", owner, repo, class)
searchResult, response, err := g.githubClient.Search.Issues(ctx, q, &options)
g.handleRateLimit(response, err)
if err != nil {
return fields, err
}
f := fmt.Sprintf("%s_pull_requests", class)
fields[f] = searchResult.GetTotal()
}
return fields, nil
}
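To make the loop above concrete, a small sketch (owner and repo are illustrative, not taken from the commit) of the two search queries issued per repository and the field names they produce:

```go
// Illustrative owner/repo; shows the search strings and resulting field names.
package main

import "fmt"

func main() {
	owner, repo := "influxdata", "telegraf"
	for _, class := range []string{"open", "closed"} {
		q := fmt.Sprintf("repo:%s/%s is:pr is:%s", owner, repo, class)
		field := fmt.Sprintf("%s_pull_requests", class)
		fmt.Println(q, "->", field)
	}
	// repo:influxdata/telegraf is:pr is:open -> open_pull_requests
	// repo:influxdata/telegraf is:pr is:closed -> closed_pull_requests
}
```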
func init() { func init() {
inputs.Add("github", func() telegraf.Input { inputs.Add("github", func() telegraf.Input {
return &GitHub{ return &GitHub{


@ -9,6 +9,8 @@ import (
// MockPlugin struct should be named the same as the Plugin // MockPlugin struct should be named the same as the Plugin
type MockPlugin struct { type MockPlugin struct {
mock.Mock mock.Mock
constructedVariable string
} }
// Description will appear directly above the plugin definition in the config file // Description will appear directly above the plugin definition in the config file
@ -21,6 +23,12 @@ func (m *MockPlugin) SampleConfig() string {
return ` sampleVar = 'foo'` return ` sampleVar = 'foo'`
} }
// Init can be implemented to do one-time processing stuff like initializing variables
func (m *MockPlugin) Init() error {
m.constructedVariable = "I'm initialized now."
return nil
}
// Gather defines what data the plugin will gather. // Gather defines what data the plugin will gather.
func (m *MockPlugin) Gather(_a0 telegraf.Accumulator) error { func (m *MockPlugin) Gather(_a0 telegraf.Accumulator) error {
ret := m.Called(_a0) ret := m.Called(_a0)


@ -88,6 +88,15 @@ This plugin gathers the statistic data from MySQL server
# gather_file_events_stats = false # gather_file_events_stats = false
## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST
# gather_perf_events_statements = false
#
## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME
# gather_perf_sum_per_acc_per_event = false
#
## list of events to be gathered for gather_perf_sum_per_acc_per_event
## if the list is empty, all events will be gathered
# perf_summary_events = []
#
# gather_perf_events_statements = false # gather_perf_events_statements = false
## the limits for metrics form perf_events_statements ## the limits for metrics form perf_events_statements


@ -37,6 +37,8 @@ type Mysql struct {
GatherFileEventsStats bool `toml:"gather_file_events_stats"` GatherFileEventsStats bool `toml:"gather_file_events_stats"`
GatherPerfEventsStatements bool `toml:"gather_perf_events_statements"` GatherPerfEventsStatements bool `toml:"gather_perf_events_statements"`
GatherGlobalVars bool `toml:"gather_global_variables"` GatherGlobalVars bool `toml:"gather_global_variables"`
GatherPerfSummaryPerAccountPerEvent bool `toml:"gather_perf_sum_per_acc_per_event"`
PerfSummaryEvents []string `toml:"perf_summary_events"`
IntervalSlow string `toml:"interval_slow"` IntervalSlow string `toml:"interval_slow"`
MetricVersion int `toml:"metric_version"` MetricVersion int `toml:"metric_version"`
@ -121,6 +123,13 @@ const sampleConfig = `
# perf_events_statements_limit = 250 # perf_events_statements_limit = 250
# perf_events_statements_time_limit = 86400 # perf_events_statements_time_limit = 86400
## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME
# gather_perf_sum_per_acc_per_event = false
## list of events to be gathered for gather_perf_sum_per_acc_per_event
## if the list is empty, all events will be gathered
# perf_summary_events = []
## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES) ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES)
## example: interval_slow = "30m" ## example: interval_slow = "30m"
# interval_slow = "" # interval_slow = ""
@ -416,6 +425,38 @@ const (
FROM information_schema.tables FROM information_schema.tables
WHERE table_schema = 'performance_schema' AND table_name = ? WHERE table_schema = 'performance_schema' AND table_name = ?
` `
perfSummaryPerAccountPerEvent = `
SELECT
coalesce(user, "unknown"),
coalesce(host, "unknown"),
coalesce(event_name, "unknown"),
count_star,
sum_timer_wait,
min_timer_wait,
avg_timer_wait,
max_timer_wait,
sum_lock_time,
sum_errors,
sum_warnings,
sum_rows_affected,
sum_rows_sent,
sum_rows_examined,
sum_created_tmp_disk_tables,
sum_created_tmp_tables,
sum_select_full_join,
sum_select_full_range_join,
sum_select_range,
sum_select_range_check,
sum_select_scan,
sum_sort_merge_passes,
sum_sort_range,
sum_sort_rows,
sum_sort_scan,
sum_no_index_used,
sum_no_good_index_used
FROM performance_schema.events_statements_summary_by_account_by_event_name
`
) )
func (m *Mysql) gatherServer(serv string, acc telegraf.Accumulator) error { func (m *Mysql) gatherServer(serv string, acc telegraf.Accumulator) error {
@ -491,6 +532,13 @@ func (m *Mysql) gatherServer(serv string, acc telegraf.Accumulator) error {
} }
} }
if m.GatherPerfSummaryPerAccountPerEvent {
err = m.gatherPerfSummaryPerAccountPerEvent(db, serv, acc)
if err != nil {
return err
}
}
if m.GatherTableIOWaits { if m.GatherTableIOWaits {
err = m.gatherPerfTableIOWaits(db, serv, acc) err = m.gatherPerfTableIOWaits(db, serv, acc)
if err != nil { if err != nil {
@ -1262,6 +1310,143 @@ func (m *Mysql) gatherInnoDBMetrics(db *sql.DB, serv string, acc telegraf.Accumu
return nil return nil
} }
// gatherPerfSummaryPerAccountPerEvent can be used to fetch enabled metrics from
// performance_schema.events_statements_summary_by_account_by_event_name
func (m *Mysql) gatherPerfSummaryPerAccountPerEvent(db *sql.DB, serv string, acc telegraf.Accumulator) error {
sqlQuery := perfSummaryPerAccountPerEvent
var rows *sql.Rows
var err error
var (
srcUser string
srcHost string
eventName string
countStar float64
sumTimerWait float64
minTimerWait float64
avgTimerWait float64
maxTimerWait float64
sumLockTime float64
sumErrors float64
sumWarnings float64
sumRowsAffected float64
sumRowsSent float64
sumRowsExamined float64
sumCreatedTmpDiskTables float64
sumCreatedTmpTables float64
sumSelectFullJoin float64
sumSelectFullRangeJoin float64
sumSelectRange float64
sumSelectRangeCheck float64
sumSelectScan float64
sumSortMergePasses float64
sumSortRange float64
sumSortRows float64
sumSortScan float64
sumNoIndexUsed float64
sumNoGoodIndexUsed float64
)
var events []interface{}
// if we have perf_summary_events set - select only listed events (adding filter criteria for rows)
if len(m.PerfSummaryEvents) > 0 {
sqlQuery += " WHERE EVENT_NAME IN ("
for i, eventName := range m.PerfSummaryEvents {
if i > 0 {
sqlQuery += ", "
}
sqlQuery += "?"
events = append(events, eventName)
}
sqlQuery += ")"
rows, err = db.Query(sqlQuery, events...)
} else {
// otherwise no filter, hence, select all rows
rows, err = db.Query(perfSummaryPerAccountPerEvent)
}
if err != nil {
return err
}
defer rows.Close()
// parse DSN and save server tag
servtag := getDSNTag(serv)
tags := map[string]string{"server": servtag}
for rows.Next() {
if err := rows.Scan(
&srcUser,
&srcHost,
&eventName,
&countStar,
&sumTimerWait,
&minTimerWait,
&avgTimerWait,
&maxTimerWait,
&sumLockTime,
&sumErrors,
&sumWarnings,
&sumRowsAffected,
&sumRowsSent,
&sumRowsExamined,
&sumCreatedTmpDiskTables,
&sumCreatedTmpTables,
&sumSelectFullJoin,
&sumSelectFullRangeJoin,
&sumSelectRange,
&sumSelectRangeCheck,
&sumSelectScan,
&sumSortMergePasses,
&sumSortRange,
&sumSortRows,
&sumSortScan,
&sumNoIndexUsed,
&sumNoGoodIndexUsed,
); err != nil {
return err
}
srcUser = strings.ToLower(srcUser)
srcHost = strings.ToLower(srcHost)
sqlLWTags := copyTags(tags)
sqlLWTags["src_user"] = srcUser
sqlLWTags["src_host"] = srcHost
sqlLWTags["event"] = eventName
sqlLWFields := map[string]interface{}{
"count_star": countStar,
"sum_timer_wait": sumTimerWait,
"min_timer_wait": minTimerWait,
"avg_timer_wait": avgTimerWait,
"max_timer_wait": maxTimerWait,
"sum_lock_time": sumLockTime,
"sum_errors": sumErrors,
"sum_warnings": sumWarnings,
"sum_rows_affected": sumRowsAffected,
"sum_rows_sent": sumRowsSent,
"sum_rows_examined": sumRowsExamined,
"sum_created_tmp_disk_tables": sumCreatedTmpDiskTables,
"sum_created_tmp_tables": sumCreatedTmpTables,
"sum_select_full_join": sumSelectFullJoin,
"sum_select_full_range_join": sumSelectFullRangeJoin,
"sum_select_range": sumSelectRange,
"sum_select_range_check": sumSelectRangeCheck,
"sum_select_scan": sumSelectScan,
"sum_sort_merge_passes": sumSortMergePasses,
"sum_sort_range": sumSortRange,
"sum_sort_rows": sumSortRows,
"sum_sort_scan": sumSortScan,
"sum_no_index_used": sumNoIndexUsed,
"sum_no_good_index_used": sumNoGoodIndexUsed,
}
acc.AddFields("mysql_perf_acc_event", sqlLWFields, sqlLWTags)
}
return nil
}
// gatherPerfTableLockWaits can be used to get // gatherPerfTableLockWaits can be used to get
// the total number and time for SQL and external lock wait events // the total number and time for SQL and external lock wait events
// for each table and operation // for each table and operation
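A standalone sketch of the filter construction used in `gatherPerfSummaryPerAccountPerEvent`: with a non-empty `perf_summary_events` list, the base query gains a parameterized `IN` clause, one placeholder per event. The event names below are illustrative.

```go
// Sketch of the WHERE EVENT_NAME IN (...) construction; event names are illustrative.
package main

import "fmt"

func main() {
	base := "SELECT ... FROM performance_schema.events_statements_summary_by_account_by_event_name"
	events := []string{"statement/sql/select", "statement/sql/update"}

	query := base
	var args []interface{}
	if len(events) > 0 {
		query += " WHERE EVENT_NAME IN ("
		for i, e := range events {
			if i > 0 {
				query += ", "
			}
			query += "?"
			args = append(args, e)
		}
		query += ")"
	}

	fmt.Println(query) // ... WHERE EVENT_NAME IN (?, ?)
	fmt.Println(args)  // [statement/sql/select statement/sql/update]
}
```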


@ -1,8 +1,5 @@
package prometheus package prometheus
// Parser inspired from
// https://github.com/prometheus/prom2json/blob/master/main.go
import ( import (
"bufio" "bufio"
"bytes" "bytes"
@ -15,168 +12,27 @@ import (
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/metric"
. "github.com/influxdata/telegraf/plugins/parsers/prometheus/common"
"github.com/matttproud/golang_protobuf_extensions/pbutil" "github.com/matttproud/golang_protobuf_extensions/pbutil"
dto "github.com/prometheus/client_model/go" dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/expfmt" "github.com/prometheus/common/expfmt"
) )
// Parse returns a slice of Metrics from a text representation of a
// metrics
func ParseV2(buf []byte, header http.Header) ([]telegraf.Metric, error) {
var metrics []telegraf.Metric
var parser expfmt.TextParser
// parse even if the buffer begins with a newline
buf = bytes.TrimPrefix(buf, []byte("\n"))
// Read raw data
buffer := bytes.NewBuffer(buf)
reader := bufio.NewReader(buffer)
mediatype, params, err := mime.ParseMediaType(header.Get("Content-Type"))
// Prepare output
metricFamilies := make(map[string]*dto.MetricFamily)
if err == nil && mediatype == "application/vnd.google.protobuf" &&
params["encoding"] == "delimited" &&
params["proto"] == "io.prometheus.client.MetricFamily" {
for {
mf := &dto.MetricFamily{}
if _, ierr := pbutil.ReadDelimited(reader, mf); ierr != nil {
if ierr == io.EOF {
break
}
return nil, fmt.Errorf("reading metric family protocol buffer failed: %s", ierr)
}
metricFamilies[mf.GetName()] = mf
}
} else {
metricFamilies, err = parser.TextToMetricFamilies(reader)
if err != nil {
return nil, fmt.Errorf("reading text format failed: %s", err)
}
}
// make sure all metrics have a consistent timestamp so that metrics don't straddle two different seconds
now := time.Now()
// read metrics
for metricName, mf := range metricFamilies {
for _, m := range mf.Metric {
// reading tags
tags := makeLabels(m)
if mf.GetType() == dto.MetricType_SUMMARY {
// summary metric
telegrafMetrics := makeQuantilesV2(m, tags, metricName, mf.GetType(), now)
metrics = append(metrics, telegrafMetrics...)
} else if mf.GetType() == dto.MetricType_HISTOGRAM {
// histogram metric
telegrafMetrics := makeBucketsV2(m, tags, metricName, mf.GetType(), now)
metrics = append(metrics, telegrafMetrics...)
} else {
// standard metric
// reading fields
fields := getNameAndValueV2(m, metricName)
// converting to telegraf metric
if len(fields) > 0 {
var t time.Time
if m.TimestampMs != nil && *m.TimestampMs > 0 {
t = time.Unix(0, *m.TimestampMs*1000000)
} else {
t = now
}
metric, err := metric.New("prometheus", tags, fields, t, valueType(mf.GetType()))
if err == nil {
metrics = append(metrics, metric)
}
}
}
}
}
return metrics, err
}
// Get Quantiles for summary metric & Buckets for histogram
func makeQuantilesV2(m *dto.Metric, tags map[string]string, metricName string, metricType dto.MetricType, now time.Time) []telegraf.Metric {
var metrics []telegraf.Metric
fields := make(map[string]interface{})
var t time.Time
if m.TimestampMs != nil && *m.TimestampMs > 0 {
t = time.Unix(0, *m.TimestampMs*1000000)
} else {
t = now
}
fields[metricName+"_count"] = float64(m.GetSummary().GetSampleCount())
fields[metricName+"_sum"] = float64(m.GetSummary().GetSampleSum())
met, err := metric.New("prometheus", tags, fields, t, valueType(metricType))
if err == nil {
metrics = append(metrics, met)
}
for _, q := range m.GetSummary().Quantile {
newTags := tags
fields = make(map[string]interface{})
newTags["quantile"] = fmt.Sprint(q.GetQuantile())
fields[metricName] = float64(q.GetValue())
quantileMetric, err := metric.New("prometheus", newTags, fields, t, valueType(metricType))
if err == nil {
metrics = append(metrics, quantileMetric)
}
}
return metrics
}
// Get Buckets from histogram metric
func makeBucketsV2(m *dto.Metric, tags map[string]string, metricName string, metricType dto.MetricType, now time.Time) []telegraf.Metric {
var metrics []telegraf.Metric
fields := make(map[string]interface{})
var t time.Time
if m.TimestampMs != nil && *m.TimestampMs > 0 {
t = time.Unix(0, *m.TimestampMs*1000000)
} else {
t = now
}
fields[metricName+"_count"] = float64(m.GetHistogram().GetSampleCount())
fields[metricName+"_sum"] = float64(m.GetHistogram().GetSampleSum())
met, err := metric.New("prometheus", tags, fields, t, valueType(metricType))
if err == nil {
metrics = append(metrics, met)
}
for _, b := range m.GetHistogram().Bucket {
newTags := tags
fields = make(map[string]interface{})
newTags["le"] = fmt.Sprint(b.GetUpperBound())
fields[metricName+"_bucket"] = float64(b.GetCumulativeCount())
histogramMetric, err := metric.New("prometheus", newTags, fields, t, valueType(metricType))
if err == nil {
metrics = append(metrics, histogramMetric)
}
}
return metrics
}
// Parse returns a slice of Metrics from a text representation of a
// metrics
func Parse(buf []byte, header http.Header) ([]telegraf.Metric, error) { func Parse(buf []byte, header http.Header) ([]telegraf.Metric, error) {
var metrics []telegraf.Metric
var parser expfmt.TextParser var parser expfmt.TextParser
var metrics []telegraf.Metric
var err error
// parse even if the buffer begins with a newline // parse even if the buffer begins with a newline
buf = bytes.TrimPrefix(buf, []byte("\n")) buf = bytes.TrimPrefix(buf, []byte("\n"))
// Read raw data // Read raw data
buffer := bytes.NewBuffer(buf) buffer := bytes.NewBuffer(buf)
reader := bufio.NewReader(buffer) reader := bufio.NewReader(buffer)
mediatype, params, err := mime.ParseMediaType(header.Get("Content-Type"))
// Prepare output // Prepare output
metricFamilies := make(map[string]*dto.MetricFamily) metricFamilies := make(map[string]*dto.MetricFamily)
if err == nil && mediatype == "application/vnd.google.protobuf" && if isProtobuf(header) {
params["encoding"] == "delimited" &&
params["proto"] == "io.prometheus.client.MetricFamily" {
for { for {
mf := &dto.MetricFamily{} mf := &dto.MetricFamily{}
if _, ierr := pbutil.ReadDelimited(reader, mf); ierr != nil { if _, ierr := pbutil.ReadDelimited(reader, mf); ierr != nil {
@ -194,13 +50,13 @@ func Parse(buf []byte, header http.Header) ([]telegraf.Metric, error) {
} }
} }
// make sure all metrics have a consistent timestamp so that metrics don't straddle two different seconds
now := time.Now() now := time.Now()
// read metrics // read metrics
for metricName, mf := range metricFamilies { for metricName, mf := range metricFamilies {
for _, m := range mf.Metric { for _, m := range mf.Metric {
// reading tags // reading tags
tags := makeLabels(m) tags := MakeLabels(m, nil)
// reading fields // reading fields
var fields map[string]interface{} var fields map[string]interface{}
if mf.GetType() == dto.MetricType_SUMMARY { if mf.GetType() == dto.MetricType_SUMMARY {
@ -226,7 +82,7 @@ func Parse(buf []byte, header http.Header) ([]telegraf.Metric, error) {
} else { } else {
t = now t = now
} }
metric, err := metric.New(metricName, tags, fields, t, valueType(mf.GetType())) metric, err := metric.New(metricName, tags, fields, t, ValueType(mf.GetType()))
if err == nil { if err == nil {
metrics = append(metrics, metric) metrics = append(metrics, metric)
} }
@ -237,19 +93,16 @@ func Parse(buf []byte, header http.Header) ([]telegraf.Metric, error) {
return metrics, err return metrics, err
} }
func valueType(mt dto.MetricType) telegraf.ValueType { func isProtobuf(header http.Header) bool {
switch mt { mediatype, params, error := mime.ParseMediaType(header.Get("Content-Type"))
case dto.MetricType_COUNTER:
return telegraf.Counter if error != nil {
case dto.MetricType_GAUGE: return false
return telegraf.Gauge
case dto.MetricType_SUMMARY:
return telegraf.Summary
case dto.MetricType_HISTOGRAM:
return telegraf.Histogram
default:
return telegraf.Untyped
} }
return mediatype == "application/vnd.google.protobuf" &&
params["encoding"] == "delimited" &&
params["proto"] == "io.prometheus.client.MetricFamily"
} }
// Get Quantiles from summary metric // Get Quantiles from summary metric
@ -272,15 +125,6 @@ func makeBuckets(m *dto.Metric) map[string]interface{} {
return fields return fields
} }
// Get labels from metric
func makeLabels(m *dto.Metric) map[string]string {
result := map[string]string{}
for _, lp := range m.Label {
result[lp.GetName()] = lp.GetValue()
}
return result
}
// Get name and value from metric // Get name and value from metric
func getNameAndValue(m *dto.Metric) map[string]interface{} { func getNameAndValue(m *dto.Metric) map[string]interface{} {
fields := make(map[string]interface{}) fields := make(map[string]interface{})
@ -299,22 +143,3 @@ func getNameAndValue(m *dto.Metric) map[string]interface{} {
} }
return fields return fields
} }
// Get name and value from metric
func getNameAndValueV2(m *dto.Metric, metricName string) map[string]interface{} {
fields := make(map[string]interface{})
if m.Gauge != nil {
if !math.IsNaN(m.GetGauge().GetValue()) {
fields[metricName] = float64(m.GetGauge().GetValue())
}
} else if m.Counter != nil {
if !math.IsNaN(m.GetCounter().GetValue()) {
fields[metricName] = float64(m.GetCounter().GetValue())
}
} else if m.Untyped != nil {
if !math.IsNaN(m.GetUntyped().GetValue()) {
fields[metricName] = float64(m.GetUntyped().GetValue())
}
}
return fields
}
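The refactored `Parse` now delegates content-type detection to the new `isProtobuf` helper. Below is a self-contained sketch equivalent to that helper; the header values are illustrative.

```go
// Self-contained copy of the content-type check; header values are illustrative.
package main

import (
	"fmt"
	"mime"
	"net/http"
)

func isProtobuf(header http.Header) bool {
	mediatype, params, err := mime.ParseMediaType(header.Get("Content-Type"))
	if err != nil {
		return false
	}
	return mediatype == "application/vnd.google.protobuf" &&
		params["encoding"] == "delimited" &&
		params["proto"] == "io.prometheus.client.MetricFamily"
}

func main() {
	h := http.Header{}
	h.Set("Content-Type", "application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited")
	fmt.Println(isProtobuf(h)) // true

	h.Set("Content-Type", "text/plain; version=0.0.4")
	fmt.Println(isProtobuf(h)) // false
}
```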


@ -15,6 +15,7 @@ import (
"github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs"
parser_v2 "github.com/influxdata/telegraf/plugins/parsers/prometheus"
) )
const acceptHeader = `application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.7,text/plain;version=0.0.4;q=0.3,*/*;q=0.1` const acceptHeader = `application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.7,text/plain;version=0.0.4;q=0.3,*/*;q=0.1`
@ -329,7 +330,8 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error
} }
if p.MetricVersion == 2 { if p.MetricVersion == 2 {
metrics, err = ParseV2(body, resp.Header) parser := parser_v2.Parser{Header: resp.Header}
metrics, err = parser.Parse(body)
} else { } else {
metrics, err = Parse(body, resp.Header) metrics, err = Parse(body, resp.Header)
} }


@ -0,0 +1,71 @@
# Tengine Input Plugin
The tengine plugin gathers metrics from the
[Tengine Web Server](http://tengine.taobao.org/) via the
[reqstat](http://tengine.taobao.org/document/http_reqstat.html) module.
### Configuration:
```toml
# Read Tengine's basic status information (ngx_http_reqstat_module)
[[inputs.tengine]]
## An array of Tengine reqstat module URI to gather stats.
urls = ["http://127.0.0.1/us"]
## HTTP response timeout (default: 5s)
# response_timeout = "5s"
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
```
### Metrics:
- tengine
- tags:
- port
- server
- server_name
- fields:
- bytes_in (integer, total number of bytes received from client)
- bytes_out (integer, total number of bytes sent to client)
- conn_total (integer, total number of accepted connections)
- req_total (integer, total number of processed requests)
- http_2xx (integer, total number of 2xx requests)
- http_3xx (integer, total number of 3xx requests)
- http_4xx (integer, total number of 4xx requests)
- http_5xx (integer, total number of 5xx requests)
- http_other_status (integer, total number of other requests)
- rt (integer, accumulation of request time)
- ups_req (integer, total number of requests calling for upstream)
- ups_rt (integer, accumulation of upstream request time)
- ups_tries (integer, total number of times calling for upstream)
- http_200 (integer, total number of 200 requests)
- http_206 (integer, total number of 206 requests)
- http_302 (integer, total number of 302 requests)
- http_304 (integer, total number of 304 requests)
- http_403 (integer, total number of 403 requests)
- http_404 (integer, total number of 404 requests)
- http_416 (integer, total number of 416 requests)
- http_499 (integer, total number of 499 requests)
- http_500 (integer, total number of 500 requests)
- http_502 (integer, total number of 502 requests)
- http_503 (integer, total number of 503 requests)
- http_504 (integer, total number of 504 requests)
- http_508 (integer, total number of 508 requests)
- http_other_detail_status (integer, total number of requests of other status codes)
- http_ups_4xx (integer, total number of requests of upstream 4xx)
- http_ups_5xx (integer, total number of requests of upstream 5xx)
### Example Output:
```
tengine,host=gcp-thz-api-5,port=80,server=localhost,server_name=localhost bytes_in=9129i,bytes_out=56334i,conn_total=14i,http_200=90i,http_206=0i,http_2xx=90i,http_302=0i,http_304=0i,http_3xx=0i,http_403=0i,http_404=0i,http_416=0i,http_499=0i,http_4xx=0i,http_500=0i,http_502=0i,http_503=0i,http_504=0i,http_508=0i,http_5xx=0i,http_other_detail_status=0i,http_other_status=0i,http_ups_4xx=0i,http_ups_5xx=0i,req_total=90i,rt=0i,ups_req=0i,ups_rt=0i,ups_tries=0i 1526546308000000000
tengine,host=gcp-thz-api-5,port=80,server=localhost,server_name=28.79.190.35.bc.googleusercontent.com bytes_in=1500i,bytes_out=3009i,conn_total=4i,http_200=1i,http_206=0i,http_2xx=1i,http_302=0i,http_304=0i,http_3xx=0i,http_403=0i,http_404=1i,http_416=0i,http_499=0i,http_4xx=3i,http_500=0i,http_502=0i,http_503=0i,http_504=0i,http_508=0i,http_5xx=0i,http_other_detail_status=0i,http_other_status=0i,http_ups_4xx=0i,http_ups_5xx=0i,req_total=4i,rt=0i,ups_req=0i,ups_rt=0i,ups_tries=0i 1526546308000000000
tengine,host=gcp-thz-api-5,port=80,server=localhost,server_name=www.google.com bytes_in=372i,bytes_out=786i,conn_total=1i,http_200=1i,http_206=0i,http_2xx=1i,http_302=0i,http_304=0i,http_3xx=0i,http_403=0i,http_404=0i,http_416=0i,http_499=0i,http_4xx=0i,http_500=0i,http_502=0i,http_503=0i,http_504=0i,http_508=0i,http_5xx=0i,http_other_detail_status=0i,http_other_status=0i,http_ups_4xx=0i,http_ups_5xx=0i,req_total=1i,rt=0i,ups_req=0i,ups_rt=0i,ups_tries=0i 1526546308000000000
tengine,host=gcp-thz-api-5,port=80,server=localhost,server_name=35.190.79.28 bytes_in=4433i,bytes_out=10259i,conn_total=5i,http_200=3i,http_206=0i,http_2xx=3i,http_302=0i,http_304=0i,http_3xx=0i,http_403=0i,http_404=11i,http_416=0i,http_499=0i,http_4xx=11i,http_500=0i,http_502=0i,http_503=0i,http_504=0i,http_508=0i,http_5xx=0i,http_other_detail_status=0i,http_other_status=0i,http_ups_4xx=0i,http_ups_5xx=0i,req_total=14i,rt=0i,ups_req=0i,ups_rt=0i,ups_tries=0i 1526546308000000000
tengine,host=gcp-thz-api-5,port=80,server=localhost,server_name=tenka-prod-api.txwy.tw bytes_in=3014397400i,bytes_out=14279992835i,conn_total=36844i,http_200=3177339i,http_206=0i,http_2xx=3177339i,http_302=0i,http_304=0i,http_3xx=0i,http_403=0i,http_404=123i,http_416=0i,http_499=0i,http_4xx=123i,http_500=17214i,http_502=4453i,http_503=80i,http_504=0i,http_508=0i,http_5xx=21747i,http_other_detail_status=0i,http_other_status=0i,http_ups_4xx=123i,http_ups_5xx=21747i,req_total=3199209i,rt=245874536i,ups_req=2685076i,ups_rt=245858217i,ups_tries=2685076i 1526546308000000000
```


@ -0,0 +1,338 @@
package tengine
import (
"bufio"
"fmt"
"net"
"net/http"
"net/url"
"strconv"
"strings"
"sync"
"time"
"io"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)
type Tengine struct {
Urls []string
ResponseTimeout internal.Duration
tls.ClientConfig
client *http.Client
}
var sampleConfig = `
# An array of Tengine reqstat module URI to gather stats.
urls = ["http://127.0.0.1/us"]
# HTTP response timeout (default: 5s)
# response_timeout = "5s"
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.cer"
# tls_key = "/etc/telegraf/key.key"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
`
func (n *Tengine) SampleConfig() string {
return sampleConfig
}
func (n *Tengine) Description() string {
return "Read Tengine's basic status information (ngx_http_reqstat_module)"
}
func (n *Tengine) Gather(acc telegraf.Accumulator) error {
var wg sync.WaitGroup
// Create an HTTP client that is re-used for each
// collection interval
if n.client == nil {
client, err := n.createHttpClient()
if err != nil {
return err
}
n.client = client
}
for _, u := range n.Urls {
addr, err := url.Parse(u)
if err != nil {
acc.AddError(fmt.Errorf("Unable to parse address '%s': %s", u, err))
continue
}
wg.Add(1)
go func(addr *url.URL) {
defer wg.Done()
acc.AddError(n.gatherUrl(addr, acc))
}(addr)
}
wg.Wait()
return nil
}
func (n *Tengine) createHttpClient() (*http.Client, error) {
tlsCfg, err := n.ClientConfig.TLSConfig()
if err != nil {
return nil, err
}
if n.ResponseTimeout.Duration < time.Second {
n.ResponseTimeout.Duration = time.Second * 5
}
client := &http.Client{
Transport: &http.Transport{
TLSClientConfig: tlsCfg,
},
Timeout: n.ResponseTimeout.Duration,
}
return client, nil
}
type TengineStatus struct {
host string
bytes_in uint64
bytes_out uint64
conn_total uint64
req_total uint64
http_2xx uint64
http_3xx uint64
http_4xx uint64
http_5xx uint64
http_other_status uint64
rt uint64
ups_req uint64
ups_rt uint64
ups_tries uint64
http_200 uint64
http_206 uint64
http_302 uint64
http_304 uint64
http_403 uint64
http_404 uint64
http_416 uint64
http_499 uint64
http_500 uint64
http_502 uint64
http_503 uint64
http_504 uint64
http_508 uint64
http_other_detail_status uint64
http_ups_4xx uint64
http_ups_5xx uint64
}
func (n *Tengine) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error {
var tenginestatus TengineStatus
resp, err := n.client.Get(addr.String())
if err != nil {
return fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("%s returned HTTP status %s", addr.String(), resp.Status)
}
r := bufio.NewReader(resp.Body)
for {
line, err := r.ReadString('\n')
if err != nil || io.EOF == err {
break
}
line_split := strings.Split(strings.TrimSpace(line), ",")
if len(line_split) != 30 {
continue
}
tenginestatus.host = line_split[0]
if err != nil {
return err
}
tenginestatus.bytes_in, err = strconv.ParseUint(line_split[1], 10, 64)
if err != nil {
return err
}
tenginestatus.bytes_out, err = strconv.ParseUint(line_split[2], 10, 64)
if err != nil {
return err
}
tenginestatus.conn_total, err = strconv.ParseUint(line_split[3], 10, 64)
if err != nil {
return err
}
tenginestatus.req_total, err = strconv.ParseUint(line_split[4], 10, 64)
if err != nil {
return err
}
tenginestatus.http_2xx, err = strconv.ParseUint(line_split[5], 10, 64)
if err != nil {
return err
}
tenginestatus.http_3xx, err = strconv.ParseUint(line_split[6], 10, 64)
if err != nil {
return err
}
tenginestatus.http_4xx, err = strconv.ParseUint(line_split[7], 10, 64)
if err != nil {
return err
}
tenginestatus.http_5xx, err = strconv.ParseUint(line_split[8], 10, 64)
if err != nil {
return err
}
tenginestatus.http_other_status, err = strconv.ParseUint(line_split[9], 10, 64)
if err != nil {
return err
}
tenginestatus.rt, err = strconv.ParseUint(line_split[10], 10, 64)
if err != nil {
return err
}
tenginestatus.ups_req, err = strconv.ParseUint(line_split[11], 10, 64)
if err != nil {
return err
}
tenginestatus.ups_rt, err = strconv.ParseUint(line_split[12], 10, 64)
if err != nil {
return err
}
tenginestatus.ups_tries, err = strconv.ParseUint(line_split[13], 10, 64)
if err != nil {
return err
}
tenginestatus.http_200, err = strconv.ParseUint(line_split[14], 10, 64)
if err != nil {
return err
}
tenginestatus.http_206, err = strconv.ParseUint(line_split[15], 10, 64)
if err != nil {
return err
}
tenginestatus.http_302, err = strconv.ParseUint(line_split[16], 10, 64)
if err != nil {
return err
}
tenginestatus.http_304, err = strconv.ParseUint(line_split[17], 10, 64)
if err != nil {
return err
}
tenginestatus.http_403, err = strconv.ParseUint(line_split[18], 10, 64)
if err != nil {
return err
}
tenginestatus.http_404, err = strconv.ParseUint(line_split[19], 10, 64)
if err != nil {
return err
}
tenginestatus.http_416, err = strconv.ParseUint(line_split[20], 10, 64)
if err != nil {
return err
}
tenginestatus.http_499, err = strconv.ParseUint(line_split[21], 10, 64)
if err != nil {
return err
}
tenginestatus.http_500, err = strconv.ParseUint(line_split[22], 10, 64)
if err != nil {
return err
}
tenginestatus.http_502, err = strconv.ParseUint(line_split[23], 10, 64)
if err != nil {
return err
}
tenginestatus.http_503, err = strconv.ParseUint(line_split[24], 10, 64)
if err != nil {
return err
}
tenginestatus.http_504, err = strconv.ParseUint(line_split[25], 10, 64)
if err != nil {
return err
}
tenginestatus.http_508, err = strconv.ParseUint(line_split[26], 10, 64)
if err != nil {
return err
}
tenginestatus.http_other_detail_status, err = strconv.ParseUint(line_split[27], 10, 64)
if err != nil {
return err
}
tenginestatus.http_ups_4xx, err = strconv.ParseUint(line_split[28], 10, 64)
if err != nil {
return err
}
tenginestatus.http_ups_5xx, err = strconv.ParseUint(line_split[29], 10, 64)
if err != nil {
return err
}
tags := getTags(addr, tenginestatus.host)
fields := map[string]interface{}{
"bytes_in": tenginestatus.bytes_in,
"bytes_out": tenginestatus.bytes_out,
"conn_total": tenginestatus.conn_total,
"req_total": tenginestatus.req_total,
"http_2xx": tenginestatus.http_2xx,
"http_3xx": tenginestatus.http_3xx,
"http_4xx": tenginestatus.http_4xx,
"http_5xx": tenginestatus.http_5xx,
"http_other_status": tenginestatus.http_other_status,
"rt": tenginestatus.rt,
"ups_req": tenginestatus.ups_req,
"ups_rt": tenginestatus.ups_rt,
"ups_tries": tenginestatus.ups_tries,
"http_200": tenginestatus.http_200,
"http_206": tenginestatus.http_206,
"http_302": tenginestatus.http_302,
"http_304": tenginestatus.http_304,
"http_403": tenginestatus.http_403,
"http_404": tenginestatus.http_404,
"http_416": tenginestatus.http_416,
"http_499": tenginestatus.http_499,
"http_500": tenginestatus.http_500,
"http_502": tenginestatus.http_502,
"http_503": tenginestatus.http_503,
"http_504": tenginestatus.http_504,
"http_508": tenginestatus.http_508,
"http_other_detail_status": tenginestatus.http_other_detail_status,
"http_ups_4xx": tenginestatus.http_ups_4xx,
"http_ups_5xx": tenginestatus.http_ups_5xx,
}
acc.AddFields("tengine", fields, tags)
}
return nil
}
// Get tag(s) for the tengine plugin
func getTags(addr *url.URL, server_name string) map[string]string {
h := addr.Host
host, port, err := net.SplitHostPort(h)
if err != nil {
host = addr.Host
if addr.Scheme == "http" {
port = "80"
} else if addr.Scheme == "https" {
port = "443"
} else {
port = ""
}
}
return map[string]string{"server": host, "port": port, "server_name": server_name}
}
func init() {
inputs.Add("tengine", func() telegraf.Input {
return &Tengine{}
})
}
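For reference, a hedged sketch of the input `gatherUrl` expects: each reqstat line is the vhost name followed by 29 comma-separated counters, parsed positionally in the order the README lists the fields. The sample line below is illustrative, not captured from a real server.

```go
// Illustrative reqstat line: vhost name followed by 29 counters.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	line := "localhost,9129,56334,14,90,90,0,0,0,0,0,0,0,0,90,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0"

	parts := strings.Split(strings.TrimSpace(line), ",")
	fmt.Println(len(parts)) // 30 -- any other length is skipped by gatherUrl

	serverName := parts[0]
	bytesIn, _ := strconv.ParseUint(parts[1], 10, 64)
	reqTotal, _ := strconv.ParseUint(parts[4], 10, 64)
	fmt.Println(serverName, bytesIn, reqTotal) // localhost 9129 90
}
```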


@ -0,0 +1,85 @@
# Zookeeper Input Plugin
The zookeeper plugin collects the variables output by the 'mntr' command; see the
[Zookeeper Admin](https://zookeeper.apache.org/doc/current/zookeeperAdmin.html) documentation for details.
### Configuration
```toml
# Reads 'mntr' stats from one or many zookeeper servers
[[inputs.zookeeper]]
## An array of addresses to gather stats about. Specify an ip or hostname
## with port, e.g. localhost:2181, 10.0.0.1:2181, etc.
## If no servers are specified, then localhost is used as the host.
## If no port is specified, 2181 is used
servers = [":2181"]
## Timeout for metric collections from all servers. Minimum timeout is "1s".
# timeout = "5s"
## Optional TLS Config
# enable_tls = true
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## If false, skip chain & host verification
# insecure_skip_verify = true
```
### Metrics:
Exact field names are based on the Zookeeper response and may vary with
configuration, platform, and version.
- zookeeper
- tags:
- server
- port
- state
- fields:
- approximate_data_size (integer)
- avg_latency (integer)
- ephemerals_count (integer)
- max_file_descriptor_count (integer)
- max_latency (integer)
- min_latency (integer)
- num_alive_connections (integer)
- open_file_descriptor_count (integer)
- outstanding_requests (integer)
- packets_received (integer)
- packets_sent (integer)
- version (string)
- watch_count (integer)
- znode_count (integer)
- followers (integer, leader only)
- synced_followers (integer, leader only)
- pending_syncs (integer, leader only)
### Debugging:
If you have any issues please check the direct Zookeeper output using netcat:
```sh
$ echo mntr | nc localhost 2181
zk_version 3.4.9-3--1, built on Thu, 01 Jun 2017 16:26:44 -0700
zk_avg_latency 0
zk_max_latency 0
zk_min_latency 0
zk_packets_received 8
zk_packets_sent 7
zk_num_alive_connections 1
zk_outstanding_requests 0
zk_server_state standalone
zk_znode_count 129
zk_watch_count 0
zk_ephemerals_count 0
zk_approximate_data_size 10044
zk_open_file_descriptor_count 44
zk_max_file_descriptor_count 4096
```
### Example Output
```
zookeeper,server=localhost,port=2181,state=standalone ephemerals_count=0i,approximate_data_size=10044i,open_file_descriptor_count=44i,max_latency=0i,packets_received=7i,outstanding_requests=0i,znode_count=129i,max_file_descriptor_count=4096i,version="3.4.9-3--1",avg_latency=0i,packets_sent=6i,num_alive_connections=1i,watch_count=0i,min_latency=0i 1522351112000000000
```


@ -0,0 +1,181 @@
package zookeeper
import (
"bufio"
"context"
"crypto/tls"
"fmt"
"net"
"regexp"
"strconv"
"strings"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
tlsint "github.com/influxdata/telegraf/plugins/common/tls"
"github.com/influxdata/telegraf/plugins/inputs"
)
var zookeeperFormatRE = regexp.MustCompile(`^zk_(\w[\w\.\-]*)\s+([\w\.\-]+)`)
// Zookeeper is a zookeeper plugin
type Zookeeper struct {
Servers []string
Timeout internal.Duration
EnableTLS bool `toml:"enable_tls"`
EnableSSL bool `toml:"enable_ssl"` // deprecated in 1.7; use enable_tls
tlsint.ClientConfig
initialized bool
tlsConfig *tls.Config
}
var sampleConfig = `
## An array of addresses to gather stats about. Specify an ip or hostname
## with port, e.g. localhost:2181, 10.0.0.1:2181, etc.
## If no servers are specified, then localhost is used as the host.
## If no port is specified, 2181 is used
servers = [":2181"]
## Timeout for metric collections from all servers. Minimum timeout is "1s".
# timeout = "5s"
## Optional TLS Config
# enable_tls = true
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## If false, skip chain & host verification
# insecure_skip_verify = true
`
var defaultTimeout = 5 * time.Second
// SampleConfig returns sample configuration message
func (z *Zookeeper) SampleConfig() string {
return sampleConfig
}
// Description returns description of Zookeeper plugin
func (z *Zookeeper) Description() string {
return `Reads 'mntr' stats from one or many zookeeper servers`
}
func (z *Zookeeper) dial(ctx context.Context, addr string) (net.Conn, error) {
var dialer net.Dialer
if z.EnableTLS || z.EnableSSL {
deadline, ok := ctx.Deadline()
if ok {
dialer.Deadline = deadline
}
return tls.DialWithDialer(&dialer, "tcp", addr, z.tlsConfig)
} else {
return dialer.DialContext(ctx, "tcp", addr)
}
}
// Gather reads stats from all configured servers accumulates stats
func (z *Zookeeper) Gather(acc telegraf.Accumulator) error {
ctx := context.Background()
if !z.initialized {
tlsConfig, err := z.ClientConfig.TLSConfig()
if err != nil {
return err
}
z.tlsConfig = tlsConfig
z.initialized = true
}
if z.Timeout.Duration < 1*time.Second {
z.Timeout.Duration = defaultTimeout
}
ctx, cancel := context.WithTimeout(ctx, z.Timeout.Duration)
defer cancel()
if len(z.Servers) == 0 {
z.Servers = []string{":2181"}
}
for _, serverAddress := range z.Servers {
acc.AddError(z.gatherServer(ctx, serverAddress, acc))
}
return nil
}
func (z *Zookeeper) gatherServer(ctx context.Context, address string, acc telegraf.Accumulator) error {
var zookeeper_state string
_, _, err := net.SplitHostPort(address)
if err != nil {
address = address + ":2181"
}
c, err := z.dial(ctx, address)
if err != nil {
return err
}
defer c.Close()
// Apply deadline to connection
deadline, ok := ctx.Deadline()
if ok {
c.SetDeadline(deadline)
}
fmt.Fprintf(c, "%s\n", "mntr")
rdr := bufio.NewReader(c)
scanner := bufio.NewScanner(rdr)
service := strings.Split(address, ":")
if len(service) != 2 {
return fmt.Errorf("Invalid service address: %s", address)
}
fields := make(map[string]interface{})
for scanner.Scan() {
line := scanner.Text()
parts := zookeeperFormatRE.FindStringSubmatch(string(line))
if len(parts) != 3 {
return fmt.Errorf("unexpected line in mntr response: %q", line)
}
measurement := strings.TrimPrefix(parts[1], "zk_")
if measurement == "server_state" {
zookeeper_state = parts[2]
} else {
sValue := string(parts[2])
iVal, err := strconv.ParseInt(sValue, 10, 64)
if err == nil {
fields[measurement] = iVal
} else {
fields[measurement] = sValue
}
}
}
srv := "localhost"
if service[0] != "" {
srv = service[0]
}
tags := map[string]string{
"server": srv,
"port": service[1],
"state": zookeeper_state,
}
acc.AddFields("zookeeper", fields, tags)
return nil
}
func init() {
inputs.Add("zookeeper", func() telegraf.Input {
return &Zookeeper{}
})
}
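A small sketch of how `zookeeperFormatRE` splits one `mntr` response line into a key (the capture group already excludes the `zk_` prefix) and a value; the sample line is illustrative.

```go
// Illustrative mntr line; the capture group excludes the "zk_" prefix.
package main

import (
	"fmt"
	"regexp"
)

var zookeeperFormatRE = regexp.MustCompile(`^zk_(\w[\w\.\-]*)\s+([\w\.\-]+)`)

func main() {
	line := "zk_znode_count\t129"

	parts := zookeeperFormatRE.FindStringSubmatch(line)
	if len(parts) == 3 {
		fmt.Println(parts[1], parts[2]) // znode_count 129
	}
}
```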


@ -3,8 +3,8 @@
The JSON data format parses a [JSON][json] object or an array of objects into The JSON data format parses a [JSON][json] object or an array of objects into
metric fields. metric fields.
**NOTE:** All JSON numbers are converted to float fields. JSON String are **NOTE:** All JSON numbers are converted to float fields. JSON strings and booleans are
ignored unless specified in the `tag_key` or `json_string_fields` options. ignored unless specified in the `tag_key` or `json_string_fields` options.
### Configuration ### Configuration
@ -30,13 +30,15 @@ ignored unless specified in the `tag_key` or `json_string_fields` options.
json_query = "" json_query = ""
## Tag keys is an array of keys that should be added as tags. Matching keys ## Tag keys is an array of keys that should be added as tags. Matching keys
## are no longer saved as fields. ## are no longer saved as fields. Supports wildcard glob matching.
tag_keys = [ tag_keys = [
"my_tag_1", "my_tag_1",
"my_tag_2" "my_tag_2",
"tags_*",
"tag*"
] ]
## Array of glob pattern strings keys that should be added as string fields. ## Array of glob pattern strings or booleans keys that should be added as string fields.
json_string_fields = [] json_string_fields = []
## Name key is the key to use as the measurement name. ## Name key is the key to use as the measurement name.


@ -36,7 +36,7 @@ type Config struct {
type Parser struct { type Parser struct {
metricName string metricName string
tagKeys []string tagKeys filter.Filter
stringFields filter.Filter stringFields filter.Filter
nameKey string nameKey string
query string query string
@ -53,9 +53,14 @@ func New(config *Config) (*Parser, error) {
return nil, err return nil, err
} }
tagKeyFilter, err := filter.Compile(config.TagKeys)
if err != nil {
return nil, err
}
return &Parser{ return &Parser{
metricName: config.MetricName, metricName: config.MetricName,
tagKeys: config.TagKeys, tagKeys: tagKeyFilter,
nameKey: config.NameKey, nameKey: config.NameKey,
stringFields: stringFilter, stringFields: stringFilter,
query: config.Query, query: config.Query,
@ -104,7 +109,7 @@ func (p *Parser) parseObject(data map[string]interface{}, timestamp time.Time) (
name := p.metricName name := p.metricName
//checks if json_name_key is set // checks if json_name_key is set
if p.nameKey != "" { if p.nameKey != "" {
switch field := f.Fields[p.nameKey].(type) { switch field := f.Fields[p.nameKey].(type) {
case string: case string:
@ -112,7 +117,7 @@ func (p *Parser) parseObject(data map[string]interface{}, timestamp time.Time) (
} }
} }
//if time key is specified, set timestamp to it // if time key is specified, set timestamp to it
if p.timeKey != "" { if p.timeKey != "" {
if p.timeFormat == "" { if p.timeFormat == "" {
err := fmt.Errorf("use of 'json_time_key' requires 'json_time_format'") err := fmt.Errorf("use of 'json_time_key' requires 'json_time_format'")
@ -131,7 +136,7 @@ func (p *Parser) parseObject(data map[string]interface{}, timestamp time.Time) (
delete(f.Fields, p.timeKey) delete(f.Fields, p.timeKey)
//if the year is 0, set to current year // if the year is 0, set to current year
if timestamp.Year() == 0 { if timestamp.Year() == 0 {
timestamp = timestamp.AddDate(time.Now().Year(), 0, 0) timestamp = timestamp.AddDate(time.Now().Year(), 0, 0)
} }
@ -145,32 +150,37 @@ func (p *Parser) parseObject(data map[string]interface{}, timestamp time.Time) (
return []telegraf.Metric{metric}, nil return []telegraf.Metric{metric}, nil
} }
//will take in field map with strings and bools, // will take in field map with strings and bools,
//search for TagKeys that match fieldnames and add them to tags // search for TagKeys that match fieldnames and add them to tags
//will delete any strings/bools that shouldn't be fields // will delete any strings/bools that shouldn't be fields
//assumes that any non-numeric values in TagKeys should be displayed as tags // assumes that any non-numeric values in TagKeys should be displayed as tags
func (p *Parser) switchFieldToTag(tags map[string]string, fields map[string]interface{}) (map[string]string, map[string]interface{}) { func (p *Parser) switchFieldToTag(tags map[string]string, fields map[string]interface{}) (map[string]string, map[string]interface{}) {
for _, name := range p.tagKeys {
//switch any fields in tagkeys into tags for name, value := range fields {
if fields[name] == nil { if p.tagKeys == nil {
continue continue
} }
switch value := fields[name].(type) { // skip switch statement if tagkey doesn't match fieldname
if !p.tagKeys.Match(name) {
continue
}
// switch any fields in tagkeys into tags
switch t := value.(type) {
case string: case string:
tags[name] = value tags[name] = t
delete(fields, name) delete(fields, name)
case bool: case bool:
tags[name] = strconv.FormatBool(value) tags[name] = strconv.FormatBool(t)
delete(fields, name) delete(fields, name)
case float64: case float64:
tags[name] = strconv.FormatFloat(value, 'f', -1, 64) tags[name] = strconv.FormatFloat(t, 'f', -1, 64)
delete(fields, name) delete(fields, name)
default: default:
log.Printf("E! [parsers.json] Unrecognized type %T", value) log.Printf("E! [parsers.json] Unrecognized type %T", value)
} }
} }
//remove any additional string/bool values from fields // remove any additional string/bool values from fields
for fk := range fields { for fk := range fields {
switch fields[fk].(type) { switch fields[fk].(type) {
case string, bool: case string, bool:

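The hunks above replace the plain `tag_keys` string slice with a compiled `filter.Filter`, so configured tag keys can now be glob patterns rather than exact names. Below is a minimal hedged sketch of that matching behaviour, assuming only the `Compile`/`Match` API the diff itself uses; the pattern strings and field values are made up for illustration.

```go
package main

import (
	"fmt"
	"strconv"

	"github.com/influxdata/telegraf/filter"
)

func main() {
	// Compile the configured tag_keys into a glob filter, as the parser's
	// constructor now does with config.TagKeys.
	tagKeys, err := filter.Compile([]string{"host", "region_*"})
	if err != nil {
		panic(err)
	}

	fields := map[string]interface{}{
		"host":        "server01",
		"region_east": true,
		"value":       42.0,
	}
	tags := map[string]string{}

	// Mirror switchFieldToTag: fields whose names match the filter are
	// converted to tags and removed from the field map.
	for name, v := range fields {
		if !tagKeys.Match(name) {
			continue
		}
		switch t := v.(type) {
		case string:
			tags[name] = t
			delete(fields, name)
		case bool:
			tags[name] = strconv.FormatBool(t)
			delete(fields, name)
		case float64:
			tags[name] = strconv.FormatFloat(t, 'f', -1, 64)
			delete(fields, name)
		}
	}

	fmt.Println(tags)   // map[host:server01 region_east:true]
	fmt.Println(fields) // map[value:42]
}
```

Because `region_*` is a glob, a single entry now covers a whole family of field names, which the old `[]string` comparison could not do.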
View File

@ -0,0 +1,17 @@
# Prometheus Text-Based Format

There are no additional configuration options for [Prometheus Text-Based Format][]. The metrics are parsed directly into Telegraf metrics. It is used internally in [prometheus input](/plugins/inputs/prometheus) or can be used in [http_listener_v2](/plugins/inputs/http_listener_v2) to simulate Pushgateway.

[Prometheus Text-Based Format]: https://prometheus.io/docs/instrumenting/exposition_formats/#text-based-format
```toml
[[inputs.file]]
files = ["example"]
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "prometheus"
```

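For a sense of how the files added below fit together, here is a hedged sketch of feeding exposition-format text straight through the new parser. The import path matches where the vendored files live, but the sample payload and default tag are illustrative only.

```go
package main

import (
	"fmt"

	"github.com/influxdata/telegraf/plugins/parsers/prometheus"
)

func main() {
	payload := []byte(`# HELP http_requests_total Total HTTP requests.
# TYPE http_requests_total counter
http_requests_total{method="get"} 1027
`)

	// With no protobuf Content-Type header set, Parse falls back to the
	// Prometheus text parser.
	p := &prometheus.Parser{DefaultTags: map[string]string{"source": "example"}}

	metrics, err := p.Parse(payload)
	if err != nil {
		panic(err)
	}
	for _, m := range metrics {
		fmt.Println(m.Name(), m.Tags(), m.Fields())
	}
	// Expected shape: prometheus map[method:get source:example] map[http_requests_total:1027]
}
```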
View File

@ -0,0 +1,36 @@
package common
import (
"github.com/influxdata/telegraf"
dto "github.com/prometheus/client_model/go"
)
func ValueType(mt dto.MetricType) telegraf.ValueType {
switch mt {
case dto.MetricType_COUNTER:
return telegraf.Counter
case dto.MetricType_GAUGE:
return telegraf.Gauge
case dto.MetricType_SUMMARY:
return telegraf.Summary
case dto.MetricType_HISTOGRAM:
return telegraf.Histogram
default:
return telegraf.Untyped
}
}
// Get labels from metric
func MakeLabels(m *dto.Metric, defaultTags map[string]string) map[string]string {
result := map[string]string{}
for key, value := range defaultTags {
result[key] = value
}
for _, lp := range m.Label {
result[lp.GetName()] = lp.GetValue()
}
return result
}

View File

@ -0,0 +1,200 @@
package prometheus
import (
"bufio"
"bytes"
"fmt"
"github.com/matttproud/golang_protobuf_extensions/pbutil"
"io"
"math"
"mime"
"net/http"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
. "github.com/influxdata/telegraf/plugins/parsers/prometheus/common"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/expfmt"
)
type Parser struct {
DefaultTags map[string]string
Header http.Header
}
func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) {
var parser expfmt.TextParser
var metrics []telegraf.Metric
var err error
// parse even if the buffer begins with a newline
buf = bytes.TrimPrefix(buf, []byte("\n"))
// Read raw data
buffer := bytes.NewBuffer(buf)
reader := bufio.NewReader(buffer)
// Prepare output
metricFamilies := make(map[string]*dto.MetricFamily)
mediatype, params, err := mime.ParseMediaType(p.Header.Get("Content-Type"))
if err == nil && mediatype == "application/vnd.google.protobuf" &&
params["encoding"] == "delimited" &&
params["proto"] == "io.prometheus.client.MetricFamily" {
for {
mf := &dto.MetricFamily{}
if _, ierr := pbutil.ReadDelimited(reader, mf); ierr != nil {
if ierr == io.EOF {
break
}
return nil, fmt.Errorf("reading metric family protocol buffer failed: %s", ierr)
}
metricFamilies[mf.GetName()] = mf
}
} else {
metricFamilies, err = parser.TextToMetricFamilies(reader)
if err != nil {
return nil, fmt.Errorf("reading text format failed: %s", err)
}
}
now := time.Now()
// read metrics
for metricName, mf := range metricFamilies {
for _, m := range mf.Metric {
// reading tags
tags := MakeLabels(m, p.DefaultTags)
if mf.GetType() == dto.MetricType_SUMMARY {
// summary metric
telegrafMetrics := makeQuantiles(m, tags, metricName, mf.GetType(), now)
metrics = append(metrics, telegrafMetrics...)
} else if mf.GetType() == dto.MetricType_HISTOGRAM {
// histogram metric
telegrafMetrics := makeBuckets(m, tags, metricName, mf.GetType(), now)
metrics = append(metrics, telegrafMetrics...)
} else {
// standard metric
// reading fields
fields := make(map[string]interface{})
fields = getNameAndValue(m, metricName)
// converting to telegraf metric
if len(fields) > 0 {
t := getTimestamp(m, now)
metric, err := metric.New("prometheus", tags, fields, t, ValueType(mf.GetType()))
if err == nil {
metrics = append(metrics, metric)
}
}
}
}
}
return metrics, err
}
func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
metrics, err := p.Parse([]byte(line))
if err != nil {
return nil, err
}
if len(metrics) < 1 {
return nil, fmt.Errorf("No metrics in line")
}
if len(metrics) > 1 {
return nil, fmt.Errorf("More than one metric in line")
}
return metrics[0], nil
}
func (p *Parser) SetDefaultTags(tags map[string]string) {
p.DefaultTags = tags
}
// Get Quantiles for summary metric & Buckets for histogram
func makeQuantiles(m *dto.Metric, tags map[string]string, metricName string, metricType dto.MetricType, now time.Time) []telegraf.Metric {
var metrics []telegraf.Metric
fields := make(map[string]interface{})
t := getTimestamp(m, now)
fields[metricName+"_count"] = float64(m.GetSummary().GetSampleCount())
fields[metricName+"_sum"] = float64(m.GetSummary().GetSampleSum())
met, err := metric.New("prometheus", tags, fields, t, ValueType(metricType))
if err == nil {
metrics = append(metrics, met)
}
for _, q := range m.GetSummary().Quantile {
newTags := tags
fields = make(map[string]interface{})
newTags["quantile"] = fmt.Sprint(q.GetQuantile())
fields[metricName] = float64(q.GetValue())
quantileMetric, err := metric.New("prometheus", newTags, fields, t, ValueType(metricType))
if err == nil {
metrics = append(metrics, quantileMetric)
}
}
return metrics
}
// Get Buckets from histogram metric
func makeBuckets(m *dto.Metric, tags map[string]string, metricName string, metricType dto.MetricType, now time.Time) []telegraf.Metric {
var metrics []telegraf.Metric
fields := make(map[string]interface{})
t := getTimestamp(m, now)
fields[metricName+"_count"] = float64(m.GetHistogram().GetSampleCount())
fields[metricName+"_sum"] = float64(m.GetHistogram().GetSampleSum())
met, err := metric.New("prometheus", tags, fields, t, ValueType(metricType))
if err == nil {
metrics = append(metrics, met)
}
for _, b := range m.GetHistogram().Bucket {
newTags := tags
fields = make(map[string]interface{})
newTags["le"] = fmt.Sprint(b.GetUpperBound())
fields[metricName+"_bucket"] = float64(b.GetCumulativeCount())
histogramMetric, err := metric.New("prometheus", newTags, fields, t, ValueType(metricType))
if err == nil {
metrics = append(metrics, histogramMetric)
}
}
return metrics
}
// Get name and value from metric
func getNameAndValue(m *dto.Metric, metricName string) map[string]interface{} {
fields := make(map[string]interface{})
if m.Gauge != nil {
if !math.IsNaN(m.GetGauge().GetValue()) {
fields[metricName] = float64(m.GetGauge().GetValue())
}
} else if m.Counter != nil {
if !math.IsNaN(m.GetCounter().GetValue()) {
fields[metricName] = float64(m.GetCounter().GetValue())
}
} else if m.Untyped != nil {
if !math.IsNaN(m.GetUntyped().GetValue()) {
fields[metricName] = float64(m.GetUntyped().GetValue())
}
}
return fields
}
func getTimestamp(m *dto.Metric, now time.Time) time.Time {
var t time.Time
if m.TimestampMs != nil && *m.TimestampMs > 0 {
t = time.Unix(0, m.GetTimestampMs()*1000000)
} else {
t = now
}
return t
}

View File

@ -2,22 +2,22 @@ language: go
matrix: matrix:
include: include:
- go: 1.13.x - go: 1.14.x
env: VET=1 GO111MODULE=on env: VET=1 GO111MODULE=on
- go: 1.13.x - go: 1.14.x
env: RACE=1 GO111MODULE=on env: RACE=1 GO111MODULE=on
- go: 1.13.x - go: 1.14.x
env: RUN386=1 env: RUN386=1
- go: 1.13.x - go: 1.14.x
env: GRPC_GO_RETRY=on env: GRPC_GO_RETRY=on
- go: 1.13.x - go: 1.14.x
env: TESTEXTRAS=1 env: TESTEXTRAS=1
- go: 1.13.x
env: GO111MODULE=on
- go: 1.12.x - go: 1.12.x
env: GO111MODULE=on env: GO111MODULE=on
- go: 1.11.x - go: 1.11.x # Keep until interop tests no longer require Go1.11
env: GO111MODULE=on env: GO111MODULE=on
- go: 1.9.x
env: GAE=1
go_import_path: google.golang.org/grpc go_import_path: google.golang.org/grpc

View File

@ -57,6 +57,5 @@ How to get your contributions merged smoothly and quickly.
- `make vet` to catch vet errors - `make vet` to catch vet errors
- `make test` to run the tests - `make test` to run the tests
- `make testrace` to run tests in race mode - `make testrace` to run tests in race mode
- optional `make testappengine` to run tests with appengine
- Exceptions to the rules can be made if there's a compelling reason for doing so. - Exceptions to the rules can be made if there's a compelling reason for doing so.

View File

@ -21,6 +21,7 @@ test: testdeps
testsubmodule: testdeps testsubmodule: testdeps
cd security/advancedtls && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/advancedtls/... cd security/advancedtls && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/advancedtls/...
cd security/authorization && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/authorization/...
testappengine: testappenginedeps testappengine: testappenginedeps
goapp test -cpu 1,4 -timeout 7m google.golang.org/grpc/... goapp test -cpu 1,4 -timeout 7m google.golang.org/grpc/...

View File

@ -1,64 +1,53 @@
# gRPC-Go # gRPC-Go
[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go)
[![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc) [![GoDoc](https://pkg.go.dev/badge/google.golang.org/grpc)][API]
[![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go) [![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go)
The Go implementation of [gRPC](https://grpc.io/): A high performance, open The [Go][] implementation of [gRPC][]: A high performance, open source, general
source, general RPC framework that puts mobile and HTTP/2 first. For more RPC framework that puts mobile and HTTP/2 first. For more information see the
information see the [gRPC Quick Start: [Go gRPC docs][], or jump directly into the [quick start][].
Go](https://grpc.io/docs/quickstart/go.html) guide.
Installation ## Prerequisites
------------
To install this package, you need to install Go and setup your Go workspace on - **[Go][]**: any one of the **three latest major** [releases][go-releases].
your computer. The simplest way to install the library is to run:
## Installation
With [Go module][] support (Go 1.11+), simply add the following import
```go
import "google.golang.org/grpc"
``` ```
to your code, and then `go [build|run|test]` will automatically fetch the
necessary dependencies.
Otherwise, to install the `grpc-go` package, run the following command:
```console
$ go get -u google.golang.org/grpc $ go get -u google.golang.org/grpc
``` ```
With Go module support (Go 1.11+), simply `import "google.golang.org/grpc"` in > **Note:** If you are trying to access `grpc-go` from **China**, see the
your source code and `go [build|run|test]` will automatically download the > [FAQ](#FAQ) below.
necessary dependencies ([Go modules
ref](https://github.com/golang/go/wiki/Modules)).
If you are trying to access grpc-go from within China, please see the ## Learn more
[FAQ](#FAQ) below.
Prerequisites - [Go gRPC docs][], which include a [quick start][] and [API
------------- reference][API] among other resources
gRPC-Go requires Go 1.9 or later. - [Low-level technical docs](Documentation) from this repository
- [Performance benchmark][]
- [Examples](examples)
Documentation ## FAQ
-------------
- See [godoc](https://godoc.org/google.golang.org/grpc) for package and API
descriptions.
- Documentation on specific topics can be found in the [Documentation
directory](Documentation/).
- Examples can be found in the [examples directory](examples/).
Performance ### I/O Timeout Errors
-----------
Performance benchmark data for grpc-go and other languages is maintained in
[this
dashboard](https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5652536396611584&widget=490377658&container=1286539696).
Status The `golang.org` domain may be blocked from some countries. `go get` usually
------
General Availability [Google Cloud Platform Launch
Stages](https://cloud.google.com/terms/launch-stages).
FAQ
---
#### I/O Timeout Errors
The `golang.org` domain may be blocked from some countries. `go get` usually
produces an error like the following when this happens: produces an error like the following when this happens:
``` ```console
$ go get -u google.golang.org/grpc $ go get -u google.golang.org/grpc
package google.golang.org/grpc: unrecognized import path "google.golang.org/grpc" (https fetch: Get https://google.golang.org/grpc?go-get=1: dial tcp 216.239.37.1:443: i/o timeout) package google.golang.org/grpc: unrecognized import path "google.golang.org/grpc" (https fetch: Get https://google.golang.org/grpc?go-get=1: dial tcp 216.239.37.1:443: i/o timeout)
``` ```
@ -69,7 +58,7 @@ To build Go code, there are several options:
- Without Go module support: `git clone` the repo manually: - Without Go module support: `git clone` the repo manually:
``` ```sh
git clone https://github.com/grpc/grpc-go.git $GOPATH/src/google.golang.org/grpc git clone https://github.com/grpc/grpc-go.git $GOPATH/src/google.golang.org/grpc
``` ```
@ -79,7 +68,7 @@ To build Go code, there are several options:
- With Go module support: it is possible to use the `replace` feature of `go - With Go module support: it is possible to use the `replace` feature of `go
mod` to create aliases for golang.org packages. In your project's directory: mod` to create aliases for golang.org packages. In your project's directory:
``` ```sh
go mod edit -replace=google.golang.org/grpc=github.com/grpc/grpc-go@latest go mod edit -replace=google.golang.org/grpc=github.com/grpc/grpc-go@latest
go mod tidy go mod tidy
go mod vendor go mod vendor
@ -87,19 +76,17 @@ To build Go code, there are several options:
``` ```
Again, this will need to be done for all transitive dependencies hosted on Again, this will need to be done for all transitive dependencies hosted on
golang.org as well. Please refer to [this golang.org as well. For details, refer to [golang/go issue #28652](https://github.com/golang/go/issues/28652).
issue](https://github.com/golang/go/issues/28652) in the golang repo regarding
this concern.
#### Compiling error, undefined: grpc.SupportPackageIsVersion ### Compiling error, undefined: grpc.SupportPackageIsVersion
##### If you are using Go modules: #### If you are using Go modules:
Please ensure your gRPC-Go version is `require`d at the appropriate version in Ensure your gRPC-Go version is `require`d at the appropriate version in
the same module containing the generated `.pb.go` files. For example, the same module containing the generated `.pb.go` files. For example,
`SupportPackageIsVersion6` needs `v1.27.0`, so in your `go.mod` file: `SupportPackageIsVersion6` needs `v1.27.0`, so in your `go.mod` file:
``` ```go
module <your module name> module <your module name>
require ( require (
@ -107,23 +94,27 @@ require (
) )
``` ```
##### If you are *not* using Go modules: #### If you are *not* using Go modules:
Please update proto package, gRPC package and rebuild the proto files: Update the `proto` package, gRPC package, and rebuild the `.proto` files:
- `go get -u github.com/golang/protobuf/{proto,protoc-gen-go}`
- `go get -u google.golang.org/grpc`
- `protoc --go_out=plugins=grpc:. *.proto`
#### How to turn on logging ```sh
go get -u github.com/golang/protobuf/{proto,protoc-gen-go}
The default logger is controlled by the environment variables. Turn everything go get -u google.golang.org/grpc
on by setting: protoc --go_out=plugins=grpc:. *.proto
```
GRPC_GO_LOG_VERBOSITY_LEVEL=99 GRPC_GO_LOG_SEVERITY_LEVEL=info
``` ```
#### The RPC failed with error `"code = Unavailable desc = transport is closing"` ### How to turn on logging
The default logger is controlled by environment variables. Turn everything on
like this:
```console
$ export GRPC_GO_LOG_VERBOSITY_LEVEL=99
$ export GRPC_GO_LOG_SEVERITY_LEVEL=info
```
### The RPC failed with error `"code = Unavailable desc = transport is closing"`
This error means the connection the RPC is using was closed, and there are many This error means the connection the RPC is using was closed, and there are many
possible reasons, including: possible reasons, including:
@ -139,3 +130,12 @@ It can be tricky to debug this because the error happens on the client side but
the root cause of the connection being closed is on the server side. Turn on the root cause of the connection being closed is on the server side. Turn on
logging on __both client and server__, and see if there are any transport logging on __both client and server__, and see if there are any transport
errors. errors.
[API]: https://pkg.go.dev/google.golang.org/grpc
[Go]: https://golang.org
[Go module]: https://github.com/golang/go/wiki/Modules
[gRPC]: https://grpc.io
[Go gRPC docs]: https://grpc.io/docs/languages/go
[Performance benchmark]: https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5652536396611584&widget=490377658&container=1286539696
[quick start]: https://grpc.io/docs/languages/go/quickstart
[go-releases]: https://golang.org/doc/devel/release.html

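To complement the module-based install notes above, a minimal hedged dial example follows; the target address is a placeholder and `WithInsecure` is used only to keep the sketch self-contained.

```go
package main

import (
	"log"

	"google.golang.org/grpc"
)

func main() {
	// After `import "google.golang.org/grpc"`, go build/run/test fetches the module.
	conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("did not connect: %v", err)
	}
	defer conn.Close()

	log.Printf("client connection created, state: %v", conn.GetState())
}
```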
View File

@ -19,7 +19,10 @@
// Package attributes defines a generic key/value store used in various gRPC // Package attributes defines a generic key/value store used in various gRPC
// components. // components.
// //
// All APIs in this package are EXPERIMENTAL. // Experimental
//
// Notice: This package is EXPERIMENTAL and may be changed or removed in a
// later release.
package attributes package attributes
import "fmt" import "fmt"
@ -50,6 +53,9 @@ func New(kvs ...interface{}) *Attributes {
// times, the last value overwrites all previous values for that key. To // times, the last value overwrites all previous values for that key. To
// remove an existing key, use a nil value. // remove an existing key, use a nil value.
func (a *Attributes) WithValues(kvs ...interface{}) *Attributes { func (a *Attributes) WithValues(kvs ...interface{}) *Attributes {
if a == nil {
return New(kvs...)
}
if len(kvs)%2 != 0 { if len(kvs)%2 != 0 {
panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs))) panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs)))
} }
@ -66,5 +72,8 @@ func (a *Attributes) WithValues(kvs ...interface{}) *Attributes {
// Value returns the value associated with these attributes for key, or nil if // Value returns the value associated with these attributes for key, or nil if
// no value is associated with key. // no value is associated with key.
func (a *Attributes) Value(key interface{}) interface{} { func (a *Attributes) Value(key interface{}) interface{} {
if a == nil {
return nil
}
return a.m[key] return a.m[key]
} }

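The two nil checks added above make a nil `*Attributes` safe to use. A hedged sketch of what that enables is below; the key type is invented for illustration.

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/attributes"
)

// key is an unexported, comparable key type for the attributes store.
type key struct{}

func main() {
	// With the nil check in WithValues, starting from a nil *Attributes
	// now behaves like attributes.New instead of panicking.
	var a *attributes.Attributes
	a = a.WithValues(key{}, "hello")
	fmt.Println(a.Value(key{})) // hello

	// Value on a nil receiver is also safe and simply returns nil.
	var empty *attributes.Attributes
	fmt.Println(empty.Value(key{}) == nil) // true
}
```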
View File

@ -48,7 +48,10 @@ type BackoffConfig struct {
// here for more details: // here for more details:
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
// //
// This API is EXPERIMENTAL. // Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
type ConnectParams struct { type ConnectParams struct {
// Backoff specifies the configuration options for connection backoff. // Backoff specifies the configuration options for connection backoff.
Backoff backoff.Config Backoff backoff.Config

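A hedged sketch of supplying these backoff knobs at dial time follows; `grpc.WithConnectParams` is assumed from the public dial options (it is not part of this hunk), and all values are placeholders.

```go
package main

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
)

func main() {
	// ConnectParams bundles the connection backoff configuration described above.
	params := grpc.ConnectParams{
		Backoff: backoff.Config{
			BaseDelay:  1 * time.Second,
			Multiplier: 1.6,
			Jitter:     0.2,
			MaxDelay:   120 * time.Second,
		},
		MinConnectTimeout: 20 * time.Second,
	}

	conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure(), grpc.WithConnectParams(params))
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}
```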
View File

@ -1,391 +0,0 @@
/*
*
* Copyright 2016 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package grpc
import (
"context"
"net"
"sync"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/naming"
"google.golang.org/grpc/status"
)
// Address represents a server the client connects to.
//
// Deprecated: please use package balancer.
type Address struct {
// Addr is the server address on which a connection will be established.
Addr string
// Metadata is the information associated with Addr, which may be used
// to make load balancing decision.
Metadata interface{}
}
// BalancerConfig specifies the configurations for Balancer.
//
// Deprecated: please use package balancer. May be removed in a future 1.x release.
type BalancerConfig struct {
// DialCreds is the transport credential the Balancer implementation can
// use to dial to a remote load balancer server. The Balancer implementations
// can ignore this if it does not need to talk to another party securely.
DialCreds credentials.TransportCredentials
// Dialer is the custom dialer the Balancer implementation can use to dial
// to a remote load balancer server. The Balancer implementations
// can ignore this if it doesn't need to talk to remote balancer.
Dialer func(context.Context, string) (net.Conn, error)
}
// BalancerGetOptions configures a Get call.
//
// Deprecated: please use package balancer. May be removed in a future 1.x release.
type BalancerGetOptions struct {
// BlockingWait specifies whether Get should block when there is no
// connected address.
BlockingWait bool
}
// Balancer chooses network addresses for RPCs.
//
// Deprecated: please use package balancer. May be removed in a future 1.x release.
type Balancer interface {
// Start does the initialization work to bootstrap a Balancer. For example,
// this function may start the name resolution and watch the updates. It will
// be called when dialing.
Start(target string, config BalancerConfig) error
// Up informs the Balancer that gRPC has a connection to the server at
// addr. It returns down which is called once the connection to addr gets
// lost or closed.
// TODO: It is not clear how to construct and take advantage of the meaningful error
// parameter for down. Need realistic demands to guide.
Up(addr Address) (down func(error))
// Get gets the address of a server for the RPC corresponding to ctx.
// i) If it returns a connected address, gRPC internals issues the RPC on the
// connection to this address;
// ii) If it returns an address on which the connection is under construction
// (initiated by Notify(...)) but not connected, gRPC internals
// * fails RPC if the RPC is fail-fast and connection is in the TransientFailure or
// Shutdown state;
// or
// * issues RPC on the connection otherwise.
// iii) If it returns an address on which the connection does not exist, gRPC
// internals treats it as an error and will fail the corresponding RPC.
//
// Therefore, the following is the recommended rule when writing a custom Balancer.
// If opts.BlockingWait is true, it should return a connected address or
// block if there is no connected address. It should respect the timeout or
// cancellation of ctx when blocking. If opts.BlockingWait is false (for fail-fast
// RPCs), it should return an address it has notified via Notify(...) immediately
// instead of blocking.
//
// The function returns put which is called once the rpc has completed or failed.
// put can collect and report RPC stats to a remote load balancer.
//
// This function should only return the errors Balancer cannot recover by itself.
// gRPC internals will fail the RPC if an error is returned.
Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error)
// Notify returns a channel that is used by gRPC internals to watch the addresses
// gRPC needs to connect. The addresses might be from a name resolver or remote
// load balancer. gRPC internals will compare it with the existing connected
// addresses. If the address Balancer notified is not in the existing connected
// addresses, gRPC starts to connect the address. If an address in the existing
// connected addresses is not in the notification list, the corresponding connection
// is shutdown gracefully. Otherwise, there are no operations to take. Note that
// the Address slice must be the full list of the Addresses which should be connected.
// It is NOT delta.
Notify() <-chan []Address
// Close shuts down the balancer.
Close() error
}
// RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch
// the name resolution updates and updates the addresses available correspondingly.
//
// Deprecated: please use package balancer/roundrobin. May be removed in a future 1.x release.
func RoundRobin(r naming.Resolver) Balancer {
return &roundRobin{r: r}
}
type addrInfo struct {
addr Address
connected bool
}
type roundRobin struct {
r naming.Resolver
w naming.Watcher
addrs []*addrInfo // all the addresses the client should potentially connect
mu sync.Mutex
addrCh chan []Address // the channel to notify gRPC internals the list of addresses the client should connect to.
next int // index of the next address to return for Get()
waitCh chan struct{} // the channel to block when there is no connected address available
done bool // The Balancer is closed.
}
func (rr *roundRobin) watchAddrUpdates() error {
updates, err := rr.w.Next()
if err != nil {
grpclog.Warningf("grpc: the naming watcher stops working due to %v.", err)
return err
}
rr.mu.Lock()
defer rr.mu.Unlock()
for _, update := range updates {
addr := Address{
Addr: update.Addr,
Metadata: update.Metadata,
}
switch update.Op {
case naming.Add:
var exist bool
for _, v := range rr.addrs {
if addr == v.addr {
exist = true
grpclog.Infoln("grpc: The name resolver wanted to add an existing address: ", addr)
break
}
}
if exist {
continue
}
rr.addrs = append(rr.addrs, &addrInfo{addr: addr})
case naming.Delete:
for i, v := range rr.addrs {
if addr == v.addr {
copy(rr.addrs[i:], rr.addrs[i+1:])
rr.addrs = rr.addrs[:len(rr.addrs)-1]
break
}
}
default:
grpclog.Errorln("Unknown update.Op ", update.Op)
}
}
// Make a copy of rr.addrs and write it onto rr.addrCh so that gRPC internals gets notified.
open := make([]Address, len(rr.addrs))
for i, v := range rr.addrs {
open[i] = v.addr
}
if rr.done {
return ErrClientConnClosing
}
select {
case <-rr.addrCh:
default:
}
rr.addrCh <- open
return nil
}
func (rr *roundRobin) Start(target string, config BalancerConfig) error {
rr.mu.Lock()
defer rr.mu.Unlock()
if rr.done {
return ErrClientConnClosing
}
if rr.r == nil {
// If there is no name resolver installed, it is not needed to
// do name resolution. In this case, target is added into rr.addrs
// as the only address available and rr.addrCh stays nil.
rr.addrs = append(rr.addrs, &addrInfo{addr: Address{Addr: target}})
return nil
}
w, err := rr.r.Resolve(target)
if err != nil {
return err
}
rr.w = w
rr.addrCh = make(chan []Address, 1)
go func() {
for {
if err := rr.watchAddrUpdates(); err != nil {
return
}
}
}()
return nil
}
// Up sets the connected state of addr and sends notification if there are pending
// Get() calls.
func (rr *roundRobin) Up(addr Address) func(error) {
rr.mu.Lock()
defer rr.mu.Unlock()
var cnt int
for _, a := range rr.addrs {
if a.addr == addr {
if a.connected {
return nil
}
a.connected = true
}
if a.connected {
cnt++
}
}
// addr is only one which is connected. Notify the Get() callers who are blocking.
if cnt == 1 && rr.waitCh != nil {
close(rr.waitCh)
rr.waitCh = nil
}
return func(err error) {
rr.down(addr, err)
}
}
// down unsets the connected state of addr.
func (rr *roundRobin) down(addr Address, err error) {
rr.mu.Lock()
defer rr.mu.Unlock()
for _, a := range rr.addrs {
if addr == a.addr {
a.connected = false
break
}
}
}
// Get returns the next addr in the rotation.
func (rr *roundRobin) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) {
var ch chan struct{}
rr.mu.Lock()
if rr.done {
rr.mu.Unlock()
err = ErrClientConnClosing
return
}
if len(rr.addrs) > 0 {
if rr.next >= len(rr.addrs) {
rr.next = 0
}
next := rr.next
for {
a := rr.addrs[next]
next = (next + 1) % len(rr.addrs)
if a.connected {
addr = a.addr
rr.next = next
rr.mu.Unlock()
return
}
if next == rr.next {
// Has iterated all the possible address but none is connected.
break
}
}
}
if !opts.BlockingWait {
if len(rr.addrs) == 0 {
rr.mu.Unlock()
err = status.Errorf(codes.Unavailable, "there is no address available")
return
}
// Returns the next addr on rr.addrs for failfast RPCs.
addr = rr.addrs[rr.next].addr
rr.next++
rr.mu.Unlock()
return
}
// Wait on rr.waitCh for non-failfast RPCs.
if rr.waitCh == nil {
ch = make(chan struct{})
rr.waitCh = ch
} else {
ch = rr.waitCh
}
rr.mu.Unlock()
for {
select {
case <-ctx.Done():
err = ctx.Err()
return
case <-ch:
rr.mu.Lock()
if rr.done {
rr.mu.Unlock()
err = ErrClientConnClosing
return
}
if len(rr.addrs) > 0 {
if rr.next >= len(rr.addrs) {
rr.next = 0
}
next := rr.next
for {
a := rr.addrs[next]
next = (next + 1) % len(rr.addrs)
if a.connected {
addr = a.addr
rr.next = next
rr.mu.Unlock()
return
}
if next == rr.next {
// Has iterated all the possible address but none is connected.
break
}
}
}
// The newly added addr got removed by Down() again.
if rr.waitCh == nil {
ch = make(chan struct{})
rr.waitCh = ch
} else {
ch = rr.waitCh
}
rr.mu.Unlock()
}
}
}
func (rr *roundRobin) Notify() <-chan []Address {
return rr.addrCh
}
func (rr *roundRobin) Close() error {
rr.mu.Lock()
defer rr.mu.Unlock()
if rr.done {
return errBalancerClosed
}
rr.done = true
if rr.w != nil {
rr.w.Close()
}
if rr.waitCh != nil {
close(rr.waitCh)
rr.waitCh = nil
}
if rr.addrCh != nil {
close(rr.addrCh)
}
return nil
}
// pickFirst is used to test multi-addresses in one addrConn in which all addresses share the same addrConn.
// It is a wrapper around roundRobin balancer. The logic of all methods works fine because balancer.Get()
// returns the only address Up by resetTransport().
type pickFirst struct {
*roundRobin
}

View File

@ -111,6 +111,9 @@ type NewSubConnOptions struct {
// CredsBundle is the credentials bundle that will be used in the created // CredsBundle is the credentials bundle that will be used in the created
// SubConn. If it's nil, the original creds from grpc DialOptions will be // SubConn. If it's nil, the original creds from grpc DialOptions will be
// used. // used.
//
// Deprecated: Use the Attributes field in resolver.Address to pass
// arbitrary data to the credential handshaker.
CredsBundle credentials.Bundle CredsBundle credentials.Bundle
// HealthCheckEnabled indicates whether health check service should be // HealthCheckEnabled indicates whether health check service should be
// enabled on this SubConn // enabled on this SubConn
@ -123,7 +126,7 @@ type State struct {
// determine the state of the ClientConn. // determine the state of the ClientConn.
ConnectivityState connectivity.State ConnectivityState connectivity.State
// Picker is used to choose connections (SubConns) for RPCs. // Picker is used to choose connections (SubConns) for RPCs.
Picker V2Picker Picker Picker
} }
// ClientConn represents a gRPC ClientConn. // ClientConn represents a gRPC ClientConn.
@ -141,20 +144,11 @@ type ClientConn interface {
// The SubConn will be shutdown. // The SubConn will be shutdown.
RemoveSubConn(SubConn) RemoveSubConn(SubConn)
// UpdateBalancerState is called by balancer to notify gRPC that some internal
// state in balancer has changed.
//
// gRPC will update the connectivity state of the ClientConn, and will call pick
// on the new picker to pick new SubConn.
//
// Deprecated: use UpdateState instead
UpdateBalancerState(s connectivity.State, p Picker)
// UpdateState notifies gRPC that the balancer's internal state has // UpdateState notifies gRPC that the balancer's internal state has
// changed. // changed.
// //
// gRPC will update the connectivity state of the ClientConn, and will call pick // gRPC will update the connectivity state of the ClientConn, and will call
// on the new picker to pick new SubConns. // Pick on the new Picker to pick new SubConns.
UpdateState(State) UpdateState(State)
// ResolveNow is called by balancer to notify gRPC to do a name resolving. // ResolveNow is called by balancer to notify gRPC to do a name resolving.
@ -232,56 +226,17 @@ type DoneInfo struct {
var ( var (
// ErrNoSubConnAvailable indicates no SubConn is available for pick(). // ErrNoSubConnAvailable indicates no SubConn is available for pick().
// gRPC will block the RPC until a new picker is available via UpdateBalancerState(). // gRPC will block the RPC until a new picker is available via UpdateState().
ErrNoSubConnAvailable = errors.New("no SubConn is available") ErrNoSubConnAvailable = errors.New("no SubConn is available")
// ErrTransientFailure indicates all SubConns are in TransientFailure. // ErrTransientFailure indicates all SubConns are in TransientFailure.
// WaitForReady RPCs will block, non-WaitForReady RPCs will fail. // WaitForReady RPCs will block, non-WaitForReady RPCs will fail.
ErrTransientFailure = TransientFailureError(errors.New("all SubConns are in TransientFailure")) //
// Deprecated: return an appropriate error based on the last resolution or
// connection attempt instead. The behavior is the same for any non-gRPC
// status error.
ErrTransientFailure = errors.New("all SubConns are in TransientFailure")
) )
// Picker is used by gRPC to pick a SubConn to send an RPC.
// Balancer is expected to generate a new picker from its snapshot every time its
// internal state has changed.
//
// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState().
//
// Deprecated: use V2Picker instead
type Picker interface {
// Pick returns the SubConn to be used to send the RPC.
// The returned SubConn must be one returned by NewSubConn().
//
// This functions is expected to return:
// - a SubConn that is known to be READY;
// - ErrNoSubConnAvailable if no SubConn is available, but progress is being
// made (for example, some SubConn is in CONNECTING mode);
// - other errors if no active connecting is happening (for example, all SubConn
// are in TRANSIENT_FAILURE mode).
//
// If a SubConn is returned:
// - If it is READY, gRPC will send the RPC on it;
// - If it is not ready, or becomes not ready after it's returned, gRPC will
// block until UpdateBalancerState() is called and will call pick on the
// new picker. The done function returned from Pick(), if not nil, will be
// called with nil error, no bytes sent and no bytes received.
//
// If the returned error is not nil:
// - If the error is ErrNoSubConnAvailable, gRPC will block until UpdateBalancerState()
// - If the error is ErrTransientFailure or implements IsTransientFailure()
// bool, returning true:
// - If the RPC is wait-for-ready, gRPC will block until UpdateBalancerState()
// is called to pick again;
// - Otherwise, RPC will fail with unavailable error.
// - Else (error is other non-nil error):
// - The RPC will fail with the error's status code, or Unknown if it is
// not a status error.
//
// The returned done() function will be called once the rpc has finished,
// with the final status of that RPC. If the SubConn returned is not a
// valid SubConn type, done may not be called. done may be nil if balancer
// doesn't care about the RPC status.
Pick(ctx context.Context, info PickInfo) (conn SubConn, done func(DoneInfo), err error)
}
// PickResult contains information related to a connection chosen for an RPC. // PickResult contains information related to a connection chosen for an RPC.
type PickResult struct { type PickResult struct {
// SubConn is the connection to use for this pick, if its state is Ready. // SubConn is the connection to use for this pick, if its state is Ready.
@ -297,24 +252,19 @@ type PickResult struct {
Done func(DoneInfo) Done func(DoneInfo)
} }
type transientFailureError struct { // TransientFailureError returns e. It exists for backward compatibility and
error // will be deleted soon.
} //
// Deprecated: no longer necessary, picker errors are treated this way by
// default.
func TransientFailureError(e error) error { return e }
func (e *transientFailureError) IsTransientFailure() bool { return true } // Picker is used by gRPC to pick a SubConn to send an RPC.
// TransientFailureError wraps err in an error implementing
// IsTransientFailure() bool, returning true.
func TransientFailureError(err error) error {
return &transientFailureError{error: err}
}
// V2Picker is used by gRPC to pick a SubConn to send an RPC.
// Balancer is expected to generate a new picker from its snapshot every time its // Balancer is expected to generate a new picker from its snapshot every time its
// internal state has changed. // internal state has changed.
// //
// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState(). // The pickers used by gRPC can be updated by ClientConn.UpdateState().
type V2Picker interface { type Picker interface {
// Pick returns the connection to use for this RPC and related information. // Pick returns the connection to use for this RPC and related information.
// //
// Pick should not block. If the balancer needs to do I/O or any blocking // Pick should not block. If the balancer needs to do I/O or any blocking
@ -327,14 +277,13 @@ type V2Picker interface {
// - If the error is ErrNoSubConnAvailable, gRPC will block until a new // - If the error is ErrNoSubConnAvailable, gRPC will block until a new
// Picker is provided by the balancer (using ClientConn.UpdateState). // Picker is provided by the balancer (using ClientConn.UpdateState).
// //
// - If the error implements IsTransientFailure() bool, returning true, // - If the error is a status error (implemented by the grpc/status
// wait for ready RPCs will wait, but non-wait for ready RPCs will be // package), gRPC will terminate the RPC with the code and message
// terminated with this error's Error() string and status code // provided.
// Unavailable.
// //
// - Any other errors terminate all RPCs with the code and message // - For all other errors, wait for ready RPCs will wait, but non-wait for
// provided. If the error is not a status error, it will be converted by // ready RPCs will be terminated with this error's Error() string and
// gRPC to a status error with code Unknown. // status code Unavailable.
Pick(info PickInfo) (PickResult, error) Pick(info PickInfo) (PickResult, error)
} }
@ -343,29 +292,21 @@ type V2Picker interface {
// //
// It also generates and updates the Picker used by gRPC to pick SubConns for RPCs. // It also generates and updates the Picker used by gRPC to pick SubConns for RPCs.
// //
// HandleSubConnectionStateChange, HandleResolvedAddrs and Close are guaranteed // UpdateClientConnState, ResolverError, UpdateSubConnState, and Close are
// to be called synchronously from the same goroutine. // guaranteed to be called synchronously from the same goroutine. There's no
// There's no guarantee on picker.Pick, it may be called anytime. // guarantee on picker.Pick, it may be called anytime.
type Balancer interface { type Balancer interface {
// HandleSubConnStateChange is called by gRPC when the connectivity state // UpdateClientConnState is called by gRPC when the state of the ClientConn
// of sc has changed. // changes. If the error returned is ErrBadResolverState, the ClientConn
// Balancer is expected to aggregate all the state of SubConn and report // will begin calling ResolveNow on the active name resolver with
// that back to gRPC. // exponential backoff until a subsequent call to UpdateClientConnState
// Balancer should also generate and update Pickers when its internal state has // returns a nil error. Any other errors are currently ignored.
// been changed by the new state. UpdateClientConnState(ClientConnState) error
// // ResolverError is called by gRPC when the name resolver reports an error.
// Deprecated: if V2Balancer is implemented by the Balancer, ResolverError(error)
// UpdateSubConnState will be called instead. // UpdateSubConnState is called by gRPC when the state of a SubConn
HandleSubConnStateChange(sc SubConn, state connectivity.State) // changes.
// HandleResolvedAddrs is called by gRPC to send updated resolved addresses to UpdateSubConnState(SubConn, SubConnState)
// balancers.
// Balancer can create new SubConn or remove SubConn with the addresses.
// An empty address slice and a non-nil error will be passed if the resolver returns
// non-nil error to gRPC.
//
// Deprecated: if V2Balancer is implemented by the Balancer,
// UpdateClientConnState will be called instead.
HandleResolvedAddrs([]resolver.Address, error)
// Close closes the balancer. The balancer is not required to call // Close closes the balancer. The balancer is not required to call
// ClientConn.RemoveSubConn for its existing SubConns. // ClientConn.RemoveSubConn for its existing SubConns.
Close() Close()
@ -393,27 +334,6 @@ type ClientConnState struct {
// problem with the provided name resolver data. // problem with the provided name resolver data.
var ErrBadResolverState = errors.New("bad resolver state") var ErrBadResolverState = errors.New("bad resolver state")
// V2Balancer is defined for documentation purposes. If a Balancer also
// implements V2Balancer, its UpdateClientConnState method will be called
// instead of HandleResolvedAddrs and its UpdateSubConnState will be called
// instead of HandleSubConnStateChange.
type V2Balancer interface {
// UpdateClientConnState is called by gRPC when the state of the ClientConn
// changes. If the error returned is ErrBadResolverState, the ClientConn
// will begin calling ResolveNow on the active name resolver with
// exponential backoff until a subsequent call to UpdateClientConnState
// returns a nil error. Any other errors are currently ignored.
UpdateClientConnState(ClientConnState) error
// ResolverError is called by gRPC when the name resolver reports an error.
ResolverError(error)
// UpdateSubConnState is called by gRPC when the state of a SubConn
// changes.
UpdateSubConnState(SubConn, SubConnState)
// Close closes the balancer. The balancer is not required to call
// ClientConn.RemoveSubConn for its existing SubConns.
Close()
}
// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns // ConnectivityStateEvaluator takes the connectivity states of multiple SubConns
// and returns one aggregated connectivity state. // and returns one aggregated connectivity state.
// //

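As a companion to the consolidated `Picker` contract above, here is a hedged sketch of a non-blocking round-robin picker. It is not the balancer shipped with gRPC, only an illustration of the `Pick` semantics described in the comments.

```go
package example

import (
	"sync/atomic"

	"google.golang.org/grpc/balancer"
)

// rrPicker returns ready SubConns in round-robin order. Pick never blocks,
// as the Picker contract requires.
type rrPicker struct {
	subConns []balancer.SubConn
	next     uint32
}

func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
	if len(p.subConns) == 0 {
		// Tell gRPC to block RPCs until the balancer supplies a new picker.
		return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
	}
	n := atomic.AddUint32(&p.next, 1)
	sc := p.subConns[n%uint32(len(p.subConns))]
	return balancer.PickResult{SubConn: sc}, nil
}
```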
View File

@ -19,7 +19,6 @@
package base package base
import ( import (
"context"
"errors" "errors"
"fmt" "fmt"
@ -29,18 +28,18 @@ import (
"google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver"
) )
var logger = grpclog.Component("balancer")
type baseBuilder struct { type baseBuilder struct {
name string name string
pickerBuilder PickerBuilder pickerBuilder PickerBuilder
v2PickerBuilder V2PickerBuilder config Config
config Config
} }
func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
bal := &baseBalancer{ bal := &baseBalancer{
cc: cc, cc: cc,
pickerBuilder: bb.pickerBuilder, pickerBuilder: bb.pickerBuilder,
v2PickerBuilder: bb.v2PickerBuilder,
subConns: make(map[resolver.Address]balancer.SubConn), subConns: make(map[resolver.Address]balancer.SubConn),
scStates: make(map[balancer.SubConn]connectivity.State), scStates: make(map[balancer.SubConn]connectivity.State),
@ -50,11 +49,7 @@ func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions)
// Initialize picker to a picker that always returns // Initialize picker to a picker that always returns
// ErrNoSubConnAvailable, because when state of a SubConn changes, we // ErrNoSubConnAvailable, because when state of a SubConn changes, we
// may call UpdateState with this picker. // may call UpdateState with this picker.
if bb.pickerBuilder != nil { bal.picker = NewErrPicker(balancer.ErrNoSubConnAvailable)
bal.picker = NewErrPicker(balancer.ErrNoSubConnAvailable)
} else {
bal.v2Picker = NewErrPickerV2(balancer.ErrNoSubConnAvailable)
}
return bal return bal
} }
@ -62,12 +57,9 @@ func (bb *baseBuilder) Name() string {
return bb.name return bb.name
} }
var _ balancer.V2Balancer = (*baseBalancer)(nil) // Assert that we implement V2Balancer
type baseBalancer struct { type baseBalancer struct {
cc balancer.ClientConn cc balancer.ClientConn
pickerBuilder PickerBuilder pickerBuilder PickerBuilder
v2PickerBuilder V2PickerBuilder
csEvltr *balancer.ConnectivityStateEvaluator csEvltr *balancer.ConnectivityStateEvaluator
state connectivity.State state connectivity.State
@ -75,43 +67,34 @@ type baseBalancer struct {
subConns map[resolver.Address]balancer.SubConn subConns map[resolver.Address]balancer.SubConn
scStates map[balancer.SubConn]connectivity.State scStates map[balancer.SubConn]connectivity.State
picker balancer.Picker picker balancer.Picker
v2Picker balancer.V2Picker
config Config config Config
resolverErr error // the last error reported by the resolver; cleared on successful resolution resolverErr error // the last error reported by the resolver; cleared on successful resolution
connErr error // the last connection error; cleared upon leaving TransientFailure connErr error // the last connection error; cleared upon leaving TransientFailure
} }
func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
panic("not implemented")
}
func (b *baseBalancer) ResolverError(err error) { func (b *baseBalancer) ResolverError(err error) {
b.resolverErr = err b.resolverErr = err
if len(b.subConns) == 0 { if len(b.subConns) == 0 {
b.state = connectivity.TransientFailure b.state = connectivity.TransientFailure
} }
if b.state != connectivity.TransientFailure { if b.state != connectivity.TransientFailure {
// The picker will not change since the balancer does not currently // The picker will not change since the balancer does not currently
// report an error. // report an error.
return return
} }
b.regeneratePicker() b.regeneratePicker()
if b.picker != nil { b.cc.UpdateState(balancer.State{
b.cc.UpdateBalancerState(b.state, b.picker) ConnectivityState: b.state,
} else { Picker: b.picker,
b.cc.UpdateState(balancer.State{ })
ConnectivityState: b.state,
Picker: b.v2Picker,
})
}
} }
func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
// TODO: handle s.ResolverState.Err (log if not nil) once implemented.
// TODO: handle s.ResolverState.ServiceConfig? // TODO: handle s.ResolverState.ServiceConfig?
if grpclog.V(2) { if logger.V(2) {
grpclog.Infoln("base.baseBalancer: got new ClientConn state: ", s) logger.Info("base.baseBalancer: got new ClientConn state: ", s)
} }
// Successful resolution; clear resolver error and ensure we return nil. // Successful resolution; clear resolver error and ensure we return nil.
b.resolverErr = nil b.resolverErr = nil
@ -123,7 +106,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
// a is a new address (not existing in b.subConns). // a is a new address (not existing in b.subConns).
sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck}) sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck})
if err != nil { if err != nil {
grpclog.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err)
continue continue
} }
b.subConns[a] = sc b.subConns[a] = sc
@ -137,7 +120,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
b.cc.RemoveSubConn(sc) b.cc.RemoveSubConn(sc)
delete(b.subConns, a) delete(b.subConns, a)
// Keep the state of this sc in b.scStates until sc's state becomes Shutdown. // Keep the state of this sc in b.scStates until sc's state becomes Shutdown.
// The entry will be deleted in HandleSubConnStateChange. // The entry will be deleted in UpdateSubConnState.
} }
} }
// If resolver state contains no addresses, return an error so ClientConn // If resolver state contains no addresses, return an error so ClientConn
@ -171,49 +154,29 @@ func (b *baseBalancer) mergeErrors() error {
// - built by the pickerBuilder with all READY SubConns otherwise. // - built by the pickerBuilder with all READY SubConns otherwise.
func (b *baseBalancer) regeneratePicker() { func (b *baseBalancer) regeneratePicker() {
if b.state == connectivity.TransientFailure { if b.state == connectivity.TransientFailure {
if b.pickerBuilder != nil { b.picker = NewErrPicker(b.mergeErrors())
b.picker = NewErrPicker(balancer.ErrTransientFailure)
} else {
b.v2Picker = NewErrPickerV2(balancer.TransientFailureError(b.mergeErrors()))
}
return return
} }
if b.pickerBuilder != nil { readySCs := make(map[balancer.SubConn]SubConnInfo)
readySCs := make(map[resolver.Address]balancer.SubConn)
// Filter out all ready SCs from full subConn map. // Filter out all ready SCs from full subConn map.
for addr, sc := range b.subConns { for addr, sc := range b.subConns {
if st, ok := b.scStates[sc]; ok && st == connectivity.Ready { if st, ok := b.scStates[sc]; ok && st == connectivity.Ready {
readySCs[addr] = sc readySCs[sc] = SubConnInfo{Address: addr}
}
} }
b.picker = b.pickerBuilder.Build(readySCs)
} else {
readySCs := make(map[balancer.SubConn]SubConnInfo)
// Filter out all ready SCs from full subConn map.
for addr, sc := range b.subConns {
if st, ok := b.scStates[sc]; ok && st == connectivity.Ready {
readySCs[sc] = SubConnInfo{Address: addr}
}
}
b.v2Picker = b.v2PickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs})
} }
} b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs})
func (b *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
panic("not implemented")
} }
func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
s := state.ConnectivityState s := state.ConnectivityState
if grpclog.V(2) { if logger.V(2) {
grpclog.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s) logger.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s)
} }
oldS, ok := b.scStates[sc] oldS, ok := b.scStates[sc]
if !ok { if !ok {
if grpclog.V(2) { if logger.V(2) {
grpclog.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s) logger.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s)
} }
return return
} }
@ -247,11 +210,7 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su
b.regeneratePicker() b.regeneratePicker()
} }
if b.picker != nil { b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker})
b.cc.UpdateBalancerState(b.state, b.picker)
} else {
b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.v2Picker})
}
} }
// Close is a nop because base balancer doesn't have internal state to clean up, // Close is a nop because base balancer doesn't have internal state to clean up,
@ -259,28 +218,20 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su
func (b *baseBalancer) Close() { func (b *baseBalancer) Close() {
} }
// NewErrPicker returns a picker that always returns err on Pick(). // NewErrPicker returns a Picker that always returns err on Pick().
func NewErrPicker(err error) balancer.Picker { func NewErrPicker(err error) balancer.Picker {
return &errPicker{err: err} return &errPicker{err: err}
} }
// NewErrPickerV2 is temporarily defined for backward compatibility reasons.
//
// Deprecated: use NewErrPicker instead.
var NewErrPickerV2 = NewErrPicker
type errPicker struct { type errPicker struct {
err error // Pick() always returns this err. err error // Pick() always returns this err.
} }
func (p *errPicker) Pick(context.Context, balancer.PickInfo) (balancer.SubConn, func(balancer.DoneInfo), error) { func (p *errPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
return nil, nil, p.err
}
// NewErrPickerV2 returns a V2Picker that always returns err on Pick().
func NewErrPickerV2(err error) balancer.V2Picker {
return &errPickerV2{err: err}
}
type errPickerV2 struct {
err error // Pick() always returns this err.
}
func (p *errPickerV2) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
return balancer.PickResult{}, p.err return balancer.PickResult{}, p.err
} }

View File

@ -37,15 +37,8 @@ import (
// PickerBuilder creates balancer.Picker. // PickerBuilder creates balancer.Picker.
type PickerBuilder interface { type PickerBuilder interface {
// Build takes a slice of ready SubConns, and returns a picker that will be
// used by gRPC to pick a SubConn.
Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker
}
// V2PickerBuilder creates balancer.V2Picker.
type V2PickerBuilder interface {
// Build returns a picker that will be used by gRPC to pick a SubConn. // Build returns a picker that will be used by gRPC to pick a SubConn.
Build(info PickerBuildInfo) balancer.V2Picker Build(info PickerBuildInfo) balancer.Picker
} }
// PickerBuildInfo contains information needed by the picker builder to // PickerBuildInfo contains information needed by the picker builder to
@ -62,32 +55,17 @@ type SubConnInfo struct {
Address resolver.Address // the address used to create this SubConn Address resolver.Address // the address used to create this SubConn
} }
// NewBalancerBuilder returns a balancer builder. The balancers
// built by this builder will use the picker builder to build pickers.
func NewBalancerBuilder(name string, pb PickerBuilder) balancer.Builder {
return NewBalancerBuilderWithConfig(name, pb, Config{})
}
// Config contains the config info about the base balancer builder. // Config contains the config info about the base balancer builder.
type Config struct { type Config struct {
// HealthCheck indicates whether health checking should be enabled for this specific balancer. // HealthCheck indicates whether health checking should be enabled for this specific balancer.
HealthCheck bool HealthCheck bool
} }
// NewBalancerBuilderWithConfig returns a base balancer builder configured by the provided config. // NewBalancerBuilder returns a base balancer builder configured by the provided config.
func NewBalancerBuilderWithConfig(name string, pb PickerBuilder, config Config) balancer.Builder { func NewBalancerBuilder(name string, pb PickerBuilder, config Config) balancer.Builder {
return &baseBuilder{ return &baseBuilder{
name: name, name: name,
pickerBuilder: pb, pickerBuilder: pb,
config: config, config: config,
} }
} }
// NewBalancerBuilderV2 returns a base balancer builder configured by the provided config.
func NewBalancerBuilderV2(name string, pb V2PickerBuilder, config Config) balancer.Builder {
return &baseBuilder{
name: name,
v2PickerBuilder: pb,
config: config,
}
}

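A hedged sketch of what the consolidated builder API looks like from a caller's side: a custom `PickerBuilder` registered through the new three-argument `NewBalancerBuilder`. The balancer name and picking policy here are invented for illustration; the real round_robin wiring follows in the next file.

```go
package example

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/base"
)

// firstReadyPickerBuilder implements the updated PickerBuilder contract:
// Build receives PickerBuildInfo and returns a balancer.Picker.
type firstReadyPickerBuilder struct{}

func (*firstReadyPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker {
	if len(info.ReadySCs) == 0 {
		return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
	}
	for sc := range info.ReadySCs {
		// Arbitrarily pin the first ready SubConn we see.
		return &firstReadyPicker{sc: sc}
	}
	return nil // unreachable: ReadySCs is non-empty
}

type firstReadyPicker struct{ sc balancer.SubConn }

func (p *firstReadyPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
	return balancer.PickResult{SubConn: p.sc}, nil
}

func init() {
	// Mirrors how round_robin registers itself with the new signature.
	balancer.Register(base.NewBalancerBuilder("first_ready_example", &firstReadyPickerBuilder{}, base.Config{HealthCheck: true}))
}
```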
View File

@ -0,0 +1,51 @@
/*
*
* Copyright 2020 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package state declares grpclb types to be set by resolvers wishing to pass
// information to grpclb via resolver.State Attributes.
package state
import (
"google.golang.org/grpc/resolver"
)
// keyType is the key to use for storing State in Attributes.
type keyType string
const key = keyType("grpc.grpclb.state")
// State contains gRPCLB-relevant data passed from the name resolver.
type State struct {
// BalancerAddresses contains the remote load balancer address(es). If
// set, overrides any resolver-provided addresses with Type of GRPCLB.
BalancerAddresses []resolver.Address
}
// Set returns a copy of the provided state with attributes containing s. s's
// data should not be mutated after calling Set.
func Set(state resolver.State, s *State) resolver.State {
state.Attributes = state.Attributes.WithValues(key, s)
return state
}
// Get returns the grpclb State in the resolver.State, or nil if not present.
// The returned data should not be mutated.
func Get(state resolver.State) *State {
s, _ := state.Attributes.Value(key).(*State)
return s
}
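The state package gives resolvers a typed way to hand grpclb its remote balancer addresses through resolver.State attributes. A hedged sketch of a resolver pushing such an update with Set; the helper name and the address slices are placeholders:

package example

import (
	grpclbstate "google.golang.org/grpc/balancer/grpclb/state"
	"google.golang.org/grpc/resolver"
)

// pushUpdate attaches grpclb balancer addresses to a resolver.State before
// handing it to the ClientConn; backends and balancers come from the caller.
func pushUpdate(cc resolver.ClientConn, backends, balancers []resolver.Address) {
	s := resolver.State{Addresses: backends}
	// Set returns a copy of s whose Attributes carry the grpclb State.
	s = grpclbstate.Set(s, &grpclbstate.State{BalancerAddresses: balancers})
	cc.UpdateState(s)
}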

View File

@ -33,9 +33,11 @@ import (
// Name is the name of round_robin balancer. // Name is the name of round_robin balancer.
const Name = "round_robin" const Name = "round_robin"
var logger = grpclog.Component("roundrobin")
// newBuilder creates a new roundrobin balancer builder. // newBuilder creates a new roundrobin balancer builder.
func newBuilder() balancer.Builder { func newBuilder() balancer.Builder {
return base.NewBalancerBuilderV2(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true}) return base.NewBalancerBuilder(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true})
} }
func init() { func init() {
@ -44,10 +46,10 @@ func init() {
type rrPickerBuilder struct{} type rrPickerBuilder struct{}
func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.V2Picker { func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker {
grpclog.Infof("roundrobinPicker: newPicker called with info: %v", info) logger.Infof("roundrobinPicker: newPicker called with info: %v", info)
if len(info.ReadySCs) == 0 { if len(info.ReadySCs) == 0 {
return base.NewErrPickerV2(balancer.ErrNoSubConnAvailable) return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
} }
var scs []balancer.SubConn var scs []balancer.SubConn
for sc := range info.ReadySCs { for sc := range info.ReadySCs {
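round_robin now registers through the consolidated base builder and logs via a named component logger; selecting the policy from client code is unchanged. A small sketch using the usual service-config JSON form — the target and the insecure credentials are placeholders:

package example

import (
	"google.golang.org/grpc"
)

// dialRoundRobin opens a connection that spreads RPCs across all resolved
// backends using the built-in round_robin policy.
func dialRoundRobin(target string) (*grpc.ClientConn, error) {
	return grpc.Dial(target,
		grpc.WithInsecure(), // placeholder credentials for this sketch
		grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"round_robin":{}}]}`),
	)
}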

View File

@ -74,11 +74,7 @@ func (ccb *ccBalancerWrapper) watcher() {
} }
ccb.balancerMu.Lock() ccb.balancerMu.Lock()
su := t.(*scStateUpdate) su := t.(*scStateUpdate)
if ub, ok := ccb.balancer.(balancer.V2Balancer); ok { ccb.balancer.UpdateSubConnState(su.sc, balancer.SubConnState{ConnectivityState: su.state, ConnectionError: su.err})
ub.UpdateSubConnState(su.sc, balancer.SubConnState{ConnectivityState: su.state, ConnectionError: su.err})
} else {
ccb.balancer.HandleSubConnStateChange(su.sc, su.state)
}
ccb.balancerMu.Unlock() ccb.balancerMu.Unlock()
case <-ccb.done.Done(): case <-ccb.done.Done():
} }
@ -123,19 +119,13 @@ func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s co
func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
ccb.balancerMu.Lock() ccb.balancerMu.Lock()
defer ccb.balancerMu.Unlock() defer ccb.balancerMu.Unlock()
if ub, ok := ccb.balancer.(balancer.V2Balancer); ok { return ccb.balancer.UpdateClientConnState(*ccs)
return ub.UpdateClientConnState(*ccs)
}
ccb.balancer.HandleResolvedAddrs(ccs.ResolverState.Addresses, nil)
return nil
} }
func (ccb *ccBalancerWrapper) resolverError(err error) { func (ccb *ccBalancerWrapper) resolverError(err error) {
if ub, ok := ccb.balancer.(balancer.V2Balancer); ok { ccb.balancerMu.Lock()
ccb.balancerMu.Lock() ccb.balancer.ResolverError(err)
ub.ResolverError(err) ccb.balancerMu.Unlock()
ccb.balancerMu.Unlock()
}
} }
func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
@ -173,21 +163,6 @@ func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
} }
func (ccb *ccBalancerWrapper) UpdateBalancerState(s connectivity.State, p balancer.Picker) {
ccb.mu.Lock()
defer ccb.mu.Unlock()
if ccb.subConns == nil {
return
}
// Update picker before updating state. Even though the ordering here does
// not matter, it can lead to multiple calls of Pick in the common start-up
// case where we wait for ready and then perform an RPC. If the picker is
// updated later, we could call the "connecting" picker when the state is
// updated, and then call the "ready" picker after the picker gets updated.
ccb.cc.blockingpicker.updatePicker(p)
ccb.cc.csMgr.updateState(s)
}
func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
ccb.mu.Lock() ccb.mu.Lock()
defer ccb.mu.Unlock() defer ccb.mu.Unlock()
@ -199,7 +174,7 @@ func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
// case where we wait for ready and then perform an RPC. If the picker is // case where we wait for ready and then perform an RPC. If the picker is
// updated later, we could call the "connecting" picker when the state is // updated later, we could call the "connecting" picker when the state is
// updated, and then call the "ready" picker after the picker gets updated. // updated, and then call the "ready" picker after the picker gets updated.
ccb.cc.blockingpicker.updatePickerV2(s.Picker) ccb.cc.blockingpicker.updatePicker(s.Picker)
ccb.cc.csMgr.updateState(s.ConnectivityState) ccb.cc.csMgr.updateState(s.ConnectivityState)
} }
@ -245,7 +220,7 @@ func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
ac, err := cc.newAddrConn(addrs, opts) ac, err := cc.newAddrConn(addrs, opts)
if err != nil { if err != nil {
channelz.Warningf(acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) channelz.Warningf(logger, acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err)
return return
} }
acbw.ac = ac acbw.ac = ac
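With the V2Balancer type assertion gone, the wrapper calls UpdateClientConnState, UpdateSubConnState and ResolverError on every balancer unconditionally, so implementations must provide the full consolidated interface. A skeleton sketch — the type name is illustrative and the method bodies are intentionally empty:

package example

import (
	"google.golang.org/grpc/balancer"
)

// noopBalancer sketches the method set the wrapper now expects from every
// registered balancer.
type noopBalancer struct{}

func (*noopBalancer) UpdateClientConnState(balancer.ClientConnState) error { return nil }
func (*noopBalancer) ResolverError(error) {}
func (*noopBalancer) UpdateSubConnState(balancer.SubConn, balancer.SubConnState) {}
func (*noopBalancer) Close() {}

// Compile-time check that the skeleton satisfies balancer.Balancer.
var _ balancer.Balancer = (*noopBalancer)(nil)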

View File

@ -1,334 +0,0 @@
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package grpc
import (
"sync"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/resolver"
)
type balancerWrapperBuilder struct {
b Balancer // The v1 balancer.
}
func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
bwb.b.Start(opts.Target.Endpoint, BalancerConfig{
DialCreds: opts.DialCreds,
Dialer: opts.Dialer,
})
_, pickfirst := bwb.b.(*pickFirst)
bw := &balancerWrapper{
balancer: bwb.b,
pickfirst: pickfirst,
cc: cc,
targetAddr: opts.Target.Endpoint,
startCh: make(chan struct{}),
conns: make(map[resolver.Address]balancer.SubConn),
connSt: make(map[balancer.SubConn]*scState),
csEvltr: &balancer.ConnectivityStateEvaluator{},
state: connectivity.Idle,
}
cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: bw})
go bw.lbWatcher()
return bw
}
func (bwb *balancerWrapperBuilder) Name() string {
return "wrapper"
}
type scState struct {
addr Address // The v1 address type.
s connectivity.State
down func(error)
}
type balancerWrapper struct {
balancer Balancer // The v1 balancer.
pickfirst bool
cc balancer.ClientConn
targetAddr string // Target without the scheme.
mu sync.Mutex
conns map[resolver.Address]balancer.SubConn
connSt map[balancer.SubConn]*scState
// This channel is closed when handling the first resolver result.
// lbWatcher blocks until this is closed, to avoid race between
// - NewSubConn is created, cc wants to notify balancer of state changes;
// - Build hasn't returned, cc doesn't have access to balancer.
startCh chan struct{}
// To aggregate the connectivity state.
csEvltr *balancer.ConnectivityStateEvaluator
state connectivity.State
}
// lbWatcher watches the Notify channel of the balancer and manages
// connections accordingly.
func (bw *balancerWrapper) lbWatcher() {
<-bw.startCh
notifyCh := bw.balancer.Notify()
if notifyCh == nil {
// There's no resolver in the balancer. Connect directly.
a := resolver.Address{
Addr: bw.targetAddr,
Type: resolver.Backend,
}
sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{})
if err != nil {
grpclog.Warningf("Error creating connection to %v. Err: %v", a, err)
} else {
bw.mu.Lock()
bw.conns[a] = sc
bw.connSt[sc] = &scState{
addr: Address{Addr: bw.targetAddr},
s: connectivity.Idle,
}
bw.mu.Unlock()
sc.Connect()
}
return
}
for addrs := range notifyCh {
grpclog.Infof("balancerWrapper: got update addr from Notify: %v", addrs)
if bw.pickfirst {
var (
oldA resolver.Address
oldSC balancer.SubConn
)
bw.mu.Lock()
for oldA, oldSC = range bw.conns {
break
}
bw.mu.Unlock()
if len(addrs) <= 0 {
if oldSC != nil {
// Teardown old sc.
bw.mu.Lock()
delete(bw.conns, oldA)
delete(bw.connSt, oldSC)
bw.mu.Unlock()
bw.cc.RemoveSubConn(oldSC)
}
continue
}
var newAddrs []resolver.Address
for _, a := range addrs {
newAddr := resolver.Address{
Addr: a.Addr,
Type: resolver.Backend, // All addresses from the balancer are backends.
ServerName: "",
Metadata: a.Metadata,
}
newAddrs = append(newAddrs, newAddr)
}
if oldSC == nil {
// Create new sc.
sc, err := bw.cc.NewSubConn(newAddrs, balancer.NewSubConnOptions{})
if err != nil {
grpclog.Warningf("Error creating connection to %v. Err: %v", newAddrs, err)
} else {
bw.mu.Lock()
// For pickfirst, there should be only one SubConn, so the
// address doesn't matter. All states updating (up and down)
// and picking should all happen on that only SubConn.
bw.conns[resolver.Address{}] = sc
bw.connSt[sc] = &scState{
addr: addrs[0], // Use the first address.
s: connectivity.Idle,
}
bw.mu.Unlock()
sc.Connect()
}
} else {
bw.mu.Lock()
bw.connSt[oldSC].addr = addrs[0]
bw.mu.Unlock()
oldSC.UpdateAddresses(newAddrs)
}
} else {
var (
add []resolver.Address // Addresses that need new connections set up.
del []balancer.SubConn // SubConns that need to be torn down.
)
resAddrs := make(map[resolver.Address]Address)
for _, a := range addrs {
resAddrs[resolver.Address{
Addr: a.Addr,
Type: resolver.Backend, // All addresses from the balancer are backends.
ServerName: "",
Metadata: a.Metadata,
}] = a
}
bw.mu.Lock()
for a := range resAddrs {
if _, ok := bw.conns[a]; !ok {
add = append(add, a)
}
}
for a, c := range bw.conns {
if _, ok := resAddrs[a]; !ok {
del = append(del, c)
delete(bw.conns, a)
// Keep the state of this sc in bw.connSt until its state becomes Shutdown.
}
}
bw.mu.Unlock()
for _, a := range add {
sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{})
if err != nil {
grpclog.Warningf("Error creating connection to %v. Err: %v", a, err)
} else {
bw.mu.Lock()
bw.conns[a] = sc
bw.connSt[sc] = &scState{
addr: resAddrs[a],
s: connectivity.Idle,
}
bw.mu.Unlock()
sc.Connect()
}
}
for _, c := range del {
bw.cc.RemoveSubConn(c)
}
}
}
}
func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
bw.mu.Lock()
defer bw.mu.Unlock()
scSt, ok := bw.connSt[sc]
if !ok {
return
}
if s == connectivity.Idle {
sc.Connect()
}
oldS := scSt.s
scSt.s = s
if oldS != connectivity.Ready && s == connectivity.Ready {
scSt.down = bw.balancer.Up(scSt.addr)
} else if oldS == connectivity.Ready && s != connectivity.Ready {
if scSt.down != nil {
scSt.down(errConnClosing)
}
}
sa := bw.csEvltr.RecordTransition(oldS, s)
if bw.state != sa {
bw.state = sa
}
bw.cc.UpdateState(balancer.State{ConnectivityState: bw.state, Picker: bw})
if s == connectivity.Shutdown {
// Remove state for this sc.
delete(bw.connSt, sc)
}
}
func (bw *balancerWrapper) HandleResolvedAddrs([]resolver.Address, error) {
bw.mu.Lock()
defer bw.mu.Unlock()
select {
case <-bw.startCh:
default:
close(bw.startCh)
}
// There should be a resolver inside the balancer.
// All updates here, if any, are ignored.
}
func (bw *balancerWrapper) Close() {
bw.mu.Lock()
defer bw.mu.Unlock()
select {
case <-bw.startCh:
default:
close(bw.startCh)
}
bw.balancer.Close()
}
// The picker is the balancerWrapper itself.
// It either blocks or returns an error, consistent with the v1 balancer's Get().
func (bw *balancerWrapper) Pick(info balancer.PickInfo) (result balancer.PickResult, err error) {
failfast := true // Default failfast is true.
if ss, ok := rpcInfoFromContext(info.Ctx); ok {
failfast = ss.failfast
}
a, p, err := bw.balancer.Get(info.Ctx, BalancerGetOptions{BlockingWait: !failfast})
if err != nil {
return balancer.PickResult{}, toRPCErr(err)
}
if p != nil {
result.Done = func(balancer.DoneInfo) { p() }
defer func() {
if err != nil {
p()
}
}()
}
bw.mu.Lock()
defer bw.mu.Unlock()
if bw.pickfirst {
// Get the first sc in conns.
for _, result.SubConn = range bw.conns {
return result, nil
}
return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
}
var ok1 bool
result.SubConn, ok1 = bw.conns[resolver.Address{
Addr: a.Addr,
Type: resolver.Backend,
ServerName: "",
Metadata: a.Metadata,
}]
s, ok2 := bw.connSt[result.SubConn]
if !ok1 || !ok2 {
// This can only happen due to a race where Get() returned an address
// that was subsequently removed by Notify. In this case we should
// retry always.
return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
}
switch s.s {
case connectivity.Ready, connectivity.Idle:
return result, nil
case connectivity.Shutdown, connectivity.TransientFailure:
// If the returned sc has been shut down or is in transient failure,
// return error, and this RPC will fail or wait for another picker (if
// non-failfast).
return balancer.PickResult{}, balancer.ErrTransientFailure
default:
// For other states (connecting or unknown), the v1 balancer would
// traditionally wait until ready and then issue the RPC. Returning
// ErrNoSubConnAvailable will be a slight improvement in that it will
// allow the balancer to choose another address in case others are
// connected.
return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
}
}

View File

@ -1,13 +1,15 @@
// Code generated by protoc-gen-go. DO NOT EDIT. // Code generated by protoc-gen-go. DO NOT EDIT.
// source: grpc/binarylog/grpc_binarylog_v1/binarylog.proto // source: grpc/binlog/v1/binarylog.proto
package grpc_binarylog_v1 // import "google.golang.org/grpc/binarylog/grpc_binarylog_v1" package grpc_binarylog_v1
import proto "github.com/golang/protobuf/proto" import (
import fmt "fmt" fmt "fmt"
import math "math" proto "github.com/golang/protobuf/proto"
import duration "github.com/golang/protobuf/ptypes/duration" duration "github.com/golang/protobuf/ptypes/duration"
import timestamp "github.com/golang/protobuf/ptypes/timestamp" timestamp "github.com/golang/protobuf/ptypes/timestamp"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used. // Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal var _ = proto.Marshal
@ -18,7 +20,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against. // is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the // A compilation error at this line likely means your copy of the
// proto package needs to be updated. // proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// Enumerates the type of event // Enumerates the type of event
// Note the terminology is different from the RPC semantics // Note the terminology is different from the RPC semantics
@ -64,6 +66,7 @@ var GrpcLogEntry_EventType_name = map[int32]string{
6: "EVENT_TYPE_SERVER_TRAILER", 6: "EVENT_TYPE_SERVER_TRAILER",
7: "EVENT_TYPE_CANCEL", 7: "EVENT_TYPE_CANCEL",
} }
var GrpcLogEntry_EventType_value = map[string]int32{ var GrpcLogEntry_EventType_value = map[string]int32{
"EVENT_TYPE_UNKNOWN": 0, "EVENT_TYPE_UNKNOWN": 0,
"EVENT_TYPE_CLIENT_HEADER": 1, "EVENT_TYPE_CLIENT_HEADER": 1,
@ -78,8 +81,9 @@ var GrpcLogEntry_EventType_value = map[string]int32{
func (x GrpcLogEntry_EventType) String() string { func (x GrpcLogEntry_EventType) String() string {
return proto.EnumName(GrpcLogEntry_EventType_name, int32(x)) return proto.EnumName(GrpcLogEntry_EventType_name, int32(x))
} }
func (GrpcLogEntry_EventType) EnumDescriptor() ([]byte, []int) { func (GrpcLogEntry_EventType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_binarylog_264c8c9c551ce911, []int{0, 0} return fileDescriptor_b7972e58de45083a, []int{0, 0}
} }
// Enumerates the entity that generates the log entry // Enumerates the entity that generates the log entry
@ -96,6 +100,7 @@ var GrpcLogEntry_Logger_name = map[int32]string{
1: "LOGGER_CLIENT", 1: "LOGGER_CLIENT",
2: "LOGGER_SERVER", 2: "LOGGER_SERVER",
} }
var GrpcLogEntry_Logger_value = map[string]int32{ var GrpcLogEntry_Logger_value = map[string]int32{
"LOGGER_UNKNOWN": 0, "LOGGER_UNKNOWN": 0,
"LOGGER_CLIENT": 1, "LOGGER_CLIENT": 1,
@ -105,8 +110,9 @@ var GrpcLogEntry_Logger_value = map[string]int32{
func (x GrpcLogEntry_Logger) String() string { func (x GrpcLogEntry_Logger) String() string {
return proto.EnumName(GrpcLogEntry_Logger_name, int32(x)) return proto.EnumName(GrpcLogEntry_Logger_name, int32(x))
} }
func (GrpcLogEntry_Logger) EnumDescriptor() ([]byte, []int) { func (GrpcLogEntry_Logger) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_binarylog_264c8c9c551ce911, []int{0, 1} return fileDescriptor_b7972e58de45083a, []int{0, 1}
} }
type Address_Type int32 type Address_Type int32
@ -128,6 +134,7 @@ var Address_Type_name = map[int32]string{
2: "TYPE_IPV6", 2: "TYPE_IPV6",
3: "TYPE_UNIX", 3: "TYPE_UNIX",
} }
var Address_Type_value = map[string]int32{ var Address_Type_value = map[string]int32{
"TYPE_UNKNOWN": 0, "TYPE_UNKNOWN": 0,
"TYPE_IPV4": 1, "TYPE_IPV4": 1,
@ -138,8 +145,9 @@ var Address_Type_value = map[string]int32{
func (x Address_Type) String() string { func (x Address_Type) String() string {
return proto.EnumName(Address_Type_name, int32(x)) return proto.EnumName(Address_Type_name, int32(x))
} }
func (Address_Type) EnumDescriptor() ([]byte, []int) { func (Address_Type) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_binarylog_264c8c9c551ce911, []int{7, 0} return fileDescriptor_b7972e58de45083a, []int{7, 0}
} }
// Log entry we store in binary logs // Log entry we store in binary logs
@ -185,16 +193,17 @@ func (m *GrpcLogEntry) Reset() { *m = GrpcLogEntry{} }
func (m *GrpcLogEntry) String() string { return proto.CompactTextString(m) } func (m *GrpcLogEntry) String() string { return proto.CompactTextString(m) }
func (*GrpcLogEntry) ProtoMessage() {} func (*GrpcLogEntry) ProtoMessage() {}
func (*GrpcLogEntry) Descriptor() ([]byte, []int) { func (*GrpcLogEntry) Descriptor() ([]byte, []int) {
return fileDescriptor_binarylog_264c8c9c551ce911, []int{0} return fileDescriptor_b7972e58de45083a, []int{0}
} }
func (m *GrpcLogEntry) XXX_Unmarshal(b []byte) error { func (m *GrpcLogEntry) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GrpcLogEntry.Unmarshal(m, b) return xxx_messageInfo_GrpcLogEntry.Unmarshal(m, b)
} }
func (m *GrpcLogEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { func (m *GrpcLogEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_GrpcLogEntry.Marshal(b, m, deterministic) return xxx_messageInfo_GrpcLogEntry.Marshal(b, m, deterministic)
} }
func (dst *GrpcLogEntry) XXX_Merge(src proto.Message) { func (m *GrpcLogEntry) XXX_Merge(src proto.Message) {
xxx_messageInfo_GrpcLogEntry.Merge(dst, src) xxx_messageInfo_GrpcLogEntry.Merge(m, src)
} }
func (m *GrpcLogEntry) XXX_Size() int { func (m *GrpcLogEntry) XXX_Size() int {
return xxx_messageInfo_GrpcLogEntry.Size(m) return xxx_messageInfo_GrpcLogEntry.Size(m)
@ -317,9 +326,9 @@ func (m *GrpcLogEntry) GetPeer() *Address {
return nil return nil
} }
// XXX_OneofFuncs is for the internal use of the proto package. // XXX_OneofWrappers is for the internal use of the proto package.
func (*GrpcLogEntry) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { func (*GrpcLogEntry) XXX_OneofWrappers() []interface{} {
return _GrpcLogEntry_OneofMarshaler, _GrpcLogEntry_OneofUnmarshaler, _GrpcLogEntry_OneofSizer, []interface{}{ return []interface{}{
(*GrpcLogEntry_ClientHeader)(nil), (*GrpcLogEntry_ClientHeader)(nil),
(*GrpcLogEntry_ServerHeader)(nil), (*GrpcLogEntry_ServerHeader)(nil),
(*GrpcLogEntry_Message)(nil), (*GrpcLogEntry_Message)(nil),
@ -327,108 +336,6 @@ func (*GrpcLogEntry) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer)
} }
} }
func _GrpcLogEntry_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
m := msg.(*GrpcLogEntry)
// payload
switch x := m.Payload.(type) {
case *GrpcLogEntry_ClientHeader:
b.EncodeVarint(6<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.ClientHeader); err != nil {
return err
}
case *GrpcLogEntry_ServerHeader:
b.EncodeVarint(7<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.ServerHeader); err != nil {
return err
}
case *GrpcLogEntry_Message:
b.EncodeVarint(8<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.Message); err != nil {
return err
}
case *GrpcLogEntry_Trailer:
b.EncodeVarint(9<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.Trailer); err != nil {
return err
}
case nil:
default:
return fmt.Errorf("GrpcLogEntry.Payload has unexpected type %T", x)
}
return nil
}
func _GrpcLogEntry_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
m := msg.(*GrpcLogEntry)
switch tag {
case 6: // payload.client_header
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(ClientHeader)
err := b.DecodeMessage(msg)
m.Payload = &GrpcLogEntry_ClientHeader{msg}
return true, err
case 7: // payload.server_header
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(ServerHeader)
err := b.DecodeMessage(msg)
m.Payload = &GrpcLogEntry_ServerHeader{msg}
return true, err
case 8: // payload.message
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(Message)
err := b.DecodeMessage(msg)
m.Payload = &GrpcLogEntry_Message{msg}
return true, err
case 9: // payload.trailer
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(Trailer)
err := b.DecodeMessage(msg)
m.Payload = &GrpcLogEntry_Trailer{msg}
return true, err
default:
return false, nil
}
}
func _GrpcLogEntry_OneofSizer(msg proto.Message) (n int) {
m := msg.(*GrpcLogEntry)
// payload
switch x := m.Payload.(type) {
case *GrpcLogEntry_ClientHeader:
s := proto.Size(x.ClientHeader)
n += 1 // tag and wire
n += proto.SizeVarint(uint64(s))
n += s
case *GrpcLogEntry_ServerHeader:
s := proto.Size(x.ServerHeader)
n += 1 // tag and wire
n += proto.SizeVarint(uint64(s))
n += s
case *GrpcLogEntry_Message:
s := proto.Size(x.Message)
n += 1 // tag and wire
n += proto.SizeVarint(uint64(s))
n += s
case *GrpcLogEntry_Trailer:
s := proto.Size(x.Trailer)
n += 1 // tag and wire
n += proto.SizeVarint(uint64(s))
n += s
case nil:
default:
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
}
return n
}
type ClientHeader struct { type ClientHeader struct {
// This contains only the metadata from the application. // This contains only the metadata from the application.
Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
@ -453,16 +360,17 @@ func (m *ClientHeader) Reset() { *m = ClientHeader{} }
func (m *ClientHeader) String() string { return proto.CompactTextString(m) } func (m *ClientHeader) String() string { return proto.CompactTextString(m) }
func (*ClientHeader) ProtoMessage() {} func (*ClientHeader) ProtoMessage() {}
func (*ClientHeader) Descriptor() ([]byte, []int) { func (*ClientHeader) Descriptor() ([]byte, []int) {
return fileDescriptor_binarylog_264c8c9c551ce911, []int{1} return fileDescriptor_b7972e58de45083a, []int{1}
} }
func (m *ClientHeader) XXX_Unmarshal(b []byte) error { func (m *ClientHeader) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ClientHeader.Unmarshal(m, b) return xxx_messageInfo_ClientHeader.Unmarshal(m, b)
} }
func (m *ClientHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { func (m *ClientHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ClientHeader.Marshal(b, m, deterministic) return xxx_messageInfo_ClientHeader.Marshal(b, m, deterministic)
} }
func (dst *ClientHeader) XXX_Merge(src proto.Message) { func (m *ClientHeader) XXX_Merge(src proto.Message) {
xxx_messageInfo_ClientHeader.Merge(dst, src) xxx_messageInfo_ClientHeader.Merge(m, src)
} }
func (m *ClientHeader) XXX_Size() int { func (m *ClientHeader) XXX_Size() int {
return xxx_messageInfo_ClientHeader.Size(m) return xxx_messageInfo_ClientHeader.Size(m)
@ -513,16 +421,17 @@ func (m *ServerHeader) Reset() { *m = ServerHeader{} }
func (m *ServerHeader) String() string { return proto.CompactTextString(m) } func (m *ServerHeader) String() string { return proto.CompactTextString(m) }
func (*ServerHeader) ProtoMessage() {} func (*ServerHeader) ProtoMessage() {}
func (*ServerHeader) Descriptor() ([]byte, []int) { func (*ServerHeader) Descriptor() ([]byte, []int) {
return fileDescriptor_binarylog_264c8c9c551ce911, []int{2} return fileDescriptor_b7972e58de45083a, []int{2}
} }
func (m *ServerHeader) XXX_Unmarshal(b []byte) error { func (m *ServerHeader) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ServerHeader.Unmarshal(m, b) return xxx_messageInfo_ServerHeader.Unmarshal(m, b)
} }
func (m *ServerHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { func (m *ServerHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ServerHeader.Marshal(b, m, deterministic) return xxx_messageInfo_ServerHeader.Marshal(b, m, deterministic)
} }
func (dst *ServerHeader) XXX_Merge(src proto.Message) { func (m *ServerHeader) XXX_Merge(src proto.Message) {
xxx_messageInfo_ServerHeader.Merge(dst, src) xxx_messageInfo_ServerHeader.Merge(m, src)
} }
func (m *ServerHeader) XXX_Size() int { func (m *ServerHeader) XXX_Size() int {
return xxx_messageInfo_ServerHeader.Size(m) return xxx_messageInfo_ServerHeader.Size(m)
@ -560,16 +469,17 @@ func (m *Trailer) Reset() { *m = Trailer{} }
func (m *Trailer) String() string { return proto.CompactTextString(m) } func (m *Trailer) String() string { return proto.CompactTextString(m) }
func (*Trailer) ProtoMessage() {} func (*Trailer) ProtoMessage() {}
func (*Trailer) Descriptor() ([]byte, []int) { func (*Trailer) Descriptor() ([]byte, []int) {
return fileDescriptor_binarylog_264c8c9c551ce911, []int{3} return fileDescriptor_b7972e58de45083a, []int{3}
} }
func (m *Trailer) XXX_Unmarshal(b []byte) error { func (m *Trailer) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Trailer.Unmarshal(m, b) return xxx_messageInfo_Trailer.Unmarshal(m, b)
} }
func (m *Trailer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { func (m *Trailer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Trailer.Marshal(b, m, deterministic) return xxx_messageInfo_Trailer.Marshal(b, m, deterministic)
} }
func (dst *Trailer) XXX_Merge(src proto.Message) { func (m *Trailer) XXX_Merge(src proto.Message) {
xxx_messageInfo_Trailer.Merge(dst, src) xxx_messageInfo_Trailer.Merge(m, src)
} }
func (m *Trailer) XXX_Size() int { func (m *Trailer) XXX_Size() int {
return xxx_messageInfo_Trailer.Size(m) return xxx_messageInfo_Trailer.Size(m)
@ -624,16 +534,17 @@ func (m *Message) Reset() { *m = Message{} }
func (m *Message) String() string { return proto.CompactTextString(m) } func (m *Message) String() string { return proto.CompactTextString(m) }
func (*Message) ProtoMessage() {} func (*Message) ProtoMessage() {}
func (*Message) Descriptor() ([]byte, []int) { func (*Message) Descriptor() ([]byte, []int) {
return fileDescriptor_binarylog_264c8c9c551ce911, []int{4} return fileDescriptor_b7972e58de45083a, []int{4}
} }
func (m *Message) XXX_Unmarshal(b []byte) error { func (m *Message) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Message.Unmarshal(m, b) return xxx_messageInfo_Message.Unmarshal(m, b)
} }
func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Message.Marshal(b, m, deterministic) return xxx_messageInfo_Message.Marshal(b, m, deterministic)
} }
func (dst *Message) XXX_Merge(src proto.Message) { func (m *Message) XXX_Merge(src proto.Message) {
xxx_messageInfo_Message.Merge(dst, src) xxx_messageInfo_Message.Merge(m, src)
} }
func (m *Message) XXX_Size() int { func (m *Message) XXX_Size() int {
return xxx_messageInfo_Message.Size(m) return xxx_messageInfo_Message.Size(m)
@ -690,16 +601,17 @@ func (m *Metadata) Reset() { *m = Metadata{} }
func (m *Metadata) String() string { return proto.CompactTextString(m) } func (m *Metadata) String() string { return proto.CompactTextString(m) }
func (*Metadata) ProtoMessage() {} func (*Metadata) ProtoMessage() {}
func (*Metadata) Descriptor() ([]byte, []int) { func (*Metadata) Descriptor() ([]byte, []int) {
return fileDescriptor_binarylog_264c8c9c551ce911, []int{5} return fileDescriptor_b7972e58de45083a, []int{5}
} }
func (m *Metadata) XXX_Unmarshal(b []byte) error { func (m *Metadata) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Metadata.Unmarshal(m, b) return xxx_messageInfo_Metadata.Unmarshal(m, b)
} }
func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Metadata.Marshal(b, m, deterministic) return xxx_messageInfo_Metadata.Marshal(b, m, deterministic)
} }
func (dst *Metadata) XXX_Merge(src proto.Message) { func (m *Metadata) XXX_Merge(src proto.Message) {
xxx_messageInfo_Metadata.Merge(dst, src) xxx_messageInfo_Metadata.Merge(m, src)
} }
func (m *Metadata) XXX_Size() int { func (m *Metadata) XXX_Size() int {
return xxx_messageInfo_Metadata.Size(m) return xxx_messageInfo_Metadata.Size(m)
@ -730,16 +642,17 @@ func (m *MetadataEntry) Reset() { *m = MetadataEntry{} }
func (m *MetadataEntry) String() string { return proto.CompactTextString(m) } func (m *MetadataEntry) String() string { return proto.CompactTextString(m) }
func (*MetadataEntry) ProtoMessage() {} func (*MetadataEntry) ProtoMessage() {}
func (*MetadataEntry) Descriptor() ([]byte, []int) { func (*MetadataEntry) Descriptor() ([]byte, []int) {
return fileDescriptor_binarylog_264c8c9c551ce911, []int{6} return fileDescriptor_b7972e58de45083a, []int{6}
} }
func (m *MetadataEntry) XXX_Unmarshal(b []byte) error { func (m *MetadataEntry) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_MetadataEntry.Unmarshal(m, b) return xxx_messageInfo_MetadataEntry.Unmarshal(m, b)
} }
func (m *MetadataEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { func (m *MetadataEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_MetadataEntry.Marshal(b, m, deterministic) return xxx_messageInfo_MetadataEntry.Marshal(b, m, deterministic)
} }
func (dst *MetadataEntry) XXX_Merge(src proto.Message) { func (m *MetadataEntry) XXX_Merge(src proto.Message) {
xxx_messageInfo_MetadataEntry.Merge(dst, src) xxx_messageInfo_MetadataEntry.Merge(m, src)
} }
func (m *MetadataEntry) XXX_Size() int { func (m *MetadataEntry) XXX_Size() int {
return xxx_messageInfo_MetadataEntry.Size(m) return xxx_messageInfo_MetadataEntry.Size(m)
@ -779,16 +692,17 @@ func (m *Address) Reset() { *m = Address{} }
func (m *Address) String() string { return proto.CompactTextString(m) } func (m *Address) String() string { return proto.CompactTextString(m) }
func (*Address) ProtoMessage() {} func (*Address) ProtoMessage() {}
func (*Address) Descriptor() ([]byte, []int) { func (*Address) Descriptor() ([]byte, []int) {
return fileDescriptor_binarylog_264c8c9c551ce911, []int{7} return fileDescriptor_b7972e58de45083a, []int{7}
} }
func (m *Address) XXX_Unmarshal(b []byte) error { func (m *Address) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Address.Unmarshal(m, b) return xxx_messageInfo_Address.Unmarshal(m, b)
} }
func (m *Address) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { func (m *Address) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Address.Marshal(b, m, deterministic) return xxx_messageInfo_Address.Marshal(b, m, deterministic)
} }
func (dst *Address) XXX_Merge(src proto.Message) { func (m *Address) XXX_Merge(src proto.Message) {
xxx_messageInfo_Address.Merge(dst, src) xxx_messageInfo_Address.Merge(m, src)
} }
func (m *Address) XXX_Size() int { func (m *Address) XXX_Size() int {
return xxx_messageInfo_Address.Size(m) return xxx_messageInfo_Address.Size(m)
@ -821,6 +735,9 @@ func (m *Address) GetIpPort() uint32 {
} }
func init() { func init() {
proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_EventType", GrpcLogEntry_EventType_name, GrpcLogEntry_EventType_value)
proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_Logger", GrpcLogEntry_Logger_name, GrpcLogEntry_Logger_value)
proto.RegisterEnum("grpc.binarylog.v1.Address_Type", Address_Type_name, Address_Type_value)
proto.RegisterType((*GrpcLogEntry)(nil), "grpc.binarylog.v1.GrpcLogEntry") proto.RegisterType((*GrpcLogEntry)(nil), "grpc.binarylog.v1.GrpcLogEntry")
proto.RegisterType((*ClientHeader)(nil), "grpc.binarylog.v1.ClientHeader") proto.RegisterType((*ClientHeader)(nil), "grpc.binarylog.v1.ClientHeader")
proto.RegisterType((*ServerHeader)(nil), "grpc.binarylog.v1.ServerHeader") proto.RegisterType((*ServerHeader)(nil), "grpc.binarylog.v1.ServerHeader")
@ -829,72 +746,67 @@ func init() {
proto.RegisterType((*Metadata)(nil), "grpc.binarylog.v1.Metadata") proto.RegisterType((*Metadata)(nil), "grpc.binarylog.v1.Metadata")
proto.RegisterType((*MetadataEntry)(nil), "grpc.binarylog.v1.MetadataEntry") proto.RegisterType((*MetadataEntry)(nil), "grpc.binarylog.v1.MetadataEntry")
proto.RegisterType((*Address)(nil), "grpc.binarylog.v1.Address") proto.RegisterType((*Address)(nil), "grpc.binarylog.v1.Address")
proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_EventType", GrpcLogEntry_EventType_name, GrpcLogEntry_EventType_value)
proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_Logger", GrpcLogEntry_Logger_name, GrpcLogEntry_Logger_value)
proto.RegisterEnum("grpc.binarylog.v1.Address_Type", Address_Type_name, Address_Type_value)
} }
func init() { func init() { proto.RegisterFile("grpc/binlog/v1/binarylog.proto", fileDescriptor_b7972e58de45083a) }
proto.RegisterFile("grpc/binarylog/grpc_binarylog_v1/binarylog.proto", fileDescriptor_binarylog_264c8c9c551ce911)
}
var fileDescriptor_binarylog_264c8c9c551ce911 = []byte{ var fileDescriptor_b7972e58de45083a = []byte{
// 900 bytes of a gzipped FileDescriptorProto // 904 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x51, 0x6f, 0xe3, 0x44, 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x51, 0x6f, 0xe3, 0x44,
0x10, 0x3e, 0x37, 0x69, 0xdc, 0x4c, 0x92, 0xca, 0x5d, 0x95, 0x3b, 0x5f, 0x29, 0x34, 0xb2, 0x04, 0x10, 0xae, 0xdb, 0x34, 0x6e, 0x26, 0x49, 0xe5, 0xae, 0xca, 0x9d, 0xaf, 0x94, 0x6b, 0x64, 0x09,
0x0a, 0x42, 0x72, 0xb9, 0x94, 0xeb, 0xf1, 0x02, 0x52, 0x92, 0xfa, 0xd2, 0x88, 0x5c, 0x1a, 0x6d, 0x14, 0x84, 0xe4, 0xa8, 0x29, 0xd7, 0xe3, 0x05, 0xa4, 0x24, 0xf5, 0xa5, 0x11, 0xb9, 0x34, 0xda,
0x72, 0x3d, 0x40, 0x48, 0xd6, 0x36, 0x5e, 0x1c, 0x0b, 0xc7, 0x6b, 0xd6, 0x9b, 0xa0, 0xfc, 0x2c, 0xe4, 0x7a, 0x80, 0x90, 0xac, 0x6d, 0xbc, 0x38, 0x16, 0x8e, 0xd7, 0xac, 0x37, 0x41, 0xf9, 0x59,
0xde, 0x90, 0xee, 0x77, 0xf1, 0x8e, 0xbc, 0x6b, 0x27, 0xa6, 0x69, 0x0f, 0x09, 0xde, 0x3c, 0xdf, 0xbc, 0x21, 0xdd, 0xef, 0xe2, 0x1d, 0x79, 0xd7, 0x4e, 0x4d, 0xd3, 0x82, 0xc4, 0xbd, 0xed, 0x7c,
0x7c, 0xf3, 0xcd, 0xee, 0x78, 0x66, 0x16, 0xbe, 0xf2, 0x79, 0x3c, 0x3b, 0xbf, 0x0b, 0x22, 0xc2, 0xf3, 0xcd, 0x37, 0xbb, 0xe3, 0x99, 0x31, 0xbc, 0xf4, 0x79, 0x3c, 0x6b, 0xdd, 0x05, 0x51, 0xc8,
0xd7, 0x21, 0xf3, 0xcf, 0x53, 0xd3, 0xdd, 0x98, 0xee, 0xea, 0xc5, 0xd6, 0x67, 0xc7, 0x9c, 0x09, 0xfc, 0xd6, 0xea, 0x3c, 0x3d, 0x11, 0xbe, 0x0e, 0x99, 0x6f, 0xc7, 0x9c, 0x09, 0x86, 0x8e, 0x52,
0x86, 0x8e, 0x52, 0x8a, 0xbd, 0x45, 0x57, 0x2f, 0x4e, 0x3e, 0xf5, 0x19, 0xf3, 0x43, 0x7a, 0x2e, 0xbf, 0x7d, 0x8f, 0xae, 0xce, 0x4f, 0x5e, 0xfa, 0x8c, 0xf9, 0x21, 0x6d, 0x49, 0xc2, 0xdd, 0xf2,
0x09, 0x77, 0xcb, 0x5f, 0xce, 0xbd, 0x25, 0x27, 0x22, 0x60, 0x91, 0x0a, 0x39, 0x39, 0xbb, 0xef, 0x97, 0x96, 0xb7, 0xe4, 0x44, 0x04, 0x2c, 0x52, 0x21, 0x27, 0x67, 0x0f, 0xfd, 0x22, 0x58, 0xd0,
0x17, 0xc1, 0x82, 0x26, 0x82, 0x2c, 0x62, 0x45, 0xb0, 0xde, 0xeb, 0x50, 0xef, 0xf3, 0x78, 0x36, 0x44, 0x90, 0x45, 0xac, 0x08, 0xd6, 0x07, 0x1d, 0x6a, 0x7d, 0x1e, 0xcf, 0x86, 0xcc, 0x77, 0x22,
0x64, 0xbe, 0x13, 0x09, 0xbe, 0x46, 0xdf, 0x40, 0x75, 0xc3, 0x31, 0xb5, 0xa6, 0xd6, 0xaa, 0xb5, 0xc1, 0xd7, 0xe8, 0x1b, 0xa8, 0x6c, 0x38, 0xa6, 0xd6, 0xd0, 0x9a, 0xd5, 0xf6, 0x89, 0xad, 0x54,
0x4f, 0x6c, 0xa5, 0x62, 0xe7, 0x2a, 0xf6, 0x34, 0x67, 0xe0, 0x2d, 0x19, 0x3d, 0x03, 0x7d, 0x46, 0xec, 0x5c, 0xc5, 0x9e, 0xe6, 0x0c, 0x7c, 0x4f, 0x46, 0xcf, 0x41, 0x9f, 0x91, 0x30, 0x74, 0x03,
0xc2, 0xd0, 0x0d, 0x3c, 0x73, 0xaf, 0xa9, 0xb5, 0xca, 0xb8, 0x92, 0x9a, 0x03, 0x0f, 0xbd, 0x84, 0xcf, 0xdc, 0x6d, 0x68, 0xcd, 0x12, 0x2e, 0xa7, 0xe6, 0xc0, 0x43, 0xaf, 0xe0, 0x79, 0x42, 0x7f,
0x67, 0x09, 0xfd, 0x6d, 0x49, 0xa3, 0x19, 0x75, 0x03, 0xcf, 0xfd, 0x3d, 0x10, 0xf3, 0x20, 0x72, 0x5b, 0xd2, 0x68, 0x46, 0xdd, 0xc0, 0x73, 0x7f, 0x0f, 0xc4, 0x3c, 0x88, 0xdc, 0xd4, 0x69, 0xee,
0x53, 0xa7, 0x59, 0x92, 0xc4, 0xe3, 0xdc, 0x3d, 0xf0, 0xde, 0x49, 0x67, 0x8f, 0x84, 0x21, 0xfa, 0x49, 0xe2, 0x71, 0xee, 0x1e, 0x78, 0xef, 0xa5, 0xb3, 0x47, 0xc2, 0x10, 0x7d, 0x0b, 0x25, 0xb1,
0x16, 0xca, 0x62, 0x1d, 0x53, 0xb3, 0xdc, 0xd4, 0x5a, 0x87, 0xed, 0x2f, 0xec, 0x9d, 0xdb, 0xdb, 0x8e, 0xa9, 0x59, 0x6a, 0x68, 0xcd, 0xc3, 0xf6, 0x97, 0xf6, 0xd6, 0xeb, 0xed, 0xe2, 0xc5, 0x6d,
0xc5, 0x83, 0xdb, 0xce, 0x8a, 0x46, 0x62, 0xba, 0x8e, 0x29, 0x96, 0x61, 0xe8, 0x3b, 0xa8, 0x84, 0x67, 0x45, 0x23, 0x31, 0x5d, 0xc7, 0x14, 0xcb, 0x30, 0xf4, 0x1d, 0x94, 0x43, 0xe6, 0xfb, 0x94,
0xcc, 0xf7, 0x29, 0x37, 0xf7, 0xa5, 0xc0, 0xe7, 0xff, 0x26, 0x30, 0x94, 0x6c, 0x9c, 0x45, 0xa1, 0x9b, 0xfb, 0x52, 0xe0, 0x8b, 0xff, 0x12, 0x18, 0x4a, 0x36, 0xce, 0xa2, 0xd0, 0x1b, 0xa8, 0xcf,
0xd7, 0xd0, 0x98, 0x85, 0x01, 0x8d, 0x84, 0x3b, 0xa7, 0xc4, 0xa3, 0xdc, 0xac, 0xc8, 0x62, 0x9c, 0xc2, 0x80, 0x46, 0xc2, 0x9d, 0x53, 0xe2, 0x51, 0x6e, 0x96, 0x65, 0x31, 0xce, 0x1e, 0x91, 0xe9,
0x3d, 0x20, 0xd3, 0x93, 0xbc, 0x6b, 0x49, 0xbb, 0x7e, 0x82, 0xeb, 0xb3, 0x82, 0x9d, 0xea, 0x24, 0x49, 0xde, 0xb5, 0xa4, 0x5d, 0xef, 0xe0, 0xda, 0xac, 0x60, 0xa7, 0x3a, 0x09, 0xe5, 0x2b, 0xca,
0x94, 0xaf, 0x28, 0xcf, 0x75, 0xf4, 0x47, 0x75, 0x26, 0x92, 0xb7, 0xd5, 0x49, 0x0a, 0x36, 0xba, 0x73, 0x1d, 0xfd, 0x49, 0x9d, 0x89, 0xe4, 0xdd, 0xeb, 0x24, 0x05, 0x1b, 0x5d, 0x82, 0xbe, 0xa0,
0x04, 0x7d, 0x41, 0x93, 0x84, 0xf8, 0xd4, 0x3c, 0xc8, 0x7f, 0xcb, 0x8e, 0xc2, 0x1b, 0xc5, 0xb8, 0x49, 0x42, 0x7c, 0x6a, 0x1e, 0xe4, 0x9f, 0x65, 0x4b, 0xe1, 0xad, 0x62, 0x5c, 0xef, 0xe0, 0x9c,
0x7e, 0x82, 0x73, 0x72, 0x1a, 0x27, 0x38, 0x09, 0x42, 0xca, 0xcd, 0xea, 0xa3, 0x71, 0x53, 0xc5, 0x9c, 0xc6, 0x09, 0x4e, 0x82, 0x90, 0x72, 0xb3, 0xf2, 0x64, 0xdc, 0x54, 0x31, 0xd2, 0xb8, 0x8c,
0x48, 0xe3, 0x32, 0x32, 0xfa, 0x12, 0x8e, 0x62, 0xb2, 0x0e, 0x19, 0xf1, 0x5c, 0xc1, 0x97, 0xd1, 0x8c, 0xbe, 0x82, 0xa3, 0x98, 0xac, 0x43, 0x46, 0x3c, 0x57, 0xf0, 0x65, 0x34, 0x23, 0x82, 0x7a,
0x8c, 0x08, 0xea, 0x99, 0xd0, 0xd4, 0x5a, 0x07, 0xd8, 0xc8, 0x1c, 0xd3, 0x1c, 0x47, 0x36, 0x94, 0x26, 0x34, 0xb4, 0xe6, 0x01, 0x36, 0x32, 0xc7, 0x34, 0xc7, 0x91, 0x0d, 0xa5, 0x98, 0x52, 0x6e,
0x63, 0x4a, 0xb9, 0x59, 0x7b, 0x34, 0x43, 0xc7, 0xf3, 0x38, 0x4d, 0x12, 0x2c, 0x79, 0xd6, 0x5f, 0x56, 0x9f, 0xcc, 0xd0, 0xf1, 0x3c, 0x4e, 0x93, 0x04, 0x4b, 0x9e, 0xf5, 0x97, 0x06, 0x95, 0xcd,
0x1a, 0x54, 0x37, 0x3f, 0x0c, 0x3d, 0x05, 0xe4, 0xdc, 0x3a, 0xa3, 0xa9, 0x3b, 0xfd, 0x71, 0xec, 0x07, 0x43, 0xcf, 0x00, 0x39, 0xb7, 0xce, 0x68, 0xea, 0x4e, 0x7f, 0x1c, 0x3b, 0xee, 0xbb, 0xd1,
0xb8, 0x6f, 0x47, 0xdf, 0x8f, 0x6e, 0xde, 0x8d, 0x8c, 0x27, 0xe8, 0x14, 0xcc, 0x02, 0xde, 0x1b, 0xf7, 0xa3, 0x9b, 0xf7, 0x23, 0x63, 0x07, 0x9d, 0x82, 0x59, 0xc0, 0x7b, 0xc3, 0x41, 0x7a, 0xbe,
0x0e, 0xd2, 0xef, 0x6b, 0xa7, 0x73, 0xe5, 0x60, 0x43, 0xbb, 0xe7, 0x9d, 0x38, 0xf8, 0xd6, 0xc1, 0x76, 0x3a, 0x57, 0x0e, 0x36, 0xb4, 0x07, 0xde, 0x89, 0x83, 0x6f, 0x1d, 0x9c, 0x7b, 0x77, 0xd1,
0xb9, 0x77, 0x0f, 0x7d, 0x02, 0xcf, 0x77, 0x63, 0xdf, 0x38, 0x93, 0x49, 0xa7, 0xef, 0x18, 0xa5, 0x67, 0xf0, 0x62, 0x3b, 0xf6, 0xad, 0x33, 0x99, 0x74, 0xfa, 0x8e, 0xb1, 0xf7, 0xc0, 0x9d, 0x05,
0x7b, 0xee, 0x2c, 0x38, 0x77, 0x97, 0x51, 0x13, 0x4e, 0x1f, 0xc8, 0xdc, 0x19, 0xbe, 0x76, 0x7b, 0xe7, 0xee, 0x12, 0x6a, 0xc0, 0xe9, 0x23, 0x99, 0x3b, 0xc3, 0x37, 0x6e, 0x6f, 0x78, 0x33, 0x71,
0xc3, 0x9b, 0x89, 0x63, 0xec, 0x3f, 0x2c, 0x30, 0xc5, 0x9d, 0xc1, 0xd0, 0xc1, 0x46, 0x05, 0x7d, 0x8c, 0xfd, 0xc7, 0x05, 0xa6, 0xb8, 0x33, 0x18, 0x3a, 0xd8, 0x28, 0xa3, 0x4f, 0xe0, 0xa8, 0x28,
0x04, 0x47, 0x45, 0x81, 0xce, 0xa8, 0xe7, 0x0c, 0x0d, 0xdd, 0xea, 0x42, 0x45, 0xb5, 0x19, 0x42, 0xd0, 0x19, 0xf5, 0x9c, 0xa1, 0xa1, 0x5b, 0x5d, 0x28, 0xab, 0x36, 0x43, 0x08, 0x0e, 0x87, 0x37,
0x70, 0x38, 0xbc, 0xe9, 0xf7, 0x1d, 0x5c, 0xb8, 0xef, 0x11, 0x34, 0x32, 0x4c, 0x65, 0x34, 0xb4, 0xfd, 0xbe, 0x83, 0x0b, 0xef, 0x3d, 0x82, 0x7a, 0x86, 0xa9, 0x8c, 0x86, 0x56, 0x80, 0x54, 0x0a,
0x02, 0xa4, 0x52, 0x18, 0x7b, 0xdd, 0x2a, 0xe8, 0x59, 0xfd, 0xad, 0xf7, 0x1a, 0xd4, 0x8b, 0xcd, 0x63, 0xb7, 0x5b, 0x01, 0x3d, 0xab, 0xbf, 0xf5, 0x41, 0x83, 0x5a, 0xb1, 0xf9, 0xd0, 0x6b, 0x38,
0x87, 0x5e, 0xc1, 0xc1, 0x82, 0x0a, 0xe2, 0x11, 0x41, 0xb2, 0xe1, 0xfd, 0xf8, 0xc1, 0x2e, 0x51, 0x58, 0x50, 0x41, 0x3c, 0x22, 0x48, 0x36, 0xbc, 0x9f, 0x3e, 0xda, 0x25, 0x8a, 0x82, 0x37, 0x64,
0x14, 0xbc, 0x21, 0xa3, 0x33, 0xa8, 0x2d, 0xa8, 0x98, 0x33, 0xcf, 0x8d, 0xc8, 0x82, 0xca, 0x01, 0x74, 0x06, 0xd5, 0x05, 0x15, 0x73, 0xe6, 0xb9, 0x11, 0x59, 0x50, 0x39, 0xc0, 0x15, 0x0c, 0x0a,
0xae, 0x62, 0x50, 0xd0, 0x88, 0x2c, 0x28, 0x3a, 0x85, 0x2a, 0x59, 0x8a, 0x39, 0xe3, 0x81, 0x58, 0x1a, 0x91, 0x05, 0x45, 0xa7, 0x50, 0x21, 0x4b, 0x31, 0x67, 0x3c, 0x10, 0x6b, 0x39, 0xb6, 0x15,
0xcb, 0xb1, 0xad, 0xe2, 0x2d, 0x80, 0x2e, 0x40, 0x4f, 0x17, 0x01, 0x5b, 0x0a, 0x39, 0xae, 0xb5, 0x7c, 0x0f, 0xa0, 0x0b, 0xd0, 0xd3, 0x45, 0xc0, 0x96, 0x42, 0x8e, 0x6b, 0xb5, 0xfd, 0x62, 0x6b,
0xf6, 0xf3, 0x9d, 0x9d, 0x71, 0x95, 0x6d, 0x26, 0x9c, 0x33, 0xad, 0x3e, 0xd4, 0x8b, 0x1d, 0xff, 0x67, 0x5c, 0x65, 0x9b, 0x09, 0xe7, 0x4c, 0xab, 0x0f, 0xb5, 0x62, 0xc7, 0xff, 0xef, 0xcb, 0x5b,
0x9f, 0x0f, 0x6f, 0xfd, 0xa1, 0x81, 0x9e, 0x75, 0xf0, 0xff, 0xaa, 0x40, 0x22, 0x88, 0x58, 0x26, 0x7f, 0x68, 0xa0, 0x67, 0x1d, 0xfc, 0x51, 0x15, 0x48, 0x04, 0x11, 0xcb, 0xc4, 0x9d, 0x31, 0x4f,
0xee, 0x8c, 0x79, 0xaa, 0x02, 0x0d, 0x0c, 0x0a, 0xea, 0x31, 0x8f, 0xa2, 0xcf, 0xe0, 0x30, 0x23, 0x55, 0xa0, 0x8e, 0x41, 0x41, 0x3d, 0xe6, 0x51, 0xf4, 0x39, 0x1c, 0x66, 0x84, 0x7c, 0x0e, 0x55,
0xe4, 0x73, 0xa8, 0xca, 0xd0, 0x50, 0x68, 0x36, 0x7a, 0x05, 0x9a, 0x47, 0x05, 0x09, 0xc2, 0x44, 0x19, 0xea, 0x0a, 0xcd, 0x46, 0xaf, 0x40, 0xf3, 0xa8, 0x20, 0x41, 0x98, 0xc8, 0x8a, 0xd4, 0x72,
0x56, 0xa4, 0x9e, 0xd3, 0xae, 0x14, 0x68, 0xbd, 0x04, 0x3d, 0x8f, 0x78, 0x0a, 0x95, 0x90, 0x46, 0xda, 0x95, 0x02, 0xad, 0x57, 0xa0, 0xe7, 0x11, 0xcf, 0xa0, 0x1c, 0xd2, 0xc8, 0x17, 0x73, 0x79,
0xbe, 0x98, 0xcb, 0x03, 0x37, 0x70, 0x66, 0x21, 0x04, 0x65, 0x79, 0x8d, 0x3d, 0x19, 0x2f, 0xbf, 0xe1, 0x3a, 0xce, 0x2c, 0x84, 0xa0, 0x24, 0x9f, 0xb1, 0x2b, 0xe3, 0xe5, 0xd9, 0xea, 0xc2, 0x41,
0xad, 0x2e, 0x1c, 0xe4, 0x67, 0x47, 0x97, 0xb0, 0x4f, 0xd3, 0xcd, 0x65, 0x6a, 0xcd, 0x52, 0xab, 0x7e, 0x77, 0x74, 0x09, 0xfb, 0x34, 0xdd, 0x5c, 0xa6, 0xd6, 0xd8, 0x6b, 0x56, 0xdb, 0x8d, 0x7f,
0xd6, 0x6e, 0x7e, 0xe0, 0x9e, 0x72, 0xc3, 0x61, 0x45, 0xb7, 0x5e, 0x41, 0xe3, 0x1f, 0x38, 0x32, 0x79, 0xa7, 0xdc, 0x70, 0x58, 0xd1, 0xad, 0xd7, 0x50, 0xff, 0x07, 0x8e, 0x0c, 0xd8, 0xfb, 0x95,
0xa0, 0xf4, 0x2b, 0x5d, 0xcb, 0xec, 0x55, 0x9c, 0x7e, 0xa2, 0x63, 0xd8, 0x5f, 0x91, 0x70, 0x49, 0xae, 0x65, 0xf6, 0x0a, 0x4e, 0x8f, 0xe8, 0x18, 0xf6, 0x57, 0x24, 0x5c, 0xd2, 0x2c, 0xb7, 0x32,
0xb3, 0xdc, 0xca, 0xb0, 0xfe, 0xd4, 0x40, 0xcf, 0xe6, 0x18, 0x5d, 0x64, 0xdb, 0x59, 0x93, 0xcb, 0xac, 0x3f, 0x35, 0xd0, 0xb3, 0x39, 0x46, 0x17, 0xd9, 0x76, 0xd6, 0xe4, 0x72, 0x3d, 0x7b, 0x7a,
0xf5, 0xec, 0xf1, 0x89, 0xb7, 0x0b, 0x3b, 0xd9, 0x04, 0x9d, 0x28, 0x34, 0xeb, 0xb0, 0xdc, 0x4c, 0xe2, 0xed, 0xc2, 0x4e, 0x36, 0x41, 0x27, 0x0a, 0xcd, 0x3a, 0x2c, 0x37, 0xd3, 0x9f, 0x47, 0x10,
0x1f, 0x8f, 0x20, 0x76, 0x63, 0xc6, 0x85, 0xac, 0x6a, 0x03, 0x57, 0x82, 0x78, 0xcc, 0xb8, 0xb0, 0xbb, 0x31, 0xe3, 0x42, 0x56, 0xb5, 0x8e, 0xcb, 0x41, 0x3c, 0x66, 0x5c, 0x58, 0x0e, 0x94, 0xe4,
0x1c, 0x28, 0xcb, 0x1d, 0x61, 0x40, 0xfd, 0xde, 0x76, 0x68, 0x40, 0x55, 0x22, 0x83, 0xf1, 0xed, 0x8e, 0x30, 0xa0, 0xf6, 0x60, 0x3b, 0xd4, 0xa1, 0x22, 0x91, 0xc1, 0xf8, 0xf6, 0x6b, 0x43, 0x2b,
0xd7, 0x86, 0x56, 0x34, 0x2f, 0x8d, 0xbd, 0x8d, 0xf9, 0x76, 0x34, 0xf8, 0xc1, 0x28, 0x75, 0x7f, 0x9a, 0x97, 0xc6, 0xee, 0xc6, 0x7c, 0x37, 0x1a, 0xfc, 0x60, 0xec, 0x75, 0x7f, 0x86, 0xe3, 0x80,
0x86, 0xe3, 0x80, 0xed, 0x1e, 0xb2, 0x7b, 0xd8, 0x95, 0xd6, 0x90, 0xf9, 0xe3, 0xb4, 0x51, 0xc7, 0x6d, 0x5f, 0xb2, 0x7b, 0xd8, 0x95, 0xd6, 0x90, 0xf9, 0xe3, 0xb4, 0x51, 0xc7, 0xda, 0x4f, 0xed,
0xda, 0x4f, 0xed, 0xac, 0x71, 0x7d, 0x16, 0x92, 0xc8, 0xb7, 0x19, 0x57, 0x4f, 0xf3, 0x87, 0x5e, 0xac, 0x71, 0x7d, 0x16, 0x92, 0xc8, 0xb7, 0x19, 0xf7, 0x5b, 0xf9, 0x7f, 0x59, 0x85, 0x49, 0xd3,
0xea, 0xbb, 0x8a, 0xec, 0xf2, 0x8b, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0xe7, 0xf6, 0x4b, 0x50, 0xdd, 0x98, 0xee, 0xea, 0xfc, 0xae, 0x2c, 0xbb, 0xfc, 0xe2, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff,
0xd4, 0x07, 0x00, 0x00, 0x10, 0x93, 0x68, 0x41, 0xc2, 0x07, 0x00, 0x00,
} }
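The regenerated code replaces the hand-rolled oneof marshaler, unmarshaler and sizer with XXX_OneofWrappers, leaving the payload oneof to the proto runtime. A hedged sketch that builds and marshals a client-header entry using only names visible in the generated types above; the empty Metadata is a placeholder:

package example

import (
	"github.com/golang/protobuf/proto"
	pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
)

// marshalClientHeader assembles a minimal client-header log entry and
// serializes it; only the oneof payload is exercised here.
func marshalClientHeader() ([]byte, error) {
	entry := &pb.GrpcLogEntry{
		Type:   pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
		Logger: pb.GrpcLogEntry_LOGGER_CLIENT,
		Payload: &pb.GrpcLogEntry_ClientHeader{
			ClientHeader: &pb.ClientHeader{Metadata: &pb.Metadata{}}, // placeholder metadata
		},
	}
	return proto.Marshal(entry)
}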

View File

@ -68,8 +68,6 @@ var (
errConnDrain = errors.New("grpc: the connection is drained") errConnDrain = errors.New("grpc: the connection is drained")
// errConnClosing indicates that the connection is closing. // errConnClosing indicates that the connection is closing.
errConnClosing = errors.New("grpc: the connection is closing") errConnClosing = errors.New("grpc: the connection is closing")
// errBalancerClosed indicates that the balancer is closed.
errBalancerClosed = errors.New("grpc: balancer is closed")
// invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default // invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default
// service config. // service config.
invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid" invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid"
@ -151,17 +149,17 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
if channelz.IsOn() { if channelz.IsOn() {
if cc.dopts.channelzParentID != 0 { if cc.dopts.channelzParentID != 0 {
cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target)
channelz.AddTraceEvent(cc.channelzID, 0, &channelz.TraceEventDesc{ channelz.AddTraceEvent(logger, cc.channelzID, 0, &channelz.TraceEventDesc{
Desc: "Channel Created", Desc: "Channel Created",
Severity: channelz.CtINFO, Severity: channelz.CtInfo,
Parent: &channelz.TraceEventDesc{ Parent: &channelz.TraceEventDesc{
Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID), Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID),
Severity: channelz.CtINFO, Severity: channelz.CtInfo,
}, },
}) })
} else { } else {
cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, 0, target) cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, 0, target)
channelz.Info(cc.channelzID, "Channel Created") channelz.Info(logger, cc.channelzID, "Channel Created")
} }
cc.csMgr.channelzID = cc.channelzID cc.csMgr.channelzID = cc.channelzID
} }
@ -217,7 +215,14 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
defer func() { defer func() {
select { select {
case <-ctx.Done(): case <-ctx.Done():
conn, err = nil, ctx.Err() switch {
case ctx.Err() == err:
conn = nil
case err == nil || !cc.dopts.returnLastError:
conn, err = nil, ctx.Err()
default:
conn, err = nil, fmt.Errorf("%v: %v", ctx.Err(), err)
}
default: default:
} }
}() }()
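The reworked defer keeps the last connection error when the dial context expires instead of always collapsing to ctx.Err(); that pairs with the blocking-dial option that sets returnLastError. A sketch, assuming grpc.WithReturnConnectionError is available in the vendored version — the target and timeout are placeholders:

package example

import (
	"context"
	"time"

	"google.golang.org/grpc"
)

// dialBlocking waits for the connection to become ready and, on timeout,
// surfaces the last transport error instead of a bare context error.
func dialBlocking(target string) (*grpc.ClientConn, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	return grpc.DialContext(ctx, target,
		grpc.WithInsecure(), // placeholder credentials for this sketch
		grpc.WithBlock(),
		grpc.WithReturnConnectionError(), // sets returnLastError used in the defer above
	)
}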
@ -240,13 +245,14 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
// Determine the resolver to use. // Determine the resolver to use.
cc.parsedTarget = grpcutil.ParseTarget(cc.target) cc.parsedTarget = grpcutil.ParseTarget(cc.target)
channelz.Infof(cc.channelzID, "parsed scheme: %q", cc.parsedTarget.Scheme) unixScheme := strings.HasPrefix(cc.target, "unix:")
channelz.Infof(logger, cc.channelzID, "parsed scheme: %q", cc.parsedTarget.Scheme)
resolverBuilder := cc.getResolver(cc.parsedTarget.Scheme) resolverBuilder := cc.getResolver(cc.parsedTarget.Scheme)
if resolverBuilder == nil { if resolverBuilder == nil {
// If resolver builder is still nil, the parsed target's scheme is // If resolver builder is still nil, the parsed target's scheme is
// not registered. Fallback to default resolver and set Endpoint to // not registered. Fallback to default resolver and set Endpoint to
// the original target. // the original target.
channelz.Infof(cc.channelzID, "scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme) channelz.Infof(logger, cc.channelzID, "scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme)
cc.parsedTarget = resolver.Target{ cc.parsedTarget = resolver.Target{
Scheme: resolver.GetDefaultScheme(), Scheme: resolver.GetDefaultScheme(),
Endpoint: target, Endpoint: target,
@ -262,6 +268,8 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
cc.authority = creds.Info().ServerName cc.authority = creds.Info().ServerName
} else if cc.dopts.insecure && cc.dopts.authority != "" { } else if cc.dopts.insecure && cc.dopts.authority != "" {
cc.authority = cc.dopts.authority cc.authority = cc.dopts.authority
} else if unixScheme {
cc.authority = "localhost"
} else { } else {
// Use endpoint from "scheme://authority/endpoint" as the default // Use endpoint from "scheme://authority/endpoint" as the default
// authority for ClientConn. // authority for ClientConn.
@ -311,7 +319,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
if s == connectivity.Ready { if s == connectivity.Ready {
break break
} else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure { } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure {
if err = cc.blockingpicker.connectionError(); err != nil { if err = cc.connectionError(); err != nil {
terr, ok := err.(interface { terr, ok := err.(interface {
Temporary() bool Temporary() bool
}) })
@ -322,6 +330,9 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
} }
if !cc.WaitForStateChange(ctx, s) { if !cc.WaitForStateChange(ctx, s) {
// ctx got timeout or canceled. // ctx got timeout or canceled.
if err = cc.connectionError(); err != nil && cc.dopts.returnLastError {
return nil, err
}
return nil, ctx.Err() return nil, ctx.Err()
} }
} }
@ -414,7 +425,7 @@ func (csm *connectivityStateManager) updateState(state connectivity.State) {
return return
} }
csm.state = state csm.state = state
channelz.Infof(csm.channelzID, "Channel Connectivity change to %v", state) channelz.Infof(logger, csm.channelzID, "Channel Connectivity change to %v", state)
if csm.notifyChan != nil { if csm.notifyChan != nil {
// There are other goroutines waiting on this channel. // There are other goroutines waiting on this channel.
close(csm.notifyChan) close(csm.notifyChan)
@ -490,11 +501,18 @@ type ClientConn struct {
channelzID int64 // channelz unique identification number channelzID int64 // channelz unique identification number
czData *channelzData czData *channelzData
lceMu sync.Mutex // protects lastConnectionError
lastConnectionError error
} }
// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or
// ctx expires. A true value is returned in former case and false in latter. // ctx expires. A true value is returned in former case and false in latter.
// This is an EXPERIMENTAL API. //
// Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool { func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool {
ch := cc.csMgr.getNotifyChan() ch := cc.csMgr.getNotifyChan()
if cc.csMgr.getState() != sourceState { if cc.csMgr.getState() != sourceState {
@ -509,7 +527,11 @@ func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connec
} }
// GetState returns the connectivity.State of ClientConn. // GetState returns the connectivity.State of ClientConn.
// This is an EXPERIMENTAL API. //
// Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func (cc *ClientConn) GetState() connectivity.State { func (cc *ClientConn) GetState() connectivity.State {
return cc.csMgr.getState() return cc.csMgr.getState()
} }
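GetState and WaitForStateChange remain experimental, but together they support waiting for a channel to become ready. A small sketch of that polling loop:

package example

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/connectivity"
)

// waitForReady blocks until the channel reaches Ready or ctx expires.
func waitForReady(ctx context.Context, cc *grpc.ClientConn) bool {
	for {
		s := cc.GetState()
		if s == connectivity.Ready {
			return true
		}
		if !cc.WaitForStateChange(ctx, s) {
			return false // ctx expired before the state moved on
		}
	}
}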
@ -664,9 +686,9 @@ func (cc *ClientConn) switchBalancer(name string) {
return return
} }
channelz.Infof(cc.channelzID, "ClientConn switching balancer to %q", name) channelz.Infof(logger, cc.channelzID, "ClientConn switching balancer to %q", name)
if cc.dopts.balancerBuilder != nil { if cc.dopts.balancerBuilder != nil {
channelz.Info(cc.channelzID, "ignoring balancer switching: Balancer DialOption used instead") channelz.Info(logger, cc.channelzID, "ignoring balancer switching: Balancer DialOption used instead")
return return
} }
if cc.balancerWrapper != nil { if cc.balancerWrapper != nil {
@ -675,11 +697,11 @@ func (cc *ClientConn) switchBalancer(name string) {
builder := balancer.Get(name) builder := balancer.Get(name)
if builder == nil { if builder == nil {
channelz.Warningf(cc.channelzID, "Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName) channelz.Warningf(logger, cc.channelzID, "Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName)
channelz.Infof(cc.channelzID, "failed to get balancer builder for: %v, using pick_first instead", name) channelz.Infof(logger, cc.channelzID, "failed to get balancer builder for: %v, using pick_first instead", name)
builder = newPickfirstBuilder() builder = newPickfirstBuilder()
} else { } else {
channelz.Infof(cc.channelzID, "Channel switches to new LB policy %q", name) channelz.Infof(logger, cc.channelzID, "Channel switches to new LB policy %q", name)
} }
cc.curBalancerName = builder.Name() cc.curBalancerName = builder.Name()
@ -720,12 +742,12 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub
} }
if channelz.IsOn() { if channelz.IsOn() {
ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "") ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "")
channelz.AddTraceEvent(ac.channelzID, 0, &channelz.TraceEventDesc{ channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{
Desc: "Subchannel Created", Desc: "Subchannel Created",
Severity: channelz.CtINFO, Severity: channelz.CtInfo,
Parent: &channelz.TraceEventDesc{ Parent: &channelz.TraceEventDesc{
Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID), Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID),
Severity: channelz.CtINFO, Severity: channelz.CtInfo,
}, },
}) })
} }
@ -759,7 +781,11 @@ func (cc *ClientConn) channelzMetric() *channelz.ChannelInternalMetric {
} }
// Target returns the target string of the ClientConn. // Target returns the target string of the ClientConn.
// This is an EXPERIMENTAL API. //
// Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func (cc *ClientConn) Target() string {
return cc.target
}
@@ -818,7 +844,7 @@ func (ac *addrConn) connect() error {
func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool {
ac.mu.Lock()
defer ac.mu.Unlock()
channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs)
if ac.state == connectivity.Shutdown ||
ac.state == connectivity.TransientFailure ||
ac.state == connectivity.Idle {
@@ -838,7 +864,7 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool {
break
}
}
channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound)
if curAddrFound {
ac.addrs = addrs
}
@@ -849,9 +875,10 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool {
// GetMethodConfig gets the method config of the input method.
// If there's an exact match for input method (i.e. /service/method), we return
// the corresponding MethodConfig.
// If there isn't an exact match for the input method, we look for the service's default
// config under the service (i.e /service/) and then for the default for all services (empty string).
//
// If there is a default MethodConfig for the service, we return it.
// Otherwise, we return an empty MethodConfig.
func (cc *ClientConn) GetMethodConfig(method string) MethodConfig {
// TODO: Avoid the locking here.
@@ -860,12 +887,14 @@ func (cc *ClientConn) GetMethodConfig(method string) MethodConfig {
if cc.sc == nil {
return MethodConfig{}
}
if m, ok := cc.sc.Methods[method]; ok {
return m
i := strings.LastIndex(method, "/")
m = cc.sc.Methods[method[:i+1]]
}
i := strings.LastIndex(method, "/")
if m, ok := cc.sc.Methods[method[:i+1]]; ok {
return m
}
return cc.sc.Methods[""]
}
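The reworked lookup above is easier to see in isolation. A minimal sketch of the same precedence rule (exact "/service/method", then the per-service default "/service/", then the global default under the empty key); the map[string]string stand-in for cc.sc.Methods and the example keys are hypothetical:

package methodconfig

import "strings"

// lookup mirrors GetMethodConfig's order: exact match, then service default, then global default.
func lookup(methods map[string]string, method string) string {
	if m, ok := methods[method]; ok {
		return m
	}
	i := strings.LastIndex(method, "/")
	if m, ok := methods[method[:i+1]]; ok {
		return m
	}
	return methods[""]
}

// Example: with {"/echo.Echo/UnaryEcho": "exact", "/echo.Echo/": "service", "": "global"},
// lookup returns "exact" for the exact method, "service" for another method on the same
// service, and "global" for a method on an unknown service.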
func (cc *ClientConn) healthCheckConfig() *healthCheckConfig {
@@ -957,7 +986,10 @@ func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) {
// However, if a previously unavailable network becomes available, this may be
// used to trigger an immediate reconnect.
//
// Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func (cc *ClientConn) ResetConnectBackoff() {
cc.mu.Lock()
conns := cc.conns
@@ -1001,15 +1033,15 @@ func (cc *ClientConn) Close() error {
if channelz.IsOn() {
ted := &channelz.TraceEventDesc{
Desc: "Channel Deleted",
Severity: channelz.CtInfo,
}
if cc.dopts.channelzParentID != 0 {
ted.Parent = &channelz.TraceEventDesc{
Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID),
Severity: channelz.CtInfo,
}
}
channelz.AddTraceEvent(logger, cc.channelzID, 0, ted)
// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to
// the entity being deleted, and thus prevent it from being deleted right away.
channelz.RemoveEntry(cc.channelzID)
@@ -1053,7 +1085,7 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error)
return
}
ac.state = s
channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s)
ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr)
}
@@ -1190,7 +1222,7 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T
}
ac.mu.Unlock()
channelz.Infof(logger, ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr)
newTr, reconnect, err := ac.createTransport(addr, copts, connectDeadline)
if err == nil {
@@ -1199,7 +1231,7 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T
if firstConnErr == nil {
firstConnErr = err
}
ac.cc.updateConnectionError(err)
}
// Couldn't connect to any address.
@@ -1214,16 +1246,9 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
onCloseCalled := make(chan struct{})
reconnect := grpcsync.NewEvent()
authority := ac.cc.authority
// addr.ServerName takes precedent over ClientConn authority, if present.
if addr.ServerName == "" {
addr.ServerName = ac.cc.authority
}
target := transport.TargetInfo{
Addr: addr.Addr,
Metadata: addr.Metadata,
Authority: authority,
}
once := sync.Once{}
@@ -1269,10 +1294,10 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
copts.ChannelzParentID = ac.channelzID
}
newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onPrefaceReceipt, onGoAway, onClose)
if err != nil {
// newTr is either nil, or closed.
channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v. Err: %v. Reconnecting...", addr, err)
return nil, nil, err
}
@@ -1280,7 +1305,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
case <-time.After(time.Until(connectDeadline)):
// We didn't get the preface in time.
newTr.Close()
channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr)
return nil, nil, errors.New("timed out waiting for server handshake")
case <-prefaceReceived:
// We got the preface - huzzah! things are good.
@@ -1297,7 +1322,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
//
// LB channel health checking is enabled when all requirements below are met:
// 1. it is not disabled by the user with the WithDisableHealthCheck DialOption
// 2. internal.HealthCheckFunc is set by importing the grpc/health package
// 3. a service config with non-empty healthCheckConfig field is provided
// 4. the load balancer requests it
//
@@ -1327,7 +1352,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) {
// The health package is not imported to set health check function.
//
// TODO: add a link to the health check doc in the error message.
channelz.Error(logger, ac.channelzID, "Health check is requested but health check function is not set.")
return
}
@@ -1357,9 +1382,9 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) {
err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName)
if err != nil {
if status.Code(err) == codes.Unimplemented {
channelz.Error(logger, ac.channelzID, "Subchannel health check is unimplemented at server side, thus health check is disabled")
} else {
channelz.Errorf(logger, ac.channelzID, "HealthCheckFunc exits with unexpected error %v", err)
}
}
}()
@@ -1424,12 +1449,12 @@ func (ac *addrConn) tearDown(err error) {
ac.mu.Lock()
}
if channelz.IsOn() {
channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{
Desc: "Subchannel Deleted",
Severity: channelz.CtInfo,
Parent: &channelz.TraceEventDesc{
Desc: fmt.Sprintf("Subchanel(id:%d) deleted", ac.channelzID),
Severity: channelz.CtInfo,
},
})
// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to
@@ -1532,3 +1557,15 @@ func (cc *ClientConn) getResolver(scheme string) resolver.Builder {
}
return resolver.Get(scheme)
}
func (cc *ClientConn) updateConnectionError(err error) {
cc.lceMu.Lock()
cc.lastConnectionError = err
cc.lceMu.Unlock()
}
func (cc *ClientConn) connectionError() error {
cc.lceMu.Lock()
defer cc.lceMu.Unlock()
return cc.lastConnectionError
}

View File

@@ -33,6 +33,9 @@ const (
OK Code = 0
// Canceled indicates the operation was canceled (typically by the caller).
//
// The gRPC framework will generate this error code when cancellation
// is requested.
Canceled Code = 1
// Unknown error. An example of where this error may be returned is
@@ -40,12 +43,17 @@ const (
// an error-space that is not known in this address space. Also
// errors raised by APIs that do not return enough error information
// may be converted to this error.
//
// The gRPC framework will generate this error code in the above two
// mentioned cases.
Unknown Code = 2
// InvalidArgument indicates client specified an invalid argument.
// Note that this differs from FailedPrecondition. It indicates arguments
// that are problematic regardless of the state of the system
// (e.g., a malformed file name).
//
// This error code will not be generated by the gRPC framework.
InvalidArgument Code = 3
// DeadlineExceeded means operation expired before completion.
@@ -53,14 +61,21 @@ const (
// returned even if the operation has completed successfully. For
// example, a successful response from a server could have been delayed
// long enough for the deadline to expire.
//
// The gRPC framework will generate this error code when the deadline is
// exceeded.
DeadlineExceeded Code = 4
// NotFound means some requested entity (e.g., file or directory) was
// not found.
//
// This error code will not be generated by the gRPC framework.
NotFound Code = 5
// AlreadyExists means an attempt to create an entity failed because one
// already exists.
//
// This error code will not be generated by the gRPC framework.
AlreadyExists Code = 6
// PermissionDenied indicates the caller does not have permission to
@@ -69,10 +84,17 @@ const (
// instead for those errors). It must not be
// used if the caller cannot be identified (use Unauthenticated
// instead for those errors).
//
// This error code will not be generated by the gRPC core framework,
// but expect authentication middleware to use it.
PermissionDenied Code = 7
// ResourceExhausted indicates some resource has been exhausted, perhaps
// a per-user quota, or perhaps the entire file system is out of space.
//
// This error code will be generated by the gRPC framework in
// out-of-memory and server overload situations, or when a message is
// larger than the configured maximum size.
ResourceExhausted Code = 8
// FailedPrecondition indicates operation was rejected because the
@@ -94,6 +116,8 @@ const (
// REST Get/Update/Delete on a resource and the resource on the
// server does not match the condition. E.g., conflicting
// read-modify-write on the same resource.
//
// This error code will not be generated by the gRPC framework.
FailedPrecondition Code = 9
// Aborted indicates the operation was aborted, typically due to a
@@ -102,6 +126,8 @@ const (
//
// See litmus test above for deciding between FailedPrecondition,
// Aborted, and Unavailable.
//
// This error code will not be generated by the gRPC framework.
Aborted Code = 10
// OutOfRange means operation was attempted past the valid range.
@@ -119,15 +145,26 @@ const (
// error) when it applies so that callers who are iterating through
// a space can easily look for an OutOfRange error to detect when
// they are done.
//
// This error code will not be generated by the gRPC framework.
OutOfRange Code = 11
// Unimplemented indicates operation is not implemented or not
// supported/enabled in this service.
//
// This error code will be generated by the gRPC framework. Most
// commonly, you will see this error code when a method implementation
// is missing on the server. It can also be generated for unknown
// compression algorithms or a disagreement as to whether an RPC should
// be streaming.
Unimplemented Code = 12
// Internal errors. Means some invariants expected by underlying
// system has been broken. If you see one of these errors,
// something is very broken.
//
// This error code will be generated by the gRPC framework in several
// internal error conditions.
Internal Code = 13
// Unavailable indicates the service is currently unavailable.
@@ -137,13 +174,22 @@ const (
//
// See litmus test above for deciding between FailedPrecondition,
// Aborted, and Unavailable.
//
// This error code will be generated by the gRPC framework during
// abrupt shutdown of a server process or network connection.
Unavailable Code = 14
// DataLoss indicates unrecoverable data loss or corruption.
//
// This error code will not be generated by the gRPC framework.
DataLoss Code = 15
// Unauthenticated indicates the request does not have valid
// authentication credentials for the operation.
//
// The gRPC framework will generate this error code when the
// authentication metadata is invalid or a Credentials callback fails,
// but also expect authentication middleware to generate it.
Unauthenticated Code = 16
_maxCode = 17
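A usage sketch (application code, not vendored gRPC): handlers attach one of these codes to an error through the status package, since most of them are never generated by the framework itself. The GetItem handler and its load helper are hypothetical:

package itemservice

import (
	"context"
	"errors"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

var errNoSuchItem = errors.New("no such item")

// GetItem maps an application-level failure onto a canonical gRPC code.
func GetItem(ctx context.Context, id string) (string, error) {
	item, err := load(id)
	if err != nil {
		if errors.Is(err, errNoSuchItem) {
			// NotFound is not generated by the framework, so the handler sets it.
			return "", status.Errorf(codes.NotFound, "item %q: %v", id, err)
		}
		return "", status.Error(codes.Internal, err.Error())
	}
	return item, nil
}

func load(id string) (string, error) { return "", errNoSuchItem }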

View File

@@ -22,11 +22,11 @@
package connectivity
import (
"context"
"google.golang.org/grpc/grpclog"
)
var logger = grpclog.Component("core")
// State indicates the state of connectivity.
// It can be the state of a ClientConn or SubConn.
type State int
@@ -44,7 +44,7 @@ func (s State) String() string {
case Shutdown:
return "SHUTDOWN"
default:
logger.Errorf("unknown connectivity state: %d", s)
return "Invalid-State"
}
}
@@ -61,13 +61,3 @@ const (
// Shutdown indicates the ClientConn has started shutting down.
Shutdown
)
// Reporter reports the connectivity states.
type Reporter interface {
// CurrentState returns the current state of the reporter.
CurrentState() State
// WaitForStateChange blocks until the reporter's state is different from the given state,
// and returns true.
// It returns false if <-ctx.Done() can proceed (ctx got timeout or got canceled).
WaitForStateChange(context.Context, State) bool
}
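With the Reporter interface removed, the same polling pattern is still available on *grpc.ClientConn through the experimental GetState and WaitForStateChange methods shown earlier in this diff; a minimal sketch:

package connwait

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/connectivity"
)

// waitForReady blocks until conn reports Ready or ctx is done.
func waitForReady(ctx context.Context, conn *grpc.ClientConn) error {
	for {
		s := conn.GetState()
		if s == connectivity.Ready {
			return nil
		}
		if !conn.WaitForStateChange(ctx, s) {
			return ctx.Err() // ctx canceled or deadline exceeded
		}
	}
}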

View File

@@ -29,6 +29,7 @@ import (
"net"
"github.com/golang/protobuf/proto"
"google.golang.org/grpc/attributes"
"google.golang.org/grpc/internal"
)
@@ -57,9 +58,11 @@ type PerRPCCredentials interface {
type SecurityLevel int
const (
// Invalid indicates an invalid security level.
// The zero SecurityLevel value is invalid for backward compatibility.
Invalid SecurityLevel = iota
// NoSecurity indicates a connection is insecure.
NoSecurity
// IntegrityOnly indicates a connection only provides integrity protection.
IntegrityOnly
// PrivacyAndIntegrity indicates a connection provides both privacy and integrity protection.
@@ -124,15 +127,18 @@ var ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gR
// TransportCredentials defines the common interface for all the live gRPC wire
// protocols and supported transport security protocols (e.g., TLS, SSL).
type TransportCredentials interface {
// ClientHandshake does the authentication handshake specified by the
// corresponding authentication protocol on rawConn for clients. It returns
// the authenticated connection and the corresponding auth information
// about the connection. The auth information should embed CommonAuthInfo
// to return additional information about the credentials. Implementations
// must use the provided context to implement timely cancellation. gRPC
// will try to reconnect if the error returned is a temporary error
// (io.EOF, context.DeadlineExceeded or err.Temporary() == true). If the
// returned error is a wrapper error, implementations should make sure that
// the error implements Temporary() to have the correct retry behaviors.
// Additionally, ClientHandshakeInfo data will be available via the context
// passed to this call.
//
// If the returned net.Conn is closed, it MUST close the net.Conn provided.
ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error)
@@ -193,6 +199,31 @@ func RequestInfoFromContext(ctx context.Context) (ri RequestInfo, ok bool) {
return
}
// ClientHandshakeInfo holds data to be passed to ClientHandshake. This makes
// it possible to pass arbitrary data to the handshaker from gRPC, resolver,
// balancer etc. Individual credential implementations control the actual
// format of the data that they are willing to receive.
//
// This API is experimental.
type ClientHandshakeInfo struct {
// Attributes contains the attributes for the address. It could be provided
// by the gRPC, resolver, balancer etc.
Attributes *attributes.Attributes
}
// clientHandshakeInfoKey is a struct used as the key to store
// ClientHandshakeInfo in a context.
type clientHandshakeInfoKey struct{}
// ClientHandshakeInfoFromContext returns the ClientHandshakeInfo struct stored
// in ctx.
//
// This API is experimental.
func ClientHandshakeInfoFromContext(ctx context.Context) ClientHandshakeInfo {
chi, _ := ctx.Value(clientHandshakeInfoKey{}).(ClientHandshakeInfo)
return chi
}
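A sketch of how a custom TransportCredentials implementation could read these attributes from inside its ClientHandshake; the "lb.weight" key is a made-up example of something a resolver or balancer might attach:

package handshakeinfo

import (
	"context"

	"google.golang.org/grpc/credentials"
)

// weightFromHandshake pulls a hypothetical "lb.weight" attribute attached to
// the address being dialed; it returns false when nothing was set.
func weightFromHandshake(ctx context.Context) (interface{}, bool) {
	chi := credentials.ClientHandshakeInfoFromContext(ctx)
	if chi.Attributes == nil {
		return nil, false
	}
	v := chi.Attributes.Value("lb.weight")
	return v, v != nil
}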
// CheckSecurityLevel checks if a connection's security level is greater than or equal to the specified one.
// It returns success if 1) the condition is satisified or 2) AuthInfo struct does not implement GetCommonAuthInfo() method
// or 3) CommonAuthInfo.SecurityLevel has an invalid zero value. For 2) and 3), it is for the purpose of backward-compatibility.
@@ -208,7 +239,7 @@ func CheckSecurityLevel(ctx context.Context, level SecurityLevel) error {
}
if ci, ok := ri.AuthInfo.(internalInfo); ok {
// CommonAuthInfo.SecurityLevel has an invalid value.
if ci.GetCommonAuthInfo().SecurityLevel == Invalid {
return nil
}
if ci.GetCommonAuthInfo().SecurityLevel < level {
@@ -223,6 +254,9 @@ func init() {
internal.NewRequestInfoContext = func(ctx context.Context, ri RequestInfo) context.Context {
return context.WithValue(ctx, requestInfoKey{}, ri)
}
internal.NewClientHandshakeInfoContext = func(ctx context.Context, chi ClientHandshakeInfo) context.Context {
return context.WithValue(ctx, clientHandshakeInfoKey{}, chi)
}
}
// ChannelzSecurityInfo defines the interface that security protocols should implement

View File

@@ -25,8 +25,9 @@ import (
"fmt"
"io/ioutil"
"net"
"net/url"
credinternal "google.golang.org/grpc/internal/credentials"
)
// TLSInfo contains the auth information for a TLS authenticated connection.
@@ -34,6 +35,8 @@ import (
type TLSInfo struct {
State tls.ConnectionState
CommonAuthInfo
// This API is experimental.
SPIFFEID *url.URL
}
// AuthType returns the type of TLSInfo as a string.
@@ -69,7 +72,7 @@ func (c tlsCreds) Info() ProtocolInfo {
func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) {
// use local cfg to avoid clobbering ServerName if using multiple endpoints
cfg := credinternal.CloneTLSConfig(c.config)
if cfg.ServerName == "" {
serverName, _, err := net.SplitHostPort(authority)
if err != nil {
@@ -94,7 +97,17 @@ func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawCon
conn.Close()
return nil, nil, ctx.Err()
}
tlsInfo := TLSInfo{
State: conn.ConnectionState(),
CommonAuthInfo: CommonAuthInfo{
SecurityLevel: PrivacyAndIntegrity,
},
}
id := credinternal.SPIFFEIDFromState(conn.ConnectionState())
if id != nil {
tlsInfo.SPIFFEID = id
}
return credinternal.WrapSyscallConn(rawConn, conn), tlsInfo, nil
}
func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) {
@@ -103,7 +116,17 @@ func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error)
conn.Close()
return nil, nil, err
}
tlsInfo := TLSInfo{
State: conn.ConnectionState(),
CommonAuthInfo: CommonAuthInfo{
SecurityLevel: PrivacyAndIntegrity,
},
}
id := credinternal.SPIFFEIDFromState(conn.ConnectionState())
if id != nil {
tlsInfo.SPIFFEID = id
}
return credinternal.WrapSyscallConn(rawConn, conn), tlsInfo, nil
}
func (c *tlsCreds) Clone() TransportCredentials {
@@ -115,23 +138,10 @@ func (c *tlsCreds) OverrideServerName(serverNameOverride string) error {
return nil
}
const alpnProtoStrH2 = "h2"
func appendH2ToNextProtos(ps []string) []string {
for _, p := range ps {
if p == alpnProtoStrH2 {
return ps
}
}
ret := make([]string, 0, len(ps)+1)
ret = append(ret, ps...)
return append(ret, alpnProtoStrH2)
}
// NewTLS uses c to construct a TransportCredentials based on TLS.
func NewTLS(c *tls.Config) TransportCredentials {
tc := &tlsCreds{credinternal.CloneTLSConfig(c)}
tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos)
return tc
}
@@ -185,7 +195,10 @@ func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error
// TLSChannelzSecurityValue defines the struct that TLS protocol should return
// from GetSecurityValue(), containing security info like cipher and certificate used.
//
// Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
type TLSChannelzSecurityValue struct {
ChannelzSecurityValue
StandardName string
@@ -218,18 +231,3 @@ var cipherSuiteLookup = map[uint16]string{
tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
}
// cloneTLSConfig returns a shallow clone of the exported
// fields of cfg, ignoring the unexported sync.Once, which
// contains a mutex and must not be copied.
//
// If cfg is nil, a new zero tls.Config is returned.
//
// TODO: inline this function if possible.
func cloneTLSConfig(cfg *tls.Config) *tls.Config {
if cfg == nil {
return &tls.Config{}
}
return cfg.Clone()
}

View File

@@ -27,7 +27,6 @@ import (
"google.golang.org/grpc/backoff"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal"
internalbackoff "google.golang.org/grpc/internal/backoff"
"google.golang.org/grpc/internal/envconfig"
@@ -46,18 +45,18 @@ type dialOptions struct {
chainUnaryInts []UnaryClientInterceptor
chainStreamInts []StreamClientInterceptor
cp Compressor
dc Decompressor
bs internalbackoff.Strategy
block bool
returnLastError bool
insecure bool
timeout time.Duration
scChan <-chan ServiceConfig
authority string
copts transport.ConnectOptions
callOptions []CallOption
// This is used by WithBalancerName dial option.
balancerBuilder balancer.Builder
channelzParentID int64
disableServiceConfig bool
@@ -83,7 +82,10 @@ type DialOption interface {
// EmptyDialOption does not alter the dial configuration. It can be embedded in
// another structure to build custom dial options.
//
// Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
type EmptyDialOption struct{}
func (EmptyDialOption) apply(*dialOptions) {}
@@ -199,19 +201,6 @@ func WithDecompressor(dc Decompressor) DialOption {
})
}
// WithBalancer returns a DialOption which sets a load balancer with the v1 API.
// Name resolver will be ignored if this DialOption is specified.
//
// Deprecated: use the new balancer APIs in balancer package and
// WithBalancerName. Will be removed in a future 1.x release.
func WithBalancer(b Balancer) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.balancerBuilder = &balancerWrapperBuilder{
b: b,
}
})
}
// WithBalancerName sets the balancer that the ClientConn will be initialized
// with. Balancer registered with balancerName will be used. This function
// panics if no balancer was registered by balancerName.
@@ -252,7 +241,10 @@ func WithServiceConfig(c <-chan ServiceConfig) DialOption {
// using the backoff.DefaultConfig as a base, in cases where you want to
// override only a subset of the backoff configuration.
//
// Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func WithConnectParams(p ConnectParams) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.bs = internalbackoff.Exponential{Config: p.Backoff}
@@ -299,6 +291,22 @@ func WithBlock() DialOption {
})
}
// WithReturnConnectionError returns a DialOption which makes the client connection
// return a string containing both the last connection error that occurred and
// the context.DeadlineExceeded error.
// Implies WithBlock()
//
// Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func WithReturnConnectionError() DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.block = true
o.returnLastError = true
})
}
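A dialing sketch that pairs the new option with a deadline so a failed connect surfaces the last transport error rather than a bare context.DeadlineExceeded; the target address is a placeholder:

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	// WithReturnConnectionError implies WithBlock, so DialContext waits for the
	// connection and, on failure, wraps the last connection error it saw.
	conn, err := grpc.DialContext(ctx, "localhost:50051",
		grpc.WithInsecure(),
		grpc.WithReturnConnectionError(),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}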
// WithInsecure returns a DialOption which disables transport security for this
// ClientConn. Note that transport security is required unless WithInsecure is
// set.
@@ -311,7 +319,10 @@ func WithInsecure() DialOption {
// WithNoProxy returns a DialOption which disables the use of proxies for this
// ClientConn. This is ignored if WithDialer or WithContextDialer are used.
//
// Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func WithNoProxy() DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.withProxy = false
@@ -339,7 +350,10 @@ func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption {
// the ClientConn.WithCreds. This should not be used together with
// WithTransportCredentials.
//
// Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func WithCredentialsBundle(b credentials.Bundle) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.copts.CredsBundle = b
@@ -404,7 +418,10 @@ func WithStatsHandler(h stats.Handler) DialOption {
// FailOnNonTempDialError only affects the initial dial, and does not do
// anything useful unless you are also using WithBlock().
//
// Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func FailOnNonTempDialError(f bool) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.copts.FailOnNonTempDialError = f
@@ -423,7 +440,7 @@ func WithUserAgent(s string) DialOption {
// for the client transport.
func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption {
if kp.Time < internal.KeepaliveMinPingTime {
logger.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime)
kp.Time = internal.KeepaliveMinPingTime
}
return newFuncDialOption(func(o *dialOptions) {
@@ -459,7 +476,7 @@ func WithStreamInterceptor(f StreamClientInterceptor) DialOption {
}
// WithChainStreamInterceptor returns a DialOption that specifies the chained
// interceptor for streaming RPCs. The first interceptor will be the outer most,
// while the last interceptor will be the inner most wrapper around the real call.
// All interceptors added by this method will be chained, and the interceptor
// defined by WithStreamInterceptor will always be prepended to the chain.
@@ -482,7 +499,10 @@ func WithAuthority(a string) DialOption {
// current ClientConn's parent. This function is used in nested channel creation
// (e.g. grpclb dial).
//
// Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func WithChannelzParentID(id int64) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.channelzParentID = id
@@ -508,7 +528,10 @@ func WithDisableServiceConfig() DialOption {
// 2. Resolver does not return a service config or if the resolver returns an
// invalid service config.
//
// Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func WithDefaultServiceConfig(s string) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.defaultServiceConfigRawJSON = &s
@@ -524,7 +547,10 @@ func WithDefaultServiceConfig(s string) DialOption {
// default in the future. Until then, it may be enabled by setting the
// environment variable "GRPC_GO_RETRY" to "on".
//
// Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func WithDisableRetry() DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.disableRetry = true
@@ -542,7 +568,10 @@ func WithMaxHeaderListSize(s uint32) DialOption {
// WithDisableHealthCheck disables the LB channel health checking for all
// SubConns of this ClientConn.
//
// Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func WithDisableHealthCheck() DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.disableHealthCheck = true
@@ -598,7 +627,10 @@ func withResolveNowBackoff(f func(int) time.Duration) DialOption {
// resolver.Register. They will be matched against the scheme used for the
// current Dial only, and will take precedence over the global registry.
//
// Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func WithResolvers(rs ...resolver.Builder) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.resolvers = append(o.resolvers, rs...)

View File

@@ -16,6 +16,8 @@
*
*/
//go:generate ./regenerate.sh
/*
Package grpc implements an RPC system called gRPC.

View File

@@ -19,7 +19,10 @@
// Package encoding defines the interface for the compressor and codec, and
// functions to register and retrieve compressors and codecs.
//
// Experimental
//
// Notice: This package is EXPERIMENTAL and may be changed or removed in a
// later release.
package encoding
import (
@@ -46,10 +49,15 @@ type Compressor interface {
// coding header. The result must be static; the result cannot change
// between calls.
Name() string
// If a Compressor implements
// DecompressedSize(compressedBytes []byte) int, gRPC will call it
// to determine the size of the buffer allocated for the result of decompression.
// Return -1 to indicate unknown size.
//
// Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
}
var registeredCompressor = make(map[string]Compressor)

View File

@@ -6,9 +6,9 @@ require (
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f
github.com/envoyproxy/go-control-plane v0.9.4
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
github.com/golang/mock v1.1.1
github.com/golang/protobuf v1.3.3
github.com/google/go-cmp v0.4.0
github.com/google/uuid v1.1.2
golang.org/x/net v0.0.0-20190311183353-d8887717615a
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a

View File

@@ -23,6 +23,10 @@ github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -50,6 +54,8 @@ golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135 h1:5Beo0mZN8dRzgrMMkDp0jc8YXQKx9DiJ2k1dkvGsn5A=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=

117
vendor/google.golang.org/grpc/grpclog/component.go generated vendored Normal file
View File

@@ -0,0 +1,117 @@
/*
*
* Copyright 2020 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package grpclog
import (
"fmt"
"google.golang.org/grpc/internal/grpclog"
)
// componentData records the settings for a component.
type componentData struct {
name string
}
var cache = map[string]*componentData{}
func (c *componentData) InfoDepth(depth int, args ...interface{}) {
args = append([]interface{}{"[" + string(c.name) + "]"}, args...)
grpclog.InfoDepth(depth+1, args...)
}
func (c *componentData) WarningDepth(depth int, args ...interface{}) {
args = append([]interface{}{"[" + string(c.name) + "]"}, args...)
grpclog.WarningDepth(depth+1, args...)
}
func (c *componentData) ErrorDepth(depth int, args ...interface{}) {
args = append([]interface{}{"[" + string(c.name) + "]"}, args...)
grpclog.ErrorDepth(depth+1, args...)
}
func (c *componentData) FatalDepth(depth int, args ...interface{}) {
args = append([]interface{}{"[" + string(c.name) + "]"}, args...)
grpclog.FatalDepth(depth+1, args...)
}
func (c *componentData) Info(args ...interface{}) {
c.InfoDepth(1, args...)
}
func (c *componentData) Warning(args ...interface{}) {
c.WarningDepth(1, args...)
}
func (c *componentData) Error(args ...interface{}) {
c.ErrorDepth(1, args...)
}
func (c *componentData) Fatal(args ...interface{}) {
c.FatalDepth(1, args...)
}
func (c *componentData) Infof(format string, args ...interface{}) {
c.InfoDepth(1, fmt.Sprintf(format, args...))
}
func (c *componentData) Warningf(format string, args ...interface{}) {
c.WarningDepth(1, fmt.Sprintf(format, args...))
}
func (c *componentData) Errorf(format string, args ...interface{}) {
c.ErrorDepth(1, fmt.Sprintf(format, args...))
}
func (c *componentData) Fatalf(format string, args ...interface{}) {
c.FatalDepth(1, fmt.Sprintf(format, args...))
}
func (c *componentData) Infoln(args ...interface{}) {
c.InfoDepth(1, args...)
}
func (c *componentData) Warningln(args ...interface{}) {
c.WarningDepth(1, args...)
}
func (c *componentData) Errorln(args ...interface{}) {
c.ErrorDepth(1, args...)
}
func (c *componentData) Fatalln(args ...interface{}) {
c.FatalDepth(1, args...)
}
func (c *componentData) V(l int) bool {
return V(l)
}
// Component creates a new component and returns it for logging. If a component
// with the name already exists, nothing will be created and it will be
// returned. SetLoggerV2 will panic if it is called with a logger created by
// Component.
func Component(componentName string) DepthLoggerV2 {
if cData, ok := cache[componentName]; ok {
return cData
}
c := &componentData{componentName}
cache[componentName] = c
return c
}
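Usage matches what this commit already does elsewhere (for example grpclog.Component("core") in the connectivity package): a package creates one component logger and all of its log lines get prefixed with that name. A minimal sketch with an arbitrary component name:

package mypkg

import "google.golang.org/grpc/grpclog"

// One component logger per package; "mycomponent" is an arbitrary name.
var logger = grpclog.Component("mycomponent")

func doWork() {
	logger.Info("starting work")          // emitted as: [mycomponent] starting work
	logger.Warningf("retrying in %v", 10) // formatted variants are available too
}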

View File

@@ -67,6 +67,9 @@ type LoggerV2 interface {
// SetLoggerV2 sets logger that is used in grpc to a V2 logger.
// Not mutex-protected, should be called before any gRPC functions.
func SetLoggerV2(l LoggerV2) {
if _, ok := l.(*componentData); ok {
panic("cannot use component logger as grpclog logger")
}
grpclog.Logger = l
grpclog.DepthLogger, _ = l.(grpclog.DepthLoggerV2)
}
@@ -201,8 +204,12 @@ func (g *loggerT) V(l int) bool {
// DepthLoggerV2, the below functions will be called with the appropriate stack
// depth set for trivial functions the logger may ignore.
//
// Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
type DepthLoggerV2 interface {
LoggerV2
// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print.
InfoDepth(depth int, args ...interface{})
// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print.

View File

@@ -3,4 +3,4 @@
TMP=$(mktemp -d /tmp/sdk.XXX) \
&& curl -o $TMP.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.68.zip" \
&& unzip -q $TMP.zip -d $TMP \
&& export PATH="$PATH:$TMP/go_appengine"

View File

@@ -27,7 +27,11 @@ type UnaryInvoker func(ctx context.Context, method string, req, reply interface{
// UnaryClientInterceptor intercepts the execution of a unary RPC on the client. invoker is the handler to complete the RPC
// and it is the responsibility of the interceptor to call it.
//
// Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error
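A small client-side logging interceptor with this exact signature, wired in through grpc.WithUnaryInterceptor; the dial target is a placeholder:

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

// logUnary times every unary RPC and then delegates to the real invoker.
func logUnary(ctx context.Context, method string, req, reply interface{},
	cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	start := time.Now()
	err := invoker(ctx, method, req, reply, cc, opts...)
	log.Printf("rpc %s took %v err=%v", method, time.Since(start), err)
	return err
}

func main() {
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		grpc.WithUnaryInterceptor(logUnary),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}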
// Streamer is called by StreamClientInterceptor to create a ClientStream.
@@ -35,7 +39,11 @@ type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method
// StreamClientInterceptor intercepts the creation of ClientStream. It may return a custom ClientStream to intercept all I/O
// operations. streamer is the handler to create a ClientStream and it is the responsibility of the interceptor to call it.
//
// Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error)
// UnaryServerInfo consists of various information about a unary RPC on // UnaryServerInfo consists of various information about a unary RPC on
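Only the deprecation wording changes here; the interceptor signatures stay the same. As a reminder of how the hook is used, here is a minimal sketch of a unary client interceptor; the function name, target address, and log messages are illustrative.

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
)

// logUnary satisfies grpc.UnaryClientInterceptor: log the method, delegate to
// the real invoker, then report the outcome.
func logUnary(ctx context.Context, method string, req, reply interface{},
	cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	log.Printf("-> %s", method)
	err := invoker(ctx, method, req, reply, cc, opts...)
	log.Printf("<- %s err=%v", method, err)
	return err
}

func main() {
	// Installed at dial time.
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		grpc.WithUnaryInterceptor(logUnary))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```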

@ -25,6 +25,7 @@ import (
"os"
"google.golang.org/grpc/grpclog"
+"google.golang.org/grpc/internal/grpcutil"
)
// Logger is the global binary logger. It can be used to get binary logger for
@ -39,6 +40,8 @@ type Logger interface {
// It is used to get a methodLogger for each individual method.
var binLogger Logger
+var grpclogLogger = grpclog.Component("binarylog")
// SetLogger sets the binarg logger.
//
// Only call this at init time.
@ -146,9 +149,9 @@ func (l *logger) setBlacklist(method string) error {
// Each methodLogger returned by this method is a new instance. This is to
// generate sequence id within the call.
func (l *logger) getMethodLogger(methodName string) *MethodLogger {
-s, m, err := parseMethodName(methodName)
+s, m, err := grpcutil.ParseMethod(methodName)
if err != nil {
-grpclog.Infof("binarylogging: failed to parse %q: %v", methodName, err)
+grpclogLogger.Infof("binarylogging: failed to parse %q: %v", methodName, err)
return nil
}
if ml, ok := l.methods[s+"/"+m]; ok {

@ -24,8 +24,6 @@ import (
"regexp"
"strconv"
"strings"
-"google.golang.org/grpc/grpclog"
)
// NewLoggerFromConfigString reads the string and build a logger. It can be used
@ -52,7 +50,7 @@ func NewLoggerFromConfigString(s string) Logger {
methods := strings.Split(s, ",")
for _, method := range methods {
if err := l.fillMethodLoggerWithConfigString(method); err != nil {
-grpclog.Warningf("failed to parse binary log config: %v", err)
+grpclogLogger.Warningf("failed to parse binary log config: %v", err)
return nil
}
}
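The parser behind NewLoggerFromConfigString consumes the comma-separated filter grammar from the gRPC binary-logging spec, normally supplied through the GRPC_BINARY_LOG_FILTER environment variable. The values below sketch that grammar; the service and method names are made up and the exact strings are not taken from this commit.

```go
package example

// Illustrative GRPC_BINARY_LOG_FILTER values, one pattern per entry.
var binaryLogFilters = []string{
	"*",               // every method of every service
	"Foo/*",           // every method of service Foo
	"Foo/Bar",         // only Foo/Bar
	"-Foo/Baz",        // negation: never log Foo/Baz
	"*{h:256;m:2048}", // everything, with header/message byte caps
	"Foo/*,-Foo/Baz",  // comma-separated combination, as the env var expects
}
```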

@ -27,7 +27,6 @@ import (
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
-"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
)
@ -66,7 +65,7 @@ func newMethodLogger(h, m uint64) *MethodLogger {
callID: idGen.next(),
idWithinCallGen: &callIDGenerator{},
-sink: defaultSink, // TODO(blog): make it plugable.
+sink: DefaultSink, // TODO(blog): make it plugable.
}
}
@ -219,12 +218,12 @@ func (c *ClientMessage) toProto() *pb.GrpcLogEntry {
if m, ok := c.Message.(proto.Message); ok {
data, err = proto.Marshal(m)
if err != nil {
-grpclog.Infof("binarylogging: failed to marshal proto message: %v", err)
+grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err)
}
} else if b, ok := c.Message.([]byte); ok {
data = b
} else {
-grpclog.Infof("binarylogging: message to log is neither proto.message nor []byte")
+grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte")
}
ret := &pb.GrpcLogEntry{
Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE,
@ -259,12 +258,12 @@ func (c *ServerMessage) toProto() *pb.GrpcLogEntry {
if m, ok := c.Message.(proto.Message); ok {
data, err = proto.Marshal(m)
if err != nil {
-grpclog.Infof("binarylogging: failed to marshal proto message: %v", err)
+grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err)
}
} else if b, ok := c.Message.([]byte); ok {
data = b
} else {
-grpclog.Infof("binarylogging: message to log is neither proto.message nor []byte")
+grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte")
}
ret := &pb.GrpcLogEntry{
Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE,
@ -315,7 +314,7 @@ type ServerTrailer struct {
func (c *ServerTrailer) toProto() *pb.GrpcLogEntry {
st, ok := status.FromError(c.Err)
if !ok {
-grpclog.Info("binarylogging: error in trailer is not a status error")
+grpclogLogger.Info("binarylogging: error in trailer is not a status error")
}
var (
detailsBytes []byte
@ -325,7 +324,7 @@ func (c *ServerTrailer) toProto() *pb.GrpcLogEntry {
if stProto != nil && len(stProto.Details) != 0 {
detailsBytes, err = proto.Marshal(stProto)
if err != nil {
-grpclog.Infof("binarylogging: failed to marshal status proto: %v", err)
+grpclogLogger.Infof("binarylogging: failed to marshal status proto: %v", err)
}
}
ret := &pb.GrpcLogEntry{

@ -1,33 +0,0 @@
#!/bin/bash
# Copyright 2018 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -eux -o pipefail
TMP=$(mktemp -d)
function finish {
rm -rf "$TMP"
}
trap finish EXIT
pushd "$TMP"
mkdir -p grpc/binarylog/grpc_binarylog_v1
curl https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/binlog/v1/binarylog.proto > grpc/binarylog/grpc_binarylog_v1/binarylog.proto
protoc --go_out=plugins=grpc,paths=source_relative:. -I. grpc/binarylog/grpc_binarylog_v1/*.proto
popd
rm -f ./grpc_binarylog_v1/*.pb.go
cp "$TMP"/grpc/binarylog/grpc_binarylog_v1/*.pb.go ../../binarylog/grpc_binarylog_v1/

@ -21,32 +21,23 @@ package binarylog
import (
"bufio"
"encoding/binary"
-"fmt"
"io"
-"io/ioutil"
"sync"
"time"
"github.com/golang/protobuf/proto"
pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
-"google.golang.org/grpc/grpclog"
)
var (
-defaultSink Sink = &noopSink{} // TODO(blog): change this default (file in /tmp).
+// DefaultSink is the sink where the logs will be written to. It's exported
+// for the binarylog package to update.
+DefaultSink Sink = &noopSink{} // TODO(blog): change this default (file in /tmp).
)
-// SetDefaultSink sets the sink where binary logs will be written to.
-//
-// Not thread safe. Only set during initialization.
-func SetDefaultSink(s Sink) {
-if defaultSink != nil {
-defaultSink.Close()
-}
-defaultSink = s
-}
// Sink writes log entry into the binary log sink.
+//
+// sink is a copy of the exported binarylog.Sink, to avoid circular dependency.
type Sink interface {
// Write will be called to write the log entry into the sink.
//
@ -67,7 +58,7 @@ func (ns *noopSink) Close() error { return nil }
// message is prefixed with a 4 byte big endian unsigned integer as the length.
//
// No buffer is done, Close() doesn't try to close the writer.
-func newWriterSink(w io.Writer) *writerSink {
+func newWriterSink(w io.Writer) Sink {
return &writerSink{out: w}
}
@ -78,7 +69,7 @@ type writerSink struct {
func (ws *writerSink) Write(e *pb.GrpcLogEntry) error {
b, err := proto.Marshal(e)
if err != nil {
-grpclog.Infof("binary logging: failed to marshal proto message: %v", err)
+grpclogLogger.Infof("binary logging: failed to marshal proto message: %v", err)
}
hdr := make([]byte, 4)
binary.BigEndian.PutUint32(hdr, uint32(len(b)))
@ -93,17 +84,17 @@ func (ws *writerSink) Write(e *pb.GrpcLogEntry) error {
func (ws *writerSink) Close() error { return nil }
-type bufWriteCloserSink struct {
+type bufferedSink struct {
mu sync.Mutex
closer io.Closer
-out *writerSink // out is built on buf.
+out Sink // out is built on buf.
buf *bufio.Writer // buf is kept for flush.
writeStartOnce sync.Once
writeTicker *time.Ticker
}
-func (fs *bufWriteCloserSink) Write(e *pb.GrpcLogEntry) error {
+func (fs *bufferedSink) Write(e *pb.GrpcLogEntry) error {
// Start the write loop when Write is called.
fs.writeStartOnce.Do(fs.startFlushGoroutine)
fs.mu.Lock()
@ -119,44 +110,50 @@ const (
bufFlushDuration = 60 * time.Second
)
-func (fs *bufWriteCloserSink) startFlushGoroutine() {
+func (fs *bufferedSink) startFlushGoroutine() {
fs.writeTicker = time.NewTicker(bufFlushDuration)
go func() {
for range fs.writeTicker.C {
fs.mu.Lock()
-fs.buf.Flush()
+if err := fs.buf.Flush(); err != nil {
+grpclogLogger.Warningf("failed to flush to Sink: %v", err)
+}
fs.mu.Unlock()
}
}()
}
-func (fs *bufWriteCloserSink) Close() error {
+func (fs *bufferedSink) Close() error {
if fs.writeTicker != nil {
fs.writeTicker.Stop()
}
fs.mu.Lock()
-fs.buf.Flush()
-fs.closer.Close()
-fs.out.Close()
+if err := fs.buf.Flush(); err != nil {
+grpclogLogger.Warningf("failed to flush to Sink: %v", err)
+}
+if err := fs.closer.Close(); err != nil {
+grpclogLogger.Warningf("failed to close the underlying WriterCloser: %v", err)
+}
+if err := fs.out.Close(); err != nil {
+grpclogLogger.Warningf("failed to close the Sink: %v", err)
+}
fs.mu.Unlock()
return nil
}
-func newBufWriteCloserSink(o io.WriteCloser) Sink {
+// NewBufferedSink creates a binary log sink with the given WriteCloser.
+//
+// Write() marshals the proto message and writes it to the given writer. Each
+// message is prefixed with a 4 byte big endian unsigned integer as the length.
+//
+// Content is kept in a buffer, and is flushed every 60 seconds.
+//
+// Close closes the WriteCloser.
+func NewBufferedSink(o io.WriteCloser) Sink {
bufW := bufio.NewWriter(o)
-return &bufWriteCloserSink{
+return &bufferedSink{
closer: o,
out: newWriterSink(bufW),
buf: bufW,
}
}
-// NewTempFileSink creates a temp file and returns a Sink that writes to this
-// file.
-func NewTempFileSink() (Sink, error) {
-tempFile, err := ioutil.TempFile("/tmp", "grpcgo_binarylog_*.txt")
-if err != nil {
-return nil, fmt.Errorf("failed to create temp file: %v", err)
-}
-return newBufWriteCloserSink(tempFile), nil
-}
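The framing used by the writer sink is stated in the comment above: each marshaled GrpcLogEntry is preceded by its length as a 4-byte big-endian integer. A sketch of reading such a stream back follows; the file path is illustrative and error handling is kept deliberately blunt.

```go
package main

import (
	"encoding/binary"
	"fmt"
	"io"
	"os"

	"github.com/golang/protobuf/proto"
	pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
)

func main() {
	f, err := os.Open("/tmp/grpcgo_binarylog.bin") // illustrative path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	for {
		// 4-byte big-endian length prefix, then the entry itself.
		hdr := make([]byte, 4)
		if _, err := io.ReadFull(f, hdr); err == io.EOF {
			break
		} else if err != nil {
			panic(err)
		}
		body := make([]byte, binary.BigEndian.Uint32(hdr))
		if _, err := io.ReadFull(f, body); err != nil {
			panic(err)
		}
		entry := new(pb.GrpcLogEntry)
		if err := proto.Unmarshal(body, entry); err != nil {
			panic(err)
		}
		fmt.Println(entry.GetType(), entry.GetCallId())
	}
}
```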

@ -1,41 +0,0 @@
/*
*
* Copyright 2018 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package binarylog
import (
"errors"
"strings"
)
// parseMethodName splits service and method from the input. It expects format
// "/service/method".
//
// TODO: move to internal/grpcutil.
func parseMethodName(methodName string) (service, method string, _ error) {
if !strings.HasPrefix(methodName, "/") {
return "", "", errors.New("invalid method name: should start with /")
}
methodName = methodName[1:]
pos := strings.LastIndex(methodName, "/")
if pos < 0 {
return "", "", errors.New("invalid method name: suffix /method is missing")
}
return methodName[:pos], methodName[pos+1:], nil
}

@ -30,7 +30,7 @@ import (
"sync/atomic"
"time"
-"google.golang.org/grpc/internal/grpclog"
+"google.golang.org/grpc/grpclog"
)
const (
@ -216,7 +216,7 @@ func RegisterChannel(c Channel, pid int64, ref string) int64 {
// by pid). It returns the unique channelz tracking id assigned to this subchannel.
func RegisterSubChannel(c Channel, pid int64, ref string) int64 {
if pid == 0 {
-grpclog.ErrorDepth(0, "a SubChannel's parent id cannot be 0")
+logger.Error("a SubChannel's parent id cannot be 0")
return 0
}
id := idGen.genID()
@ -253,7 +253,7 @@ func RegisterServer(s Server, ref string) int64 {
// this listen socket.
func RegisterListenSocket(s Socket, pid int64, ref string) int64 {
if pid == 0 {
-grpclog.ErrorDepth(0, "a ListenSocket's parent id cannot be 0")
+logger.Error("a ListenSocket's parent id cannot be 0")
return 0
}
id := idGen.genID()
@ -268,7 +268,7 @@ func RegisterListenSocket(s Socket, pid int64, ref string) int64 {
// this normal socket.
func RegisterNormalSocket(s Socket, pid int64, ref string) int64 {
if pid == 0 {
-grpclog.ErrorDepth(0, "a NormalSocket's parent id cannot be 0")
+logger.Error("a NormalSocket's parent id cannot be 0")
return 0
}
id := idGen.genID()
@ -294,17 +294,15 @@ type TraceEventDesc struct {
}
// AddTraceEvent adds trace related to the entity with specified id, using the provided TraceEventDesc.
-func AddTraceEvent(id int64, depth int, desc *TraceEventDesc) {
+func AddTraceEvent(l grpclog.DepthLoggerV2, id int64, depth int, desc *TraceEventDesc) {
for d := desc; d != nil; d = d.Parent {
switch d.Severity {
-case CtUNKNOWN:
-grpclog.InfoDepth(depth+1, d.Desc)
-case CtINFO:
-grpclog.InfoDepth(depth+1, d.Desc)
+case CtUnknown, CtInfo:
+l.InfoDepth(depth+1, d.Desc)
case CtWarning:
-grpclog.WarningDepth(depth+1, d.Desc)
+l.WarningDepth(depth+1, d.Desc)
case CtError:
-grpclog.ErrorDepth(depth+1, d.Desc)
+l.ErrorDepth(depth+1, d.Desc)
}
}
if getMaxTraceEntry() == 0 {

@ -21,80 +21,82 @@ package channelz
import (
"fmt"
-"google.golang.org/grpc/internal/grpclog"
+"google.golang.org/grpc/grpclog"
)
-// Info logs through grpclog.Info and adds a trace event if channelz is on.
-func Info(id int64, args ...interface{}) {
+var logger = grpclog.Component("channelz")
+// Info logs and adds a trace event if channelz is on.
+func Info(l grpclog.DepthLoggerV2, id int64, args ...interface{}) {
if IsOn() {
-AddTraceEvent(id, 1, &TraceEventDesc{
+AddTraceEvent(l, id, 1, &TraceEventDesc{
Desc: fmt.Sprint(args...),
-Severity: CtINFO,
+Severity: CtInfo,
})
} else {
-grpclog.InfoDepth(1, args...)
+l.InfoDepth(1, args...)
}
}
-// Infof logs through grpclog.Infof and adds a trace event if channelz is on.
-func Infof(id int64, format string, args ...interface{}) {
+// Infof logs and adds a trace event if channelz is on.
+func Infof(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
if IsOn() {
-AddTraceEvent(id, 1, &TraceEventDesc{
+AddTraceEvent(l, id, 1, &TraceEventDesc{
Desc: msg,
-Severity: CtINFO,
+Severity: CtInfo,
})
} else {
-grpclog.InfoDepth(1, msg)
+l.InfoDepth(1, msg)
}
}
-// Warning logs through grpclog.Warning and adds a trace event if channelz is on.
-func Warning(id int64, args ...interface{}) {
+// Warning logs and adds a trace event if channelz is on.
+func Warning(l grpclog.DepthLoggerV2, id int64, args ...interface{}) {
if IsOn() {
-AddTraceEvent(id, 1, &TraceEventDesc{
+AddTraceEvent(l, id, 1, &TraceEventDesc{
Desc: fmt.Sprint(args...),
Severity: CtWarning,
})
} else {
-grpclog.WarningDepth(1, args...)
+l.WarningDepth(1, args...)
}
}
-// Warningf logs through grpclog.Warningf and adds a trace event if channelz is on.
-func Warningf(id int64, format string, args ...interface{}) {
+// Warningf logs and adds a trace event if channelz is on.
+func Warningf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
if IsOn() {
-AddTraceEvent(id, 1, &TraceEventDesc{
+AddTraceEvent(l, id, 1, &TraceEventDesc{
Desc: msg,
Severity: CtWarning,
})
} else {
-grpclog.WarningDepth(1, msg)
+l.WarningDepth(1, msg)
}
}
-// Error logs through grpclog.Error and adds a trace event if channelz is on.
-func Error(id int64, args ...interface{}) {
+// Error logs and adds a trace event if channelz is on.
+func Error(l grpclog.DepthLoggerV2, id int64, args ...interface{}) {
if IsOn() {
-AddTraceEvent(id, 1, &TraceEventDesc{
+AddTraceEvent(l, id, 1, &TraceEventDesc{
Desc: fmt.Sprint(args...),
Severity: CtError,
})
} else {
-grpclog.ErrorDepth(1, args...)
+l.ErrorDepth(1, args...)
}
}
-// Errorf logs through grpclog.Errorf and adds a trace event if channelz is on.
-func Errorf(id int64, format string, args ...interface{}) {
+// Errorf logs and adds a trace event if channelz is on.
+func Errorf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
if IsOn() {
-AddTraceEvent(id, 1, &TraceEventDesc{
+AddTraceEvent(l, id, 1, &TraceEventDesc{
Desc: msg,
Severity: CtError,
})
} else {
-grpclog.ErrorDepth(1, msg)
+l.ErrorDepth(1, msg)
}
}
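Every channelz logging helper now takes the component logger explicitly instead of falling back to the global grpclog functions, so call sites change shape as sketched below. channelz is an internal package, so this only compiles inside the grpc module; the component name, function, and message are illustrative.

```go
package example

import (
	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/grpclog"
	"google.golang.org/grpc/internal/channelz"
)

var logger = grpclog.Component("core")

// Before this change: channelz.Infof(id, "Subchannel Connectivity change to %v", state)
// After it, the DepthLoggerV2 is passed as the first argument.
func onSubConnStateChange(id int64, state connectivity.State) {
	channelz.Infof(logger, id, "Subchannel Connectivity change to %v", state)
}
```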

@ -26,7 +26,6 @@ import (
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/credentials"
-"google.golang.org/grpc/grpclog"
)
// entry represents a node in the channelz database.
@ -60,17 +59,17 @@ func (d *dummyEntry) addChild(id int64, e entry) {
// the addrConn will create a new transport. And when registering the new transport in
// channelz, its parent addrConn could have already been torn down and deleted
// from channelz tracking, and thus reach the code here.
-grpclog.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound)
+logger.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound)
}
func (d *dummyEntry) deleteChild(id int64) {
// It is possible for a normal program to reach here under race condition.
// Refer to the example described in addChild().
-grpclog.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound)
+logger.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound)
}
func (d *dummyEntry) triggerDelete() {
-grpclog.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound)
+logger.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound)
}
func (*dummyEntry) deleteSelfIfReady() {
@ -215,7 +214,7 @@ func (c *channel) addChild(id int64, e entry) {
case *channel:
c.nestedChans[id] = v.refName
default:
-grpclog.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e)
+logger.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e)
}
}
@ -326,7 +325,7 @@ func (sc *subChannel) addChild(id int64, e entry) {
if v, ok := e.(*normalSocket); ok {
sc.sockets[id] = v.refName
} else {
-grpclog.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e)
+logger.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e)
}
}
@ -493,11 +492,11 @@ type listenSocket struct {
}
func (ls *listenSocket) addChild(id int64, e entry) {
-grpclog.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e)
+logger.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e)
}
func (ls *listenSocket) deleteChild(id int64) {
-grpclog.Errorf("cannot delete a child (id = %d) from a listen socket", id)
+logger.Errorf("cannot delete a child (id = %d) from a listen socket", id)
}
func (ls *listenSocket) triggerDelete() {
@ -506,7 +505,7 @@ func (ls *listenSocket) triggerDelete() {
}
func (ls *listenSocket) deleteSelfIfReady() {
-grpclog.Errorf("cannot call deleteSelfIfReady on a listen socket")
+logger.Errorf("cannot call deleteSelfIfReady on a listen socket")
}
func (ls *listenSocket) getParentID() int64 {
@ -522,11 +521,11 @@ type normalSocket struct {
}
func (ns *normalSocket) addChild(id int64, e entry) {
-grpclog.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e)
+logger.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e)
}
func (ns *normalSocket) deleteChild(id int64) {
-grpclog.Errorf("cannot delete a child (id = %d) from a normal socket", id)
+logger.Errorf("cannot delete a child (id = %d) from a normal socket", id)
}
func (ns *normalSocket) triggerDelete() {
@ -535,7 +534,7 @@ func (ns *normalSocket) triggerDelete() {
}
func (ns *normalSocket) deleteSelfIfReady() {
-grpclog.Errorf("cannot call deleteSelfIfReady on a normal socket")
+logger.Errorf("cannot call deleteSelfIfReady on a normal socket")
}
func (ns *normalSocket) getParentID() int64 {
@ -594,7 +593,7 @@ func (s *server) addChild(id int64, e entry) {
case *listenSocket:
s.listenSockets[id] = v.refName
default:
-grpclog.Errorf("cannot add a child (id = %d) of type %T to a server", id, e)
+logger.Errorf("cannot add a child (id = %d) of type %T to a server", id, e)
}
}
@ -673,10 +672,10 @@ func (c *channelTrace) clear() {
type Severity int
const (
-// CtUNKNOWN indicates unknown severity of a trace event.
-CtUNKNOWN Severity = iota
+// CtUnknown indicates unknown severity of a trace event.
+CtUnknown Severity = iota
-// CtINFO indicates info level severity of a trace event.
-CtINFO
+// CtInfo indicates info level severity of a trace event.
+CtInfo
// CtWarning indicates warning level severity of a trace event.
CtWarning
// CtError indicates error level severity of a trace event.

@ -22,8 +22,6 @@ package channelz
import (
"sync"
-"google.golang.org/grpc/grpclog"
)
var once sync.Once
@ -39,6 +37,6 @@ type SocketOptionData struct {
// Windows OS doesn't support Socket Option
func (s *SocketOptionData) Getsockopt(fd uintptr) {
once.Do(func() {
-grpclog.Warningln("Channelz: socket options are not supported on non-linux os and appengine.")
+logger.Warning("Channelz: socket options are not supported on non-linux os and appengine.")
})
}

@ -0,0 +1,67 @@
// +build !appengine
/*
*
* Copyright 2020 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package credentials defines APIs for parsing SPIFFE ID.
//
// All APIs in this package are experimental.
package credentials
import (
"crypto/tls"
"net/url"
"google.golang.org/grpc/grpclog"
)
var logger = grpclog.Component("credentials")
// SPIFFEIDFromState parses the SPIFFE ID from State. If the SPIFFE ID format
// is invalid, return nil with warning.
func SPIFFEIDFromState(state tls.ConnectionState) *url.URL {
if len(state.PeerCertificates) == 0 || len(state.PeerCertificates[0].URIs) == 0 {
return nil
}
var spiffeID *url.URL
for _, uri := range state.PeerCertificates[0].URIs {
if uri == nil || uri.Scheme != "spiffe" || uri.Opaque != "" || (uri.User != nil && uri.User.Username() != "") {
continue
}
// From this point, we assume the uri is intended for a SPIFFE ID.
if len(uri.String()) > 2048 {
logger.Warning("invalid SPIFFE ID: total ID length larger than 2048 bytes")
return nil
}
if len(uri.Host) == 0 || len(uri.RawPath) == 0 || len(uri.Path) == 0 {
logger.Warning("invalid SPIFFE ID: domain or workload ID is empty")
return nil
}
if len(uri.Host) > 255 {
logger.Warning("invalid SPIFFE ID: domain length larger than 255 characters")
return nil
}
// A valid SPIFFE certificate can only have exactly one URI SAN field.
if len(state.PeerCertificates[0].URIs) > 1 {
logger.Warning("invalid SPIFFE ID: multiple URI SANs")
return nil
}
spiffeID = uri
}
return spiffeID
}

@ -1,6 +1,8 @@
+// +build appengine
/*
*
-* Copyright 2017 gRPC authors.
+* Copyright 2020 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -16,29 +18,14 @@
*
*/
-// This file contains wrappers for grpclog functions.
-// The transport package only logs to verbose level 2 by default.
-package transport
-import "google.golang.org/grpc/grpclog"
-const logLevel = 2
-func infof(format string, args ...interface{}) {
-if grpclog.V(logLevel) {
-grpclog.Infof(format, args...)
-}
-}
-func warningf(format string, args ...interface{}) {
-if grpclog.V(logLevel) {
-grpclog.Warningf(format, args...)
-}
-}
-func errorf(format string, args ...interface{}) {
-if grpclog.V(logLevel) {
-grpclog.Errorf(format, args...)
-}
+package credentials
+import (
+"crypto/tls"
+"net/url"
+)
+// SPIFFEIDFromState is a no-op for appengine builds.
+func SPIFFEIDFromState(state tls.ConnectionState) *url.URL {
+return nil
}

@ -18,8 +18,7 @@
*
*/
-// Package internal contains credentials-internal code.
-package internal
+package credentials
import (
"net"

@ -18,7 +18,7 @@
*
*/
-package internal
+package credentials
import (
"net"

@ -0,0 +1,50 @@
/*
*
* Copyright 2020 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package credentials
import "crypto/tls"
const alpnProtoStrH2 = "h2"
// AppendH2ToNextProtos appends h2 to next protos.
func AppendH2ToNextProtos(ps []string) []string {
for _, p := range ps {
if p == alpnProtoStrH2 {
return ps
}
}
ret := make([]string, 0, len(ps)+1)
ret = append(ret, ps...)
return append(ret, alpnProtoStrH2)
}
// CloneTLSConfig returns a shallow clone of the exported
// fields of cfg, ignoring the unexported sync.Once, which
// contains a mutex and must not be copied.
//
// If cfg is nil, a new zero tls.Config is returned.
//
// TODO: inline this function if possible.
func CloneTLSConfig(cfg *tls.Config) *tls.Config {
if cfg == nil {
return &tls.Config{}
}
return cfg.Clone()
}
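Two usage notes on the helpers above: AppendH2ToNextProtos only appends "h2" when it is missing, and CloneTLSConfig exists because tls.Config embeds a sync.Once and must not be copied by value. A sketch written as if in the same package, with an illustrative helper name:

```go
package credentials

import "crypto/tls"

// newTransportConfig is an illustrative helper, not part of the commit: clone
// the caller's config rather than copying it by value, then make sure "h2" is
// advertised via ALPN.
func newTransportConfig(base *tls.Config) *tls.Config {
	cfg := CloneTLSConfig(base) // safe even when base is nil
	cfg.NextProtos = AppendH2ToNextProtos(cfg.NextProtos)
	return cfg
}
```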

@ -34,5 +34,5 @@ var (
// Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on".
Retry = strings.EqualFold(os.Getenv(retryStr), "on")
// TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false").
-TXTErrIgnore = !strings.EqualFold(os.Getenv(retryStr), "false")
+TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false")
)

@ -19,6 +19,10 @@
// Package grpclog (internal) defines depth logging for grpc.
package grpclog
+import (
+"os"
+)
// Logger is the logger used for the non-depth log functions.
var Logger LoggerV2
@ -30,7 +34,7 @@ func InfoDepth(depth int, args ...interface{}) {
if DepthLogger != nil {
DepthLogger.InfoDepth(depth, args...)
} else {
-Logger.Info(args...)
+Logger.Infoln(args...)
}
}
@ -39,7 +43,7 @@ func WarningDepth(depth int, args ...interface{}) {
if DepthLogger != nil {
DepthLogger.WarningDepth(depth, args...)
} else {
-Logger.Warning(args...)
+Logger.Warningln(args...)
}
}
@ -48,7 +52,7 @@ func ErrorDepth(depth int, args ...interface{}) {
if DepthLogger != nil {
DepthLogger.ErrorDepth(depth, args...)
} else {
-Logger.Error(args...)
+Logger.Errorln(args...)
}
}
@ -57,8 +61,9 @@ func FatalDepth(depth int, args ...interface{}) {
if DepthLogger != nil {
DepthLogger.FatalDepth(depth, args...)
} else {
-Logger.Fatal(args...)
+Logger.Fatalln(args...)
}
+os.Exit(1)
}
// LoggerV2 does underlying logging work for grpclog.
@ -105,7 +110,10 @@ type LoggerV2 interface {
// This is a copy of the DepthLoggerV2 defined in the external grpclog package.
// It is defined here to avoid a circular dependency.
//
-// This API is EXPERIMENTAL.
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
type DepthLoggerV2 interface {
// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print.
InfoDepth(depth int, args ...interface{})

@ -18,10 +18,15 @@
package grpclog
+import (
+"fmt"
+)
// PrefixLogger does logging with a prefix.
//
// Logging method on a nil logs without any prefix.
type PrefixLogger struct {
+logger DepthLoggerV2
prefix string
}
@ -30,34 +35,47 @@ func (pl *PrefixLogger) Infof(format string, args ...interface{}) {
if pl != nil {
// Handle nil, so the tests can pass in a nil logger.
format = pl.prefix + format
+pl.logger.InfoDepth(1, fmt.Sprintf(format, args...))
+return
}
-Logger.Infof(format, args...)
+InfoDepth(1, fmt.Sprintf(format, args...))
}
// Warningf does warning logging.
func (pl *PrefixLogger) Warningf(format string, args ...interface{}) {
if pl != nil {
format = pl.prefix + format
+pl.logger.WarningDepth(1, fmt.Sprintf(format, args...))
+return
}
-Logger.Warningf(format, args...)
+WarningDepth(1, fmt.Sprintf(format, args...))
}
// Errorf does error logging.
func (pl *PrefixLogger) Errorf(format string, args ...interface{}) {
if pl != nil {
format = pl.prefix + format
+pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...))
+return
}
-Logger.Errorf(format, args...)
+ErrorDepth(1, fmt.Sprintf(format, args...))
}
// Debugf does info logging at verbose level 2.
func (pl *PrefixLogger) Debugf(format string, args ...interface{}) {
-if Logger.V(2) {
-pl.Infof(format, args...)
+if !Logger.V(2) {
+return
}
+if pl != nil {
+// Handle nil, so the tests can pass in a nil logger.
+format = pl.prefix + format
+pl.logger.InfoDepth(1, fmt.Sprintf(format, args...))
+return
+}
+InfoDepth(1, fmt.Sprintf(format, args...))
}
// NewPrefixLogger creates a prefix logger with the given prefix.
-func NewPrefixLogger(prefix string) *PrefixLogger {
-return &PrefixLogger{prefix: prefix}
+func NewPrefixLogger(logger DepthLoggerV2, prefix string) *PrefixLogger {
+return &PrefixLogger{logger: logger, prefix: prefix}
}
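NewPrefixLogger now requires the component logger up front, so per-object loggers are constructed as sketched below. This is an internal grpc package, so the sketch only compiles inside the grpc module; the component name, prefix, and helper are illustrative.

```go
package example

import (
	"fmt"

	"google.golang.org/grpc/grpclog"
	internalgrpclog "google.golang.org/grpc/internal/grpclog"
)

var logger = grpclog.Component("transport")

// newConnLogger returns a logger whose lines all carry a per-connection
// prefix and are attributed to the "transport" component. Debugf output only
// appears at verbosity level 2 or higher, per the code above.
func newConnLogger(connID int64) *internalgrpclog.PrefixLogger {
	return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[conn %d] ", connID))
}
```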

@ -0,0 +1,63 @@
/*
*
* Copyright 2020 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package grpcutil
import (
"strconv"
"time"
)
const maxTimeoutValue int64 = 100000000 - 1
// div does integer division and round-up the result. Note that this is
// equivalent to (d+r-1)/r but has less chance to overflow.
func div(d, r time.Duration) int64 {
if d%r > 0 {
return int64(d/r + 1)
}
return int64(d / r)
}
// EncodeDuration encodes the duration to the format grpc-timeout header
// accepts.
//
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
func EncodeDuration(t time.Duration) string {
// TODO: This is simplistic and not bandwidth efficient. Improve it.
if t <= 0 {
return "0n"
}
if d := div(t, time.Nanosecond); d <= maxTimeoutValue {
return strconv.FormatInt(d, 10) + "n"
}
if d := div(t, time.Microsecond); d <= maxTimeoutValue {
return strconv.FormatInt(d, 10) + "u"
}
if d := div(t, time.Millisecond); d <= maxTimeoutValue {
return strconv.FormatInt(d, 10) + "m"
}
if d := div(t, time.Second); d <= maxTimeoutValue {
return strconv.FormatInt(d, 10) + "S"
}
if d := div(t, time.Minute); d <= maxTimeoutValue {
return strconv.FormatInt(d, 10) + "M"
}
// Note that maxTimeoutValue * time.Hour > MaxInt64.
return strconv.FormatInt(div(t, time.Hour), 10) + "H"
}
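Because each unit is only used while the rounded value still fits in eight decimal digits (the maxTimeoutValue cutoff), the encoded form is not always the most obvious unit. The illustrative in-package test below, not part of the commit, pins down a few values that follow from the code above.

```go
package grpcutil

import (
	"testing"
	"time"
)

func TestEncodeDurationExamples(t *testing.T) {
	cases := map[time.Duration]string{
		0:                      "0n",        // non-positive durations clamp to zero
		50 * time.Millisecond:  "50000000n", // 5e7 ns still fits under the cutoff
		100 * time.Millisecond: "100000u",   // 1e8 ns does not, so microseconds
		time.Second:            "1000000u",
		time.Hour:              "3600000m", // milliseconds, not hours
	}
	for in, want := range cases {
		if got := EncodeDuration(in); got != want {
			t.Errorf("EncodeDuration(%v) = %q, want %q", in, got, want)
		}
	}
}
```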

@ -0,0 +1,40 @@
/*
*
* Copyright 2020 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package grpcutil
import (
"context"
"google.golang.org/grpc/metadata"
)
type mdExtraKey struct{}
// WithExtraMetadata creates a new context with incoming md attached.
func WithExtraMetadata(ctx context.Context, md metadata.MD) context.Context {
return context.WithValue(ctx, mdExtraKey{}, md)
}
// ExtraMetadata returns the incoming metadata in ctx if it exists. The
// returned MD should not be modified. Writing to it may cause races.
// Modification should be made to copies of the returned MD.
func ExtraMetadata(ctx context.Context) (md metadata.MD, ok bool) {
md, ok = ctx.Value(mdExtraKey{}).(metadata.MD)
return
}
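A short sketch of the intended round trip: metadata stored with WithExtraMetadata rides inside the context and is read back with ExtraMetadata. Written as if in the same package; the header value is illustrative.

```go
package grpcutil

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

func exampleExtraMetadata(ctx context.Context) {
	ctx = WithExtraMetadata(ctx, metadata.Pairs("content-type", "application/grpc+proto"))

	if md, ok := ExtraMetadata(ctx); ok {
		// Treat the returned MD as read-only; copy it before mutating.
		fmt.Println(md.Get("content-type"))
	}
}
```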

@ -0,0 +1,84 @@
/*
*
* Copyright 2018 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package grpcutil
import (
"errors"
"strings"
)
// ParseMethod splits service and method from the input. It expects format
// "/service/method".
//
func ParseMethod(methodName string) (service, method string, _ error) {
if !strings.HasPrefix(methodName, "/") {
return "", "", errors.New("invalid method name: should start with /")
}
methodName = methodName[1:]
pos := strings.LastIndex(methodName, "/")
if pos < 0 {
return "", "", errors.New("invalid method name: suffix /method is missing")
}
return methodName[:pos], methodName[pos+1:], nil
}
const baseContentType = "application/grpc"
// ContentSubtype returns the content-subtype for the given content-type. The
// given content-type must be a valid content-type that starts with
// "application/grpc". A content-subtype will follow "application/grpc" after a
// "+" or ";". See
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
// more details.
//
// If contentType is not a valid content-type for gRPC, the boolean
// will be false, otherwise true. If content-type == "application/grpc",
// "application/grpc+", or "application/grpc;", the boolean will be true,
// but no content-subtype will be returned.
//
// contentType is assumed to be lowercase already.
func ContentSubtype(contentType string) (string, bool) {
if contentType == baseContentType {
return "", true
}
if !strings.HasPrefix(contentType, baseContentType) {
return "", false
}
// guaranteed since != baseContentType and has baseContentType prefix
switch contentType[len(baseContentType)] {
case '+', ';':
// this will return true for "application/grpc+" or "application/grpc;"
// which the previous validContentType function tested to be valid, so we
// just say that no content-subtype is specified in this case
return contentType[len(baseContentType)+1:], true
default:
return "", false
}
}
// ContentType builds full content type with the given sub-type.
//
// contentSubtype is assumed to be lowercase
func ContentType(contentSubtype string) string {
if contentSubtype == "" {
return baseContentType
}
return baseContentType + "+" + contentSubtype
}
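These helpers are plain string manipulation, so a few concrete inputs and outputs make the contract clear. The in-package test below is illustrative and not part of the commit.

```go
package grpcutil

import "testing"

func TestMethodHelperExamples(t *testing.T) {
	if svc, m, err := ParseMethod("/helloworld.Greeter/SayHello"); err != nil ||
		svc != "helloworld.Greeter" || m != "SayHello" {
		t.Errorf("ParseMethod: got (%q, %q, %v)", svc, m, err)
	}
	if _, _, err := ParseMethod("helloworld.Greeter/SayHello"); err == nil {
		t.Error("ParseMethod should reject names without a leading slash")
	}

	if sub, ok := ContentSubtype("application/grpc+proto"); !ok || sub != "proto" {
		t.Errorf("ContentSubtype: got (%q, %v)", sub, ok)
	}
	if sub, ok := ContentSubtype("application/grpc"); !ok || sub != "" {
		t.Errorf("ContentSubtype: got (%q, %v)", sub, ok)
	}
	if got := ContentType("proto"); got != "application/grpc+proto" {
		t.Errorf("ContentType: got %q", got)
	}
}
```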

@ -25,6 +25,7 @@ import (
"time"
"google.golang.org/grpc/connectivity"
+"google.golang.org/grpc/serviceconfig"
)
var (
@ -40,9 +41,22 @@ var (
// NewRequestInfoContext creates a new context based on the argument context attaching
// the passed in RequestInfo to the new context.
NewRequestInfoContext interface{} // func(context.Context, credentials.RequestInfo) context.Context
+// NewClientHandshakeInfoContext returns a copy of the input context with
+// the passed in ClientHandshakeInfo struct added to it.
+NewClientHandshakeInfoContext interface{} // func(context.Context, credentials.ClientHandshakeInfo) context.Context
// ParseServiceConfigForTesting is for creating a fake
// ClientConn for resolver testing only
ParseServiceConfigForTesting interface{} // func(string) *serviceconfig.ParseResult
+// EqualServiceConfigForTesting is for testing service config generation and
+// parsing. Both a and b should be returned by ParseServiceConfigForTesting.
+// This function compares the config without rawJSON stripped, in case the
+// there's difference in white space.
+EqualServiceConfigForTesting func(a, b serviceconfig.Config) bool
+// GetCertificateProviderBuilder returns the registered builder for the
+// given name. This is set by package certprovider for use from xDS
+// bootstrap code while parsing certificate provider configs in the
+// bootstrap file.
+GetCertificateProviderBuilder interface{} // func(string) certprovider.Builder
)
// HealthChecker defines the signature of the client-side LB channel health checking function.

Some files were not shown because too many files have changed in this diff.