build error fix: replace grpc to 1.29.1 (#577)

yubo 2021-02-03 16:42:27 +08:00 committed by GitHub
parent 3fdd61edfc
commit db97453c54
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
77 changed files with 1408 additions and 2418 deletions

19
go.mod
View File

@ -8,12 +8,11 @@ require (
github.com/codegangsta/negroni v1.0.0
github.com/coreos/go-oidc v2.2.1+incompatible
github.com/dgryski/go-tsz v0.0.0-20180227144327-03b7d791f4fe
github.com/ericchiang/k8s v1.2.0 // indirect
github.com/ericchiang/k8s v1.2.0
github.com/garyburd/redigo v1.6.2
github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 // indirect
github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32
github.com/gin-contrib/pprof v1.3.0
github.com/gin-gonic/gin v1.6.3
github.com/glinton/ping v0.1.4-0.20200311211934-5ac87da8cd96 // indirect
github.com/go-sql-driver/mysql v1.5.0
github.com/google/uuid v1.1.2
github.com/gorilla/mux v1.7.3
@ -21,20 +20,17 @@ require (
github.com/hpcloud/tail v1.0.0
github.com/influxdata/influxdb v1.8.0
github.com/influxdata/telegraf v1.17.2
github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65 // indirect
github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8 // indirect
github.com/m3db/m3 v0.15.17
github.com/mattn/go-isatty v0.0.12
github.com/mattn/go-sqlite3 v1.14.0 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.1
github.com/mojocn/base64Captcha v1.3.1
github.com/open-falcon/rrdlite v0.0.0-20200214140804-bf5829f786ad
github.com/pquerna/cachecontrol v0.0.0-20200819021114-67c6ae64274f // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.9.1 // indirect
github.com/prometheus/client_model v0.2.0
github.com/prometheus/common v0.9.1
github.com/robfig/go-cache v0.0.0-20130306151617-9fc39e0dbf62 // indirect
github.com/shirou/gopsutil v3.20.11+incompatible // indirect
github.com/soniah/gosnmp v1.25.0 // indirect
github.com/spaolacci/murmur3 v1.1.0
github.com/spf13/viper v1.7.1
github.com/streadway/amqp v1.0.0
@ -48,7 +44,7 @@ require (
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect
gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df
gopkg.in/ldap.v3 v3.1.0
gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce // indirect
gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce
gopkg.in/square/go-jose.v2 v2.5.1 // indirect
gopkg.in/yaml.v2 v2.3.0
xorm.io/core v0.7.3
@ -85,3 +81,6 @@ replace go.etcd.io/bbolt => go.etcd.io/bbolt v1.3.5
// https://github.com/ory/dockertest/issues/212
replace golang.org/x/sys => golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6
// for transfer
replace google.golang.org/grpc => google.golang.org/grpc v1.29.1
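
For reference, the pin above can be reproduced from the command line with the standard Go toolchain; a minimal sketch (`go mod tidy` is only needed afterwards to refresh go.sum):

```console
$ go mod edit -replace=google.golang.org/grpc=google.golang.org/grpc@v1.29.1
$ go mod tidy
```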

46
go.sum
View File

@ -1,5 +1,4 @@
bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
@ -88,8 +87,6 @@ github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdko
github.com/RoaringBitmap/roaring v0.4.21 h1:WJ/zIlNX4wQZ9x8Ey33O1UaD9TCTakYsdLFSBcTwH+8=
github.com/RoaringBitmap/roaring v0.4.21/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/sarama v1.27.1 h1:iUlzHymqWsITyttu6KxazcAz8WEj5FqcwFK/oEi7rE8=
github.com/Shopify/sarama v1.27.1/go.mod h1:g5s5osgELxgM+Md9Qni9rzo7Rbt+vvFQI4bt/Mc93II=
github.com/Shopify/sarama v1.27.2 h1:1EyY1dsxNDUQEv0O/4TsjosHI2CgB1uo9H/v56xzTxc=
github.com/Shopify/sarama v1.27.2/go.mod h1:g5s5osgELxgM+Md9Qni9rzo7Rbt+vvFQI4bt/Mc93II=
github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc=
@ -181,7 +178,6 @@ github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6D
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6/go.mod h1:ugEfq4B8T8ciw/h5mCkgdiDRFS4CkqqhH2dymDB4knc=
github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa h1:OaNxuTZr7kxeODyLWsRMC+OD03aFUH+mW6r2d+MWa5Y=
@ -256,9 +252,6 @@ github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZi
github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/ericchiang/k8s v1.2.0 h1:vxrMwEzY43oxu8aZyD/7b1s8tsBM+xoUoxjWECWFbPI=
@ -293,7 +286,6 @@ github.com/gin-gonic/gin v1.6.2/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwv
github.com/gin-gonic/gin v1.6.3 h1:ahKqKTFpO5KTPHxWZjEdPScmYaGtLo8Y4DMHoEsnp14=
github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M=
github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
github.com/glinton/ping v0.1.4-0.20200311211934-5ac87da8cd96/go.mod h1:uY+1eqFUyotrQxF1wYFNtMeHp/swbYRsoGzfcPZ8x3o=
github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
@ -377,7 +369,6 @@ github.com/goburrow/serial v0.1.0/go.mod h1:sAiqG0nRVswsm1C97xsttiYCzSLBmUZ/VSlV
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
github.com/gofrs/uuid v2.1.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
@ -470,6 +461,7 @@ github.com/gorilla/sessions v1.2.0/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/z
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gosnmp/gosnmp v1.29.0 h1:fEkud7oiYVzR64L+/BQA7uvp+7COI9+XkrUQi8JunYM=
github.com/gosnmp/gosnmp v1.29.0/go.mod h1:Ux0YzU4nV5yDET7dNIijd0VST0BCy8ijBf+gTVFQeaM=
github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
@ -482,6 +474,7 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.14.1 h1:YuM9SXYy583fxvSOkzCDyBPCtY+/IMSHEG1dKFMLZsA=
github.com/grpc-ecosystem/grpc-gateway v1.14.1/go.mod h1:6CwZWGDSPRJidgKAtJVvND6soZe6fT7iteq8wDPdhb0=
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0/go.mod h1:dk23l2BruuUzRP8wbybQbPn3J7sZga2QHICCeaEy5rQ=
@ -558,8 +551,6 @@ github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19y
github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE=
github.com/influxdata/tail v1.0.1-0.20200707181643-03a791b270e4/go.mod h1:VeiWgI3qaGdJWust2fP27a6J+koITo/1c/UhxeOxgaM=
github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0=
github.com/influxdata/telegraf v1.16.2 h1:G988b0+CL2IVDft9V2ZUteKgnbp+eI7vtQ0liPhQKxw=
github.com/influxdata/telegraf v1.16.2/go.mod h1:LZ/6hlf60cwqGr8phfbRKf8x1HoAoqxoMpTp/iqcNXk=
github.com/influxdata/telegraf v1.17.2 h1:SfwhXtAbZeX4y56wLg3Ku5d88e2lmcBVIh7k2bg++84=
github.com/influxdata/telegraf v1.17.2/go.mod h1:lBWJl18yjThBjYAHnHeWFE3nI6mb4dTUUWB9HQFYOWs=
github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65 h1:vvyMtD5LTJc1W9sQKjDkAWdcg0478CszSdzlHtiAXCY=
@ -610,7 +601,6 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V
github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0=
github.com/kardianos/service v1.0.0/go.mod h1:8CzDhVuCuugtsHyZoTvsOBuvonN/UDBvl0kH+BUxvbo=
github.com/karrick/godirwalk v1.12.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
@ -638,6 +628,7 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kubernetes/apimachinery v0.0.0-20190119020841-d41becfba9ee/go.mod h1:Pe/YBTPc3vqoMkbuIWPH8CF9ehINdvNyS0dP3J6HC0s=
github.com/kylelemons/godebug v0.0.0-20160406211939-eadb3ce320cb/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/leanovate/gopter v0.2.8 h1:eFPtJ3aa5zLfbxGROSNY75T9Dume60CWBAqoWQ3h/ig=
github.com/leanovate/gopter v0.2.8/go.mod h1:gNcbPWNEWRe4lm+bycKqxUYoH5uoVje5SkOJ3uoLer8=
@ -653,7 +644,6 @@ github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-b
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
github.com/lightstep/lightstep-tracer-go v0.18.1 h1:vi1F1IQ8N7hNWytK9DpJsUfQhGuNSc19z330K6vl4zk=
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
github.com/m3db/bitset v2.0.0+incompatible/go.mod h1:X8CCqZmZxs2O6d4qHhiqtAKCin4G5mScPhiwX9rsc5c=
github.com/m3db/bloom v3.0.1+incompatible/go.mod h1:W6XzpFw4t+CIYq+NGyp5c2394YsUc1P1+W/KAWty2lU=
github.com/m3db/bloom/v4 v4.0.0-20200901140942-52efb8544fe9/go.mod h1:JDmGHlO6ygyY1V9eOHtXiNl3+axznDTrBqwWEeWALlQ=
@ -714,7 +704,6 @@ github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4f
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mauricelam/genny v0.0.0-20180903214747-eb2c5232c885/go.mod h1:wRyVMWiOZeVj+MieWS5tIBBtJ3RtqqMbPsA5Z+t5b5U=
github.com/mdlayher/apcupsd v0.0.0-20190314144147-eb3dd99a75fe/go.mod h1:y3mw3VG+t0m20OMqpG8RQqw8cDXvShVb+L8Z8FEnebw=
github.com/mdlayher/apcupsd v0.0.0-20200608131503-2bf01da7bf1b/go.mod h1:WYK/Z/aXq9cbMFIL5ihcA4sX/r/3/WCas/Qvs/2fXcA=
github.com/mdlayher/genetlink v1.0.0/go.mod h1:0rJ0h4itni50A86M2kHcgS85ttZazNt7a8H2a2cw0Gc=
github.com/mdlayher/netlink v0.0.0-20190409211403-11939a169225/go.mod h1:eQB3mZE4aiYnlUsyGGCOpPETfdQq4Jhsgf1fk3cwQaA=
@ -766,11 +755,9 @@ github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzE
github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/newrelic/newrelic-telemetry-sdk-go v0.2.0/go.mod h1:G9MqE/cHGv3Hx3qpYhfuyFUsGx2DpVcGi1iJIqTg+JQ=
github.com/newrelic/newrelic-telemetry-sdk-go v0.5.1/go.mod h1:2kY6OeOxrJ+RIQlVjWDc/pZlT3MIf30prs6drzMfJ6E=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nsqio/go-nsq v1.0.7/go.mod h1:XP5zaUs3pqf+Q71EqUJs3HYfBIqfK6G83WQMdNN+Ito=
github.com/nsqio/go-nsq v1.0.8/go.mod h1:vKq36oyeVXgsS5Q8YEO7WghqidAVXQlcFxzQbQTuDEY=
github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
@ -1036,7 +1023,6 @@ github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtX
github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio=
github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
github.com/vjeantet/grok v1.0.0/go.mod h1:/FWYEVYekkm+2VjcFmO9PufDU5FgXHUz9oy2EGqmQBo=
github.com/vjeantet/grok v1.0.1/go.mod h1:ax1aAchzC6/QMXMcyzHQGZWaW1l195+uMYIkCWPCNIo=
github.com/vmihailenco/msgpack v2.8.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk=
github.com/vmware/govmomi v0.19.0/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU=
@ -1138,7 +1124,6 @@ golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMx
golang.org/x/image v0.0.0-20190501045829-6d32002ffd75/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b h1:+qEpEAPhDZ1o0x3tHzZTQDArnOixOzGD9HUJfcg0mb4=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
@ -1160,7 +1145,6 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -1179,7 +1163,6 @@ golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@ -1230,11 +1213,9 @@ golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqG
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
@ -1336,30 +1317,10 @@ google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvx
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200317114155-1f3552e48f24 h1:IGPykv426z7LZSVPlaPufOyphngM4at5uZ7x5alaFvE=
google.golang.org/genproto v0.0.0-20200317114155-1f3552e48f24/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884 h1:fiNLklpBwWK1mth30Hlwk+fcdBmIALlgF5iy77O37Ig=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4=
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.33.1 h1:DGeFlSan2f+WEtCERJ4J9GJWk15TxUi8QGagfI87Xyc=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc h1:2gGKlE2+asNV9m7xrywl36YYNnBG5ZQ0r/BOOxqPpmk=
@ -1436,7 +1397,6 @@ gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclp
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
honnef.co/go/netdb v0.0.0-20150201073656-a416d700ae39/go.mod h1:rbNo0ST5hSazCG4rGfpHrwnwvzP1QX62WbhzD+ghGzs=
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

View File

@ -59,3 +59,6 @@ func TestCollect(t *testing.T) {
URLs: []string{"http://localhost:18080/metrics"},
})
}
func TestCollect2(t *testing.T) {
}

View File

@ -2,22 +2,22 @@ language: go
matrix:
include:
- go: 1.14.x
env: VET=1 GO111MODULE=on
- go: 1.14.x
env: RACE=1 GO111MODULE=on
- go: 1.14.x
env: RUN386=1
- go: 1.14.x
env: GRPC_GO_RETRY=on
- go: 1.14.x
env: TESTEXTRAS=1
- go: 1.13.x
env: GO111MODULE=on
env: VET=1 GO111MODULE=on
- go: 1.13.x
env: RACE=1 GO111MODULE=on
- go: 1.13.x
env: RUN386=1
- go: 1.13.x
env: GRPC_GO_RETRY=on
- go: 1.13.x
env: TESTEXTRAS=1
- go: 1.12.x
env: GO111MODULE=on
- go: 1.11.x # Keep until interop tests no longer require Go1.11
- go: 1.11.x
env: GO111MODULE=on
- go: 1.9.x
env: GAE=1
go_import_path: google.golang.org/grpc

View File

@ -57,5 +57,6 @@ How to get your contributions merged smoothly and quickly.
- `make vet` to catch vet errors
- `make test` to run the tests
- `make testrace` to run tests in race mode
- optional `make testappengine` to run tests with appengine
- Exceptions to the rules can be made if there's a compelling reason for doing so.

View File

@ -21,7 +21,6 @@ test: testdeps
testsubmodule: testdeps
cd security/advancedtls && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/advancedtls/...
cd security/authorization && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/authorization/...
testappengine: testappenginedeps
goapp test -cpu 1,4 -timeout 7m google.golang.org/grpc/...

View File

@ -1,53 +1,64 @@
# gRPC-Go
[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go)
[![GoDoc](https://pkg.go.dev/badge/google.golang.org/grpc)][API]
[![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc)
[![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go)
The [Go][] implementation of [gRPC][]: A high performance, open source, general
RPC framework that puts mobile and HTTP/2 first. For more information see the
[Go gRPC docs][], or jump directly into the [quick start][].
The Go implementation of [gRPC](https://grpc.io/): A high performance, open
source, general RPC framework that puts mobile and HTTP/2 first. For more
information see the [gRPC Quick Start:
Go](https://grpc.io/docs/quickstart/go.html) guide.
## Prerequisites
Installation
------------
- **[Go][]**: any one of the **three latest major** [releases][go-releases].
To install this package, you need to install Go and setup your Go workspace on
your computer. The simplest way to install the library is to run:
## Installation
With [Go module][] support (Go 1.11+), simply add the following import
```go
import "google.golang.org/grpc"
```
to your code, and then `go [build|run|test]` will automatically fetch the
necessary dependencies.
Otherwise, to install the `grpc-go` package, run the following command:
```console
$ go get -u google.golang.org/grpc
```
> **Note:** If you are trying to access `grpc-go` from **China**, see the
> [FAQ](#FAQ) below.
With Go module support (Go 1.11+), simply `import "google.golang.org/grpc"` in
your source code and `go [build|run|test]` will automatically download the
necessary dependencies ([Go modules
ref](https://github.com/golang/go/wiki/Modules)).
## Learn more
If you are trying to access grpc-go from within China, please see the
[FAQ](#FAQ) below.
- [Go gRPC docs][], which include a [quick start][] and [API
reference][API] among other resources
- [Low-level technical docs](Documentation) from this repository
- [Performance benchmark][]
- [Examples](examples)
Prerequisites
-------------
gRPC-Go requires Go 1.9 or later.
## FAQ
Documentation
-------------
- See [godoc](https://godoc.org/google.golang.org/grpc) for package and API
descriptions.
- Documentation on specific topics can be found in the [Documentation
directory](Documentation/).
- Examples can be found in the [examples directory](examples/).
### I/O Timeout Errors
Performance
-----------
Performance benchmark data for grpc-go and other languages is maintained in
[this
dashboard](https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5652536396611584&widget=490377658&container=1286539696).
Status
------
General Availability [Google Cloud Platform Launch
Stages](https://cloud.google.com/terms/launch-stages).
FAQ
---
#### I/O Timeout Errors
The `golang.org` domain may be blocked from some countries. `go get` usually
produces an error like the following when this happens:
```console
```
$ go get -u google.golang.org/grpc
package google.golang.org/grpc: unrecognized import path "google.golang.org/grpc" (https fetch: Get https://google.golang.org/grpc?go-get=1: dial tcp 216.239.37.1:443: i/o timeout)
```
@ -58,7 +69,7 @@ To build Go code, there are several options:
- Without Go module support: `git clone` the repo manually:
```sh
```
git clone https://github.com/grpc/grpc-go.git $GOPATH/src/google.golang.org/grpc
```
@ -68,7 +79,7 @@ To build Go code, there are several options:
- With Go module support: it is possible to use the `replace` feature of `go
mod` to create aliases for golang.org packages. In your project's directory:
```sh
```
go mod edit -replace=google.golang.org/grpc=github.com/grpc/grpc-go@latest
go mod tidy
go mod vendor
@ -76,17 +87,19 @@ To build Go code, there are several options:
```
Again, this will need to be done for all transitive dependencies hosted on
golang.org as well. For details, refer to [golang/go issue #28652](https://github.com/golang/go/issues/28652).
golang.org as well. Please refer to [this
issue](https://github.com/golang/go/issues/28652) in the golang repo regarding
this concern.
### Compiling error, undefined: grpc.SupportPackageIsVersion
#### Compiling error, undefined: grpc.SupportPackageIsVersion
#### If you are using Go modules:
##### If you are using Go modules:
Ensure your gRPC-Go version is `require`d at the appropriate version in
Please ensure your gRPC-Go version is `require`d at the appropriate version in
the same module containing the generated `.pb.go` files. For example,
`SupportPackageIsVersion6` needs `v1.27.0`, so in your `go.mod` file:
```go
```
module <your module name>
require (
@ -94,27 +107,23 @@ require (
)
```
#### If you are *not* using Go modules:
##### If you are *not* using Go modules:
Update the `proto` package, gRPC package, and rebuild the `.proto` files:
Please update proto package, gRPC package and rebuild the proto files:
- `go get -u github.com/golang/protobuf/{proto,protoc-gen-go}`
- `go get -u google.golang.org/grpc`
- `protoc --go_out=plugins=grpc:. *.proto`
```sh
go get -u github.com/golang/protobuf/{proto,protoc-gen-go}
go get -u google.golang.org/grpc
protoc --go_out=plugins=grpc:. *.proto
#### How to turn on logging
The default logger is controlled by the environment variables. Turn everything
on by setting:
```
GRPC_GO_LOG_VERBOSITY_LEVEL=99 GRPC_GO_LOG_SEVERITY_LEVEL=info
```
### How to turn on logging
The default logger is controlled by environment variables. Turn everything on
like this:
```console
$ export GRPC_GO_LOG_VERBOSITY_LEVEL=99
$ export GRPC_GO_LOG_SEVERITY_LEVEL=info
```
### The RPC failed with error `"code = Unavailable desc = transport is closing"`
#### The RPC failed with error `"code = Unavailable desc = transport is closing"`
This error means the connection the RPC is using was closed, and there are many
possible reasons, including:
@ -130,12 +139,3 @@ It can be tricky to debug this because the error happens on the client side but
the root cause of the connection being closed is on the server side. Turn on
logging on __both client and server__, and see if there are any transport
errors.
[API]: https://pkg.go.dev/google.golang.org/grpc
[Go]: https://golang.org
[Go module]: https://github.com/golang/go/wiki/Modules
[gRPC]: https://grpc.io
[Go gRPC docs]: https://grpc.io/docs/languages/go
[Performance benchmark]: https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5652536396611584&widget=490377658&container=1286539696
[quick start]: https://grpc.io/docs/languages/go/quickstart
[go-releases]: https://golang.org/doc/devel/release.html

View File

@ -19,10 +19,7 @@
// Package attributes defines a generic key/value store used in various gRPC
// components.
//
// Experimental
//
// Notice: This package is EXPERIMENTAL and may be changed or removed in a
// later release.
// All APIs in this package are EXPERIMENTAL.
package attributes
import "fmt"
@ -53,9 +50,6 @@ func New(kvs ...interface{}) *Attributes {
// times, the last value overwrites all previous values for that key. To
// remove an existing key, use a nil value.
func (a *Attributes) WithValues(kvs ...interface{}) *Attributes {
if a == nil {
return New(kvs...)
}
if len(kvs)%2 != 0 {
panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs)))
}
@ -72,8 +66,5 @@ func (a *Attributes) WithValues(kvs ...interface{}) *Attributes {
// Value returns the value associated with these attributes for key, or nil if
// no value is associated with key.
func (a *Attributes) Value(key interface{}) interface{} {
if a == nil {
return nil
}
return a.m[key]
}
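
As context for the hunk above: after this revert the methods no longer guard against a nil receiver, so callers start from `New`. A minimal usage sketch against the pinned v1.29.1 API (the package name, key, and values below are illustrative only):

```go
package attributesdemo // illustrative package name

import (
	"fmt"

	"google.golang.org/grpc/attributes"
)

// key is a dedicated key type; attribute keys are compared with ==, so an
// unexported type avoids collisions with keys set by other packages.
type key string

func Example() {
	a := attributes.New(key("region"), "us-east-1") // start from New: no nil-receiver guard at v1.29.1
	a = a.WithValues(key("weight"), 3)              // the last value written for a key wins
	fmt.Println(a.Value(key("region")), a.Value(key("weight"))) // prints: us-east-1 3
}
```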

View File

@ -48,10 +48,7 @@ type BackoffConfig struct {
// here for more details:
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
//
// Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
// This API is EXPERIMENTAL.
type ConnectParams struct {
// Backoff specifies the configuration options for connection backoff.
Backoff backoff.Config
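
A hedged sketch of how this type is typically consumed when dialing, assuming the pinned grpc-go v1.29.1 where `grpc.WithConnectParams` and the `google.golang.org/grpc/backoff` package are available (the target and timing values are illustrative):

```go
package dialdemo // illustrative package name

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
)

// dial opens a client connection with explicit reconnect backoff settings.
func dial(target string) (*grpc.ClientConn, error) {
	return grpc.Dial(target,
		grpc.WithInsecure(), // plaintext, for the sketch only
		grpc.WithConnectParams(grpc.ConnectParams{
			Backoff: backoff.Config{
				BaseDelay:  1 * time.Second,
				Multiplier: 1.6,
				Jitter:     0.2,
				MaxDelay:   2 * time.Minute,
			},
			MinConnectTimeout: 20 * time.Second,
		}),
	)
}
```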

View File

@ -111,9 +111,6 @@ type NewSubConnOptions struct {
// CredsBundle is the credentials bundle that will be used in the created
// SubConn. If it's nil, the original creds from grpc DialOptions will be
// used.
//
// Deprecated: Use the Attributes field in resolver.Address to pass
// arbitrary data to the credential handshaker.
CredsBundle credentials.Bundle
// HealthCheckEnabled indicates whether health check service should be
// enabled on this SubConn
@ -126,7 +123,7 @@ type State struct {
// determine the state of the ClientConn.
ConnectivityState connectivity.State
// Picker is used to choose connections (SubConns) for RPCs.
Picker Picker
Picker V2Picker
}
// ClientConn represents a gRPC ClientConn.
@ -144,11 +141,20 @@ type ClientConn interface {
// The SubConn will be shutdown.
RemoveSubConn(SubConn)
// UpdateBalancerState is called by balancer to notify gRPC that some internal
// state in balancer has changed.
//
// gRPC will update the connectivity state of the ClientConn, and will call pick
// on the new picker to pick new SubConn.
//
// Deprecated: use UpdateState instead
UpdateBalancerState(s connectivity.State, p Picker)
// UpdateState notifies gRPC that the balancer's internal state has
// changed.
//
// gRPC will update the connectivity state of the ClientConn, and will call
// Pick on the new Picker to pick new SubConns.
// gRPC will update the connectivity state of the ClientConn, and will call pick
// on the new picker to pick new SubConns.
UpdateState(State)
// ResolveNow is called by balancer to notify gRPC to do a name resolving.
@ -226,17 +232,56 @@ type DoneInfo struct {
var (
// ErrNoSubConnAvailable indicates no SubConn is available for pick().
// gRPC will block the RPC until a new picker is available via UpdateState().
// gRPC will block the RPC until a new picker is available via UpdateBalancerState().
ErrNoSubConnAvailable = errors.New("no SubConn is available")
// ErrTransientFailure indicates all SubConns are in TransientFailure.
// WaitForReady RPCs will block, non-WaitForReady RPCs will fail.
//
// Deprecated: return an appropriate error based on the last resolution or
// connection attempt instead. The behavior is the same for any non-gRPC
// status error.
ErrTransientFailure = errors.New("all SubConns are in TransientFailure")
ErrTransientFailure = TransientFailureError(errors.New("all SubConns are in TransientFailure"))
)
// Picker is used by gRPC to pick a SubConn to send an RPC.
// Balancer is expected to generate a new picker from its snapshot every time its
// internal state has changed.
//
// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState().
//
// Deprecated: use V2Picker instead
type Picker interface {
// Pick returns the SubConn to be used to send the RPC.
// The returned SubConn must be one returned by NewSubConn().
//
// This function is expected to return:
// - a SubConn that is known to be READY;
// - ErrNoSubConnAvailable if no SubConn is available, but progress is being
// made (for example, some SubConn is in CONNECTING mode);
// - other errors if no active connecting is happening (for example, all SubConn
// are in TRANSIENT_FAILURE mode).
//
// If a SubConn is returned:
// - If it is READY, gRPC will send the RPC on it;
// - If it is not ready, or becomes not ready after it's returned, gRPC will
// block until UpdateBalancerState() is called and will call pick on the
// new picker. The done function returned from Pick(), if not nil, will be
// called with nil error, no bytes sent and no bytes received.
//
// If the returned error is not nil:
// - If the error is ErrNoSubConnAvailable, gRPC will block until UpdateBalancerState()
// - If the error is ErrTransientFailure or implements IsTransientFailure()
// bool, returning true:
// - If the RPC is wait-for-ready, gRPC will block until UpdateBalancerState()
// is called to pick again;
// - Otherwise, RPC will fail with unavailable error.
// - Else (error is other non-nil error):
// - The RPC will fail with the error's status code, or Unknown if it is
// not a status error.
//
// The returned done() function will be called once the rpc has finished,
// with the final status of that RPC. If the SubConn returned is not a
// valid SubConn type, done may not be called. done may be nil if balancer
// doesn't care about the RPC status.
Pick(ctx context.Context, info PickInfo) (conn SubConn, done func(DoneInfo), err error)
}
// PickResult contains information related to a connection chosen for an RPC.
type PickResult struct {
// SubConn is the connection to use for this pick, if its state is Ready.
@ -252,19 +297,24 @@ type PickResult struct {
Done func(DoneInfo)
}
// TransientFailureError returns e. It exists for backward compatibility and
// will be deleted soon.
//
// Deprecated: no longer necessary, picker errors are treated this way by
// default.
func TransientFailureError(e error) error { return e }
type transientFailureError struct {
error
}
// Picker is used by gRPC to pick a SubConn to send an RPC.
func (e *transientFailureError) IsTransientFailure() bool { return true }
// TransientFailureError wraps err in an error implementing
// IsTransientFailure() bool, returning true.
func TransientFailureError(err error) error {
return &transientFailureError{error: err}
}
// V2Picker is used by gRPC to pick a SubConn to send an RPC.
// Balancer is expected to generate a new picker from its snapshot every time its
// internal state has changed.
//
// The pickers used by gRPC can be updated by ClientConn.UpdateState().
type Picker interface {
// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState().
type V2Picker interface {
// Pick returns the connection to use for this RPC and related information.
//
// Pick should not block. If the balancer needs to do I/O or any blocking
@ -277,13 +327,14 @@ type Picker interface {
// - If the error is ErrNoSubConnAvailable, gRPC will block until a new
// Picker is provided by the balancer (using ClientConn.UpdateState).
//
// - If the error is a status error (implemented by the grpc/status
// package), gRPC will terminate the RPC with the code and message
// provided.
// - If the error implements IsTransientFailure() bool, returning true,
// wait for ready RPCs will wait, but non-wait for ready RPCs will be
// terminated with this error's Error() string and status code
// Unavailable.
//
// - For all other errors, wait for ready RPCs will wait, but non-wait for
// ready RPCs will be terminated with this error's Error() string and
// status code Unavailable.
// - Any other errors terminate all RPCs with the code and message
// provided. If the error is not a status error, it will be converted by
// gRPC to a status error with code Unknown.
Pick(info PickInfo) (PickResult, error)
}
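
The hunk above restores the pre-1.30 split between the deprecated `Picker` and `V2Picker`. As a reference point, a minimal `V2Picker` against the pinned v1.29.1 might look like this sketch (the package, type, and field names are made up for illustration):

```go
package pickerdemo // illustrative package name

import (
	"sync/atomic"

	"google.golang.org/grpc/balancer"
)

// rrPicker rotates over the READY SubConns handed to it by the balancer.
// Pick must not block; any blocking work belongs in the balancer itself.
type rrPicker struct {
	subConns []balancer.SubConn
	next     uint32
}

var _ balancer.V2Picker = (*rrPicker)(nil) // compile-time interface check

func (p *rrPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
	if len(p.subConns) == 0 {
		// Makes gRPC block wait-for-ready RPCs until the balancer supplies a new picker.
		return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
	}
	n := atomic.AddUint32(&p.next, 1)
	return balancer.PickResult{SubConn: p.subConns[n%uint32(len(p.subConns))]}, nil
}
```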
@ -292,21 +343,29 @@ type Picker interface {
//
// It also generates and updates the Picker used by gRPC to pick SubConns for RPCs.
//
// UpdateClientConnState, ResolverError, UpdateSubConnState, and Close are
// guaranteed to be called synchronously from the same goroutine. There's no
// guarantee on picker.Pick, it may be called anytime.
// HandleSubConnectionStateChange, HandleResolvedAddrs and Close are guaranteed
// to be called synchronously from the same goroutine.
// There's no guarantee on picker.Pick, it may be called anytime.
type Balancer interface {
// UpdateClientConnState is called by gRPC when the state of the ClientConn
// changes. If the error returned is ErrBadResolverState, the ClientConn
// will begin calling ResolveNow on the active name resolver with
// exponential backoff until a subsequent call to UpdateClientConnState
// returns a nil error. Any other errors are currently ignored.
UpdateClientConnState(ClientConnState) error
// ResolverError is called by gRPC when the name resolver reports an error.
ResolverError(error)
// UpdateSubConnState is called by gRPC when the state of a SubConn
// changes.
UpdateSubConnState(SubConn, SubConnState)
// HandleSubConnStateChange is called by gRPC when the connectivity state
// of sc has changed.
// Balancer is expected to aggregate all the state of SubConn and report
// that back to gRPC.
// Balancer should also generate and update Pickers when its internal state has
// been changed by the new state.
//
// Deprecated: if V2Balancer is implemented by the Balancer,
// UpdateSubConnState will be called instead.
HandleSubConnStateChange(sc SubConn, state connectivity.State)
// HandleResolvedAddrs is called by gRPC to send updated resolved addresses to
// balancers.
// Balancer can create new SubConn or remove SubConn with the addresses.
// An empty address slice and a non-nil error will be passed if the resolver returns
// non-nil error to gRPC.
//
// Deprecated: if V2Balancer is implemented by the Balancer,
// UpdateClientConnState will be called instead.
HandleResolvedAddrs([]resolver.Address, error)
// Close closes the balancer. The balancer is not required to call
// ClientConn.RemoveSubConn for its existing SubConns.
Close()
@ -334,6 +393,27 @@ type ClientConnState struct {
// problem with the provided name resolver data.
var ErrBadResolverState = errors.New("bad resolver state")
// V2Balancer is defined for documentation purposes. If a Balancer also
// implements V2Balancer, its UpdateClientConnState method will be called
// instead of HandleResolvedAddrs and its UpdateSubConnState will be called
// instead of HandleSubConnStateChange.
type V2Balancer interface {
// UpdateClientConnState is called by gRPC when the state of the ClientConn
// changes. If the error returned is ErrBadResolverState, the ClientConn
// will begin calling ResolveNow on the active name resolver with
// exponential backoff until a subsequent call to UpdateClientConnState
// returns a nil error. Any other errors are currently ignored.
UpdateClientConnState(ClientConnState) error
// ResolverError is called by gRPC when the name resolver reports an error.
ResolverError(error)
// UpdateSubConnState is called by gRPC when the state of a SubConn
// changes.
UpdateSubConnState(SubConn, SubConnState)
// Close closes the balancer. The balancer is not required to call
// ClientConn.RemoveSubConn for its existing SubConns.
Close()
}
// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns
// and returns one aggregated connectivity state.
//

View File

@ -19,6 +19,7 @@
package base
import (
"context"
"errors"
"fmt"
@ -28,11 +29,10 @@ import (
"google.golang.org/grpc/resolver"
)
var logger = grpclog.Component("balancer")
type baseBuilder struct {
name string
pickerBuilder PickerBuilder
v2PickerBuilder V2PickerBuilder
config Config
}
@ -40,6 +40,7 @@ func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions)
bal := &baseBalancer{
cc: cc,
pickerBuilder: bb.pickerBuilder,
v2PickerBuilder: bb.v2PickerBuilder,
subConns: make(map[resolver.Address]balancer.SubConn),
scStates: make(map[balancer.SubConn]connectivity.State),
@ -49,7 +50,11 @@ func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions)
// Initialize picker to a picker that always returns
// ErrNoSubConnAvailable, because when state of a SubConn changes, we
// may call UpdateState with this picker.
if bb.pickerBuilder != nil {
bal.picker = NewErrPicker(balancer.ErrNoSubConnAvailable)
} else {
bal.v2Picker = NewErrPickerV2(balancer.ErrNoSubConnAvailable)
}
return bal
}
@ -57,9 +62,12 @@ func (bb *baseBuilder) Name() string {
return bb.name
}
var _ balancer.V2Balancer = (*baseBalancer)(nil) // Assert that we implement V2Balancer
type baseBalancer struct {
cc balancer.ClientConn
pickerBuilder PickerBuilder
v2PickerBuilder V2PickerBuilder
csEvltr *balancer.ConnectivityStateEvaluator
state connectivity.State
@ -67,34 +75,43 @@ type baseBalancer struct {
subConns map[resolver.Address]balancer.SubConn
scStates map[balancer.SubConn]connectivity.State
picker balancer.Picker
v2Picker balancer.V2Picker
config Config
resolverErr error // the last error reported by the resolver; cleared on successful resolution
connErr error // the last connection error; cleared upon leaving TransientFailure
}
func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
panic("not implemented")
}
func (b *baseBalancer) ResolverError(err error) {
b.resolverErr = err
if len(b.subConns) == 0 {
b.state = connectivity.TransientFailure
}
if b.state != connectivity.TransientFailure {
// The picker will not change since the balancer does not currently
// report an error.
return
}
b.regeneratePicker()
if b.picker != nil {
b.cc.UpdateBalancerState(b.state, b.picker)
} else {
b.cc.UpdateState(balancer.State{
ConnectivityState: b.state,
Picker: b.picker,
Picker: b.v2Picker,
})
}
}
func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
// TODO: handle s.ResolverState.Err (log if not nil) once implemented.
// TODO: handle s.ResolverState.ServiceConfig?
if logger.V(2) {
logger.Info("base.baseBalancer: got new ClientConn state: ", s)
if grpclog.V(2) {
grpclog.Infoln("base.baseBalancer: got new ClientConn state: ", s)
}
// Successful resolution; clear resolver error and ensure we return nil.
b.resolverErr = nil
@ -106,7 +123,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
// a is a new address (not existing in b.subConns).
sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck})
if err != nil {
logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err)
grpclog.Warningf("base.baseBalancer: failed to create new SubConn: %v", err)
continue
}
b.subConns[a] = sc
@ -120,7 +137,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
b.cc.RemoveSubConn(sc)
delete(b.subConns, a)
// Keep the state of this sc in b.scStates until sc's state becomes Shutdown.
// The entry will be deleted in UpdateSubConnState.
// The entry will be deleted in HandleSubConnStateChange.
}
}
// If resolver state contains no addresses, return an error so ClientConn
@ -154,9 +171,24 @@ func (b *baseBalancer) mergeErrors() error {
// - built by the pickerBuilder with all READY SubConns otherwise.
func (b *baseBalancer) regeneratePicker() {
if b.state == connectivity.TransientFailure {
b.picker = NewErrPicker(b.mergeErrors())
if b.pickerBuilder != nil {
b.picker = NewErrPicker(balancer.ErrTransientFailure)
} else {
b.v2Picker = NewErrPickerV2(balancer.TransientFailureError(b.mergeErrors()))
}
return
}
if b.pickerBuilder != nil {
readySCs := make(map[resolver.Address]balancer.SubConn)
// Filter out all ready SCs from full subConn map.
for addr, sc := range b.subConns {
if st, ok := b.scStates[sc]; ok && st == connectivity.Ready {
readySCs[addr] = sc
}
}
b.picker = b.pickerBuilder.Build(readySCs)
} else {
readySCs := make(map[balancer.SubConn]SubConnInfo)
// Filter out all ready SCs from full subConn map.
@ -165,18 +197,23 @@ func (b *baseBalancer) regeneratePicker() {
readySCs[sc] = SubConnInfo{Address: addr}
}
}
b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs})
b.v2Picker = b.v2PickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs})
}
}
func (b *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
panic("not implemented")
}
func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
s := state.ConnectivityState
if logger.V(2) {
logger.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s)
if grpclog.V(2) {
grpclog.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s)
}
oldS, ok := b.scStates[sc]
if !ok {
if logger.V(2) {
logger.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s)
if grpclog.V(2) {
grpclog.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s)
}
return
}
@ -210,7 +247,11 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su
b.regeneratePicker()
}
b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker})
if b.picker != nil {
b.cc.UpdateBalancerState(b.state, b.picker)
} else {
b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.v2Picker})
}
}
// Close is a nop because base balancer doesn't have internal state to clean up,
@ -218,20 +259,28 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su
func (b *baseBalancer) Close() {
}
// NewErrPicker returns a Picker that always returns err on Pick().
// NewErrPicker returns a picker that always returns err on Pick().
func NewErrPicker(err error) balancer.Picker {
return &errPicker{err: err}
}
// NewErrPickerV2 is temporarily defined for backward compatibility reasons.
//
// Deprecated: use NewErrPicker instead.
var NewErrPickerV2 = NewErrPicker
type errPicker struct {
err error // Pick() always returns this err.
}
func (p *errPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
func (p *errPicker) Pick(context.Context, balancer.PickInfo) (balancer.SubConn, func(balancer.DoneInfo), error) {
return nil, nil, p.err
}
// NewErrPickerV2 returns a V2Picker that always returns err on Pick().
func NewErrPickerV2(err error) balancer.V2Picker {
return &errPickerV2{err: err}
}
type errPickerV2 struct {
err error // Pick() always returns this err.
}
func (p *errPickerV2) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
return balancer.PickResult{}, p.err
}

View File

@ -37,8 +37,15 @@ import (
// PickerBuilder creates balancer.Picker.
type PickerBuilder interface {
// Build takes a slice of ready SubConns, and returns a picker that will be
// used by gRPC to pick a SubConn.
Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker
}
// V2PickerBuilder creates balancer.V2Picker.
type V2PickerBuilder interface {
// Build returns a picker that will be used by gRPC to pick a SubConn.
Build(info PickerBuildInfo) balancer.Picker
Build(info PickerBuildInfo) balancer.V2Picker
}
// PickerBuildInfo contains information needed by the picker builder to
@ -55,17 +62,32 @@ type SubConnInfo struct {
Address resolver.Address // the address used to create this SubConn
}
// NewBalancerBuilder returns a balancer builder. The balancers
// built by this builder will use the picker builder to build pickers.
func NewBalancerBuilder(name string, pb PickerBuilder) balancer.Builder {
return NewBalancerBuilderWithConfig(name, pb, Config{})
}
// Config contains the config info about the base balancer builder.
type Config struct {
// HealthCheck indicates whether health checking should be enabled for this specific balancer.
HealthCheck bool
}
// NewBalancerBuilder returns a base balancer builder configured by the provided config.
func NewBalancerBuilder(name string, pb PickerBuilder, config Config) balancer.Builder {
// NewBalancerBuilderWithConfig returns a base balancer builder configured by the provided config.
func NewBalancerBuilderWithConfig(name string, pb PickerBuilder, config Config) balancer.Builder {
return &baseBuilder{
name: name,
pickerBuilder: pb,
config: config,
}
}
// NewBalancerBuilderV2 returns a base balancer builder configured by the provided config.
func NewBalancerBuilderV2(name string, pb V2PickerBuilder, config Config) balancer.Builder {
return &baseBuilder{
name: name,
v2PickerBuilder: pb,
config: config,
}
}

View File

@ -1,51 +0,0 @@
/*
*
* Copyright 2020 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package state declares grpclb types to be set by resolvers wishing to pass
// information to grpclb via resolver.State Attributes.
package state
import (
"google.golang.org/grpc/resolver"
)
// keyType is the key to use for storing State in Attributes.
type keyType string
const key = keyType("grpc.grpclb.state")
// State contains gRPCLB-relevant data passed from the name resolver.
type State struct {
// BalancerAddresses contains the remote load balancer address(es). If
// set, overrides any resolver-provided addresses with Type of GRPCLB.
BalancerAddresses []resolver.Address
}
// Set returns a copy of the provided state with attributes containing s. s's
// data should not be mutated after calling Set.
func Set(state resolver.State, s *State) resolver.State {
state.Attributes = state.Attributes.WithValues(key, s)
return state
}
// Get returns the grpclb State in the resolver.State, or nil if not present.
// The returned data should not be mutated.
func Get(state resolver.State) *State {
s, _ := state.Attributes.Value(key).(*State)
return s
}

View File

@ -33,11 +33,9 @@ import (
// Name is the name of round_robin balancer.
const Name = "round_robin"
var logger = grpclog.Component("roundrobin")
// newBuilder creates a new roundrobin balancer builder.
func newBuilder() balancer.Builder {
return base.NewBalancerBuilder(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true})
return base.NewBalancerBuilderV2(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true})
}
func init() {
@ -46,10 +44,10 @@ func init() {
type rrPickerBuilder struct{}
func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker {
logger.Infof("roundrobinPicker: newPicker called with info: %v", info)
func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.V2Picker {
grpclog.Infof("roundrobinPicker: newPicker called with info: %v", info)
if len(info.ReadySCs) == 0 {
return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
return base.NewErrPickerV2(balancer.ErrNoSubConnAvailable)
}
var scs []balancer.SubConn
for sc := range info.ReadySCs {

View File

@ -74,7 +74,11 @@ func (ccb *ccBalancerWrapper) watcher() {
}
ccb.balancerMu.Lock()
su := t.(*scStateUpdate)
ccb.balancer.UpdateSubConnState(su.sc, balancer.SubConnState{ConnectivityState: su.state, ConnectionError: su.err})
if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
ub.UpdateSubConnState(su.sc, balancer.SubConnState{ConnectivityState: su.state, ConnectionError: su.err})
} else {
ccb.balancer.HandleSubConnStateChange(su.sc, su.state)
}
ccb.balancerMu.Unlock()
case <-ccb.done.Done():
}
@ -119,13 +123,19 @@ func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s co
func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
ccb.balancerMu.Lock()
defer ccb.balancerMu.Unlock()
return ccb.balancer.UpdateClientConnState(*ccs)
if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
return ub.UpdateClientConnState(*ccs)
}
ccb.balancer.HandleResolvedAddrs(ccs.ResolverState.Addresses, nil)
return nil
}
func (ccb *ccBalancerWrapper) resolverError(err error) {
if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
ccb.balancerMu.Lock()
ccb.balancer.ResolverError(err)
ub.ResolverError(err)
ccb.balancerMu.Unlock()
}
}
func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
@ -163,6 +173,21 @@ func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
}
func (ccb *ccBalancerWrapper) UpdateBalancerState(s connectivity.State, p balancer.Picker) {
ccb.mu.Lock()
defer ccb.mu.Unlock()
if ccb.subConns == nil {
return
}
// Update picker before updating state. Even though the ordering here does
// not matter, it can lead to multiple calls of Pick in the common start-up
// case where we wait for ready and then perform an RPC. If the picker is
// updated later, we could call the "connecting" picker when the state is
// updated, and then call the "ready" picker after the picker gets updated.
ccb.cc.blockingpicker.updatePicker(p)
ccb.cc.csMgr.updateState(s)
}
func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
ccb.mu.Lock()
defer ccb.mu.Unlock()
@ -174,7 +199,7 @@ func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
// case where we wait for ready and then perform an RPC. If the picker is
// updated later, we could call the "connecting" picker when the state is
// updated, and then call the "ready" picker after the picker gets updated.
ccb.cc.blockingpicker.updatePicker(s.Picker)
ccb.cc.blockingpicker.updatePickerV2(s.Picker)
ccb.cc.csMgr.updateState(s.ConnectivityState)
}
@ -220,7 +245,7 @@ func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
ac, err := cc.newAddrConn(addrs, opts)
if err != nil {
channelz.Warningf(logger, acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err)
channelz.Warningf(acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err)
return
}
acbw.ac = ac
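The wrapper above chooses between the legacy HandleSubConnStateChange path and the v2 UpdateSubConnState path with a type assertion. The following standalone sketch shows that optional-interface upgrade pattern in plain Go; the interface and type names are invented for illustration and are not part of the gRPC API.

package main

import "fmt"

// legacyBalancer is the interface every implementation satisfies.
type legacyBalancer interface {
    HandleStateChange(state string)
}

// v2Balancer is an optional, newer interface some implementations also satisfy.
type v2Balancer interface {
    UpdateState(state string) error
}

type modernImpl struct{}

func (modernImpl) HandleStateChange(state string) { fmt.Println("legacy path:", state) }
func (modernImpl) UpdateState(state string) error { fmt.Println("v2 path:", state); return nil }

// dispatch prefers the newer method when the concrete value provides it.
func dispatch(b legacyBalancer, state string) {
    if v2, ok := b.(v2Balancer); ok {
        _ = v2.UpdateState(state)
        return
    }
    b.HandleStateChange(state)
}

func main() {
    dispatch(modernImpl{}, "READY") // prints "v2 path: READY"
}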

View File

@ -1,15 +1,13 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: grpc/binlog/v1/binarylog.proto
// source: grpc/binarylog/grpc_binarylog_v1/binarylog.proto
package grpc_binarylog_v1
package grpc_binarylog_v1 // import "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
duration "github.com/golang/protobuf/ptypes/duration"
timestamp "github.com/golang/protobuf/ptypes/timestamp"
math "math"
)
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import duration "github.com/golang/protobuf/ptypes/duration"
import timestamp "github.com/golang/protobuf/ptypes/timestamp"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@ -20,7 +18,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// Enumerates the type of event
// Note the terminology is different from the RPC semantics
@ -66,7 +64,6 @@ var GrpcLogEntry_EventType_name = map[int32]string{
6: "EVENT_TYPE_SERVER_TRAILER",
7: "EVENT_TYPE_CANCEL",
}
var GrpcLogEntry_EventType_value = map[string]int32{
"EVENT_TYPE_UNKNOWN": 0,
"EVENT_TYPE_CLIENT_HEADER": 1,
@ -81,9 +78,8 @@ var GrpcLogEntry_EventType_value = map[string]int32{
func (x GrpcLogEntry_EventType) String() string {
return proto.EnumName(GrpcLogEntry_EventType_name, int32(x))
}
func (GrpcLogEntry_EventType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_b7972e58de45083a, []int{0, 0}
return fileDescriptor_binarylog_264c8c9c551ce911, []int{0, 0}
}
// Enumerates the entity that generates the log entry
@ -100,7 +96,6 @@ var GrpcLogEntry_Logger_name = map[int32]string{
1: "LOGGER_CLIENT",
2: "LOGGER_SERVER",
}
var GrpcLogEntry_Logger_value = map[string]int32{
"LOGGER_UNKNOWN": 0,
"LOGGER_CLIENT": 1,
@ -110,9 +105,8 @@ var GrpcLogEntry_Logger_value = map[string]int32{
func (x GrpcLogEntry_Logger) String() string {
return proto.EnumName(GrpcLogEntry_Logger_name, int32(x))
}
func (GrpcLogEntry_Logger) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_b7972e58de45083a, []int{0, 1}
return fileDescriptor_binarylog_264c8c9c551ce911, []int{0, 1}
}
type Address_Type int32
@ -134,7 +128,6 @@ var Address_Type_name = map[int32]string{
2: "TYPE_IPV6",
3: "TYPE_UNIX",
}
var Address_Type_value = map[string]int32{
"TYPE_UNKNOWN": 0,
"TYPE_IPV4": 1,
@ -145,9 +138,8 @@ var Address_Type_value = map[string]int32{
func (x Address_Type) String() string {
return proto.EnumName(Address_Type_name, int32(x))
}
func (Address_Type) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_b7972e58de45083a, []int{7, 0}
return fileDescriptor_binarylog_264c8c9c551ce911, []int{7, 0}
}
// Log entry we store in binary logs
@ -193,17 +185,16 @@ func (m *GrpcLogEntry) Reset() { *m = GrpcLogEntry{} }
func (m *GrpcLogEntry) String() string { return proto.CompactTextString(m) }
func (*GrpcLogEntry) ProtoMessage() {}
func (*GrpcLogEntry) Descriptor() ([]byte, []int) {
return fileDescriptor_b7972e58de45083a, []int{0}
return fileDescriptor_binarylog_264c8c9c551ce911, []int{0}
}
func (m *GrpcLogEntry) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GrpcLogEntry.Unmarshal(m, b)
}
func (m *GrpcLogEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_GrpcLogEntry.Marshal(b, m, deterministic)
}
func (m *GrpcLogEntry) XXX_Merge(src proto.Message) {
xxx_messageInfo_GrpcLogEntry.Merge(m, src)
func (dst *GrpcLogEntry) XXX_Merge(src proto.Message) {
xxx_messageInfo_GrpcLogEntry.Merge(dst, src)
}
func (m *GrpcLogEntry) XXX_Size() int {
return xxx_messageInfo_GrpcLogEntry.Size(m)
@ -326,9 +317,9 @@ func (m *GrpcLogEntry) GetPeer() *Address {
return nil
}
// XXX_OneofWrappers is for the internal use of the proto package.
func (*GrpcLogEntry) XXX_OneofWrappers() []interface{} {
return []interface{}{
// XXX_OneofFuncs is for the internal use of the proto package.
func (*GrpcLogEntry) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
return _GrpcLogEntry_OneofMarshaler, _GrpcLogEntry_OneofUnmarshaler, _GrpcLogEntry_OneofSizer, []interface{}{
(*GrpcLogEntry_ClientHeader)(nil),
(*GrpcLogEntry_ServerHeader)(nil),
(*GrpcLogEntry_Message)(nil),
@ -336,6 +327,108 @@ func (*GrpcLogEntry) XXX_OneofWrappers() []interface{} {
}
}
func _GrpcLogEntry_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
m := msg.(*GrpcLogEntry)
// payload
switch x := m.Payload.(type) {
case *GrpcLogEntry_ClientHeader:
b.EncodeVarint(6<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.ClientHeader); err != nil {
return err
}
case *GrpcLogEntry_ServerHeader:
b.EncodeVarint(7<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.ServerHeader); err != nil {
return err
}
case *GrpcLogEntry_Message:
b.EncodeVarint(8<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.Message); err != nil {
return err
}
case *GrpcLogEntry_Trailer:
b.EncodeVarint(9<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.Trailer); err != nil {
return err
}
case nil:
default:
return fmt.Errorf("GrpcLogEntry.Payload has unexpected type %T", x)
}
return nil
}
func _GrpcLogEntry_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
m := msg.(*GrpcLogEntry)
switch tag {
case 6: // payload.client_header
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(ClientHeader)
err := b.DecodeMessage(msg)
m.Payload = &GrpcLogEntry_ClientHeader{msg}
return true, err
case 7: // payload.server_header
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(ServerHeader)
err := b.DecodeMessage(msg)
m.Payload = &GrpcLogEntry_ServerHeader{msg}
return true, err
case 8: // payload.message
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(Message)
err := b.DecodeMessage(msg)
m.Payload = &GrpcLogEntry_Message{msg}
return true, err
case 9: // payload.trailer
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(Trailer)
err := b.DecodeMessage(msg)
m.Payload = &GrpcLogEntry_Trailer{msg}
return true, err
default:
return false, nil
}
}
func _GrpcLogEntry_OneofSizer(msg proto.Message) (n int) {
m := msg.(*GrpcLogEntry)
// payload
switch x := m.Payload.(type) {
case *GrpcLogEntry_ClientHeader:
s := proto.Size(x.ClientHeader)
n += 1 // tag and wire
n += proto.SizeVarint(uint64(s))
n += s
case *GrpcLogEntry_ServerHeader:
s := proto.Size(x.ServerHeader)
n += 1 // tag and wire
n += proto.SizeVarint(uint64(s))
n += s
case *GrpcLogEntry_Message:
s := proto.Size(x.Message)
n += 1 // tag and wire
n += proto.SizeVarint(uint64(s))
n += s
case *GrpcLogEntry_Trailer:
s := proto.Size(x.Trailer)
n += 1 // tag and wire
n += proto.SizeVarint(uint64(s))
n += s
case nil:
default:
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
}
return n
}
type ClientHeader struct {
// This contains only the metadata from the application.
Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
@ -360,17 +453,16 @@ func (m *ClientHeader) Reset() { *m = ClientHeader{} }
func (m *ClientHeader) String() string { return proto.CompactTextString(m) }
func (*ClientHeader) ProtoMessage() {}
func (*ClientHeader) Descriptor() ([]byte, []int) {
return fileDescriptor_b7972e58de45083a, []int{1}
return fileDescriptor_binarylog_264c8c9c551ce911, []int{1}
}
func (m *ClientHeader) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ClientHeader.Unmarshal(m, b)
}
func (m *ClientHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ClientHeader.Marshal(b, m, deterministic)
}
func (m *ClientHeader) XXX_Merge(src proto.Message) {
xxx_messageInfo_ClientHeader.Merge(m, src)
func (dst *ClientHeader) XXX_Merge(src proto.Message) {
xxx_messageInfo_ClientHeader.Merge(dst, src)
}
func (m *ClientHeader) XXX_Size() int {
return xxx_messageInfo_ClientHeader.Size(m)
@ -421,17 +513,16 @@ func (m *ServerHeader) Reset() { *m = ServerHeader{} }
func (m *ServerHeader) String() string { return proto.CompactTextString(m) }
func (*ServerHeader) ProtoMessage() {}
func (*ServerHeader) Descriptor() ([]byte, []int) {
return fileDescriptor_b7972e58de45083a, []int{2}
return fileDescriptor_binarylog_264c8c9c551ce911, []int{2}
}
func (m *ServerHeader) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ServerHeader.Unmarshal(m, b)
}
func (m *ServerHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ServerHeader.Marshal(b, m, deterministic)
}
func (m *ServerHeader) XXX_Merge(src proto.Message) {
xxx_messageInfo_ServerHeader.Merge(m, src)
func (dst *ServerHeader) XXX_Merge(src proto.Message) {
xxx_messageInfo_ServerHeader.Merge(dst, src)
}
func (m *ServerHeader) XXX_Size() int {
return xxx_messageInfo_ServerHeader.Size(m)
@ -469,17 +560,16 @@ func (m *Trailer) Reset() { *m = Trailer{} }
func (m *Trailer) String() string { return proto.CompactTextString(m) }
func (*Trailer) ProtoMessage() {}
func (*Trailer) Descriptor() ([]byte, []int) {
return fileDescriptor_b7972e58de45083a, []int{3}
return fileDescriptor_binarylog_264c8c9c551ce911, []int{3}
}
func (m *Trailer) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Trailer.Unmarshal(m, b)
}
func (m *Trailer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Trailer.Marshal(b, m, deterministic)
}
func (m *Trailer) XXX_Merge(src proto.Message) {
xxx_messageInfo_Trailer.Merge(m, src)
func (dst *Trailer) XXX_Merge(src proto.Message) {
xxx_messageInfo_Trailer.Merge(dst, src)
}
func (m *Trailer) XXX_Size() int {
return xxx_messageInfo_Trailer.Size(m)
@ -534,17 +624,16 @@ func (m *Message) Reset() { *m = Message{} }
func (m *Message) String() string { return proto.CompactTextString(m) }
func (*Message) ProtoMessage() {}
func (*Message) Descriptor() ([]byte, []int) {
return fileDescriptor_b7972e58de45083a, []int{4}
return fileDescriptor_binarylog_264c8c9c551ce911, []int{4}
}
func (m *Message) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Message.Unmarshal(m, b)
}
func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Message.Marshal(b, m, deterministic)
}
func (m *Message) XXX_Merge(src proto.Message) {
xxx_messageInfo_Message.Merge(m, src)
func (dst *Message) XXX_Merge(src proto.Message) {
xxx_messageInfo_Message.Merge(dst, src)
}
func (m *Message) XXX_Size() int {
return xxx_messageInfo_Message.Size(m)
@ -601,17 +690,16 @@ func (m *Metadata) Reset() { *m = Metadata{} }
func (m *Metadata) String() string { return proto.CompactTextString(m) }
func (*Metadata) ProtoMessage() {}
func (*Metadata) Descriptor() ([]byte, []int) {
return fileDescriptor_b7972e58de45083a, []int{5}
return fileDescriptor_binarylog_264c8c9c551ce911, []int{5}
}
func (m *Metadata) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Metadata.Unmarshal(m, b)
}
func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Metadata.Marshal(b, m, deterministic)
}
func (m *Metadata) XXX_Merge(src proto.Message) {
xxx_messageInfo_Metadata.Merge(m, src)
func (dst *Metadata) XXX_Merge(src proto.Message) {
xxx_messageInfo_Metadata.Merge(dst, src)
}
func (m *Metadata) XXX_Size() int {
return xxx_messageInfo_Metadata.Size(m)
@ -642,17 +730,16 @@ func (m *MetadataEntry) Reset() { *m = MetadataEntry{} }
func (m *MetadataEntry) String() string { return proto.CompactTextString(m) }
func (*MetadataEntry) ProtoMessage() {}
func (*MetadataEntry) Descriptor() ([]byte, []int) {
return fileDescriptor_b7972e58de45083a, []int{6}
return fileDescriptor_binarylog_264c8c9c551ce911, []int{6}
}
func (m *MetadataEntry) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_MetadataEntry.Unmarshal(m, b)
}
func (m *MetadataEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_MetadataEntry.Marshal(b, m, deterministic)
}
func (m *MetadataEntry) XXX_Merge(src proto.Message) {
xxx_messageInfo_MetadataEntry.Merge(m, src)
func (dst *MetadataEntry) XXX_Merge(src proto.Message) {
xxx_messageInfo_MetadataEntry.Merge(dst, src)
}
func (m *MetadataEntry) XXX_Size() int {
return xxx_messageInfo_MetadataEntry.Size(m)
@ -692,17 +779,16 @@ func (m *Address) Reset() { *m = Address{} }
func (m *Address) String() string { return proto.CompactTextString(m) }
func (*Address) ProtoMessage() {}
func (*Address) Descriptor() ([]byte, []int) {
return fileDescriptor_b7972e58de45083a, []int{7}
return fileDescriptor_binarylog_264c8c9c551ce911, []int{7}
}
func (m *Address) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Address.Unmarshal(m, b)
}
func (m *Address) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Address.Marshal(b, m, deterministic)
}
func (m *Address) XXX_Merge(src proto.Message) {
xxx_messageInfo_Address.Merge(m, src)
func (dst *Address) XXX_Merge(src proto.Message) {
xxx_messageInfo_Address.Merge(dst, src)
}
func (m *Address) XXX_Size() int {
return xxx_messageInfo_Address.Size(m)
@ -735,9 +821,6 @@ func (m *Address) GetIpPort() uint32 {
}
func init() {
proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_EventType", GrpcLogEntry_EventType_name, GrpcLogEntry_EventType_value)
proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_Logger", GrpcLogEntry_Logger_name, GrpcLogEntry_Logger_value)
proto.RegisterEnum("grpc.binarylog.v1.Address_Type", Address_Type_name, Address_Type_value)
proto.RegisterType((*GrpcLogEntry)(nil), "grpc.binarylog.v1.GrpcLogEntry")
proto.RegisterType((*ClientHeader)(nil), "grpc.binarylog.v1.ClientHeader")
proto.RegisterType((*ServerHeader)(nil), "grpc.binarylog.v1.ServerHeader")
@ -746,67 +829,72 @@ func init() {
proto.RegisterType((*Metadata)(nil), "grpc.binarylog.v1.Metadata")
proto.RegisterType((*MetadataEntry)(nil), "grpc.binarylog.v1.MetadataEntry")
proto.RegisterType((*Address)(nil), "grpc.binarylog.v1.Address")
proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_EventType", GrpcLogEntry_EventType_name, GrpcLogEntry_EventType_value)
proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_Logger", GrpcLogEntry_Logger_name, GrpcLogEntry_Logger_value)
proto.RegisterEnum("grpc.binarylog.v1.Address_Type", Address_Type_name, Address_Type_value)
}
func init() { proto.RegisterFile("grpc/binlog/v1/binarylog.proto", fileDescriptor_b7972e58de45083a) }
func init() {
proto.RegisterFile("grpc/binarylog/grpc_binarylog_v1/binarylog.proto", fileDescriptor_binarylog_264c8c9c551ce911)
}
var fileDescriptor_b7972e58de45083a = []byte{
// 904 bytes of a gzipped FileDescriptorProto
var fileDescriptor_binarylog_264c8c9c551ce911 = []byte{
// 900 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x51, 0x6f, 0xe3, 0x44,
0x10, 0xae, 0xdb, 0x34, 0x6e, 0x26, 0x49, 0xe5, 0xae, 0xca, 0x9d, 0xaf, 0x94, 0x6b, 0x64, 0x09,
0x14, 0x84, 0xe4, 0xa8, 0x29, 0xd7, 0xe3, 0x05, 0xa4, 0x24, 0xf5, 0xa5, 0x11, 0xb9, 0x34, 0xda,
0xe4, 0x7a, 0x80, 0x90, 0xac, 0x6d, 0xbc, 0x38, 0x16, 0x8e, 0xd7, 0xac, 0x37, 0x41, 0xf9, 0x59,
0xbc, 0x21, 0xdd, 0xef, 0xe2, 0x1d, 0x79, 0xd7, 0x4e, 0x4d, 0xd3, 0x82, 0xc4, 0xbd, 0xed, 0x7c,
0xf3, 0xcd, 0x37, 0xbb, 0xe3, 0x99, 0x31, 0xbc, 0xf4, 0x79, 0x3c, 0x6b, 0xdd, 0x05, 0x51, 0xc8,
0xfc, 0xd6, 0xea, 0x3c, 0x3d, 0x11, 0xbe, 0x0e, 0x99, 0x6f, 0xc7, 0x9c, 0x09, 0x86, 0x8e, 0x52,
0xbf, 0x7d, 0x8f, 0xae, 0xce, 0x4f, 0x5e, 0xfa, 0x8c, 0xf9, 0x21, 0x6d, 0x49, 0xc2, 0xdd, 0xf2,
0x97, 0x96, 0xb7, 0xe4, 0x44, 0x04, 0x2c, 0x52, 0x21, 0x27, 0x67, 0x0f, 0xfd, 0x22, 0x58, 0xd0,
0x44, 0x90, 0x45, 0xac, 0x08, 0xd6, 0x07, 0x1d, 0x6a, 0x7d, 0x1e, 0xcf, 0x86, 0xcc, 0x77, 0x22,
0xc1, 0xd7, 0xe8, 0x1b, 0xa8, 0x6c, 0x38, 0xa6, 0xd6, 0xd0, 0x9a, 0xd5, 0xf6, 0x89, 0xad, 0x54,
0xec, 0x5c, 0xc5, 0x9e, 0xe6, 0x0c, 0x7c, 0x4f, 0x46, 0xcf, 0x41, 0x9f, 0x91, 0x30, 0x74, 0x03,
0xcf, 0xdc, 0x6d, 0x68, 0xcd, 0x12, 0x2e, 0xa7, 0xe6, 0xc0, 0x43, 0xaf, 0xe0, 0x79, 0x42, 0x7f,
0x5b, 0xd2, 0x68, 0x46, 0xdd, 0xc0, 0x73, 0x7f, 0x0f, 0xc4, 0x3c, 0x88, 0xdc, 0xd4, 0x69, 0xee,
0x49, 0xe2, 0x71, 0xee, 0x1e, 0x78, 0xef, 0xa5, 0xb3, 0x47, 0xc2, 0x10, 0x7d, 0x0b, 0x25, 0xb1,
0x8e, 0xa9, 0x59, 0x6a, 0x68, 0xcd, 0xc3, 0xf6, 0x97, 0xf6, 0xd6, 0xeb, 0xed, 0xe2, 0xc5, 0x6d,
0x67, 0x45, 0x23, 0x31, 0x5d, 0xc7, 0x14, 0xcb, 0x30, 0xf4, 0x1d, 0x94, 0x43, 0xe6, 0xfb, 0x94,
0x9b, 0xfb, 0x52, 0xe0, 0x8b, 0xff, 0x12, 0x18, 0x4a, 0x36, 0xce, 0xa2, 0xd0, 0x1b, 0xa8, 0xcf,
0xc2, 0x80, 0x46, 0xc2, 0x9d, 0x53, 0xe2, 0x51, 0x6e, 0x96, 0x65, 0x31, 0xce, 0x1e, 0x91, 0xe9,
0x49, 0xde, 0xb5, 0xa4, 0x5d, 0xef, 0xe0, 0xda, 0xac, 0x60, 0xa7, 0x3a, 0x09, 0xe5, 0x2b, 0xca,
0x73, 0x1d, 0xfd, 0x49, 0x9d, 0x89, 0xe4, 0xdd, 0xeb, 0x24, 0x05, 0x1b, 0x5d, 0x82, 0xbe, 0xa0,
0x49, 0x42, 0x7c, 0x6a, 0x1e, 0xe4, 0x9f, 0x65, 0x4b, 0xe1, 0xad, 0x62, 0x5c, 0xef, 0xe0, 0x9c,
0x9c, 0xc6, 0x09, 0x4e, 0x82, 0x90, 0x72, 0xb3, 0xf2, 0x64, 0xdc, 0x54, 0x31, 0xd2, 0xb8, 0x8c,
0x8c, 0xbe, 0x82, 0xa3, 0x98, 0xac, 0x43, 0x46, 0x3c, 0x57, 0xf0, 0x65, 0x34, 0x23, 0x82, 0x7a,
0x26, 0x34, 0xb4, 0xe6, 0x01, 0x36, 0x32, 0xc7, 0x34, 0xc7, 0x91, 0x0d, 0xa5, 0x98, 0x52, 0x6e,
0x56, 0x9f, 0xcc, 0xd0, 0xf1, 0x3c, 0x4e, 0x93, 0x04, 0x4b, 0x9e, 0xf5, 0x97, 0x06, 0x95, 0xcd,
0x07, 0x43, 0xcf, 0x00, 0x39, 0xb7, 0xce, 0x68, 0xea, 0x4e, 0x7f, 0x1c, 0x3b, 0xee, 0xbb, 0xd1,
0xf7, 0xa3, 0x9b, 0xf7, 0x23, 0x63, 0x07, 0x9d, 0x82, 0x59, 0xc0, 0x7b, 0xc3, 0x41, 0x7a, 0xbe,
0x76, 0x3a, 0x57, 0x0e, 0x36, 0xb4, 0x07, 0xde, 0x89, 0x83, 0x6f, 0x1d, 0x9c, 0x7b, 0x77, 0xd1,
0x67, 0xf0, 0x62, 0x3b, 0xf6, 0xad, 0x33, 0x99, 0x74, 0xfa, 0x8e, 0xb1, 0xf7, 0xc0, 0x9d, 0x05,
0xe7, 0xee, 0x12, 0x6a, 0xc0, 0xe9, 0x23, 0x99, 0x3b, 0xc3, 0x37, 0x6e, 0x6f, 0x78, 0x33, 0x71,
0x8c, 0xfd, 0xc7, 0x05, 0xa6, 0xb8, 0x33, 0x18, 0x3a, 0xd8, 0x28, 0xa3, 0x4f, 0xe0, 0xa8, 0x28,
0xd0, 0x19, 0xf5, 0x9c, 0xa1, 0xa1, 0x5b, 0x5d, 0x28, 0xab, 0x36, 0x43, 0x08, 0x0e, 0x87, 0x37,
0xfd, 0xbe, 0x83, 0x0b, 0xef, 0x3d, 0x82, 0x7a, 0x86, 0xa9, 0x8c, 0x86, 0x56, 0x80, 0x54, 0x0a,
0x63, 0xb7, 0x5b, 0x01, 0x3d, 0xab, 0xbf, 0xf5, 0x41, 0x83, 0x5a, 0xb1, 0xf9, 0xd0, 0x6b, 0x38,
0x58, 0x50, 0x41, 0x3c, 0x22, 0x48, 0x36, 0xbc, 0x9f, 0x3e, 0xda, 0x25, 0x8a, 0x82, 0x37, 0x64,
0x74, 0x06, 0xd5, 0x05, 0x15, 0x73, 0xe6, 0xb9, 0x11, 0x59, 0x50, 0x39, 0xc0, 0x15, 0x0c, 0x0a,
0x1a, 0x91, 0x05, 0x45, 0xa7, 0x50, 0x21, 0x4b, 0x31, 0x67, 0x3c, 0x10, 0x6b, 0x39, 0xb6, 0x15,
0x7c, 0x0f, 0xa0, 0x0b, 0xd0, 0xd3, 0x45, 0xc0, 0x96, 0x42, 0x8e, 0x6b, 0xb5, 0xfd, 0x62, 0x6b,
0x67, 0x5c, 0x65, 0x9b, 0x09, 0xe7, 0x4c, 0xab, 0x0f, 0xb5, 0x62, 0xc7, 0xff, 0xef, 0xcb, 0x5b,
0x7f, 0x68, 0xa0, 0x67, 0x1d, 0xfc, 0x51, 0x15, 0x48, 0x04, 0x11, 0xcb, 0xc4, 0x9d, 0x31, 0x4f,
0x55, 0xa0, 0x8e, 0x41, 0x41, 0x3d, 0xe6, 0x51, 0xf4, 0x39, 0x1c, 0x66, 0x84, 0x7c, 0x0e, 0x55,
0x19, 0xea, 0x0a, 0xcd, 0x46, 0xaf, 0x40, 0xf3, 0xa8, 0x20, 0x41, 0x98, 0xc8, 0x8a, 0xd4, 0x72,
0xda, 0x95, 0x02, 0xad, 0x57, 0xa0, 0xe7, 0x11, 0xcf, 0xa0, 0x1c, 0xd2, 0xc8, 0x17, 0x73, 0x79,
0xe1, 0x3a, 0xce, 0x2c, 0x84, 0xa0, 0x24, 0x9f, 0xb1, 0x2b, 0xe3, 0xe5, 0xd9, 0xea, 0xc2, 0x41,
0x7e, 0x77, 0x74, 0x09, 0xfb, 0x34, 0xdd, 0x5c, 0xa6, 0xd6, 0xd8, 0x6b, 0x56, 0xdb, 0x8d, 0x7f,
0x79, 0xa7, 0xdc, 0x70, 0x58, 0xd1, 0xad, 0xd7, 0x50, 0xff, 0x07, 0x8e, 0x0c, 0xd8, 0xfb, 0x95,
0xae, 0x65, 0xf6, 0x0a, 0x4e, 0x8f, 0xe8, 0x18, 0xf6, 0x57, 0x24, 0x5c, 0xd2, 0x2c, 0xb7, 0x32,
0xac, 0x3f, 0x35, 0xd0, 0xb3, 0x39, 0x46, 0x17, 0xd9, 0x76, 0xd6, 0xe4, 0x72, 0x3d, 0x7b, 0x7a,
0xe2, 0xed, 0xc2, 0x4e, 0x36, 0x41, 0x27, 0x0a, 0xcd, 0x3a, 0x2c, 0x37, 0xd3, 0x9f, 0x47, 0x10,
0xbb, 0x31, 0xe3, 0x42, 0x56, 0xb5, 0x8e, 0xcb, 0x41, 0x3c, 0x66, 0x5c, 0x58, 0x0e, 0x94, 0xe4,
0x8e, 0x30, 0xa0, 0xf6, 0x60, 0x3b, 0xd4, 0xa1, 0x22, 0x91, 0xc1, 0xf8, 0xf6, 0x6b, 0x43, 0x2b,
0x9a, 0x97, 0xc6, 0xee, 0xc6, 0x7c, 0x37, 0x1a, 0xfc, 0x60, 0xec, 0x75, 0x7f, 0x86, 0xe3, 0x80,
0x6d, 0x5f, 0xb2, 0x7b, 0xd8, 0x95, 0xd6, 0x90, 0xf9, 0xe3, 0xb4, 0x51, 0xc7, 0xda, 0x4f, 0xed,
0xac, 0x71, 0x7d, 0x16, 0x92, 0xc8, 0xb7, 0x19, 0xf7, 0x5b, 0xf9, 0x7f, 0x59, 0x85, 0x49, 0xd3,
0xdd, 0x98, 0xee, 0xea, 0xfc, 0xae, 0x2c, 0xbb, 0xfc, 0xe2, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff,
0x10, 0x93, 0x68, 0x41, 0xc2, 0x07, 0x00, 0x00,
0x10, 0x3e, 0x37, 0x69, 0xdc, 0x4c, 0x92, 0xca, 0x5d, 0x95, 0x3b, 0x5f, 0x29, 0x34, 0xb2, 0x04,
0x0a, 0x42, 0x72, 0xb9, 0x94, 0xeb, 0xf1, 0x02, 0x52, 0x92, 0xfa, 0xd2, 0x88, 0x5c, 0x1a, 0x6d,
0x72, 0x3d, 0x40, 0x48, 0xd6, 0x36, 0x5e, 0x1c, 0x0b, 0xc7, 0x6b, 0xd6, 0x9b, 0xa0, 0xfc, 0x2c,
0xde, 0x90, 0xee, 0x77, 0xf1, 0x8e, 0xbc, 0x6b, 0x27, 0xa6, 0x69, 0x0f, 0x09, 0xde, 0x3c, 0xdf,
0x7c, 0xf3, 0xcd, 0xee, 0x78, 0x66, 0x16, 0xbe, 0xf2, 0x79, 0x3c, 0x3b, 0xbf, 0x0b, 0x22, 0xc2,
0xd7, 0x21, 0xf3, 0xcf, 0x53, 0xd3, 0xdd, 0x98, 0xee, 0xea, 0xc5, 0xd6, 0x67, 0xc7, 0x9c, 0x09,
0x86, 0x8e, 0x52, 0x8a, 0xbd, 0x45, 0x57, 0x2f, 0x4e, 0x3e, 0xf5, 0x19, 0xf3, 0x43, 0x7a, 0x2e,
0x09, 0x77, 0xcb, 0x5f, 0xce, 0xbd, 0x25, 0x27, 0x22, 0x60, 0x91, 0x0a, 0x39, 0x39, 0xbb, 0xef,
0x17, 0xc1, 0x82, 0x26, 0x82, 0x2c, 0x62, 0x45, 0xb0, 0xde, 0xeb, 0x50, 0xef, 0xf3, 0x78, 0x36,
0x64, 0xbe, 0x13, 0x09, 0xbe, 0x46, 0xdf, 0x40, 0x75, 0xc3, 0x31, 0xb5, 0xa6, 0xd6, 0xaa, 0xb5,
0x4f, 0x6c, 0xa5, 0x62, 0xe7, 0x2a, 0xf6, 0x34, 0x67, 0xe0, 0x2d, 0x19, 0x3d, 0x03, 0x7d, 0x46,
0xc2, 0xd0, 0x0d, 0x3c, 0x73, 0xaf, 0xa9, 0xb5, 0xca, 0xb8, 0x92, 0x9a, 0x03, 0x0f, 0xbd, 0x84,
0x67, 0x09, 0xfd, 0x6d, 0x49, 0xa3, 0x19, 0x75, 0x03, 0xcf, 0xfd, 0x3d, 0x10, 0xf3, 0x20, 0x72,
0x53, 0xa7, 0x59, 0x92, 0xc4, 0xe3, 0xdc, 0x3d, 0xf0, 0xde, 0x49, 0x67, 0x8f, 0x84, 0x21, 0xfa,
0x16, 0xca, 0x62, 0x1d, 0x53, 0xb3, 0xdc, 0xd4, 0x5a, 0x87, 0xed, 0x2f, 0xec, 0x9d, 0xdb, 0xdb,
0xc5, 0x83, 0xdb, 0xce, 0x8a, 0x46, 0x62, 0xba, 0x8e, 0x29, 0x96, 0x61, 0xe8, 0x3b, 0xa8, 0x84,
0xcc, 0xf7, 0x29, 0x37, 0xf7, 0xa5, 0xc0, 0xe7, 0xff, 0x26, 0x30, 0x94, 0x6c, 0x9c, 0x45, 0xa1,
0xd7, 0xd0, 0x98, 0x85, 0x01, 0x8d, 0x84, 0x3b, 0xa7, 0xc4, 0xa3, 0xdc, 0xac, 0xc8, 0x62, 0x9c,
0x3d, 0x20, 0xd3, 0x93, 0xbc, 0x6b, 0x49, 0xbb, 0x7e, 0x82, 0xeb, 0xb3, 0x82, 0x9d, 0xea, 0x24,
0x94, 0xaf, 0x28, 0xcf, 0x75, 0xf4, 0x47, 0x75, 0x26, 0x92, 0xb7, 0xd5, 0x49, 0x0a, 0x36, 0xba,
0x04, 0x7d, 0x41, 0x93, 0x84, 0xf8, 0xd4, 0x3c, 0xc8, 0x7f, 0xcb, 0x8e, 0xc2, 0x1b, 0xc5, 0xb8,
0x7e, 0x82, 0x73, 0x72, 0x1a, 0x27, 0x38, 0x09, 0x42, 0xca, 0xcd, 0xea, 0xa3, 0x71, 0x53, 0xc5,
0x48, 0xe3, 0x32, 0x32, 0xfa, 0x12, 0x8e, 0x62, 0xb2, 0x0e, 0x19, 0xf1, 0x5c, 0xc1, 0x97, 0xd1,
0x8c, 0x08, 0xea, 0x99, 0xd0, 0xd4, 0x5a, 0x07, 0xd8, 0xc8, 0x1c, 0xd3, 0x1c, 0x47, 0x36, 0x94,
0x63, 0x4a, 0xb9, 0x59, 0x7b, 0x34, 0x43, 0xc7, 0xf3, 0x38, 0x4d, 0x12, 0x2c, 0x79, 0xd6, 0x5f,
0x1a, 0x54, 0x37, 0x3f, 0x0c, 0x3d, 0x05, 0xe4, 0xdc, 0x3a, 0xa3, 0xa9, 0x3b, 0xfd, 0x71, 0xec,
0xb8, 0x6f, 0x47, 0xdf, 0x8f, 0x6e, 0xde, 0x8d, 0x8c, 0x27, 0xe8, 0x14, 0xcc, 0x02, 0xde, 0x1b,
0x0e, 0xd2, 0xef, 0x6b, 0xa7, 0x73, 0xe5, 0x60, 0x43, 0xbb, 0xe7, 0x9d, 0x38, 0xf8, 0xd6, 0xc1,
0xb9, 0x77, 0x0f, 0x7d, 0x02, 0xcf, 0x77, 0x63, 0xdf, 0x38, 0x93, 0x49, 0xa7, 0xef, 0x18, 0xa5,
0x7b, 0xee, 0x2c, 0x38, 0x77, 0x97, 0x51, 0x13, 0x4e, 0x1f, 0xc8, 0xdc, 0x19, 0xbe, 0x76, 0x7b,
0xc3, 0x9b, 0x89, 0x63, 0xec, 0x3f, 0x2c, 0x30, 0xc5, 0x9d, 0xc1, 0xd0, 0xc1, 0x46, 0x05, 0x7d,
0x04, 0x47, 0x45, 0x81, 0xce, 0xa8, 0xe7, 0x0c, 0x0d, 0xdd, 0xea, 0x42, 0x45, 0xb5, 0x19, 0x42,
0x70, 0x38, 0xbc, 0xe9, 0xf7, 0x1d, 0x5c, 0xb8, 0xef, 0x11, 0x34, 0x32, 0x4c, 0x65, 0x34, 0xb4,
0x02, 0xa4, 0x52, 0x18, 0x7b, 0xdd, 0x2a, 0xe8, 0x59, 0xfd, 0xad, 0xf7, 0x1a, 0xd4, 0x8b, 0xcd,
0x87, 0x5e, 0xc1, 0xc1, 0x82, 0x0a, 0xe2, 0x11, 0x41, 0xb2, 0xe1, 0xfd, 0xf8, 0xc1, 0x2e, 0x51,
0x14, 0xbc, 0x21, 0xa3, 0x33, 0xa8, 0x2d, 0xa8, 0x98, 0x33, 0xcf, 0x8d, 0xc8, 0x82, 0xca, 0x01,
0xae, 0x62, 0x50, 0xd0, 0x88, 0x2c, 0x28, 0x3a, 0x85, 0x2a, 0x59, 0x8a, 0x39, 0xe3, 0x81, 0x58,
0xcb, 0xb1, 0xad, 0xe2, 0x2d, 0x80, 0x2e, 0x40, 0x4f, 0x17, 0x01, 0x5b, 0x0a, 0x39, 0xae, 0xb5,
0xf6, 0xf3, 0x9d, 0x9d, 0x71, 0x95, 0x6d, 0x26, 0x9c, 0x33, 0xad, 0x3e, 0xd4, 0x8b, 0x1d, 0xff,
0x9f, 0x0f, 0x6f, 0xfd, 0xa1, 0x81, 0x9e, 0x75, 0xf0, 0xff, 0xaa, 0x40, 0x22, 0x88, 0x58, 0x26,
0xee, 0x8c, 0x79, 0xaa, 0x02, 0x0d, 0x0c, 0x0a, 0xea, 0x31, 0x8f, 0xa2, 0xcf, 0xe0, 0x30, 0x23,
0xe4, 0x73, 0xa8, 0xca, 0xd0, 0x50, 0x68, 0x36, 0x7a, 0x05, 0x9a, 0x47, 0x05, 0x09, 0xc2, 0x44,
0x56, 0xa4, 0x9e, 0xd3, 0xae, 0x14, 0x68, 0xbd, 0x04, 0x3d, 0x8f, 0x78, 0x0a, 0x95, 0x90, 0x46,
0xbe, 0x98, 0xcb, 0x03, 0x37, 0x70, 0x66, 0x21, 0x04, 0x65, 0x79, 0x8d, 0x3d, 0x19, 0x2f, 0xbf,
0xad, 0x2e, 0x1c, 0xe4, 0x67, 0x47, 0x97, 0xb0, 0x4f, 0xd3, 0xcd, 0x65, 0x6a, 0xcd, 0x52, 0xab,
0xd6, 0x6e, 0x7e, 0xe0, 0x9e, 0x72, 0xc3, 0x61, 0x45, 0xb7, 0x5e, 0x41, 0xe3, 0x1f, 0x38, 0x32,
0xa0, 0xf4, 0x2b, 0x5d, 0xcb, 0xec, 0x55, 0x9c, 0x7e, 0xa2, 0x63, 0xd8, 0x5f, 0x91, 0x70, 0x49,
0xb3, 0xdc, 0xca, 0xb0, 0xfe, 0xd4, 0x40, 0xcf, 0xe6, 0x18, 0x5d, 0x64, 0xdb, 0x59, 0x93, 0xcb,
0xf5, 0xec, 0xf1, 0x89, 0xb7, 0x0b, 0x3b, 0xd9, 0x04, 0x9d, 0x28, 0x34, 0xeb, 0xb0, 0xdc, 0x4c,
0x1f, 0x8f, 0x20, 0x76, 0x63, 0xc6, 0x85, 0xac, 0x6a, 0x03, 0x57, 0x82, 0x78, 0xcc, 0xb8, 0xb0,
0x1c, 0x28, 0xcb, 0x1d, 0x61, 0x40, 0xfd, 0xde, 0x76, 0x68, 0x40, 0x55, 0x22, 0x83, 0xf1, 0xed,
0xd7, 0x86, 0x56, 0x34, 0x2f, 0x8d, 0xbd, 0x8d, 0xf9, 0x76, 0x34, 0xf8, 0xc1, 0x28, 0x75, 0x7f,
0x86, 0xe3, 0x80, 0xed, 0x1e, 0xb2, 0x7b, 0xd8, 0x95, 0xd6, 0x90, 0xf9, 0xe3, 0xb4, 0x51, 0xc7,
0xda, 0x4f, 0xed, 0xac, 0x71, 0x7d, 0x16, 0x92, 0xc8, 0xb7, 0x19, 0x57, 0x4f, 0xf3, 0x87, 0x5e,
0xea, 0xbb, 0x8a, 0xec, 0xf2, 0x8b, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0xe7, 0xf6, 0x4b, 0x50,
0xd4, 0x07, 0x00, 0x00,
}

View File

@ -68,6 +68,8 @@ var (
errConnDrain = errors.New("grpc: the connection is drained")
// errConnClosing indicates that the connection is closing.
errConnClosing = errors.New("grpc: the connection is closing")
// errBalancerClosed indicates that the balancer is closed.
errBalancerClosed = errors.New("grpc: balancer is closed")
// invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default
// service config.
invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid"
@ -149,17 +151,17 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
if channelz.IsOn() {
if cc.dopts.channelzParentID != 0 {
cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target)
channelz.AddTraceEvent(logger, cc.channelzID, 0, &channelz.TraceEventDesc{
channelz.AddTraceEvent(cc.channelzID, 0, &channelz.TraceEventDesc{
Desc: "Channel Created",
Severity: channelz.CtInfo,
Severity: channelz.CtINFO,
Parent: &channelz.TraceEventDesc{
Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID),
Severity: channelz.CtInfo,
Severity: channelz.CtINFO,
},
})
} else {
cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, 0, target)
channelz.Info(logger, cc.channelzID, "Channel Created")
channelz.Info(cc.channelzID, "Channel Created")
}
cc.csMgr.channelzID = cc.channelzID
}
@ -215,14 +217,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
defer func() {
select {
case <-ctx.Done():
switch {
case ctx.Err() == err:
conn = nil
case err == nil || !cc.dopts.returnLastError:
conn, err = nil, ctx.Err()
default:
conn, err = nil, fmt.Errorf("%v: %v", ctx.Err(), err)
}
default:
}
}()
@ -245,14 +240,13 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
// Determine the resolver to use.
cc.parsedTarget = grpcutil.ParseTarget(cc.target)
unixScheme := strings.HasPrefix(cc.target, "unix:")
channelz.Infof(logger, cc.channelzID, "parsed scheme: %q", cc.parsedTarget.Scheme)
channelz.Infof(cc.channelzID, "parsed scheme: %q", cc.parsedTarget.Scheme)
resolverBuilder := cc.getResolver(cc.parsedTarget.Scheme)
if resolverBuilder == nil {
// If resolver builder is still nil, the parsed target's scheme is
// not registered. Fallback to default resolver and set Endpoint to
// the original target.
channelz.Infof(logger, cc.channelzID, "scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme)
channelz.Infof(cc.channelzID, "scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme)
cc.parsedTarget = resolver.Target{
Scheme: resolver.GetDefaultScheme(),
Endpoint: target,
@ -268,8 +262,6 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
cc.authority = creds.Info().ServerName
} else if cc.dopts.insecure && cc.dopts.authority != "" {
cc.authority = cc.dopts.authority
} else if unixScheme {
cc.authority = "localhost"
} else {
// Use endpoint from "scheme://authority/endpoint" as the default
// authority for ClientConn.
@ -319,7 +311,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
if s == connectivity.Ready {
break
} else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure {
if err = cc.connectionError(); err != nil {
if err = cc.blockingpicker.connectionError(); err != nil {
terr, ok := err.(interface {
Temporary() bool
})
@ -330,9 +322,6 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
}
if !cc.WaitForStateChange(ctx, s) {
// ctx got timeout or canceled.
if err = cc.connectionError(); err != nil && cc.dopts.returnLastError {
return nil, err
}
return nil, ctx.Err()
}
}
@ -425,7 +414,7 @@ func (csm *connectivityStateManager) updateState(state connectivity.State) {
return
}
csm.state = state
channelz.Infof(logger, csm.channelzID, "Channel Connectivity change to %v", state)
channelz.Infof(csm.channelzID, "Channel Connectivity change to %v", state)
if csm.notifyChan != nil {
// There are other goroutines waiting on this channel.
close(csm.notifyChan)
@ -501,18 +490,11 @@ type ClientConn struct {
channelzID int64 // channelz unique identification number
czData *channelzData
lceMu sync.Mutex // protects lastConnectionError
lastConnectionError error
}
// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or
// ctx expires. A true value is returned in former case and false in latter.
//
// Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
// This is an EXPERIMENTAL API.
func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool {
ch := cc.csMgr.getNotifyChan()
if cc.csMgr.getState() != sourceState {
@ -527,11 +509,7 @@ func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connec
}
// GetState returns the connectivity.State of ClientConn.
//
// Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
// This is an EXPERIMENTAL API.
func (cc *ClientConn) GetState() connectivity.State {
return cc.csMgr.getState()
}
@ -686,9 +664,9 @@ func (cc *ClientConn) switchBalancer(name string) {
return
}
channelz.Infof(logger, cc.channelzID, "ClientConn switching balancer to %q", name)
channelz.Infof(cc.channelzID, "ClientConn switching balancer to %q", name)
if cc.dopts.balancerBuilder != nil {
channelz.Info(logger, cc.channelzID, "ignoring balancer switching: Balancer DialOption used instead")
channelz.Info(cc.channelzID, "ignoring balancer switching: Balancer DialOption used instead")
return
}
if cc.balancerWrapper != nil {
@ -697,11 +675,11 @@ func (cc *ClientConn) switchBalancer(name string) {
builder := balancer.Get(name)
if builder == nil {
channelz.Warningf(logger, cc.channelzID, "Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName)
channelz.Infof(logger, cc.channelzID, "failed to get balancer builder for: %v, using pick_first instead", name)
channelz.Warningf(cc.channelzID, "Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName)
channelz.Infof(cc.channelzID, "failed to get balancer builder for: %v, using pick_first instead", name)
builder = newPickfirstBuilder()
} else {
channelz.Infof(logger, cc.channelzID, "Channel switches to new LB policy %q", name)
channelz.Infof(cc.channelzID, "Channel switches to new LB policy %q", name)
}
cc.curBalancerName = builder.Name()
@ -742,12 +720,12 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub
}
if channelz.IsOn() {
ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "")
channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{
channelz.AddTraceEvent(ac.channelzID, 0, &channelz.TraceEventDesc{
Desc: "Subchannel Created",
Severity: channelz.CtInfo,
Severity: channelz.CtINFO,
Parent: &channelz.TraceEventDesc{
Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID),
Severity: channelz.CtInfo,
Severity: channelz.CtINFO,
},
})
}
@ -781,11 +759,7 @@ func (cc *ClientConn) channelzMetric() *channelz.ChannelInternalMetric {
}
// Target returns the target string of the ClientConn.
//
// Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
// This is an EXPERIMENTAL API.
func (cc *ClientConn) Target() string {
return cc.target
}
@ -844,7 +818,7 @@ func (ac *addrConn) connect() error {
func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool {
ac.mu.Lock()
defer ac.mu.Unlock()
channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs)
channelz.Infof(ac.channelzID, "addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs)
if ac.state == connectivity.Shutdown ||
ac.state == connectivity.TransientFailure ||
ac.state == connectivity.Idle {
@ -864,7 +838,7 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool {
break
}
}
channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound)
channelz.Infof(ac.channelzID, "addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound)
if curAddrFound {
ac.addrs = addrs
}
@ -875,10 +849,9 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool {
// GetMethodConfig gets the method config of the input method.
// If there's an exact match for input method (i.e. /service/method), we return
// the corresponding MethodConfig.
// If there isn't an exact match for the input method, we look for the service's default
// config under the service (i.e /service/) and then for the default for all services (empty string).
//
// If there is a default MethodConfig for the service, we return it.
// If there isn't an exact match for the input method, we look for the default config
// under the service (i.e /service/). If there is a default MethodConfig for
// the service, we return it.
// Otherwise, we return an empty MethodConfig.
func (cc *ClientConn) GetMethodConfig(method string) MethodConfig {
// TODO: Avoid the locking here.
@ -887,14 +860,12 @@ func (cc *ClientConn) GetMethodConfig(method string) MethodConfig {
if cc.sc == nil {
return MethodConfig{}
}
if m, ok := cc.sc.Methods[method]; ok {
return m
}
m, ok := cc.sc.Methods[method]
if !ok {
i := strings.LastIndex(method, "/")
if m, ok := cc.sc.Methods[method[:i+1]]; ok {
return m
m = cc.sc.Methods[method[:i+1]]
}
return cc.sc.Methods[""]
return m
}
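To illustrate the lookup order described in the GetMethodConfig comment above, here is a hedged sketch of supplying a per-service default method config through a default service config; the service name echo.Echo and the target are placeholders.

package main

import (
    "log"

    "google.golang.org/grpc"
)

// A method config keyed only by service acts as the "/service/" default that
// GetMethodConfig falls back to when no exact "/service/method" entry exists.
const serviceConfig = `{
  "methodConfig": [{
    "name": [{ "service": "echo.Echo" }],
    "waitForReady": true,
    "timeout": "2s"
  }]
}`

func main() {
    conn, err := grpc.Dial("example.internal:50051", // hypothetical target
        grpc.WithInsecure(),
        grpc.WithDefaultServiceConfig(serviceConfig),
    )
    if err != nil {
        log.Fatalf("dial: %v", err)
    }
    defer conn.Close()
}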
func (cc *ClientConn) healthCheckConfig() *healthCheckConfig {
@ -986,10 +957,7 @@ func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) {
// However, if a previously unavailable network becomes available, this may be
// used to trigger an immediate reconnect.
//
// Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
// This API is EXPERIMENTAL.
func (cc *ClientConn) ResetConnectBackoff() {
cc.mu.Lock()
conns := cc.conns
@ -1033,15 +1001,15 @@ func (cc *ClientConn) Close() error {
if channelz.IsOn() {
ted := &channelz.TraceEventDesc{
Desc: "Channel Deleted",
Severity: channelz.CtInfo,
Severity: channelz.CtINFO,
}
if cc.dopts.channelzParentID != 0 {
ted.Parent = &channelz.TraceEventDesc{
Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID),
Severity: channelz.CtInfo,
Severity: channelz.CtINFO,
}
}
channelz.AddTraceEvent(logger, cc.channelzID, 0, ted)
channelz.AddTraceEvent(cc.channelzID, 0, ted)
// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to
// the entity being deleted, and thus prevent it from being deleted right away.
channelz.RemoveEntry(cc.channelzID)
@ -1085,7 +1053,7 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error)
return
}
ac.state = s
channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s)
channelz.Infof(ac.channelzID, "Subchannel Connectivity change to %v", s)
ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr)
}
@ -1222,7 +1190,7 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T
}
ac.mu.Unlock()
channelz.Infof(logger, ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr)
channelz.Infof(ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr)
newTr, reconnect, err := ac.createTransport(addr, copts, connectDeadline)
if err == nil {
@ -1231,7 +1199,7 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T
if firstConnErr == nil {
firstConnErr = err
}
ac.cc.updateConnectionError(err)
ac.cc.blockingpicker.updateConnectionError(err)
}
// Couldn't connect to any address.
@ -1246,9 +1214,16 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
onCloseCalled := make(chan struct{})
reconnect := grpcsync.NewEvent()
authority := ac.cc.authority
// addr.ServerName takes precedent over ClientConn authority, if present.
if addr.ServerName == "" {
addr.ServerName = ac.cc.authority
if addr.ServerName != "" {
authority = addr.ServerName
}
target := transport.TargetInfo{
Addr: addr.Addr,
Metadata: addr.Metadata,
Authority: authority,
}
once := sync.Once{}
@ -1294,10 +1269,10 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
copts.ChannelzParentID = ac.channelzID
}
newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onPrefaceReceipt, onGoAway, onClose)
newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, target, copts, onPrefaceReceipt, onGoAway, onClose)
if err != nil {
// newTr is either nil, or closed.
channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v. Err: %v. Reconnecting...", addr, err)
channelz.Warningf(ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v. Err: %v. Reconnecting...", addr, err)
return nil, nil, err
}
@ -1305,7 +1280,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
case <-time.After(time.Until(connectDeadline)):
// We didn't get the preface in time.
newTr.Close()
channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr)
channelz.Warningf(ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr)
return nil, nil, errors.New("timed out waiting for server handshake")
case <-prefaceReceived:
// We got the preface - huzzah! things are good.
@ -1322,7 +1297,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
//
// LB channel health checking is enabled when all requirements below are met:
// 1. it is not disabled by the user with the WithDisableHealthCheck DialOption
// 2. internal.HealthCheckFunc is set by importing the grpc/health package
// 2. internal.HealthCheckFunc is set by importing the grpc/healthcheck package
// 3. a service config with non-empty healthCheckConfig field is provided
// 4. the load balancer requests it
//
@ -1352,7 +1327,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) {
// The health package is not imported to set health check function.
//
// TODO: add a link to the health check doc in the error message.
channelz.Error(logger, ac.channelzID, "Health check is requested but health check function is not set.")
channelz.Error(ac.channelzID, "Health check is requested but health check function is not set.")
return
}
@ -1382,9 +1357,9 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) {
err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName)
if err != nil {
if status.Code(err) == codes.Unimplemented {
channelz.Error(logger, ac.channelzID, "Subchannel health check is unimplemented at server side, thus health check is disabled")
channelz.Error(ac.channelzID, "Subchannel health check is unimplemented at server side, thus health check is disabled")
} else {
channelz.Errorf(logger, ac.channelzID, "HealthCheckFunc exits with unexpected error %v", err)
channelz.Errorf(ac.channelzID, "HealthCheckFunc exits with unexpected error %v", err)
}
}
}()
@ -1449,12 +1424,12 @@ func (ac *addrConn) tearDown(err error) {
ac.mu.Lock()
}
if channelz.IsOn() {
channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{
channelz.AddTraceEvent(ac.channelzID, 0, &channelz.TraceEventDesc{
Desc: "Subchannel Deleted",
Severity: channelz.CtInfo,
Severity: channelz.CtINFO,
Parent: &channelz.TraceEventDesc{
Desc: fmt.Sprintf("Subchanel(id:%d) deleted", ac.channelzID),
Severity: channelz.CtInfo,
Severity: channelz.CtINFO,
},
})
// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to
@ -1557,15 +1532,3 @@ func (cc *ClientConn) getResolver(scheme string) resolver.Builder {
}
return resolver.Get(scheme)
}
func (cc *ClientConn) updateConnectionError(err error) {
cc.lceMu.Lock()
cc.lastConnectionError = err
cc.lceMu.Unlock()
}
func (cc *ClientConn) connectionError() error {
cc.lceMu.Lock()
defer cc.lceMu.Unlock()
return cc.lastConnectionError
}
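For reference, a minimal sketch of a blocking dial against the pinned client, where DialContext returns only once the channel is READY or the context expires; the target is a placeholder.

package main

import (
    "context"
    "log"
    "time"

    "google.golang.org/grpc"
)

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()

    conn, err := grpc.DialContext(ctx, "example.internal:50051", // hypothetical target
        grpc.WithInsecure(),
        grpc.WithBlock(), // block until READY or ctx is done
    )
    if err != nil {
        log.Fatalf("dial: %v", err) // non-nil when the context expires first
    }
    defer conn.Close()
}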

View File

@ -33,9 +33,6 @@ const (
OK Code = 0
// Canceled indicates the operation was canceled (typically by the caller).
//
// The gRPC framework will generate this error code when cancellation
// is requested.
Canceled Code = 1
// Unknown error. An example of where this error may be returned is
@ -43,17 +40,12 @@ const (
// an error-space that is not known in this address space. Also
// errors raised by APIs that do not return enough error information
// may be converted to this error.
//
// The gRPC framework will generate this error code in the above two
// mentioned cases.
Unknown Code = 2
// InvalidArgument indicates client specified an invalid argument.
// Note that this differs from FailedPrecondition. It indicates arguments
// that are problematic regardless of the state of the system
// (e.g., a malformed file name).
//
// This error code will not be generated by the gRPC framework.
InvalidArgument Code = 3
// DeadlineExceeded means operation expired before completion.
@ -61,21 +53,14 @@ const (
// returned even if the operation has completed successfully. For
// example, a successful response from a server could have been delayed
// long enough for the deadline to expire.
//
// The gRPC framework will generate this error code when the deadline is
// exceeded.
DeadlineExceeded Code = 4
// NotFound means some requested entity (e.g., file or directory) was
// not found.
//
// This error code will not be generated by the gRPC framework.
NotFound Code = 5
// AlreadyExists means an attempt to create an entity failed because one
// already exists.
//
// This error code will not be generated by the gRPC framework.
AlreadyExists Code = 6
// PermissionDenied indicates the caller does not have permission to
@ -84,17 +69,10 @@ const (
// instead for those errors). It must not be
// used if the caller cannot be identified (use Unauthenticated
// instead for those errors).
//
// This error code will not be generated by the gRPC core framework,
// but expect authentication middleware to use it.
PermissionDenied Code = 7
// ResourceExhausted indicates some resource has been exhausted, perhaps
// a per-user quota, or perhaps the entire file system is out of space.
//
// This error code will be generated by the gRPC framework in
// out-of-memory and server overload situations, or when a message is
// larger than the configured maximum size.
ResourceExhausted Code = 8
// FailedPrecondition indicates operation was rejected because the
@ -116,8 +94,6 @@ const (
// REST Get/Update/Delete on a resource and the resource on the
// server does not match the condition. E.g., conflicting
// read-modify-write on the same resource.
//
// This error code will not be generated by the gRPC framework.
FailedPrecondition Code = 9
// Aborted indicates the operation was aborted, typically due to a
@ -126,8 +102,6 @@ const (
//
// See litmus test above for deciding between FailedPrecondition,
// Aborted, and Unavailable.
//
// This error code will not be generated by the gRPC framework.
Aborted Code = 10
// OutOfRange means operation was attempted past the valid range.
@ -145,26 +119,15 @@ const (
// error) when it applies so that callers who are iterating through
// a space can easily look for an OutOfRange error to detect when
// they are done.
//
// This error code will not be generated by the gRPC framework.
OutOfRange Code = 11
// Unimplemented indicates operation is not implemented or not
// supported/enabled in this service.
//
// This error code will be generated by the gRPC framework. Most
// commonly, you will see this error code when a method implementation
// is missing on the server. It can also be generated for unknown
// compression algorithms or a disagreement as to whether an RPC should
// be streaming.
Unimplemented Code = 12
// Internal errors. Means some invariants expected by underlying
// system has been broken. If you see one of these errors,
// something is very broken.
//
// This error code will be generated by the gRPC framework in several
// internal error conditions.
Internal Code = 13
// Unavailable indicates the service is currently unavailable.
@ -174,22 +137,13 @@ const (
//
// See litmus test above for deciding between FailedPrecondition,
// Aborted, and Unavailable.
//
// This error code will be generated by the gRPC framework during
// abrupt shutdown of a server process or network connection.
Unavailable Code = 14
// DataLoss indicates unrecoverable data loss or corruption.
//
// This error code will not be generated by the gRPC framework.
DataLoss Code = 15
// Unauthenticated indicates the request does not have valid
// authentication credentials for the operation.
//
// The gRPC framework will generate this error code when the
// authentication metadata is invalid or a Credentials callback fails,
// but also expect authentication middleware to generate it.
Unauthenticated Code = 16
_maxCode = 17
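A small sketch of how these codes are typically produced and inspected through the status package; the message text is made up.

package main

import (
    "fmt"

    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
)

func main() {
    err := status.Error(codes.NotFound, "user 42 does not exist") // build a gRPC status error
    fmt.Println(status.Code(err) == codes.NotFound)               // true
    fmt.Println(status.Convert(err).Message())                    // user 42 does not exist
}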

View File

@ -22,11 +22,11 @@
package connectivity
import (
"context"
"google.golang.org/grpc/grpclog"
)
var logger = grpclog.Component("core")
// State indicates the state of connectivity.
// It can be the state of a ClientConn or SubConn.
type State int
@ -44,7 +44,7 @@ func (s State) String() string {
case Shutdown:
return "SHUTDOWN"
default:
logger.Errorf("unknown connectivity state: %d", s)
grpclog.Errorf("unknown connectivity state: %d", s)
return "Invalid-State"
}
}
@ -61,3 +61,13 @@ const (
// Shutdown indicates the ClientConn has started shutting down.
Shutdown
)
// Reporter reports the connectivity states.
type Reporter interface {
// CurrentState returns the current state of the reporter.
CurrentState() State
// WaitForStateChange blocks until the reporter's state is different from the given state,
// and returns true.
// It returns false if <-ctx.Done() can proceed (ctx got timeout or got canceled).
WaitForStateChange(context.Context, State) bool
}
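A minimal sketch of consuming these states from a ClientConn, mirroring the wait loop used inside DialContext; the target is a placeholder.

package main

import (
    "context"
    "log"
    "time"

    "google.golang.org/grpc"
    "google.golang.org/grpc/connectivity"
)

// waitReady blocks until cc reports READY, or returns false once ctx is done.
func waitReady(ctx context.Context, cc *grpc.ClientConn) bool {
    for {
        s := cc.GetState()
        if s == connectivity.Ready {
            return true
        }
        if !cc.WaitForStateChange(ctx, s) {
            return false // ctx timed out or was canceled
        }
    }
}

func main() {
    cc, err := grpc.Dial("example.internal:50051", grpc.WithInsecure()) // hypothetical target
    if err != nil {
        log.Fatalf("dial: %v", err)
    }
    defer cc.Close()

    ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
    defer cancel()
    log.Println("ready:", waitReady(ctx, cc))
}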

View File

@ -29,7 +29,6 @@ import (
"net"
"github.com/golang/protobuf/proto"
"google.golang.org/grpc/attributes"
"google.golang.org/grpc/internal"
)
@ -58,11 +57,9 @@ type PerRPCCredentials interface {
type SecurityLevel int
const (
// Invalid indicates an invalid security level.
// The zero SecurityLevel value is invalid for backward compatibility.
Invalid SecurityLevel = iota
// NoSecurity indicates a connection is insecure.
NoSecurity
// The zero SecurityLevel value is invalid for backward compatibility.
NoSecurity SecurityLevel = iota + 1
// IntegrityOnly indicates a connection only provides integrity protection.
IntegrityOnly
// PrivacyAndIntegrity indicates a connection provides both privacy and integrity protection.
@ -127,18 +124,15 @@ var ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gR
// TransportCredentials defines the common interface for all the live gRPC wire
// protocols and supported transport security protocols (e.g., TLS, SSL).
type TransportCredentials interface {
// ClientHandshake does the authentication handshake specified by the
// corresponding authentication protocol on rawConn for clients. It returns
// the authenticated connection and the corresponding auth information
// about the connection. The auth information should embed CommonAuthInfo
// to return additional information about the credentials. Implementations
// must use the provided context to implement timely cancellation. gRPC
// will try to reconnect if the error returned is a temporary error
// (io.EOF, context.DeadlineExceeded or err.Temporary() == true). If the
// returned error is a wrapper error, implementations should make sure that
// ClientHandshake does the authentication handshake specified by the corresponding
// authentication protocol on rawConn for clients. It returns the authenticated
// connection and the corresponding auth information about the connection.
// The auth information should embed CommonAuthInfo to return additional information about
// the credentials. Implementations must use the provided context to implement timely cancellation.
// gRPC will try to reconnect if the error returned is a temporary error
// (io.EOF, context.DeadlineExceeded or err.Temporary() == true).
// If the returned error is a wrapper error, implementations should make sure that
// the error implements Temporary() to have the correct retry behaviors.
// Additionally, ClientHandshakeInfo data will be available via the context
// passed to this call.
//
// If the returned net.Conn is closed, it MUST close the net.Conn provided.
ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error)
@ -199,31 +193,6 @@ func RequestInfoFromContext(ctx context.Context) (ri RequestInfo, ok bool) {
return
}
// ClientHandshakeInfo holds data to be passed to ClientHandshake. This makes
// it possible to pass arbitrary data to the handshaker from gRPC, resolver,
// balancer etc. Individual credential implementations control the actual
// format of the data that they are willing to receive.
//
// This API is experimental.
type ClientHandshakeInfo struct {
// Attributes contains the attributes for the address. It could be provided
// by the gRPC, resolver, balancer etc.
Attributes *attributes.Attributes
}
// clientHandshakeInfoKey is a struct used as the key to store
// ClientHandshakeInfo in a context.
type clientHandshakeInfoKey struct{}
// ClientHandshakeInfoFromContext returns the ClientHandshakeInfo struct stored
// in ctx.
//
// This API is experimental.
func ClientHandshakeInfoFromContext(ctx context.Context) ClientHandshakeInfo {
chi, _ := ctx.Value(clientHandshakeInfoKey{}).(ClientHandshakeInfo)
return chi
}
// CheckSecurityLevel checks if a connection's security level is greater than or equal to the specified one.
// It returns success if 1) the condition is satisfied or 2) AuthInfo struct does not implement GetCommonAuthInfo() method
// or 3) CommonAuthInfo.SecurityLevel has an invalid zero value. For 2) and 3), it is for the purpose of backward-compatibility.
@ -239,7 +208,7 @@ func CheckSecurityLevel(ctx context.Context, level SecurityLevel) error {
}
if ci, ok := ri.AuthInfo.(internalInfo); ok {
// CommonAuthInfo.SecurityLevel has an invalid value.
if ci.GetCommonAuthInfo().SecurityLevel == Invalid {
if ci.GetCommonAuthInfo().SecurityLevel == 0 {
return nil
}
if ci.GetCommonAuthInfo().SecurityLevel < level {
@ -254,9 +223,6 @@ func init() {
internal.NewRequestInfoContext = func(ctx context.Context, ri RequestInfo) context.Context {
return context.WithValue(ctx, requestInfoKey{}, ri)
}
internal.NewClientHandshakeInfoContext = func(ctx context.Context, chi ClientHandshakeInfo) context.Context {
return context.WithValue(ctx, clientHandshakeInfoKey{}, chi)
}
}
// ChannelzSecurityInfo defines the interface that security protocols should implement
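To show what an implementation of the PerRPCCredentials interface above can look like, here is a hedged sketch that attaches a static bearer token to every RPC; the type name, header value, and token are invented for illustration.

package main

import (
    "context"

    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials"
)

// staticToken attaches a fixed bearer token to the metadata of every RPC.
type staticToken struct{ token string }

func (s staticToken) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
    return map[string]string{"authorization": "Bearer " + s.token}, nil
}

// RequireTransportSecurity reports that this credential must only travel over TLS.
func (s staticToken) RequireTransportSecurity() bool { return true }

var _ credentials.PerRPCCredentials = staticToken{} // compile-time interface check

func main() {
    // Typically combined with transport credentials in grpc.Dial.
    _ = grpc.WithPerRPCCredentials(staticToken{token: "example-token"})
}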

View File

@ -25,9 +25,8 @@ import (
"fmt"
"io/ioutil"
"net"
"net/url"
credinternal "google.golang.org/grpc/internal/credentials"
"google.golang.org/grpc/credentials/internal"
)
// TLSInfo contains the auth information for a TLS authenticated connection.
@ -35,8 +34,6 @@ import (
type TLSInfo struct {
State tls.ConnectionState
CommonAuthInfo
// This API is experimental.
SPIFFEID *url.URL
}
// AuthType returns the type of TLSInfo as a string.
@ -72,7 +69,7 @@ func (c tlsCreds) Info() ProtocolInfo {
func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) {
// use local cfg to avoid clobbering ServerName if using multiple endpoints
cfg := credinternal.CloneTLSConfig(c.config)
cfg := cloneTLSConfig(c.config)
if cfg.ServerName == "" {
serverName, _, err := net.SplitHostPort(authority)
if err != nil {
@ -97,17 +94,7 @@ func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawCon
conn.Close()
return nil, nil, ctx.Err()
}
tlsInfo := TLSInfo{
State: conn.ConnectionState(),
CommonAuthInfo: CommonAuthInfo{
SecurityLevel: PrivacyAndIntegrity,
},
}
id := credinternal.SPIFFEIDFromState(conn.ConnectionState())
if id != nil {
tlsInfo.SPIFFEID = id
}
return credinternal.WrapSyscallConn(rawConn, conn), tlsInfo, nil
return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState(), CommonAuthInfo{PrivacyAndIntegrity}}, nil
}
func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) {
@ -116,17 +103,7 @@ func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error)
conn.Close()
return nil, nil, err
}
tlsInfo := TLSInfo{
State: conn.ConnectionState(),
CommonAuthInfo: CommonAuthInfo{
SecurityLevel: PrivacyAndIntegrity,
},
}
id := credinternal.SPIFFEIDFromState(conn.ConnectionState())
if id != nil {
tlsInfo.SPIFFEID = id
}
return credinternal.WrapSyscallConn(rawConn, conn), tlsInfo, nil
return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState(), CommonAuthInfo{PrivacyAndIntegrity}}, nil
}
func (c *tlsCreds) Clone() TransportCredentials {
@ -138,10 +115,23 @@ func (c *tlsCreds) OverrideServerName(serverNameOverride string) error {
return nil
}
const alpnProtoStrH2 = "h2"
func appendH2ToNextProtos(ps []string) []string {
for _, p := range ps {
if p == alpnProtoStrH2 {
return ps
}
}
ret := make([]string, 0, len(ps)+1)
ret = append(ret, ps...)
return append(ret, alpnProtoStrH2)
}
// NewTLS uses c to construct a TransportCredentials based on TLS.
func NewTLS(c *tls.Config) TransportCredentials {
tc := &tlsCreds{credinternal.CloneTLSConfig(c)}
tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos)
tc := &tlsCreds{cloneTLSConfig(c)}
tc.config.NextProtos = appendH2ToNextProtos(tc.config.NextProtos)
return tc
}
@ -195,10 +185,7 @@ func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error
// TLSChannelzSecurityValue defines the struct that TLS protocol should return
// from GetSecurityValue(), containing security info like cipher and certificate used.
//
// Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
// This API is EXPERIMENTAL.
type TLSChannelzSecurityValue struct {
ChannelzSecurityValue
StandardName string
@ -231,3 +218,18 @@ var cipherSuiteLookup = map[uint16]string{
tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
}
// cloneTLSConfig returns a shallow clone of the exported
// fields of cfg, ignoring the unexported sync.Once, which
// contains a mutex and must not be copied.
//
// If cfg is nil, a new zero tls.Config is returned.
//
// TODO: inline this function if possible.
func cloneTLSConfig(cfg *tls.Config) *tls.Config {
if cfg == nil {
return &tls.Config{}
}
return cfg.Clone()
}

View File

@ -27,6 +27,7 @@ import (
"google.golang.org/grpc/backoff"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal"
internalbackoff "google.golang.org/grpc/internal/backoff"
"google.golang.org/grpc/internal/envconfig"
@ -49,14 +50,14 @@ type dialOptions struct {
dc Decompressor
bs internalbackoff.Strategy
block bool
returnLastError bool
insecure bool
timeout time.Duration
scChan <-chan ServiceConfig
authority string
copts transport.ConnectOptions
callOptions []CallOption
// This is used by WithBalancerName dial option.
// This is used by v1 balancer dial option WithBalancer to support v1
// balancer, and also by WithBalancerName dial option.
balancerBuilder balancer.Builder
channelzParentID int64
disableServiceConfig bool
@ -82,10 +83,7 @@ type DialOption interface {
// EmptyDialOption does not alter the dial configuration. It can be embedded in
// another structure to build custom dial options.
//
// Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
// This API is EXPERIMENTAL.
type EmptyDialOption struct{}
func (EmptyDialOption) apply(*dialOptions) {}
@ -201,6 +199,19 @@ func WithDecompressor(dc Decompressor) DialOption {
})
}
// WithBalancer returns a DialOption which sets a load balancer with the v1 API.
// Name resolver will be ignored if this DialOption is specified.
//
// Deprecated: use the new balancer APIs in balancer package and
// WithBalancerName. Will be removed in a future 1.x release.
func WithBalancer(b Balancer) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.balancerBuilder = &balancerWrapperBuilder{
b: b,
}
})
}
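The deprecation note above points at WithBalancerName; a minimal sketch of the suggested replacement, using the standard round_robin balancer (target address is a placeholder):

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/balancer/roundrobin" // registers "round_robin" and exports its Name
)

func main() {
	// WithBalancerName replaces the v1 WithBalancer API; it panics if no
	// balancer was registered under the given name.
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		grpc.WithBalancerName(roundrobin.Name),
	)
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()
}
```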
// WithBalancerName sets the balancer that the ClientConn will be initialized
// with. Balancer registered with balancerName will be used. This function
// panics if no balancer was registered by balancerName.
@ -241,10 +252,7 @@ func WithServiceConfig(c <-chan ServiceConfig) DialOption {
// using the backoff.DefaultConfig as a base, in cases where you want to
// override only a subset of the backoff configuration.
//
// Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
// This API is EXPERIMENTAL.
func WithConnectParams(p ConnectParams) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.bs = internalbackoff.Exponential{Config: p.Backoff}
@ -291,22 +299,6 @@ func WithBlock() DialOption {
})
}
// WithReturnConnectionError returns a DialOption which makes the client connection
// return a string containing both the last connection error that occurred and
// the context.DeadlineExceeded error.
// Implies WithBlock()
//
// Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func WithReturnConnectionError() DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.block = true
o.returnLastError = true
})
}
// WithInsecure returns a DialOption which disables transport security for this
// ClientConn. Note that transport security is required unless WithInsecure is
// set.
@ -319,10 +311,7 @@ func WithInsecure() DialOption {
// WithNoProxy returns a DialOption which disables the use of proxies for this
// ClientConn. This is ignored if WithDialer or WithContextDialer are used.
//
// Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
// This API is EXPERIMENTAL.
func WithNoProxy() DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.withProxy = false
@ -350,10 +339,7 @@ func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption {
// the ClientConn.WithCreds. This should not be used together with
// WithTransportCredentials.
//
// Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
// This API is experimental.
func WithCredentialsBundle(b credentials.Bundle) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.copts.CredsBundle = b
@ -418,10 +404,7 @@ func WithStatsHandler(h stats.Handler) DialOption {
// FailOnNonTempDialError only affects the initial dial, and does not do
// anything useful unless you are also using WithBlock().
//
// Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
// This is an EXPERIMENTAL API.
func FailOnNonTempDialError(f bool) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.copts.FailOnNonTempDialError = f
@ -440,7 +423,7 @@ func WithUserAgent(s string) DialOption {
// for the client transport.
func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption {
if kp.Time < internal.KeepaliveMinPingTime {
logger.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime)
grpclog.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime)
kp.Time = internal.KeepaliveMinPingTime
}
return newFuncDialOption(func(o *dialOptions) {
@ -476,7 +459,7 @@ func WithStreamInterceptor(f StreamClientInterceptor) DialOption {
}
// WithChainStreamInterceptor returns a DialOption that specifies the chained
// interceptor for streaming RPCs. The first interceptor will be the outer most,
// interceptor for unary RPCs. The first interceptor will be the outer most,
// while the last interceptor will be the inner most wrapper around the real call.
// All interceptors added by this method will be chained, and the interceptor
// defined by WithStreamInterceptor will always be prepended to the chain.
@ -499,10 +482,7 @@ func WithAuthority(a string) DialOption {
// current ClientConn's parent. This function is used in nested channel creation
// (e.g. grpclb dial).
//
// Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
// This API is EXPERIMENTAL.
func WithChannelzParentID(id int64) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.channelzParentID = id
@ -528,10 +508,7 @@ func WithDisableServiceConfig() DialOption {
// 2. Resolver does not return a service config or if the resolver returns an
// invalid service config.
//
// Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
// This API is EXPERIMENTAL.
func WithDefaultServiceConfig(s string) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.defaultServiceConfigRawJSON = &s
@ -547,10 +524,7 @@ func WithDefaultServiceConfig(s string) DialOption {
// default in the future. Until then, it may be enabled by setting the
// environment variable "GRPC_GO_RETRY" to "on".
//
// Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
// This API is EXPERIMENTAL.
func WithDisableRetry() DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.disableRetry = true
@ -568,10 +542,7 @@ func WithMaxHeaderListSize(s uint32) DialOption {
// WithDisableHealthCheck disables the LB channel health checking for all
// SubConns of this ClientConn.
//
// Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
// This API is EXPERIMENTAL.
func WithDisableHealthCheck() DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.disableHealthCheck = true
@ -627,10 +598,7 @@ func withResolveNowBackoff(f func(int) time.Duration) DialOption {
// resolver.Register. They will be matched against the scheme used for the
// current Dial only, and will take precedence over the global registry.
//
// Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
// This API is EXPERIMENTAL.
func WithResolvers(rs ...resolver.Builder) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.resolvers = append(o.resolvers, rs...)
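Several of the options defined in this file are typically combined in a single Dial call; a minimal sketch (target and tuning values are illustrative only):

```go
package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
	"google.golang.org/grpc/keepalive"
)

func main() {
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		grpc.WithBlock(), // block until the connection is up or Dial fails
		grpc.WithConnectParams(grpc.ConnectParams{
			Backoff:           backoff.DefaultConfig,
			MinConnectTimeout: 10 * time.Second,
		}),
		grpc.WithKeepaliveParams(keepalive.ClientParameters{
			Time:    30 * time.Second, // values below the enforced minimum are raised, per WithKeepaliveParams above
			Timeout: 5 * time.Second,
		}),
	)
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()
}
```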

View File

@ -16,8 +16,6 @@
*
*/
//go:generate ./regenerate.sh
/*
Package grpc implements an RPC system called gRPC.

View File

@ -19,10 +19,7 @@
// Package encoding defines the interface for the compressor and codec, and
// functions to register and retrieve compressors and codecs.
//
// Experimental
//
// Notice: This package is EXPERIMENTAL and may be changed or removed in a
// later release.
// This package is EXPERIMENTAL.
package encoding
import (
@ -49,15 +46,10 @@ type Compressor interface {
// coding header. The result must be static; the result cannot change
// between calls.
Name() string
// If a Compressor implements
// EXPERIMENTAL: if a Compressor implements
// DecompressedSize(compressedBytes []byte) int, gRPC will call it
// to determine the size of the buffer allocated for the result of decompression.
// Return -1 to indicate unknown size.
//
// Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
}
var registeredCompressor = make(map[string]Compressor)

View File

@ -6,9 +6,9 @@ require (
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f
github.com/envoyproxy/go-control-plane v0.9.4
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
github.com/golang/mock v1.1.1
github.com/golang/protobuf v1.3.3
github.com/google/go-cmp v0.4.0
github.com/google/uuid v1.1.2
github.com/google/go-cmp v0.2.0
golang.org/x/net v0.0.0-20190311183353-d8887717615a
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a

View File

@ -23,10 +23,6 @@ github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@ -54,8 +50,6 @@ golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135 h1:5Beo0mZN8dRzgrMMkDp0jc8YXQKx9DiJ2k1dkvGsn5A=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=

View File

@ -1,117 +0,0 @@
/*
*
* Copyright 2020 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package grpclog
import (
"fmt"
"google.golang.org/grpc/internal/grpclog"
)
// componentData records the settings for a component.
type componentData struct {
name string
}
var cache = map[string]*componentData{}
func (c *componentData) InfoDepth(depth int, args ...interface{}) {
args = append([]interface{}{"[" + string(c.name) + "]"}, args...)
grpclog.InfoDepth(depth+1, args...)
}
func (c *componentData) WarningDepth(depth int, args ...interface{}) {
args = append([]interface{}{"[" + string(c.name) + "]"}, args...)
grpclog.WarningDepth(depth+1, args...)
}
func (c *componentData) ErrorDepth(depth int, args ...interface{}) {
args = append([]interface{}{"[" + string(c.name) + "]"}, args...)
grpclog.ErrorDepth(depth+1, args...)
}
func (c *componentData) FatalDepth(depth int, args ...interface{}) {
args = append([]interface{}{"[" + string(c.name) + "]"}, args...)
grpclog.FatalDepth(depth+1, args...)
}
func (c *componentData) Info(args ...interface{}) {
c.InfoDepth(1, args...)
}
func (c *componentData) Warning(args ...interface{}) {
c.WarningDepth(1, args...)
}
func (c *componentData) Error(args ...interface{}) {
c.ErrorDepth(1, args...)
}
func (c *componentData) Fatal(args ...interface{}) {
c.FatalDepth(1, args...)
}
func (c *componentData) Infof(format string, args ...interface{}) {
c.InfoDepth(1, fmt.Sprintf(format, args...))
}
func (c *componentData) Warningf(format string, args ...interface{}) {
c.WarningDepth(1, fmt.Sprintf(format, args...))
}
func (c *componentData) Errorf(format string, args ...interface{}) {
c.ErrorDepth(1, fmt.Sprintf(format, args...))
}
func (c *componentData) Fatalf(format string, args ...interface{}) {
c.FatalDepth(1, fmt.Sprintf(format, args...))
}
func (c *componentData) Infoln(args ...interface{}) {
c.InfoDepth(1, args...)
}
func (c *componentData) Warningln(args ...interface{}) {
c.WarningDepth(1, args...)
}
func (c *componentData) Errorln(args ...interface{}) {
c.ErrorDepth(1, args...)
}
func (c *componentData) Fatalln(args ...interface{}) {
c.FatalDepth(1, args...)
}
func (c *componentData) V(l int) bool {
return V(l)
}
// Component creates a new component and returns it for logging. If a component
// with the name already exists, nothing will be created and it will be
// returned. SetLoggerV2 will panic if it is called with a logger created by
// Component.
func Component(componentName string) DepthLoggerV2 {
if cData, ok := cache[componentName]; ok {
return cData
}
c := &componentData{componentName}
cache[componentName] = c
return c
}
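For context, the Component helper being removed here post-dates grpc 1.29.1; newer gRPC internals consume it roughly like this (sketch only, does not compile against the downgraded module):

```go
package transportexample

import "google.golang.org/grpc/grpclog"

// Each subsystem asks for a named component logger once; its log lines are
// then prefixed with "[transport]".
var logger = grpclog.Component("transport")

func logReset(addr string) {
	logger.Warningf("connection to %s reset", addr)
}
```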

View File

@ -67,9 +67,6 @@ type LoggerV2 interface {
// SetLoggerV2 sets logger that is used in grpc to a V2 logger.
// Not mutex-protected, should be called before any gRPC functions.
func SetLoggerV2(l LoggerV2) {
if _, ok := l.(*componentData); ok {
panic("cannot use component logger as grpclog logger")
}
grpclog.Logger = l
grpclog.DepthLogger, _ = l.(grpclog.DepthLoggerV2)
}
@ -204,12 +201,8 @@ func (g *loggerT) V(l int) bool {
// DepthLoggerV2, the below functions will be called with the appropriate stack
// depth set for trivial functions the logger may ignore.
//
// Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
// This API is EXPERIMENTAL.
type DepthLoggerV2 interface {
LoggerV2
// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print.
InfoDepth(depth int, args ...interface{})
// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print.

View File

@ -27,11 +27,7 @@ type UnaryInvoker func(ctx context.Context, method string, req, reply interface{
// UnaryClientInterceptor intercepts the execution of a unary RPC on the client. invoker is the handler to complete the RPC
// and it is the responsibility of the interceptor to call it.
//
// Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
// This is an EXPERIMENTAL API.
type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error
// Streamer is called by StreamClientInterceptor to create a ClientStream.
@ -39,11 +35,7 @@ type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method
// StreamClientInterceptor intercepts the creation of ClientStream. It may return a custom ClientStream to intercept all I/O
// operations. streamer is the handler to create a ClientStream and it is the responsibility of the interceptor to call it.
//
// Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
// This is an EXPERIMENTAL API.
type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error)
// UnaryServerInfo consists of various information about a unary RPC on

View File

@ -25,7 +25,6 @@ import (
"os"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal/grpcutil"
)
// Logger is the global binary logger. It can be used to get binary logger for
@ -40,8 +39,6 @@ type Logger interface {
// It is used to get a methodLogger for each individual method.
var binLogger Logger
var grpclogLogger = grpclog.Component("binarylog")
// SetLogger sets the binary logger.
//
// Only call this at init time.
@ -149,9 +146,9 @@ func (l *logger) setBlacklist(method string) error {
// Each methodLogger returned by this method is a new instance. This is to
// generate sequence id within the call.
func (l *logger) getMethodLogger(methodName string) *MethodLogger {
s, m, err := grpcutil.ParseMethod(methodName)
s, m, err := parseMethodName(methodName)
if err != nil {
grpclogLogger.Infof("binarylogging: failed to parse %q: %v", methodName, err)
grpclog.Infof("binarylogging: failed to parse %q: %v", methodName, err)
return nil
}
if ml, ok := l.methods[s+"/"+m]; ok {

View File

@ -24,6 +24,8 @@ import (
"regexp"
"strconv"
"strings"
"google.golang.org/grpc/grpclog"
)
// NewLoggerFromConfigString reads the string and build a logger. It can be used
@ -50,7 +52,7 @@ func NewLoggerFromConfigString(s string) Logger {
methods := strings.Split(s, ",")
for _, method := range methods {
if err := l.fillMethodLoggerWithConfigString(method); err != nil {
grpclogLogger.Warningf("failed to parse binary log config: %v", err)
grpclog.Warningf("failed to parse binary log config: %v", err)
return nil
}
}

View File

@ -27,6 +27,7 @@ import (
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
)
@ -65,7 +66,7 @@ func newMethodLogger(h, m uint64) *MethodLogger {
callID: idGen.next(),
idWithinCallGen: &callIDGenerator{},
sink: DefaultSink, // TODO(blog): make it pluggable.
sink: defaultSink, // TODO(blog): make it pluggable.
}
}
@ -218,12 +219,12 @@ func (c *ClientMessage) toProto() *pb.GrpcLogEntry {
if m, ok := c.Message.(proto.Message); ok {
data, err = proto.Marshal(m)
if err != nil {
grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err)
grpclog.Infof("binarylogging: failed to marshal proto message: %v", err)
}
} else if b, ok := c.Message.([]byte); ok {
data = b
} else {
grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte")
grpclog.Infof("binarylogging: message to log is neither proto.message nor []byte")
}
ret := &pb.GrpcLogEntry{
Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE,
@ -258,12 +259,12 @@ func (c *ServerMessage) toProto() *pb.GrpcLogEntry {
if m, ok := c.Message.(proto.Message); ok {
data, err = proto.Marshal(m)
if err != nil {
grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err)
grpclog.Infof("binarylogging: failed to marshal proto message: %v", err)
}
} else if b, ok := c.Message.([]byte); ok {
data = b
} else {
grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte")
grpclog.Infof("binarylogging: message to log is neither proto.message nor []byte")
}
ret := &pb.GrpcLogEntry{
Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE,
@ -314,7 +315,7 @@ type ServerTrailer struct {
func (c *ServerTrailer) toProto() *pb.GrpcLogEntry {
st, ok := status.FromError(c.Err)
if !ok {
grpclogLogger.Info("binarylogging: error in trailer is not a status error")
grpclog.Info("binarylogging: error in trailer is not a status error")
}
var (
detailsBytes []byte
@ -324,7 +325,7 @@ func (c *ServerTrailer) toProto() *pb.GrpcLogEntry {
if stProto != nil && len(stProto.Details) != 0 {
detailsBytes, err = proto.Marshal(stProto)
if err != nil {
grpclogLogger.Infof("binarylogging: failed to marshal status proto: %v", err)
grpclog.Infof("binarylogging: failed to marshal status proto: %v", err)
}
}
ret := &pb.GrpcLogEntry{

View File

@ -21,23 +21,32 @@ package binarylog
import (
"bufio"
"encoding/binary"
"fmt"
"io"
"io/ioutil"
"sync"
"time"
"github.com/golang/protobuf/proto"
pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
"google.golang.org/grpc/grpclog"
)
var (
// DefaultSink is the sink where the logs will be written to. It's exported
// for the binarylog package to update.
DefaultSink Sink = &noopSink{} // TODO(blog): change this default (file in /tmp).
defaultSink Sink = &noopSink{} // TODO(blog): change this default (file in /tmp).
)
// Sink writes log entry into the binary log sink.
// SetDefaultSink sets the sink where binary logs will be written to.
//
// sink is a copy of the exported binarylog.Sink, to avoid circular dependency.
// Not thread safe. Only set during initialization.
func SetDefaultSink(s Sink) {
if defaultSink != nil {
defaultSink.Close()
}
defaultSink = s
}
// Sink writes log entry into the binary log sink.
type Sink interface {
// Write will be called to write the log entry into the sink.
//
@ -58,7 +67,7 @@ func (ns *noopSink) Close() error { return nil }
// message is prefixed with a 4 byte big endian unsigned integer as the length.
//
// No buffering is done; Close() doesn't try to close the writer.
func newWriterSink(w io.Writer) Sink {
func newWriterSink(w io.Writer) *writerSink {
return &writerSink{out: w}
}
@ -69,7 +78,7 @@ type writerSink struct {
func (ws *writerSink) Write(e *pb.GrpcLogEntry) error {
b, err := proto.Marshal(e)
if err != nil {
grpclogLogger.Infof("binary logging: failed to marshal proto message: %v", err)
grpclog.Infof("binary logging: failed to marshal proto message: %v", err)
}
hdr := make([]byte, 4)
binary.BigEndian.PutUint32(hdr, uint32(len(b)))
@ -84,17 +93,17 @@ func (ws *writerSink) Write(e *pb.GrpcLogEntry) error {
func (ws *writerSink) Close() error { return nil }
type bufferedSink struct {
type bufWriteCloserSink struct {
mu sync.Mutex
closer io.Closer
out Sink // out is built on buf.
out *writerSink // out is built on buf.
buf *bufio.Writer // buf is kept for flush.
writeStartOnce sync.Once
writeTicker *time.Ticker
}
func (fs *bufferedSink) Write(e *pb.GrpcLogEntry) error {
func (fs *bufWriteCloserSink) Write(e *pb.GrpcLogEntry) error {
// Start the write loop when Write is called.
fs.writeStartOnce.Do(fs.startFlushGoroutine)
fs.mu.Lock()
@ -110,50 +119,44 @@ const (
bufFlushDuration = 60 * time.Second
)
func (fs *bufferedSink) startFlushGoroutine() {
func (fs *bufWriteCloserSink) startFlushGoroutine() {
fs.writeTicker = time.NewTicker(bufFlushDuration)
go func() {
for range fs.writeTicker.C {
fs.mu.Lock()
if err := fs.buf.Flush(); err != nil {
grpclogLogger.Warningf("failed to flush to Sink: %v", err)
}
fs.buf.Flush()
fs.mu.Unlock()
}
}()
}
func (fs *bufferedSink) Close() error {
func (fs *bufWriteCloserSink) Close() error {
if fs.writeTicker != nil {
fs.writeTicker.Stop()
}
fs.mu.Lock()
if err := fs.buf.Flush(); err != nil {
grpclogLogger.Warningf("failed to flush to Sink: %v", err)
}
if err := fs.closer.Close(); err != nil {
grpclogLogger.Warningf("failed to close the underlying WriterCloser: %v", err)
}
if err := fs.out.Close(); err != nil {
grpclogLogger.Warningf("failed to close the Sink: %v", err)
}
fs.buf.Flush()
fs.closer.Close()
fs.out.Close()
fs.mu.Unlock()
return nil
}
// NewBufferedSink creates a binary log sink with the given WriteCloser.
//
// Write() marshals the proto message and writes it to the given writer. Each
// message is prefixed with a 4 byte big endian unsigned integer as the length.
//
// Content is kept in a buffer, and is flushed every 60 seconds.
//
// Close closes the WriteCloser.
func NewBufferedSink(o io.WriteCloser) Sink {
func newBufWriteCloserSink(o io.WriteCloser) Sink {
bufW := bufio.NewWriter(o)
return &bufferedSink{
return &bufWriteCloserSink{
closer: o,
out: newWriterSink(bufW),
buf: bufW,
}
}
// NewTempFileSink creates a temp file and returns a Sink that writes to this
// file.
func NewTempFileSink() (Sink, error) {
tempFile, err := ioutil.TempFile("/tmp", "grpcgo_binarylog_*.txt")
if err != nil {
return nil, fmt.Errorf("failed to create temp file: %v", err)
}
return newBufWriteCloserSink(tempFile), nil
}
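The writer sink above frames each entry as a 4-byte big-endian length followed by the marshaled proto. A sketch of reading such a stream back (the file path is a placeholder):

```go
package main

import (
	"encoding/binary"
	"fmt"
	"io"
	"log"
	"os"

	"github.com/golang/protobuf/proto"
	pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
)

func main() {
	f, err := os.Open("/tmp/grpcgo_binarylog_example.txt") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	hdr := make([]byte, 4)
	for {
		if _, err := io.ReadFull(f, hdr); err != nil {
			break // io.EOF (or a truncated header) ends the loop
		}
		buf := make([]byte, binary.BigEndian.Uint32(hdr))
		if _, err := io.ReadFull(f, buf); err != nil {
			log.Fatalf("truncated entry: %v", err)
		}
		entry := &pb.GrpcLogEntry{}
		if err := proto.Unmarshal(buf, entry); err != nil {
			log.Fatalf("bad entry: %v", err)
		}
		fmt.Println(entry.GetType())
	}
}
```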

View File

@ -30,7 +30,7 @@ import (
"sync/atomic"
"time"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal/grpclog"
)
const (
@ -216,7 +216,7 @@ func RegisterChannel(c Channel, pid int64, ref string) int64 {
// by pid). It returns the unique channelz tracking id assigned to this subchannel.
func RegisterSubChannel(c Channel, pid int64, ref string) int64 {
if pid == 0 {
logger.Error("a SubChannel's parent id cannot be 0")
grpclog.ErrorDepth(0, "a SubChannel's parent id cannot be 0")
return 0
}
id := idGen.genID()
@ -253,7 +253,7 @@ func RegisterServer(s Server, ref string) int64 {
// this listen socket.
func RegisterListenSocket(s Socket, pid int64, ref string) int64 {
if pid == 0 {
logger.Error("a ListenSocket's parent id cannot be 0")
grpclog.ErrorDepth(0, "a ListenSocket's parent id cannot be 0")
return 0
}
id := idGen.genID()
@ -268,7 +268,7 @@ func RegisterListenSocket(s Socket, pid int64, ref string) int64 {
// this normal socket.
func RegisterNormalSocket(s Socket, pid int64, ref string) int64 {
if pid == 0 {
logger.Error("a NormalSocket's parent id cannot be 0")
grpclog.ErrorDepth(0, "a NormalSocket's parent id cannot be 0")
return 0
}
id := idGen.genID()
@ -294,15 +294,17 @@ type TraceEventDesc struct {
}
// AddTraceEvent adds trace related to the entity with specified id, using the provided TraceEventDesc.
func AddTraceEvent(l grpclog.DepthLoggerV2, id int64, depth int, desc *TraceEventDesc) {
func AddTraceEvent(id int64, depth int, desc *TraceEventDesc) {
for d := desc; d != nil; d = d.Parent {
switch d.Severity {
case CtUnknown, CtInfo:
l.InfoDepth(depth+1, d.Desc)
case CtUNKNOWN:
grpclog.InfoDepth(depth+1, d.Desc)
case CtINFO:
grpclog.InfoDepth(depth+1, d.Desc)
case CtWarning:
l.WarningDepth(depth+1, d.Desc)
grpclog.WarningDepth(depth+1, d.Desc)
case CtError:
l.ErrorDepth(depth+1, d.Desc)
grpclog.ErrorDepth(depth+1, d.Desc)
}
}
if getMaxTraceEntry() == 0 {

View File

@ -21,82 +21,80 @@ package channelz
import (
"fmt"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal/grpclog"
)
var logger = grpclog.Component("channelz")
// Info logs and adds a trace event if channelz is on.
func Info(l grpclog.DepthLoggerV2, id int64, args ...interface{}) {
// Info logs through grpclog.Info and adds a trace event if channelz is on.
func Info(id int64, args ...interface{}) {
if IsOn() {
AddTraceEvent(l, id, 1, &TraceEventDesc{
AddTraceEvent(id, 1, &TraceEventDesc{
Desc: fmt.Sprint(args...),
Severity: CtInfo,
Severity: CtINFO,
})
} else {
l.InfoDepth(1, args...)
grpclog.InfoDepth(1, args...)
}
}
// Infof logs and adds a trace event if channelz is on.
func Infof(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) {
// Infof logs through grpclog.Infof and adds a trace event if channelz is on.
func Infof(id int64, format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
if IsOn() {
AddTraceEvent(l, id, 1, &TraceEventDesc{
AddTraceEvent(id, 1, &TraceEventDesc{
Desc: msg,
Severity: CtInfo,
Severity: CtINFO,
})
} else {
l.InfoDepth(1, msg)
grpclog.InfoDepth(1, msg)
}
}
// Warning logs and adds a trace event if channelz is on.
func Warning(l grpclog.DepthLoggerV2, id int64, args ...interface{}) {
// Warning logs through grpclog.Warning and adds a trace event if channelz is on.
func Warning(id int64, args ...interface{}) {
if IsOn() {
AddTraceEvent(l, id, 1, &TraceEventDesc{
AddTraceEvent(id, 1, &TraceEventDesc{
Desc: fmt.Sprint(args...),
Severity: CtWarning,
})
} else {
l.WarningDepth(1, args...)
grpclog.WarningDepth(1, args...)
}
}
// Warningf logs and adds a trace event if channelz is on.
func Warningf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) {
// Warningf logs through grpclog.Warningf and adds a trace event if channelz is on.
func Warningf(id int64, format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
if IsOn() {
AddTraceEvent(l, id, 1, &TraceEventDesc{
AddTraceEvent(id, 1, &TraceEventDesc{
Desc: msg,
Severity: CtWarning,
})
} else {
l.WarningDepth(1, msg)
grpclog.WarningDepth(1, msg)
}
}
// Error logs and adds a trace event if channelz is on.
func Error(l grpclog.DepthLoggerV2, id int64, args ...interface{}) {
// Error logs through grpclog.Error and adds a trace event if channelz is on.
func Error(id int64, args ...interface{}) {
if IsOn() {
AddTraceEvent(l, id, 1, &TraceEventDesc{
AddTraceEvent(id, 1, &TraceEventDesc{
Desc: fmt.Sprint(args...),
Severity: CtError,
})
} else {
l.ErrorDepth(1, args...)
grpclog.ErrorDepth(1, args...)
}
}
// Errorf logs and adds a trace event if channelz is on.
func Errorf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) {
// Errorf logs through grpclog.Errorf and adds a trace event if channelz is on.
func Errorf(id int64, format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
if IsOn() {
AddTraceEvent(l, id, 1, &TraceEventDesc{
AddTraceEvent(id, 1, &TraceEventDesc{
Desc: msg,
Severity: CtError,
})
} else {
l.ErrorDepth(1, msg)
grpclog.ErrorDepth(1, msg)
}
}

View File

@ -26,6 +26,7 @@ import (
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/grpclog"
)
// entry represents a node in the channelz database.
@ -59,17 +60,17 @@ func (d *dummyEntry) addChild(id int64, e entry) {
// the addrConn will create a new transport. And when registering the new transport in
// channelz, its parent addrConn could have already been torn down and deleted
// from channelz tracking, and thus reach the code here.
logger.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound)
grpclog.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound)
}
func (d *dummyEntry) deleteChild(id int64) {
// It is possible for a normal program to reach here under race condition.
// Refer to the example described in addChild().
logger.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound)
grpclog.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound)
}
func (d *dummyEntry) triggerDelete() {
logger.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound)
grpclog.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound)
}
func (*dummyEntry) deleteSelfIfReady() {
@ -214,7 +215,7 @@ func (c *channel) addChild(id int64, e entry) {
case *channel:
c.nestedChans[id] = v.refName
default:
logger.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e)
grpclog.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e)
}
}
@ -325,7 +326,7 @@ func (sc *subChannel) addChild(id int64, e entry) {
if v, ok := e.(*normalSocket); ok {
sc.sockets[id] = v.refName
} else {
logger.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e)
grpclog.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e)
}
}
@ -492,11 +493,11 @@ type listenSocket struct {
}
func (ls *listenSocket) addChild(id int64, e entry) {
logger.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e)
grpclog.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e)
}
func (ls *listenSocket) deleteChild(id int64) {
logger.Errorf("cannot delete a child (id = %d) from a listen socket", id)
grpclog.Errorf("cannot delete a child (id = %d) from a listen socket", id)
}
func (ls *listenSocket) triggerDelete() {
@ -505,7 +506,7 @@ func (ls *listenSocket) triggerDelete() {
}
func (ls *listenSocket) deleteSelfIfReady() {
logger.Errorf("cannot call deleteSelfIfReady on a listen socket")
grpclog.Errorf("cannot call deleteSelfIfReady on a listen socket")
}
func (ls *listenSocket) getParentID() int64 {
@ -521,11 +522,11 @@ type normalSocket struct {
}
func (ns *normalSocket) addChild(id int64, e entry) {
logger.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e)
grpclog.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e)
}
func (ns *normalSocket) deleteChild(id int64) {
logger.Errorf("cannot delete a child (id = %d) from a normal socket", id)
grpclog.Errorf("cannot delete a child (id = %d) from a normal socket", id)
}
func (ns *normalSocket) triggerDelete() {
@ -534,7 +535,7 @@ func (ns *normalSocket) triggerDelete() {
}
func (ns *normalSocket) deleteSelfIfReady() {
logger.Errorf("cannot call deleteSelfIfReady on a normal socket")
grpclog.Errorf("cannot call deleteSelfIfReady on a normal socket")
}
func (ns *normalSocket) getParentID() int64 {
@ -593,7 +594,7 @@ func (s *server) addChild(id int64, e entry) {
case *listenSocket:
s.listenSockets[id] = v.refName
default:
logger.Errorf("cannot add a child (id = %d) of type %T to a server", id, e)
grpclog.Errorf("cannot add a child (id = %d) of type %T to a server", id, e)
}
}
@ -672,10 +673,10 @@ func (c *channelTrace) clear() {
type Severity int
const (
// CtUnknown indicates unknown severity of a trace event.
CtUnknown Severity = iota
// CtInfo indicates info level severity of a trace event.
CtInfo
// CtUNKNOWN indicates unknown severity of a trace event.
CtUNKNOWN Severity = iota
// CtINFO indicates info level severity of a trace event.
CtINFO
// CtWarning indicates warning level severity of a trace event.
CtWarning
// CtError indicates error level severity of a trace event.

View File

@ -22,6 +22,8 @@ package channelz
import (
"sync"
"google.golang.org/grpc/grpclog"
)
var once sync.Once
@ -37,6 +39,6 @@ type SocketOptionData struct {
// Windows OS doesn't support Socket Option
func (s *SocketOptionData) Getsockopt(fd uintptr) {
once.Do(func() {
logger.Warning("Channelz: socket options are not supported on non-linux os and appengine.")
grpclog.Warningln("Channelz: socket options are not supported on non-linux os and appengine.")
})
}

View File

@ -1,67 +0,0 @@
// +build !appengine
/*
*
* Copyright 2020 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package credentials defines APIs for parsing SPIFFE ID.
//
// All APIs in this package are experimental.
package credentials
import (
"crypto/tls"
"net/url"
"google.golang.org/grpc/grpclog"
)
var logger = grpclog.Component("credentials")
// SPIFFEIDFromState parses the SPIFFE ID from State. If the SPIFFE ID format
// is invalid, return nil with warning.
func SPIFFEIDFromState(state tls.ConnectionState) *url.URL {
if len(state.PeerCertificates) == 0 || len(state.PeerCertificates[0].URIs) == 0 {
return nil
}
var spiffeID *url.URL
for _, uri := range state.PeerCertificates[0].URIs {
if uri == nil || uri.Scheme != "spiffe" || uri.Opaque != "" || (uri.User != nil && uri.User.Username() != "") {
continue
}
// From this point, we assume the uri is intended for a SPIFFE ID.
if len(uri.String()) > 2048 {
logger.Warning("invalid SPIFFE ID: total ID length larger than 2048 bytes")
return nil
}
if len(uri.Host) == 0 || len(uri.RawPath) == 0 || len(uri.Path) == 0 {
logger.Warning("invalid SPIFFE ID: domain or workload ID is empty")
return nil
}
if len(uri.Host) > 255 {
logger.Warning("invalid SPIFFE ID: domain length larger than 255 characters")
return nil
}
// A valid SPIFFE certificate can only have exactly one URI SAN field.
if len(state.PeerCertificates[0].URIs) > 1 {
logger.Warning("invalid SPIFFE ID: multiple URI SANs")
return nil
}
spiffeID = uri
}
return spiffeID
}
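As a concrete reference for the validation being removed above, a well-formed SPIFFE ID is a URI of this shape (trust domain and workload path are placeholders):

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// "spiffe" scheme, a trust domain as the host (at most 255 characters),
	// and a non-empty path naming the workload; the parser above additionally
	// requires the certificate to carry exactly one URI SAN.
	u, err := url.Parse("spiffe://example.org/workload/server")
	if err != nil {
		panic(err)
	}
	fmt.Println(u.Scheme, u.Host, u.Path) // spiffe example.org /workload/server
}
```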

View File

@ -1,31 +0,0 @@
// +build appengine
/*
*
* Copyright 2020 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package credentials
import (
"crypto/tls"
"net/url"
)
// SPIFFEIDFromState is a no-op for appengine builds.
func SPIFFEIDFromState(state tls.ConnectionState) *url.URL {
return nil
}

View File

@ -1,60 +0,0 @@
// +build !appengine
/*
*
* Copyright 2018 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package credentials
import (
"net"
"syscall"
)
type sysConn = syscall.Conn
// syscallConn keeps reference of rawConn to support syscall.Conn for channelz.
// SyscallConn() (the method in interface syscall.Conn) is explicitly
// implemented on this type,
//
// Interface syscall.Conn is implemented by most net.Conn implementations (e.g.
// TCPConn, UnixConn), but is not part of net.Conn interface. So wrapper conns
// that embed net.Conn don't implement syscall.Conn. (Side note: tls.Conn
// doesn't embed net.Conn, so even if syscall.Conn is part of net.Conn, it won't
// help here).
type syscallConn struct {
net.Conn
// sysConn is a type alias of syscall.Conn. It's necessary because the name
// `Conn` collides with `net.Conn`.
sysConn
}
// WrapSyscallConn tries to wrap rawConn and newConn into a net.Conn that
// implements syscall.Conn. rawConn will be used to support syscall, and newConn
// will be used for read/write.
//
// This function returns newConn if rawConn doesn't implement syscall.Conn.
func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn {
sysConn, ok := rawConn.(syscall.Conn)
if !ok {
return newConn
}
return &syscallConn{
Conn: newConn,
sysConn: sysConn,
}
}

View File

@ -1,30 +0,0 @@
// +build appengine
/*
*
* Copyright 2018 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package credentials
import (
"net"
)
// WrapSyscallConn returns newConn on appengine.
func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn {
return newConn
}

View File

@ -1,50 +0,0 @@
/*
*
* Copyright 2020 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package credentials
import "crypto/tls"
const alpnProtoStrH2 = "h2"
// AppendH2ToNextProtos appends h2 to next protos.
func AppendH2ToNextProtos(ps []string) []string {
for _, p := range ps {
if p == alpnProtoStrH2 {
return ps
}
}
ret := make([]string, 0, len(ps)+1)
ret = append(ret, ps...)
return append(ret, alpnProtoStrH2)
}
// CloneTLSConfig returns a shallow clone of the exported
// fields of cfg, ignoring the unexported sync.Once, which
// contains a mutex and must not be copied.
//
// If cfg is nil, a new zero tls.Config is returned.
//
// TODO: inline this function if possible.
func CloneTLSConfig(cfg *tls.Config) *tls.Config {
if cfg == nil {
return &tls.Config{}
}
return cfg.Clone()
}

View File

@ -34,5 +34,5 @@ var (
// Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on".
Retry = strings.EqualFold(os.Getenv(retryStr), "on")
// TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false").
TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false")
TXTErrIgnore = !strings.EqualFold(os.Getenv(retryStr), "false")
)

View File

@ -19,10 +19,6 @@
// Package grpclog (internal) defines depth logging for grpc.
package grpclog
import (
"os"
)
// Logger is the logger used for the non-depth log functions.
var Logger LoggerV2
@ -34,7 +30,7 @@ func InfoDepth(depth int, args ...interface{}) {
if DepthLogger != nil {
DepthLogger.InfoDepth(depth, args...)
} else {
Logger.Infoln(args...)
Logger.Info(args...)
}
}
@ -43,7 +39,7 @@ func WarningDepth(depth int, args ...interface{}) {
if DepthLogger != nil {
DepthLogger.WarningDepth(depth, args...)
} else {
Logger.Warningln(args...)
Logger.Warning(args...)
}
}
@ -52,7 +48,7 @@ func ErrorDepth(depth int, args ...interface{}) {
if DepthLogger != nil {
DepthLogger.ErrorDepth(depth, args...)
} else {
Logger.Errorln(args...)
Logger.Error(args...)
}
}
@ -61,9 +57,8 @@ func FatalDepth(depth int, args ...interface{}) {
if DepthLogger != nil {
DepthLogger.FatalDepth(depth, args...)
} else {
Logger.Fatalln(args...)
Logger.Fatal(args...)
}
os.Exit(1)
}
// LoggerV2 does underlying logging work for grpclog.
@ -110,10 +105,7 @@ type LoggerV2 interface {
// This is a copy of the DepthLoggerV2 defined in the external grpclog package.
// It is defined here to avoid a circular dependency.
//
// Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
// This API is EXPERIMENTAL.
type DepthLoggerV2 interface {
// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print.
InfoDepth(depth int, args ...interface{})

View File

@ -18,15 +18,10 @@
package grpclog
import (
"fmt"
)
// PrefixLogger does logging with a prefix.
//
// Calling a logging method on a nil *PrefixLogger logs without any prefix.
type PrefixLogger struct {
logger DepthLoggerV2
prefix string
}
@ -35,47 +30,34 @@ func (pl *PrefixLogger) Infof(format string, args ...interface{}) {
if pl != nil {
// Handle nil, so the tests can pass in a nil logger.
format = pl.prefix + format
pl.logger.InfoDepth(1, fmt.Sprintf(format, args...))
return
}
InfoDepth(1, fmt.Sprintf(format, args...))
Logger.Infof(format, args...)
}
// Warningf does warning logging.
func (pl *PrefixLogger) Warningf(format string, args ...interface{}) {
if pl != nil {
format = pl.prefix + format
pl.logger.WarningDepth(1, fmt.Sprintf(format, args...))
return
}
WarningDepth(1, fmt.Sprintf(format, args...))
Logger.Warningf(format, args...)
}
// Errorf does error logging.
func (pl *PrefixLogger) Errorf(format string, args ...interface{}) {
if pl != nil {
format = pl.prefix + format
pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...))
return
}
ErrorDepth(1, fmt.Sprintf(format, args...))
Logger.Errorf(format, args...)
}
// Debugf does info logging at verbose level 2.
func (pl *PrefixLogger) Debugf(format string, args ...interface{}) {
if !Logger.V(2) {
return
if Logger.V(2) {
pl.Infof(format, args...)
}
if pl != nil {
// Handle nil, so the tests can pass in a nil logger.
format = pl.prefix + format
pl.logger.InfoDepth(1, fmt.Sprintf(format, args...))
return
}
InfoDepth(1, fmt.Sprintf(format, args...))
}
// NewPrefixLogger creates a prefix logger with the given prefix.
func NewPrefixLogger(logger DepthLoggerV2, prefix string) *PrefixLogger {
return &PrefixLogger{logger: logger, prefix: prefix}
func NewPrefixLogger(prefix string) *PrefixLogger {
return &PrefixLogger{prefix: prefix}
}

View File

@ -1,63 +0,0 @@
/*
*
* Copyright 2020 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package grpcutil
import (
"strconv"
"time"
)
const maxTimeoutValue int64 = 100000000 - 1
// div does integer division and round-up the result. Note that this is
// equivalent to (d+r-1)/r but has less chance to overflow.
func div(d, r time.Duration) int64 {
if d%r > 0 {
return int64(d/r + 1)
}
return int64(d / r)
}
// EncodeDuration encodes the duration to the format grpc-timeout header
// accepts.
//
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
func EncodeDuration(t time.Duration) string {
// TODO: This is simplistic and not bandwidth efficient. Improve it.
if t <= 0 {
return "0n"
}
if d := div(t, time.Nanosecond); d <= maxTimeoutValue {
return strconv.FormatInt(d, 10) + "n"
}
if d := div(t, time.Microsecond); d <= maxTimeoutValue {
return strconv.FormatInt(d, 10) + "u"
}
if d := div(t, time.Millisecond); d <= maxTimeoutValue {
return strconv.FormatInt(d, 10) + "m"
}
if d := div(t, time.Second); d <= maxTimeoutValue {
return strconv.FormatInt(d, 10) + "S"
}
if d := div(t, time.Minute); d <= maxTimeoutValue {
return strconv.FormatInt(d, 10) + "M"
}
// Note that maxTimeoutValue * time.Hour > MaxInt64.
return strconv.FormatInt(div(t, time.Hour), 10) + "H"
}
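A few hand-computed mappings implied by EncodeDuration above, written as a hypothetical in-package test (the encoder picks the finest unit whose value still fits in eight decimal digits):

```go
package grpcutil

import (
	"testing"
	"time"
)

// Hypothetical test sketch; the expected strings follow from div and
// maxTimeoutValue above, not from external documentation.
func TestEncodeDurationExamples(t *testing.T) {
	for _, tc := range []struct {
		in   time.Duration
		want string
	}{
		{0, "0n"},                             // non-positive durations clamp to zero
		{50 * time.Millisecond, "50000000n"},  // 5e7 ns still fits
		{1500 * time.Millisecond, "1500000u"}, // 1.5e9 ns overflows, microseconds fit
		{48 * time.Hour, "172800S"},           // milliseconds overflow, whole seconds fit
	} {
		if got := EncodeDuration(tc.in); got != tc.want {
			t.Errorf("EncodeDuration(%v) = %q, want %q", tc.in, got, tc.want)
		}
	}
}
```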

View File

@ -1,40 +0,0 @@
/*
*
* Copyright 2020 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package grpcutil
import (
"context"
"google.golang.org/grpc/metadata"
)
type mdExtraKey struct{}
// WithExtraMetadata creates a new context with incoming md attached.
func WithExtraMetadata(ctx context.Context, md metadata.MD) context.Context {
return context.WithValue(ctx, mdExtraKey{}, md)
}
// ExtraMetadata returns the incoming metadata in ctx if it exists. The
// returned MD should not be modified. Writing to it may cause races.
// Modification should be made to copies of the returned MD.
func ExtraMetadata(ctx context.Context) (md metadata.MD, ok bool) {
md, ok = ctx.Value(mdExtraKey{}).(metadata.MD)
return
}

View File

@ -1,84 +0,0 @@
/*
*
* Copyright 2018 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package grpcutil
import (
"errors"
"strings"
)
// ParseMethod splits service and method from the input. It expects format
// "/service/method".
//
func ParseMethod(methodName string) (service, method string, _ error) {
if !strings.HasPrefix(methodName, "/") {
return "", "", errors.New("invalid method name: should start with /")
}
methodName = methodName[1:]
pos := strings.LastIndex(methodName, "/")
if pos < 0 {
return "", "", errors.New("invalid method name: suffix /method is missing")
}
return methodName[:pos], methodName[pos+1:], nil
}
const baseContentType = "application/grpc"
// ContentSubtype returns the content-subtype for the given content-type. The
// given content-type must be a valid content-type that starts with
// "application/grpc". A content-subtype will follow "application/grpc" after a
// "+" or ";". See
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
// more details.
//
// If contentType is not a valid content-type for gRPC, the boolean
// will be false, otherwise true. If content-type == "application/grpc",
// "application/grpc+", or "application/grpc;", the boolean will be true,
// but no content-subtype will be returned.
//
// contentType is assumed to be lowercase already.
func ContentSubtype(contentType string) (string, bool) {
if contentType == baseContentType {
return "", true
}
if !strings.HasPrefix(contentType, baseContentType) {
return "", false
}
// guaranteed since != baseContentType and has baseContentType prefix
switch contentType[len(baseContentType)] {
case '+', ';':
// this will return true for "application/grpc+" or "application/grpc;"
// which the previous validContentType function tested to be valid, so we
// just say that no content-subtype is specified in this case
return contentType[len(baseContentType)+1:], true
default:
return "", false
}
}
// ContentType builds full content type with the given sub-type.
//
// contentSubtype is assumed to be lowercase
func ContentType(contentSubtype string) string {
if contentSubtype == "" {
return baseContentType
}
return baseContentType + "+" + contentSubtype
}
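A few concrete inputs and outputs for the helpers above, written as a hypothetical in-package test (values hand-worked from the code, not from external docs):

```go
package grpcutil

import "testing"

func TestMethodAndContentTypeHelpers(t *testing.T) {
	if s, m, err := ParseMethod("/helloworld.Greeter/SayHello"); err != nil || s != "helloworld.Greeter" || m != "SayHello" {
		t.Fatalf("ParseMethod: got (%q, %q, %v)", s, m, err)
	}
	if sub, ok := ContentSubtype("application/grpc+proto"); !ok || sub != "proto" {
		t.Fatalf("ContentSubtype: got (%q, %v)", sub, ok)
	}
	if got := ContentType("proto"); got != "application/grpc+proto" {
		t.Fatalf("ContentType: got %q", got)
	}
}
```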

View File

@ -25,7 +25,6 @@ import (
"time"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/serviceconfig"
)
var (
@ -41,22 +40,9 @@ var (
// NewRequestInfoContext creates a new context based on the argument context attaching
// the passed in RequestInfo to the new context.
NewRequestInfoContext interface{} // func(context.Context, credentials.RequestInfo) context.Context
// NewClientHandshakeInfoContext returns a copy of the input context with
// the passed in ClientHandshakeInfo struct added to it.
NewClientHandshakeInfoContext interface{} // func(context.Context, credentials.ClientHandshakeInfo) context.Context
// ParseServiceConfigForTesting is for creating a fake
// ClientConn for resolver testing only
ParseServiceConfigForTesting interface{} // func(string) *serviceconfig.ParseResult
// EqualServiceConfigForTesting is for testing service config generation and
// parsing. Both a and b should be returned by ParseServiceConfigForTesting.
// This function compares the config without rawJSON stripped, in case the
// there's difference in white space.
EqualServiceConfigForTesting func(a, b serviceconfig.Config) bool
// GetCertificateProviderBuilder returns the registered builder for the
// given name. This is set by package certprovider for use from xDS
// bootstrap code while parsing certificate provider configs in the
// bootstrap file.
GetCertificateProviderBuilder interface{} // func(string) certprovider.Builder
)
// HealthChecker defines the signature of the client-side LB channel health checking function.

View File

@ -32,7 +32,6 @@ import (
"sync"
"time"
grpclbstate "google.golang.org/grpc/balancer/grpclb/state"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal/envconfig"
"google.golang.org/grpc/internal/grpcrand"
@ -44,8 +43,6 @@ import (
// addresses from SRV records. Must not be changed after init time.
var EnableSRVLookups = false
var logger = grpclog.Component("dns")
func init() {
resolver.Register(NewBuilder())
}
@ -254,7 +251,7 @@ func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) {
return nil, fmt.Errorf("dns: error parsing A record IP address %v", a)
}
addr := ip + ":" + strconv.Itoa(int(s.Port))
newAddrs = append(newAddrs, resolver.Address{Addr: addr, ServerName: s.Target})
newAddrs = append(newAddrs, resolver.Address{Addr: addr, Type: resolver.GRPCLB, ServerName: s.Target})
}
}
return newAddrs, nil
@ -274,7 +271,7 @@ func handleDNSError(err error, lookupType string) error {
err = filterError(err)
if err != nil {
err = fmt.Errorf("dns: %v record lookup error: %v", lookupType, err)
logger.Info(err)
grpclog.Infoln(err)
}
return err
}
@ -297,7 +294,7 @@ func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult {
// TXT record must have "grpc_config=" attribute in order to be used as service config.
if !strings.HasPrefix(res, txtAttribute) {
logger.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute)
grpclog.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute)
// This is not an error; it is the equivalent of not having a service config.
return nil
}
@ -329,15 +326,13 @@ func (d *dnsResolver) lookup() (*resolver.State, error) {
if hostErr != nil && (srvErr != nil || len(srv) == 0) {
return nil, hostErr
}
state := resolver.State{Addresses: addrs}
if len(srv) > 0 {
state = grpclbstate.Set(state, &grpclbstate.State{BalancerAddresses: srv})
state := &resolver.State{
Addresses: append(addrs, srv...),
}
if !d.disableServiceConfig {
state.ServiceConfig = d.lookupTXT()
}
return &state, nil
return state, nil
}
// formatIP returns ok = false if addr is not a valid textual representation of an IP address.
@ -423,12 +418,12 @@ func canaryingSC(js string) string {
var rcs []rawChoice
err := json.Unmarshal([]byte(js), &rcs)
if err != nil {
logger.Warningf("dns: error parsing service config json: %v", err)
grpclog.Warningf("dns: error parsing service config json: %v", err)
return ""
}
cliHostname, err := os.Hostname()
if err != nil {
logger.Warningf("dns: error getting client hostname: %v", err)
grpclog.Warningf("dns: error getting client hostname: %v", err)
return ""
}
var sc string

View File

@ -1,106 +0,0 @@
/*
*
* Copyright 2020 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package serviceconfig contains utility functions to parse service config.
package serviceconfig
import (
"encoding/json"
"fmt"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/grpclog"
externalserviceconfig "google.golang.org/grpc/serviceconfig"
)
var logger = grpclog.Component("core")
// BalancerConfig wraps the name and config associated with one load balancing
// policy. It corresponds to a single entry of the loadBalancingConfig field
// from ServiceConfig.
//
// It implements the json.Unmarshaler interface.
//
// https://github.com/grpc/grpc-proto/blob/54713b1e8bc6ed2d4f25fb4dff527842150b91b2/grpc/service_config/service_config.proto#L247
type BalancerConfig struct {
Name string
Config externalserviceconfig.LoadBalancingConfig
}
type intermediateBalancerConfig []map[string]json.RawMessage
// UnmarshalJSON implements the json.Unmarshaler interface.
//
// ServiceConfig contains a list of loadBalancingConfigs, each with a name and
// config. This method iterates through that list in order, and stops at the
// first policy that is supported.
// - If the config for the first supported policy is invalid, the whole service
// config is invalid.
// - If the list doesn't contain any supported policy, the whole service config
// is invalid.
func (bc *BalancerConfig) UnmarshalJSON(b []byte) error {
var ir intermediateBalancerConfig
err := json.Unmarshal(b, &ir)
if err != nil {
return err
}
for i, lbcfg := range ir {
if len(lbcfg) != 1 {
return fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg)
}
var (
name string
jsonCfg json.RawMessage
)
// Get the key:value pair from the map. We have already made sure that
// the map contains a single entry.
for name, jsonCfg = range lbcfg {
}
builder := balancer.Get(name)
if builder == nil {
// If the balancer is not registered, move on to the next config.
// This is not an error.
continue
}
bc.Name = name
parser, ok := builder.(balancer.ConfigParser)
if !ok {
if string(jsonCfg) != "{}" {
logger.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg))
}
// Stop at this, though the builder doesn't support parsing config.
return nil
}
cfg, err := parser.ParseConfig(jsonCfg)
if err != nil {
return fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err)
}
bc.Config = cfg
return nil
}
// This is reached when the for loop iterates over all entries, but didn't
// return. This means we had a loadBalancingConfig slice but did not
// encounter a registered policy. The config is considered invalid in this
// case.
return fmt.Errorf("invalid loadBalancingConfig: no supported policies found")
}
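A standalone illustration of the "first supported policy wins" rule documented above, using only encoding/json (no gRPC imports); the policy names and the supported set are placeholders rather than registered balancers, and config validation is omitted.

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	js := `[{"not_registered":{}},{"round_robin":{}},{"pick_first":{}}]`
	var entries []map[string]json.RawMessage
	if err := json.Unmarshal([]byte(js), &entries); err != nil {
		panic(err)
	}
	supported := map[string]bool{"round_robin": true, "pick_first": true}
	for _, e := range entries {
		for name, cfg := range e { // each entry holds exactly one name/config pair
			if supported[name] {
				fmt.Printf("selected %q with config %s\n", name, cfg)
				return
			}
		}
	}
	fmt.Println("invalid loadBalancingConfig: no supported policies found")
}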

View File

@ -97,7 +97,7 @@ func (s *Status) Err() error {
if s.Code() == codes.OK {
return nil
}
return &Error{e: s.Proto()}
return (*Error)(s.Proto())
}
// WithDetails returns a new status with the provided details messages appended to the status.
@ -136,27 +136,26 @@ func (s *Status) Details() []interface{} {
return details
}
// Error wraps a pointer of a status proto. It implements error and Status,
// and a nil *Error should never be returned by this package.
type Error struct {
e *spb.Status
}
// Error is an alias of a status proto. It implements error and Status,
// and a nil Error should never be returned by this package.
type Error spb.Status
func (e *Error) Error() string {
return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(e.e.GetCode()), e.e.GetMessage())
func (se *Error) Error() string {
p := (*spb.Status)(se)
return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(p.GetCode()), p.GetMessage())
}
// GRPCStatus returns the Status represented by se.
func (e *Error) GRPCStatus() *Status {
return FromProto(e.e)
func (se *Error) GRPCStatus() *Status {
return FromProto((*spb.Status)(se))
}
// Is implements future error.Is functionality.
// A Error is equivalent if the code and message are identical.
func (e *Error) Is(target error) bool {
func (se *Error) Is(target error) bool {
tse, ok := target.(*Error)
if !ok {
return false
}
return proto.Equal(e.e, tse.e)
return proto.Equal((*spb.Status)(se), (*spb.Status)(tse))
}
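Both representations of Error back the same public status API; a small usage sketch of that surface (error construction, then a round-trip through FromError):

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	err := status.Error(codes.NotFound, "user not found")
	st, ok := status.FromError(err)
	fmt.Println(ok, st.Code(), st.Message()) // true NotFound user not found
}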

View File

@ -32,18 +32,16 @@ import (
"google.golang.org/grpc/grpclog"
)
var logger = grpclog.Component("core")
// GetCPUTime returns the how much CPU time has passed since the start of this process.
func GetCPUTime() int64 {
var ts unix.Timespec
if err := unix.ClockGettime(unix.CLOCK_PROCESS_CPUTIME_ID, &ts); err != nil {
logger.Fatal(err)
grpclog.Fatal(err)
}
return ts.Nano()
}
// Rusage is an alias for syscall.Rusage under linux environment.
// Rusage is an alias for syscall.Rusage under linux non-appengine environment.
type Rusage syscall.Rusage
// GetRusage returns the resource usage of current process.

View File

@ -18,8 +18,6 @@
*
*/
// Package syscall provides functionalities that grpc uses to get low-level
// operating system stats/info.
package syscall
import (
@ -31,11 +29,10 @@ import (
)
var once sync.Once
var logger = grpclog.Component("core")
func log() {
once.Do(func() {
logger.Info("CPU time info is unavailable on non-linux or appengine environment.")
grpclog.Info("CPU time info is unavailable on non-linux or appengine environment.")
})
}

View File

@ -505,9 +505,7 @@ func (l *loopyWriter) run() (err error) {
// 1. When the connection is closed by some other known issue.
// 2. User closed the connection.
// 3. A graceful close of connection.
if logger.V(logLevel) {
logger.Infof("transport: loopyWriter.run returning. %v", err)
}
infof("transport: loopyWriter.run returning. %v", err)
err = nil
}
}()
@ -607,9 +605,7 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error {
if l.side == serverSide {
str, ok := l.estdStreams[h.streamID]
if !ok {
if logger.V(logLevel) {
logger.Warningf("transport: loopy doesn't recognize the stream: %d", h.streamID)
}
warningf("transport: loopy doesn't recognize the stream: %d", h.streamID)
return nil
}
// Case 1.A: Server is responding back with headers.
@ -662,9 +658,7 @@ func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.He
l.hBuf.Reset()
for _, f := range hf {
if err := l.hEnc.WriteField(f); err != nil {
if logger.V(logLevel) {
logger.Warningf("transport: loopyWriter.writeHeader encountered error while encoding headers: %v", err)
}
warningf("transport: loopyWriter.writeHeader encountered error while encoding headers:", err)
}
}
var (
@ -863,56 +857,53 @@ func (l *loopyWriter) processData() (bool, error) {
return false, nil
}
var (
idx int
buf []byte
)
// Figure out the maximum size we can send
maxSize := http2MaxFrameLen
if len(dataItem.h) != 0 { // data header has not been written out yet.
buf = dataItem.h
} else {
idx = 1
buf = dataItem.d
}
size := http2MaxFrameLen
if len(buf) < size {
size = len(buf)
}
if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control.
str.state = waitingOnStreamQuota
return false, nil
} else if maxSize > strQuota {
maxSize = strQuota
}
if maxSize > int(l.sendQuota) { // connection-level flow control.
maxSize = int(l.sendQuota)
}
// Compute how much of the header and data we can send within quota and max frame length
hSize := min(maxSize, len(dataItem.h))
dSize := min(maxSize-hSize, len(dataItem.d))
if hSize != 0 {
if dSize == 0 {
buf = dataItem.h
} else {
// We can add some data to grpc message header to distribute bytes more equally across frames.
// Copy on the stack to avoid generating garbage
var localBuf [http2MaxFrameLen]byte
copy(localBuf[:hSize], dataItem.h)
copy(localBuf[hSize:], dataItem.d[:dSize])
buf = localBuf[:hSize+dSize]
}
} else {
buf = dataItem.d
} else if strQuota < size {
size = strQuota
}
size := hSize + dSize
if l.sendQuota < uint32(size) { // connection-level flow control.
size = int(l.sendQuota)
}
// Now that outgoing flow controls are checked we can replenish str's write quota
str.wq.replenish(size)
var endStream bool
// If this is the last data message on this stream and all of it can be written in this iteration.
if dataItem.endStream && len(dataItem.h)+len(dataItem.d) <= size {
if dataItem.endStream && size == len(buf) {
// buf contains either data or it contains header but data is empty.
if idx == 1 || len(dataItem.d) == 0 {
endStream = true
}
}
if dataItem.onEachWrite != nil {
dataItem.onEachWrite()
}
if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil {
return false, err
}
buf = buf[size:]
str.bytesOutStanding += size
l.sendQuota -= uint32(size)
dataItem.h = dataItem.h[hSize:]
dataItem.d = dataItem.d[dSize:]
if idx == 0 {
dataItem.h = buf
} else {
dataItem.d = buf
}
if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out.
str.itl.dequeue()
@ -933,10 +924,3 @@ func (l *loopyWriter) processData() (bool, error) {
}
return false, nil
}
func min(a, b int) int {
if a < b {
return a
}
return b
}
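A worked sketch, with made-up quota numbers, of the min-based header/data split shown in this hunk: the frame size, stream-level quota, and connection-level quota each cap the send, the 5-byte gRPC message header is taken first, and the remainder comes from the payload.

package main

import "fmt"

func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}

func main() {
	const http2MaxFrameLen = 16384 // assumed HTTP/2 max frame size used by the transport
	hdr, data := 5, 40000          // bytes still pending: gRPC message header, then payload
	strQuota, connQuota := 10000, 8000

	maxSize := http2MaxFrameLen
	if strQuota < maxSize {
		maxSize = strQuota
	}
	if connQuota < maxSize {
		maxSize = connQuota
	}
	hSize := min(maxSize, hdr)
	dSize := min(maxSize-hSize, data)
	fmt.Println(hSize, dSize, hSize+dSize) // 5 7995 8000
}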

View File

@ -39,7 +39,6 @@ import (
"golang.org/x/net/http2"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/internal/grpcutil"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
@ -58,7 +57,7 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats sta
}
contentType := r.Header.Get("Content-Type")
// TODO: do we assume contentType is lowercase? we did before
contentSubtype, validContentType := grpcutil.ContentSubtype(contentType)
contentSubtype, validContentType := contentSubtype(contentType)
if !validContentType {
return nil, errors.New("invalid gRPC request content-type")
}

View File

@ -32,7 +32,6 @@ import (
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
"google.golang.org/grpc/internal/grpcutil"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
@ -42,7 +41,6 @@ import (
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/resolver"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
)
@ -163,7 +161,7 @@ func isTemporary(err error) bool {
// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
// and starts to receive messages on it. Non-nil error returns if construction
// fails.
func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) {
func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) {
scheme := "http"
ctx, cancel := context.WithCancel(ctx)
defer func() {
@ -216,20 +214,12 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
}
}
if transportCreds != nil {
// gRPC, resolver, balancer etc. can specify arbitrary data in the
// Attributes field of resolver.Address, which is shoved into connectCtx
// and passed to the credential handshaker. This makes it possible for
// address specific arbitrary data to reach the credential handshaker.
contextWithHandshakeInfo := internal.NewClientHandshakeInfoContext.(func(context.Context, credentials.ClientHandshakeInfo) context.Context)
connectCtx = contextWithHandshakeInfo(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes})
conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, conn)
scheme = "https"
conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.Authority, conn)
if err != nil {
return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err)
}
isSecure = true
if transportCreds.Info().SecurityProtocol == "tls" {
scheme = "https"
}
}
dynamicWindow := true
icwz := int32(initialWindowSize)
@ -355,9 +345,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst)
err := t.loopy.run()
if err != nil {
if logger.V(logLevel) {
logger.Errorf("transport: loopyWriter.run returning. Err: %v", err)
}
errorf("transport: loopyWriter.run returning. Err: %v", err)
}
// If it's a connection error, let reader goroutine handle it
// since there might be data in the buffers.
@ -437,7 +425,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme})
headerFields = append(headerFields, hpack.HeaderField{Name: ":path", Value: callHdr.Method})
headerFields = append(headerFields, hpack.HeaderField{Name: ":authority", Value: callHdr.Host})
headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(callHdr.ContentSubtype)})
headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(callHdr.ContentSubtype)})
headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent})
headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"})
if callHdr.PreviousAttempts > 0 {
@ -452,7 +440,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
// Send out timeout regardless its value. The server can detect timeout context by itself.
// TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire.
timeout := time.Until(dl)
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: grpcutil.EncodeDuration(timeout)})
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: encodeTimeout(timeout)})
}
for k, v := range authData {
headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
@ -566,26 +554,13 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call
return callAuthData, nil
}
// PerformedIOError wraps an error to indicate IO may have been performed
// before the error occurred.
type PerformedIOError struct {
Err error
}
// Error implements error.
func (p PerformedIOError) Error() string {
return p.Err.Error()
}
// NewStream creates a stream and registers it into the transport as "active"
// streams.
func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) {
ctx = peer.NewContext(ctx, t.getPeer())
headerFields, err := t.createHeaderFields(ctx, callHdr)
if err != nil {
// We may have performed I/O in the per-RPC creds callback, so do not
// allow transparent retry.
return nil, PerformedIOError{err}
return nil, err
}
s := t.newStream(ctx, callHdr)
cleanup := func(err error) {
@ -879,10 +854,18 @@ func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
df := &dataFrame{
streamID: s.id,
endStream: opts.Last,
h: hdr,
d: data,
}
if hdr != nil || data != nil { // If it's not an empty data frame, check quota.
if hdr != nil || data != nil { // If it's not an empty data frame.
// Add some data to grpc message header so that we can equally
// distribute bytes across frames.
emptyLen := http2MaxFrameLen - len(hdr)
if emptyLen > len(data) {
emptyLen = len(data)
}
hdr = append(hdr, data[:emptyLen]...)
data = data[emptyLen:]
df.h, df.d = hdr, data
// TODO(mmukhi): The above logic in this if can be moved to loopyWriter's data handler.
if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
return err
}
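A tiny arithmetic sketch of the padding step above (the 5-byte gRPC message header and the 16384-byte HTTP/2 frame length are assumed constants): enough payload is moved next to the header that the first DATA frame goes out full.

package main

import "fmt"

func main() {
	const http2MaxFrameLen = 16384 // assumed transport frame size
	hdrLen, dataLen := 5, 20000    // gRPC message header plus payload

	emptyLen := http2MaxFrameLen - hdrLen
	if emptyLen > dataLen {
		emptyLen = dataLen
	}
	// The first emptyLen payload bytes travel with the header, so the first
	// DATA frame written by loopy is exactly one full frame.
	fmt.Println(hdrLen+emptyLen, dataLen-emptyLen) // 16384 3621
}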
@ -1016,9 +999,7 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
}
statusCode, ok := http2ErrConvTab[f.ErrCode]
if !ok {
if logger.V(logLevel) {
logger.Warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode)
}
warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode)
statusCode = codes.Unknown
}
if statusCode == codes.Canceled {
@ -1100,9 +1081,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
return
}
if f.ErrCode == http2.ErrCodeEnhanceYourCalm {
if logger.V(logLevel) {
logger.Infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.")
}
infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.")
}
id := f.LastStreamID
if id > 0 && id%2 != 1 {
@ -1206,8 +1185,8 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
state := &decodeState{}
// Initialize isGRPC value to be !initialHeader, since if a gRPC Response-Headers has already been received, then it means that the peer is speaking gRPC and we are in gRPC mode.
state.data.isGRPC = !initialHeader
if h2code, err := state.decodeHeader(frame); err != nil {
t.closeStream(s, err, true, h2code, status.Convert(err), nil, endStream)
if err := state.decodeHeader(frame); err != nil {
t.closeStream(s, err, true, http2.ErrCodeProtocol, status.Convert(err), nil, endStream)
return
}
@ -1306,13 +1285,7 @@ func (t *http2Client) reader() {
if s != nil {
// use error detail to provide better err message
code := http2ErrConvTab[se.Code]
errorDetail := t.framer.fr.ErrorDetail()
var msg string
if errorDetail != nil {
msg = errorDetail.Error()
} else {
msg = "received invalid frame"
}
msg := t.framer.fr.ErrorDetail().Error()
t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false)
}
continue
@ -1338,9 +1311,7 @@ func (t *http2Client) reader() {
case *http2.WindowUpdateFrame:
t.handleWindowUpdate(frame)
default:
if logger.V(logLevel) {
logger.Errorf("transport: http2Client.reader got unhandled frame type %v.", frame)
}
errorf("transport: http2Client.reader got unhandled frame type %v.", frame)
}
}
}

View File

@ -34,10 +34,10 @@ import (
"github.com/golang/protobuf/proto"
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
"google.golang.org/grpc/internal/grpcutil"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpcrand"
"google.golang.org/grpc/keepalive"
@ -289,9 +289,7 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst)
t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler
if err := t.loopy.run(); err != nil {
if logger.V(logLevel) {
logger.Errorf("transport: loopyWriter.run returning. Err: %v", err)
}
errorf("transport: loopyWriter.run returning. Err: %v", err)
}
t.conn.Close()
close(t.writerDone)
@ -306,12 +304,12 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
state := &decodeState{
serverSide: true,
}
if h2code, err := state.decodeHeader(frame); err != nil {
if _, ok := status.FromError(err); ok {
if err := state.decodeHeader(frame); err != nil {
if se, ok := status.FromError(err); ok {
t.controlBuf.put(&cleanupStream{
streamID: streamID,
rst: true,
rstCode: h2code,
rstCode: statusCodeConvTab[se.Code()],
onWrite: func() {},
})
}
@ -362,9 +360,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
}
s.ctx, err = t.inTapHandle(s.ctx, info)
if err != nil {
if logger.V(logLevel) {
logger.Warningf("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err)
}
warningf("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err)
t.controlBuf.put(&cleanupStream{
streamID: s.id,
rst: true,
@ -395,9 +391,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
if streamID%2 != 1 || streamID <= t.maxStreamID {
t.mu.Unlock()
// illegal gRPC stream id.
if logger.V(logLevel) {
logger.Errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID)
}
errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID)
s.cancel()
return true
}
@ -460,9 +454,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.
atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
if err != nil {
if se, ok := err.(http2.StreamError); ok {
if logger.V(logLevel) {
logger.Warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se)
}
warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se)
t.mu.Lock()
s := t.activeStreams[se.StreamID]
t.mu.Unlock()
@ -482,9 +474,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.
t.Close()
return
}
if logger.V(logLevel) {
logger.Warningf("transport: http2Server.HandleStreams failed to read frame: %v", err)
}
warningf("transport: http2Server.HandleStreams failed to read frame: %v", err)
t.Close()
return
}
@ -507,9 +497,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.
case *http2.GoAwayFrame:
// TODO: Handle GoAway from the client appropriately.
default:
if logger.V(logLevel) {
logger.Errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame)
}
errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame)
}
}
}
@ -611,10 +599,6 @@ func (t *http2Server) handleData(f *http2.DataFrame) {
if !ok {
return
}
if s.getState() == streamReadDone {
t.closeStream(s, true, http2.ErrCodeStreamClosed, false)
return
}
if size > 0 {
if err := s.fc.onData(size); err != nil {
t.closeStream(s, true, http2.ErrCodeFlowControl, false)
@ -735,9 +719,7 @@ func (t *http2Server) handlePing(f *http2.PingFrame) {
if t.pingStrikes > maxPingStrikes {
// Send goaway and close the connection.
if logger.V(logLevel) {
logger.Errorf("transport: Got too many pings from the client, closing the connection.")
}
errorf("transport: Got too many pings from the client, closing the connection.")
t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true})
}
}
@ -770,9 +752,7 @@ func (t *http2Server) checkForHeaderListSize(it interface{}) bool {
var sz int64
for _, f := range hdrFrame.hf {
if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) {
if logger.V(logLevel) {
logger.Errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize)
}
errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize)
return false
}
}
@ -809,7 +789,7 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error {
// first and create a slice of that exact size.
headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else.
headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(s.contentSubtype)})
headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)})
if s.sendCompress != "" {
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
}
@ -859,7 +839,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
}
} else { // Send a trailer only response.
headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(s.contentSubtype)})
headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)})
}
}
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))})
@ -869,7 +849,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
stBytes, err := proto.Marshal(p)
if err != nil {
// TODO: return error instead, when callers are able to handle it.
logger.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err)
grpclog.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err)
} else {
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)})
}
@ -929,6 +909,13 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
return ContextErr(s.ctx.Err())
}
}
// Add some data to header frame so that we can equally distribute bytes across frames.
emptyLen := http2MaxFrameLen - len(hdr)
if emptyLen > len(data) {
emptyLen = len(data)
}
hdr = append(hdr, data[:emptyLen]...)
data = data[emptyLen:]
df := &dataFrame{
streamID: s.id,
h: hdr,
@ -1000,9 +987,7 @@ func (t *http2Server) keepalive() {
select {
case <-ageTimer.C:
// Close the connection after grace period.
if logger.V(logLevel) {
logger.Infof("transport: closing server transport due to maximum connection age.")
}
infof("transport: closing server transport due to maximum connection age.")
t.Close()
case <-t.done:
}
@ -1019,9 +1004,7 @@ func (t *http2Server) keepalive() {
continue
}
if outstandingPing && kpTimeoutLeft <= 0 {
if logger.V(logLevel) {
logger.Infof("transport: closing server transport due to idleness.")
}
infof("transport: closing server transport due to idleness.")
t.Close()
return
}

View File

@ -37,8 +37,6 @@ import (
"golang.org/x/net/http2/hpack"
spb "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal/grpcutil"
"google.golang.org/grpc/status"
)
@ -52,7 +50,7 @@ const (
// "proto" as a suffix after "+" or ";". See
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
// for more details.
baseContentType = "application/grpc"
)
var (
@ -73,6 +71,13 @@ var (
http2.ErrCodeInadequateSecurity: codes.PermissionDenied,
http2.ErrCodeHTTP11Required: codes.Internal,
}
statusCodeConvTab = map[codes.Code]http2.ErrCode{
codes.Internal: http2.ErrCodeInternal,
codes.Canceled: http2.ErrCodeCancel,
codes.Unavailable: http2.ErrCodeRefusedStream,
codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm,
codes.PermissionDenied: http2.ErrCodeInadequateSecurity,
}
// HTTPStatusConvTab is the HTTP status code to gRPC error code conversion table.
HTTPStatusConvTab = map[int]codes.Code{
// 400 Bad Request - INTERNAL.
@ -92,7 +97,6 @@ var (
// 504 Gateway timeout - UNAVAILABLE.
http.StatusGatewayTimeout: codes.Unavailable,
}
logger = grpclog.Component("transport")
)
type parsedHeaderData struct {
@ -178,6 +182,46 @@ func isWhitelistedHeader(hdr string) bool {
}
}
// contentSubtype returns the content-subtype for the given content-type. The
// given content-type must be a valid content-type that starts with
// "application/grpc". A content-subtype will follow "application/grpc" after a
// "+" or ";". See
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
// more details.
//
// If contentType is not a valid content-type for gRPC, the boolean
// will be false, otherwise true. If content-type == "application/grpc",
// "application/grpc+", or "application/grpc;", the boolean will be true,
// but no content-subtype will be returned.
//
// contentType is assumed to be lowercase already.
func contentSubtype(contentType string) (string, bool) {
if contentType == baseContentType {
return "", true
}
if !strings.HasPrefix(contentType, baseContentType) {
return "", false
}
// guaranteed since != baseContentType and has baseContentType prefix
switch contentType[len(baseContentType)] {
case '+', ';':
// this will return true for "application/grpc+" or "application/grpc;"
// which the previous validContentType function tested to be valid, so we
// just say that no content-subtype is specified in this case
return contentType[len(baseContentType)+1:], true
default:
return "", false
}
}
// contentSubtype is assumed to be lowercase
func contentType(contentSubtype string) string {
if contentSubtype == "" {
return baseContentType
}
return baseContentType + "+" + contentSubtype
}
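A short sketch of the wire values these helpers produce, mirroring the unexported contentType function locally (the originals cannot be imported):

package main

import "fmt"

// contentType mirrors the helper above: the value sent in the
// "content-type" header for a given content-subtype.
func contentType(contentSubtype string) string {
	if contentSubtype == "" {
		return "application/grpc"
	}
	return "application/grpc+" + contentSubtype
}

func main() {
	fmt.Println(contentType(""))      // application/grpc
	fmt.Println(contentType("proto")) // application/grpc+proto
	fmt.Println(contentType("json"))  // application/grpc+json
	// Going the other way, contentSubtype accepts exactly these forms (plus the
	// ";"-separated variant) and rejects anything lacking the
	// "application/grpc" prefix.
}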
func (d *decodeState) status() *status.Status {
if d.data.statusGen == nil {
// No status-details were provided; generate status using code/msg.
@ -215,11 +259,11 @@ func decodeMetadataHeader(k, v string) (string, error) {
return v, nil
}
func (d *decodeState) decodeHeader(frame *http2.MetaHeadersFrame) (http2.ErrCode, error) {
func (d *decodeState) decodeHeader(frame *http2.MetaHeadersFrame) error {
// frame.Truncated is set to true when framer detects that the current header
// list size hits MaxHeaderListSize limit.
if frame.Truncated {
return http2.ErrCodeFrameSize, status.Error(codes.Internal, "peer header list size exceeded limit")
return status.Error(codes.Internal, "peer header list size exceeded limit")
}
for _, hf := range frame.Fields {
@ -228,10 +272,10 @@ func (d *decodeState) decodeHeader(frame *http2.MetaHeadersFrame) (http2.ErrCode
if d.data.isGRPC {
if d.data.grpcErr != nil {
return http2.ErrCodeProtocol, d.data.grpcErr
return d.data.grpcErr
}
if d.serverSide {
return http2.ErrCodeNo, nil
return nil
}
if d.data.rawStatusCode == nil && d.data.statusGen == nil {
// gRPC status doesn't exist.
@ -243,12 +287,12 @@ func (d *decodeState) decodeHeader(frame *http2.MetaHeadersFrame) (http2.ErrCode
code := int(codes.Unknown)
d.data.rawStatusCode = &code
}
return http2.ErrCodeNo, nil
return nil
}
// HTTP fallback mode
if d.data.httpErr != nil {
return http2.ErrCodeProtocol, d.data.httpErr
return d.data.httpErr
}
var (
@ -263,7 +307,7 @@ func (d *decodeState) decodeHeader(frame *http2.MetaHeadersFrame) (http2.ErrCode
}
}
return http2.ErrCodeProtocol, status.Error(code, d.constructHTTPErrMsg())
return status.Error(code, d.constructHTTPErrMsg())
}
// constructErrMsg constructs error message to be returned in HTTP fallback mode.
@ -296,7 +340,7 @@ func (d *decodeState) addMetadata(k, v string) {
func (d *decodeState) processHeaderField(f hpack.HeaderField) {
switch f.Name {
case "content-type":
contentSubtype, validContentType := grpcutil.ContentSubtype(f.Value)
contentSubtype, validContentType := contentSubtype(f.Value)
if !validContentType {
d.data.contentTypeErr = fmt.Sprintf("transport: received the unexpected content-type %q", f.Value)
return
@ -368,9 +412,7 @@ func (d *decodeState) processHeaderField(f hpack.HeaderField) {
}
v, err := decodeMetadataHeader(f.Name, f.Value)
if err != nil {
if logger.V(logLevel) {
logger.Errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err)
}
errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err)
return
}
d.addMetadata(f.Name, v)
@ -407,6 +449,41 @@ func timeoutUnitToDuration(u timeoutUnit) (d time.Duration, ok bool) {
return
}
const maxTimeoutValue int64 = 100000000 - 1
// div does integer division and round-up the result. Note that this is
// equivalent to (d+r-1)/r but has less chance to overflow.
func div(d, r time.Duration) int64 {
if m := d % r; m > 0 {
return int64(d/r + 1)
}
return int64(d / r)
}
// TODO(zhaoq): It is the simplistic and not bandwidth efficient. Improve it.
func encodeTimeout(t time.Duration) string {
if t <= 0 {
return "0n"
}
if d := div(t, time.Nanosecond); d <= maxTimeoutValue {
return strconv.FormatInt(d, 10) + "n"
}
if d := div(t, time.Microsecond); d <= maxTimeoutValue {
return strconv.FormatInt(d, 10) + "u"
}
if d := div(t, time.Millisecond); d <= maxTimeoutValue {
return strconv.FormatInt(d, 10) + "m"
}
if d := div(t, time.Second); d <= maxTimeoutValue {
return strconv.FormatInt(d, 10) + "S"
}
if d := div(t, time.Minute); d <= maxTimeoutValue {
return strconv.FormatInt(d, 10) + "M"
}
// Note that maxTimeoutValue * time.Hour > MaxInt64.
return strconv.FormatInt(div(t, time.Hour), 10) + "H"
}
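A standalone sketch of the grpc-timeout encoding above, with worked outputs; div and encodeTimeout are unexported, so the logic is mirrored locally for illustration.

package main

import (
	"fmt"
	"strconv"
	"time"
)

const maxTimeoutValue int64 = 100000000 - 1

// div mirrors the round-up integer division above.
func div(d, r time.Duration) int64 {
	if m := d % r; m > 0 {
		return int64(d/r + 1)
	}
	return int64(d / r)
}

// encodeTimeout mirrors the unit-fallback encoding: pick the smallest unit
// whose count still fits in eight decimal digits.
func encodeTimeout(t time.Duration) string {
	if t <= 0 {
		return "0n"
	}
	units := []struct {
		d      time.Duration
		suffix string
	}{
		{time.Nanosecond, "n"}, {time.Microsecond, "u"}, {time.Millisecond, "m"},
		{time.Second, "S"}, {time.Minute, "M"},
	}
	for _, u := range units {
		if v := div(t, u.d); v <= maxTimeoutValue {
			return strconv.FormatInt(v, 10) + u.suffix
		}
	}
	return strconv.FormatInt(div(t, time.Hour), 10) + "H"
}

func main() {
	fmt.Println(encodeTimeout(250 * time.Microsecond))  // 250000n
	fmt.Println(encodeTimeout(1500 * time.Millisecond)) // 1500000u
	fmt.Println(encodeTimeout(2 * time.Hour))           // 7200000m
}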
func decodeTimeout(s string) (time.Duration, error) {
size := len(s)
if size < 2 {

View File

@ -35,14 +35,11 @@ import (
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/resolver"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
"google.golang.org/grpc/tap"
)
const logLevel = 2
type bufferPool struct {
pool sync.Pool
}
@ -571,10 +568,17 @@ type ConnectOptions struct {
MaxHeaderListSize *uint32
}
// TargetInfo contains the information of the target such as network address and metadata.
type TargetInfo struct {
Addr string
Metadata interface{}
Authority string
}
// NewClientTransport establishes the transport with the required ConnectOptions
// and returns it to the caller.
func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) {
return newHTTP2Client(connectCtx, ctx, addr, opts, onPrefaceReceipt, onGoAway, onClose)
func NewClientTransport(connectCtx, ctx context.Context, target TargetInfo, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) {
return newHTTP2Client(connectCtx, ctx, target, opts, onPrefaceReceipt, onGoAway, onClose)
}
// Options provides additional hints and information for message

View File

@ -20,31 +20,80 @@ package grpc
import (
"context"
"fmt"
"io"
"sync"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/status"
)
// v2PickerWrapper wraps a balancer.Picker while providing the
// balancer.V2Picker API. It requires a pickerWrapper to generate errors
// including the latest connectionError. To be deleted when balancer.Picker is
// updated to the balancer.V2Picker API.
type v2PickerWrapper struct {
picker balancer.Picker
connErr *connErr
}
func (v *v2PickerWrapper) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
sc, done, err := v.picker.Pick(info.Ctx, info)
if err != nil {
if err == balancer.ErrTransientFailure {
return balancer.PickResult{}, balancer.TransientFailureError(fmt.Errorf("%v, latest connection error: %v", err, v.connErr.connectionError()))
}
return balancer.PickResult{}, err
}
return balancer.PickResult{SubConn: sc, Done: done}, nil
}
// pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick
// actions and unblock when there's a picker update.
type pickerWrapper struct {
mu sync.Mutex
done bool
blockingCh chan struct{}
picker balancer.Picker
picker balancer.V2Picker
// The latest connection error. TODO: remove when V1 picker is deprecated;
// balancer should be responsible for providing the error.
*connErr
}
type connErr struct {
mu sync.Mutex
err error
}
func (c *connErr) updateConnectionError(err error) {
c.mu.Lock()
c.err = err
c.mu.Unlock()
}
func (c *connErr) connectionError() error {
c.mu.Lock()
err := c.err
c.mu.Unlock()
return err
}
func newPickerWrapper() *pickerWrapper {
return &pickerWrapper{blockingCh: make(chan struct{})}
return &pickerWrapper{blockingCh: make(chan struct{}), connErr: &connErr{}}
}
// updatePicker is called by UpdateBalancerState. It unblocks all blocked pick.
func (pw *pickerWrapper) updatePicker(p balancer.Picker) {
pw.updatePickerV2(&v2PickerWrapper{picker: p, connErr: pw.connErr})
}
// updatePicker is called by UpdateBalancerState. It unblocks all blocked pick.
func (pw *pickerWrapper) updatePickerV2(p balancer.V2Picker) {
pw.mu.Lock()
if pw.done {
pw.mu.Unlock()
@ -105,6 +154,8 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
var errStr string
if lastPickErr != nil {
errStr = "latest balancer error: " + lastPickErr.Error()
} else if connectionErr := pw.connectionError(); connectionErr != nil {
errStr = "latest connection error: " + connectionErr.Error()
} else {
errStr = ctx.Err().Error()
}
@ -129,22 +180,23 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
if err == balancer.ErrNoSubConnAvailable {
continue
}
if _, ok := status.FromError(err); ok {
// Status error: end the RPC unconditionally with this status.
return nil, nil, err
}
// For all other errors, wait for ready RPCs should block and other
// RPCs should fail with unavailable.
if tfe, ok := err.(interface{ IsTransientFailure() bool }); ok && tfe.IsTransientFailure() {
if !failfast {
lastPickErr = err
continue
}
return nil, nil, status.Error(codes.Unavailable, err.Error())
}
if _, ok := status.FromError(err); ok {
return nil, nil, err
}
// err is some other error.
return nil, nil, status.Error(codes.Unknown, err.Error())
}
acw, ok := pickResult.SubConn.(*acBalancerWrapper)
if !ok {
logger.Error("subconn returned from pick is not *acBalancerWrapper")
grpclog.Error("subconn returned from pick is not *acBalancerWrapper")
continue
}
if t, ok := acw.getAddrConn().getReadyTransport(); ok {
@ -158,7 +210,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
// DoneInfo with default value works.
pickResult.Done(balancer.DoneInfo{})
}
logger.Infof("blockingPicker: the picked transport is not ready, loop back to repick")
grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick")
// If ok == false, ac.state is not READY.
// A valid picker always returns READY subConn. This means the state of ac
// just changed, and picker will be updated shortly.

View File

@ -20,10 +20,13 @@ package grpc
import (
"errors"
"fmt"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/resolver"
"google.golang.org/grpc/status"
)
// PickFirstBalancerName is the name of the pick_first balancer.
@ -49,16 +52,30 @@ type pickfirstBalancer struct {
sc balancer.SubConn
}
var _ balancer.V2Balancer = &pickfirstBalancer{} // Assert we implement v2
func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
if err != nil {
b.ResolverError(err)
return
}
b.UpdateClientConnState(balancer.ClientConnState{ResolverState: resolver.State{Addresses: addrs}}) // Ignore error
}
func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
b.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: s})
}
func (b *pickfirstBalancer) ResolverError(err error) {
switch b.state {
case connectivity.TransientFailure, connectivity.Idle, connectivity.Connecting:
// Set a failing picker if we don't have a good picker.
b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure,
Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)},
})
Picker: &picker{err: status.Errorf(codes.Unavailable, "name resolver error: %v", err)}},
)
}
if logger.V(2) {
logger.Infof("pickfirstBalancer: ResolverError called with error %v", err)
if grpclog.V(2) {
grpclog.Infof("pickfirstBalancer: ResolverError called with error %v", err)
}
}
@ -71,13 +88,13 @@ func (b *pickfirstBalancer) UpdateClientConnState(cs balancer.ClientConnState) e
var err error
b.sc, err = b.cc.NewSubConn(cs.ResolverState.Addresses, balancer.NewSubConnOptions{})
if err != nil {
if logger.V(2) {
logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err)
if grpclog.V(2) {
grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err)
}
b.state = connectivity.TransientFailure
b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure,
Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)},
})
Picker: &picker{err: status.Errorf(codes.Unavailable, "error creating connection: %v", err)}},
)
return balancer.ErrBadResolverState
}
b.state = connectivity.Idle
@ -91,12 +108,12 @@ func (b *pickfirstBalancer) UpdateClientConnState(cs balancer.ClientConnState) e
}
func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) {
if logger.V(2) {
logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", sc, s)
if grpclog.V(2) {
grpclog.Infof("pickfirstBalancer: HandleSubConnStateChange: %p, %v", sc, s)
}
if b.sc != sc {
if logger.V(2) {
logger.Infof("pickfirstBalancer: ignored state change because sc is not recognized")
if grpclog.V(2) {
grpclog.Infof("pickfirstBalancer: ignored state change because sc is not recognized")
}
return
}
@ -112,9 +129,15 @@ func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.S
case connectivity.Connecting:
b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable}})
case connectivity.TransientFailure:
err := balancer.ErrTransientFailure
// TODO: this can be unconditional after the V1 API is removed, as
// SubConnState will always contain a connection error.
if s.ConnectionError != nil {
err = balancer.TransientFailureError(s.ConnectionError)
}
b.cc.UpdateState(balancer.State{
ConnectivityState: s.ConnectivityState,
Picker: &picker{err: s.ConnectionError},
Picker: &picker{err: err},
})
}
}

View File

@ -25,10 +25,7 @@ import (
// PreparedMsg is responsible for creating a Marshalled and Compressed object.
//
// Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
// This API is EXPERIMENTAL.
type PreparedMsg struct {
// Struct for preparing msg before sending them
encodedData []byte

View File

@ -1,139 +0,0 @@
#!/bin/bash
# Copyright 2020 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -eu -o pipefail
WORKDIR=$(mktemp -d)
function finish {
rm -rf "$WORKDIR"
}
trap finish EXIT
export GOBIN=${WORKDIR}/bin
export PATH=${GOBIN}:${PATH}
mkdir -p ${GOBIN}
echo "remove existing generated files"
# grpc_testingv3/testv3.pb.go is not re-generated because it was
# intentionally generated by an older version of protoc-gen-go.
rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testingv3/testv3.pb.go')
echo "go install github.com/golang/protobuf/protoc-gen-go"
(cd test/tools && go install github.com/golang/protobuf/protoc-gen-go)
echo "go install cmd/protoc-gen-go-grpc"
(cd cmd/protoc-gen-go-grpc && go install .)
echo "git clone https://github.com/grpc/grpc-proto"
git clone --quiet https://github.com/grpc/grpc-proto ${WORKDIR}/grpc-proto
# Pull in code.proto as a proto dependency
mkdir -p ${WORKDIR}/googleapis/google/rpc
echo "curl https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto"
curl --silent https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto > ${WORKDIR}/googleapis/google/rpc/code.proto
# Pull in the following repos to build the MeshCA config proto.
ENVOY_API_REPOS=(
"https://github.com/envoyproxy/data-plane-api"
"https://github.com/cncf/udpa"
"https://github.com/envoyproxy/protoc-gen-validate"
)
for repo in ${ENVOY_API_REPOS[@]}; do
dirname=$(basename ${repo})
mkdir -p ${WORKDIR}/${dirname}
echo "git clone ${repo}"
git clone --quiet ${repo} ${WORKDIR}/${dirname}
done
# Pull in the MeshCA service proto.
mkdir -p ${WORKDIR}/istio/istio/google/security/meshca/v1
echo "curl https://raw.githubusercontent.com/istio/istio/master/security/proto/providers/google/meshca.proto"
curl --silent https://raw.githubusercontent.com/istio/istio/master/security/proto/providers/google/meshca.proto > ${WORKDIR}/istio/istio/google/security/meshca/v1/meshca.proto
mkdir -p ${WORKDIR}/out
# Generates sources without the embed requirement
LEGACY_SOURCES=(
${WORKDIR}/grpc-proto/grpc/binlog/v1/binarylog.proto
${WORKDIR}/grpc-proto/grpc/channelz/v1/channelz.proto
${WORKDIR}/grpc-proto/grpc/health/v1/health.proto
${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto
profiling/proto/service.proto
reflection/grpc_reflection_v1alpha/reflection.proto
)
# Generates only the new gRPC Service symbols
SOURCES=(
$(git ls-files --exclude-standard --cached --others "*.proto" | grep -v '^\(profiling/proto/service.proto\|reflection/grpc_reflection_v1alpha/reflection.proto\)$')
${WORKDIR}/grpc-proto/grpc/gcp/altscontext.proto
${WORKDIR}/grpc-proto/grpc/gcp/handshaker.proto
${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto
${WORKDIR}/grpc-proto/grpc/lookup/v1/rls.proto
${WORKDIR}/grpc-proto/grpc/lookup/v1/rls_config.proto
${WORKDIR}/grpc-proto/grpc/service_config/service_config.proto
${WORKDIR}/grpc-proto/grpc/tls/provider/meshca/experimental/config.proto
${WORKDIR}/istio/istio/google/security/meshca/v1/meshca.proto
)
# These options of the form 'Mfoo.proto=bar' instruct the codegen to use an
# import path of 'bar' in the generated code when 'foo.proto' is imported in
# one of the sources.
OPTS=Mgrpc/service_config/service_config.proto=/internal/proto/grpc_service_config,\
Menvoy/config/core/v3/config_source.proto=github.com/envoyproxy/go-control-plane/envoy/config/core/v3
for src in ${SOURCES[@]}; do
echo "protoc ${src}"
protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS}:${WORKDIR}/out \
-I"." \
-I${WORKDIR}/grpc-proto \
-I${WORKDIR}/googleapis \
-I${WORKDIR}/data-plane-api \
-I${WORKDIR}/udpa \
-I${WORKDIR}/protoc-gen-validate \
-I${WORKDIR}/istio \
${src}
done
for src in ${LEGACY_SOURCES[@]}; do
echo "protoc ${src}"
protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},require_unimplemented_servers=false:${WORKDIR}/out \
-I"." \
-I${WORKDIR}/grpc-proto \
-I${WORKDIR}/googleapis \
-I${WORKDIR}/data-plane-api \
-I${WORKDIR}/udpa \
-I${WORKDIR}/protoc-gen-validate \
-I${WORKDIR}/istio \
${src}
done
# The go_package option in grpc/lookup/v1/rls.proto doesn't match the
# current location. Move it into the right place.
mkdir -p ${WORKDIR}/out/google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1
mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1
# grpc_testingv3/testv3.pb.go is not re-generated because it was
# intentionally generated by an older version of protoc-gen-go.
rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testingv3/*.pb.go
# grpc/service_config/service_config.proto does not have a go_package option.
mv ${WORKDIR}/out/grpc/service_config/service_config.pb.go internal/proto/grpc_service_config
# istio/google/security/meshca/v1/meshca.proto does not have a go_package option.
mkdir -p ${WORKDIR}/out/google.golang.org/grpc/credentials/tls/certprovider/meshca/internal/v1/
mv ${WORKDIR}/out/istio/google/security/meshca/v1/* ${WORKDIR}/out/google.golang.org/grpc/credentials/tls/certprovider/meshca/internal/v1/
cp -R ${WORKDIR}/out/google.golang.org/grpc/* .

View File

@ -85,19 +85,12 @@ const (
Backend AddressType = iota
// GRPCLB indicates the address is for a grpclb load balancer.
//
// Deprecated: to select the GRPCLB load balancing policy, use a service
// config with a corresponding loadBalancingConfig. To supply balancer
// addresses to the GRPCLB load balancing policy, set State.Attributes
// using balancer/grpclb/state.Set.
// Deprecated: use Attributes in Address instead.
GRPCLB
)
// Address represents a server the client connects to.
//
// Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
// This is the EXPERIMENTAL API and may be changed or extended in the future.
type Address struct {
// Addr is the server address on which a connection will be established.
Addr string

View File

@ -140,7 +140,7 @@ func (ccr *ccResolverWrapper) UpdateState(s resolver.State) {
if ccr.done.HasFired() {
return
}
channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending update to cc: %v", s)
channelz.Infof(ccr.cc.channelzID, "ccResolverWrapper: sending update to cc: %v", s)
if channelz.IsOn() {
ccr.addChannelzTraceEvent(s)
}
@ -152,7 +152,7 @@ func (ccr *ccResolverWrapper) ReportError(err error) {
if ccr.done.HasFired() {
return
}
channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err)
channelz.Warningf(ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err)
ccr.poll(ccr.cc.updateResolverState(resolver.State{}, err))
}
@ -161,7 +161,7 @@ func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
if ccr.done.HasFired() {
return
}
channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending new addresses to cc: %v", addrs)
channelz.Infof(ccr.cc.channelzID, "ccResolverWrapper: sending new addresses to cc: %v", addrs)
if channelz.IsOn() {
ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig})
}
@ -175,14 +175,14 @@ func (ccr *ccResolverWrapper) NewServiceConfig(sc string) {
if ccr.done.HasFired() {
return
}
channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %v", sc)
channelz.Infof(ccr.cc.channelzID, "ccResolverWrapper: got new service config: %v", sc)
if ccr.cc.dopts.disableServiceConfig {
channelz.Info(logger, ccr.cc.channelzID, "Service config lookups disabled; ignoring config")
channelz.Info(ccr.cc.channelzID, "Service config lookups disabled; ignoring config")
return
}
scpr := parseServiceConfig(sc)
if scpr.Err != nil {
channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err)
channelz.Warningf(ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err)
ccr.poll(balancer.ErrBadResolverState)
return
}
@ -215,8 +215,8 @@ func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) {
} else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 {
updates = append(updates, "resolver returned new addresses")
}
channelz.AddTraceEvent(logger, ccr.cc.channelzID, 0, &channelz.TraceEventDesc{
channelz.AddTraceEvent(ccr.cc.channelzID, 0, &channelz.TraceEventDesc{
Desc: fmt.Sprintf("Resolver state updated: %+v (%v)", s, strings.Join(updates, "; ")),
Severity: channelz.CtInfo,
Severity: channelz.CtINFO,
})
}

View File

@ -155,6 +155,7 @@ func (d *gzipDecompressor) Type() string {
type callInfo struct {
compressorType string
failFast bool
stream ClientStream
maxReceiveMessageSize *int
maxSendMessageSize *int
creds credentials.PerRPCCredentials
@ -179,7 +180,7 @@ type CallOption interface {
// after is called after the call has completed. after cannot return an
// error, so any failures should be reported via output parameters.
after(*callInfo, *csAttempt)
after(*callInfo)
}
// EmptyCallOption does not alter the Call configuration.
@ -188,7 +189,7 @@ type CallOption interface {
type EmptyCallOption struct{}
func (EmptyCallOption) before(*callInfo) error { return nil }
func (EmptyCallOption) after(*callInfo, *csAttempt) {}
func (EmptyCallOption) after(*callInfo) {}
// Header returns a CallOptions that retrieves the header metadata
// for a unary RPC.
@ -198,18 +199,16 @@ func Header(md *metadata.MD) CallOption {
// HeaderCallOption is a CallOption for collecting response header metadata.
// The metadata field will be populated *after* the RPC completes.
//
// Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
// This is an EXPERIMENTAL API.
type HeaderCallOption struct {
HeaderAddr *metadata.MD
}
func (o HeaderCallOption) before(c *callInfo) error { return nil }
func (o HeaderCallOption) after(c *callInfo, attempt *csAttempt) {
*o.HeaderAddr, _ = attempt.s.Header()
func (o HeaderCallOption) after(c *callInfo) {
if c.stream != nil {
*o.HeaderAddr, _ = c.stream.Header()
}
}
// Trailer returns a CallOptions that retrieves the trailer metadata
@ -220,18 +219,16 @@ func Trailer(md *metadata.MD) CallOption {
// TrailerCallOption is a CallOption for collecting response trailer metadata.
// The metadata field will be populated *after* the RPC completes.
//
// Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
// This is an EXPERIMENTAL API.
type TrailerCallOption struct {
TrailerAddr *metadata.MD
}
func (o TrailerCallOption) before(c *callInfo) error { return nil }
func (o TrailerCallOption) after(c *callInfo, attempt *csAttempt) {
*o.TrailerAddr = attempt.s.Trailer()
func (o TrailerCallOption) after(c *callInfo) {
if c.stream != nil {
*o.TrailerAddr = c.stream.Trailer()
}
}
// Peer returns a CallOption that retrieves peer information for a unary RPC.
@ -242,20 +239,18 @@ func Peer(p *peer.Peer) CallOption {
// PeerCallOption is a CallOption for collecting the identity of the remote
// peer. The peer field will be populated *after* the RPC completes.
//
// Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
// This is an EXPERIMENTAL API.
type PeerCallOption struct {
PeerAddr *peer.Peer
}
func (o PeerCallOption) before(c *callInfo) error { return nil }
func (o PeerCallOption) after(c *callInfo, attempt *csAttempt) {
if x, ok := peer.FromContext(attempt.s.Context()); ok {
func (o PeerCallOption) after(c *callInfo) {
if c.stream != nil {
if x, ok := peer.FromContext(c.stream.Context()); ok {
*o.PeerAddr = *x
}
}
}
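A hedged usage sketch of the call options whose after() hooks change in this hunk: header, trailer, and peer are only populated once the RPC completes. The method name and the req/resp values are placeholders, not a real service.

package example

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/peer"
)

// callWithMetadata invokes a unary RPC and collects the values these
// CallOptions expose after the call finishes.
func callWithMetadata(ctx context.Context, cc *grpc.ClientConn, req, resp interface{}) (metadata.MD, metadata.MD, peer.Peer, error) {
	var header, trailer metadata.MD
	var p peer.Peer
	err := cc.Invoke(ctx, "/pkg.Service/Method", req, resp,
		grpc.Header(&header),
		grpc.Trailer(&trailer),
		grpc.Peer(&p),
		grpc.WaitForReady(true),
	)
	return header, trailer, p, err
}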
// WaitForReady configures the action to take when an RPC is attempted on broken
@ -281,11 +276,7 @@ func FailFast(failFast bool) CallOption {
// FailFastCallOption is a CallOption for indicating whether an RPC should fail
// fast or not.
//
// Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
// This is an EXPERIMENTAL API.
type FailFastCallOption struct {
FailFast bool
}
@ -294,7 +285,7 @@ func (o FailFastCallOption) before(c *callInfo) error {
c.failFast = o.FailFast
return nil
}
func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {}
func (o FailFastCallOption) after(c *callInfo) {}
// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size
// in bytes the client can receive.
@ -304,11 +295,7 @@ func MaxCallRecvMsgSize(bytes int) CallOption {
// MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message
// size in bytes the client can receive.
//
// Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
// This is an EXPERIMENTAL API.
type MaxRecvMsgSizeCallOption struct {
MaxRecvMsgSize int
}
@ -317,7 +304,7 @@ func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error {
c.maxReceiveMessageSize = &o.MaxRecvMsgSize
return nil
}
func (o MaxRecvMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {}
func (o MaxRecvMsgSizeCallOption) after(c *callInfo) {}
// MaxCallSendMsgSize returns a CallOption which sets the maximum message size
// in bytes the client can send.
@ -327,11 +314,7 @@ func MaxCallSendMsgSize(bytes int) CallOption {
// MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message
// size in bytes the client can send.
//
// Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
// This is an EXPERIMENTAL API.
type MaxSendMsgSizeCallOption struct {
MaxSendMsgSize int
}
@ -340,7 +323,7 @@ func (o MaxSendMsgSizeCallOption) before(c *callInfo) error {
c.maxSendMessageSize = &o.MaxSendMsgSize
return nil
}
func (o MaxSendMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {}
func (o MaxSendMsgSizeCallOption) after(c *callInfo) {}
// PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials
// for a call.
@ -350,11 +333,7 @@ func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption {
// PerRPCCredsCallOption is a CallOption that indicates the per-RPC
// credentials to use for the call.
//
// Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
// This is an EXPERIMENTAL API.
type PerRPCCredsCallOption struct {
Creds credentials.PerRPCCredentials
}
@ -363,26 +342,19 @@ func (o PerRPCCredsCallOption) before(c *callInfo) error {
c.creds = o.Creds
return nil
}
func (o PerRPCCredsCallOption) after(c *callInfo, attempt *csAttempt) {}
func (o PerRPCCredsCallOption) after(c *callInfo) {}
// UseCompressor returns a CallOption which sets the compressor used when
// sending the request. If WithCompressor is also set, UseCompressor has
// higher priority.
//
// Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
// This API is EXPERIMENTAL.
func UseCompressor(name string) CallOption {
return CompressorCallOption{CompressorType: name}
}
// CompressorCallOption is a CallOption that indicates the compressor to use.
//
// Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
// This is an EXPERIMENTAL API.
type CompressorCallOption struct {
CompressorType string
}
@ -391,7 +363,7 @@ func (o CompressorCallOption) before(c *callInfo) error {
c.compressorType = o.CompressorType
return nil
}
func (o CompressorCallOption) after(c *callInfo, attempt *csAttempt) {}
func (o CompressorCallOption) after(c *callInfo) {}
// CallContentSubtype returns a CallOption that will set the content-subtype
// for a call. For example, if content-subtype is "json", the Content-Type over
@ -415,11 +387,7 @@ func CallContentSubtype(contentSubtype string) CallOption {
// ContentSubtypeCallOption is a CallOption that indicates the content-subtype
// used for marshaling messages.
//
// Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
// This is an EXPERIMENTAL API.
type ContentSubtypeCallOption struct {
ContentSubtype string
}
@ -428,7 +396,7 @@ func (o ContentSubtypeCallOption) before(c *callInfo) error {
c.contentSubtype = o.ContentSubtype
return nil
}
func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {}
func (o ContentSubtypeCallOption) after(c *callInfo) {}
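
Content-subtype selection only works for codecs that have been registered. A rough sketch of a custom JSON codec (the jsonCodec type is illustrative, not a grpc-go API) registered so that CallContentSubtype("json") can pick it up:

package example

import (
	"encoding/json"

	"google.golang.org/grpc"
	"google.golang.org/grpc/encoding"
)

// jsonCodec is a hypothetical codec that marshals messages as JSON.
type jsonCodec struct{}

func (jsonCodec) Marshal(v interface{}) ([]byte, error)      { return json.Marshal(v) }
func (jsonCodec) Unmarshal(data []byte, v interface{}) error { return json.Unmarshal(data, v) }
func (jsonCodec) Name() string                               { return "json" }

func init() {
	// After registration, clients may select the codec per call and the
	// Content-Type on the wire becomes "application/grpc+json".
	encoding.RegisterCodec(jsonCodec{})
}

// Per-call selection; ForceCodec(jsonCodec{}) would bypass the registry instead.
var useJSON grpc.CallOption = grpc.CallContentSubtype("json")
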
// ForceCodec returns a CallOption that will set the given Codec to be
// used for all request and response messages for a call. The result of calling
@ -443,10 +411,7 @@ func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {}
// This function is provided for advanced users; prefer to use only
// CallContentSubtype to select a registered codec instead.
//
// Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
// This is an EXPERIMENTAL API.
func ForceCodec(codec encoding.Codec) CallOption {
return ForceCodecCallOption{Codec: codec}
}
@ -454,10 +419,7 @@ func ForceCodec(codec encoding.Codec) CallOption {
// ForceCodecCallOption is a CallOption that indicates the codec used for
// marshaling messages.
//
// Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
// This is an EXPERIMENTAL API.
type ForceCodecCallOption struct {
Codec encoding.Codec
}
@ -466,7 +428,7 @@ func (o ForceCodecCallOption) before(c *callInfo) error {
c.codec = o.Codec
return nil
}
func (o ForceCodecCallOption) after(c *callInfo, attempt *csAttempt) {}
func (o ForceCodecCallOption) after(c *callInfo) {}
// CallCustomCodec behaves like ForceCodec, but accepts a grpc.Codec instead of
// an encoding.Codec.
@ -479,10 +441,7 @@ func CallCustomCodec(codec Codec) CallOption {
// CustomCodecCallOption is a CallOption that indicates the codec used for
// marshaling messages.
//
// Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
// This is an EXPERIMENTAL API.
type CustomCodecCallOption struct {
Codec Codec
}
@ -491,26 +450,19 @@ func (o CustomCodecCallOption) before(c *callInfo) error {
c.codec = o.Codec
return nil
}
func (o CustomCodecCallOption) after(c *callInfo, attempt *csAttempt) {}
func (o CustomCodecCallOption) after(c *callInfo) {}
// MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory
// used for buffering this RPC's requests for retry purposes.
//
// Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
// This API is EXPERIMENTAL.
func MaxRetryRPCBufferSize(bytes int) CallOption {
return MaxRetryRPCBufferSizeCallOption{bytes}
}
// MaxRetryRPCBufferSizeCallOption is a CallOption indicating the amount of
// memory to be used for caching this RPC for retry purposes.
//
// Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
// This is an EXPERIMENTAL API.
type MaxRetryRPCBufferSizeCallOption struct {
MaxRetryRPCBufferSize int
}
@ -519,7 +471,7 @@ func (o MaxRetryRPCBufferSizeCallOption) before(c *callInfo) error {
c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize
return nil
}
func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo, attempt *csAttempt) {}
func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo) {}
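
A short sketch of capping the retry buffer; the 1 MiB figure is arbitrary, and retry policies themselves are configured through the service config rather than through call options:

package main

import (
	"log"

	"google.golang.org/grpc"
)

func main() {
	// Cap the memory buffered for retrying any single RPC at 1 MiB (an
	// arbitrary figure). In this grpc-go release, service-config retries are
	// believed to be gated behind the GRPC_GO_RETRY=on environment variable.
	conn, err := grpc.Dial(
		"localhost:50051", // hypothetical target
		grpc.WithInsecure(),
		grpc.WithDefaultCallOptions(grpc.MaxRetryRPCBufferSize(1<<20)),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}
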
// The format of the payload: compressed or not?
type payloadFormat uint8
@ -921,7 +873,7 @@ type channelzData struct {
// The SupportPackageIsVersion variables are referenced from generated protocol
// buffer files to ensure compatibility with the gRPC version used. The latest
// support package version is 7.
// support package version is 6.
//
// Older versions are kept for compatibility. They may be removed if
// compatibility cannot be maintained.
@ -932,7 +884,6 @@ const (
SupportPackageIsVersion4 = true
SupportPackageIsVersion5 = true
SupportPackageIsVersion6 = true
SupportPackageIsVersion7 = true
)
const grpcUA = "grpc-go/" + Version

View File

@ -42,7 +42,6 @@ import (
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal/binarylog"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpcrand"
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/keepalive"
@ -59,7 +58,6 @@ const (
)
var statusOK = status.New(codes.OK, "")
var logger = grpclog.Component("core")
type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error)
@ -80,22 +78,15 @@ type ServiceDesc struct {
Metadata interface{}
}
// serviceInfo wraps information about a service. It is very similar to
// ServiceDesc and is constructed from it for internal purposes.
type serviceInfo struct {
// Contains the implementation for the methods in this service.
serviceImpl interface{}
methods map[string]*MethodDesc
streams map[string]*StreamDesc
// service consists of the information of the server serving this service and
// the methods in this service.
type service struct {
server interface{} // the server for service methods
md map[string]*MethodDesc
sd map[string]*StreamDesc
mdata interface{}
}
type serverWorkerData struct {
st transport.ServerTransport
wg *sync.WaitGroup
stream *transport.Stream
}
// Server is a gRPC server to serve RPC requests.
type Server struct {
opts serverOptions
@ -106,7 +97,7 @@ type Server struct {
serve bool
drain bool
cv *sync.Cond // signaled when connections close for GracefulStop
services map[string]*serviceInfo // service name -> service info
m map[string]*service // service name -> service info
events trace.EventLog
quit *grpcsync.Event
@ -116,8 +107,6 @@ type Server struct {
channelzID int64 // channelz unique identification number
czData *channelzData
serverWorkerChannels []chan *serverWorkerData
}
type serverOptions struct {
@ -144,7 +133,6 @@ type serverOptions struct {
connectionTimeout time.Duration
maxHeaderListSize *uint32
headerTableSize *uint32
numServerWorkers uint32
}
var defaultServerOptions = serverOptions{
@ -163,10 +151,7 @@ type ServerOption interface {
// EmptyServerOption does not alter the server configuration. It can be embedded
// in another structure to build custom server options.
//
// Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
// This API is EXPERIMENTAL.
type EmptyServerOption struct{}
func (EmptyServerOption) apply(*serverOptions) {}
@ -228,7 +213,7 @@ func InitialConnWindowSize(s int32) ServerOption {
// KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server.
func KeepaliveParams(kp keepalive.ServerParameters) ServerOption {
if kp.Time > 0 && kp.Time < time.Second {
logger.Warning("Adjusting keepalive ping interval to minimum period of 1s")
grpclog.Warning("Adjusting keepalive ping interval to minimum period of 1s")
kp.Time = time.Second
}
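
The clamp above is why very small keepalive intervals are silently raised to one second. A server-side sketch with explicit keepalive settings (all values are illustrative):

package main

import (
	"log"
	"net"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func main() {
	s := grpc.NewServer(
		// Ping idle clients every 2 minutes; drop them after 20s without an ack.
		grpc.KeepaliveParams(keepalive.ServerParameters{
			Time:    2 * time.Minute,
			Timeout: 20 * time.Second,
		}),
		// Reject client pings that arrive more often than once per minute.
		grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
			MinTime:             time.Minute,
			PermitWithoutStream: false,
		}),
	)

	lis, err := net.Listen("tcp", ":50051") // hypothetical listen address
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	log.Fatal(s.Serve(lis))
}
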
@ -247,12 +232,6 @@ func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption {
// CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling.
//
// This will override any lookups by content-subtype for Codecs registered with RegisterCodec.
//
// Deprecated: register codecs using encoding.RegisterCodec. The server will
// automatically use registered codecs based on the incoming requests' headers.
// See also
// https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec.
// Will be supported throughout 1.x.
func CustomCodec(codec Codec) ServerOption {
return newFuncServerOption(func(o *serverOptions) {
o.codec = codec
@ -265,8 +244,7 @@ func CustomCodec(codec Codec) ServerOption {
// default, server messages will be sent using the same compressor with which
// request messages were sent.
//
// Deprecated: use encoding.RegisterCompressor instead. Will be supported
// throughout 1.x.
// Deprecated: use encoding.RegisterCompressor instead.
func RPCCompressor(cp Compressor) ServerOption {
return newFuncServerOption(func(o *serverOptions) {
o.cp = cp
@ -277,8 +255,7 @@ func RPCCompressor(cp Compressor) ServerOption {
// messages. It has higher priority than decompressors registered via
// encoding.RegisterCompressor.
//
// Deprecated: use encoding.RegisterCompressor instead. Will be supported
// throughout 1.x.
// Deprecated: use encoding.RegisterCompressor instead.
func RPCDecompressor(dc Decompressor) ServerOption {
return newFuncServerOption(func(o *serverOptions) {
o.dc = dc
@ -288,7 +265,7 @@ func RPCDecompressor(dc Decompressor) ServerOption {
// MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive.
// If this is not set, gRPC uses the default limit.
//
// Deprecated: use MaxRecvMsgSize instead. Will be supported throughout 1.x.
// Deprecated: use MaxRecvMsgSize instead.
func MaxMsgSize(m int) ServerOption {
return MaxRecvMsgSize(m)
}
@ -358,7 +335,7 @@ func StreamInterceptor(i StreamServerInterceptor) ServerOption {
}
// ChainStreamInterceptor returns a ServerOption that specifies the chained interceptor
// for streaming RPCs. The first interceptor will be the outer most,
// for stream RPCs. The first interceptor will be the outer most,
// while the last interceptor will be the inner most wrapper around the real call.
// All stream interceptors added by this method will be chained.
func ChainStreamInterceptor(interceptors ...StreamServerInterceptor) ServerOption {
@ -408,10 +385,7 @@ func UnknownServiceHandler(streamHandler StreamHandler) ServerOption {
// new connections. If this is not set, the default is 120 seconds. A zero or
// negative value will result in an immediate timeout.
//
// Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
// This API is EXPERIMENTAL.
func ConnectionTimeout(d time.Duration) ServerOption {
return newFuncServerOption(func(o *serverOptions) {
o.connectionTimeout = d
@ -429,79 +403,13 @@ func MaxHeaderListSize(s uint32) ServerOption {
// HeaderTableSize returns a ServerOption that sets the size of dynamic
// header table for stream.
//
// Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
// This API is EXPERIMENTAL.
func HeaderTableSize(s uint32) ServerOption {
return newFuncServerOption(func(o *serverOptions) {
o.headerTableSize = &s
})
}
// NumStreamWorkers returns a ServerOption that sets the number of worker
// goroutines that should be used to process incoming streams. Setting this to
// zero (default) will disable workers and spawn a new goroutine for each
// stream.
//
// Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func NumStreamWorkers(numServerWorkers uint32) ServerOption {
// TODO: If/when this API gets stabilized (i.e. stream workers become the
// only way streams are processed), change the behavior of the zero value to
// a sane default. Preliminary experiments suggest that a value equal to the
// number of CPUs available is most performant; requires thorough testing.
return newFuncServerOption(func(o *serverOptions) {
o.numServerWorkers = numServerWorkers
})
}
// serverWorkerResetThreshold defines how often the stack must be reset. Every
// N requests, by spawning a new goroutine in its place, a worker can reset its
// stack so that large stacks don't live in memory forever. 2^16 should allow
// each goroutine stack to live for at least a few seconds in a typical
// workload (assuming a QPS of a few thousand requests/sec).
const serverWorkerResetThreshold = 1 << 16
// serverWorkers blocks on a *transport.Stream channel forever and waits for
// data to be fed by serveStreams. This allows different requests to be
// processed by the same goroutine, removing the need for expensive stack
// re-allocations (see the runtime.morestack problem [1]).
//
// [1] https://github.com/golang/go/issues/18138
func (s *Server) serverWorker(ch chan *serverWorkerData) {
// To make sure all server workers don't reset at the same time, choose a
// random number of iterations before resetting.
threshold := serverWorkerResetThreshold + grpcrand.Intn(serverWorkerResetThreshold)
for completed := 0; completed < threshold; completed++ {
data, ok := <-ch
if !ok {
return
}
s.handleStream(data.st, data.stream, s.traceInfo(data.st, data.stream))
data.wg.Done()
}
go s.serverWorker(ch)
}
// initServerWorkers creates worker goroutines and channels to process incoming
// connections to reduce the time spent overall on runtime.morestack.
func (s *Server) initServerWorkers() {
s.serverWorkerChannels = make([]chan *serverWorkerData, s.opts.numServerWorkers)
for i := uint32(0); i < s.opts.numServerWorkers; i++ {
s.serverWorkerChannels[i] = make(chan *serverWorkerData)
go s.serverWorker(s.serverWorkerChannels[i])
}
}
func (s *Server) stopServerWorkers() {
for i := uint32(0); i < s.opts.numServerWorkers; i++ {
close(s.serverWorkerChannels[i])
}
}
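
The worker-pool machinery removed above only exists in grpc-go 1.30+, so it disappears with the downgrade to 1.29.1. A generic, stand-alone sketch of the same idea (a fixed set of goroutines that recycle themselves every N jobs so their grown stacks can be released), stripped of the transport plumbing:

package main

import (
	"fmt"
	"sync"
)

const workerResetThreshold = 1 << 16 // recycle a worker after this many jobs

// worker drains jobs from ch; after workerResetThreshold jobs it replaces
// itself with a fresh goroutine so any large stack it grew can be freed.
func worker(ch chan func()) {
	for done := 0; done < workerResetThreshold; done++ {
		job, ok := <-ch
		if !ok {
			return
		}
		job()
	}
	go worker(ch)
}

func main() {
	const numWorkers = 4
	ch := make(chan func())
	for i := 0; i < numWorkers; i++ {
		go worker(ch)
	}

	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		i := i
		wg.Add(1)
		ch <- func() { defer wg.Done(); fmt.Println("job", i) }
	}
	wg.Wait()
	close(ch) // stops all workers
}
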
// NewServer creates a gRPC server which has no service registered and has not
// started to accept requests yet.
func NewServer(opt ...ServerOption) *Server {
@ -513,7 +421,7 @@ func NewServer(opt ...ServerOption) *Server {
lis: make(map[net.Listener]bool),
opts: opts,
conns: make(map[transport.ServerTransport]bool),
services: make(map[string]*serviceInfo),
m: make(map[string]*service),
quit: grpcsync.NewEvent(),
done: grpcsync.NewEvent(),
czData: new(channelzData),
@ -526,10 +434,6 @@ func NewServer(opt ...ServerOption) *Server {
s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line))
}
if s.opts.numServerWorkers > 0 {
s.initServerWorkers()
}
if channelz.IsOn() {
s.channelzID = channelz.RegisterServer(&channelzServer{s}, "")
}
@ -552,29 +456,14 @@ func (s *Server) errorf(format string, a ...interface{}) {
}
}
// ServiceRegistrar wraps a single method that supports service registration. It
// enables users to pass concrete types other than grpc.Server to the service
// registration methods exported by the IDL generated code.
type ServiceRegistrar interface {
// RegisterService registers a service and its implementation to the
// concrete type implementing this interface. It may not be called
// once the server has started serving.
// desc describes the service and its methods and handlers. impl is the
// service implementation which is passed to the method handlers.
RegisterService(desc *ServiceDesc, impl interface{})
}
// RegisterService registers a service and its implementation to the gRPC
// server. It is called from the IDL generated code. This must be called before
// invoking Serve. If ss is non-nil (for legacy code), its type is checked to
// ensure it implements sd.HandlerType.
// invoking Serve.
func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) {
if ss != nil {
ht := reflect.TypeOf(sd.HandlerType).Elem()
st := reflect.TypeOf(ss)
if !st.Implements(ht) {
logger.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht)
}
grpclog.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht)
}
s.register(sd, ss)
}
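
RegisterService is almost never called by hand; the IDL-generated RegisterXxxServer helpers call it with the generated ServiceDesc. A typical server lifecycle sketch (the pb package and Greeter service are hypothetical generated code, so the registration call is left as a comment):

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
)

func main() {
	lis, err := net.Listen("tcp", ":50051") // hypothetical listen address
	if err != nil {
		log.Fatalf("listen: %v", err)
	}

	s := grpc.NewServer()

	// pb.RegisterGreeterServer(s, &greeterServer{}) would end up calling
	// s.RegisterService(&_Greeter_serviceDesc, &greeterServer{}); it must run
	// before s.Serve, otherwise RegisterService logs a fatal error.
	if err := s.Serve(lis); err != nil {
		log.Fatalf("serve: %v", err)
	}
}
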
@ -584,26 +473,26 @@ func (s *Server) register(sd *ServiceDesc, ss interface{}) {
defer s.mu.Unlock()
s.printf("RegisterService(%q)", sd.ServiceName)
if s.serve {
logger.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName)
grpclog.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName)
}
if _, ok := s.services[sd.ServiceName]; ok {
logger.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName)
if _, ok := s.m[sd.ServiceName]; ok {
grpclog.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName)
}
info := &serviceInfo{
serviceImpl: ss,
methods: make(map[string]*MethodDesc),
streams: make(map[string]*StreamDesc),
srv := &service{
server: ss,
md: make(map[string]*MethodDesc),
sd: make(map[string]*StreamDesc),
mdata: sd.Metadata,
}
for i := range sd.Methods {
d := &sd.Methods[i]
info.methods[d.MethodName] = d
srv.md[d.MethodName] = d
}
for i := range sd.Streams {
d := &sd.Streams[i]
info.streams[d.StreamName] = d
srv.sd[d.StreamName] = d
}
s.services[sd.ServiceName] = info
s.m[sd.ServiceName] = srv
}
// MethodInfo contains the information of an RPC including its method name and type.
@ -627,16 +516,16 @@ type ServiceInfo struct {
// Service names include the package names, in the form of <package>.<service>.
func (s *Server) GetServiceInfo() map[string]ServiceInfo {
ret := make(map[string]ServiceInfo)
for n, srv := range s.services {
methods := make([]MethodInfo, 0, len(srv.methods)+len(srv.streams))
for m := range srv.methods {
for n, srv := range s.m {
methods := make([]MethodInfo, 0, len(srv.md)+len(srv.sd))
for m := range srv.md {
methods = append(methods, MethodInfo{
Name: m,
IsClientStream: false,
IsServerStream: false,
})
}
for m, d := range srv.streams {
for m, d := range srv.sd {
methods = append(methods, MethodInfo{
Name: m,
IsClientStream: d.ClientStreams,
@ -793,7 +682,7 @@ func (s *Server) handleRawConn(rawConn net.Conn) {
s.mu.Lock()
s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err)
s.mu.Unlock()
channelz.Warningf(logger, s.channelzID, "grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err)
channelz.Warningf(s.channelzID, "grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err)
rawConn.Close()
}
rawConn.SetDeadline(time.Time{})
@ -840,7 +729,7 @@ func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) tr
s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err)
s.mu.Unlock()
c.Close()
channelz.Warning(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err)
channelz.Warning(s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err)
return nil
}
@ -850,27 +739,12 @@ func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) tr
func (s *Server) serveStreams(st transport.ServerTransport) {
defer st.Close()
var wg sync.WaitGroup
var roundRobinCounter uint32
st.HandleStreams(func(stream *transport.Stream) {
wg.Add(1)
if s.opts.numServerWorkers > 0 {
data := &serverWorkerData{st: st, wg: &wg, stream: stream}
select {
case s.serverWorkerChannels[atomic.AddUint32(&roundRobinCounter, 1)%s.opts.numServerWorkers] <- data:
default:
// If all stream workers are busy, fallback to the default code path.
go func() {
s.handleStream(st, stream, s.traceInfo(st, stream))
wg.Done()
}()
}
} else {
go func() {
defer wg.Done()
s.handleStream(st, stream, s.traceInfo(st, stream))
}()
}
}, func(ctx context.Context, method string) context.Context {
if !EnableTracing {
return ctx
@ -905,12 +779,8 @@ var _ http.Handler = (*Server)(nil)
// Note that ServeHTTP uses Go's HTTP/2 server implementation which is totally
// separate from grpc-go's HTTP/2 server. Performance and features may vary
// between the two paths. ServeHTTP does not support some gRPC features
// available through grpc-go's HTTP/2 server.
//
// Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
// available through grpc-go's HTTP/2 server, and it is currently EXPERIMENTAL
// and subject to change.
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandler)
if err != nil {
@ -998,12 +868,12 @@ func (s *Server) incrCallsFailed() {
func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error {
data, err := encode(s.getCodec(stream.ContentSubtype()), msg)
if err != nil {
channelz.Error(logger, s.channelzID, "grpc: server failed to encode response: ", err)
channelz.Error(s.channelzID, "grpc: server failed to encode response: ", err)
return err
}
compData, err := compress(data, cp, comp)
if err != nil {
channelz.Error(logger, s.channelzID, "grpc: server failed to compress response: ", err)
channelz.Error(s.channelzID, "grpc: server failed to compress response: ", err)
return err
}
hdr, payload := msgHeader(data, compData)
@ -1052,7 +922,7 @@ func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info
}
}
func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) {
func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) {
sh := s.opts.statsHandler
if sh != nil || trInfo != nil || channelz.IsOn() {
if channelz.IsOn() {
@ -1175,8 +1045,10 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
}
d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp)
if err != nil {
if e := t.WriteStatus(stream, status.Convert(err)); e != nil {
channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status %v", e)
if st, ok := status.FromError(err); ok {
if e := t.WriteStatus(stream, st); e != nil {
channelz.Warningf(s.channelzID, "grpc: Server.processUnaryRPC failed to write status %v", e)
}
}
return err
}
@ -1191,7 +1063,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
sh.HandleRPC(stream.Context(), &stats.InPayload{
RecvTime: time.Now(),
Payload: v,
WireLength: payInfo.wireLength + headerLen,
WireLength: payInfo.wireLength,
Data: d,
Length: len(d),
})
@ -1207,7 +1079,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
return nil
}
ctx := NewContextWithServerTransportStream(stream.Context(), stream)
reply, appErr := md.Handler(info.serviceImpl, ctx, df, s.opts.unaryInt)
reply, appErr := md.Handler(srv.server, ctx, df, s.opts.unaryInt)
if appErr != nil {
appStatus, ok := status.FromError(appErr)
if !ok {
@ -1220,7 +1092,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
trInfo.tr.SetError()
}
if e := t.WriteStatus(stream, appStatus); e != nil {
channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e)
channelz.Warningf(s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e)
}
if binlog != nil {
if h, _ := stream.Header(); h.Len() > 0 {
@ -1249,7 +1121,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
}
if sts, ok := status.FromError(err); ok {
if e := t.WriteStatus(stream, sts); e != nil {
channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e)
channelz.Warningf(s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e)
}
} else {
switch st := err.(type) {
@ -1333,7 +1205,7 @@ func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, inf
}
}
func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) {
func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) {
if channelz.IsOn() {
s.incrCallsStarted()
}
@ -1450,8 +1322,8 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
}
var appErr error
var server interface{}
if info != nil {
server = info.serviceImpl
if srv != nil {
server = srv.server
}
if s.opts.streamInt == nil {
appErr = sd.Handler(server, ss)
@ -1517,7 +1389,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
trInfo.tr.SetError()
}
channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err)
channelz.Warningf(s.channelzID, "grpc: Server.handleStream failed to write status: %v", err)
}
if trInfo != nil {
trInfo.tr.Finish()
@ -1527,13 +1399,13 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
service := sm[:pos]
method := sm[pos+1:]
srv, knownService := s.services[service]
srv, knownService := s.m[service]
if knownService {
if md, ok := srv.methods[method]; ok {
if md, ok := srv.md[method]; ok {
s.processUnaryRPC(t, stream, srv, md, trInfo)
return
}
if sd, ok := srv.streams[method]; ok {
if sd, ok := srv.sd[method]; ok {
s.processStreamingRPC(t, stream, srv, sd, trInfo)
return
}
@ -1558,7 +1430,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
trInfo.tr.SetError()
}
channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err)
channelz.Warningf(s.channelzID, "grpc: Server.handleStream failed to write status: %v", err)
}
if trInfo != nil {
trInfo.tr.Finish()
@ -1571,10 +1443,7 @@ type streamKey struct{}
// NewContextWithServerTransportStream creates a new context from ctx and
// attaches stream to it.
//
// Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
// This API is EXPERIMENTAL.
func NewContextWithServerTransportStream(ctx context.Context, stream ServerTransportStream) context.Context {
return context.WithValue(ctx, streamKey{}, stream)
}
@ -1586,10 +1455,7 @@ func NewContextWithServerTransportStream(ctx context.Context, stream ServerTrans
//
// See also NewContextWithServerTransportStream.
//
// Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
// This API is EXPERIMENTAL.
type ServerTransportStream interface {
Method() string
SetHeader(md metadata.MD) error
@ -1601,10 +1467,7 @@ type ServerTransportStream interface {
// ctx. Returns nil if the given context has no stream associated with it
// (which implies it is not an RPC invocation context).
//
// Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
// This API is EXPERIMENTAL.
func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream {
s, _ := ctx.Value(streamKey{}).(ServerTransportStream)
return s
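
A small sketch of what a handler can do with the stream recovered from its context; in practice the grpc.SetHeader/SetTrailer helpers wrap exactly this lookup. The handler name and metadata key are made up:

package example

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

// annotate is a hypothetical unary handler body that attaches a response
// header via the ServerTransportStream carried in ctx.
func annotate(ctx context.Context) error {
	sts := grpc.ServerTransportStreamFromContext(ctx)
	if sts == nil {
		// Not an RPC invocation context (e.g. a plain context.Background()).
		return nil
	}
	// Equivalent to grpc.SetHeader(ctx, ...), which performs the same lookup.
	return sts.SetHeader(metadata.Pairs("x-served-by", "example-host"))
}
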
@ -1644,9 +1507,6 @@ func (s *Server) Stop() {
for c := range st {
c.Close()
}
if s.opts.numServerWorkers > 0 {
s.stopServerWorkers()
}
s.mu.Lock()
if s.events != nil {

View File

@ -20,16 +20,15 @@ package grpc
import (
"encoding/json"
"errors"
"fmt"
"reflect"
"strconv"
"strings"
"time"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal"
internalserviceconfig "google.golang.org/grpc/internal/serviceconfig"
"google.golang.org/grpc/serviceconfig"
)
@ -80,7 +79,7 @@ type ServiceConfig struct {
serviceconfig.Config
// LB is the load balancer the service providers recommends. The balancer
// specified via grpc.WithBalancerName will override this. This is deprecated;
// specified via grpc.WithBalancer will override this. This is deprecated;
// lbConfigs is preferred. If lbConfig and LB are both present, lbConfig
// will be used.
LB *string
@ -225,27 +224,19 @@ func parseDuration(s *string) (*time.Duration, error) {
}
type jsonName struct {
Service string
Method string
Service *string
Method *string
}
var (
errDuplicatedName = errors.New("duplicated name")
errEmptyServiceNonEmptyMethod = errors.New("cannot combine empty 'service' and non-empty 'method'")
)
func (j jsonName) generatePath() (string, error) {
if j.Service == "" {
if j.Method != "" {
return "", errEmptyServiceNonEmptyMethod
func (j jsonName) generatePath() (string, bool) {
if j.Service == nil {
return "", false
}
return "", nil
res := "/" + *j.Service + "/"
if j.Method != nil {
res += *j.Method
}
res := "/" + j.Service + "/"
if j.Method != "" {
res += j.Method
}
return res, nil
return res, true
}
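
For orientation, a rough example of the service-config JSON this file parses, showing the loadBalancingConfig list form handled further down in parseServiceConfig and the name entries that generatePath turns into "/service/method" keys; the service and method names are made up:

package example

// exampleServiceConfig is an illustrative service config. The first name
// entry maps to the path "/echo.Echo/UnaryEcho"; the second (method omitted)
// maps to "/echo.Echo/" and so applies to the whole service.
const exampleServiceConfig = `{
  "loadBalancingConfig": [ { "round_robin": {} } ],
  "methodConfig": [{
    "name": [
      { "service": "echo.Echo", "method": "UnaryEcho" },
      { "service": "echo.Echo" }
    ],
    "timeout": "1.5s",
    "maxRequestMessageBytes": 1048576
  }]
}`

// A client would typically supply it via
// grpc.WithDefaultServiceConfig(exampleServiceConfig) when dialing.
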
// TODO(lyuxuan): delete this struct after cleaning up old service config implementation.
@ -258,10 +249,12 @@ type jsonMC struct {
RetryPolicy *jsonRetryPolicy
}
type loadBalancingConfig map[string]json.RawMessage
// TODO(lyuxuan): delete this struct after cleaning up old service config implementation.
type jsonSC struct {
LoadBalancingPolicy *string
LoadBalancingConfig *internalserviceconfig.BalancerConfig
LoadBalancingConfig *[]loadBalancingConfig
MethodConfig *[]jsonMC
RetryThrottling *retryThrottlingPolicy
HealthCheckConfig *healthCheckConfig
@ -277,7 +270,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult {
var rsc jsonSC
err := json.Unmarshal([]byte(js), &rsc)
if err != nil {
logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
return &serviceconfig.ParseResult{Err: err}
}
sc := ServiceConfig{
@ -287,25 +280,53 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult {
healthCheckConfig: rsc.HealthCheckConfig,
rawJSONString: js,
}
if c := rsc.LoadBalancingConfig; c != nil {
sc.lbConfig = &lbConfig{
name: c.Name,
cfg: c.Config,
if rsc.LoadBalancingConfig != nil {
for i, lbcfg := range *rsc.LoadBalancingConfig {
if len(lbcfg) != 1 {
err := fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg)
grpclog.Warningf(err.Error())
return &serviceconfig.ParseResult{Err: err}
}
var name string
var jsonCfg json.RawMessage
for name, jsonCfg = range lbcfg {
}
builder := balancer.Get(name)
if builder == nil {
continue
}
sc.lbConfig = &lbConfig{name: name}
if parser, ok := builder.(balancer.ConfigParser); ok {
var err error
sc.lbConfig.cfg, err = parser.ParseConfig(jsonCfg)
if err != nil {
return &serviceconfig.ParseResult{Err: fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err)}
}
} else if string(jsonCfg) != "{}" {
grpclog.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg))
}
break
}
if sc.lbConfig == nil {
// We had a loadBalancingConfig field but did not encounter a
// supported policy. The config is considered invalid in this
// case.
err := fmt.Errorf("invalid loadBalancingConfig: no supported policies found")
grpclog.Warningf(err.Error())
return &serviceconfig.ParseResult{Err: err}
}
}
if rsc.MethodConfig == nil {
return &serviceconfig.ParseResult{Config: &sc}
}
paths := map[string]struct{}{}
for _, m := range *rsc.MethodConfig {
if m.Name == nil {
continue
}
d, err := parseDuration(m.Timeout)
if err != nil {
logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
return &serviceconfig.ParseResult{Err: err}
}
@ -314,7 +335,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult {
Timeout: d,
}
if mc.retryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil {
logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
return &serviceconfig.ParseResult{Err: err}
}
if m.MaxRequestMessageBytes != nil {
@ -331,22 +352,12 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult {
mc.MaxRespSize = newInt(int(*m.MaxResponseMessageBytes))
}
}
for i, n := range *m.Name {
path, err := n.generatePath()
if err != nil {
logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to methodConfig[%d]: %v", js, i, err)
return &serviceconfig.ParseResult{Err: err}
}
if _, ok := paths[path]; ok {
err = errDuplicatedName
logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to methodConfig[%d]: %v", js, i, err)
return &serviceconfig.ParseResult{Err: err}
}
paths[path] = struct{}{}
for _, n := range *m.Name {
if path, valid := n.generatePath(); valid {
sc.Methods[path] = mc
}
}
}
if sc.retryThrottling != nil {
if mt := sc.retryThrottling.MaxTokens; mt <= 0 || mt > 1000 {
@ -377,7 +388,7 @@ func convertRetryPolicy(jrp *jsonRetryPolicy) (p *retryPolicy, err error) {
*mb <= 0 ||
jrp.BackoffMultiplier <= 0 ||
len(jrp.RetryableStatusCodes) == 0 {
logger.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp)
grpclog.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp)
return nil, nil
}
@ -421,34 +432,3 @@ func getMaxSize(mcMax, doptMax *int, defaultVal int) *int {
func newInt(b int) *int {
return &b
}
func init() {
internal.EqualServiceConfigForTesting = equalServiceConfig
}
// equalServiceConfig compares two configs. The rawJSONString field is ignored,
// because they may diff in white spaces.
//
// If any of them is NOT *ServiceConfig, return false.
func equalServiceConfig(a, b serviceconfig.Config) bool {
aa, ok := a.(*ServiceConfig)
if !ok {
return false
}
bb, ok := b.(*ServiceConfig)
if !ok {
return false
}
aaRaw := aa.rawJSONString
aa.rawJSONString = ""
bbRaw := bb.rawJSONString
bb.rawJSONString = ""
defer func() {
aa.rawJSONString = aaRaw
bb.rawJSONString = bbRaw
}()
// Using reflect.DeepEqual instead of cmp.Equal because many balancer
// configs are unexported, and cmp.Equal cannot compare unexported fields
// from unexported structs.
return reflect.DeepEqual(aa, bb)
}

View File

@ -19,10 +19,7 @@
// Package serviceconfig defines types and methods for operating on gRPC
// service configs.
//
// Experimental
//
// Notice: This package is EXPERIMENTAL and may be changed or removed in a
// later release.
// This package is EXPERIMENTAL.
package serviceconfig
// Config represents an opaque data structure holding a service config.

View File

@ -16,6 +16,8 @@
*
*/
//go:generate protoc --go_out=plugins=grpc:. grpc_testing/test.proto
// Package stats is for collecting and reporting various network and RPC stats.
// This package is for monitoring purpose only. All fields are read-only.
// All APIs are experimental.
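
A bare-bones sketch of a stats.Handler, the consumer of types like InPayload whose WireLength accounting changes elsewhere in this diff; the handler type and log message are illustrative only:

package example

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/stats"
)

// byteCounter is a hypothetical stats.Handler that logs inbound payload sizes.
type byteCounter struct{}

func (byteCounter) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context   { return ctx }
func (byteCounter) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }
func (byteCounter) HandleConn(context.Context, stats.ConnStats)                       {}

func (byteCounter) HandleRPC(_ context.Context, s stats.RPCStats) {
	if in, ok := s.(*stats.InPayload); ok {
		log.Printf("received %d payload bytes (%d on the wire)", in.Length, in.WireLength)
	}
}

// newInstrumentedServer shows how the handler is installed on a server;
// clients would use grpc.WithStatsHandler instead.
func newInstrumentedServer() *grpc.Server {
	return grpc.NewServer(grpc.StatsHandler(byteCounter{}))
}
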

View File

@ -35,7 +35,6 @@ import (
"google.golang.org/grpc/internal/binarylog"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpcrand"
"google.golang.org/grpc/internal/grpcutil"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
@ -278,6 +277,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
}
cs.binlog = binarylog.GetMethodLogger(method)
cs.callInfo.stream = cs
// Only this initial attempt has stats/tracing.
// TODO(dfawley): move to newAttempt when per-attempt stats are implemented.
if err := cs.newAttemptLocked(sh, trInfo); err != nil {
@ -347,16 +347,7 @@ func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo *traceInfo) (r
if err := cs.ctx.Err(); err != nil {
return toRPCErr(err)
}
ctx := cs.ctx
if cs.cc.parsedTarget.Scheme == "xds" {
// Add extra metadata (metadata that will be added by transport) to context
// so the balancer can see them.
ctx = grpcutil.WithExtraMetadata(cs.ctx, metadata.Pairs(
"content-type", grpcutil.ContentType(cs.callHdr.ContentSubtype),
))
}
t, done, err := cs.cc.getTransport(ctx, cs.callInfo.failFast, cs.callHdr.Method)
t, done, err := cs.cc.getTransport(cs.ctx, cs.callInfo.failFast, cs.callHdr.Method)
if err != nil {
return err
}
@ -374,11 +365,6 @@ func (a *csAttempt) newStream() error {
cs.callHdr.PreviousAttempts = cs.numRetries
s, err := a.t.NewStream(cs.ctx, cs.callHdr)
if err != nil {
if _, ok := err.(transport.PerformedIOError); ok {
// Return without converting to an RPC error so retry code can
// inspect.
return err
}
return toRPCErr(err)
}
cs.attempt.s = s
@ -474,22 +460,12 @@ func (cs *clientStream) commitAttempt() {
// shouldRetry returns nil if the RPC should be retried; otherwise it returns
// the error that should be returned by the operation.
func (cs *clientStream) shouldRetry(err error) error {
unprocessed := false
if cs.attempt.s == nil {
pioErr, ok := err.(transport.PerformedIOError)
if ok {
// Unwrap error.
err = toRPCErr(pioErr.Err)
} else {
unprocessed = true
}
if !ok && !cs.callInfo.failFast {
// In the event of a non-IO operation error from NewStream, we
if cs.attempt.s == nil && !cs.callInfo.failFast {
// In the event of any error from NewStream (attempt.s == nil), we
// never attempted to write anything to the wire, so we can retry
// indefinitely for non-fail-fast RPCs.
return nil
}
}
if cs.finished || cs.committed {
// RPC is finished or committed; cannot retry.
return err
@ -497,12 +473,13 @@ func (cs *clientStream) shouldRetry(err error) error {
// Wait for the trailers.
if cs.attempt.s != nil {
<-cs.attempt.s.Done()
unprocessed = cs.attempt.s.Unprocessed()
}
if cs.firstAttempt && unprocessed {
if cs.firstAttempt && (cs.attempt.s == nil || cs.attempt.s.Unprocessed()) {
// First attempt, stream unprocessed: transparently retry.
cs.firstAttempt = false
return nil
}
cs.firstAttempt = false
if cs.cc.dopts.disableRetry {
return err
}
@ -520,13 +497,13 @@ func (cs *clientStream) shouldRetry(err error) error {
if len(sps) == 1 {
var e error
if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 {
channelz.Infof(logger, cs.cc.channelzID, "Server retry pushback specified to abort (%q).", sps[0])
channelz.Infof(cs.cc.channelzID, "Server retry pushback specified to abort (%q).", sps[0])
cs.retryThrottler.throttle() // This counts as a failure for throttling.
return err
}
hasPushback = true
} else if len(sps) > 1 {
channelz.Warningf(logger, cs.cc.channelzID, "Server retry pushback specified multiple values (%q); not retrying.", sps)
channelz.Warningf(cs.cc.channelzID, "Server retry pushback specified multiple values (%q); not retrying.", sps)
cs.retryThrottler.throttle() // This counts as a failure for throttling.
return err
}
@ -588,7 +565,6 @@ func (cs *clientStream) retryLocked(lastErr error) error {
cs.commitAttemptLocked()
return err
}
cs.firstAttempt = false
if err := cs.newAttemptLocked(nil, nil); err != nil {
return err
}
@ -823,15 +799,6 @@ func (cs *clientStream) finish(err error) {
}
cs.finished = true
cs.commitAttemptLocked()
if cs.attempt != nil {
cs.attempt.finish(err)
// after functions all rely upon having a stream.
if cs.attempt.s != nil {
for _, o := range cs.opts {
o.after(cs.callInfo, cs.attempt)
}
}
}
cs.mu.Unlock()
// For binary logging. only log cancel in finish (could be caused by RPC ctx
// canceled or ClientConn closed). Trailer will be logged in RecvMsg.
@ -853,6 +820,15 @@ func (cs *clientStream) finish(err error) {
cs.cc.incrCallsSucceeded()
}
}
if cs.attempt != nil {
cs.attempt.finish(err)
// after functions all rely upon having a stream.
if cs.attempt.s != nil {
for _, o := range cs.opts {
o.after(cs.callInfo)
}
}
}
cs.cancel()
}
@ -929,7 +905,7 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) {
Payload: m,
// TODO truncate large payload.
Data: payInfo.uncompressedBytes,
WireLength: payInfo.wireLength + headerLen,
WireLength: payInfo.wireLength,
Length: len(payInfo.uncompressedBytes),
})
}
@ -1090,6 +1066,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin
t: t,
}
as.callInfo.stream = as
s, err := as.t.NewStream(as.ctx, as.callHdr)
if err != nil {
err = toRPCErr(err)
@ -1511,7 +1488,7 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
Payload: m,
// TODO truncate large payload.
Data: payInfo.uncompressedBytes,
WireLength: payInfo.wireLength + headerLen,
WireLength: payInfo.wireLength,
Length: len(payInfo.uncompressedBytes),
})
}

View File

@ -17,12 +17,7 @@
*/
// Package tap defines the function handles which are executed on the transport
// layer of gRPC-Go and related information.
//
// Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
// layer of gRPC-Go and related information. Everything here is EXPERIMENTAL.
package tap
import (

View File

@ -19,4 +19,4 @@
package grpc
// Version is the current grpc version.
const Version = "1.33.1"
const Version = "1.29.1"

64
vendor/google.golang.org/grpc/vet.sh generated vendored
View File

@ -39,7 +39,8 @@ if [[ "$1" = "-install" ]]; then
golang.org/x/lint/golint \
golang.org/x/tools/cmd/goimports \
honnef.co/go/tools/cmd/staticcheck \
github.com/client9/misspell/cmd/misspell
github.com/client9/misspell/cmd/misspell \
github.com/golang/protobuf/protoc-gen-go
popd
else
# Ye olde `go get` incantation.
@ -49,7 +50,8 @@ if [[ "$1" = "-install" ]]; then
golang.org/x/lint/golint \
golang.org/x/tools/cmd/goimports \
honnef.co/go/tools/cmd/staticcheck \
github.com/client9/misspell/cmd/misspell
github.com/client9/misspell/cmd/misspell \
github.com/golang/protobuf/protoc-gen-go
fi
if [[ -z "${VET_SKIP_PROTO}" ]]; then
if [[ "${TRAVIS}" = "true" ]]; then
@ -83,23 +85,17 @@ not git grep -l 'x/net/context' -- "*.go"
# thread safety.
git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test'
# - Do not call grpclog directly. Use grpclog.Component instead.
git grep -l 'grpclog.I\|grpclog.W\|grpclog.E\|grpclog.F\|grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go'
# - Ensure all ptypes proto packages are renamed when importing.
not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go"
# - Ensure all xds proto imports are renamed to *pb or *grpc.
git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.pb.go' | not grep -v 'pb "\|grpc "'
# - Check imports that are illegal in appengine (until Go 1.11).
# TODO: Remove when we drop Go 1.10 support
go list -f {{.Dir}} ./... | xargs go run test/go_vet/vet.go
# - gofmt, goimports, golint (with exceptions for generated code), go vet.
gofmt -s -d -l . 2>&1 | fail_on_output
goimports -l . 2>&1 | not grep -vE "\.pb\.go"
golint ./... 2>&1 | not grep -vE "\.pb\.go:"
goimports -l . 2>&1 | not grep -vE "(_mock|\.pb)\.go"
golint ./... 2>&1 | not grep -vE "(_mock|\.pb)\.go:"
go vet -all ./...
misspell -error .
@ -111,9 +107,9 @@ if [[ -z "${VET_SKIP_PROTO}" ]]; then
(git status; git --no-pager diff; exit 1)
fi
# - Check that our modules are tidy.
# - Check that our module is tidy.
if go help mod >& /dev/null; then
find . -name 'go.mod' | xargs -IXXX bash -c 'cd $(dirname XXX); go mod tidy'
go mod tidy && \
git status --porcelain 2>&1 | fail_on_output || \
(git status; git --no-pager diff; exit 1)
fi
@ -127,18 +123,18 @@ staticcheck -go 1.9 -checks 'inherit,-ST1015' ./... > "${SC_OUT}" || true
# Error if anything other than deprecation warnings are printed.
not grep -v "is deprecated:.*SA1019" "${SC_OUT}"
# Only ignore the following deprecated types/fields/functions.
not grep -Fv '.CredsBundle
not grep -Fv '.HandleResolvedAddrs
.HandleSubConnStateChange
.HeaderMap
.Metadata is deprecated: use Attributes
.NewAddress
.NewServiceConfig
.Metadata is deprecated: use Attributes
.Type is deprecated: use Attributes
balancer.ErrTransientFailure
.UpdateBalancerState
balancer.Picker
grpc.CallCustomCodec
grpc.Code
grpc.Compressor
grpc.CustomCodec
grpc.Decompressor
grpc.MaxMsgSize
grpc.MethodConfig
@ -146,7 +142,9 @@ grpc.NewGZIPCompressor
grpc.NewGZIPDecompressor
grpc.RPCCompressor
grpc.RPCDecompressor
grpc.RoundRobin
grpc.ServiceConfig
grpc.WithBalancer
grpc.WithBalancerName
grpc.WithCompressor
grpc.WithDecompressor
@ -156,38 +154,10 @@ grpc.WithServiceConfig
grpc.WithTimeout
http.CloseNotifier
info.SecurityVersion
naming.Resolver
naming.Update
naming.Watcher
resolver.Backend
resolver.GRPCLB' "${SC_OUT}"
# - special golint on package comments.
lint_package_comment_per_package() {
# Number of files in this go package.
fileCount=$(go list -f '{{len .GoFiles}}' $1)
if [ ${fileCount} -eq 0 ]; then
return 0
fi
# Number of package errors generated by golint.
lintPackageCommentErrorsCount=$(golint --min_confidence 0 $1 | grep -c "should have a package comment")
# golint complains about every file that's missing the package comment. If the
# number of files for this package is greater than the number of errors, there's
# at least one file with package comment, good. Otherwise, fail.
if [ ${fileCount} -le ${lintPackageCommentErrorsCount} ]; then
echo "Package $1 (with ${fileCount} files) is missing package comment"
return 1
fi
}
lint_package_comment() {
set +ex
count=0
for i in $(go list ./...); do
lint_package_comment_per_package "$i"
((count += $?))
done
set -ex
return $count
}
lint_package_comment
echo SUCCESS

22
vendor/modules.txt vendored
View File

@ -116,6 +116,13 @@ github.com/golang/protobuf/ptypes/timestamp
github.com/golang/protobuf/ptypes/wrappers
# github.com/golang/snappy v0.0.1
github.com/golang/snappy
# github.com/google/go-cmp v0.5.2
github.com/google/go-cmp/cmp
github.com/google/go-cmp/cmp/cmpopts
github.com/google/go-cmp/cmp/internal/diff
github.com/google/go-cmp/cmp/internal/flags
github.com/google/go-cmp/cmp/internal/function
github.com/google/go-cmp/cmp/internal/value
# github.com/google/go-github/v32 v32.1.0
github.com/google/go-github/v32/github
# github.com/google/go-querystring v1.0.0
@ -124,6 +131,8 @@ github.com/google/go-querystring/query
github.com/google/uuid
# github.com/gorilla/mux v1.7.3
github.com/gorilla/mux
# github.com/gosnmp/gosnmp v1.29.0
github.com/gosnmp/gosnmp
# github.com/hashicorp/go-uuid v1.0.2
github.com/hashicorp/go-uuid
# github.com/hashicorp/golang-lru v0.5.4
@ -156,6 +165,7 @@ github.com/influxdata/influxdb/pkg/escape
github.com/influxdata/telegraf
github.com/influxdata/telegraf/filter
github.com/influxdata/telegraf/internal
github.com/influxdata/telegraf/internal/snmp
github.com/influxdata/telegraf/metric
github.com/influxdata/telegraf/plugins/common/tls
github.com/influxdata/telegraf/plugins/inputs
@ -167,13 +177,18 @@ github.com/influxdata/telegraf/plugins/inputs/mysql/v1
github.com/influxdata/telegraf/plugins/inputs/mysql/v2
github.com/influxdata/telegraf/plugins/inputs/nginx
github.com/influxdata/telegraf/plugins/inputs/prometheus
github.com/influxdata/telegraf/plugins/inputs/rabbitmq
github.com/influxdata/telegraf/plugins/inputs/redis
github.com/influxdata/telegraf/plugins/inputs/snmp
github.com/influxdata/telegraf/plugins/inputs/tengine
github.com/influxdata/telegraf/plugins/inputs/zookeeper
github.com/influxdata/telegraf/plugins/parsers/json
github.com/influxdata/telegraf/plugins/parsers/prometheus
github.com/influxdata/telegraf/plugins/parsers/prometheus/common
github.com/influxdata/telegraf/selfstat
github.com/influxdata/telegraf/testutil
# github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8
github.com/influxdata/wlog
# github.com/jcmturner/gofork v1.0.0
github.com/jcmturner/gofork/encoding/asn1
github.com/jcmturner/gofork/x/crypto/pbkdf2
@ -627,18 +642,18 @@ google.golang.org/genproto/protobuf/api
google.golang.org/genproto/protobuf/field_mask
google.golang.org/genproto/protobuf/ptype
google.golang.org/genproto/protobuf/source_context
# google.golang.org/grpc v1.33.1
# google.golang.org/grpc v1.33.1 => google.golang.org/grpc v1.29.1
google.golang.org/grpc
google.golang.org/grpc/attributes
google.golang.org/grpc/backoff
google.golang.org/grpc/balancer
google.golang.org/grpc/balancer/base
google.golang.org/grpc/balancer/grpclb/state
google.golang.org/grpc/balancer/roundrobin
google.golang.org/grpc/binarylog/grpc_binarylog_v1
google.golang.org/grpc/codes
google.golang.org/grpc/connectivity
google.golang.org/grpc/credentials
google.golang.org/grpc/credentials/internal
google.golang.org/grpc/encoding
google.golang.org/grpc/encoding/proto
google.golang.org/grpc/grpclog
@ -648,7 +663,6 @@ google.golang.org/grpc/internal/balancerload
google.golang.org/grpc/internal/binarylog
google.golang.org/grpc/internal/buffer
google.golang.org/grpc/internal/channelz
google.golang.org/grpc/internal/credentials
google.golang.org/grpc/internal/envconfig
google.golang.org/grpc/internal/grpclog
google.golang.org/grpc/internal/grpcrand
@ -656,12 +670,12 @@ google.golang.org/grpc/internal/grpcsync
google.golang.org/grpc/internal/grpcutil
google.golang.org/grpc/internal/resolver/dns
google.golang.org/grpc/internal/resolver/passthrough
google.golang.org/grpc/internal/serviceconfig
google.golang.org/grpc/internal/status
google.golang.org/grpc/internal/syscall
google.golang.org/grpc/internal/transport
google.golang.org/grpc/keepalive
google.golang.org/grpc/metadata
google.golang.org/grpc/naming
google.golang.org/grpc/peer
google.golang.org/grpc/resolver
google.golang.org/grpc/resolver/dns