add vendor

710leo 2020-09-26 17:02:52 +08:00
parent ed35ddc388
commit 2d9287805e
1842 changed files with 487204 additions and 78 deletions

go.mod | 5

@@ -10,8 +10,6 @@ require (
github.com/coreos/go-oidc v2.2.1+incompatible
github.com/dgryski/go-tsz v0.0.0-20180227144327-03b7d791f4fe
github.com/eapache/go-resiliency v1.2.0 // indirect
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 // indirect
github.com/eapache/queue v1.1.0 // indirect
github.com/garyburd/redigo v1.6.2
github.com/gin-contrib/pprof v1.3.0
github.com/gin-gonic/gin v1.6.3
@@ -19,14 +17,11 @@ require (
github.com/go-sql-driver/mysql v1.5.0
github.com/google/go-cmp v0.5.1 // indirect
github.com/google/uuid v1.1.2
github.com/gorilla/context v1.1.1 // indirect
github.com/gorilla/mux v1.6.2
github.com/hpcloud/tail v1.0.0
github.com/influxdata/influxdb v1.8.0
github.com/mattn/go-isatty v0.0.12
github.com/mattn/go-sqlite3 v1.14.0 // indirect
github.com/onsi/ginkgo v1.7.0 // indirect
github.com/onsi/gomega v1.4.3 // indirect
github.com/open-falcon/rrdlite v0.0.0-20200214140804-bf5829f786ad
github.com/pquerna/cachecontrol v0.0.0-20200819021114-67c6ae64274f // indirect
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 // indirect

go.sum | 75

@@ -25,12 +25,9 @@ github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/PuerkitoBio/goquery v1.5.1/go.mod h1:GsLWisAFVj4WgDibEWF4pvYnkVQBpKBKeU+7zCJoLcc=
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/Shopify/sarama v1.19.0 h1:9oksLxC6uxVPHPVYUmq6xhr1BOF/hHobWH2UzO67z1s=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc=
@@ -82,7 +79,6 @@ github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8
github.com/dgryski/go-tsz v0.0.0-20180227144327-03b7d791f4fe h1:VOrqop9SqFzqwZpROEOZpIufuLEUoJ3reNhdOdC9Zzw=
github.com/dgryski/go-tsz v0.0.0-20180227144327-03b7d791f4fe/go.mod h1:ft6P746mYUFQBCsH3OkFBG8FtjLx1XclLMo+9Jh1Yts=
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/eapache/go-resiliency v1.1.0 h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU=
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
github.com/eapache/go-resiliency v1.2.0 h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q=
@@ -95,21 +91,15 @@ github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7j
github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385 h1:clC1lXBpe2kTj2VHdaIu9ajZQe4kcEY9j0NsnDDBZ3o=
github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM=
github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/garyburd/redigo v1.6.2 h1:yE/pwKCrbLpLpQICzYTeZ7JsTA/C53wFTJHaEtRqniM=
github.com/garyburd/redigo v1.6.2/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gin-contrib/pprof v1.3.0 h1:G9eK6HnbkSqDZBYbzG4wrjCsA4e+cvYAHUZw6W+W9K0=
github.com/gin-contrib/pprof v1.3.0/go.mod h1:waMjT1H9b179t3CxuG1cV3DHpga6ybizwfBaM5OXaB0=
@@ -125,14 +115,8 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI=
github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM=
github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A=
github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q=
@@ -151,7 +135,6 @@ github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
@@ -168,14 +151,6 @@ github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@@ -189,20 +164,17 @@ github.com/google/go-cmp v0.5.1 h1:JFrFEBb2xKufg6XkJsJr+WbKb4FQlURi5RUcBveYu9k=
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8=
@@ -253,8 +225,6 @@ github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be/go.mod h1:+SdeFBv
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o=
@@ -264,7 +234,6 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V
github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
@@ -272,9 +241,8 @@ github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPR
github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
@@ -284,7 +252,6 @@ github.com/lib/pq v1.0.0 h1:X5PMW56eZitiTeO7tKzZxFCSpbFZJtkMMooicw2us9A=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
@@ -313,18 +280,13 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/open-falcon/rrdlite v0.0.0-20200214140804-bf5829f786ad h1:GXUy5t8CYdaaEj1lRnE22CbHVY1M5h6Rv4kk0PJQc54=
github.com/open-falcon/rrdlite v0.0.0-20200214140804-bf5829f786ad/go.mod h1:pXROoG0iWVnqq4u2Ii97S0Vt9iCTVypshsl9HXsV6cs=
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
@@ -340,7 +302,6 @@ github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5/go.mod h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
@@ -397,12 +358,9 @@ github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkU
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.7.1 h1:pM5oEahlgWv/WnHXpgbKz7iLIxRf65tye2Ci+XFK5sk=
github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -501,7 +459,6 @@ golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc/go.mod h1:mL1N/T3taQHkDXs73r
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc h1:zK/HqS5bZxDptfPJNq8v7vJfXtkU7r9TLIoSr1bXaP4=
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -532,13 +489,11 @@ golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200812155832-6a926be9bd1d h1:QQrM/CCYEzTs91GZylDCQjGHudbPTxF/1fvXdVh5lMo=
golang.org/x/sys v0.0.0-20200812155832-6a926be9bd1d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -546,16 +501,12 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3
golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@@ -619,40 +570,26 @@ google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvx
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200108215221-bd8f9a0ef82f/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA=
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc h1:2gGKlE2+asNV9m7xrywl36YYNnBG5ZQ0r/BOOxqPpmk=
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk=
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d h1:TxyelI5cVkbREznMhfzycHdkp5cLA7DpE+GKjSslYhM=
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df h1:n7WqCuqOuCbNr617RXOY0AWRXxgwEyPp2z+p0+hgMuE=
gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df/go.mod h1:LRQQ+SO6ZHR7tOkpBDuZnXENFzX8qRjMDMyPD6BRkCw=
gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno=
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/ldap.v3 v3.1.0 h1:DIDWEjI7vQWREh0S8X5/NFPCZ3MCVd55LmXKPW4XLGE=
@@ -679,19 +616,11 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
k8s.io/apimachinery v0.0.0-20190817020851-f2f3a405f61d h1:7Kns6qqhMAQWvGkxYOLSLRZ5hJO0/5pcE5lPGP2fxUw=
k8s.io/apimachinery v0.0.0-20190817020851-f2f3a405f61d/go.mod h1:3jediapYqJ2w1BFw7lAZPCx7scubsTfosqHkhXCWJKw=
k8s.io/apimachinery v0.19.2 h1:5Gy9vQpAGTKHPVOh5c4plE274X8D/6cuEiTO2zve7tc=
k8s.io/apimachinery v0.19.2/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/klog v0.3.1/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc=
k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
xorm.io/builder v0.3.6 h1:ha28mQ2M+TFx96Hxo+iq6tQgnkC9IZkM6D8w9sKHHF8=
xorm.io/builder v0.3.6/go.mod h1:LEFAPISnRzG+zxaxj2vPicRwz67BdhFreKg8yv8/TgU=
xorm.io/core v0.7.2/go.mod h1:jJfd0UAEzZ4t87nbQYtVjmqpIODugN6PD2D9E+dJvdM=

vendor/github.com/Shopify/sarama/.gitignore | 27 (generated, vendored, new file)

@@ -0,0 +1,27 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
*.test
# Folders
_obj
_test
.vagrant
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
coverage.txt
profile.out

vendor/github.com/Shopify/sarama/.travis.yml | 36 (generated, vendored, new file)

@@ -0,0 +1,36 @@
language: go
go:
- 1.9.7
- 1.10.4
- 1.11
env:
global:
- KAFKA_PEERS=localhost:9091,localhost:9092,localhost:9093,localhost:9094,localhost:9095
- TOXIPROXY_ADDR=http://localhost:8474
- KAFKA_INSTALL_ROOT=/home/travis/kafka
- KAFKA_HOSTNAME=localhost
- DEBUG=true
matrix:
- KAFKA_VERSION=1.0.0
- KAFKA_VERSION=1.1.0
- KAFKA_VERSION=2.0.0
before_install:
- export REPOSITORY_ROOT=${TRAVIS_BUILD_DIR}
- vagrant/install_cluster.sh
- vagrant/boot_cluster.sh
- vagrant/create_topics.sh
install: make install_dependencies
script:
- make test
- make vet
- make errcheck
- if [ "$TRAVIS_GO_VERSION" = "1.11" ]; then make fmt; fi
after_success:
- bash <(curl -s https://codecov.io/bash)
after_script: vagrant/halt_cluster.sh

vendor/github.com/Shopify/sarama/CHANGELOG.md | 593 (generated, vendored, new file)

@@ -0,0 +1,593 @@
# Changelog
#### Version 1.19.0 (2018-09-27)
New Features:
- Implement a higher-level consumer group
([#1099](https://github.com/Shopify/sarama/pull/1099)).
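A minimal sketch of the new consumer-group API, assuming a broker at `localhost:9092` and illustrative group/topic names (error handling abbreviated):

```go
package main

import (
	"context"
	"log"

	"github.com/Shopify/sarama"
)

// handler satisfies sarama.ConsumerGroupHandler.
type handler struct{}

func (handler) Setup(sarama.ConsumerGroupSession) error   { return nil }
func (handler) Cleanup(sarama.ConsumerGroupSession) error { return nil }

// ConsumeClaim drains one claimed partition and marks processed offsets.
func (handler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
	for msg := range claim.Messages() {
		log.Printf("topic=%s partition=%d offset=%d", msg.Topic, msg.Partition, msg.Offset)
		sess.MarkMessage(msg, "")
	}
	return nil
}

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V1_0_0_0 // consumer groups need Kafka 0.10.2 or newer

	group, err := sarama.NewConsumerGroup([]string{"localhost:9092"}, "example-group", cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer group.Close()

	// Consume blocks until the session ends (rebalance or cancellation),
	// so production code typically calls it in a loop.
	if err := group.Consume(context.Background(), []string{"example-topic"}, handler{}); err != nil {
		log.Fatal(err)
	}
}
```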
Improvements:
- Add support for Go 1.11
([#1176](https://github.com/Shopify/sarama/pull/1176)).
Bug Fixes:
- Fix encoding of `MetadataResponse` with version 2 and higher
([#1174](https://github.com/Shopify/sarama/pull/1174)).
- Fix race condition in mock async producer
([#1174](https://github.com/Shopify/sarama/pull/1174)).
#### Version 1.18.0 (2018-09-07)
New Features:
- Make `Partitioner.RequiresConsistency` vary per-message
([#1112](https://github.com/Shopify/sarama/pull/1112)).
- Add customizable partitioner
([#1118](https://github.com/Shopify/sarama/pull/1118)).
- Add `ClusterAdmin` support for `CreateTopic`, `DeleteTopic`, `CreatePartitions`,
`DeleteRecords`, `DescribeConfig`, `AlterConfig`, `CreateACL`, `ListAcls`, `DeleteACL`
([#1055](https://github.com/Shopify/sarama/pull/1055)).
Improvements:
- Add support for Kafka 2.0.0
([#1149](https://github.com/Shopify/sarama/pull/1149)).
- Allow setting `LocalAddr` when dialing an address to support multi-homed hosts
([#1123](https://github.com/Shopify/sarama/pull/1123)).
- Simpler offset management
([#1127](https://github.com/Shopify/sarama/pull/1127)).
Bug Fixes:
- Fix mutation of `ProducerMessage.MetaData` when producing to Kafka
([#1110](https://github.com/Shopify/sarama/pull/1110)).
- Fix consumer block when response did not contain all the
expected topic/partition blocks
([#1086](https://github.com/Shopify/sarama/pull/1086)).
- Fix consumer block when response contains only control messages
([#1115](https://github.com/Shopify/sarama/pull/1115)).
- Add timeout config for ClusterAdmin requests
([#1142](https://github.com/Shopify/sarama/pull/1142)).
- Add version check when producing message with headers
([#1117](https://github.com/Shopify/sarama/pull/1117)).
- Fix `MetadataRequest` for empty list of topics
([#1132](https://github.com/Shopify/sarama/pull/1132)).
- Fix producer topic metadata on-demand fetch when a topic error occurs in the metadata response
([#1125](https://github.com/Shopify/sarama/pull/1125)).
#### Version 1.17.0 (2018-05-30)
New Features:
- Add support for gzip compression levels
([#1044](https://github.com/Shopify/sarama/pull/1044)).
- Add support for Metadata request/response pairs versions v1 to v5
([#1047](https://github.com/Shopify/sarama/pull/1047),
[#1069](https://github.com/Shopify/sarama/pull/1069)).
- Add versioning to JoinGroup request/response pairs
([#1098](https://github.com/Shopify/sarama/pull/1098))
- Add support for CreatePartitions, DeleteGroups, DeleteRecords request/response pairs
([#1065](https://github.com/Shopify/sarama/pull/1065),
[#1096](https://github.com/Shopify/sarama/pull/1096),
[#1027](https://github.com/Shopify/sarama/pull/1027)).
- Add `Controller()` method to Client interface
([#1063](https://github.com/Shopify/sarama/pull/1063)).
Improvements:
- ConsumerMetadataReq/Resp has been migrated to FindCoordinatorReq/Resp
([#1010](https://github.com/Shopify/sarama/pull/1010)).
- Expose missing protocol parts: `msgSet` and `recordBatch`
([#1049](https://github.com/Shopify/sarama/pull/1049)).
- Add support for v1 DeleteTopics Request
([#1052](https://github.com/Shopify/sarama/pull/1052)).
- Add support for Go 1.10
([#1064](https://github.com/Shopify/sarama/pull/1064)).
- Claim support for Kafka 1.1.0
([#1073](https://github.com/Shopify/sarama/pull/1073)).
Bug Fixes:
- Fix FindCoordinatorResponse.encode to allow nil Coordinator
([#1050](https://github.com/Shopify/sarama/pull/1050),
[#1051](https://github.com/Shopify/sarama/pull/1051)).
- Clear all metadata when we have the latest topic info
([#1033](https://github.com/Shopify/sarama/pull/1033)).
- Make `PartitionConsumer.Close` idempotent
([#1092](https://github.com/Shopify/sarama/pull/1092)).
#### Version 1.16.0 (2018-02-12)
New Features:
- Add support for the Create/Delete Topics request/response pairs
([#1007](https://github.com/Shopify/sarama/pull/1007),
[#1008](https://github.com/Shopify/sarama/pull/1008)).
- Add support for the Describe/Create/Delete ACL request/response pairs
([#1009](https://github.com/Shopify/sarama/pull/1009)).
- Add support for the five transaction-related request/response pairs
([#1016](https://github.com/Shopify/sarama/pull/1016)).
Improvements:
- Permit setting version on mock producer responses
([#999](https://github.com/Shopify/sarama/pull/999)).
- Add `NewMockBrokerListener` helper for testing TLS connections
([#1019](https://github.com/Shopify/sarama/pull/1019)).
- Changed the default value for `Consumer.Fetch.Default` from 32KiB to 1MiB
which results in much higher throughput in most cases
([#1024](https://github.com/Shopify/sarama/pull/1024)).
- Reuse the `time.Ticker` across fetch requests in the PartitionConsumer to
reduce CPU and memory usage when processing many partitions
([#1028](https://github.com/Shopify/sarama/pull/1028)).
- Assign relative offsets to messages in the producer to save the brokers a
recompression pass
([#1002](https://github.com/Shopify/sarama/pull/1002),
[#1015](https://github.com/Shopify/sarama/pull/1015)).
Bug Fixes:
- Fix producing uncompressed batches with the new protocol format
([#1032](https://github.com/Shopify/sarama/issues/1032)).
- Fix consuming compacted topics with the new protocol format
([#1005](https://github.com/Shopify/sarama/issues/1005)).
- Fix consuming topics with a mix of protocol formats
([#1021](https://github.com/Shopify/sarama/issues/1021)).
- Fix consuming when the broker includes multiple batches in a single response
([#1022](https://github.com/Shopify/sarama/issues/1022)).
- Fix detection of `PartialTrailingMessage` when the partial message was
truncated before the magic value indicating its version
([#1030](https://github.com/Shopify/sarama/pull/1030)).
- Fix expectation-checking in the mock of `SyncProducer.SendMessages`
([#1035](https://github.com/Shopify/sarama/pull/1035)).
#### Version 1.15.0 (2017-12-08)
New Features:
- Claim official support for Kafka 1.0, though it did already work
([#984](https://github.com/Shopify/sarama/pull/984)).
- Helper methods for Kafka version numbers to/from strings
([#989](https://github.com/Shopify/sarama/pull/989)).
- Implement CreatePartitions request/response
([#985](https://github.com/Shopify/sarama/pull/985)).
Improvements:
- Add error codes 45-60
([#986](https://github.com/Shopify/sarama/issues/986)).
Bug Fixes:
- Fix slow consuming for certain Kafka 0.11/1.0 configurations
([#982](https://github.com/Shopify/sarama/pull/982)).
- Correctly determine when a FetchResponse contains the new message format
([#990](https://github.com/Shopify/sarama/pull/990)).
- Fix producing with multiple headers
([#996](https://github.com/Shopify/sarama/pull/996)).
- Fix handling of truncated record batches
([#998](https://github.com/Shopify/sarama/pull/998)).
- Fix leaking metrics when closing brokers
([#991](https://github.com/Shopify/sarama/pull/991)).
#### Version 1.14.0 (2017-11-13)
New Features:
- Add support for the new Kafka 0.11 record-batch format, including the wire
protocol and the necessary behavioural changes in the producer and consumer.
Transactions and idempotency are not yet supported, but producing and
consuming should work with all the existing bells and whistles (batching,
compression, etc) as well as the new custom headers. Thanks to Vlad Hanciuta
of Arista Networks for this work. Part of
([#901](https://github.com/Shopify/sarama/issues/901)).
Bug Fixes:
- Fix encoding of ProduceResponse versions in test
([#970](https://github.com/Shopify/sarama/pull/970)).
- Return partial replicas list when we have it
([#975](https://github.com/Shopify/sarama/pull/975)).
#### Version 1.13.0 (2017-10-04)
New Features:
- Support for FetchRequest version 3
([#905](https://github.com/Shopify/sarama/pull/905)).
- Permit setting version on mock FetchResponses
([#939](https://github.com/Shopify/sarama/pull/939)).
- Add a configuration option to support storing only minimal metadata for
extremely large clusters
([#937](https://github.com/Shopify/sarama/pull/937)).
- Add `PartitionOffsetManager.ResetOffset` for backtracking tracked offsets
([#932](https://github.com/Shopify/sarama/pull/932)).
Improvements:
- Provide the block-level timestamp when consuming compressed messages
([#885](https://github.com/Shopify/sarama/issues/885)).
- `Client.Replicas` and `Client.InSyncReplicas` now respect the order returned
by the broker, which can be meaningful
([#930](https://github.com/Shopify/sarama/pull/930)).
- Use a `Ticker` to reduce consumer timer overhead at the cost of higher
variance in the actual timeout
([#933](https://github.com/Shopify/sarama/pull/933)).
Bug Fixes:
- Gracefully handle messages with negative timestamps
([#907](https://github.com/Shopify/sarama/pull/907)).
- Raise a proper error when encountering an unknown message version
([#940](https://github.com/Shopify/sarama/pull/940)).
#### Version 1.12.0 (2017-05-08)
New Features:
- Added support for the `ApiVersions` request and response pair, and Kafka
version 0.10.2 ([#867](https://github.com/Shopify/sarama/pull/867)). Note
that you still need to specify the Kafka version in the Sarama configuration
for the time being.
- Added a `Brokers` method to the Client which returns the complete set of
active brokers ([#813](https://github.com/Shopify/sarama/pull/813)).
- Added an `InSyncReplicas` method to the Client which returns the set of all
in-sync broker IDs for the given partition, now that the Kafka versions for
which this was misleading are no longer in our supported set
([#872](https://github.com/Shopify/sarama/pull/872)).
- Added a `NewCustomHashPartitioner` method which allows constructing a hash
partitioner with a custom hash method in case the default (FNV-1a) is not
suitable
([#837](https://github.com/Shopify/sarama/pull/837),
[#841](https://github.com/Shopify/sarama/pull/841)).
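A sketch of wiring the custom partitioner into a producer config; passing `fnv.New32a` simply reproduces the stock FNV-1a default, and any other `func() hash.Hash32` can be swapped in:

```go
package example

import (
	"hash/fnv"

	"github.com/Shopify/sarama"
)

func newConfig() *sarama.Config {
	cfg := sarama.NewConfig()
	// Any func() hash.Hash32 works here; FNV-1a mirrors the default
	// HashPartitioner, so this line changes nothing but demonstrates the hook.
	cfg.Producer.Partitioner = sarama.NewCustomHashPartitioner(fnv.New32a)
	return cfg
}
```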
Improvements:
- Recognize more Kafka error codes
([#859](https://github.com/Shopify/sarama/pull/859)).
Bug Fixes:
- Fix an issue where decoding a malformed FetchRequest would not return the
correct error ([#818](https://github.com/Shopify/sarama/pull/818)).
- Respect ordering of group protocols in JoinGroupRequests. This fix is
transparent if you're using the `AddGroupProtocol` or
`AddGroupProtocolMetadata` helpers; otherwise you will need to switch from
the `GroupProtocols` field (now deprecated) to use `OrderedGroupProtocols`
([#812](https://github.com/Shopify/sarama/issues/812)).
- Fix an alignment-related issue with atomics on 32-bit architectures
([#859](https://github.com/Shopify/sarama/pull/859)).
#### Version 1.11.0 (2016-12-20)
_Important:_ As of Sarama 1.11 it is necessary to set the config value of
`Producer.Return.Successes` to true in order to use the SyncProducer. Previous
versions would silently override this value when instantiating a SyncProducer
which led to unexpected values and data races.
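A minimal sketch of the required setting, assuming a local broker and an illustrative topic:

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Producer.Return.Successes = true // mandatory for the SyncProducer as of 1.11

	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Close()

	partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: "example-topic",
		Value: sarama.StringEncoder("hello"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("stored at partition=%d offset=%d", partition, offset)
}
```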
New Features:
- Metrics! Thanks to Sébastien Launay for all his work on this feature
([#701](https://github.com/Shopify/sarama/pull/701),
[#746](https://github.com/Shopify/sarama/pull/746),
[#766](https://github.com/Shopify/sarama/pull/766)).
- Add support for LZ4 compression
([#786](https://github.com/Shopify/sarama/pull/786)).
- Add support for ListOffsetRequest v1 and Kafka 0.10.1
([#775](https://github.com/Shopify/sarama/pull/775)).
- Added a `HighWaterMarks` method to the Consumer which aggregates the
`HighWaterMarkOffset` values of its child topic/partitions
([#769](https://github.com/Shopify/sarama/pull/769)).
Bug Fixes:
- Fixed producing when using timestamps, compression and Kafka 0.10
([#759](https://github.com/Shopify/sarama/pull/759)).
- Added missing decoder methods to DescribeGroups response
([#756](https://github.com/Shopify/sarama/pull/756)).
- Fix producer shutdown when `Return.Errors` is disabled
([#787](https://github.com/Shopify/sarama/pull/787)).
- Don't mutate configuration in SyncProducer
([#790](https://github.com/Shopify/sarama/pull/790)).
- Fix crash on SASL initialization failure
([#795](https://github.com/Shopify/sarama/pull/795)).
#### Version 1.10.1 (2016-08-30)
Bug Fixes:
- Fix the documentation for `HashPartitioner` which was incorrect
([#717](https://github.com/Shopify/sarama/pull/717)).
- Permit client creation even when it is limited by ACLs
([#722](https://github.com/Shopify/sarama/pull/722)).
- Several fixes to the consumer timer optimization code, regressions introduced
in v1.10.0. Go's timers are finicky
([#730](https://github.com/Shopify/sarama/pull/730),
[#733](https://github.com/Shopify/sarama/pull/733),
[#734](https://github.com/Shopify/sarama/pull/734)).
- Handle consuming compressed relative offsets with Kafka 0.10
([#735](https://github.com/Shopify/sarama/pull/735)).
#### Version 1.10.0 (2016-08-02)
_Important:_ As of Sarama 1.10 it is necessary to tell Sarama the version of
Kafka you are running against (via the `config.Version` value) in order to use
features that may not be compatible with old Kafka versions. If you don't
specify this value it will default to 0.8.2 (the minimum supported), and trying
to use more recent features (like the offset manager) will fail with an error.
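A sketch of declaring the broker version up front, using one of sarama's predefined version constants:

```go
package example

import "github.com/Shopify/sarama"

func newConfig() *sarama.Config {
	cfg := sarama.NewConfig()
	// Left unset this defaults to the minimum supported broker version
	// (0.8.2); version-gated features such as the offset manager need it raised.
	cfg.Version = sarama.V0_10_0_0
	return cfg
}
```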
_Also:_ The offset-manager's behaviour has been changed to match the upstream
Java consumer (see [#705](https://github.com/Shopify/sarama/pull/705) and
[#713](https://github.com/Shopify/sarama/pull/713)). If you use the
offset-manager, please ensure that you are committing one *greater* than the
last consumed message offset or else you may end up consuming duplicate
messages.
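A sketch of the "one greater" rule, assuming an existing `sarama.Client` and illustrative group/topic names:

```go
package example

import (
	"log"

	"github.com/Shopify/sarama"
)

func commitPosition(client sarama.Client, lastConsumed int64) {
	om, err := sarama.NewOffsetManagerFromClient("example-group", client)
	if err != nil {
		log.Fatal(err)
	}
	defer om.Close()

	pom, err := om.ManagePartition("example-topic", 0)
	if err != nil {
		log.Fatal(err)
	}
	defer pom.Close()

	// Mark one *greater* than the last consumed offset, matching the
	// upstream Java consumer's semantics described above.
	pom.MarkOffset(lastConsumed+1, "")
}
```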
New Features:
- Support for Kafka 0.10
([#672](https://github.com/Shopify/sarama/pull/672),
[#678](https://github.com/Shopify/sarama/pull/678),
[#681](https://github.com/Shopify/sarama/pull/681), and others).
- Support for configuring the target Kafka version
([#676](https://github.com/Shopify/sarama/pull/676)).
- Batch producing support in the SyncProducer
([#677](https://github.com/Shopify/sarama/pull/677)).
- Extend producer mock to allow setting expectations on message contents
([#667](https://github.com/Shopify/sarama/pull/667)).
Improvements:
- Support `nil` compressed messages for deleting in compacted topics
([#634](https://github.com/Shopify/sarama/pull/634)).
- Pre-allocate decoding errors, greatly reducing heap usage and GC time against
misbehaving brokers ([#690](https://github.com/Shopify/sarama/pull/690)).
- Re-use consumer expiry timers, removing one allocation per consumed message
([#707](https://github.com/Shopify/sarama/pull/707)).
Bug Fixes:
- Actually default the client ID to "sarama" like we say we do
([#664](https://github.com/Shopify/sarama/pull/664)).
- Fix a rare issue where `Client.Leader` could return the wrong error
([#685](https://github.com/Shopify/sarama/pull/685)).
- Fix a possible tight loop in the consumer
([#693](https://github.com/Shopify/sarama/pull/693)).
- Match upstream's offset-tracking behaviour
([#705](https://github.com/Shopify/sarama/pull/705)).
- Report UnknownTopicOrPartition errors from the offset manager
([#706](https://github.com/Shopify/sarama/pull/706)).
- Fix possible negative partition value from the HashPartitioner
([#709](https://github.com/Shopify/sarama/pull/709)).
#### Version 1.9.0 (2016-05-16)
New Features:
- Add support for custom offset manager retention durations
([#602](https://github.com/Shopify/sarama/pull/602)).
- Publish low-level mocks to enable testing of third-party producer/consumer
implementations ([#570](https://github.com/Shopify/sarama/pull/570)).
- Declare support for Golang 1.6
([#611](https://github.com/Shopify/sarama/pull/611)).
- Support for SASL plain-text auth
([#648](https://github.com/Shopify/sarama/pull/648)).
Improvements:
- Simplified broker locking scheme slightly
([#604](https://github.com/Shopify/sarama/pull/604)).
- Documentation cleanup
([#605](https://github.com/Shopify/sarama/pull/605),
[#621](https://github.com/Shopify/sarama/pull/621),
[#654](https://github.com/Shopify/sarama/pull/654)).
Bug Fixes:
- Fix race condition shutting down the OffsetManager
([#658](https://github.com/Shopify/sarama/pull/658)).
#### Version 1.8.0 (2016-02-01)
New Features:
- Full support for Kafka 0.9:
- All protocol messages and fields
([#586](https://github.com/Shopify/sarama/pull/586),
[#588](https://github.com/Shopify/sarama/pull/588),
[#590](https://github.com/Shopify/sarama/pull/590)).
- Verified that TLS support works
([#581](https://github.com/Shopify/sarama/pull/581)).
- Fixed the OffsetManager compatibility
([#585](https://github.com/Shopify/sarama/pull/585)).
Improvements:
- Optimize for fewer system calls when reading from the network
([#584](https://github.com/Shopify/sarama/pull/584)).
- Automatically retry `InvalidMessage` errors to match upstream behaviour
([#589](https://github.com/Shopify/sarama/pull/589)).
#### Version 1.7.0 (2015-12-11)
New Features:
- Preliminary support for Kafka 0.9
([#572](https://github.com/Shopify/sarama/pull/572)). This comes with several
caveats:
- Protocol-layer support is mostly in place
([#577](https://github.com/Shopify/sarama/pull/577)), however Kafka 0.9
renamed some messages and fields, which we did not rename in order to preserve API
compatibility.
- The producer and consumer work against 0.9, but the offset manager does
not ([#573](https://github.com/Shopify/sarama/pull/573)).
- TLS support may or may not work
([#581](https://github.com/Shopify/sarama/pull/581)).
Improvements:
- Don't wait for request timeouts on dead brokers, greatly speeding recovery
when the TCP connection is left hanging
([#548](https://github.com/Shopify/sarama/pull/548)).
- Refactored part of the producer. The new version provides a much more elegant
solution to [#449](https://github.com/Shopify/sarama/pull/449). It is also
slightly more efficient, and much more precise in calculating batch sizes
when compression is used
([#549](https://github.com/Shopify/sarama/pull/549),
[#550](https://github.com/Shopify/sarama/pull/550),
[#551](https://github.com/Shopify/sarama/pull/551)).
Bug Fixes:
- Fix race condition in consumer test mock
([#553](https://github.com/Shopify/sarama/pull/553)).
#### Version 1.6.1 (2015-09-25)
Bug Fixes:
- Fix panic that could occur if a user-supplied message value failed to encode
([#449](https://github.com/Shopify/sarama/pull/449)).
#### Version 1.6.0 (2015-09-04)
New Features:
- Implementation of a consumer offset manager using the APIs introduced in
Kafka 0.8.2. The API is designed mainly for integration into a future
high-level consumer, not for direct use, although it is *possible* to use it
directly.
([#461](https://github.com/Shopify/sarama/pull/461)).
Improvements:
- CRC32 calculation is much faster on machines with SSE4.2 instructions,
removing a major hotspot from most profiles
([#255](https://github.com/Shopify/sarama/pull/255)).
Bug Fixes:
- Make protocol decoding more robust against some malformed packets generated
by go-fuzz ([#523](https://github.com/Shopify/sarama/pull/523),
[#525](https://github.com/Shopify/sarama/pull/525)) or found in other ways
([#528](https://github.com/Shopify/sarama/pull/528)).
- Fix a potential race condition panic in the consumer on shutdown
([#529](https://github.com/Shopify/sarama/pull/529)).
#### Version 1.5.0 (2015-08-17)
New Features:
- TLS-encrypted network connections are now supported. This feature is subject
to change when Kafka releases built-in TLS support, but for now this is
enough to work with TLS-terminating proxies
([#154](https://github.com/Shopify/sarama/pull/154)).
Improvements:
- The consumer will not block if a single partition is not drained by the user;
all other partitions will continue to consume normally
([#485](https://github.com/Shopify/sarama/pull/485)).
- Formatting of error strings has been much improved
([#495](https://github.com/Shopify/sarama/pull/495)).
- Internal refactoring of the producer for code cleanliness and to enable
future work ([#300](https://github.com/Shopify/sarama/pull/300)).
Bug Fixes:
- Fix a potential deadlock in the consumer on shutdown
([#475](https://github.com/Shopify/sarama/pull/475)).
#### Version 1.4.3 (2015-07-21)
Bug Fixes:
- Don't include the partitioner in the producer's "fetch partitions"
circuit-breaker ([#466](https://github.com/Shopify/sarama/pull/466)).
- Don't retry messages until the broker is closed when abandoning a broker in
the producer ([#468](https://github.com/Shopify/sarama/pull/468)).
- Update the import path for snappy-go, it has moved again and the API has
changed slightly ([#486](https://github.com/Shopify/sarama/pull/486)).
#### Version 1.4.2 (2015-05-27)
Bug Fixes:
- Update the import path for snappy-go, it has moved from google code to github
([#456](https://github.com/Shopify/sarama/pull/456)).
#### Version 1.4.1 (2015-05-25)
Improvements:
- Optimizations when decoding snappy messages, thanks to John Potocny
([#446](https://github.com/Shopify/sarama/pull/446)).
Bug Fixes:
- Fix hypothetical race conditions on producer shutdown
([#450](https://github.com/Shopify/sarama/pull/450),
[#451](https://github.com/Shopify/sarama/pull/451)).
#### Version 1.4.0 (2015-05-01)
New Features:
- The consumer now implements `Topics()` and `Partitions()` methods to enable
users to dynamically choose what topics/partitions to consume without
instantiating a full client
([#431](https://github.com/Shopify/sarama/pull/431)).
- The partition-consumer now exposes the high water mark offset value returned
by the broker via the `HighWaterMarkOffset()` method ([#339](https://github.com/Shopify/sarama/pull/339)); a usage sketch follows this list.
- Added a `kafka-console-consumer` tool capable of handling multiple
partitions, and deprecated the now-obsolete `kafka-console-partitionConsumer`
([#439](https://github.com/Shopify/sarama/pull/439),
[#442](https://github.com/Shopify/sarama/pull/442)).
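A sketch combining the dynamic partition lookup with high-water-mark lag reporting (broker address and topic are illustrative):

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	consumer, err := sarama.NewConsumer([]string{"localhost:9092"}, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer consumer.Close()

	// Discover partitions without instantiating a full client.
	partitions, err := consumer.Partitions("example-topic")
	if err != nil {
		log.Fatal(err)
	}

	pc, err := consumer.ConsumePartition("example-topic", partitions[0], sarama.OffsetOldest)
	if err != nil {
		log.Fatal(err)
	}
	defer pc.Close()

	for msg := range pc.Messages() {
		// HighWaterMarkOffset is the next offset the broker will assign,
		// so the remaining lag is one less than the difference.
		lag := pc.HighWaterMarkOffset() - msg.Offset - 1
		log.Printf("offset=%d lag=%d", msg.Offset, lag)
	}
}
```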
Improvements:
- The producer's logging during retry scenarios is more consistent, more
useful, and slightly less verbose
([#429](https://github.com/Shopify/sarama/pull/429)).
- The client now shuffles its initial list of seed brokers in order to prevent
thundering herd on the first broker in the list
([#441](https://github.com/Shopify/sarama/pull/441)).
Bug Fixes:
- The producer now correctly manages its state if retries occur when it is
shutting down, fixing several instances of confusing behaviour and at least
one potential deadlock ([#419](https://github.com/Shopify/sarama/pull/419)).
- The consumer now handles messages for different partitions asynchronously,
making it much more resilient to specific user code ordering
([#325](https://github.com/Shopify/sarama/pull/325)).
#### Version 1.3.0 (2015-04-16)
New Features:
- The client now tracks consumer group coordinators using
ConsumerMetadataRequests similar to how it tracks partition leadership using
regular MetadataRequests ([#411](https://github.com/Shopify/sarama/pull/411)).
This adds two methods to the client API:
- `Coordinator(consumerGroup string) (*Broker, error)`
- `RefreshCoordinator(consumerGroup string) error`
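A sketch of the two methods, assuming an existing `sarama.Client` and an illustrative group name:

```go
package example

import (
	"log"

	"github.com/Shopify/sarama"
)

func coordinatorExample(client sarama.Client) {
	// Coordinator resolves (and caches) the broker that coordinates the group.
	broker, err := client.Coordinator("example-group")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("coordinator: %s", broker.Addr())

	// RefreshCoordinator forces the cached entry to be repopulated.
	if err := client.RefreshCoordinator("example-group"); err != nil {
		log.Fatal(err)
	}
}
```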
Improvements:
- ConsumerMetadataResponses now automatically create a Broker object out of the
ID/address/port combination for the Coordinator; accessing the fields
individually has been deprecated
([#413](https://github.com/Shopify/sarama/pull/413)).
- Much improved handling of `OffsetOutOfRange` errors in the consumer.
Consumers will fail to start if the provided offset is out of range
([#418](https://github.com/Shopify/sarama/pull/418))
and they will automatically shut down if the offset falls out of range
([#424](https://github.com/Shopify/sarama/pull/424)).
- Small performance improvement in encoding and decoding protocol messages
([#427](https://github.com/Shopify/sarama/pull/427)).
Bug Fixes:
- Fix a rare race condition in the client's background metadata refresher if
it happens to be activated while the client is being closed
([#422](https://github.com/Shopify/sarama/pull/422)).
#### Version 1.2.0 (2015-04-07)
Improvements:
- The producer's behaviour when `Flush.Frequency` is set is now more intuitive
([#389](https://github.com/Shopify/sarama/pull/389)).
- The producer is now somewhat more memory-efficient during and after retrying
messages due to an improved queue implementation
([#396](https://github.com/Shopify/sarama/pull/396)).
- The consumer produces much more useful logging output when leadership
changes ([#385](https://github.com/Shopify/sarama/pull/385)).
- The client's `GetOffset` method will now automatically refresh metadata and
retry once in the event of stale information or similar
([#394](https://github.com/Shopify/sarama/pull/394)).
- Broker connections now have support for using TCP keepalives
([#407](https://github.com/Shopify/sarama/issues/407)).
Bug Fixes:
- The OffsetCommitRequest message now correctly implements all three possible
API versions ([#390](https://github.com/Shopify/sarama/pull/390),
[#400](https://github.com/Shopify/sarama/pull/400)).
#### Version 1.1.0 (2015-03-20)
Improvements:
- Wrap the producer's partitioner call in a circuit-breaker so that repeatedly
broken topics don't choke throughput
([#373](https://github.com/Shopify/sarama/pull/373)).
Bug Fixes:
- Fix the producer's internal reference counting in certain unusual scenarios
([#367](https://github.com/Shopify/sarama/pull/367)).
- Fix the consumer's internal reference counting in certain unusual scenarios
([#369](https://github.com/Shopify/sarama/pull/369)).
- Fix a condition where the producer's internal control messages could have
gotten stuck ([#368](https://github.com/Shopify/sarama/pull/368)).
- Fix an issue where invalid partition lists would be cached when asking for
metadata for a non-existent topic ([#372](https://github.com/Shopify/sarama/pull/372)).
#### Version 1.0.0 (2015-03-17)
Version 1.0.0 is the first tagged version, and is almost a complete rewrite. The primary differences with previous untagged versions are:
- The producer has been rewritten; there is now a `SyncProducer` with a blocking API, and an `AsyncProducer` that is non-blocking.
- The consumer has been rewritten to only open one connection per broker instead of one connection per partition.
- The main types of Sarama are now interfaces to make dependency injection easy; mock implementations for `Consumer`, `SyncProducer` and `AsyncProducer` are provided in the `github.com/Shopify/sarama/mocks` package (see the test sketch after this list).
- For most use cases, it is no longer necessary to open a `Client`; this will be done for you.
- All the configuration values have been unified in the `Config` struct.
- Much improved test suite.
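A test-side sketch of the mocks package mentioned above, with an illustrative topic and payload; each expectation is consumed, in order, by one `SendMessage` call:

```go
package example

import (
	"testing"

	"github.com/Shopify/sarama"
	"github.com/Shopify/sarama/mocks"
)

func TestPublish(t *testing.T) {
	// The mock satisfies the sarama.SyncProducer interface.
	producer := mocks.NewSyncProducer(t, nil)
	defer producer.Close()

	producer.ExpectSendMessageAndSucceed()

	msg := &sarama.ProducerMessage{Topic: "example-topic", Value: sarama.StringEncoder("hello")}
	if _, _, err := producer.SendMessage(msg); err != nil {
		t.Fatal(err)
	}
}
```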

vendor/github.com/Shopify/sarama/LICENSE | 20 (generated, vendored, new file)

@@ -0,0 +1,20 @@
Copyright (c) 2013 Shopify
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

39
vendor/github.com/Shopify/sarama/README.md generated vendored Normal file
View File

@ -0,0 +1,39 @@
sarama
======
[![GoDoc](https://godoc.org/github.com/Shopify/sarama?status.png)](https://godoc.org/github.com/Shopify/sarama)
[![Build Status](https://travis-ci.org/Shopify/sarama.svg?branch=master)](https://travis-ci.org/Shopify/sarama)
[![Coverage](https://codecov.io/gh/Shopify/sarama/branch/master/graph/badge.svg)](https://codecov.io/gh/Shopify/sarama)
Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apache.org/) version 0.8 (and later).
### Getting started
- API documentation and examples are available via [godoc](https://godoc.org/github.com/Shopify/sarama).
- Mocks for testing are available in the [mocks](./mocks) subpackage.
- The [examples](./examples) directory contains more elaborate example applications.
- The [tools](./tools) directory contains command line tools that can be useful for testing, diagnostics, and instrumentation.
You might also want to look at the [Frequently Asked Questions](https://github.com/Shopify/sarama/wiki/Frequently-Asked-Questions).
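For a hedged illustration (not part of the upstream README), a minimal partition-consumer sketch; the broker address, topic, and partition are placeholder assumptions:

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	// A nil config means the defaults from sarama.NewConfig() are used.
	consumer, err := sarama.NewConsumer([]string{"localhost:9092"}, nil) // placeholder broker
	if err != nil {
		log.Fatal(err)
	}
	defer consumer.Close()

	// Consume a single partition; connections to each broker are shared internally.
	pc, err := consumer.ConsumePartition("example-topic", 0, sarama.OffsetNewest)
	if err != nil {
		log.Fatal(err)
	}
	defer pc.Close()

	for msg := range pc.Messages() {
		log.Printf("offset %d: %s", msg.Offset, string(msg.Value))
	}
}
```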
### Compatibility and API stability
Sarama provides a "2 releases + 2 months" compatibility guarantee: we support
the two latest stable releases of Kafka and Go, and we provide a two month
grace period for older releases. This means we currently officially support
Go 1.8 through 1.11, and Kafka 1.0 through 2.0, although older releases are
still likely to work.
Sarama follows semantic versioning and provides API stability via the gopkg.in service.
You can import a version with a guaranteed stable API via http://gopkg.in/Shopify/sarama.v1.
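For example, a hedged sketch of what the stable import looks like (the alias is an assumption, not required):

```go
import sarama "gopkg.in/Shopify/sarama.v1"
```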
A changelog is available [here](CHANGELOG.md).
### Contributing
* Get started by checking our [contribution guidelines](https://github.com/Shopify/sarama/blob/master/.github/CONTRIBUTING.md).
* Read the [Sarama wiki](https://github.com/Shopify/sarama/wiki) for more
technical and design details.
* The [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol)
contains a wealth of useful information.
* For more general issues, there is [a google group](https://groups.google.com/forum/#!forum/kafka-clients) for Kafka client developers.
* If you have any questions, just ask!

20
vendor/github.com/Shopify/sarama/Vagrantfile generated vendored Normal file
View File

@ -0,0 +1,20 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
VAGRANTFILE_API_VERSION = "2"
# We have 5 * 192MB ZK processes and 5 * 320MB Kafka processes => 2560MB
MEMORY = 3072
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
config.vm.box = "ubuntu/trusty64"
config.vm.provision :shell, path: "vagrant/provision.sh"
config.vm.network "private_network", ip: "192.168.100.67"
config.vm.provider "virtualbox" do |v|
v.memory = MEMORY
end
end

119
vendor/github.com/Shopify/sarama/acl_bindings.go generated vendored Normal file
View File

@ -0,0 +1,119 @@
package sarama
type Resource struct {
ResourceType AclResourceType
ResourceName string
}
func (r *Resource) encode(pe packetEncoder) error {
pe.putInt8(int8(r.ResourceType))
if err := pe.putString(r.ResourceName); err != nil {
return err
}
return nil
}
func (r *Resource) decode(pd packetDecoder, version int16) (err error) {
resourceType, err := pd.getInt8()
if err != nil {
return err
}
r.ResourceType = AclResourceType(resourceType)
if r.ResourceName, err = pd.getString(); err != nil {
return err
}
return nil
}
type Acl struct {
Principal string
Host string
Operation AclOperation
PermissionType AclPermissionType
}
func (a *Acl) encode(pe packetEncoder) error {
if err := pe.putString(a.Principal); err != nil {
return err
}
if err := pe.putString(a.Host); err != nil {
return err
}
pe.putInt8(int8(a.Operation))
pe.putInt8(int8(a.PermissionType))
return nil
}
func (a *Acl) decode(pd packetDecoder, version int16) (err error) {
if a.Principal, err = pd.getString(); err != nil {
return err
}
if a.Host, err = pd.getString(); err != nil {
return err
}
operation, err := pd.getInt8()
if err != nil {
return err
}
a.Operation = AclOperation(operation)
permissionType, err := pd.getInt8()
if err != nil {
return err
}
a.PermissionType = AclPermissionType(permissionType)
return nil
}
type ResourceAcls struct {
Resource
Acls []*Acl
}
func (r *ResourceAcls) encode(pe packetEncoder) error {
if err := r.Resource.encode(pe); err != nil {
return err
}
if err := pe.putArrayLength(len(r.Acls)); err != nil {
return err
}
for _, acl := range r.Acls {
if err := acl.encode(pe); err != nil {
return err
}
}
return nil
}
func (r *ResourceAcls) decode(pd packetDecoder, version int16) error {
if err := r.Resource.decode(pd, version); err != nil {
return err
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
r.Acls = make([]*Acl, n)
for i := 0; i < n; i++ {
r.Acls[i] = new(Acl)
if err := r.Acls[i].decode(pd, version); err != nil {
return err
}
}
return nil
}

76
vendor/github.com/Shopify/sarama/acl_create_request.go generated vendored Normal file
View File

@ -0,0 +1,76 @@
package sarama
type CreateAclsRequest struct {
AclCreations []*AclCreation
}
func (c *CreateAclsRequest) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(c.AclCreations)); err != nil {
return err
}
for _, aclCreation := range c.AclCreations {
if err := aclCreation.encode(pe); err != nil {
return err
}
}
return nil
}
func (c *CreateAclsRequest) decode(pd packetDecoder, version int16) (err error) {
n, err := pd.getArrayLength()
if err != nil {
return err
}
c.AclCreations = make([]*AclCreation, n)
for i := 0; i < n; i++ {
c.AclCreations[i] = new(AclCreation)
if err := c.AclCreations[i].decode(pd, version); err != nil {
return err
}
}
return nil
}
func (d *CreateAclsRequest) key() int16 {
return 30
}
func (d *CreateAclsRequest) version() int16 {
return 0
}
func (d *CreateAclsRequest) requiredVersion() KafkaVersion {
return V0_11_0_0
}
type AclCreation struct {
Resource
Acl
}
func (a *AclCreation) encode(pe packetEncoder) error {
if err := a.Resource.encode(pe); err != nil {
return err
}
if err := a.Acl.encode(pe); err != nil {
return err
}
return nil
}
func (a *AclCreation) decode(pd packetDecoder, version int16) (err error) {
if err := a.Resource.decode(pd, version); err != nil {
return err
}
if err := a.Acl.decode(pd, version); err != nil {
return err
}
return nil
}

88
vendor/github.com/Shopify/sarama/acl_create_response.go generated vendored Normal file
View File

@ -0,0 +1,88 @@
package sarama
import "time"
type CreateAclsResponse struct {
ThrottleTime time.Duration
AclCreationResponses []*AclCreationResponse
}
func (c *CreateAclsResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(c.ThrottleTime / time.Millisecond))
if err := pe.putArrayLength(len(c.AclCreationResponses)); err != nil {
return err
}
for _, aclCreationResponse := range c.AclCreationResponses {
if err := aclCreationResponse.encode(pe); err != nil {
return err
}
}
return nil
}
func (c *CreateAclsResponse) decode(pd packetDecoder, version int16) (err error) {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
n, err := pd.getArrayLength()
if err != nil {
return err
}
c.AclCreationResponses = make([]*AclCreationResponse, n)
for i := 0; i < n; i++ {
c.AclCreationResponses[i] = new(AclCreationResponse)
if err := c.AclCreationResponses[i].decode(pd, version); err != nil {
return err
}
}
return nil
}
func (d *CreateAclsResponse) key() int16 {
return 30
}
func (d *CreateAclsResponse) version() int16 {
return 0
}
func (d *CreateAclsResponse) requiredVersion() KafkaVersion {
return V0_11_0_0
}
type AclCreationResponse struct {
Err KError
ErrMsg *string
}
func (a *AclCreationResponse) encode(pe packetEncoder) error {
pe.putInt16(int16(a.Err))
if err := pe.putNullableString(a.ErrMsg); err != nil {
return err
}
return nil
}
func (a *AclCreationResponse) decode(pd packetDecoder, version int16) (err error) {
kerr, err := pd.getInt16()
if err != nil {
return err
}
a.Err = KError(kerr)
if a.ErrMsg, err = pd.getNullableString(); err != nil {
return err
}
return nil
}

48
vendor/github.com/Shopify/sarama/acl_delete_request.go generated vendored Normal file
View File

@ -0,0 +1,48 @@
package sarama
type DeleteAclsRequest struct {
Filters []*AclFilter
}
func (d *DeleteAclsRequest) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(d.Filters)); err != nil {
return err
}
for _, filter := range d.Filters {
if err := filter.encode(pe); err != nil {
return err
}
}
return nil
}
func (d *DeleteAclsRequest) decode(pd packetDecoder, version int16) (err error) {
n, err := pd.getArrayLength()
if err != nil {
return err
}
d.Filters = make([]*AclFilter, n)
for i := 0; i < n; i++ {
d.Filters[i] = new(AclFilter)
if err := d.Filters[i].decode(pd, version); err != nil {
return err
}
}
return nil
}
func (d *DeleteAclsRequest) key() int16 {
return 31
}
func (d *DeleteAclsRequest) version() int16 {
return 0
}
func (d *DeleteAclsRequest) requiredVersion() KafkaVersion {
return V0_11_0_0
}

155
vendor/github.com/Shopify/sarama/acl_delete_response.go generated vendored Normal file
View File

@ -0,0 +1,155 @@
package sarama
import "time"
type DeleteAclsResponse struct {
ThrottleTime time.Duration
FilterResponses []*FilterResponse
}
func (a *DeleteAclsResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(a.ThrottleTime / time.Millisecond))
if err := pe.putArrayLength(len(a.FilterResponses)); err != nil {
return err
}
for _, filterResponse := range a.FilterResponses {
if err := filterResponse.encode(pe); err != nil {
return err
}
}
return nil
}
func (a *DeleteAclsResponse) decode(pd packetDecoder, version int16) (err error) {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
n, err := pd.getArrayLength()
if err != nil {
return err
}
a.FilterResponses = make([]*FilterResponse, n)
for i := 0; i < n; i++ {
a.FilterResponses[i] = new(FilterResponse)
if err := a.FilterResponses[i].decode(pd, version); err != nil {
return err
}
}
return nil
}
func (d *DeleteAclsResponse) key() int16 {
return 31
}
func (d *DeleteAclsResponse) version() int16 {
return 0
}
func (d *DeleteAclsResponse) requiredVersion() KafkaVersion {
return V0_11_0_0
}
type FilterResponse struct {
Err KError
ErrMsg *string
MatchingAcls []*MatchingAcl
}
func (f *FilterResponse) encode(pe packetEncoder) error {
pe.putInt16(int16(f.Err))
if err := pe.putNullableString(f.ErrMsg); err != nil {
return err
}
if err := pe.putArrayLength(len(f.MatchingAcls)); err != nil {
return err
}
for _, matchingAcl := range f.MatchingAcls {
if err := matchingAcl.encode(pe); err != nil {
return err
}
}
return nil
}
func (f *FilterResponse) decode(pd packetDecoder, version int16) (err error) {
kerr, err := pd.getInt16()
if err != nil {
return err
}
f.Err = KError(kerr)
if f.ErrMsg, err = pd.getNullableString(); err != nil {
return err
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
f.MatchingAcls = make([]*MatchingAcl, n)
for i := 0; i < n; i++ {
f.MatchingAcls[i] = new(MatchingAcl)
if err := f.MatchingAcls[i].decode(pd, version); err != nil {
return err
}
}
return nil
}
type MatchingAcl struct {
Err KError
ErrMsg *string
Resource
Acl
}
func (m *MatchingAcl) encode(pe packetEncoder) error {
pe.putInt16(int16(m.Err))
if err := pe.putNullableString(m.ErrMsg); err != nil {
return err
}
if err := m.Resource.encode(pe); err != nil {
return err
}
if err := m.Acl.encode(pe); err != nil {
return err
}
return nil
}
func (m *MatchingAcl) decode(pd packetDecoder, version int16) (err error) {
kerr, err := pd.getInt16()
if err != nil {
return err
}
m.Err = KError(kerr)
if m.ErrMsg, err = pd.getNullableString(); err != nil {
return err
}
if err := m.Resource.decode(pd, version); err != nil {
return err
}
if err := m.Acl.decode(pd, version); err != nil {
return err
}
return nil
}

25
vendor/github.com/Shopify/sarama/acl_describe_request.go generated vendored Normal file
View File

@ -0,0 +1,25 @@
package sarama
type DescribeAclsRequest struct {
AclFilter
}
func (d *DescribeAclsRequest) encode(pe packetEncoder) error {
return d.AclFilter.encode(pe)
}
func (d *DescribeAclsRequest) decode(pd packetDecoder, version int16) (err error) {
return d.AclFilter.decode(pd, version)
}
func (d *DescribeAclsRequest) key() int16 {
return 29
}
func (d *DescribeAclsRequest) version() int16 {
return 0
}
func (d *DescribeAclsRequest) requiredVersion() KafkaVersion {
return V0_11_0_0
}

80
vendor/github.com/Shopify/sarama/acl_describe_response.go generated vendored Normal file
View File

@ -0,0 +1,80 @@
package sarama
import "time"
type DescribeAclsResponse struct {
ThrottleTime time.Duration
Err KError
ErrMsg *string
ResourceAcls []*ResourceAcls
}
func (d *DescribeAclsResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(d.ThrottleTime / time.Millisecond))
pe.putInt16(int16(d.Err))
if err := pe.putNullableString(d.ErrMsg); err != nil {
return err
}
if err := pe.putArrayLength(len(d.ResourceAcls)); err != nil {
return err
}
for _, resourceAcl := range d.ResourceAcls {
if err := resourceAcl.encode(pe); err != nil {
return err
}
}
return nil
}
func (d *DescribeAclsResponse) decode(pd packetDecoder, version int16) (err error) {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
kerr, err := pd.getInt16()
if err != nil {
return err
}
d.Err = KError(kerr)
if d.ErrMsg, err = pd.getNullableString(); err != nil {
return err
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
d.ResourceAcls = make([]*ResourceAcls, n)
for i := 0; i < n; i++ {
d.ResourceAcls[i] = new(ResourceAcls)
if err := d.ResourceAcls[i].decode(pd, version); err != nil {
return err
}
}
return nil
}
func (d *DescribeAclsResponse) key() int16 {
return 29
}
func (d *DescribeAclsResponse) version() int16 {
return 0
}
func (d *DescribeAclsResponse) requiredVersion() KafkaVersion {
return V0_11_0_0
}

61
vendor/github.com/Shopify/sarama/acl_filter.go generated vendored Normal file
View File

@ -0,0 +1,61 @@
package sarama
type AclFilter struct {
ResourceType AclResourceType
ResourceName *string
Principal *string
Host *string
Operation AclOperation
PermissionType AclPermissionType
}
func (a *AclFilter) encode(pe packetEncoder) error {
pe.putInt8(int8(a.ResourceType))
if err := pe.putNullableString(a.ResourceName); err != nil {
return err
}
if err := pe.putNullableString(a.Principal); err != nil {
return err
}
if err := pe.putNullableString(a.Host); err != nil {
return err
}
pe.putInt8(int8(a.Operation))
pe.putInt8(int8(a.PermissionType))
return nil
}
func (a *AclFilter) decode(pd packetDecoder, version int16) (err error) {
resourceType, err := pd.getInt8()
if err != nil {
return err
}
a.ResourceType = AclResourceType(resourceType)
if a.ResourceName, err = pd.getNullableString(); err != nil {
return err
}
if a.Principal, err = pd.getNullableString(); err != nil {
return err
}
if a.Host, err = pd.getNullableString(); err != nil {
return err
}
operation, err := pd.getInt8()
if err != nil {
return err
}
a.Operation = AclOperation(operation)
permissionType, err := pd.getInt8()
if err != nil {
return err
}
a.PermissionType = AclPermissionType(permissionType)
return nil
}

42
vendor/github.com/Shopify/sarama/acl_types.go generated vendored Normal file
View File

@ -0,0 +1,42 @@
package sarama
type AclOperation int
// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclOperation.java
const (
AclOperationUnknown AclOperation = 0
AclOperationAny AclOperation = 1
AclOperationAll AclOperation = 2
AclOperationRead AclOperation = 3
AclOperationWrite AclOperation = 4
AclOperationCreate AclOperation = 5
AclOperationDelete AclOperation = 6
AclOperationAlter AclOperation = 7
AclOperationDescribe AclOperation = 8
AclOperationClusterAction AclOperation = 9
AclOperationDescribeConfigs AclOperation = 10
AclOperationAlterConfigs AclOperation = 11
AclOperationIdempotentWrite AclOperation = 12
)
type AclPermissionType int
// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclPermissionType.java
const (
AclPermissionUnknown AclPermissionType = 0
AclPermissionAny AclPermissionType = 1
AclPermissionDeny AclPermissionType = 2
AclPermissionAllow AclPermissionType = 3
)
type AclResourceType int
// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/ResourceType.java
const (
AclResourceUnknown AclResourceType = 0
AclResourceAny AclResourceType = 1
AclResourceTopic AclResourceType = 2
AclResourceGroup AclResourceType = 3
AclResourceCluster AclResourceType = 4
AclResourceTransactionalID AclResourceType = 5
)

52
vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go generated vendored Normal file
View File

@ -0,0 +1,52 @@
package sarama
type AddOffsetsToTxnRequest struct {
TransactionalID string
ProducerID int64
ProducerEpoch int16
GroupID string
}
func (a *AddOffsetsToTxnRequest) encode(pe packetEncoder) error {
if err := pe.putString(a.TransactionalID); err != nil {
return err
}
pe.putInt64(a.ProducerID)
pe.putInt16(a.ProducerEpoch)
if err := pe.putString(a.GroupID); err != nil {
return err
}
return nil
}
func (a *AddOffsetsToTxnRequest) decode(pd packetDecoder, version int16) (err error) {
if a.TransactionalID, err = pd.getString(); err != nil {
return err
}
if a.ProducerID, err = pd.getInt64(); err != nil {
return err
}
if a.ProducerEpoch, err = pd.getInt16(); err != nil {
return err
}
if a.GroupID, err = pd.getString(); err != nil {
return err
}
return nil
}
func (a *AddOffsetsToTxnRequest) key() int16 {
return 25
}
func (a *AddOffsetsToTxnRequest) version() int16 {
return 0
}
func (a *AddOffsetsToTxnRequest) requiredVersion() KafkaVersion {
return V0_11_0_0
}

44
vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go generated vendored Normal file
View File

@ -0,0 +1,44 @@
package sarama
import (
"time"
)
type AddOffsetsToTxnResponse struct {
ThrottleTime time.Duration
Err KError
}
func (a *AddOffsetsToTxnResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(a.ThrottleTime / time.Millisecond))
pe.putInt16(int16(a.Err))
return nil
}
func (a *AddOffsetsToTxnResponse) decode(pd packetDecoder, version int16) (err error) {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
kerr, err := pd.getInt16()
if err != nil {
return err
}
a.Err = KError(kerr)
return nil
}
func (a *AddOffsetsToTxnResponse) key() int16 {
return 25
}
func (a *AddOffsetsToTxnResponse) version() int16 {
return 0
}
func (a *AddOffsetsToTxnResponse) requiredVersion() KafkaVersion {
return V0_11_0_0
}

76
vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go generated vendored Normal file
View File

@ -0,0 +1,76 @@
package sarama
type AddPartitionsToTxnRequest struct {
TransactionalID string
ProducerID int64
ProducerEpoch int16
TopicPartitions map[string][]int32
}
func (a *AddPartitionsToTxnRequest) encode(pe packetEncoder) error {
if err := pe.putString(a.TransactionalID); err != nil {
return err
}
pe.putInt64(a.ProducerID)
pe.putInt16(a.ProducerEpoch)
if err := pe.putArrayLength(len(a.TopicPartitions)); err != nil {
return err
}
for topic, partitions := range a.TopicPartitions {
if err := pe.putString(topic); err != nil {
return err
}
if err := pe.putInt32Array(partitions); err != nil {
return err
}
}
return nil
}
func (a *AddPartitionsToTxnRequest) decode(pd packetDecoder, version int16) (err error) {
if a.TransactionalID, err = pd.getString(); err != nil {
return err
}
if a.ProducerID, err = pd.getInt64(); err != nil {
return err
}
if a.ProducerEpoch, err = pd.getInt16(); err != nil {
return err
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
a.TopicPartitions = make(map[string][]int32)
for i := 0; i < n; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
partitions, err := pd.getInt32Array()
if err != nil {
return err
}
a.TopicPartitions[topic] = partitions
}
return nil
}
func (a *AddPartitionsToTxnRequest) key() int16 {
return 24
}
func (a *AddPartitionsToTxnRequest) version() int16 {
return 0
}
func (a *AddPartitionsToTxnRequest) requiredVersion() KafkaVersion {
return V0_11_0_0
}

108
vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go generated vendored Normal file
View File

@ -0,0 +1,108 @@
package sarama
import (
"time"
)
type AddPartitionsToTxnResponse struct {
ThrottleTime time.Duration
Errors map[string][]*PartitionError
}
func (a *AddPartitionsToTxnResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(a.ThrottleTime / time.Millisecond))
if err := pe.putArrayLength(len(a.Errors)); err != nil {
return err
}
for topic, e := range a.Errors {
if err := pe.putString(topic); err != nil {
return err
}
if err := pe.putArrayLength(len(e)); err != nil {
return err
}
for _, partitionError := range e {
if err := partitionError.encode(pe); err != nil {
return err
}
}
}
return nil
}
func (a *AddPartitionsToTxnResponse) decode(pd packetDecoder, version int16) (err error) {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
n, err := pd.getArrayLength()
if err != nil {
return err
}
a.Errors = make(map[string][]*PartitionError)
for i := 0; i < n; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
m, err := pd.getArrayLength()
if err != nil {
return err
}
a.Errors[topic] = make([]*PartitionError, m)
for j := 0; j < m; j++ {
a.Errors[topic][j] = new(PartitionError)
if err := a.Errors[topic][j].decode(pd, version); err != nil {
return err
}
}
}
return nil
}
func (a *AddPartitionsToTxnResponse) key() int16 {
return 24
}
func (a *AddPartitionsToTxnResponse) version() int16 {
return 0
}
func (a *AddPartitionsToTxnResponse) requiredVersion() KafkaVersion {
return V0_11_0_0
}
type PartitionError struct {
Partition int32
Err KError
}
func (p *PartitionError) encode(pe packetEncoder) error {
pe.putInt32(p.Partition)
pe.putInt16(int16(p.Err))
return nil
}
func (p *PartitionError) decode(pd packetDecoder, version int16) (err error) {
if p.Partition, err = pd.getInt32(); err != nil {
return err
}
kerr, err := pd.getInt16()
if err != nil {
return err
}
p.Err = KError(kerr)
return nil
}

382
vendor/github.com/Shopify/sarama/admin.go generated vendored Normal file
View File

@ -0,0 +1,382 @@
package sarama
import "errors"
// ClusterAdmin is the administrative client for Kafka, which supports managing and inspecting topics,
// brokers, configurations and ACLs. The minimum broker version required is 0.10.0.0.
// Methods with stricter requirements will specify the minimum broker version required.
// You MUST call Close() on a client to avoid leaks
type ClusterAdmin interface {
// Creates a new topic. This operation is supported by brokers with version 0.10.1.0 or higher.
// It may take several seconds after CreateTopic returns success for all the brokers
// to become aware that the topic has been created. During this time, listTopics
// may not return information about the new topic. The validateOnly option is supported from version 0.10.2.0.
CreateTopic(topic string, detail *TopicDetail, validateOnly bool) error
// Delete a topic. It may take several seconds after DeleteTopic returns success
// for all the brokers to become aware that the topic is gone.
// During this time, listTopics may continue to return information about the deleted topic.
// If delete.topic.enable is false on the brokers, deleteTopic will mark
// the topic for deletion, but not actually delete it.
// This operation is supported by brokers with version 0.10.1.0 or higher.
DeleteTopic(topic string) error
// Increase the number of partitions of the topics according to the corresponding values.
// If partitions are increased for a topic that has a key, the partition logic or ordering of
// the messages will be affected. It may take several seconds after this method returns
// success for all the brokers to become aware that the partitions have been created.
// During this time, ClusterAdmin#describeTopics may not return information about the
// new partitions. This operation is supported by brokers with version 1.0.0 or higher.
CreatePartitions(topic string, count int32, assignment [][]int32, validateOnly bool) error
// Delete records whose offset is smaller than the given offset of the corresponding partition.
// This operation is supported by brokers with version 0.11.0.0 or higher.
DeleteRecords(topic string, partitionOffsets map[int32]int64) error
// Get the configuration for the specified resources.
// The returned configuration includes default values, and Default being true
// can be used to distinguish them from user-supplied values.
// Config entries where ReadOnly is true cannot be updated.
// The value of config entries where Sensitive is true is always nil so
// sensitive information is not disclosed.
// This operation is supported by brokers with version 0.11.0.0 or higher.
DescribeConfig(resource ConfigResource) ([]ConfigEntry, error)
// Update the configuration for the specified resources with the default options.
// This operation is supported by brokers with version 0.11.0.0 or higher.
// The resources with their configs (topic is the only resource type with configs
// that can be updated currently). Updates are not transactional, so they may succeed
// for some resources while failing for others. The configs for a particular resource are updated atomically.
AlterConfig(resourceType ConfigResourceType, name string, entries map[string]*string, validateOnly bool) error
// Creates access control lists (ACLs) which are bound to specific resources.
// This operation is not transactional so it may succeed for some ACLs while fail for others.
// If you attempt to add an ACL that duplicates an existing ACL, no error will be raised, but
// no changes will be made. This operation is supported by brokers with version 0.11.0.0 or higher.
CreateACL(resource Resource, acl Acl) error
// Lists access control lists (ACLs) according to the supplied filter.
// It may take some time for changes made by createAcls or deleteAcls to be reflected in the output of ListAcls.
// This operation is supported by brokers with version 0.11.0.0 or higher.
ListAcls(filter AclFilter) ([]ResourceAcls, error)
// Deletes access control lists (ACLs) according to the supplied filters.
// This operation is not transactional so it may succeed for some ACLs while fail for others.
// This operation is supported by brokers with version 0.11.0.0 or higher.
DeleteACL(filter AclFilter, validateOnly bool) ([]MatchingAcl, error)
// Close shuts down the admin and closes underlying client.
Close() error
}
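// Illustrative sketch (not part of the vendored source): using the interface
// above to create a topic. The broker address, topic name, and detail values
// are placeholder assumptions; set conf.Version to match your brokers.
//
//	admin, err := NewClusterAdmin([]string{"localhost:9092"}, NewConfig())
//	if err != nil {
//		panic(err)
//	}
//	defer admin.Close()
//	err = admin.CreateTopic("example-topic", &TopicDetail{
//		NumPartitions:     3,
//		ReplicationFactor: 1,
//	}, false)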
type clusterAdmin struct {
client Client
conf *Config
}
// NewClusterAdmin creates a new ClusterAdmin using the given broker addresses and configuration.
func NewClusterAdmin(addrs []string, conf *Config) (ClusterAdmin, error) {
client, err := NewClient(addrs, conf)
if err != nil {
return nil, err
}
//make sure we can retrieve the controller
_, err = client.Controller()
if err != nil {
return nil, err
}
ca := &clusterAdmin{
client: client,
conf: client.Config(),
}
return ca, nil
}
func (ca *clusterAdmin) Close() error {
return ca.client.Close()
}
func (ca *clusterAdmin) Controller() (*Broker, error) {
return ca.client.Controller()
}
func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateOnly bool) error {
if topic == "" {
return ErrInvalidTopic
}
if detail == nil {
return errors.New("You must specify topic details")
}
topicDetails := make(map[string]*TopicDetail)
topicDetails[topic] = detail
request := &CreateTopicsRequest{
TopicDetails: topicDetails,
ValidateOnly: validateOnly,
Timeout: ca.conf.Admin.Timeout,
}
if ca.conf.Version.IsAtLeast(V0_11_0_0) {
request.Version = 1
}
if ca.conf.Version.IsAtLeast(V1_0_0_0) {
request.Version = 2
}
b, err := ca.Controller()
if err != nil {
return err
}
rsp, err := b.CreateTopics(request)
if err != nil {
return err
}
topicErr, ok := rsp.TopicErrors[topic]
if !ok {
return ErrIncompleteResponse
}
if topicErr.Err != ErrNoError {
return topicErr.Err
}
return nil
}
func (ca *clusterAdmin) DeleteTopic(topic string) error {
if topic == "" {
return ErrInvalidTopic
}
request := &DeleteTopicsRequest{
Topics: []string{topic},
Timeout: ca.conf.Admin.Timeout,
}
if ca.conf.Version.IsAtLeast(V0_11_0_0) {
request.Version = 1
}
b, err := ca.Controller()
if err != nil {
return err
}
rsp, err := b.DeleteTopics(request)
if err != nil {
return err
}
topicErr, ok := rsp.TopicErrorCodes[topic]
if !ok {
return ErrIncompleteResponse
}
if topicErr != ErrNoError {
return topicErr
}
return nil
}
func (ca *clusterAdmin) CreatePartitions(topic string, count int32, assignment [][]int32, validateOnly bool) error {
if topic == "" {
return ErrInvalidTopic
}
topicPartitions := make(map[string]*TopicPartition)
topicPartitions[topic] = &TopicPartition{Count: count, Assignment: assignment}
request := &CreatePartitionsRequest{
TopicPartitions: topicPartitions,
Timeout: ca.conf.Admin.Timeout,
}
b, err := ca.Controller()
if err != nil {
return err
}
rsp, err := b.CreatePartitions(request)
if err != nil {
return err
}
topicErr, ok := rsp.TopicPartitionErrors[topic]
if !ok {
return ErrIncompleteResponse
}
if topicErr.Err != ErrNoError {
return topicErr.Err
}
return nil
}
func (ca *clusterAdmin) DeleteRecords(topic string, partitionOffsets map[int32]int64) error {
if topic == "" {
return ErrInvalidTopic
}
topics := make(map[string]*DeleteRecordsRequestTopic)
topics[topic] = &DeleteRecordsRequestTopic{PartitionOffsets: partitionOffsets}
request := &DeleteRecordsRequest{
Topics: topics,
Timeout: ca.conf.Admin.Timeout,
}
b, err := ca.Controller()
if err != nil {
return err
}
rsp, err := b.DeleteRecords(request)
if err != nil {
return err
}
_, ok := rsp.Topics[topic]
if !ok {
return ErrIncompleteResponse
}
// TODO: since we are dealing with a number of partitions, it would be good to return
// a slice of errors, one per partition, instead of a single error.
return nil
}
func (ca *clusterAdmin) DescribeConfig(resource ConfigResource) ([]ConfigEntry, error) {
var entries []ConfigEntry
var resources []*ConfigResource
resources = append(resources, &resource)
request := &DescribeConfigsRequest{
Resources: resources,
}
b, err := ca.Controller()
if err != nil {
return nil, err
}
rsp, err := b.DescribeConfigs(request)
if err != nil {
return nil, err
}
for _, rspResource := range rsp.Resources {
if rspResource.Name == resource.Name {
if rspResource.ErrorMsg != "" {
return nil, errors.New(rspResource.ErrorMsg)
}
for _, cfgEntry := range rspResource.Configs {
entries = append(entries, *cfgEntry)
}
}
}
return entries, nil
}
func (ca *clusterAdmin) AlterConfig(resourceType ConfigResourceType, name string, entries map[string]*string, validateOnly bool) error {
var resources []*AlterConfigsResource
resources = append(resources, &AlterConfigsResource{
Type: resourceType,
Name: name,
ConfigEntries: entries,
})
request := &AlterConfigsRequest{
Resources: resources,
ValidateOnly: validateOnly,
}
b, err := ca.Controller()
if err != nil {
return err
}
rsp, err := b.AlterConfigs(request)
if err != nil {
return err
}
for _, rspResource := range rsp.Resources {
if rspResource.Name == name {
if rspResource.ErrorMsg != "" {
return errors.New(rspResource.ErrorMsg)
}
}
}
return nil
}
func (ca *clusterAdmin) CreateACL(resource Resource, acl Acl) error {
var acls []*AclCreation
acls = append(acls, &AclCreation{resource, acl})
request := &CreateAclsRequest{AclCreations: acls}
b, err := ca.Controller()
if err != nil {
return err
}
_, err = b.CreateAcls(request)
return err
}
func (ca *clusterAdmin) ListAcls(filter AclFilter) ([]ResourceAcls, error) {
request := &DescribeAclsRequest{AclFilter: filter}
b, err := ca.Controller()
if err != nil {
return nil, err
}
rsp, err := b.DescribeAcls(request)
if err != nil {
return nil, err
}
var lAcls []ResourceAcls
for _, rAcl := range rsp.ResourceAcls {
lAcls = append(lAcls, *rAcl)
}
return lAcls, nil
}
func (ca *clusterAdmin) DeleteACL(filter AclFilter, validateOnly bool) ([]MatchingAcl, error) {
var filters []*AclFilter
filters = append(filters, &filter)
request := &DeleteAclsRequest{Filters: filters}
b, err := ca.Controller()
if err != nil {
return nil, err
}
rsp, err := b.DeleteAcls(request)
if err != nil {
return nil, err
}
var mAcls []MatchingAcl
for _, fr := range rsp.FilterResponses {
for _, mACL := range fr.MatchingAcls {
mAcls = append(mAcls, *mACL)
}
}
return mAcls, nil
}

120
vendor/github.com/Shopify/sarama/alter_configs_request.go generated vendored Normal file
View File

@ -0,0 +1,120 @@
package sarama
type AlterConfigsRequest struct {
Resources []*AlterConfigsResource
ValidateOnly bool
}
type AlterConfigsResource struct {
Type ConfigResourceType
Name string
ConfigEntries map[string]*string
}
func (acr *AlterConfigsRequest) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(acr.Resources)); err != nil {
return err
}
for _, r := range acr.Resources {
if err := r.encode(pe); err != nil {
return err
}
}
pe.putBool(acr.ValidateOnly)
return nil
}
func (acr *AlterConfigsRequest) decode(pd packetDecoder, version int16) error {
resourceCount, err := pd.getArrayLength()
if err != nil {
return err
}
acr.Resources = make([]*AlterConfigsResource, resourceCount)
for i := range acr.Resources {
r := &AlterConfigsResource{}
err = r.decode(pd, version)
if err != nil {
return err
}
acr.Resources[i] = r
}
validateOnly, err := pd.getBool()
if err != nil {
return err
}
acr.ValidateOnly = validateOnly
return nil
}
func (ac *AlterConfigsResource) encode(pe packetEncoder) error {
pe.putInt8(int8(ac.Type))
if err := pe.putString(ac.Name); err != nil {
return err
}
if err := pe.putArrayLength(len(ac.ConfigEntries)); err != nil {
return err
}
for configKey, configValue := range ac.ConfigEntries {
if err := pe.putString(configKey); err != nil {
return err
}
if err := pe.putNullableString(configValue); err != nil {
return err
}
}
return nil
}
func (ac *AlterConfigsResource) decode(pd packetDecoder, version int16) error {
t, err := pd.getInt8()
if err != nil {
return err
}
ac.Type = ConfigResourceType(t)
name, err := pd.getString()
if err != nil {
return err
}
ac.Name = name
n, err := pd.getArrayLength()
if err != nil {
return err
}
if n > 0 {
ac.ConfigEntries = make(map[string]*string, n)
for i := 0; i < n; i++ {
configKey, err := pd.getString()
if err != nil {
return err
}
if ac.ConfigEntries[configKey], err = pd.getNullableString(); err != nil {
return err
}
}
}
return err
}
func (acr *AlterConfigsRequest) key() int16 {
return 33
}
func (acr *AlterConfigsRequest) version() int16 {
return 0
}
func (acr *AlterConfigsRequest) requiredVersion() KafkaVersion {
return V0_11_0_0
}

95
vendor/github.com/Shopify/sarama/alter_configs_response.go generated vendored Normal file
View File

@ -0,0 +1,95 @@
package sarama
import "time"
type AlterConfigsResponse struct {
ThrottleTime time.Duration
Resources []*AlterConfigsResourceResponse
}
type AlterConfigsResourceResponse struct {
ErrorCode int16
ErrorMsg string
Type ConfigResourceType
Name string
}
func (ct *AlterConfigsResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(ct.ThrottleTime / time.Millisecond))
if err := pe.putArrayLength(len(ct.Resources)); err != nil {
return err
}
for i := range ct.Resources {
pe.putInt16(ct.Resources[i].ErrorCode)
err := pe.putString(ct.Resources[i].ErrorMsg)
if err != nil {
return err
}
pe.putInt8(int8(ct.Resources[i].Type))
err = pe.putString(ct.Resources[i].Name)
if err != nil {
return err
}
}
return nil
}
func (acr *AlterConfigsResponse) decode(pd packetDecoder, version int16) error {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
acr.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
responseCount, err := pd.getArrayLength()
if err != nil {
return err
}
acr.Resources = make([]*AlterConfigsResourceResponse, responseCount)
for i := range acr.Resources {
acr.Resources[i] = new(AlterConfigsResourceResponse)
errCode, err := pd.getInt16()
if err != nil {
return err
}
acr.Resources[i].ErrorCode = errCode
e, err := pd.getString()
if err != nil {
return err
}
acr.Resources[i].ErrorMsg = e
t, err := pd.getInt8()
if err != nil {
return err
}
acr.Resources[i].Type = ConfigResourceType(t)
name, err := pd.getString()
if err != nil {
return err
}
acr.Resources[i].Name = name
}
return nil
}
func (r *AlterConfigsResponse) key() int16 {
return 33
}
func (r *AlterConfigsResponse) version() int16 {
return 0
}
func (r *AlterConfigsResponse) requiredVersion() KafkaVersion {
return V0_11_0_0
}

24
vendor/github.com/Shopify/sarama/api_versions_request.go generated vendored Normal file
View File

@ -0,0 +1,24 @@
package sarama
type ApiVersionsRequest struct {
}
func (r *ApiVersionsRequest) encode(pe packetEncoder) error {
return nil
}
func (r *ApiVersionsRequest) decode(pd packetDecoder, version int16) (err error) {
return nil
}
func (r *ApiVersionsRequest) key() int16 {
return 18
}
func (r *ApiVersionsRequest) version() int16 {
return 0
}
func (r *ApiVersionsRequest) requiredVersion() KafkaVersion {
return V0_10_0_0
}

87
vendor/github.com/Shopify/sarama/api_versions_response.go generated vendored Normal file
View File

@ -0,0 +1,87 @@
package sarama
type ApiVersionsResponseBlock struct {
ApiKey int16
MinVersion int16
MaxVersion int16
}
func (b *ApiVersionsResponseBlock) encode(pe packetEncoder) error {
pe.putInt16(b.ApiKey)
pe.putInt16(b.MinVersion)
pe.putInt16(b.MaxVersion)
return nil
}
func (b *ApiVersionsResponseBlock) decode(pd packetDecoder) error {
var err error
if b.ApiKey, err = pd.getInt16(); err != nil {
return err
}
if b.MinVersion, err = pd.getInt16(); err != nil {
return err
}
if b.MaxVersion, err = pd.getInt16(); err != nil {
return err
}
return nil
}
type ApiVersionsResponse struct {
Err KError
ApiVersions []*ApiVersionsResponseBlock
}
func (r *ApiVersionsResponse) encode(pe packetEncoder) error {
pe.putInt16(int16(r.Err))
if err := pe.putArrayLength(len(r.ApiVersions)); err != nil {
return err
}
for _, apiVersion := range r.ApiVersions {
if err := apiVersion.encode(pe); err != nil {
return err
}
}
return nil
}
func (r *ApiVersionsResponse) decode(pd packetDecoder, version int16) error {
kerr, err := pd.getInt16()
if err != nil {
return err
}
r.Err = KError(kerr)
numBlocks, err := pd.getArrayLength()
if err != nil {
return err
}
r.ApiVersions = make([]*ApiVersionsResponseBlock, numBlocks)
for i := 0; i < numBlocks; i++ {
block := new(ApiVersionsResponseBlock)
if err := block.decode(pd); err != nil {
return err
}
r.ApiVersions[i] = block
}
return nil
}
func (r *ApiVersionsResponse) key() int16 {
return 18
}
func (r *ApiVersionsResponse) version() int16 {
return 0
}
func (r *ApiVersionsResponse) requiredVersion() KafkaVersion {
return V0_10_0_0
}

932
vendor/github.com/Shopify/sarama/async_producer.go generated vendored Normal file
View File

@ -0,0 +1,932 @@
package sarama
import (
"encoding/binary"
"fmt"
"sync"
"time"
"github.com/eapache/go-resiliency/breaker"
"github.com/eapache/queue"
)
// AsyncProducer publishes Kafka messages using a non-blocking API. It routes messages
// to the correct broker for the provided topic-partition, refreshing metadata as appropriate,
// and parses responses for errors. You must read from the Errors() channel or the
// producer will deadlock. You must call Close() or AsyncClose() on a producer to avoid
// leaks: it will not be garbage-collected automatically when it passes out of
// scope.
type AsyncProducer interface {
// AsyncClose triggers a shutdown of the producer. The shutdown has completed
// when both the Errors and Successes channels have been closed. When calling
// AsyncClose, you *must* continue to read from those channels in order to
// drain the results of any messages in flight.
AsyncClose()
// Close shuts down the producer and waits for any buffered messages to be
// flushed. You must call this function before a producer object passes out of
// scope, as it may otherwise leak memory. You must call this before calling
// Close on the underlying client.
Close() error
// Input is the input channel for the user to write messages to that they
// wish to send.
Input() chan<- *ProducerMessage
// Successes is the success output channel back to the user when Return.Successes is
// enabled. If Return.Successes is true, you MUST read from this channel or the
// Producer will deadlock. It is suggested that you send and read messages
// together in a single select statement.
Successes() <-chan *ProducerMessage
// Errors is the error output channel back to the user. You MUST read from this
// channel or the Producer will deadlock when the channel is full. Alternatively,
// you can set Producer.Return.Errors in your config to false, which prevents
// errors from being returned.
Errors() <-chan *ProducerError
}
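// Illustrative sketch (not part of the vendored source): the combined
// send-and-read select loop suggested above. Broker address, topic, and value
// are placeholder assumptions.
//
//	producer, err := NewAsyncProducer([]string{"localhost:9092"}, nil)
//	if err != nil {
//		panic(err)
//	}
//	msg := &ProducerMessage{Topic: "example-topic", Value: StringEncoder("hello")}
//	for {
//		select {
//		case producer.Input() <- msg:
//		case err := <-producer.Errors():
//			Logger.Println(err)
//		}
//	}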
type asyncProducer struct {
client Client
conf *Config
ownClient bool
errors chan *ProducerError
input, successes, retries chan *ProducerMessage
inFlight sync.WaitGroup
brokers map[*Broker]chan<- *ProducerMessage
brokerRefs map[chan<- *ProducerMessage]int
brokerLock sync.Mutex
}
// NewAsyncProducer creates a new AsyncProducer using the given broker addresses and configuration.
func NewAsyncProducer(addrs []string, conf *Config) (AsyncProducer, error) {
client, err := NewClient(addrs, conf)
if err != nil {
return nil, err
}
p, err := NewAsyncProducerFromClient(client)
if err != nil {
return nil, err
}
p.(*asyncProducer).ownClient = true
return p, nil
}
// NewAsyncProducerFromClient creates a new Producer using the given client. It is still
// necessary to call Close() on the underlying client when shutting down this producer.
func NewAsyncProducerFromClient(client Client) (AsyncProducer, error) {
// Check that we are not dealing with a closed Client before processing any other arguments
if client.Closed() {
return nil, ErrClosedClient
}
p := &asyncProducer{
client: client,
conf: client.Config(),
errors: make(chan *ProducerError),
input: make(chan *ProducerMessage),
successes: make(chan *ProducerMessage),
retries: make(chan *ProducerMessage),
brokers: make(map[*Broker]chan<- *ProducerMessage),
brokerRefs: make(map[chan<- *ProducerMessage]int),
}
// launch our singleton dispatchers
go withRecover(p.dispatcher)
go withRecover(p.retryHandler)
return p, nil
}
type flagSet int8
const (
syn flagSet = 1 << iota // first message from partitionProducer to brokerProducer
fin // final message from partitionProducer to brokerProducer and back
shutdown // start the shutdown process
)
// ProducerMessage is the collection of elements passed to the Producer in order to send a message.
type ProducerMessage struct {
Topic string // The Kafka topic for this message.
// The partitioning key for this message. Pre-existing Encoders include
// StringEncoder and ByteEncoder.
Key Encoder
// The actual message to store in Kafka. Pre-existing Encoders include
// StringEncoder and ByteEncoder.
Value Encoder
// The headers are key-value pairs that are transparently passed
// by Kafka between producers and consumers.
Headers []RecordHeader
// This field is used to hold arbitrary data you wish to include so it
// will be available when receiving on the Successes and Errors channels.
// Sarama completely ignores this field; it is only to be used for
// pass-through data.
Metadata interface{}
// Below this point are filled in by the producer as the message is processed
// Offset is the offset of the message stored on the broker. This is only
// guaranteed to be defined if the message was successfully delivered and
// RequiredAcks is not NoResponse.
Offset int64
// Partition is the partition that the message was sent to. This is only
// guaranteed to be defined if the message was successfully delivered.
Partition int32
// Timestamp is the timestamp assigned to the message by the broker. This
// is only guaranteed to be defined if the message was successfully
// delivered, RequiredAcks is not NoResponse, and the Kafka broker is at
// least version 0.10.0.
Timestamp time.Time
retries int
flags flagSet
expectation chan *ProducerError
}
const producerMessageOverhead = 26 // the metadata overhead of CRC, flags, etc.
func (m *ProducerMessage) byteSize(version int) int {
var size int
if version >= 2 {
size = maximumRecordOverhead
for _, h := range m.Headers {
size += len(h.Key) + len(h.Value) + 2*binary.MaxVarintLen32
}
} else {
size = producerMessageOverhead
}
if m.Key != nil {
size += m.Key.Length()
}
if m.Value != nil {
size += m.Value.Length()
}
return size
}
func (m *ProducerMessage) clear() {
m.flags = 0
m.retries = 0
}
// ProducerError is the type of error generated when the producer fails to deliver a message.
// It contains the original ProducerMessage as well as the actual error value.
type ProducerError struct {
Msg *ProducerMessage
Err error
}
func (pe ProducerError) Error() string {
return fmt.Sprintf("kafka: Failed to produce message to topic %s: %s", pe.Msg.Topic, pe.Err)
}
// ProducerErrors is a type that wraps a batch of "ProducerError"s and implements the Error interface.
// It can be returned from the Producer's Close method to avoid the need to manually drain the Errors channel
// when closing a producer.
type ProducerErrors []*ProducerError
func (pe ProducerErrors) Error() string {
return fmt.Sprintf("kafka: Failed to deliver %d messages.", len(pe))
}
func (p *asyncProducer) Errors() <-chan *ProducerError {
return p.errors
}
func (p *asyncProducer) Successes() <-chan *ProducerMessage {
return p.successes
}
func (p *asyncProducer) Input() chan<- *ProducerMessage {
return p.input
}
func (p *asyncProducer) Close() error {
p.AsyncClose()
if p.conf.Producer.Return.Successes {
go withRecover(func() {
for range p.successes {
}
})
}
var errors ProducerErrors
if p.conf.Producer.Return.Errors {
for event := range p.errors {
errors = append(errors, event)
}
} else {
<-p.errors
}
if len(errors) > 0 {
return errors
}
return nil
}
func (p *asyncProducer) AsyncClose() {
go withRecover(p.shutdown)
}
// singleton
// dispatches messages by topic
func (p *asyncProducer) dispatcher() {
handlers := make(map[string]chan<- *ProducerMessage)
shuttingDown := false
for msg := range p.input {
if msg == nil {
Logger.Println("Something tried to send a nil message, it was ignored.")
continue
}
if msg.flags&shutdown != 0 {
shuttingDown = true
p.inFlight.Done()
continue
} else if msg.retries == 0 {
if shuttingDown {
// we can't just call returnError here because that decrements the wait group,
// which hasn't been incremented yet for this message, and shouldn't be
pErr := &ProducerError{Msg: msg, Err: ErrShuttingDown}
if p.conf.Producer.Return.Errors {
p.errors <- pErr
} else {
Logger.Println(pErr)
}
continue
}
p.inFlight.Add(1)
}
version := 1
if p.conf.Version.IsAtLeast(V0_11_0_0) {
version = 2
} else if msg.Headers != nil {
p.returnError(msg, ConfigurationError("Producing headers requires Kafka at least v0.11"))
continue
}
if msg.byteSize(version) > p.conf.Producer.MaxMessageBytes {
p.returnError(msg, ErrMessageSizeTooLarge)
continue
}
handler := handlers[msg.Topic]
if handler == nil {
handler = p.newTopicProducer(msg.Topic)
handlers[msg.Topic] = handler
}
handler <- msg
}
for _, handler := range handlers {
close(handler)
}
}
// one per topic
// partitions messages, then dispatches them by partition
type topicProducer struct {
parent *asyncProducer
topic string
input <-chan *ProducerMessage
breaker *breaker.Breaker
handlers map[int32]chan<- *ProducerMessage
partitioner Partitioner
}
func (p *asyncProducer) newTopicProducer(topic string) chan<- *ProducerMessage {
input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
tp := &topicProducer{
parent: p,
topic: topic,
input: input,
breaker: breaker.New(3, 1, 10*time.Second),
handlers: make(map[int32]chan<- *ProducerMessage),
partitioner: p.conf.Producer.Partitioner(topic),
}
go withRecover(tp.dispatch)
return input
}
func (tp *topicProducer) dispatch() {
for msg := range tp.input {
if msg.retries == 0 {
if err := tp.partitionMessage(msg); err != nil {
tp.parent.returnError(msg, err)
continue
}
}
handler := tp.handlers[msg.Partition]
if handler == nil {
handler = tp.parent.newPartitionProducer(msg.Topic, msg.Partition)
tp.handlers[msg.Partition] = handler
}
handler <- msg
}
for _, handler := range tp.handlers {
close(handler)
}
}
func (tp *topicProducer) partitionMessage(msg *ProducerMessage) error {
var partitions []int32
err := tp.breaker.Run(func() (err error) {
var requiresConsistency = false
if ep, ok := tp.partitioner.(DynamicConsistencyPartitioner); ok {
requiresConsistency = ep.MessageRequiresConsistency(msg)
} else {
requiresConsistency = tp.partitioner.RequiresConsistency()
}
if requiresConsistency {
partitions, err = tp.parent.client.Partitions(msg.Topic)
} else {
partitions, err = tp.parent.client.WritablePartitions(msg.Topic)
}
return
})
if err != nil {
return err
}
numPartitions := int32(len(partitions))
if numPartitions == 0 {
return ErrLeaderNotAvailable
}
choice, err := tp.partitioner.Partition(msg, numPartitions)
if err != nil {
return err
} else if choice < 0 || choice >= numPartitions {
return ErrInvalidPartition
}
msg.Partition = partitions[choice]
return nil
}
// one per partition per topic
// dispatches messages to the appropriate broker
// also responsible for maintaining message order during retries
type partitionProducer struct {
parent *asyncProducer
topic string
partition int32
input <-chan *ProducerMessage
leader *Broker
breaker *breaker.Breaker
output chan<- *ProducerMessage
// highWatermark tracks the "current" retry level, which is the only one where we actually let messages through,
// all other messages get buffered in retryState[msg.retries].buf to preserve ordering
// retryState[msg.retries].expectChaser simply tracks whether we've seen a fin message for a given level (and
// therefore whether our buffer is complete and safe to flush)
highWatermark int
retryState []partitionRetryState
}
type partitionRetryState struct {
buf []*ProducerMessage
expectChaser bool
}
func (p *asyncProducer) newPartitionProducer(topic string, partition int32) chan<- *ProducerMessage {
input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
pp := &partitionProducer{
parent: p,
topic: topic,
partition: partition,
input: input,
breaker: breaker.New(3, 1, 10*time.Second),
retryState: make([]partitionRetryState, p.conf.Producer.Retry.Max+1),
}
go withRecover(pp.dispatch)
return input
}
func (pp *partitionProducer) dispatch() {
// try to prefetch the leader; if this doesn't work, we'll do a proper call to `updateLeader`
// on the first message
pp.leader, _ = pp.parent.client.Leader(pp.topic, pp.partition)
if pp.leader != nil {
pp.output = pp.parent.getBrokerProducer(pp.leader)
pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
}
for msg := range pp.input {
if msg.retries > pp.highWatermark {
// a new, higher, retry level; handle it and then back off
pp.newHighWatermark(msg.retries)
time.Sleep(pp.parent.conf.Producer.Retry.Backoff)
} else if pp.highWatermark > 0 {
// we are retrying something (else highWatermark would be 0) but this message is not a *new* retry level
if msg.retries < pp.highWatermark {
// in fact this message is not even the current retry level, so buffer it for now (unless it's just a fin)
if msg.flags&fin == fin {
pp.retryState[msg.retries].expectChaser = false
pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected
} else {
pp.retryState[msg.retries].buf = append(pp.retryState[msg.retries].buf, msg)
}
continue
} else if msg.flags&fin == fin {
// this message is of the current retry level (msg.retries == highWatermark) and the fin flag is set,
// meaning this retry level is done and we can go down (at least) one level and flush that
pp.retryState[pp.highWatermark].expectChaser = false
pp.flushRetryBuffers()
pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected
continue
}
}
// if we made it this far then the current msg contains real data, and can be sent to the next goroutine
// without breaking any of our ordering guarantees
if pp.output == nil {
if err := pp.updateLeader(); err != nil {
pp.parent.returnError(msg, err)
time.Sleep(pp.parent.conf.Producer.Retry.Backoff)
continue
}
Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
}
pp.output <- msg
}
if pp.output != nil {
pp.parent.unrefBrokerProducer(pp.leader, pp.output)
}
}
func (pp *partitionProducer) newHighWatermark(hwm int) {
Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, hwm)
pp.highWatermark = hwm
// send off a fin so that we know when everything "in between" has made it
// back to us and we can safely flush the backlog (otherwise we risk re-ordering messages)
pp.retryState[pp.highWatermark].expectChaser = true
pp.parent.inFlight.Add(1) // we're generating a fin message; track it so we don't shut down while it's still inflight
pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: fin, retries: pp.highWatermark - 1}
// a new HWM means that our current broker selection is out of date
Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID())
pp.parent.unrefBrokerProducer(pp.leader, pp.output)
pp.output = nil
}
func (pp *partitionProducer) flushRetryBuffers() {
Logger.Printf("producer/leader/%s/%d state change to [flushing-%d]\n", pp.topic, pp.partition, pp.highWatermark)
for {
pp.highWatermark--
if pp.output == nil {
if err := pp.updateLeader(); err != nil {
pp.parent.returnErrors(pp.retryState[pp.highWatermark].buf, err)
goto flushDone
}
Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
}
for _, msg := range pp.retryState[pp.highWatermark].buf {
pp.output <- msg
}
flushDone:
pp.retryState[pp.highWatermark].buf = nil
if pp.retryState[pp.highWatermark].expectChaser {
Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, pp.highWatermark)
break
} else if pp.highWatermark == 0 {
Logger.Printf("producer/leader/%s/%d state change to [normal]\n", pp.topic, pp.partition)
break
}
}
}
func (pp *partitionProducer) updateLeader() error {
return pp.breaker.Run(func() (err error) {
if err = pp.parent.client.RefreshMetadata(pp.topic); err != nil {
return err
}
if pp.leader, err = pp.parent.client.Leader(pp.topic, pp.partition); err != nil {
return err
}
pp.output = pp.parent.getBrokerProducer(pp.leader)
pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
return nil
})
}
// one per broker; also constructs an associated flusher
func (p *asyncProducer) newBrokerProducer(broker *Broker) chan<- *ProducerMessage {
var (
input = make(chan *ProducerMessage)
bridge = make(chan *produceSet)
responses = make(chan *brokerProducerResponse)
)
bp := &brokerProducer{
parent: p,
broker: broker,
input: input,
output: bridge,
responses: responses,
buffer: newProduceSet(p),
currentRetries: make(map[string]map[int32]error),
}
go withRecover(bp.run)
// minimal bridge to make the network response `select`able
go withRecover(func() {
for set := range bridge {
request := set.buildRequest()
response, err := broker.Produce(request)
responses <- &brokerProducerResponse{
set: set,
err: err,
res: response,
}
}
close(responses)
})
return input
}
type brokerProducerResponse struct {
set *produceSet
err error
res *ProduceResponse
}
// groups messages together into appropriately-sized batches for sending to the broker
// handles state related to retries etc
type brokerProducer struct {
parent *asyncProducer
broker *Broker
input <-chan *ProducerMessage
output chan<- *produceSet
responses <-chan *brokerProducerResponse
buffer *produceSet
timer <-chan time.Time
timerFired bool
closing error
currentRetries map[string]map[int32]error
}
func (bp *brokerProducer) run() {
var output chan<- *produceSet
Logger.Printf("producer/broker/%d starting up\n", bp.broker.ID())
for {
select {
case msg := <-bp.input:
if msg == nil {
bp.shutdown()
return
}
if msg.flags&syn == syn {
Logger.Printf("producer/broker/%d state change to [open] on %s/%d\n",
bp.broker.ID(), msg.Topic, msg.Partition)
if bp.currentRetries[msg.Topic] == nil {
bp.currentRetries[msg.Topic] = make(map[int32]error)
}
bp.currentRetries[msg.Topic][msg.Partition] = nil
bp.parent.inFlight.Done()
continue
}
if reason := bp.needsRetry(msg); reason != nil {
bp.parent.retryMessage(msg, reason)
if bp.closing == nil && msg.flags&fin == fin {
// we were retrying this partition but we can start processing again
delete(bp.currentRetries[msg.Topic], msg.Partition)
Logger.Printf("producer/broker/%d state change to [closed] on %s/%d\n",
bp.broker.ID(), msg.Topic, msg.Partition)
}
continue
}
if bp.buffer.wouldOverflow(msg) {
if err := bp.waitForSpace(msg); err != nil {
bp.parent.retryMessage(msg, err)
continue
}
}
if err := bp.buffer.add(msg); err != nil {
bp.parent.returnError(msg, err)
continue
}
if bp.parent.conf.Producer.Flush.Frequency > 0 && bp.timer == nil {
bp.timer = time.After(bp.parent.conf.Producer.Flush.Frequency)
}
case <-bp.timer:
bp.timerFired = true
case output <- bp.buffer:
bp.rollOver()
case response := <-bp.responses:
bp.handleResponse(response)
}
if bp.timerFired || bp.buffer.readyToFlush() {
output = bp.output
} else {
output = nil
}
}
}
func (bp *brokerProducer) shutdown() {
for !bp.buffer.empty() {
select {
case response := <-bp.responses:
bp.handleResponse(response)
case bp.output <- bp.buffer:
bp.rollOver()
}
}
close(bp.output)
for response := range bp.responses {
bp.handleResponse(response)
}
Logger.Printf("producer/broker/%d shut down\n", bp.broker.ID())
}
func (bp *brokerProducer) needsRetry(msg *ProducerMessage) error {
if bp.closing != nil {
return bp.closing
}
return bp.currentRetries[msg.Topic][msg.Partition]
}
func (bp *brokerProducer) waitForSpace(msg *ProducerMessage) error {
Logger.Printf("producer/broker/%d maximum request accumulated, waiting for space\n", bp.broker.ID())
for {
select {
case response := <-bp.responses:
bp.handleResponse(response)
// handling a response can change our state, so re-check some things
if reason := bp.needsRetry(msg); reason != nil {
return reason
} else if !bp.buffer.wouldOverflow(msg) {
return nil
}
case bp.output <- bp.buffer:
bp.rollOver()
return nil
}
}
}
func (bp *brokerProducer) rollOver() {
bp.timer = nil
bp.timerFired = false
bp.buffer = newProduceSet(bp.parent)
}
func (bp *brokerProducer) handleResponse(response *brokerProducerResponse) {
if response.err != nil {
bp.handleError(response.set, response.err)
} else {
bp.handleSuccess(response.set, response.res)
}
if bp.buffer.empty() {
bp.rollOver() // this can happen if the response invalidated our buffer
}
}
func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceResponse) {
// we iterate through the blocks in the request set, not the response, so that we notice
// if the response is missing a block completely
sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
if response == nil {
// this only happens when RequiredAcks is NoResponse, so we have to assume success
bp.parent.returnSuccesses(msgs)
return
}
block := response.GetBlock(topic, partition)
if block == nil {
bp.parent.returnErrors(msgs, ErrIncompleteResponse)
return
}
switch block.Err {
// Success
case ErrNoError:
if bp.parent.conf.Version.IsAtLeast(V0_10_0_0) && !block.Timestamp.IsZero() {
for _, msg := range msgs {
msg.Timestamp = block.Timestamp
}
}
for i, msg := range msgs {
msg.Offset = block.Offset + int64(i)
}
bp.parent.returnSuccesses(msgs)
// Retriable errors
case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition,
ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend:
Logger.Printf("producer/broker/%d state change to [retrying] on %s/%d because %v\n",
bp.broker.ID(), topic, partition, block.Err)
bp.currentRetries[topic][partition] = block.Err
bp.parent.retryMessages(msgs, block.Err)
bp.parent.retryMessages(bp.buffer.dropPartition(topic, partition), block.Err)
// Other non-retriable errors
default:
bp.parent.returnErrors(msgs, block.Err)
}
})
}
func (bp *brokerProducer) handleError(sent *produceSet, err error) {
switch err.(type) {
case PacketEncodingError:
sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
bp.parent.returnErrors(msgs, err)
})
default:
Logger.Printf("producer/broker/%d state change to [closing] because %s\n", bp.broker.ID(), err)
bp.parent.abandonBrokerConnection(bp.broker)
_ = bp.broker.Close()
bp.closing = err
sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
bp.parent.retryMessages(msgs, err)
})
bp.buffer.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
bp.parent.retryMessages(msgs, err)
})
bp.rollOver()
}
}
// singleton
// effectively a "bridge" between the flushers and the dispatcher in order to avoid deadlock
// based on https://godoc.org/github.com/eapache/channels#InfiniteChannel
func (p *asyncProducer) retryHandler() {
var msg *ProducerMessage
buf := queue.New()
for {
if buf.Length() == 0 {
msg = <-p.retries
} else {
select {
case msg = <-p.retries:
case p.input <- buf.Peek().(*ProducerMessage):
buf.Remove()
continue
}
}
if msg == nil {
return
}
buf.Add(msg)
}
}
// utility functions
func (p *asyncProducer) shutdown() {
Logger.Println("Producer shutting down.")
p.inFlight.Add(1)
p.input <- &ProducerMessage{flags: shutdown}
p.inFlight.Wait()
if p.ownClient {
err := p.client.Close()
if err != nil {
Logger.Println("producer/shutdown failed to close the embedded client:", err)
}
}
close(p.input)
close(p.retries)
close(p.errors)
close(p.successes)
}
func (p *asyncProducer) returnError(msg *ProducerMessage, err error) {
msg.clear()
pErr := &ProducerError{Msg: msg, Err: err}
if p.conf.Producer.Return.Errors {
p.errors <- pErr
} else {
Logger.Println(pErr)
}
p.inFlight.Done()
}
func (p *asyncProducer) returnErrors(batch []*ProducerMessage, err error) {
for _, msg := range batch {
p.returnError(msg, err)
}
}
func (p *asyncProducer) returnSuccesses(batch []*ProducerMessage) {
for _, msg := range batch {
if p.conf.Producer.Return.Successes {
msg.clear()
p.successes <- msg
}
p.inFlight.Done()
}
}
func (p *asyncProducer) retryMessage(msg *ProducerMessage, err error) {
if msg.retries >= p.conf.Producer.Retry.Max {
p.returnError(msg, err)
} else {
msg.retries++
p.retries <- msg
}
}
func (p *asyncProducer) retryMessages(batch []*ProducerMessage, err error) {
for _, msg := range batch {
p.retryMessage(msg, err)
}
}
func (p *asyncProducer) getBrokerProducer(broker *Broker) chan<- *ProducerMessage {
p.brokerLock.Lock()
defer p.brokerLock.Unlock()
bp := p.brokers[broker]
if bp == nil {
bp = p.newBrokerProducer(broker)
p.brokers[broker] = bp
p.brokerRefs[bp] = 0
}
p.brokerRefs[bp]++
return bp
}
func (p *asyncProducer) unrefBrokerProducer(broker *Broker, bp chan<- *ProducerMessage) {
p.brokerLock.Lock()
defer p.brokerLock.Unlock()
p.brokerRefs[bp]--
if p.brokerRefs[bp] == 0 {
close(bp)
delete(p.brokerRefs, bp)
if p.brokers[broker] == bp {
delete(p.brokers, broker)
}
}
}
func (p *asyncProducer) abandonBrokerConnection(broker *Broker) {
p.brokerLock.Lock()
defer p.brokerLock.Unlock()
delete(p.brokers, broker)
}
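// exampleAsyncProduce is an illustrative sketch (not part of the upstream
// file): a minimal fire-and-forget produce loop. The broker address and topic
// are placeholders; real code should also drain Successes() when
// Producer.Return.Successes is enabled.
func exampleAsyncProduce() error {
	producer, err := NewAsyncProducer([]string{"localhost:9092"}, nil) // placeholder address
	if err != nil {
		return err
	}
	defer func() { _ = producer.Close() }()

	// errors must be drained or Close() can block
	go func() {
		for err := range producer.Errors() {
			Logger.Println("produce failed:", err)
		}
	}()

	producer.Input() <- &ProducerMessage{Topic: "my-topic", Value: StringEncoder("hello")} // placeholder topic
	return nil
}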

vendor/github.com/Shopify/sarama/balance_strategy.go generated vendored Normal file

@ -0,0 +1,129 @@
package sarama
import (
"math"
"sort"
)
// BalanceStrategyPlan is the results of any BalanceStrategy.Plan attempt.
// It contains an allocation of topic/partitions by memberID in the form of
// a `memberID -> topic -> partitions` map.
type BalanceStrategyPlan map[string]map[string][]int32
// Add assigns a topic with a number of partitions to a member.
func (p BalanceStrategyPlan) Add(memberID, topic string, partitions ...int32) {
if len(partitions) == 0 {
return
}
if _, ok := p[memberID]; !ok {
p[memberID] = make(map[string][]int32, 1)
}
p[memberID][topic] = append(p[memberID][topic], partitions...)
}
// --------------------------------------------------------------------
// BalanceStrategy is used to balance topics and partitions
// across members of a consumer group.
type BalanceStrategy interface {
// Name uniquely identifies the strategy.
Name() string
// Plan accepts a map of `memberID -> metadata` and a map of `topic -> partitions`
// and returns a distribution plan.
Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error)
}
// --------------------------------------------------------------------
// BalanceStrategyRange is the default and assigns partitions as ranges to consumer group members.
// Example with one topic T with six partitions (0..5) and two members (M1, M2):
// M1: {T: [0, 1, 2]}
// M2: {T: [3, 4, 5]}
var BalanceStrategyRange = &balanceStrategy{
name: "range",
coreFn: func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32) {
step := float64(len(partitions)) / float64(len(memberIDs))
for i, memberID := range memberIDs {
pos := float64(i)
min := int(math.Floor(pos*step + 0.5))
max := int(math.Floor((pos+1)*step + 0.5))
plan.Add(memberID, topic, partitions[min:max]...)
}
},
}
// BalanceStrategyRoundRobin assigns partitions to members in alternating order.
// Example with topic T with six partitions (0..5) and two members (M1, M2):
// M1: {T: [0, 2, 4]}
// M2: {T: [1, 3, 5]}
var BalanceStrategyRoundRobin = &balanceStrategy{
name: "roundrobin",
coreFn: func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32) {
for i, part := range partitions {
memberID := memberIDs[i%len(memberIDs)]
plan.Add(memberID, topic, part)
}
},
}
// --------------------------------------------------------------------
type balanceStrategy struct {
name string
coreFn func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32)
}
// Name implements BalanceStrategy.
func (s *balanceStrategy) Name() string { return s.name }
// Balance implements BalanceStrategy.
func (s *balanceStrategy) Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error) {
// Build members by topic map
mbt := make(map[string][]string)
for memberID, meta := range members {
for _, topic := range meta.Topics {
mbt[topic] = append(mbt[topic], memberID)
}
}
// Sort members for each topic
for topic, memberIDs := range mbt {
sort.Sort(&balanceStrategySortable{
topic: topic,
memberIDs: memberIDs,
})
}
// Assemble plan
plan := make(BalanceStrategyPlan, len(members))
for topic, memberIDs := range mbt {
s.coreFn(plan, memberIDs, topic, topics[topic])
}
return plan, nil
}
type balanceStrategySortable struct {
topic string
memberIDs []string
}
func (p balanceStrategySortable) Len() int { return len(p.memberIDs) }
func (p balanceStrategySortable) Swap(i, j int) {
p.memberIDs[i], p.memberIDs[j] = p.memberIDs[j], p.memberIDs[i]
}
func (p balanceStrategySortable) Less(i, j int) bool {
return balanceStrategyHashValue(p.topic, p.memberIDs[i]) < balanceStrategyHashValue(p.topic, p.memberIDs[j])
}
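// balanceStrategyHashValue computes a 32-bit FNV-1a hash over the
// concatenation of its inputs (offset basis 2166136261, prime 16777619).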
func balanceStrategyHashValue(vv ...string) uint32 {
h := uint32(2166136261)
for _, s := range vv {
for _, c := range s {
h ^= uint32(c)
h *= 16777619
}
}
return h
}
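// exampleRangePlan is an illustrative sketch (not part of the upstream file):
// the range strategy splitting six partitions of a placeholder topic "T"
// across two members, mirroring the example in the BalanceStrategyRange docs.
// Which member receives which range depends on the per-topic hash ordering of
// member IDs.
func exampleRangePlan() (BalanceStrategyPlan, error) {
	members := map[string]ConsumerGroupMemberMetadata{
		"M1": {Topics: []string{"T"}},
		"M2": {Topics: []string{"T"}},
	}
	topics := map[string][]int32{"T": {0, 1, 2, 3, 4, 5}}
	// Expected shape: one member gets {T: [0 1 2]}, the other {T: [3 4 5]}.
	return BalanceStrategyRange.Plan(members, topics)
}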

vendor/github.com/Shopify/sarama/broker.go generated vendored Normal file

@ -0,0 +1,884 @@
package sarama
import (
"crypto/tls"
"encoding/binary"
"fmt"
"io"
"net"
"strconv"
"sync"
"sync/atomic"
"time"
"github.com/rcrowley/go-metrics"
)
// Broker represents a single Kafka broker connection. All operations on this object are entirely concurrency-safe.
type Broker struct {
id int32
addr string
rack *string
conf *Config
correlationID int32
conn net.Conn
connErr error
lock sync.Mutex
opened int32
responses chan responsePromise
done chan bool
incomingByteRate metrics.Meter
requestRate metrics.Meter
requestSize metrics.Histogram
requestLatency metrics.Histogram
outgoingByteRate metrics.Meter
responseRate metrics.Meter
responseSize metrics.Histogram
brokerIncomingByteRate metrics.Meter
brokerRequestRate metrics.Meter
brokerRequestSize metrics.Histogram
brokerRequestLatency metrics.Histogram
brokerOutgoingByteRate metrics.Meter
brokerResponseRate metrics.Meter
brokerResponseSize metrics.Histogram
}
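// responsePromise pairs an in-flight request with the goroutine waiting on its
// result: responseReceiver delivers either the raw response bytes on packets
// or a failure on errors, matched up by correlationID.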
type responsePromise struct {
requestTime time.Time
correlationID int32
packets chan []byte
errors chan error
}
// NewBroker creates and returns a Broker targeting the given host:port address.
// This does not attempt to actually connect; you have to call Open() for that.
func NewBroker(addr string) *Broker {
return &Broker{id: -1, addr: addr}
}
// Open tries to connect to the Broker if it is not already connected or connecting, but does not block
// waiting for the connection to complete. This means that any subsequent operations on the broker will
// block waiting for the connection to succeed or fail. To get the effect of a fully synchronous Open call,
// follow it by a call to Connected(). The only errors Open will return directly are ConfigurationError or
// AlreadyConnected. If conf is nil, the result of NewConfig() is used.
func (b *Broker) Open(conf *Config) error {
if !atomic.CompareAndSwapInt32(&b.opened, 0, 1) {
return ErrAlreadyConnected
}
if conf == nil {
conf = NewConfig()
}
err := conf.Validate()
if err != nil {
return err
}
b.lock.Lock()
go withRecover(func() {
defer b.lock.Unlock()
dialer := net.Dialer{
Timeout: conf.Net.DialTimeout,
KeepAlive: conf.Net.KeepAlive,
LocalAddr: conf.Net.LocalAddr,
}
if conf.Net.TLS.Enable {
b.conn, b.connErr = tls.DialWithDialer(&dialer, "tcp", b.addr, conf.Net.TLS.Config)
} else {
b.conn, b.connErr = dialer.Dial("tcp", b.addr)
}
if b.connErr != nil {
Logger.Printf("Failed to connect to broker %s: %s\n", b.addr, b.connErr)
b.conn = nil
atomic.StoreInt32(&b.opened, 0)
return
}
b.conn = newBufConn(b.conn)
b.conf = conf
// Create or reuse the global metrics shared between brokers
b.incomingByteRate = metrics.GetOrRegisterMeter("incoming-byte-rate", conf.MetricRegistry)
b.requestRate = metrics.GetOrRegisterMeter("request-rate", conf.MetricRegistry)
b.requestSize = getOrRegisterHistogram("request-size", conf.MetricRegistry)
b.requestLatency = getOrRegisterHistogram("request-latency-in-ms", conf.MetricRegistry)
b.outgoingByteRate = metrics.GetOrRegisterMeter("outgoing-byte-rate", conf.MetricRegistry)
b.responseRate = metrics.GetOrRegisterMeter("response-rate", conf.MetricRegistry)
b.responseSize = getOrRegisterHistogram("response-size", conf.MetricRegistry)
// Do not gather per-broker metrics for seed brokers (only used during bootstrap) because they
// all share the same id (-1) and are already exposed through the global metrics above
if b.id >= 0 {
b.brokerIncomingByteRate = getOrRegisterBrokerMeter("incoming-byte-rate", b, conf.MetricRegistry)
b.brokerRequestRate = getOrRegisterBrokerMeter("request-rate", b, conf.MetricRegistry)
b.brokerRequestSize = getOrRegisterBrokerHistogram("request-size", b, conf.MetricRegistry)
b.brokerRequestLatency = getOrRegisterBrokerHistogram("request-latency-in-ms", b, conf.MetricRegistry)
b.brokerOutgoingByteRate = getOrRegisterBrokerMeter("outgoing-byte-rate", b, conf.MetricRegistry)
b.brokerResponseRate = getOrRegisterBrokerMeter("response-rate", b, conf.MetricRegistry)
b.brokerResponseSize = getOrRegisterBrokerHistogram("response-size", b, conf.MetricRegistry)
}
if conf.Net.SASL.Enable {
b.connErr = b.sendAndReceiveSASLPlainAuth()
if b.connErr != nil {
err = b.conn.Close()
if err == nil {
Logger.Printf("Closed connection to broker %s\n", b.addr)
} else {
Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err)
}
b.conn = nil
atomic.StoreInt32(&b.opened, 0)
return
}
}
b.done = make(chan bool)
b.responses = make(chan responsePromise, b.conf.Net.MaxOpenRequests-1)
if b.id >= 0 {
Logger.Printf("Connected to broker at %s (registered as #%d)\n", b.addr, b.id)
} else {
Logger.Printf("Connected to broker at %s (unregistered)\n", b.addr)
}
go withRecover(b.responseReceiver)
})
return nil
}
// Connected returns true if the broker is connected and false otherwise. If the broker is not
// connected but it had tried to connect, the error from that connection attempt is also returned.
func (b *Broker) Connected() (bool, error) {
b.lock.Lock()
defer b.lock.Unlock()
return b.conn != nil, b.connErr
}
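// exampleOpenBroker is an illustrative sketch (not part of the upstream
// file): Open is asynchronous, so a follow-up Connected call gives the effect
// of a synchronous connect. The address is a placeholder.
func exampleOpenBroker() error {
	b := NewBroker("localhost:9092") // placeholder address
	if err := b.Open(nil); err != nil {
		return err
	}
	ok, err := b.Connected()
	if err != nil {
		return err
	}
	if !ok {
		return ErrNotConnected
	}
	return b.Close()
}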
func (b *Broker) Close() error {
b.lock.Lock()
defer b.lock.Unlock()
if b.conn == nil {
return ErrNotConnected
}
close(b.responses)
<-b.done
err := b.conn.Close()
b.conn = nil
b.connErr = nil
b.done = nil
b.responses = nil
if b.id >= 0 {
b.conf.MetricRegistry.Unregister(getMetricNameForBroker("incoming-byte-rate", b))
b.conf.MetricRegistry.Unregister(getMetricNameForBroker("request-rate", b))
b.conf.MetricRegistry.Unregister(getMetricNameForBroker("outgoing-byte-rate", b))
b.conf.MetricRegistry.Unregister(getMetricNameForBroker("response-rate", b))
}
if err == nil {
Logger.Printf("Closed connection to broker %s\n", b.addr)
} else {
Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err)
}
atomic.StoreInt32(&b.opened, 0)
return err
}
// ID returns the broker ID retrieved from Kafka's metadata, or -1 if that is not known.
func (b *Broker) ID() int32 {
return b.id
}
// Addr returns the broker address as either retrieved from Kafka's metadata or passed to NewBroker.
func (b *Broker) Addr() string {
return b.addr
}
func (b *Broker) GetMetadata(request *MetadataRequest) (*MetadataResponse, error) {
response := new(MetadataResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) GetConsumerMetadata(request *ConsumerMetadataRequest) (*ConsumerMetadataResponse, error) {
response := new(ConsumerMetadataResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) FindCoordinator(request *FindCoordinatorRequest) (*FindCoordinatorResponse, error) {
response := new(FindCoordinatorResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) GetAvailableOffsets(request *OffsetRequest) (*OffsetResponse, error) {
response := new(OffsetResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) Produce(request *ProduceRequest) (*ProduceResponse, error) {
var response *ProduceResponse
var err error
if request.RequiredAcks == NoResponse {
err = b.sendAndReceive(request, nil)
} else {
response = new(ProduceResponse)
err = b.sendAndReceive(request, response)
}
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) Fetch(request *FetchRequest) (*FetchResponse, error) {
response := new(FetchResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) CommitOffset(request *OffsetCommitRequest) (*OffsetCommitResponse, error) {
response := new(OffsetCommitResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) FetchOffset(request *OffsetFetchRequest) (*OffsetFetchResponse, error) {
response := new(OffsetFetchResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) JoinGroup(request *JoinGroupRequest) (*JoinGroupResponse, error) {
response := new(JoinGroupResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) SyncGroup(request *SyncGroupRequest) (*SyncGroupResponse, error) {
response := new(SyncGroupResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) LeaveGroup(request *LeaveGroupRequest) (*LeaveGroupResponse, error) {
response := new(LeaveGroupResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) Heartbeat(request *HeartbeatRequest) (*HeartbeatResponse, error) {
response := new(HeartbeatResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) ListGroups(request *ListGroupsRequest) (*ListGroupsResponse, error) {
response := new(ListGroupsResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) DescribeGroups(request *DescribeGroupsRequest) (*DescribeGroupsResponse, error) {
response := new(DescribeGroupsResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) ApiVersions(request *ApiVersionsRequest) (*ApiVersionsResponse, error) {
response := new(ApiVersionsResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) CreateTopics(request *CreateTopicsRequest) (*CreateTopicsResponse, error) {
response := new(CreateTopicsResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) DeleteTopics(request *DeleteTopicsRequest) (*DeleteTopicsResponse, error) {
response := new(DeleteTopicsResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) CreatePartitions(request *CreatePartitionsRequest) (*CreatePartitionsResponse, error) {
response := new(CreatePartitionsResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) DeleteRecords(request *DeleteRecordsRequest) (*DeleteRecordsResponse, error) {
response := new(DeleteRecordsResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) DescribeAcls(request *DescribeAclsRequest) (*DescribeAclsResponse, error) {
response := new(DescribeAclsResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) CreateAcls(request *CreateAclsRequest) (*CreateAclsResponse, error) {
response := new(CreateAclsResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) DeleteAcls(request *DeleteAclsRequest) (*DeleteAclsResponse, error) {
response := new(DeleteAclsResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) InitProducerID(request *InitProducerIDRequest) (*InitProducerIDResponse, error) {
response := new(InitProducerIDResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) AddPartitionsToTxn(request *AddPartitionsToTxnRequest) (*AddPartitionsToTxnResponse, error) {
response := new(AddPartitionsToTxnResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) AddOffsetsToTxn(request *AddOffsetsToTxnRequest) (*AddOffsetsToTxnResponse, error) {
response := new(AddOffsetsToTxnResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) EndTxn(request *EndTxnRequest) (*EndTxnResponse, error) {
response := new(EndTxnResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) TxnOffsetCommit(request *TxnOffsetCommitRequest) (*TxnOffsetCommitResponse, error) {
response := new(TxnOffsetCommitResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) DescribeConfigs(request *DescribeConfigsRequest) (*DescribeConfigsResponse, error) {
response := new(DescribeConfigsResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) AlterConfigs(request *AlterConfigsRequest) (*AlterConfigsResponse, error) {
response := new(AlterConfigsResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) DeleteGroups(request *DeleteGroupsRequest) (*DeleteGroupsResponse, error) {
response := new(DeleteGroupsResponse)
if err := b.sendAndReceive(request, response); err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) send(rb protocolBody, promiseResponse bool) (*responsePromise, error) {
b.lock.Lock()
defer b.lock.Unlock()
if b.conn == nil {
if b.connErr != nil {
return nil, b.connErr
}
return nil, ErrNotConnected
}
if !b.conf.Version.IsAtLeast(rb.requiredVersion()) {
return nil, ErrUnsupportedVersion
}
req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb}
buf, err := encode(req, b.conf.MetricRegistry)
if err != nil {
return nil, err
}
err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
if err != nil {
return nil, err
}
requestTime := time.Now()
bytes, err := b.conn.Write(buf)
b.updateOutgoingCommunicationMetrics(bytes)
if err != nil {
return nil, err
}
b.correlationID++
if !promiseResponse {
// Record request latency without the response
b.updateRequestLatencyMetrics(time.Since(requestTime))
return nil, nil
}
promise := responsePromise{requestTime, req.correlationID, make(chan []byte), make(chan error)}
b.responses <- promise
return &promise, nil
}
func (b *Broker) sendAndReceive(req protocolBody, res versionedDecoder) error {
promise, err := b.send(req, res != nil)
if err != nil {
return err
}
if promise == nil {
return nil
}
select {
case buf := <-promise.packets:
return versionedDecode(buf, res, req.version())
case err = <-promise.errors:
return err
}
}
func (b *Broker) decode(pd packetDecoder, version int16) (err error) {
b.id, err = pd.getInt32()
if err != nil {
return err
}
host, err := pd.getString()
if err != nil {
return err
}
port, err := pd.getInt32()
if err != nil {
return err
}
if version >= 1 {
b.rack, err = pd.getNullableString()
if err != nil {
return err
}
}
b.addr = net.JoinHostPort(host, fmt.Sprint(port))
if _, _, err := net.SplitHostPort(b.addr); err != nil {
return err
}
return nil
}
func (b *Broker) encode(pe packetEncoder, version int16) (err error) {
host, portstr, err := net.SplitHostPort(b.addr)
if err != nil {
return err
}
port, err := strconv.Atoi(portstr)
if err != nil {
return err
}
pe.putInt32(b.id)
err = pe.putString(host)
if err != nil {
return err
}
pe.putInt32(int32(port))
if version >= 1 {
err = pe.putNullableString(b.rack)
if err != nil {
return err
}
}
return nil
}
func (b *Broker) responseReceiver() {
var dead error
header := make([]byte, 8)
for response := range b.responses {
if dead != nil {
response.errors <- dead
continue
}
err := b.conn.SetReadDeadline(time.Now().Add(b.conf.Net.ReadTimeout))
if err != nil {
dead = err
response.errors <- err
continue
}
bytesReadHeader, err := io.ReadFull(b.conn, header)
requestLatency := time.Since(response.requestTime)
if err != nil {
b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
dead = err
response.errors <- err
continue
}
decodedHeader := responseHeader{}
err = decode(header, &decodedHeader)
if err != nil {
b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
dead = err
response.errors <- err
continue
}
if decodedHeader.correlationID != response.correlationID {
b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
// TODO if decoded ID < cur ID, discard until we catch up
// TODO if decoded ID > cur ID, save it so when cur ID catches up we have a response
dead = PacketDecodingError{fmt.Sprintf("correlation ID didn't match, wanted %d, got %d", response.correlationID, decodedHeader.correlationID)}
response.errors <- dead
continue
}
buf := make([]byte, decodedHeader.length-4)
bytesReadBody, err := io.ReadFull(b.conn, buf)
b.updateIncomingCommunicationMetrics(bytesReadHeader+bytesReadBody, requestLatency)
if err != nil {
dead = err
response.errors <- err
continue
}
response.packets <- buf
}
close(b.done)
}
func (b *Broker) sendAndReceiveSASLPlainHandshake() error {
rb := &SaslHandshakeRequest{"PLAIN"}
req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb}
buf, err := encode(req, b.conf.MetricRegistry)
if err != nil {
return err
}
err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
if err != nil {
return err
}
requestTime := time.Now()
bytes, err := b.conn.Write(buf)
b.updateOutgoingCommunicationMetrics(bytes)
if err != nil {
Logger.Printf("Failed to send SASL handshake %s: %s\n", b.addr, err.Error())
return err
}
b.correlationID++
// wait for the response
header := make([]byte, 8) // response header
_, err = io.ReadFull(b.conn, header)
if err != nil {
Logger.Printf("Failed to read SASL handshake header : %s\n", err.Error())
return err
}
length := binary.BigEndian.Uint32(header[:4])
payload := make([]byte, length-4)
n, err := io.ReadFull(b.conn, payload)
if err != nil {
Logger.Printf("Failed to read SASL handshake payload : %s\n", err.Error())
return err
}
b.updateIncomingCommunicationMetrics(n+8, time.Since(requestTime))
res := &SaslHandshakeResponse{}
err = versionedDecode(payload, res, 0)
if err != nil {
Logger.Printf("Failed to parse SASL handshake : %s\n", err.Error())
return err
}
if res.Err != ErrNoError {
Logger.Printf("Invalid SASL Mechanism : %s\n", res.Err.Error())
return res.Err
}
Logger.Print("Successful SASL handshake")
return nil
}
// Kafka 0.10.0 plans to support SASL Plain and Kerberos as per PR #812 (KIP-43)/(JIRA KAFKA-3149)
// Some hosted kafka services such as IBM Message Hub already offer SASL/PLAIN auth with Kafka 0.9
//
// In SASL Plain, Kafka expects the auth header to be in the following format
// Message format (from https://tools.ietf.org/html/rfc4616):
//
// message = [authzid] UTF8NUL authcid UTF8NUL passwd
// authcid = 1*SAFE ; MUST accept up to 255 octets
// authzid = 1*SAFE ; MUST accept up to 255 octets
// passwd = 1*SAFE ; MUST accept up to 255 octets
// UTF8NUL = %x00 ; UTF-8 encoded NUL character
//
// SAFE = UTF1 / UTF2 / UTF3 / UTF4
// ;; any UTF-8 encoded Unicode character except NUL
//
// When credentials are valid, Kafka returns a 4 byte array of null characters.
// When credentials are invalid, Kafka closes the connection. This does not seem to be the ideal way
// of responding to bad credentials, but that's how it's done today.
func (b *Broker) sendAndReceiveSASLPlainAuth() error {
if b.conf.Net.SASL.Handshake {
handshakeErr := b.sendAndReceiveSASLPlainHandshake()
if handshakeErr != nil {
Logger.Printf("Error while performing SASL handshake %s\n", b.addr)
return handshakeErr
}
}
length := 1 + len(b.conf.Net.SASL.User) + 1 + len(b.conf.Net.SASL.Password)
authBytes := make([]byte, length+4) //4 byte length header + auth data
binary.BigEndian.PutUint32(authBytes, uint32(length))
copy(authBytes[4:], []byte("\x00"+b.conf.Net.SASL.User+"\x00"+b.conf.Net.SASL.Password))
err := b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
if err != nil {
Logger.Printf("Failed to set write deadline when doing SASL auth with broker %s: %s\n", b.addr, err.Error())
return err
}
requestTime := time.Now()
bytesWritten, err := b.conn.Write(authBytes)
b.updateOutgoingCommunicationMetrics(bytesWritten)
if err != nil {
Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error())
return err
}
header := make([]byte, 4)
n, err := io.ReadFull(b.conn, header)
b.updateIncomingCommunicationMetrics(n, time.Since(requestTime))
// If the credentials are valid, we would get a 4 byte response filled with null characters.
// Otherwise, the broker closes the connection and we get an EOF
if err != nil {
Logger.Printf("Failed to read response while authenticating with SASL to broker %s: %s\n", b.addr, err.Error())
return err
}
Logger.Printf("SASL authentication successful with broker %s:%v - %v\n", b.addr, n, header)
return nil
}
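// For illustration (user "alice" and password "secret" are placeholders), the
// auth bytes written above consist of a 4-byte big-endian length header
// followed by the NUL-delimited PLAIN message:
//
//	[0x00 0x00 0x00 0x0d] 0x00 'a' 'l' 'i' 'c' 'e' 0x00 's' 'e' 'c' 'r' 'e' 't'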
func (b *Broker) updateIncomingCommunicationMetrics(bytes int, requestLatency time.Duration) {
b.updateRequestLatencyMetrics(requestLatency)
b.responseRate.Mark(1)
if b.brokerResponseRate != nil {
b.brokerResponseRate.Mark(1)
}
responseSize := int64(bytes)
b.incomingByteRate.Mark(responseSize)
if b.brokerIncomingByteRate != nil {
b.brokerIncomingByteRate.Mark(responseSize)
}
b.responseSize.Update(responseSize)
if b.brokerResponseSize != nil {
b.brokerResponseSize.Update(responseSize)
}
}
func (b *Broker) updateRequestLatencyMetrics(requestLatency time.Duration) {
requestLatencyInMs := int64(requestLatency / time.Millisecond)
b.requestLatency.Update(requestLatencyInMs)
if b.brokerRequestLatency != nil {
b.brokerRequestLatency.Update(requestLatencyInMs)
}
}
func (b *Broker) updateOutgoingCommunicationMetrics(bytes int) {
b.requestRate.Mark(1)
if b.brokerRequestRate != nil {
b.brokerRequestRate.Mark(1)
}
requestSize := int64(bytes)
b.outgoingByteRate.Mark(requestSize)
if b.brokerOutgoingByteRate != nil {
b.brokerOutgoingByteRate.Mark(requestSize)
}
b.requestSize.Update(requestSize)
if b.brokerRequestSize != nil {
b.brokerRequestSize.Update(requestSize)
}
}

vendor/github.com/Shopify/sarama/client.go generated vendored Normal file

@ -0,0 +1,876 @@
package sarama
import (
"math/rand"
"sort"
"sync"
"time"
)
// Client is a generic Kafka client. It manages connections to one or more Kafka brokers.
// You MUST call Close() on a client to avoid leaks; it will not be garbage-collected
// automatically when it passes out of scope. It is safe to share a client amongst many
// users; however, Kafka will process requests from a single client strictly in serial,
// so it is generally more efficient to use the default of one client per producer/consumer.
type Client interface {
// Config returns the Config struct of the client. This struct should not be
// altered after it has been created.
Config() *Config
// Controller returns the cluster controller broker. Requires Kafka 0.10 or higher.
Controller() (*Broker, error)
// Brokers returns the current set of active brokers as retrieved from cluster metadata.
Brokers() []*Broker
// Topics returns the set of available topics as retrieved from cluster metadata.
Topics() ([]string, error)
// Partitions returns the sorted list of all partition IDs for the given topic.
Partitions(topic string) ([]int32, error)
// WritablePartitions returns the sorted list of all writable partition IDs for
// the given topic, where "writable" means "having a valid leader accepting
// writes".
WritablePartitions(topic string) ([]int32, error)
// Leader returns the broker object that is the leader of the current
// topic/partition, as determined by querying the cluster metadata.
Leader(topic string, partitionID int32) (*Broker, error)
// Replicas returns the set of all replica IDs for the given partition.
Replicas(topic string, partitionID int32) ([]int32, error)
// InSyncReplicas returns the set of all in-sync replica IDs for the given
// partition. In-sync replicas are replicas which are fully caught up with
// the partition leader.
InSyncReplicas(topic string, partitionID int32) ([]int32, error)
// RefreshMetadata takes a list of topics and queries the cluster to refresh the
// available metadata for those topics. If no topics are provided, it will refresh
// metadata for all topics.
RefreshMetadata(topics ...string) error
// GetOffset queries the cluster to get the most recent available offset at the
// given time (in milliseconds) on the topic/partition combination.
// Time should be OffsetOldest for the earliest available offset,
// OffsetNewest for the offset of the message that will be produced next, or a time.
GetOffset(topic string, partitionID int32, time int64) (int64, error)
// Coordinator returns the coordinating broker for a consumer group. It will
// return a locally cached value if it's available. You can call
// RefreshCoordinator to update the cached value. This function only works on
// Kafka 0.8.2 and higher.
Coordinator(consumerGroup string) (*Broker, error)
// RefreshCoordinator retrieves the coordinator for a consumer group and stores it
// in local cache. This function only works on Kafka 0.8.2 and higher.
RefreshCoordinator(consumerGroup string) error
// Close shuts down all broker connections managed by this client. It is required
// to call this function before a client object passes out of scope, as it will
// otherwise leak memory. You must close any Producers or Consumers using a client
// before you close the client.
Close() error
// Closed returns true if the client has already had Close called on it
Closed() bool
}
const (
// OffsetNewest stands for the log head offset, i.e. the offset that will be
// assigned to the next message that will be produced to the partition. You
// can send this to a client's GetOffset method to get this offset, or when
// calling ConsumePartition to start consuming new messages.
OffsetNewest int64 = -1
// OffsetOldest stands for the oldest offset available on the broker for a
// partition. You can send this to a client's GetOffset method to get this
// offset, or when calling ConsumePartition to start consuming from the
// oldest offset that is still available on the broker.
OffsetOldest int64 = -2
)
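// exampleGetOffset is an illustrative sketch (not part of the upstream file):
// querying the two sentinel offsets for partition 0 of a placeholder topic
// via an existing Client.
func exampleGetOffset(c Client) (oldest, newest int64, err error) {
	if oldest, err = c.GetOffset("my-topic", 0, OffsetOldest); err != nil { // placeholder topic
		return 0, 0, err
	}
	if newest, err = c.GetOffset("my-topic", 0, OffsetNewest); err != nil {
		return 0, 0, err
	}
	return oldest, newest, nil
}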
type client struct {
conf *Config
closer, closed chan none // for shutting down background metadata updater
// the broker addresses given to us through the constructor are not guaranteed to be returned in
// the cluster metadata (I *think* it only returns brokers who are currently leading partitions?)
// so we store them separately
seedBrokers []*Broker
deadSeeds []*Broker
controllerID int32 // cluster controller broker id
brokers map[int32]*Broker // maps broker ids to brokers
metadata map[string]map[int32]*PartitionMetadata // maps topics to partition ids to metadata
metadataTopics map[string]none // topics that need to collect metadata
coordinators map[string]int32 // Maps consumer group names to coordinating broker IDs
// If the number of partitions is large, we can get some churn calling cachedPartitions,
// so the result is cached. It is important to update this value whenever metadata is changed
cachedPartitionsResults map[string][maxPartitionIndex][]int32
lock sync.RWMutex // protects access to the maps that hold cluster state.
}
// NewClient creates a new Client. It connects to one of the given broker addresses
// and uses that broker to automatically fetch metadata on the rest of the kafka cluster. If metadata cannot
// be retrieved from any of the given broker addresses, the client is not created.
func NewClient(addrs []string, conf *Config) (Client, error) {
Logger.Println("Initializing new client")
if conf == nil {
conf = NewConfig()
}
if err := conf.Validate(); err != nil {
return nil, err
}
if len(addrs) < 1 {
return nil, ConfigurationError("You must provide at least one broker address")
}
client := &client{
conf: conf,
closer: make(chan none),
closed: make(chan none),
brokers: make(map[int32]*Broker),
metadata: make(map[string]map[int32]*PartitionMetadata),
metadataTopics: make(map[string]none),
cachedPartitionsResults: make(map[string][maxPartitionIndex][]int32),
coordinators: make(map[string]int32),
}
random := rand.New(rand.NewSource(time.Now().UnixNano()))
for _, index := range random.Perm(len(addrs)) {
client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index]))
}
if conf.Metadata.Full {
// do an initial fetch of all cluster metadata by specifying an empty list of topics
err := client.RefreshMetadata()
switch err {
case nil:
break
case ErrLeaderNotAvailable, ErrReplicaNotAvailable, ErrTopicAuthorizationFailed, ErrClusterAuthorizationFailed:
// indicates that maybe part of the cluster is down, but is not fatal to creating the client
Logger.Println(err)
default:
close(client.closed) // we haven't started the background updater yet, so we have to do this manually
_ = client.Close()
return nil, err
}
}
go withRecover(client.backgroundMetadataUpdater)
Logger.Println("Successfully initialized new client")
return client, nil
}
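// exampleNewClient is an illustrative sketch (not part of the upstream file):
// constructing a client against a placeholder seed broker and making sure it
// is always closed.
func exampleNewClient() error {
	client, err := NewClient([]string{"localhost:9092"}, nil) // placeholder address
	if err != nil {
		return err
	}
	defer func() { _ = client.Close() }()

	topics, err := client.Topics()
	if err != nil {
		return err
	}
	Logger.Println("known topics:", topics)
	return nil
}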
func (client *client) Config() *Config {
return client.conf
}
func (client *client) Brokers() []*Broker {
client.lock.RLock()
defer client.lock.RUnlock()
brokers := make([]*Broker, 0)
for _, broker := range client.brokers {
brokers = append(brokers, broker)
}
return brokers
}
func (client *client) Close() error {
if client.Closed() {
// Chances are this is being called from a defer() and the error will go unobserved
// so we go ahead and log the event in this case.
Logger.Printf("Close() called on already closed client")
return ErrClosedClient
}
// shutdown and wait for the background thread before we take the lock, to avoid races
close(client.closer)
<-client.closed
client.lock.Lock()
defer client.lock.Unlock()
Logger.Println("Closing Client")
for _, broker := range client.brokers {
safeAsyncClose(broker)
}
for _, broker := range client.seedBrokers {
safeAsyncClose(broker)
}
client.brokers = nil
client.metadata = nil
client.metadataTopics = nil
return nil
}
func (client *client) Closed() bool {
return client.brokers == nil
}
func (client *client) Topics() ([]string, error) {
if client.Closed() {
return nil, ErrClosedClient
}
client.lock.RLock()
defer client.lock.RUnlock()
ret := make([]string, 0, len(client.metadata))
for topic := range client.metadata {
ret = append(ret, topic)
}
return ret, nil
}
func (client *client) MetadataTopics() ([]string, error) {
if client.Closed() {
return nil, ErrClosedClient
}
client.lock.RLock()
defer client.lock.RUnlock()
ret := make([]string, 0, len(client.metadataTopics))
for topic := range client.metadataTopics {
ret = append(ret, topic)
}
return ret, nil
}
func (client *client) Partitions(topic string) ([]int32, error) {
if client.Closed() {
return nil, ErrClosedClient
}
partitions := client.cachedPartitions(topic, allPartitions)
if len(partitions) == 0 {
err := client.RefreshMetadata(topic)
if err != nil {
return nil, err
}
partitions = client.cachedPartitions(topic, allPartitions)
}
if partitions == nil {
return nil, ErrUnknownTopicOrPartition
}
return partitions, nil
}
func (client *client) WritablePartitions(topic string) ([]int32, error) {
if client.Closed() {
return nil, ErrClosedClient
}
partitions := client.cachedPartitions(topic, writablePartitions)
// len==0 catches when it's nil (no such topic) and the odd case when every single
// partition is undergoing leader election simultaneously. Callers have to be able to handle
// this function returning an empty slice (which is a valid return value) but catching it
// here the first time (note we *don't* catch it below where we return ErrUnknownTopicOrPartition) triggers
// a metadata refresh as a nicety so callers can just try again and don't have to manually
// trigger a refresh (otherwise they'd just keep getting a stale cached copy).
if len(partitions) == 0 {
err := client.RefreshMetadata(topic)
if err != nil {
return nil, err
}
partitions = client.cachedPartitions(topic, writablePartitions)
}
if partitions == nil {
return nil, ErrUnknownTopicOrPartition
}
return partitions, nil
}
func (client *client) Replicas(topic string, partitionID int32) ([]int32, error) {
if client.Closed() {
return nil, ErrClosedClient
}
metadata := client.cachedMetadata(topic, partitionID)
if metadata == nil {
err := client.RefreshMetadata(topic)
if err != nil {
return nil, err
}
metadata = client.cachedMetadata(topic, partitionID)
}
if metadata == nil {
return nil, ErrUnknownTopicOrPartition
}
if metadata.Err == ErrReplicaNotAvailable {
return dupInt32Slice(metadata.Replicas), metadata.Err
}
return dupInt32Slice(metadata.Replicas), nil
}
func (client *client) InSyncReplicas(topic string, partitionID int32) ([]int32, error) {
if client.Closed() {
return nil, ErrClosedClient
}
metadata := client.cachedMetadata(topic, partitionID)
if metadata == nil {
err := client.RefreshMetadata(topic)
if err != nil {
return nil, err
}
metadata = client.cachedMetadata(topic, partitionID)
}
if metadata == nil {
return nil, ErrUnknownTopicOrPartition
}
if metadata.Err == ErrReplicaNotAvailable {
return dupInt32Slice(metadata.Isr), metadata.Err
}
return dupInt32Slice(metadata.Isr), nil
}
func (client *client) Leader(topic string, partitionID int32) (*Broker, error) {
if client.Closed() {
return nil, ErrClosedClient
}
leader, err := client.cachedLeader(topic, partitionID)
if leader == nil {
err = client.RefreshMetadata(topic)
if err != nil {
return nil, err
}
leader, err = client.cachedLeader(topic, partitionID)
}
return leader, err
}
func (client *client) RefreshMetadata(topics ...string) error {
if client.Closed() {
return ErrClosedClient
}
// Prior to 0.8.2, Kafka would throw exceptions on an empty topic rather than returning a proper
// error. We handle that case here by returning an error ourselves instead of sending the request
// off to Kafka. See: https://github.com/Shopify/sarama/pull/38#issuecomment-26362310
for _, topic := range topics {
if len(topic) == 0 {
return ErrInvalidTopic // this is the error that 0.8.2 and later correctly return
}
}
return client.tryRefreshMetadata(topics, client.conf.Metadata.Retry.Max)
}
func (client *client) GetOffset(topic string, partitionID int32, time int64) (int64, error) {
if client.Closed() {
return -1, ErrClosedClient
}
offset, err := client.getOffset(topic, partitionID, time)
if err != nil {
if err := client.RefreshMetadata(topic); err != nil {
return -1, err
}
return client.getOffset(topic, partitionID, time)
}
return offset, err
}
func (client *client) Controller() (*Broker, error) {
if client.Closed() {
return nil, ErrClosedClient
}
if !client.conf.Version.IsAtLeast(V0_10_0_0) {
return nil, ErrUnsupportedVersion
}
controller := client.cachedController()
if controller == nil {
if err := client.refreshMetadata(); err != nil {
return nil, err
}
controller = client.cachedController()
}
if controller == nil {
return nil, ErrControllerNotAvailable
}
_ = controller.Open(client.conf)
return controller, nil
}
func (client *client) Coordinator(consumerGroup string) (*Broker, error) {
if client.Closed() {
return nil, ErrClosedClient
}
coordinator := client.cachedCoordinator(consumerGroup)
if coordinator == nil {
if err := client.RefreshCoordinator(consumerGroup); err != nil {
return nil, err
}
coordinator = client.cachedCoordinator(consumerGroup)
}
if coordinator == nil {
return nil, ErrConsumerCoordinatorNotAvailable
}
_ = coordinator.Open(client.conf)
return coordinator, nil
}
func (client *client) RefreshCoordinator(consumerGroup string) error {
if client.Closed() {
return ErrClosedClient
}
response, err := client.getConsumerMetadata(consumerGroup, client.conf.Metadata.Retry.Max)
if err != nil {
return err
}
client.lock.Lock()
defer client.lock.Unlock()
client.registerBroker(response.Coordinator)
client.coordinators[consumerGroup] = response.Coordinator.ID()
return nil
}
// private broker management helpers
// registerBroker makes sure a broker received by a Metadata or Coordinator request is registered
// in the brokers map. If a broker with the same ID but a different address is already registered,
// the stale entry is closed and replaced. You must hold the write lock before calling this function.
func (client *client) registerBroker(broker *Broker) {
if client.brokers[broker.ID()] == nil {
client.brokers[broker.ID()] = broker
Logger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr())
} else if broker.Addr() != client.brokers[broker.ID()].Addr() {
safeAsyncClose(client.brokers[broker.ID()])
client.brokers[broker.ID()] = broker
Logger.Printf("client/brokers replaced registered broker #%d with %s", broker.ID(), broker.Addr())
}
}
// deregisterBroker removes a broker from the seedBrokers list, and if it's
// not a seed broker, removes it from the brokers map completely.
func (client *client) deregisterBroker(broker *Broker) {
client.lock.Lock()
defer client.lock.Unlock()
if len(client.seedBrokers) > 0 && broker == client.seedBrokers[0] {
client.deadSeeds = append(client.deadSeeds, broker)
client.seedBrokers = client.seedBrokers[1:]
} else {
// we do this so that our loop in `tryRefreshMetadata` doesn't go on forever,
// but we really shouldn't have to; once that loop is made better this case can be
// removed, and the function generally can be renamed from `deregisterBroker` to
// `nextSeedBroker` or something
Logger.Printf("client/brokers deregistered broker #%d at %s", broker.ID(), broker.Addr())
delete(client.brokers, broker.ID())
}
}
func (client *client) resurrectDeadBrokers() {
client.lock.Lock()
defer client.lock.Unlock()
Logger.Printf("client/brokers resurrecting %d dead seed brokers", len(client.deadSeeds))
client.seedBrokers = append(client.seedBrokers, client.deadSeeds...)
client.deadSeeds = nil
}
func (client *client) any() *Broker {
client.lock.RLock()
defer client.lock.RUnlock()
if len(client.seedBrokers) > 0 {
_ = client.seedBrokers[0].Open(client.conf)
return client.seedBrokers[0]
}
// not guaranteed to be random *or* deterministic
for _, broker := range client.brokers {
_ = broker.Open(client.conf)
return broker
}
return nil
}
// private caching/lazy metadata helpers
type partitionType int
const (
allPartitions partitionType = iota
writablePartitions
// If you add any more types, update the partition cache in updateMetadata()
// Ensure this is the last partition type value
maxPartitionIndex
)
func (client *client) cachedMetadata(topic string, partitionID int32) *PartitionMetadata {
client.lock.RLock()
defer client.lock.RUnlock()
partitions := client.metadata[topic]
if partitions != nil {
return partitions[partitionID]
}
return nil
}
func (client *client) cachedPartitions(topic string, partitionSet partitionType) []int32 {
client.lock.RLock()
defer client.lock.RUnlock()
partitions, exists := client.cachedPartitionsResults[topic]
if !exists {
return nil
}
return partitions[partitionSet]
}
func (client *client) setPartitionCache(topic string, partitionSet partitionType) []int32 {
partitions := client.metadata[topic]
if partitions == nil {
return nil
}
ret := make([]int32, 0, len(partitions))
for _, partition := range partitions {
if partitionSet == writablePartitions && partition.Err == ErrLeaderNotAvailable {
continue
}
ret = append(ret, partition.ID)
}
sort.Sort(int32Slice(ret))
return ret
}
func (client *client) cachedLeader(topic string, partitionID int32) (*Broker, error) {
client.lock.RLock()
defer client.lock.RUnlock()
partitions := client.metadata[topic]
if partitions != nil {
metadata, ok := partitions[partitionID]
if ok {
if metadata.Err == ErrLeaderNotAvailable {
return nil, ErrLeaderNotAvailable
}
b := client.brokers[metadata.Leader]
if b == nil {
return nil, ErrLeaderNotAvailable
}
_ = b.Open(client.conf)
return b, nil
}
}
return nil, ErrUnknownTopicOrPartition
}
func (client *client) getOffset(topic string, partitionID int32, time int64) (int64, error) {
broker, err := client.Leader(topic, partitionID)
if err != nil {
return -1, err
}
request := &OffsetRequest{}
if client.conf.Version.IsAtLeast(V0_10_1_0) {
request.Version = 1
}
request.AddBlock(topic, partitionID, time, 1)
response, err := broker.GetAvailableOffsets(request)
if err != nil {
_ = broker.Close()
return -1, err
}
block := response.GetBlock(topic, partitionID)
if block == nil {
_ = broker.Close()
return -1, ErrIncompleteResponse
}
if block.Err != ErrNoError {
return -1, block.Err
}
if len(block.Offsets) != 1 {
return -1, ErrOffsetOutOfRange
}
return block.Offsets[0], nil
}
// core metadata update logic
func (client *client) backgroundMetadataUpdater() {
defer close(client.closed)
if client.conf.Metadata.RefreshFrequency == time.Duration(0) {
return
}
ticker := time.NewTicker(client.conf.Metadata.RefreshFrequency)
defer ticker.Stop()
for {
select {
case <-ticker.C:
if err := client.refreshMetadata(); err != nil {
Logger.Println("Client background metadata update:", err)
}
case <-client.closer:
return
}
}
}
func (client *client) refreshMetadata() error {
topics := []string{}
if !client.conf.Metadata.Full {
if specificTopics, err := client.MetadataTopics(); err != nil {
return err
} else if len(specificTopics) == 0 {
return ErrNoTopicsToUpdateMetadata
} else {
topics = specificTopics
}
}
if err := client.RefreshMetadata(topics...); err != nil {
return err
}
return nil
}
func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int) error {
retry := func(err error) error {
if attemptsRemaining > 0 {
Logger.Printf("client/metadata retrying after %dms... (%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining)
time.Sleep(client.conf.Metadata.Retry.Backoff)
return client.tryRefreshMetadata(topics, attemptsRemaining-1)
}
return err
}
for broker := client.any(); broker != nil; broker = client.any() {
if len(topics) > 0 {
Logger.Printf("client/metadata fetching metadata for %v from broker %s\n", topics, broker.addr)
} else {
Logger.Printf("client/metadata fetching metadata for all topics from broker %s\n", broker.addr)
}
req := &MetadataRequest{Topics: topics}
if client.conf.Version.IsAtLeast(V0_10_0_0) {
req.Version = 1
}
response, err := broker.GetMetadata(req)
switch err.(type) {
case nil:
allKnownMetaData := len(topics) == 0
// valid response, use it
shouldRetry, err := client.updateMetadata(response, allKnownMetaData)
if shouldRetry {
Logger.Println("client/metadata found some partitions to be leaderless")
return retry(err) // note: err can be nil
}
return err
case PacketEncodingError:
// didn't even send, return the error
return err
default:
// some other error, remove that broker and try again
Logger.Println("client/metadata got error from broker while fetching metadata:", err)
_ = broker.Close()
client.deregisterBroker(broker)
}
}
Logger.Println("client/metadata no available broker to send metadata request to")
client.resurrectDeadBrokers()
return retry(ErrOutOfBrokers)
}
// if no fatal error, returns a list of topics that need retrying due to ErrLeaderNotAvailable
func (client *client) updateMetadata(data *MetadataResponse, allKnownMetaData bool) (retry bool, err error) {
client.lock.Lock()
defer client.lock.Unlock()
// For all the brokers we received:
// - if it is a new ID, save it
// - if it is an existing ID, but the address we have is stale, discard the old one and save it
// - otherwise ignore it, replacing our existing one would just bounce the connection
for _, broker := range data.Brokers {
client.registerBroker(broker)
}
client.controllerID = data.ControllerID
if allKnownMetaData {
client.metadata = make(map[string]map[int32]*PartitionMetadata)
client.metadataTopics = make(map[string]none)
client.cachedPartitionsResults = make(map[string][maxPartitionIndex][]int32)
}
for _, topic := range data.Topics {
// topics must be added to `metadataTopics` first, to guarantee that all
// requested topics are recorded and remain trackable for periodic
// metadata refreshes.
if _, exists := client.metadataTopics[topic.Name]; !exists {
client.metadataTopics[topic.Name] = none{}
}
delete(client.metadata, topic.Name)
delete(client.cachedPartitionsResults, topic.Name)
switch topic.Err {
case ErrNoError:
break
case ErrInvalidTopic, ErrTopicAuthorizationFailed: // don't retry, don't store partial results
err = topic.Err
continue
case ErrUnknownTopicOrPartition: // retry, do not store partial partition results
err = topic.Err
retry = true
continue
case ErrLeaderNotAvailable: // retry, but store partial partition results
retry = true
break
default: // don't retry, don't store partial results
Logger.Printf("Unexpected topic-level metadata error: %s", topic.Err)
err = topic.Err
continue
}
client.metadata[topic.Name] = make(map[int32]*PartitionMetadata, len(topic.Partitions))
for _, partition := range topic.Partitions {
client.metadata[topic.Name][partition.ID] = partition
if partition.Err == ErrLeaderNotAvailable {
retry = true
}
}
var partitionCache [maxPartitionIndex][]int32
partitionCache[allPartitions] = client.setPartitionCache(topic.Name, allPartitions)
partitionCache[writablePartitions] = client.setPartitionCache(topic.Name, writablePartitions)
client.cachedPartitionsResults[topic.Name] = partitionCache
}
return
}
func (client *client) cachedCoordinator(consumerGroup string) *Broker {
client.lock.RLock()
defer client.lock.RUnlock()
if coordinatorID, ok := client.coordinators[consumerGroup]; ok {
return client.brokers[coordinatorID]
}
return nil
}
func (client *client) cachedController() *Broker {
client.lock.RLock()
defer client.lock.RUnlock()
return client.brokers[client.controllerID]
}
func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemaining int) (*FindCoordinatorResponse, error) {
retry := func(err error) (*FindCoordinatorResponse, error) {
if attemptsRemaining > 0 {
Logger.Printf("client/coordinator retrying after %dms... (%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining)
time.Sleep(client.conf.Metadata.Retry.Backoff)
return client.getConsumerMetadata(consumerGroup, attemptsRemaining-1)
}
return nil, err
}
for broker := client.any(); broker != nil; broker = client.any() {
Logger.Printf("client/coordinator requesting coordinator for consumergroup %s from %s\n", consumerGroup, broker.Addr())
request := new(FindCoordinatorRequest)
request.CoordinatorKey = consumerGroup
request.CoordinatorType = CoordinatorGroup
response, err := broker.FindCoordinator(request)
if err != nil {
Logger.Printf("client/coordinator request to broker %s failed: %s\n", broker.Addr(), err)
switch err.(type) {
case PacketEncodingError:
return nil, err
default:
_ = broker.Close()
client.deregisterBroker(broker)
continue
}
}
switch response.Err {
case ErrNoError:
Logger.Printf("client/coordinator coordinator for consumergroup %s is #%d (%s)\n", consumerGroup, response.Coordinator.ID(), response.Coordinator.Addr())
return response, nil
case ErrConsumerCoordinatorNotAvailable:
Logger.Printf("client/coordinator coordinator for consumer group %s is not available\n", consumerGroup)
// This is very ugly, but this scenario will only happen once per cluster.
// The __consumer_offsets topic only has to be created one time.
// The number of partitions is not configurable, but partition 0 should always exist.
if _, err := client.Leader("__consumer_offsets", 0); err != nil {
Logger.Printf("client/coordinator the __consumer_offsets topic is not initialized completely yet. Waiting 2 seconds...\n")
time.Sleep(2 * time.Second)
}
return retry(ErrConsumerCoordinatorNotAvailable)
default:
return nil, response.Err
}
}
Logger.Println("client/coordinator no available broker to send consumer metadata request to")
client.resurrectDeadBrokers()
return retry(ErrOutOfBrokers)
}
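An editorial aside for readers of this vendored code: getConsumerMetadata's `retry` closure is a small recursive retry-with-backoff helper. Below is a minimal, self-contained sketch of the same pattern; `fetchWithRetry` and the simulated error are illustrative names, not part of sarama.

package main

import (
	"errors"
	"fmt"
	"time"
)

// fetchWithRetry calls fn, and on failure sleeps for backoff and tries again
// until attempts is exhausted -- the same shape as the retry closure above.
func fetchWithRetry(attempts int, backoff time.Duration, fn func() error) error {
	err := fn()
	if err == nil || attempts <= 0 {
		return err
	}
	time.Sleep(backoff)
	return fetchWithRetry(attempts-1, backoff, fn)
}

func main() {
	calls := 0
	err := fetchWithRetry(3, 10*time.Millisecond, func() error {
		calls++
		if calls < 3 {
			return errors.New("coordinator not available")
		}
		return nil
	})
	fmt.Println(calls, err) // 3 <nil>
}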

563
vendor/github.com/Shopify/sarama/config.go generated vendored Normal file
View File

@ -0,0 +1,563 @@
package sarama
import (
"compress/gzip"
"crypto/tls"
"fmt"
"io/ioutil"
"net"
"regexp"
"time"
"github.com/rcrowley/go-metrics"
)
const defaultClientID = "sarama"
var validID = regexp.MustCompile(`\A[A-Za-z0-9._-]+\z`)
// Config is used to pass multiple configuration options to Sarama's constructors.
type Config struct {
// Admin is the namespace for ClusterAdmin properties used by the administrative Kafka client.
Admin struct {
// The maximum duration the administrative Kafka client will wait for ClusterAdmin operations,
// including topics, brokers, configurations and ACLs (defaults to 3 seconds).
Timeout time.Duration
}
// Net is the namespace for network-level properties used by the Broker, and
// shared by the Client/Producer/Consumer.
Net struct {
// How many outstanding requests a connection is allowed to have before
// sending on it blocks (default 5).
MaxOpenRequests int
// All three of the below configurations are similar to the
// `socket.timeout.ms` setting in JVM kafka. All of them default
// to 30 seconds.
DialTimeout time.Duration // How long to wait for the initial connection.
ReadTimeout time.Duration // How long to wait for a response.
WriteTimeout time.Duration // How long to wait for a transmit.
TLS struct {
// Whether or not to use TLS when connecting to the broker
// (defaults to false).
Enable bool
// The TLS configuration to use for secure connections if
// enabled (defaults to nil).
Config *tls.Config
}
// SASL based authentication with broker. While there are multiple SASL authentication methods,
// the current implementation is limited to plaintext (SASL/PLAIN) authentication.
SASL struct {
// Whether or not to use SASL authentication when connecting to the broker
// (defaults to false).
Enable bool
// Whether or not to send the Kafka SASL handshake first if enabled
// (defaults to true). You should only set this to false if you're using
// a non-Kafka SASL proxy.
Handshake bool
// Username and password for SASL/PLAIN authentication.
User string
Password string
}
// KeepAlive specifies the keep-alive period for an active network connection.
// If zero, keep-alives are disabled. (default is 0: disabled).
KeepAlive time.Duration
// LocalAddr is the local address to use when dialing an
// address. The address must be of a compatible type for the
// network being dialed.
// If nil, a local address is automatically chosen.
LocalAddr net.Addr
}
// Metadata is the namespace for metadata management properties used by the
// Client, and shared by the Producer/Consumer.
Metadata struct {
Retry struct {
// The total number of times to retry a metadata request when the
// cluster is in the middle of a leader election (default 3).
Max int
// How long to wait for leader election to occur before retrying
// (default 250ms). Similar to the JVM's `retry.backoff.ms`.
Backoff time.Duration
}
// How frequently to refresh the cluster metadata in the background.
// Defaults to 10 minutes. Set to 0 to disable. Similar to
// `topic.metadata.refresh.interval.ms` in the JVM version.
RefreshFrequency time.Duration
// Whether to maintain a full set of metadata for all topics, or just
// the minimal set that has been necessary so far. The full set is simpler
// and usually more convenient, but can take up a substantial amount of
// memory if you have many topics and partitions. Defaults to true.
Full bool
}
// Producer is the namespace for configuration related to producing messages,
// used by the Producer.
Producer struct {
// The maximum permitted size of a message (defaults to 1000000). Should be
// set equal to or smaller than the broker's `message.max.bytes`.
MaxMessageBytes int
// The level of acknowledgement reliability needed from the broker (defaults
// to WaitForLocal). Equivalent to the `request.required.acks` setting of the
// JVM producer.
RequiredAcks RequiredAcks
// The maximum duration the broker will wait for the receipt of the number of
// RequiredAcks (defaults to 10 seconds). This is only relevant when
// RequiredAcks is set to WaitForAll or a number > 1. Only supports
// millisecond resolution, nanoseconds will be truncated. Equivalent to
// the JVM producer's `request.timeout.ms` setting.
Timeout time.Duration
// The type of compression to use on messages (defaults to no compression).
// Similar to `compression.codec` setting of the JVM producer.
Compression CompressionCodec
// The level of compression to use on messages. The meaning depends
// on the actual compression type used and defaults to the default
// compression level for the codec.
CompressionLevel int
// Generates partitioners for choosing the partition to send messages to
// (defaults to hashing the message key). Similar to the `partitioner.class`
// setting for the JVM producer.
Partitioner PartitionerConstructor
// Return specifies what channels will be populated. If they are set to true,
// you must read from the respective channels to prevent deadlock. If,
// however, this config is used to create a `SyncProducer`, both must be set
// to true and you shall not read from the channels since the producer does
// this internally.
Return struct {
// If enabled, successfully delivered messages will be returned on the
// Successes channel (default disabled).
Successes bool
// If enabled, messages that failed to deliver will be returned on the
// Errors channel, including error (default enabled).
Errors bool
}
// The following config options control how often messages are batched up and
// sent to the broker. By default, messages are sent as fast as possible, and
// all messages received while the current batch is in-flight are placed
// into the subsequent batch.
Flush struct {
// The best-effort number of bytes needed to trigger a flush. Use the
// global sarama.MaxRequestSize to set a hard upper limit.
Bytes int
// The best-effort number of messages needed to trigger a flush. Use
// `MaxMessages` to set a hard upper limit.
Messages int
// The best-effort frequency of flushes. Equivalent to
// `queue.buffering.max.ms` setting of JVM producer.
Frequency time.Duration
// The maximum number of messages the producer will send in a single
// broker request. Defaults to 0 for unlimited. Similar to
// `queue.buffering.max.messages` in the JVM producer.
MaxMessages int
}
Retry struct {
// The total number of times to retry sending a message (default 3).
// Similar to the `message.send.max.retries` setting of the JVM producer.
Max int
// How long to wait for the cluster to settle between retries
// (default 100ms). Similar to the `retry.backoff.ms` setting of the
// JVM producer.
Backoff time.Duration
}
}
// Consumer is the namespace for configuration related to consuming messages,
// used by the Consumer.
Consumer struct {
// Group is the namespace for configuring consumer group.
Group struct {
Session struct {
// The timeout used to detect consumer failures when using Kafka's group management facility.
// The consumer sends periodic heartbeats to indicate its liveness to the broker.
// If no heartbeats are received by the broker before the expiration of this session timeout,
// then the broker will remove this consumer from the group and initiate a rebalance.
// Note that the value must be in the allowable range as configured in the broker configuration
// by `group.min.session.timeout.ms` and `group.max.session.timeout.ms` (default 10s)
Timeout time.Duration
}
Heartbeat struct {
// The expected time between heartbeats to the consumer coordinator when using Kafka's group
// management facilities. Heartbeats are used to ensure that the consumer's session stays active and
// to facilitate rebalancing when new consumers join or leave the group.
// The value must be set lower than Consumer.Group.Session.Timeout, but typically should be set no
// higher than 1/3 of that value.
// It can be adjusted even lower to control the expected time for normal rebalances (default 3s)
Interval time.Duration
}
Rebalance struct {
// Strategy for allocating topic partitions to members (default BalanceStrategyRange)
Strategy BalanceStrategy
// The maximum allowed time for each worker to join the group once a rebalance has begun.
// This is basically a limit on the amount of time needed for all tasks to flush any pending
// data and commit offsets. If the timeout is exceeded, then the worker will be removed from
// the group, which will cause offset commit failures (default 60s).
Timeout time.Duration
Retry struct {
// When a new consumer joins a consumer group the set of consumers attempt to "rebalance"
// the load to assign partitions to each consumer. If the set of consumers changes while
// this assignment is taking place the rebalance will fail and retry. This setting controls
// the maximum number of attempts before giving up (default 4).
Max int
// Backoff time between retries during rebalance (default 2s)
Backoff time.Duration
}
}
Member struct {
// Custom metadata to include when joining the group. The user data for all joined members
// can be retrieved by sending a DescribeGroupRequest to the broker that is the
// coordinator for the group.
UserData []byte
}
}
Retry struct {
// How long to wait after failing to read from a partition before
// trying again (default 2s).
Backoff time.Duration
}
// Fetch is the namespace for controlling how many bytes are retrieved by any
// given request.
Fetch struct {
// The minimum number of message bytes to fetch in a request - the broker
// will wait until at least this many are available. The default is 1,
// as 0 causes the consumer to spin when no messages are available.
// Equivalent to the JVM's `fetch.min.bytes`.
Min int32
// The default number of message bytes to fetch from the broker in each
// request (default 1MB). This should be larger than the majority of
// your messages, or else the consumer will spend a lot of time
// negotiating sizes and not actually consuming. Similar to the JVM's
// `fetch.message.max.bytes`.
Default int32
// The maximum number of message bytes to fetch from the broker in a
// single request. Messages larger than this will return
// ErrMessageTooLarge and will not be consumable, so you must be sure
// this is at least as large as your largest message. Defaults to 0
// (no limit). Similar to the JVM's `fetch.message.max.bytes`. The
// global `sarama.MaxResponseSize` still applies.
Max int32
}
// The maximum amount of time the broker will wait for Consumer.Fetch.Min
// bytes to become available before it returns fewer than that anyway. The
// default is 250ms, since 0 causes the consumer to spin when no events are
// available. 100-500ms is a reasonable range for most cases. Kafka only
// supports precision up to milliseconds; nanoseconds will be truncated.
// Equivalent to the JVM's `fetch.wait.max.ms`.
MaxWaitTime time.Duration
// The maximum amount of time the consumer expects a message takes to
// process for the user. If writing to the Messages channel takes longer
// than this, that partition will stop fetching more messages until it
// can proceed again.
// Note that, since the Messages channel is buffered, the actual grace time is
// (MaxProcessingTime * ChannelBufferSize). Defaults to 100ms.
// If a message is not written to the Messages channel between two ticks
// of the expiryTicker then a timeout is detected.
// Using a ticker instead of a timer to detect timeouts should typically
// result in many fewer calls to Timer functions which may result in a
// significant performance improvement if many messages are being sent
// and timeouts are infrequent.
// The disadvantage of using a ticker instead of a timer is that
// timeouts will be less accurate. That is, the effective timeout could
// be between `MaxProcessingTime` and `2 * MaxProcessingTime`. For
// example, if `MaxProcessingTime` is 100ms then a delay of 180ms
// between two messages being sent may not be recognized as a timeout.
MaxProcessingTime time.Duration
// Return specifies what channels will be populated. If they are set to true,
// you must read from them to prevent deadlock.
Return struct {
// If enabled, any errors that occurred while consuming are returned on
// the Errors channel (default disabled).
Errors bool
}
// Offsets specifies configuration for how and when to commit consumed
// offsets. This currently requires the manual use of an OffsetManager
// but will eventually be automated.
Offsets struct {
// How frequently to commit updated offsets. Defaults to 1s.
CommitInterval time.Duration
// The initial offset to use if no offset was previously committed.
// Should be OffsetNewest or OffsetOldest. Defaults to OffsetNewest.
Initial int64
// The retention duration for committed offsets. If zero, disabled
// (in which case the `offsets.retention.minutes` option on the
// broker will be used). Kafka only supports precision up to
// milliseconds; nanoseconds will be truncated. Requires Kafka
// broker version 0.9.0 or later.
// (default is 0: disabled).
Retention time.Duration
Retry struct {
// The total number of times to retry failing commit
// requests during OffsetManager shutdown (default 3).
Max int
}
}
}
// A user-provided string sent with every request to the brokers for logging,
// debugging, and auditing purposes. Defaults to "sarama", but you should
// probably set it to something specific to your application.
ClientID string
// The number of events to buffer in internal and external channels. This
// permits the producer and consumer to continue processing some messages
// in the background while user code is working, greatly improving throughput.
// Defaults to 256.
ChannelBufferSize int
// The version of Kafka that Sarama will assume it is running against.
// Defaults to the oldest supported stable version. Since Kafka provides
// backwards-compatibility, setting it to a version older than you have
// will not break anything, although it may prevent you from using the
// latest features. Setting it to a version greater than you are actually
// running may lead to random breakage.
Version KafkaVersion
// The registry to define metrics into.
// Defaults to a local registry.
// If you want to disable metrics gathering, set "metrics.UseNilMetrics" to "true"
// prior to starting Sarama.
// See Examples on how to use the metrics registry
MetricRegistry metrics.Registry
}
// NewConfig returns a new configuration instance with sane defaults.
func NewConfig() *Config {
c := &Config{}
c.Admin.Timeout = 3 * time.Second
c.Net.MaxOpenRequests = 5
c.Net.DialTimeout = 30 * time.Second
c.Net.ReadTimeout = 30 * time.Second
c.Net.WriteTimeout = 30 * time.Second
c.Net.SASL.Handshake = true
c.Metadata.Retry.Max = 3
c.Metadata.Retry.Backoff = 250 * time.Millisecond
c.Metadata.RefreshFrequency = 10 * time.Minute
c.Metadata.Full = true
c.Producer.MaxMessageBytes = 1000000
c.Producer.RequiredAcks = WaitForLocal
c.Producer.Timeout = 10 * time.Second
c.Producer.Partitioner = NewHashPartitioner
c.Producer.Retry.Max = 3
c.Producer.Retry.Backoff = 100 * time.Millisecond
c.Producer.Return.Errors = true
c.Producer.CompressionLevel = CompressionLevelDefault
c.Consumer.Fetch.Min = 1
c.Consumer.Fetch.Default = 1024 * 1024
c.Consumer.Retry.Backoff = 2 * time.Second
c.Consumer.MaxWaitTime = 250 * time.Millisecond
c.Consumer.MaxProcessingTime = 100 * time.Millisecond
c.Consumer.Return.Errors = false
c.Consumer.Offsets.CommitInterval = 1 * time.Second
c.Consumer.Offsets.Initial = OffsetNewest
c.Consumer.Offsets.Retry.Max = 3
c.Consumer.Group.Session.Timeout = 10 * time.Second
c.Consumer.Group.Heartbeat.Interval = 3 * time.Second
c.Consumer.Group.Rebalance.Strategy = BalanceStrategyRange
c.Consumer.Group.Rebalance.Timeout = 60 * time.Second
c.Consumer.Group.Rebalance.Retry.Max = 4
c.Consumer.Group.Rebalance.Retry.Backoff = 2 * time.Second
c.ClientID = defaultClientID
c.ChannelBufferSize = 256
c.Version = MinVersion
c.MetricRegistry = metrics.NewRegistry()
return c
}
// Validate checks a Config instance. It will return a
// ConfigurationError if the specified values don't make sense.
func (c *Config) Validate() error {
// some configuration values should produce a warning but not fail validation outright; do those first
if !c.Net.TLS.Enable && c.Net.TLS.Config != nil {
Logger.Println("Net.TLS is disabled but a non-nil configuration was provided.")
}
if !c.Net.SASL.Enable {
if c.Net.SASL.User != "" {
Logger.Println("Net.SASL is disabled but a non-empty username was provided.")
}
if c.Net.SASL.Password != "" {
Logger.Println("Net.SASL is disabled but a non-empty password was provided.")
}
}
if c.Producer.RequiredAcks > 1 {
Logger.Println("Producer.RequiredAcks > 1 is deprecated and will raise an exception with kafka >= 0.8.2.0.")
}
if c.Producer.MaxMessageBytes >= int(MaxRequestSize) {
Logger.Println("Producer.MaxMessageBytes must be smaller than MaxRequestSize; it will be ignored.")
}
if c.Producer.Flush.Bytes >= int(MaxRequestSize) {
Logger.Println("Producer.Flush.Bytes must be smaller than MaxRequestSize; it will be ignored.")
}
if (c.Producer.Flush.Bytes > 0 || c.Producer.Flush.Messages > 0) && c.Producer.Flush.Frequency == 0 {
Logger.Println("Producer.Flush: Bytes or Messages are set, but Frequency is not; messages may not get flushed.")
}
if c.Producer.Timeout%time.Millisecond != 0 {
Logger.Println("Producer.Timeout only supports millisecond resolution; nanoseconds will be truncated.")
}
if c.Consumer.MaxWaitTime < 100*time.Millisecond {
Logger.Println("Consumer.MaxWaitTime is very low, which can cause high CPU and network usage. See documentation for details.")
}
if c.Consumer.MaxWaitTime%time.Millisecond != 0 {
Logger.Println("Consumer.MaxWaitTime only supports millisecond precision; nanoseconds will be truncated.")
}
if c.Consumer.Offsets.Retention%time.Millisecond != 0 {
Logger.Println("Consumer.Offsets.Retention only supports millisecond precision; nanoseconds will be truncated.")
}
if c.Consumer.Group.Session.Timeout%time.Millisecond != 0 {
Logger.Println("Consumer.Group.Session.Timeout only supports millisecond precision; nanoseconds will be truncated.")
}
if c.Consumer.Group.Heartbeat.Interval%time.Millisecond != 0 {
Logger.Println("Consumer.Group.Heartbeat.Interval only supports millisecond precision; nanoseconds will be truncated.")
}
if c.Consumer.Group.Rebalance.Timeout%time.Millisecond != 0 {
Logger.Println("Consumer.Group.Rebalance.Timeout only supports millisecond precision; nanoseconds will be truncated.")
}
if c.ClientID == defaultClientID {
Logger.Println("ClientID is the default of 'sarama', you should consider setting it to something application-specific.")
}
// validate Net values
switch {
case c.Net.MaxOpenRequests <= 0:
return ConfigurationError("Net.MaxOpenRequests must be > 0")
case c.Net.DialTimeout <= 0:
return ConfigurationError("Net.DialTimeout must be > 0")
case c.Net.ReadTimeout <= 0:
return ConfigurationError("Net.ReadTimeout must be > 0")
case c.Net.WriteTimeout <= 0:
return ConfigurationError("Net.WriteTimeout must be > 0")
case c.Net.KeepAlive < 0:
return ConfigurationError("Net.KeepAlive must be >= 0")
case c.Net.SASL.Enable && c.Net.SASL.User == "":
return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled")
case c.Net.SASL.Enable && c.Net.SASL.Password == "":
return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled")
}
// validate the Admin values
switch {
case c.Admin.Timeout <= 0:
return ConfigurationError("Admin.Timeout must be > 0")
}
// validate the Metadata values
switch {
case c.Metadata.Retry.Max < 0:
return ConfigurationError("Metadata.Retry.Max must be >= 0")
case c.Metadata.Retry.Backoff < 0:
return ConfigurationError("Metadata.Retry.Backoff must be >= 0")
case c.Metadata.RefreshFrequency < 0:
return ConfigurationError("Metadata.RefreshFrequency must be >= 0")
}
// validate the Producer values
switch {
case c.Producer.MaxMessageBytes <= 0:
return ConfigurationError("Producer.MaxMessageBytes must be > 0")
case c.Producer.RequiredAcks < -1:
return ConfigurationError("Producer.RequiredAcks must be >= -1")
case c.Producer.Timeout <= 0:
return ConfigurationError("Producer.Timeout must be > 0")
case c.Producer.Partitioner == nil:
return ConfigurationError("Producer.Partitioner must not be nil")
case c.Producer.Flush.Bytes < 0:
return ConfigurationError("Producer.Flush.Bytes must be >= 0")
case c.Producer.Flush.Messages < 0:
return ConfigurationError("Producer.Flush.Messages must be >= 0")
case c.Producer.Flush.Frequency < 0:
return ConfigurationError("Producer.Flush.Frequency must be >= 0")
case c.Producer.Flush.MaxMessages < 0:
return ConfigurationError("Producer.Flush.MaxMessages must be >= 0")
case c.Producer.Flush.MaxMessages > 0 && c.Producer.Flush.MaxMessages < c.Producer.Flush.Messages:
return ConfigurationError("Producer.Flush.MaxMessages must be >= Producer.Flush.Messages when set")
case c.Producer.Retry.Max < 0:
return ConfigurationError("Producer.Retry.Max must be >= 0")
case c.Producer.Retry.Backoff < 0:
return ConfigurationError("Producer.Retry.Backoff must be >= 0")
}
if c.Producer.Compression == CompressionLZ4 && !c.Version.IsAtLeast(V0_10_0_0) {
return ConfigurationError("lz4 compression requires Version >= V0_10_0_0")
}
if c.Producer.Compression == CompressionGZIP {
if c.Producer.CompressionLevel != CompressionLevelDefault {
if _, err := gzip.NewWriterLevel(ioutil.Discard, c.Producer.CompressionLevel); err != nil {
return ConfigurationError(fmt.Sprintf("gzip compression does not work with level %d: %v", c.Producer.CompressionLevel, err))
}
}
}
// validate the Consumer values
switch {
case c.Consumer.Fetch.Min <= 0:
return ConfigurationError("Consumer.Fetch.Min must be > 0")
case c.Consumer.Fetch.Default <= 0:
return ConfigurationError("Consumer.Fetch.Default must be > 0")
case c.Consumer.Fetch.Max < 0:
return ConfigurationError("Consumer.Fetch.Max must be >= 0")
case c.Consumer.MaxWaitTime < 1*time.Millisecond:
return ConfigurationError("Consumer.MaxWaitTime must be >= 1ms")
case c.Consumer.MaxProcessingTime <= 0:
return ConfigurationError("Consumer.MaxProcessingTime must be > 0")
case c.Consumer.Retry.Backoff < 0:
return ConfigurationError("Consumer.Retry.Backoff must be >= 0")
case c.Consumer.Offsets.CommitInterval <= 0:
return ConfigurationError("Consumer.Offsets.CommitInterval must be > 0")
case c.Consumer.Offsets.Initial != OffsetOldest && c.Consumer.Offsets.Initial != OffsetNewest:
return ConfigurationError("Consumer.Offsets.Initial must be OffsetOldest or OffsetNewest")
case c.Consumer.Offsets.Retry.Max < 0:
return ConfigurationError("Consumer.Offsets.Retry.Max must be >= 0")
}
// validate the Consumer Group values
switch {
case c.Consumer.Group.Session.Timeout <= 2*time.Millisecond:
return ConfigurationError("Consumer.Group.Session.Timeout must be >= 2ms")
case c.Consumer.Group.Heartbeat.Interval < 1*time.Millisecond:
return ConfigurationError("Consumer.Group.Heartbeat.Interval must be >= 1ms")
case c.Consumer.Group.Heartbeat.Interval >= c.Consumer.Group.Session.Timeout:
return ConfigurationError("Consumer.Group.Heartbeat.Interval must be < Consumer.Group.Session.Timeout")
case c.Consumer.Group.Rebalance.Strategy == nil:
return ConfigurationError("Consumer.Group.Rebalance.Strategy must not be empty")
case c.Consumer.Group.Rebalance.Timeout <= time.Millisecond:
return ConfigurationError("Consumer.Group.Rebalance.Timeout must be >= 1ms")
case c.Consumer.Group.Rebalance.Retry.Max < 0:
return ConfigurationError("Consumer.Group.Rebalance.Retry.Max must be >= 0")
case c.Consumer.Group.Rebalance.Retry.Backoff < 0:
return ConfigurationError("Consumer.Group.Rebalance.Retry.Backoff must be >= 0")
}
// validate misc shared values
switch {
case c.ChannelBufferSize < 0:
return ConfigurationError("ChannelBufferSize must be >= 0")
case !validID.MatchString(c.ClientID):
return ConfigurationError("ClientID is invalid")
}
return nil
}
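A short usage sketch (editorial, not part of the vendored file): build a Config with NewConfig, override a few of the defaults documented above, and call Validate before passing the config to a constructor. The client ID and version shown are placeholder choices.

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.ClientID = "my-app"              // silences the default-ClientID warning
	cfg.Producer.Return.Successes = true // required when using a SyncProducer
	cfg.Consumer.Return.Errors = true    // surface consumer errors on the Errors channel
	cfg.Version = sarama.V0_10_2_0       // the minimum version for consumer groups
	if err := cfg.Validate(); err != nil {
		log.Fatalf("invalid sarama config: %v", err)
	}
	log.Println("config validated")
}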

15
vendor/github.com/Shopify/sarama/config_resources.go generated vendored Normal file
View File

@ -0,0 +1,15 @@
package sarama
type ConfigResourceType int8
// Taken from:
// https://cwiki.apache.org/confluence/display/KAFKA/KIP-133%3A+Describe+and+Alter+Configs+Admin+APIs#KIP-133:DescribeandAlterConfigsAdminAPIs-WireFormattypes
const (
UnknownResource ConfigResourceType = 0
AnyResource ConfigResourceType = 1
TopicResource ConfigResourceType = 2
GroupResource ConfigResourceType = 3
ClusterResource ConfigResourceType = 4
BrokerResource ConfigResourceType = 5
)
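A small illustrative helper (editorial, not part of the vendored file) that maps these constants to human-readable names for logging; the rendered names are assumptions of this sketch, only the constants come from the package.

package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

// resourceTypeName renders a ConfigResourceType for log output.
func resourceTypeName(t sarama.ConfigResourceType) string {
	switch t {
	case sarama.AnyResource:
		return "any"
	case sarama.TopicResource:
		return "topic"
	case sarama.GroupResource:
		return "group"
	case sarama.ClusterResource:
		return "cluster"
	case sarama.BrokerResource:
		return "broker"
	default:
		return "unknown"
	}
}

func main() {
	fmt.Println(resourceTypeName(sarama.TopicResource)) // topic
}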

807
vendor/github.com/Shopify/sarama/consumer.go generated vendored Normal file
View File

@ -0,0 +1,807 @@
package sarama
import (
"errors"
"fmt"
"sync"
"sync/atomic"
"time"
)
// ConsumerMessage encapsulates a Kafka message returned by the consumer.
type ConsumerMessage struct {
Key, Value []byte
Topic string
Partition int32
Offset int64
Timestamp time.Time // only set if kafka is version 0.10+, inner message timestamp
BlockTimestamp time.Time // only set if kafka is version 0.10+, outer (compressed) block timestamp
Headers []*RecordHeader // only set if kafka is version 0.11+
}
// ConsumerError is what is provided to the user when an error occurs.
// It wraps an error and includes the topic and partition.
type ConsumerError struct {
Topic string
Partition int32
Err error
}
func (ce ConsumerError) Error() string {
return fmt.Sprintf("kafka: error while consuming %s/%d: %s", ce.Topic, ce.Partition, ce.Err)
}
// ConsumerErrors is a type that wraps a batch of errors and implements the Error interface.
// It can be returned from the PartitionConsumer's Close methods to avoid the need to manually drain errors
// when stopping.
type ConsumerErrors []*ConsumerError
func (ce ConsumerErrors) Error() string {
return fmt.Sprintf("kafka: %d errors while consuming", len(ce))
}
// Consumer manages PartitionConsumers which process Kafka messages from brokers. You MUST call Close()
// on a consumer to avoid leaks, it will not be garbage-collected automatically when it passes out of
// scope.
//
// Sarama's Consumer type does not currently support automatic consumer-group rebalancing and offset tracking.
// For Zookeeper-based tracking (Kafka 0.8.2 and earlier), the https://github.com/wvanbergen/kafka library
// builds on Sarama to add this support. For Kafka-based tracking (Kafka 0.9 and later), the
// https://github.com/bsm/sarama-cluster library builds on Sarama to add this support.
type Consumer interface {
// Topics returns the set of available topics as retrieved from the cluster
// metadata. This method is the same as Client.Topics(), and is provided for
// convenience.
Topics() ([]string, error)
// Partitions returns the sorted list of all partition IDs for the given topic.
// This method is the same as Client.Partitions(), and is provided for convenience.
Partitions(topic string) ([]int32, error)
// ConsumePartition creates a PartitionConsumer on the given topic/partition with
// the given offset. It will return an error if this Consumer is already consuming
// on the given topic/partition. Offset can be a literal offset, or OffsetNewest
// or OffsetOldest
ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error)
// HighWaterMarks returns the current high water marks for each topic and partition.
// Consistency between partitions is not guaranteed since high water marks are updated separately.
HighWaterMarks() map[string]map[int32]int64
// Close shuts down the consumer. It must be called after all child
// PartitionConsumers have already been closed.
Close() error
}
type consumer struct {
client Client
conf *Config
ownClient bool
lock sync.Mutex
children map[string]map[int32]*partitionConsumer
brokerConsumers map[*Broker]*brokerConsumer
}
// NewConsumer creates a new consumer using the given broker addresses and configuration.
func NewConsumer(addrs []string, config *Config) (Consumer, error) {
client, err := NewClient(addrs, config)
if err != nil {
return nil, err
}
c, err := NewConsumerFromClient(client)
if err != nil {
return nil, err
}
c.(*consumer).ownClient = true
return c, nil
}
// NewConsumerFromClient creates a new consumer using the given client. It is still
// necessary to call Close() on the underlying client when shutting down this consumer.
func NewConsumerFromClient(client Client) (Consumer, error) {
// Check that we are not dealing with a closed Client before processing any other arguments
if client.Closed() {
return nil, ErrClosedClient
}
c := &consumer{
client: client,
conf: client.Config(),
children: make(map[string]map[int32]*partitionConsumer),
brokerConsumers: make(map[*Broker]*brokerConsumer),
}
return c, nil
}
func (c *consumer) Close() error {
if c.ownClient {
return c.client.Close()
}
return nil
}
func (c *consumer) Topics() ([]string, error) {
return c.client.Topics()
}
func (c *consumer) Partitions(topic string) ([]int32, error) {
return c.client.Partitions(topic)
}
func (c *consumer) ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error) {
child := &partitionConsumer{
consumer: c,
conf: c.conf,
topic: topic,
partition: partition,
messages: make(chan *ConsumerMessage, c.conf.ChannelBufferSize),
errors: make(chan *ConsumerError, c.conf.ChannelBufferSize),
feeder: make(chan *FetchResponse, 1),
trigger: make(chan none, 1),
dying: make(chan none),
fetchSize: c.conf.Consumer.Fetch.Default,
}
if err := child.chooseStartingOffset(offset); err != nil {
return nil, err
}
var leader *Broker
var err error
if leader, err = c.client.Leader(child.topic, child.partition); err != nil {
return nil, err
}
if err := c.addChild(child); err != nil {
return nil, err
}
go withRecover(child.dispatcher)
go withRecover(child.responseFeeder)
child.broker = c.refBrokerConsumer(leader)
child.broker.input <- child
return child, nil
}
func (c *consumer) HighWaterMarks() map[string]map[int32]int64 {
c.lock.Lock()
defer c.lock.Unlock()
hwms := make(map[string]map[int32]int64)
for topic, p := range c.children {
hwm := make(map[int32]int64, len(p))
for partition, pc := range p {
hwm[partition] = pc.HighWaterMarkOffset()
}
hwms[topic] = hwm
}
return hwms
}
func (c *consumer) addChild(child *partitionConsumer) error {
c.lock.Lock()
defer c.lock.Unlock()
topicChildren := c.children[child.topic]
if topicChildren == nil {
topicChildren = make(map[int32]*partitionConsumer)
c.children[child.topic] = topicChildren
}
if topicChildren[child.partition] != nil {
return ConfigurationError("That topic/partition is already being consumed")
}
topicChildren[child.partition] = child
return nil
}
func (c *consumer) removeChild(child *partitionConsumer) {
c.lock.Lock()
defer c.lock.Unlock()
delete(c.children[child.topic], child.partition)
}
func (c *consumer) refBrokerConsumer(broker *Broker) *brokerConsumer {
c.lock.Lock()
defer c.lock.Unlock()
bc := c.brokerConsumers[broker]
if bc == nil {
bc = c.newBrokerConsumer(broker)
c.brokerConsumers[broker] = bc
}
bc.refs++
return bc
}
func (c *consumer) unrefBrokerConsumer(brokerWorker *brokerConsumer) {
c.lock.Lock()
defer c.lock.Unlock()
brokerWorker.refs--
if brokerWorker.refs == 0 {
close(brokerWorker.input)
if c.brokerConsumers[brokerWorker.broker] == brokerWorker {
delete(c.brokerConsumers, brokerWorker.broker)
}
}
}
func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) {
c.lock.Lock()
defer c.lock.Unlock()
delete(c.brokerConsumers, brokerWorker.broker)
}
// PartitionConsumer
// PartitionConsumer processes Kafka messages from a given topic and partition. You MUST call one of Close() or
// AsyncClose() on a PartitionConsumer to avoid leaks; it will not be garbage-collected automatically when it passes out
// of scope.
//
// The simplest way of using a PartitionConsumer is to loop over its Messages channel using a for/range
// loop. The PartitionConsumer will only stop itself in one case: when the offset being consumed is reported
// as out of range by the brokers. In this case you should decide what you want to do (try a different offset,
// notify a human, etc) and handle it appropriately. For all other error cases, it will just keep retrying.
// By default, it logs these errors to sarama.Logger; if you want to be notified directly of all errors, set
// your config's Consumer.Return.Errors to true and read from the Errors channel, using a select statement
// or a separate goroutine. Check out the Consumer examples to see implementations of these different approaches.
//
// To terminate such a for/range loop while the loop is executing, call AsyncClose. This will kick off the process of
// consumer tear-down & return immediately. Continue to loop, servicing the Messages channel until the teardown process
// AsyncClose initiated closes it (thus terminating the for/range loop). If you've already ceased reading Messages, call
// Close; this will signal the PartitionConsumer's goroutines to begin shutting down (just like AsyncClose), but will
// also drain the Messages channel, harvest all errors & return them once cleanup has completed.
type PartitionConsumer interface {
// AsyncClose initiates a shutdown of the PartitionConsumer. This method will return immediately, after which you
// should continue to service the 'Messages' and 'Errors' channels until they are empty. It is required to call this
// function, or Close before a consumer object passes out of scope, as it will otherwise leak memory. You must call
// this before calling Close on the underlying client.
AsyncClose()
// Close stops the PartitionConsumer from fetching messages. It will initiate a shutdown just like AsyncClose, drain
// the Messages channel, harvest any errors & return them to the caller. Note that if you are continuing to service
// the Messages channel when this function is called, you will be competing with Close for messages; consider
// calling AsyncClose, instead. It is required to call this function (or AsyncClose) before a consumer object passes
// out of scope, as it will otherwise leak memory. You must call this before calling Close on the underlying client.
Close() error
// Messages returns the read channel for the messages that are returned by
// the broker.
Messages() <-chan *ConsumerMessage
// Errors returns a read channel of errors that occurred during consuming, if
// enabled. By default, errors are logged and not returned over this channel.
// If you want to implement any custom error handling, set your config's
// Consumer.Return.Errors setting to true, and read from this channel.
Errors() <-chan *ConsumerError
// HighWaterMarkOffset returns the high water mark offset of the partition,
// i.e. the offset that will be used for the next message that will be produced.
// You can use this to determine how far behind the processing is.
HighWaterMarkOffset() int64
}
type partitionConsumer struct {
highWaterMarkOffset int64 // must be at the top of the struct because https://golang.org/pkg/sync/atomic/#pkg-note-BUG
consumer *consumer
conf *Config
topic string
partition int32
broker *brokerConsumer
messages chan *ConsumerMessage
errors chan *ConsumerError
feeder chan *FetchResponse
trigger, dying chan none
responseResult error
closeOnce sync.Once
fetchSize int32
offset int64
}
var errTimedOut = errors.New("timed out feeding messages to the user") // not user-facing
func (child *partitionConsumer) sendError(err error) {
cErr := &ConsumerError{
Topic: child.topic,
Partition: child.partition,
Err: err,
}
if child.conf.Consumer.Return.Errors {
child.errors <- cErr
} else {
Logger.Println(cErr)
}
}
func (child *partitionConsumer) dispatcher() {
for range child.trigger {
select {
case <-child.dying:
close(child.trigger)
case <-time.After(child.conf.Consumer.Retry.Backoff):
if child.broker != nil {
child.consumer.unrefBrokerConsumer(child.broker)
child.broker = nil
}
Logger.Printf("consumer/%s/%d finding new broker\n", child.topic, child.partition)
if err := child.dispatch(); err != nil {
child.sendError(err)
child.trigger <- none{}
}
}
}
if child.broker != nil {
child.consumer.unrefBrokerConsumer(child.broker)
}
child.consumer.removeChild(child)
close(child.feeder)
}
func (child *partitionConsumer) dispatch() error {
if err := child.consumer.client.RefreshMetadata(child.topic); err != nil {
return err
}
var leader *Broker
var err error
if leader, err = child.consumer.client.Leader(child.topic, child.partition); err != nil {
return err
}
child.broker = child.consumer.refBrokerConsumer(leader)
child.broker.input <- child
return nil
}
func (child *partitionConsumer) chooseStartingOffset(offset int64) error {
newestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetNewest)
if err != nil {
return err
}
oldestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetOldest)
if err != nil {
return err
}
switch {
case offset == OffsetNewest:
child.offset = newestOffset
case offset == OffsetOldest:
child.offset = oldestOffset
case offset >= oldestOffset && offset <= newestOffset:
child.offset = offset
default:
return ErrOffsetOutOfRange
}
return nil
}
func (child *partitionConsumer) Messages() <-chan *ConsumerMessage {
return child.messages
}
func (child *partitionConsumer) Errors() <-chan *ConsumerError {
return child.errors
}
func (child *partitionConsumer) AsyncClose() {
// this triggers whatever broker owns this child to abandon it and close its trigger channel, which causes
// the dispatcher to exit its loop, which removes it from the consumer then closes its 'messages' and
// 'errors' channel (alternatively, if the child is already at the dispatcher for some reason, that will
// also just close itself)
child.closeOnce.Do(func() {
close(child.dying)
})
}
func (child *partitionConsumer) Close() error {
child.AsyncClose()
go withRecover(func() {
for range child.messages {
// drain
}
})
var errors ConsumerErrors
for err := range child.errors {
errors = append(errors, err)
}
if len(errors) > 0 {
return errors
}
return nil
}
func (child *partitionConsumer) HighWaterMarkOffset() int64 {
return atomic.LoadInt64(&child.highWaterMarkOffset)
}
func (child *partitionConsumer) responseFeeder() {
var msgs []*ConsumerMessage
expiryTicker := time.NewTicker(child.conf.Consumer.MaxProcessingTime)
firstAttempt := true
feederLoop:
for response := range child.feeder {
msgs, child.responseResult = child.parseResponse(response)
for i, msg := range msgs {
messageSelect:
select {
case child.messages <- msg:
firstAttempt = true
case <-expiryTicker.C:
if !firstAttempt {
child.responseResult = errTimedOut
child.broker.acks.Done()
for _, msg = range msgs[i:] {
child.messages <- msg
}
child.broker.input <- child
continue feederLoop
} else {
// current message has not been sent, return to select
// statement
firstAttempt = false
goto messageSelect
}
}
}
child.broker.acks.Done()
}
expiryTicker.Stop()
close(child.messages)
close(child.errors)
}
func (child *partitionConsumer) parseMessages(msgSet *MessageSet) ([]*ConsumerMessage, error) {
var messages []*ConsumerMessage
for _, msgBlock := range msgSet.Messages {
for _, msg := range msgBlock.Messages() {
offset := msg.Offset
if msg.Msg.Version >= 1 {
baseOffset := msgBlock.Offset - msgBlock.Messages()[len(msgBlock.Messages())-1].Offset
offset += baseOffset
}
if offset < child.offset {
continue
}
messages = append(messages, &ConsumerMessage{
Topic: child.topic,
Partition: child.partition,
Key: msg.Msg.Key,
Value: msg.Msg.Value,
Offset: offset,
Timestamp: msg.Msg.Timestamp,
BlockTimestamp: msgBlock.Msg.Timestamp,
})
child.offset = offset + 1
}
}
if len(messages) == 0 {
return nil, ErrIncompleteResponse
}
return messages, nil
}
func (child *partitionConsumer) parseRecords(batch *RecordBatch) ([]*ConsumerMessage, error) {
var messages []*ConsumerMessage
for _, rec := range batch.Records {
offset := batch.FirstOffset + rec.OffsetDelta
if offset < child.offset {
continue
}
messages = append(messages, &ConsumerMessage{
Topic: child.topic,
Partition: child.partition,
Key: rec.Key,
Value: rec.Value,
Offset: offset,
Timestamp: batch.FirstTimestamp.Add(rec.TimestampDelta),
Headers: rec.Headers,
})
child.offset = offset + 1
}
if len(messages) == 0 {
child.offset += 1
}
return messages, nil
}
func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*ConsumerMessage, error) {
block := response.GetBlock(child.topic, child.partition)
if block == nil {
return nil, ErrIncompleteResponse
}
if block.Err != ErrNoError {
return nil, block.Err
}
nRecs, err := block.numRecords()
if err != nil {
return nil, err
}
if nRecs == 0 {
partialTrailingMessage, err := block.isPartial()
if err != nil {
return nil, err
}
// We got no messages. If we got a trailing one then we need to ask for more data.
// Otherwise we just poll again and wait for one to be produced...
if partialTrailingMessage {
if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize == child.conf.Consumer.Fetch.Max {
// we can't ask for more data, we've hit the configured limit
child.sendError(ErrMessageTooLarge)
child.offset++ // skip this one so we can keep processing future messages
} else {
child.fetchSize *= 2
if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize > child.conf.Consumer.Fetch.Max {
child.fetchSize = child.conf.Consumer.Fetch.Max
}
}
}
return nil, nil
}
// we got messages, reset our fetch size in case it was increased for a previous request
child.fetchSize = child.conf.Consumer.Fetch.Default
atomic.StoreInt64(&child.highWaterMarkOffset, block.HighWaterMarkOffset)
messages := []*ConsumerMessage{}
for _, records := range block.RecordsSet {
switch records.recordsType {
case legacyRecords:
messageSetMessages, err := child.parseMessages(records.MsgSet)
if err != nil {
return nil, err
}
messages = append(messages, messageSetMessages...)
case defaultRecords:
recordBatchMessages, err := child.parseRecords(records.RecordBatch)
if err != nil {
return nil, err
}
if control, err := records.isControl(); err != nil || control {
continue
}
messages = append(messages, recordBatchMessages...)
default:
return nil, fmt.Errorf("unknown records type: %v", records.recordsType)
}
}
return messages, nil
}
// brokerConsumer
type brokerConsumer struct {
consumer *consumer
broker *Broker
input chan *partitionConsumer
newSubscriptions chan []*partitionConsumer
wait chan none
subscriptions map[*partitionConsumer]none
acks sync.WaitGroup
refs int
}
func (c *consumer) newBrokerConsumer(broker *Broker) *brokerConsumer {
bc := &brokerConsumer{
consumer: c,
broker: broker,
input: make(chan *partitionConsumer),
newSubscriptions: make(chan []*partitionConsumer),
wait: make(chan none),
subscriptions: make(map[*partitionConsumer]none),
refs: 0,
}
go withRecover(bc.subscriptionManager)
go withRecover(bc.subscriptionConsumer)
return bc
}
func (bc *brokerConsumer) subscriptionManager() {
var buffer []*partitionConsumer
// The subscriptionManager constantly accepts new subscriptions on `input` (even when the main subscriptionConsumer
// goroutine is in the middle of a network request) and batches them up. The main worker goroutine picks
// up a batch of new subscriptions between every network request by reading from `newSubscriptions`, so we give
// it nil if no new subscriptions are available. We also write to `wait` only when new subscriptions are available,
// so the main goroutine can block waiting for work if it has none.
for {
if len(buffer) > 0 {
select {
case event, ok := <-bc.input:
if !ok {
goto done
}
buffer = append(buffer, event)
case bc.newSubscriptions <- buffer:
buffer = nil
case bc.wait <- none{}:
}
} else {
select {
case event, ok := <-bc.input:
if !ok {
goto done
}
buffer = append(buffer, event)
case bc.newSubscriptions <- nil:
}
}
}
done:
close(bc.wait)
if len(buffer) > 0 {
bc.newSubscriptions <- buffer
}
close(bc.newSubscriptions)
}
func (bc *brokerConsumer) subscriptionConsumer() {
<-bc.wait // wait for our first piece of work
// the subscriptionManager ensures we will get nil right away if no new subscriptions are available
for newSubscriptions := range bc.newSubscriptions {
bc.updateSubscriptions(newSubscriptions)
if len(bc.subscriptions) == 0 {
// We're about to be shut down or we're about to receive more subscriptions.
// Either way, the signal just hasn't propagated to our goroutine yet.
<-bc.wait
continue
}
response, err := bc.fetchNewMessages()
if err != nil {
Logger.Printf("consumer/broker/%d disconnecting due to error processing FetchRequest: %s\n", bc.broker.ID(), err)
bc.abort(err)
return
}
bc.acks.Add(len(bc.subscriptions))
for child := range bc.subscriptions {
child.feeder <- response
}
bc.acks.Wait()
bc.handleResponses()
}
}
func (bc *brokerConsumer) updateSubscriptions(newSubscriptions []*partitionConsumer) {
for _, child := range newSubscriptions {
bc.subscriptions[child] = none{}
Logger.Printf("consumer/broker/%d added subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
}
for child := range bc.subscriptions {
select {
case <-child.dying:
Logger.Printf("consumer/broker/%d closed dead subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
close(child.trigger)
delete(bc.subscriptions, child)
default:
break
}
}
}
func (bc *brokerConsumer) handleResponses() {
// handles the response codes left for us by our subscriptions, and abandons ones that have been closed
for child := range bc.subscriptions {
result := child.responseResult
child.responseResult = nil
switch result {
case nil:
break
case errTimedOut:
Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because consuming was taking too long\n",
bc.broker.ID(), child.topic, child.partition)
delete(bc.subscriptions, child)
case ErrOffsetOutOfRange:
// there's no point in retrying this; it will just fail the same way again,
// so shut it down and force the user to choose what to do
child.sendError(result)
Logger.Printf("consumer/%s/%d shutting down because %s\n", child.topic, child.partition, result)
close(child.trigger)
delete(bc.subscriptions, child)
case ErrUnknownTopicOrPartition, ErrNotLeaderForPartition, ErrLeaderNotAvailable, ErrReplicaNotAvailable:
// not an error, but does need redispatching
Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
bc.broker.ID(), child.topic, child.partition, result)
child.trigger <- none{}
delete(bc.subscriptions, child)
default:
// dunno, tell the user and try redispatching
child.sendError(result)
Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
bc.broker.ID(), child.topic, child.partition, result)
child.trigger <- none{}
delete(bc.subscriptions, child)
}
}
}
func (bc *brokerConsumer) abort(err error) {
bc.consumer.abandonBrokerConsumer(bc)
_ = bc.broker.Close() // we don't care about the error this might return, we already have one
for child := range bc.subscriptions {
child.sendError(err)
child.trigger <- none{}
}
for newSubscriptions := range bc.newSubscriptions {
if len(newSubscriptions) == 0 {
<-bc.wait
continue
}
for _, child := range newSubscriptions {
child.sendError(err)
child.trigger <- none{}
}
}
}
func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) {
request := &FetchRequest{
MinBytes: bc.consumer.conf.Consumer.Fetch.Min,
MaxWaitTime: int32(bc.consumer.conf.Consumer.MaxWaitTime / time.Millisecond),
}
if bc.consumer.conf.Version.IsAtLeast(V0_10_0_0) {
request.Version = 2
}
if bc.consumer.conf.Version.IsAtLeast(V0_10_1_0) {
request.Version = 3
request.MaxBytes = MaxResponseSize
}
if bc.consumer.conf.Version.IsAtLeast(V0_11_0_0) {
request.Version = 4
request.Isolation = ReadUncommitted // We don't support transactions yet.
}
for child := range bc.subscriptions {
request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize)
}
return bc.broker.Fetch(request)
}
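A hedged usage sketch of the Consumer/PartitionConsumer API defined above (editorial, not part of the vendored file): it wires AsyncClose to an interrupt signal and keeps draining Messages until teardown closes the channel, following the shutdown pattern the PartitionConsumer documentation describes. The broker address and topic are placeholders.

package main

import (
	"log"
	"os"
	"os/signal"

	"github.com/Shopify/sarama"
)

func main() {
	consumer, err := sarama.NewConsumer([]string{"localhost:9092"}, sarama.NewConfig())
	if err != nil {
		log.Fatal(err)
	}
	defer consumer.Close() // close only after all PartitionConsumers are done

	pc, err := consumer.ConsumePartition("my-topic", 0, sarama.OffsetNewest)
	if err != nil {
		log.Fatal(err)
	}

	// On interrupt, start teardown; keep ranging over Messages until it closes.
	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, os.Interrupt)
	go func() {
		<-sigs
		pc.AsyncClose()
	}()

	for msg := range pc.Messages() {
		log.Printf("%s/%d offset %d: %s", msg.Topic, msg.Partition, msg.Offset, msg.Value)
	}
}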

774
vendor/github.com/Shopify/sarama/consumer_group.go generated vendored Normal file
View File

@ -0,0 +1,774 @@
package sarama
import (
"context"
"errors"
"fmt"
"sort"
"sync"
"time"
)
// ErrClosedConsumerGroup is the error returned when a method is called on a consumer group that has been closed.
var ErrClosedConsumerGroup = errors.New("kafka: tried to use a consumer group that was closed")
// ConsumerGroup is responsible for dividing up processing of topics and partitions
// over a collection of processes (the members of the consumer group).
type ConsumerGroup interface {
// Consume joins a cluster of consumers for a given list of topics and
// starts a blocking ConsumerGroupSession through the ConsumerGroupHandler.
//
// The life-cycle of a session is represented by the following steps:
//
// 1. The consumers join the group (as explained in https://kafka.apache.org/documentation/#intro_consumers)
// and are assigned their "fair share" of partitions, aka 'claims'.
// 2. Before processing starts, the handler's Setup() hook is called to notify the user
// of the claims and allow any necessary preparation or alteration of state.
// 3. For each of the assigned claims the handler's ConsumeClaim() function is then called
// in a separate goroutine which requires it to be thread-safe. Any state must be carefully protected
// from concurrent reads/writes.
// 4. The session will persist until one of the ConsumeClaim() functions exits. This can be either when the
// parent context is cancelled or when a server-side rebalance cycle is initiated.
// 5. Once all the ConsumeClaim() loops have exited, the handler's Cleanup() hook is called
// to allow the user to perform any final tasks before a rebalance.
// 6. Finally, marked offsets are committed one last time before claims are released.
//
// Please note that once a rebalance is triggered, sessions must be completed within
// Config.Consumer.Group.Rebalance.Timeout. This means that ConsumeClaim() functions must exit
// as quickly as possible to allow time for Cleanup() and the final offset commit. If the timeout
// is exceeded, the consumer will be removed from the group by Kafka, which will cause offset
// commit failures.
Consume(ctx context.Context, topics []string, handler ConsumerGroupHandler) error
// Errors returns a read channel of errors that occurred during the consumer life-cycle.
// By default, errors are logged and not returned over this channel.
// If you want to implement any custom error handling, set your config's
// Consumer.Return.Errors setting to true, and read from this channel.
Errors() <-chan error
// Close stops the ConsumerGroup and detaches any running sessions. It is required to call
// this function before the object passes out of scope, as it will otherwise leak memory.
Close() error
}
type consumerGroup struct {
client Client
ownClient bool
config *Config
consumer Consumer
groupID string
memberID string
errors chan error
lock sync.Mutex
closed chan none
closeOnce sync.Once
}
// NewConsumerGroup creates a new consumer group with the given broker addresses and configuration.
func NewConsumerGroup(addrs []string, groupID string, config *Config) (ConsumerGroup, error) {
client, err := NewClient(addrs, config)
if err != nil {
return nil, err
}
c, err := NewConsumerGroupFromClient(groupID, client)
if err != nil {
_ = client.Close()
return nil, err
}
c.(*consumerGroup).ownClient = true
return c, nil
}
// NewConsumerGroupFromClient creates a new consumer group using the given client. It is still
// necessary to call Close() on the underlying client when shutting down this consumer.
// PLEASE NOTE: consumer groups can only re-use but not share clients.
func NewConsumerGroupFromClient(groupID string, client Client) (ConsumerGroup, error) {
config := client.Config()
if !config.Version.IsAtLeast(V0_10_2_0) {
return nil, ConfigurationError("consumer groups require Version to be >= V0_10_2_0")
}
consumer, err := NewConsumerFromClient(client)
if err != nil {
return nil, err
}
return &consumerGroup{
client: client,
consumer: consumer,
config: config,
groupID: groupID,
errors: make(chan error, config.ChannelBufferSize),
closed: make(chan none),
}, nil
}
// Errors implements ConsumerGroup.
func (c *consumerGroup) Errors() <-chan error { return c.errors }
// Close implements ConsumerGroup.
func (c *consumerGroup) Close() (err error) {
c.closeOnce.Do(func() {
close(c.closed)
c.lock.Lock()
defer c.lock.Unlock()
// leave group
if e := c.leave(); e != nil {
err = e
}
// drain errors
go func() {
close(c.errors)
}()
for e := range c.errors {
err = e
}
if c.ownClient {
if e := c.client.Close(); e != nil {
err = e
}
}
})
return
}
// Consume implements ConsumerGroup.
func (c *consumerGroup) Consume(ctx context.Context, topics []string, handler ConsumerGroupHandler) error {
// Ensure group is not closed
select {
case <-c.closed:
return ErrClosedConsumerGroup
default:
}
c.lock.Lock()
defer c.lock.Unlock()
// Quick exit when no topics are provided
if len(topics) == 0 {
return fmt.Errorf("no topics provided")
}
// Refresh metadata for requested topics
if err := c.client.RefreshMetadata(topics...); err != nil {
return err
}
// Get coordinator
coordinator, err := c.client.Coordinator(c.groupID)
if err != nil {
return err
}
// Init session
sess, err := c.newSession(ctx, coordinator, topics, handler, c.config.Consumer.Group.Rebalance.Retry.Max)
if err == ErrClosedClient {
return ErrClosedConsumerGroup
} else if err != nil {
return err
}
// Wait for session exit signal
<-sess.ctx.Done()
// Gracefully release session claims
return sess.release(true)
}
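// Usage sketch (editorial, not part of the vendored file): a minimal handler
// and the Consume loop around it. It assumes the ConsumerGroupHandler methods
// (Setup/Cleanup/ConsumeClaim), the ConsumerGroupClaim interface and the
// ConsumerGroupSession.MarkMessage helper defined elsewhere in this package;
// broker address, group ID and topic are placeholders.
//
//	type exampleHandler struct{}
//
//	func (exampleHandler) Setup(sarama.ConsumerGroupSession) error   { return nil }
//	func (exampleHandler) Cleanup(sarama.ConsumerGroupSession) error { return nil }
//	func (exampleHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
//		for msg := range claim.Messages() {
//			sess.MarkMessage(msg, "") // marks the offset after msg as consumed
//		}
//		return nil
//	}
//
//	func run() error {
//		cfg := sarama.NewConfig()
//		cfg.Version = sarama.V0_10_2_0 // consumer groups require >= V0_10_2_0
//		group, err := sarama.NewConsumerGroup([]string{"localhost:9092"}, "my-group", cfg)
//		if err != nil {
//			return err
//		}
//		defer group.Close()
//		for {
//			// Consume blocks for the lifetime of one session; loop to
//			// rejoin the group after each server-side rebalance.
//			if err := group.Consume(context.Background(), []string{"my-topic"}, exampleHandler{}); err != nil {
//				return err
//			}
//		}
//	}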
func (c *consumerGroup) newSession(ctx context.Context, coordinator *Broker, topics []string, handler ConsumerGroupHandler, retries int) (*consumerGroupSession, error) {
// Join consumer group
join, err := c.joinGroupRequest(coordinator, topics)
if err != nil {
_ = coordinator.Close()
return nil, err
}
switch join.Err {
case ErrNoError:
c.memberID = join.MemberId
case ErrUnknownMemberId, ErrIllegalGeneration: // reset member ID and retry immediately
c.memberID = ""
return c.newSession(ctx, coordinator, topics, handler, retries)
case ErrRebalanceInProgress: // retry after backoff
if retries <= 0 {
return nil, join.Err
}
select {
case <-c.closed:
return nil, ErrClosedConsumerGroup
case <-time.After(c.config.Consumer.Group.Rebalance.Retry.Backoff):
}
return c.newSession(ctx, coordinator, topics, handler, retries-1)
default:
return nil, join.Err
}
// Prepare distribution plan if we joined as the leader
var plan BalanceStrategyPlan
if join.LeaderId == join.MemberId {
members, err := join.GetMembers()
if err != nil {
return nil, err
}
plan, err = c.balance(members)
if err != nil {
return nil, err
}
}
// Sync consumer group
sync, err := c.syncGroupRequest(coordinator, plan, join.GenerationId)
if err != nil {
_ = coordinator.Close()
return nil, err
}
switch sync.Err {
case ErrNoError:
case ErrUnknownMemberId, ErrIllegalGeneration: // reset member ID and retry immediately
c.memberID = ""
return c.newSession(ctx, coordinator, topics, handler, retries)
case ErrRebalanceInProgress: // retry after backoff
if retries <= 0 {
return nil, sync.Err
}
select {
case <-c.closed:
return nil, ErrClosedConsumerGroup
case <-time.After(c.config.Consumer.Group.Rebalance.Retry.Backoff):
}
return c.newSession(ctx, coordinator, topics, handler, retries-1)
default:
return nil, sync.Err
}
// Retrieve and sort claims
var claims map[string][]int32
if len(sync.MemberAssignment) > 0 {
members, err := sync.GetMemberAssignment()
if err != nil {
return nil, err
}
claims = members.Topics
for _, partitions := range claims {
sort.Sort(int32Slice(partitions))
}
}
return newConsumerGroupSession(c, ctx, claims, join.MemberId, join.GenerationId, handler)
}
func (c *consumerGroup) joinGroupRequest(coordinator *Broker, topics []string) (*JoinGroupResponse, error) {
req := &JoinGroupRequest{
GroupId: c.groupID,
MemberId: c.memberID,
SessionTimeout: int32(c.config.Consumer.Group.Session.Timeout / time.Millisecond),
ProtocolType: "consumer",
}
if c.config.Version.IsAtLeast(V0_10_1_0) {
req.Version = 1
req.RebalanceTimeout = int32(c.config.Consumer.Group.Rebalance.Timeout / time.Millisecond)
}
meta := &ConsumerGroupMemberMetadata{
Topics: topics,
UserData: c.config.Consumer.Group.Member.UserData,
}
strategy := c.config.Consumer.Group.Rebalance.Strategy
if err := req.AddGroupProtocolMetadata(strategy.Name(), meta); err != nil {
return nil, err
}
return coordinator.JoinGroup(req)
}
func (c *consumerGroup) syncGroupRequest(coordinator *Broker, plan BalanceStrategyPlan, generationID int32) (*SyncGroupResponse, error) {
req := &SyncGroupRequest{
GroupId: c.groupID,
MemberId: c.memberID,
GenerationId: generationID,
}
for memberID, topics := range plan {
err := req.AddGroupAssignmentMember(memberID, &ConsumerGroupMemberAssignment{
Topics: topics,
})
if err != nil {
return nil, err
}
}
return coordinator.SyncGroup(req)
}
func (c *consumerGroup) heartbeatRequest(coordinator *Broker, memberID string, generationID int32) (*HeartbeatResponse, error) {
req := &HeartbeatRequest{
GroupId: c.groupID,
MemberId: memberID,
GenerationId: generationID,
}
return coordinator.Heartbeat(req)
}
func (c *consumerGroup) balance(members map[string]ConsumerGroupMemberMetadata) (BalanceStrategyPlan, error) {
topics := make(map[string][]int32)
for _, meta := range members {
for _, topic := range meta.Topics {
topics[topic] = nil
}
}
for topic := range topics {
partitions, err := c.client.Partitions(topic)
if err != nil {
return nil, err
}
topics[topic] = partitions
}
strategy := c.config.Consumer.Group.Rebalance.Strategy
return strategy.Plan(members, topics)
}
// Leaves the cluster, called by Close, protected by lock.
func (c *consumerGroup) leave() error {
if c.memberID == "" {
return nil
}
coordinator, err := c.client.Coordinator(c.groupID)
if err != nil {
return err
}
resp, err := coordinator.LeaveGroup(&LeaveGroupRequest{
GroupId: c.groupID,
MemberId: c.memberID,
})
if err != nil {
_ = coordinator.Close()
return err
}
// Unset memberID
c.memberID = ""
// Check response
switch resp.Err {
case ErrRebalanceInProgress, ErrUnknownMemberId, ErrNoError:
return nil
default:
return resp.Err
}
}
func (c *consumerGroup) handleError(err error, topic string, partition int32) {
select {
case <-c.closed:
return
default:
}
if _, ok := err.(*ConsumerError); !ok && topic != "" && partition > -1 {
err = &ConsumerError{
Topic: topic,
Partition: partition,
Err: err,
}
}
if c.config.Consumer.Return.Errors {
select {
case c.errors <- err:
default:
}
} else {
Logger.Println(err)
}
}
// --------------------------------------------------------------------
// ConsumerGroupSession represents a consumer group member session.
type ConsumerGroupSession interface {
// Claims returns information about the claimed partitions by topic.
Claims() map[string][]int32
// MemberID returns the cluster member ID.
MemberID() string
// GenerationID returns the current generation ID.
GenerationID() int32
// MarkOffset marks the provided offset, alongside a metadata string
// that represents the state of the partition consumer at that point in time. The
// metadata string can be used by another consumer to restore that state, so it
// can resume consumption.
//
// To follow upstream conventions, you are expected to mark the offset of the
// next message to read, not the last message read. Thus, when calling `MarkOffset`
// you should typically add one to the offset of the last consumed message.
//
// Note: calling MarkOffset does not necessarily commit the offset to the backend
// store immediately for efficiency reasons, and it may never be committed if
// your application crashes. This means that you may end up processing the same
// message twice, and your processing should ideally be idempotent.
MarkOffset(topic string, partition int32, offset int64, metadata string)
// ResetOffset resets to the provided offset, alongside a metadata string that
// represents the state of the partition consumer at that point in time. Reset
// acts as a counterpart to MarkOffset, the difference being that it allows
// resetting an offset to an earlier or smaller value, where MarkOffset only
// allows incrementing the offset. See MarkOffset for more details.
ResetOffset(topic string, partition int32, offset int64, metadata string)
// MarkMessage marks a message as consumed.
MarkMessage(msg *ConsumerMessage, metadata string)
// Context returns the session context.
Context() context.Context
}
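// Editor's note, not part of the vendored source: the MarkOffset contract
// above amounts to committing the offset of the *next* message to read. A
// minimal sketch, assuming msg is the last message fully processed:
//
//	func markConsumed(sess ConsumerGroupSession, msg *ConsumerMessage) {
//		sess.MarkOffset(msg.Topic, msg.Partition, msg.Offset+1, "")
//	}
//
// MarkMessage below wraps exactly this +1 adjustment.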
type consumerGroupSession struct {
parent *consumerGroup
memberID string
generationID int32
handler ConsumerGroupHandler
claims map[string][]int32
offsets *offsetManager
ctx context.Context
cancel func()
waitGroup sync.WaitGroup
releaseOnce sync.Once
hbDying, hbDead chan none
}
func newConsumerGroupSession(parent *consumerGroup, ctx context.Context, claims map[string][]int32, memberID string, generationID int32, handler ConsumerGroupHandler) (*consumerGroupSession, error) {
// init offset manager
offsets, err := newOffsetManagerFromClient(parent.groupID, memberID, generationID, parent.client)
if err != nil {
return nil, err
}
// init context
ctx, cancel := context.WithCancel(ctx)
// init session
sess := &consumerGroupSession{
parent: parent,
memberID: memberID,
generationID: generationID,
handler: handler,
offsets: offsets,
claims: claims,
ctx: ctx,
cancel: cancel,
hbDying: make(chan none),
hbDead: make(chan none),
}
// start heartbeat loop
go sess.heartbeatLoop()
// create a POM for each claim
for topic, partitions := range claims {
for _, partition := range partitions {
pom, err := offsets.ManagePartition(topic, partition)
if err != nil {
_ = sess.release(false)
return nil, err
}
// handle POM errors
go func(topic string, partition int32) {
for err := range pom.Errors() {
sess.parent.handleError(err, topic, partition)
}
}(topic, partition)
}
}
// perform setup
if err := handler.Setup(sess); err != nil {
_ = sess.release(true)
return nil, err
}
// start consuming
for topic, partitions := range claims {
for _, partition := range partitions {
sess.waitGroup.Add(1)
go func(topic string, partition int32) {
defer sess.waitGroup.Done()
// cancel the session as soon as the first
// goroutine exits
defer sess.cancel()
// consume a single topic/partition, blocking
sess.consume(topic, partition)
}(topic, partition)
}
}
return sess, nil
}
func (s *consumerGroupSession) Claims() map[string][]int32 { return s.claims }
func (s *consumerGroupSession) MemberID() string { return s.memberID }
func (s *consumerGroupSession) GenerationID() int32 { return s.generationID }
func (s *consumerGroupSession) MarkOffset(topic string, partition int32, offset int64, metadata string) {
if pom := s.offsets.findPOM(topic, partition); pom != nil {
pom.MarkOffset(offset, metadata)
}
}
func (s *consumerGroupSession) ResetOffset(topic string, partition int32, offset int64, metadata string) {
if pom := s.offsets.findPOM(topic, partition); pom != nil {
pom.ResetOffset(offset, metadata)
}
}
func (s *consumerGroupSession) MarkMessage(msg *ConsumerMessage, metadata string) {
s.MarkOffset(msg.Topic, msg.Partition, msg.Offset+1, metadata)
}
func (s *consumerGroupSession) Context() context.Context {
return s.ctx
}
func (s *consumerGroupSession) consume(topic string, partition int32) {
// quick exit if rebalance is due
select {
case <-s.ctx.Done():
return
case <-s.parent.closed:
return
default:
}
// get next offset
offset := s.parent.config.Consumer.Offsets.Initial
if pom := s.offsets.findPOM(topic, partition); pom != nil {
offset, _ = pom.NextOffset()
}
// create new claim
claim, err := newConsumerGroupClaim(s, topic, partition, offset)
if err != nil {
s.parent.handleError(err, topic, partition)
return
}
// handle errors
go func() {
for err := range claim.Errors() {
s.parent.handleError(err, topic, partition)
}
}()
// trigger close when session is done
go func() {
select {
case <-s.ctx.Done():
case <-s.parent.closed:
}
claim.AsyncClose()
}()
// start processing
if err := s.handler.ConsumeClaim(s, claim); err != nil {
s.parent.handleError(err, topic, partition)
}
// ensure consumer is closed & drained
claim.AsyncClose()
for _, err := range claim.waitClosed() {
s.parent.handleError(err, topic, partition)
}
}
func (s *consumerGroupSession) release(withCleanup bool) (err error) {
// signal release, stop heartbeat
s.cancel()
// wait for consumers to exit
s.waitGroup.Wait()
// perform release
s.releaseOnce.Do(func() {
if withCleanup {
if e := s.handler.Cleanup(s); e != nil {
s.parent.handleError(e, "", -1)
err = e
}
}
if e := s.offsets.Close(); e != nil {
err = e
}
close(s.hbDying)
<-s.hbDead
})
return
}
func (s *consumerGroupSession) heartbeatLoop() {
defer close(s.hbDead)
defer s.cancel() // trigger the end of the session on exit
pause := time.NewTicker(s.parent.config.Consumer.Group.Heartbeat.Interval)
defer pause.Stop()
retries := s.parent.config.Metadata.Retry.Max
for {
coordinator, err := s.parent.client.Coordinator(s.parent.groupID)
if err != nil {
if retries <= 0 {
s.parent.handleError(err, "", -1)
return
}
select {
case <-s.hbDying:
return
case <-time.After(s.parent.config.Metadata.Retry.Backoff):
retries--
}
continue
}
resp, err := s.parent.heartbeatRequest(coordinator, s.memberID, s.generationID)
if err != nil {
_ = coordinator.Close()
// give up once the configured retries are exhausted instead of spinning
if retries <= 0 {
s.parent.handleError(err, "", -1)
return
}
retries--
continue
}
switch resp.Err {
case ErrNoError:
retries = s.parent.config.Metadata.Retry.Max
case ErrRebalanceInProgress, ErrUnknownMemberId, ErrIllegalGeneration:
return
default:
s.parent.handleError(resp.Err, "", -1)
return
}
select {
case <-pause.C:
case <-s.hbDying:
return
}
}
}
// --------------------------------------------------------------------
// ConsumerGroupHandler instances are used to handle individual topic/partition claims.
// It also provides hooks for your consumer group session life-cycle and allows you to
// trigger logic before or after the consume loop(s).
//
// PLEASE NOTE that handlers are likely to be called from several goroutines concurrently,
// so ensure that all state is safely protected against race conditions.
type ConsumerGroupHandler interface {
// Setup is run at the beginning of a new session, before ConsumeClaim.
Setup(ConsumerGroupSession) error
// Cleanup is run at the end of a session, once all ConsumeClaim goroutines have exited
// but before the offsets are committed for the very last time.
Cleanup(ConsumerGroupSession) error
// ConsumeClaim must start a consumer loop of ConsumerGroupClaim's Messages().
// Once the Messages() channel is closed, the Handler must finish its processing
// loop and exit.
ConsumeClaim(ConsumerGroupSession, ConsumerGroupClaim) error
}
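// Editor's sketch, not part of the vendored source: a minimal handler that
// satisfies the interface above and marks every message as consumed. The
// exampleHandler name is illustrative; real handlers must tolerate being
// called from several goroutines, one per claimed topic/partition.
type exampleHandler struct{}

func (exampleHandler) Setup(ConsumerGroupSession) error   { return nil }
func (exampleHandler) Cleanup(ConsumerGroupSession) error { return nil }
func (exampleHandler) ConsumeClaim(sess ConsumerGroupSession, claim ConsumerGroupClaim) error {
	// Messages() is closed when a rebalance is due; returning then lets the
	// session release the claim cleanly.
	for msg := range claim.Messages() {
		sess.MarkMessage(msg, "")
	}
	return nil
}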
// ConsumerGroupClaim processes Kafka messages from a given topic and partition within a consumer group.
type ConsumerGroupClaim interface {
// Topic returns the consumed topic name.
Topic() string
// Partition returns the consumed partition.
Partition() int32
// InitialOffset returns the initial offset that was used as a starting point for this claim.
InitialOffset() int64
// HighWaterMarkOffset returns the high water mark offset of the partition,
// i.e. the offset that will be used for the next message that will be produced.
// You can use this to determine how far behind the processing is.
HighWaterMarkOffset() int64
// Messages returns the read channel for the messages that are returned by
// the broker. The messages channel will be closed when a new rebalance cycle
// is due. You must finish processing and mark offsets within
// Config.Consumer.Group.Session.Timeout before the topic/partition is eventually
// re-assigned to another group member.
Messages() <-chan *ConsumerMessage
}
type consumerGroupClaim struct {
topic string
partition int32
offset int64
PartitionConsumer
}
func newConsumerGroupClaim(sess *consumerGroupSession, topic string, partition int32, offset int64) (*consumerGroupClaim, error) {
pcm, err := sess.parent.consumer.ConsumePartition(topic, partition, offset)
if err == ErrOffsetOutOfRange {
offset = sess.parent.config.Consumer.Offsets.Initial
pcm, err = sess.parent.consumer.ConsumePartition(topic, partition, offset)
}
if err != nil {
return nil, err
}
go func() {
for err := range pcm.Errors() {
sess.parent.handleError(err, topic, partition)
}
}()
return &consumerGroupClaim{
topic: topic,
partition: partition,
offset: offset,
PartitionConsumer: pcm,
}, nil
}
func (c *consumerGroupClaim) Topic() string { return c.topic }
func (c *consumerGroupClaim) Partition() int32 { return c.partition }
func (c *consumerGroupClaim) InitialOffset() int64 { return c.offset }
// Drains messages and errors, ensures the claim is fully closed.
func (c *consumerGroupClaim) waitClosed() (errs ConsumerErrors) {
go func() {
for range c.Messages() {
}
}()
for err := range c.Errors() {
errs = append(errs, err)
}
return
}

View File

@ -0,0 +1,94 @@
package sarama
type ConsumerGroupMemberMetadata struct {
Version int16
Topics []string
UserData []byte
}
func (m *ConsumerGroupMemberMetadata) encode(pe packetEncoder) error {
pe.putInt16(m.Version)
if err := pe.putStringArray(m.Topics); err != nil {
return err
}
if err := pe.putBytes(m.UserData); err != nil {
return err
}
return nil
}
func (m *ConsumerGroupMemberMetadata) decode(pd packetDecoder) (err error) {
if m.Version, err = pd.getInt16(); err != nil {
return
}
if m.Topics, err = pd.getStringArray(); err != nil {
return
}
if m.UserData, err = pd.getBytes(); err != nil {
return
}
return nil
}
type ConsumerGroupMemberAssignment struct {
Version int16
Topics map[string][]int32
UserData []byte
}
func (m *ConsumerGroupMemberAssignment) encode(pe packetEncoder) error {
pe.putInt16(m.Version)
if err := pe.putArrayLength(len(m.Topics)); err != nil {
return err
}
for topic, partitions := range m.Topics {
if err := pe.putString(topic); err != nil {
return err
}
if err := pe.putInt32Array(partitions); err != nil {
return err
}
}
if err := pe.putBytes(m.UserData); err != nil {
return err
}
return nil
}
func (m *ConsumerGroupMemberAssignment) decode(pd packetDecoder) (err error) {
if m.Version, err = pd.getInt16(); err != nil {
return
}
var topicLen int
if topicLen, err = pd.getArrayLength(); err != nil {
return
}
m.Topics = make(map[string][]int32, topicLen)
for i := 0; i < topicLen; i++ {
var topic string
if topic, err = pd.getString(); err != nil {
return
}
if m.Topics[topic], err = pd.getInt32Array(); err != nil {
return
}
}
if m.UserData, err = pd.getBytes(); err != nil {
return
}
return nil
}

View File

@ -0,0 +1,33 @@
package sarama
type ConsumerMetadataRequest struct {
ConsumerGroup string
}
func (r *ConsumerMetadataRequest) encode(pe packetEncoder) error {
tmp := new(FindCoordinatorRequest)
tmp.CoordinatorKey = r.ConsumerGroup
tmp.CoordinatorType = CoordinatorGroup
return tmp.encode(pe)
}
func (r *ConsumerMetadataRequest) decode(pd packetDecoder, version int16) (err error) {
tmp := new(FindCoordinatorRequest)
if err := tmp.decode(pd, version); err != nil {
return err
}
r.ConsumerGroup = tmp.CoordinatorKey
return nil
}
func (r *ConsumerMetadataRequest) key() int16 {
return 10
}
func (r *ConsumerMetadataRequest) version() int16 {
return 0
}
func (r *ConsumerMetadataRequest) requiredVersion() KafkaVersion {
return V0_8_2_0
}

View File

@ -0,0 +1,77 @@
package sarama
import (
"net"
"strconv"
)
type ConsumerMetadataResponse struct {
Err KError
Coordinator *Broker
CoordinatorID int32 // deprecated: use Coordinator.ID()
CoordinatorHost string // deprecated: use Coordinator.Addr()
CoordinatorPort int32 // deprecated: use Coordinator.Addr()
}
func (r *ConsumerMetadataResponse) decode(pd packetDecoder, version int16) (err error) {
tmp := new(FindCoordinatorResponse)
if err := tmp.decode(pd, version); err != nil {
return err
}
r.Err = tmp.Err
r.Coordinator = tmp.Coordinator
if tmp.Coordinator == nil {
return nil
}
// this can all go away in 2.0, but we have to fill in deprecated fields to maintain
// backwards compatibility
host, portstr, err := net.SplitHostPort(r.Coordinator.Addr())
if err != nil {
return err
}
port, err := strconv.ParseInt(portstr, 10, 32)
if err != nil {
return err
}
r.CoordinatorID = r.Coordinator.ID()
r.CoordinatorHost = host
r.CoordinatorPort = int32(port)
return nil
}
func (r *ConsumerMetadataResponse) encode(pe packetEncoder) error {
if r.Coordinator == nil {
r.Coordinator = new(Broker)
r.Coordinator.id = r.CoordinatorID
r.Coordinator.addr = net.JoinHostPort(r.CoordinatorHost, strconv.Itoa(int(r.CoordinatorPort)))
}
tmp := &FindCoordinatorResponse{
Version: 0,
Err: r.Err,
Coordinator: r.Coordinator,
}
if err := tmp.encode(pe); err != nil {
return err
}
return nil
}
func (r *ConsumerMetadataResponse) key() int16 {
return 10
}
func (r *ConsumerMetadataResponse) version() int16 {
return 0
}
func (r *ConsumerMetadataResponse) requiredVersion() KafkaVersion {
return V0_8_2_0
}

69
vendor/github.com/Shopify/sarama/crc32_field.go generated vendored Normal file
View File

@ -0,0 +1,69 @@
package sarama
import (
"encoding/binary"
"fmt"
"hash/crc32"
)
type crcPolynomial int8
const (
crcIEEE crcPolynomial = iota
crcCastagnoli
)
var castagnoliTable = crc32.MakeTable(crc32.Castagnoli)
// crc32Field implements the pushEncoder and pushDecoder interfaces for calculating CRC32s.
type crc32Field struct {
startOffset int
polynomial crcPolynomial
}
func (c *crc32Field) saveOffset(in int) {
c.startOffset = in
}
func (c *crc32Field) reserveLength() int {
return 4
}
func newCRC32Field(polynomial crcPolynomial) *crc32Field {
return &crc32Field{polynomial: polynomial}
}
func (c *crc32Field) run(curOffset int, buf []byte) error {
crc, err := c.crc(curOffset, buf)
if err != nil {
return err
}
binary.BigEndian.PutUint32(buf[c.startOffset:], crc)
return nil
}
func (c *crc32Field) check(curOffset int, buf []byte) error {
crc, err := c.crc(curOffset, buf)
if err != nil {
return err
}
expected := binary.BigEndian.Uint32(buf[c.startOffset:])
if crc != expected {
return PacketDecodingError{fmt.Sprintf("CRC didn't match expected %#x got %#x", expected, crc)}
}
return nil
}
func (c *crc32Field) crc(curOffset int, buf []byte) (uint32, error) {
var tab *crc32.Table
switch c.polynomial {
case crcIEEE:
tab = crc32.IEEETable
case crcCastagnoli:
tab = castagnoliTable
default:
return 0, PacketDecodingError{"invalid CRC type"}
}
return crc32.Checksum(buf[c.startOffset+4:curOffset], tab), nil
}
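// Editor's sketch, not part of the vendored source: the same Castagnoli
// checksum crc32Field computes over its span, shown directly for a raw
// byte slice using the table defined above.
func exampleChecksum(buf []byte) uint32 {
	return crc32.Checksum(buf, castagnoliTable)
}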

View File

@ -0,0 +1,121 @@
package sarama
import "time"
type CreatePartitionsRequest struct {
TopicPartitions map[string]*TopicPartition
Timeout time.Duration
ValidateOnly bool
}
func (c *CreatePartitionsRequest) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(c.TopicPartitions)); err != nil {
return err
}
for topic, partition := range c.TopicPartitions {
if err := pe.putString(topic); err != nil {
return err
}
if err := partition.encode(pe); err != nil {
return err
}
}
pe.putInt32(int32(c.Timeout / time.Millisecond))
pe.putBool(c.ValidateOnly)
return nil
}
func (c *CreatePartitionsRequest) decode(pd packetDecoder, version int16) (err error) {
n, err := pd.getArrayLength()
if err != nil {
return err
}
c.TopicPartitions = make(map[string]*TopicPartition, n)
for i := 0; i < n; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
c.TopicPartitions[topic] = new(TopicPartition)
if err := c.TopicPartitions[topic].decode(pd, version); err != nil {
return err
}
}
timeout, err := pd.getInt32()
if err != nil {
return err
}
c.Timeout = time.Duration(timeout) * time.Millisecond
if c.ValidateOnly, err = pd.getBool(); err != nil {
return err
}
return nil
}
func (r *CreatePartitionsRequest) key() int16 {
return 37
}
func (r *CreatePartitionsRequest) version() int16 {
return 0
}
func (r *CreatePartitionsRequest) requiredVersion() KafkaVersion {
return V1_0_0_0
}
type TopicPartition struct {
Count int32
Assignment [][]int32
}
func (t *TopicPartition) encode(pe packetEncoder) error {
pe.putInt32(t.Count)
if len(t.Assignment) == 0 {
pe.putInt32(-1)
return nil
}
if err := pe.putArrayLength(len(t.Assignment)); err != nil {
return err
}
for _, assign := range t.Assignment {
if err := pe.putInt32Array(assign); err != nil {
return err
}
}
return nil
}
func (t *TopicPartition) decode(pd packetDecoder, version int16) (err error) {
if t.Count, err = pd.getInt32(); err != nil {
return err
}
n, err := pd.getInt32()
if err != nil {
return err
}
if n <= 0 {
return nil
}
t.Assignment = make([][]int32, n)
for i := 0; i < int(n); i++ {
if t.Assignment[i], err = pd.getInt32Array(); err != nil {
return err
}
}
return nil
}

View File

@ -0,0 +1,94 @@
package sarama
import "time"
type CreatePartitionsResponse struct {
ThrottleTime time.Duration
TopicPartitionErrors map[string]*TopicPartitionError
}
func (c *CreatePartitionsResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(c.ThrottleTime / time.Millisecond))
if err := pe.putArrayLength(len(c.TopicPartitionErrors)); err != nil {
return err
}
for topic, partitionError := range c.TopicPartitionErrors {
if err := pe.putString(topic); err != nil {
return err
}
if err := partitionError.encode(pe); err != nil {
return err
}
}
return nil
}
func (c *CreatePartitionsResponse) decode(pd packetDecoder, version int16) (err error) {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
n, err := pd.getArrayLength()
if err != nil {
return err
}
c.TopicPartitionErrors = make(map[string]*TopicPartitionError, n)
for i := 0; i < n; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
c.TopicPartitionErrors[topic] = new(TopicPartitionError)
if err := c.TopicPartitionErrors[topic].decode(pd, version); err != nil {
return err
}
}
return nil
}
func (r *CreatePartitionsResponse) key() int16 {
return 37
}
func (r *CreatePartitionsResponse) version() int16 {
return 0
}
func (r *CreatePartitionsResponse) requiredVersion() KafkaVersion {
return V1_0_0_0
}
type TopicPartitionError struct {
Err KError
ErrMsg *string
}
func (t *TopicPartitionError) encode(pe packetEncoder) error {
pe.putInt16(int16(t.Err))
if err := pe.putNullableString(t.ErrMsg); err != nil {
return err
}
return nil
}
func (t *TopicPartitionError) decode(pd packetDecoder, version int16) (err error) {
kerr, err := pd.getInt16()
if err != nil {
return err
}
t.Err = KError(kerr)
if t.ErrMsg, err = pd.getNullableString(); err != nil {
return err
}
return nil
}

View File

@ -0,0 +1,174 @@
package sarama
import (
"time"
)
type CreateTopicsRequest struct {
Version int16
TopicDetails map[string]*TopicDetail
Timeout time.Duration
ValidateOnly bool
}
func (c *CreateTopicsRequest) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(c.TopicDetails)); err != nil {
return err
}
for topic, detail := range c.TopicDetails {
if err := pe.putString(topic); err != nil {
return err
}
if err := detail.encode(pe); err != nil {
return err
}
}
pe.putInt32(int32(c.Timeout / time.Millisecond))
if c.Version >= 1 {
pe.putBool(c.ValidateOnly)
}
return nil
}
func (c *CreateTopicsRequest) decode(pd packetDecoder, version int16) (err error) {
n, err := pd.getArrayLength()
if err != nil {
return err
}
c.TopicDetails = make(map[string]*TopicDetail, n)
for i := 0; i < n; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
c.TopicDetails[topic] = new(TopicDetail)
if err = c.TopicDetails[topic].decode(pd, version); err != nil {
return err
}
}
timeout, err := pd.getInt32()
if err != nil {
return err
}
c.Timeout = time.Duration(timeout) * time.Millisecond
if version >= 1 {
c.ValidateOnly, err = pd.getBool()
if err != nil {
return err
}
c.Version = version
}
return nil
}
func (c *CreateTopicsRequest) key() int16 {
return 19
}
func (c *CreateTopicsRequest) version() int16 {
return c.Version
}
func (c *CreateTopicsRequest) requiredVersion() KafkaVersion {
switch c.Version {
case 2:
return V1_0_0_0
case 1:
return V0_11_0_0
default:
return V0_10_1_0
}
}
type TopicDetail struct {
NumPartitions int32
ReplicationFactor int16
ReplicaAssignment map[int32][]int32
ConfigEntries map[string]*string
}
func (t *TopicDetail) encode(pe packetEncoder) error {
pe.putInt32(t.NumPartitions)
pe.putInt16(t.ReplicationFactor)
if err := pe.putArrayLength(len(t.ReplicaAssignment)); err != nil {
return err
}
for partition, assignment := range t.ReplicaAssignment {
pe.putInt32(partition)
if err := pe.putInt32Array(assignment); err != nil {
return err
}
}
if err := pe.putArrayLength(len(t.ConfigEntries)); err != nil {
return err
}
for configKey, configValue := range t.ConfigEntries {
if err := pe.putString(configKey); err != nil {
return err
}
if err := pe.putNullableString(configValue); err != nil {
return err
}
}
return nil
}
func (t *TopicDetail) decode(pd packetDecoder, version int16) (err error) {
if t.NumPartitions, err = pd.getInt32(); err != nil {
return err
}
if t.ReplicationFactor, err = pd.getInt16(); err != nil {
return err
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
if n > 0 {
t.ReplicaAssignment = make(map[int32][]int32, n)
for i := 0; i < n; i++ {
replica, err := pd.getInt32()
if err != nil {
return err
}
if t.ReplicaAssignment[replica], err = pd.getInt32Array(); err != nil {
return err
}
}
}
n, err = pd.getArrayLength()
if err != nil {
return err
}
if n > 0 {
t.ConfigEntries = make(map[string]*string, n)
for i := 0; i < n; i++ {
configKey, err := pd.getString()
if err != nil {
return err
}
if t.ConfigEntries[configKey], err = pd.getNullableString(); err != nil {
return err
}
}
}
return nil
}
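// Editor's sketch, not part of the vendored source: assembling a v1 request
// for one topic with three partitions and replication factor two. The topic
// name and values are illustrative only.
func exampleCreateTopicsRequest() *CreateTopicsRequest {
	return &CreateTopicsRequest{
		Version: 1, // v1 needs brokers >= V0_11_0_0, per requiredVersion above
		TopicDetails: map[string]*TopicDetail{
			"my-topic": {NumPartitions: 3, ReplicationFactor: 2},
		},
		Timeout:      30 * time.Second,
		ValidateOnly: false, // set true to dry-run the creation
	}
}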

View File

@ -0,0 +1,112 @@
package sarama
import "time"
type CreateTopicsResponse struct {
Version int16
ThrottleTime time.Duration
TopicErrors map[string]*TopicError
}
func (c *CreateTopicsResponse) encode(pe packetEncoder) error {
if c.Version >= 2 {
pe.putInt32(int32(c.ThrottleTime / time.Millisecond))
}
if err := pe.putArrayLength(len(c.TopicErrors)); err != nil {
return err
}
for topic, topicError := range c.TopicErrors {
if err := pe.putString(topic); err != nil {
return err
}
if err := topicError.encode(pe, c.Version); err != nil {
return err
}
}
return nil
}
func (c *CreateTopicsResponse) decode(pd packetDecoder, version int16) (err error) {
c.Version = version
if version >= 2 {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
c.TopicErrors = make(map[string]*TopicError, n)
for i := 0; i < n; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
c.TopicErrors[topic] = new(TopicError)
if err := c.TopicErrors[topic].decode(pd, version); err != nil {
return err
}
}
return nil
}
func (c *CreateTopicsResponse) key() int16 {
return 19
}
func (c *CreateTopicsResponse) version() int16 {
return c.Version
}
func (c *CreateTopicsResponse) requiredVersion() KafkaVersion {
switch c.Version {
case 2:
return V1_0_0_0
case 1:
return V0_11_0_0
default:
return V0_10_1_0
}
}
type TopicError struct {
Err KError
ErrMsg *string
}
func (t *TopicError) encode(pe packetEncoder, version int16) error {
pe.putInt16(int16(t.Err))
if version >= 1 {
if err := pe.putNullableString(t.ErrMsg); err != nil {
return err
}
}
return nil
}
func (t *TopicError) decode(pd packetDecoder, version int16) (err error) {
kErr, err := pd.getInt16()
if err != nil {
return err
}
t.Err = KError(kErr)
if version >= 1 {
if t.ErrMsg, err = pd.getNullableString(); err != nil {
return err
}
}
return nil
}

View File

@ -0,0 +1,30 @@
package sarama
type DeleteGroupsRequest struct {
Groups []string
}
func (r *DeleteGroupsRequest) encode(pe packetEncoder) error {
return pe.putStringArray(r.Groups)
}
func (r *DeleteGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
r.Groups, err = pd.getStringArray()
return
}
func (r *DeleteGroupsRequest) key() int16 {
return 42
}
func (r *DeleteGroupsRequest) version() int16 {
return 0
}
func (r *DeleteGroupsRequest) requiredVersion() KafkaVersion {
return V1_1_0_0
}
func (r *DeleteGroupsRequest) AddGroup(group string) {
r.Groups = append(r.Groups, group)
}

View File

@ -0,0 +1,70 @@
package sarama
import (
"time"
)
type DeleteGroupsResponse struct {
ThrottleTime time.Duration
GroupErrorCodes map[string]KError
}
func (r *DeleteGroupsResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
if err := pe.putArrayLength(len(r.GroupErrorCodes)); err != nil {
return err
}
for groupID, errorCode := range r.GroupErrorCodes {
if err := pe.putString(groupID); err != nil {
return err
}
pe.putInt16(int16(errorCode))
}
return nil
}
func (r *DeleteGroupsResponse) decode(pd packetDecoder, version int16) error {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
n, err := pd.getArrayLength()
if err != nil {
return err
}
if n == 0 {
return nil
}
r.GroupErrorCodes = make(map[string]KError, n)
for i := 0; i < n; i++ {
groupID, err := pd.getString()
if err != nil {
return err
}
errorCode, err := pd.getInt16()
if err != nil {
return err
}
r.GroupErrorCodes[groupID] = KError(errorCode)
}
return nil
}
func (r *DeleteGroupsResponse) key() int16 {
return 42
}
func (r *DeleteGroupsResponse) version() int16 {
return 0
}
func (r *DeleteGroupsResponse) requiredVersion() KafkaVersion {
return V1_1_0_0
}

View File

@ -0,0 +1,126 @@
package sarama
import (
"sort"
"time"
)
// request message format is:
// [topic] timeout(int32)
// where topic is:
// name(string) [partition]
// where partition is:
// id(int32) offset(int64)
type DeleteRecordsRequest struct {
Topics map[string]*DeleteRecordsRequestTopic
Timeout time.Duration
}
func (d *DeleteRecordsRequest) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(d.Topics)); err != nil {
return err
}
keys := make([]string, 0, len(d.Topics))
for topic := range d.Topics {
keys = append(keys, topic)
}
sort.Strings(keys)
for _, topic := range keys {
if err := pe.putString(topic); err != nil {
return err
}
if err := d.Topics[topic].encode(pe); err != nil {
return err
}
}
pe.putInt32(int32(d.Timeout / time.Millisecond))
return nil
}
func (d *DeleteRecordsRequest) decode(pd packetDecoder, version int16) error {
n, err := pd.getArrayLength()
if err != nil {
return err
}
if n > 0 {
d.Topics = make(map[string]*DeleteRecordsRequestTopic, n)
for i := 0; i < n; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
details := new(DeleteRecordsRequestTopic)
if err = details.decode(pd, version); err != nil {
return err
}
d.Topics[topic] = details
}
}
timeout, err := pd.getInt32()
if err != nil {
return err
}
d.Timeout = time.Duration(timeout) * time.Millisecond
return nil
}
func (d *DeleteRecordsRequest) key() int16 {
return 21
}
func (d *DeleteRecordsRequest) version() int16 {
return 0
}
func (d *DeleteRecordsRequest) requiredVersion() KafkaVersion {
return V0_11_0_0
}
type DeleteRecordsRequestTopic struct {
PartitionOffsets map[int32]int64 // partition => offset
}
func (t *DeleteRecordsRequestTopic) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(t.PartitionOffsets)); err != nil {
return err
}
keys := make([]int32, 0, len(t.PartitionOffsets))
for partition := range t.PartitionOffsets {
keys = append(keys, partition)
}
sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })
for _, partition := range keys {
pe.putInt32(partition)
pe.putInt64(t.PartitionOffsets[partition])
}
return nil
}
func (t *DeleteRecordsRequestTopic) decode(pd packetDecoder, version int16) error {
n, err := pd.getArrayLength()
if err != nil {
return err
}
if n > 0 {
t.PartitionOffsets = make(map[int32]int64, n)
for i := 0; i < n; i++ {
partition, err := pd.getInt32()
if err != nil {
return err
}
offset, err := pd.getInt64()
if err != nil {
return err
}
t.PartitionOffsets[partition] = offset
}
}
return nil
}
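// Editor's sketch, not part of the vendored source: a request that truncates
// partition 0 of "my-topic" below offset 100, matching the wire format
// described at the top of this file. Names and values are illustrative.
func exampleDeleteRecordsRequest() *DeleteRecordsRequest {
	return &DeleteRecordsRequest{
		Topics: map[string]*DeleteRecordsRequestTopic{
			"my-topic": {PartitionOffsets: map[int32]int64{0: 100}},
		},
		Timeout: 10 * time.Second,
	}
}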

View File

@ -0,0 +1,158 @@
package sarama
import (
"sort"
"time"
)
// response message format is:
// throttleMs(int32) [topic]
// where topic is:
// name(string) [partition]
// where partition is:
// id(int32) low_watermark(int64) error_code(int16)
type DeleteRecordsResponse struct {
Version int16
ThrottleTime time.Duration
Topics map[string]*DeleteRecordsResponseTopic
}
func (d *DeleteRecordsResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(d.ThrottleTime / time.Millisecond))
if err := pe.putArrayLength(len(d.Topics)); err != nil {
return err
}
keys := make([]string, 0, len(d.Topics))
for topic := range d.Topics {
keys = append(keys, topic)
}
sort.Strings(keys)
for _, topic := range keys {
if err := pe.putString(topic); err != nil {
return err
}
if err := d.Topics[topic].encode(pe); err != nil {
return err
}
}
return nil
}
func (d *DeleteRecordsResponse) decode(pd packetDecoder, version int16) error {
d.Version = version
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
n, err := pd.getArrayLength()
if err != nil {
return err
}
if n > 0 {
d.Topics = make(map[string]*DeleteRecordsResponseTopic, n)
for i := 0; i < n; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
details := new(DeleteRecordsResponseTopic)
if err = details.decode(pd, version); err != nil {
return err
}
d.Topics[topic] = details
}
}
return nil
}
func (d *DeleteRecordsResponse) key() int16 {
return 21
}
func (d *DeleteRecordsResponse) version() int16 {
return 0
}
func (d *DeleteRecordsResponse) requiredVersion() KafkaVersion {
return V0_11_0_0
}
type DeleteRecordsResponseTopic struct {
Partitions map[int32]*DeleteRecordsResponsePartition
}
func (t *DeleteRecordsResponseTopic) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(t.Partitions)); err != nil {
return err
}
keys := make([]int32, 0, len(t.Partitions))
for partition := range t.Partitions {
keys = append(keys, partition)
}
sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })
for _, partition := range keys {
pe.putInt32(partition)
if err := t.Partitions[partition].encode(pe); err != nil {
return err
}
}
return nil
}
func (t *DeleteRecordsResponseTopic) decode(pd packetDecoder, version int16) error {
n, err := pd.getArrayLength()
if err != nil {
return err
}
if n > 0 {
t.Partitions = make(map[int32]*DeleteRecordsResponsePartition, n)
for i := 0; i < n; i++ {
partition, err := pd.getInt32()
if err != nil {
return err
}
details := new(DeleteRecordsResponsePartition)
if err = details.decode(pd, version); err != nil {
return err
}
t.Partitions[partition] = details
}
}
return nil
}
type DeleteRecordsResponsePartition struct {
LowWatermark int64
Err KError
}
func (t *DeleteRecordsResponsePartition) encode(pe packetEncoder) error {
pe.putInt64(t.LowWatermark)
pe.putInt16(int16(t.Err))
return nil
}
func (t *DeleteRecordsResponsePartition) decode(pd packetDecoder, version int16) error {
lowWatermark, err := pd.getInt64()
if err != nil {
return err
}
t.LowWatermark = lowWatermark
kErr, err := pd.getInt16()
if err != nil {
return err
}
t.Err = KError(kErr)
return nil
}

View File

@ -0,0 +1,48 @@
package sarama
import "time"
type DeleteTopicsRequest struct {
Version int16
Topics []string
Timeout time.Duration
}
func (d *DeleteTopicsRequest) encode(pe packetEncoder) error {
if err := pe.putStringArray(d.Topics); err != nil {
return err
}
pe.putInt32(int32(d.Timeout / time.Millisecond))
return nil
}
func (d *DeleteTopicsRequest) decode(pd packetDecoder, version int16) (err error) {
if d.Topics, err = pd.getStringArray(); err != nil {
return err
}
timeout, err := pd.getInt32()
if err != nil {
return err
}
d.Timeout = time.Duration(timeout) * time.Millisecond
d.Version = version
return nil
}
func (d *DeleteTopicsRequest) key() int16 {
return 20
}
func (d *DeleteTopicsRequest) version() int16 {
return d.Version
}
func (d *DeleteTopicsRequest) requiredVersion() KafkaVersion {
switch d.Version {
case 1:
return V0_11_0_0
default:
return V0_10_1_0
}
}

View File

@ -0,0 +1,78 @@
package sarama
import "time"
type DeleteTopicsResponse struct {
Version int16
ThrottleTime time.Duration
TopicErrorCodes map[string]KError
}
func (d *DeleteTopicsResponse) encode(pe packetEncoder) error {
if d.Version >= 1 {
pe.putInt32(int32(d.ThrottleTime / time.Millisecond))
}
if err := pe.putArrayLength(len(d.TopicErrorCodes)); err != nil {
return err
}
for topic, errorCode := range d.TopicErrorCodes {
if err := pe.putString(topic); err != nil {
return err
}
pe.putInt16(int16(errorCode))
}
return nil
}
func (d *DeleteTopicsResponse) decode(pd packetDecoder, version int16) (err error) {
if version >= 1 {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
d.Version = version
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
d.TopicErrorCodes = make(map[string]KError, n)
for i := 0; i < n; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
errorCode, err := pd.getInt16()
if err != nil {
return err
}
d.TopicErrorCodes[topic] = KError(errorCode)
}
return nil
}
func (d *DeleteTopicsResponse) key() int16 {
return 20
}
func (d *DeleteTopicsResponse) version() int16 {
return d.Version
}
func (d *DeleteTopicsResponse) requiredVersion() KafkaVersion {
switch d.Version {
case 1:
return V0_11_0_0
default:
return V0_10_1_0
}
}

View File

@ -0,0 +1,91 @@
package sarama
type ConfigResource struct {
Type ConfigResourceType
Name string
ConfigNames []string
}
type DescribeConfigsRequest struct {
Resources []*ConfigResource
}
func (r *DescribeConfigsRequest) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(r.Resources)); err != nil {
return err
}
for _, c := range r.Resources {
pe.putInt8(int8(c.Type))
if err := pe.putString(c.Name); err != nil {
return err
}
if len(c.ConfigNames) == 0 {
pe.putInt32(-1)
continue
}
if err := pe.putStringArray(c.ConfigNames); err != nil {
return err
}
}
return nil
}
func (r *DescribeConfigsRequest) decode(pd packetDecoder, version int16) (err error) {
n, err := pd.getArrayLength()
if err != nil {
return err
}
r.Resources = make([]*ConfigResource, n)
for i := 0; i < n; i++ {
r.Resources[i] = &ConfigResource{}
t, err := pd.getInt8()
if err != nil {
return err
}
r.Resources[i].Type = ConfigResourceType(t)
name, err := pd.getString()
if err != nil {
return err
}
r.Resources[i].Name = name
confLength, err := pd.getArrayLength()
if err != nil {
return err
}
if confLength == -1 {
continue
}
cfnames := make([]string, confLength)
for i := 0; i < confLength; i++ {
s, err := pd.getString()
if err != nil {
return err
}
cfnames[i] = s
}
r.Resources[i].ConfigNames = cfnames
}
return nil
}
func (r *DescribeConfigsRequest) key() int16 {
return 32
}
func (r *DescribeConfigsRequest) version() int16 {
return 0
}
func (r *DescribeConfigsRequest) requiredVersion() KafkaVersion {
return V0_11_0_0
}

View File

@ -0,0 +1,188 @@
package sarama
import "time"
type DescribeConfigsResponse struct {
ThrottleTime time.Duration
Resources []*ResourceResponse
}
type ResourceResponse struct {
ErrorCode int16
ErrorMsg string
Type ConfigResourceType
Name string
Configs []*ConfigEntry
}
type ConfigEntry struct {
Name string
Value string
ReadOnly bool
Default bool
Sensitive bool
}
func (r *DescribeConfigsResponse) encode(pe packetEncoder) (err error) {
pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
if err = pe.putArrayLength(len(r.Resources)); err != nil {
return err
}
for _, c := range r.Resources {
if err = c.encode(pe); err != nil {
return err
}
}
return nil
}
func (r *DescribeConfigsResponse) decode(pd packetDecoder, version int16) (err error) {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
n, err := pd.getArrayLength()
if err != nil {
return err
}
r.Resources = make([]*ResourceResponse, n)
for i := 0; i < n; i++ {
rr := &ResourceResponse{}
if err := rr.decode(pd, version); err != nil {
return err
}
r.Resources[i] = rr
}
return nil
}
func (r *DescribeConfigsResponse) key() int16 {
return 32
}
func (r *DescribeConfigsResponse) version() int16 {
return 0
}
func (r *DescribeConfigsResponse) requiredVersion() KafkaVersion {
return V0_11_0_0
}
func (r *ResourceResponse) encode(pe packetEncoder) (err error) {
pe.putInt16(r.ErrorCode)
if err = pe.putString(r.ErrorMsg); err != nil {
return err
}
pe.putInt8(int8(r.Type))
if err = pe.putString(r.Name); err != nil {
return err
}
if err = pe.putArrayLength(len(r.Configs)); err != nil {
return err
}
for _, c := range r.Configs {
if err = c.encode(pe); err != nil {
return err
}
}
return nil
}
func (r *ResourceResponse) decode(pd packetDecoder, version int16) (err error) {
ec, err := pd.getInt16()
if err != nil {
return err
}
r.ErrorCode = ec
em, err := pd.getString()
if err != nil {
return err
}
r.ErrorMsg = em
t, err := pd.getInt8()
if err != nil {
return err
}
r.Type = ConfigResourceType(t)
name, err := pd.getString()
if err != nil {
return err
}
r.Name = name
n, err := pd.getArrayLength()
if err != nil {
return err
}
r.Configs = make([]*ConfigEntry, n)
for i := 0; i < n; i++ {
c := &ConfigEntry{}
if err := c.decode(pd, version); err != nil {
return err
}
r.Configs[i] = c
}
return nil
}
func (r *ConfigEntry) encode(pe packetEncoder) (err error) {
if err = pe.putString(r.Name); err != nil {
return err
}
if err = pe.putString(r.Value); err != nil {
return err
}
pe.putBool(r.ReadOnly)
pe.putBool(r.Default)
pe.putBool(r.Sensitive)
return nil
}
func (r *ConfigEntry) decode(pd packetDecoder, version int16) (err error) {
name, err := pd.getString()
if err != nil {
return err
}
r.Name = name
value, err := pd.getString()
if err != nil {
return err
}
r.Value = value
read, err := pd.getBool()
if err != nil {
return err
}
r.ReadOnly = read
de, err := pd.getBool()
if err != nil {
return err
}
r.Default = de
sensitive, err := pd.getBool()
if err != nil {
return err
}
r.Sensitive = sensitive
return nil
}

View File

@ -0,0 +1,30 @@
package sarama
type DescribeGroupsRequest struct {
Groups []string
}
func (r *DescribeGroupsRequest) encode(pe packetEncoder) error {
return pe.putStringArray(r.Groups)
}
func (r *DescribeGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
r.Groups, err = pd.getStringArray()
return
}
func (r *DescribeGroupsRequest) key() int16 {
return 15
}
func (r *DescribeGroupsRequest) version() int16 {
return 0
}
func (r *DescribeGroupsRequest) requiredVersion() KafkaVersion {
return V0_9_0_0
}
func (r *DescribeGroupsRequest) AddGroup(group string) {
r.Groups = append(r.Groups, group)
}

View File

@ -0,0 +1,187 @@
package sarama
type DescribeGroupsResponse struct {
Groups []*GroupDescription
}
func (r *DescribeGroupsResponse) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(r.Groups)); err != nil {
return err
}
for _, groupDescription := range r.Groups {
if err := groupDescription.encode(pe); err != nil {
return err
}
}
return nil
}
func (r *DescribeGroupsResponse) decode(pd packetDecoder, version int16) (err error) {
n, err := pd.getArrayLength()
if err != nil {
return err
}
r.Groups = make([]*GroupDescription, n)
for i := 0; i < n; i++ {
r.Groups[i] = new(GroupDescription)
if err := r.Groups[i].decode(pd); err != nil {
return err
}
}
return nil
}
func (r *DescribeGroupsResponse) key() int16 {
return 15
}
func (r *DescribeGroupsResponse) version() int16 {
return 0
}
func (r *DescribeGroupsResponse) requiredVersion() KafkaVersion {
return V0_9_0_0
}
type GroupDescription struct {
Err KError
GroupId string
State string
ProtocolType string
Protocol string
Members map[string]*GroupMemberDescription
}
func (gd *GroupDescription) encode(pe packetEncoder) error {
pe.putInt16(int16(gd.Err))
if err := pe.putString(gd.GroupId); err != nil {
return err
}
if err := pe.putString(gd.State); err != nil {
return err
}
if err := pe.putString(gd.ProtocolType); err != nil {
return err
}
if err := pe.putString(gd.Protocol); err != nil {
return err
}
if err := pe.putArrayLength(len(gd.Members)); err != nil {
return err
}
for memberId, groupMemberDescription := range gd.Members {
if err := pe.putString(memberId); err != nil {
return err
}
if err := groupMemberDescription.encode(pe); err != nil {
return err
}
}
return nil
}
func (gd *GroupDescription) decode(pd packetDecoder) (err error) {
kerr, err := pd.getInt16()
if err != nil {
return err
}
gd.Err = KError(kerr)
if gd.GroupId, err = pd.getString(); err != nil {
return
}
if gd.State, err = pd.getString(); err != nil {
return
}
if gd.ProtocolType, err = pd.getString(); err != nil {
return
}
if gd.Protocol, err = pd.getString(); err != nil {
return
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
if n == 0 {
return nil
}
gd.Members = make(map[string]*GroupMemberDescription)
for i := 0; i < n; i++ {
memberId, err := pd.getString()
if err != nil {
return err
}
gd.Members[memberId] = new(GroupMemberDescription)
if err := gd.Members[memberId].decode(pd); err != nil {
return err
}
}
return nil
}
type GroupMemberDescription struct {
ClientId string
ClientHost string
MemberMetadata []byte
MemberAssignment []byte
}
func (gmd *GroupMemberDescription) encode(pe packetEncoder) error {
if err := pe.putString(gmd.ClientId); err != nil {
return err
}
if err := pe.putString(gmd.ClientHost); err != nil {
return err
}
if err := pe.putBytes(gmd.MemberMetadata); err != nil {
return err
}
if err := pe.putBytes(gmd.MemberAssignment); err != nil {
return err
}
return nil
}
func (gmd *GroupMemberDescription) decode(pd packetDecoder) (err error) {
if gmd.ClientId, err = pd.getString(); err != nil {
return
}
if gmd.ClientHost, err = pd.getString(); err != nil {
return
}
if gmd.MemberMetadata, err = pd.getBytes(); err != nil {
return
}
if gmd.MemberAssignment, err = pd.getBytes(); err != nil {
return
}
return nil
}
func (gmd *GroupMemberDescription) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) {
assignment := new(ConsumerGroupMemberAssignment)
err := decode(gmd.MemberAssignment, assignment)
return assignment, err
}
func (gmd *GroupMemberDescription) GetMemberMetadata() (*ConsumerGroupMemberMetadata, error) {
metadata := new(ConsumerGroupMemberMetadata)
err := decode(gmd.MemberMetadata, metadata)
return metadata, err
}
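// Editor's sketch, not part of the vendored source: pulling the per-member
// partition assignments out of a group description via the helper above.
func exampleAssignments(gd *GroupDescription) (map[string]map[string][]int32, error) {
	out := make(map[string]map[string][]int32, len(gd.Members))
	for memberID, member := range gd.Members {
		assignment, err := member.GetMemberAssignment()
		if err != nil {
			return nil, err
		}
		out[memberID] = assignment.Topics // topic => claimed partitions
	}
	return out, nil
}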

10
vendor/github.com/Shopify/sarama/dev.yml generated vendored Normal file
View File

@ -0,0 +1,10 @@
name: sarama
up:
- go:
version: '1.11'
commands:
test:
run: make test
desc: 'run unit tests'

89
vendor/github.com/Shopify/sarama/encoder_decoder.go generated vendored Normal file
View File

@ -0,0 +1,89 @@
package sarama
import (
"fmt"
"github.com/rcrowley/go-metrics"
)
// Encoder is the interface that wraps the basic Encode method.
// Anything implementing Encoder can be turned into bytes using Kafka's encoding rules.
type encoder interface {
encode(pe packetEncoder) error
}
// Encode takes an Encoder and turns it into bytes while potentially recording metrics.
func encode(e encoder, metricRegistry metrics.Registry) ([]byte, error) {
if e == nil {
return nil, nil
}
var prepEnc prepEncoder
var realEnc realEncoder
err := e.encode(&prepEnc)
if err != nil {
return nil, err
}
if prepEnc.length < 0 || prepEnc.length > int(MaxRequestSize) {
return nil, PacketEncodingError{fmt.Sprintf("invalid request size (%d)", prepEnc.length)}
}
realEnc.raw = make([]byte, prepEnc.length)
realEnc.registry = metricRegistry
err = e.encode(&realEnc)
if err != nil {
return nil, err
}
return realEnc.raw, nil
}
// Decoder is the interface that wraps the basic Decode method.
// Anything implementing Decoder can be extracted from bytes using Kafka's encoding rules.
type decoder interface {
decode(pd packetDecoder) error
}
type versionedDecoder interface {
decode(pd packetDecoder, version int16) error
}
// Decode takes bytes and a Decoder and fills the fields of the decoder from the bytes,
// interpreted using Kafka's encoding rules.
func decode(buf []byte, in decoder) error {
if buf == nil {
return nil
}
helper := realDecoder{raw: buf}
err := in.decode(&helper)
if err != nil {
return err
}
if helper.off != len(buf) {
return PacketDecodingError{"invalid length"}
}
return nil
}
func versionedDecode(buf []byte, in versionedDecoder, version int16) error {
if buf == nil {
return nil
}
helper := realDecoder{raw: buf}
err := in.decode(&helper, version)
if err != nil {
return err
}
if helper.off != len(buf) {
return PacketDecodingError{"invalid length"}
}
return nil
}
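// Editor's sketch, not part of the vendored source: a round trip through the
// helpers above. A nil metrics registry is fine here because a plain struct
// records no metrics while encoding.
func exampleRoundTrip() error {
	in := &ConsumerGroupMemberMetadata{Topics: []string{"my-topic"}}
	raw, err := encode(in, nil)
	if err != nil {
		return err
	}
	return decode(raw, new(ConsumerGroupMemberMetadata))
}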

50
vendor/github.com/Shopify/sarama/end_txn_request.go generated vendored Normal file
View File

@ -0,0 +1,50 @@
package sarama
type EndTxnRequest struct {
TransactionalID string
ProducerID int64
ProducerEpoch int16
TransactionResult bool
}
func (a *EndTxnRequest) encode(pe packetEncoder) error {
if err := pe.putString(a.TransactionalID); err != nil {
return err
}
pe.putInt64(a.ProducerID)
pe.putInt16(a.ProducerEpoch)
pe.putBool(a.TransactionResult)
return nil
}
func (a *EndTxnRequest) decode(pd packetDecoder, version int16) (err error) {
if a.TransactionalID, err = pd.getString(); err != nil {
return err
}
if a.ProducerID, err = pd.getInt64(); err != nil {
return err
}
if a.ProducerEpoch, err = pd.getInt16(); err != nil {
return err
}
if a.TransactionResult, err = pd.getBool(); err != nil {
return err
}
return nil
}
func (a *EndTxnRequest) key() int16 {
return 26
}
func (a *EndTxnRequest) version() int16 {
return 0
}
func (a *EndTxnRequest) requiredVersion() KafkaVersion {
return V0_11_0_0
}

44
vendor/github.com/Shopify/sarama/end_txn_response.go generated vendored Normal file
View File

@ -0,0 +1,44 @@
package sarama
import (
"time"
)
type EndTxnResponse struct {
ThrottleTime time.Duration
Err KError
}
func (e *EndTxnResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(e.ThrottleTime / time.Millisecond))
pe.putInt16(int16(e.Err))
return nil
}
func (e *EndTxnResponse) decode(pd packetDecoder, version int16) (err error) {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
e.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
kerr, err := pd.getInt16()
if err != nil {
return err
}
e.Err = KError(kerr)
return nil
}
func (e *EndTxnResponse) key() int16 {
return 25
}
func (e *EndTxnResponse) version() int16 {
return 0
}
func (e *EndTxnResponse) requiredVersion() KafkaVersion {
return V0_11_0_0
}

281
vendor/github.com/Shopify/sarama/errors.go generated vendored Normal file
View File

@ -0,0 +1,281 @@
package sarama
import (
"errors"
"fmt"
)
// ErrOutOfBrokers is the error returned when the client has run out of brokers to talk to because all of them errored
// or otherwise failed to respond.
var ErrOutOfBrokers = errors.New("kafka: client has run out of available brokers to talk to (Is your cluster reachable?)")
// ErrClosedClient is the error returned when a method is called on a client that has been closed.
var ErrClosedClient = errors.New("kafka: tried to use a client that was closed")
// ErrIncompleteResponse is the error returned when the server returns a syntactically valid response, but it does
// not contain the expected information.
var ErrIncompleteResponse = errors.New("kafka: response did not contain all the expected topic/partition blocks")
// ErrInvalidPartition is the error returned when a partitioner returns an invalid partition index
// (meaning one outside of the range [0...numPartitions-1]).
var ErrInvalidPartition = errors.New("kafka: partitioner returned an invalid partition index")
// ErrAlreadyConnected is the error returned when calling Open() on a Broker that is already connected or connecting.
var ErrAlreadyConnected = errors.New("kafka: broker connection already initiated")
// ErrNotConnected is the error returned when trying to send or call Close() on a Broker that is not connected.
var ErrNotConnected = errors.New("kafka: broker not connected")
// ErrInsufficientData is returned when decoding and the packet is truncated. This can be expected
// when requesting messages, since as an optimization the server is allowed to return a partial message at the end
// of the message set.
var ErrInsufficientData = errors.New("kafka: insufficient data to decode packet, more bytes expected")
// ErrShuttingDown is returned when a producer receives a message during shutdown.
var ErrShuttingDown = errors.New("kafka: message received by producer in process of shutting down")
// ErrMessageTooLarge is returned when the next message to consume is larger than the configured Consumer.Fetch.Max
var ErrMessageTooLarge = errors.New("kafka: message is larger than Consumer.Fetch.Max")
// ErrConsumerOffsetNotAdvanced is returned when a partition consumer didn't advance its offset after parsing
// a RecordBatch.
var ErrConsumerOffsetNotAdvanced = errors.New("kafka: consumer offset was not advanced after a RecordBatch")
// ErrControllerNotAvailable is returned when the server didn't give a correct controller id. It may be that the
// kafka server's version is lower than 0.10.0.0.
var ErrControllerNotAvailable = errors.New("kafka: controller is not available")
// ErrNoTopicsToUpdateMetadata is returned when Metadata.Full is set to false but no specific topics were found to update
// the metadata.
var ErrNoTopicsToUpdateMetadata = errors.New("kafka: no specific topics to update metadata")
// PacketEncodingError is returned from a failure while encoding a Kafka packet. This can happen, for example,
// if you try to encode a string over 2^15 characters in length, since Kafka's encoding rules do not permit that.
type PacketEncodingError struct {
Info string
}
func (err PacketEncodingError) Error() string {
return fmt.Sprintf("kafka: error encoding packet: %s", err.Info)
}
// PacketDecodingError is returned when there was an error (other than truncated data) decoding the Kafka broker's response.
// This can be a bad CRC or length field, or any other invalid value.
type PacketDecodingError struct {
Info string
}
func (err PacketDecodingError) Error() string {
return fmt.Sprintf("kafka: error decoding packet: %s", err.Info)
}
// ConfigurationError is the type of error returned from a constructor (e.g. NewClient, or NewConsumer)
// when the specified configuration is invalid.
type ConfigurationError string
func (err ConfigurationError) Error() string {
return "kafka: invalid configuration (" + string(err) + ")"
}
// KError is the type of error that can be returned directly by the Kafka broker.
// See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes
type KError int16
// Numeric error codes returned by the Kafka server.
const (
ErrNoError KError = 0
ErrUnknown KError = -1
ErrOffsetOutOfRange KError = 1
ErrInvalidMessage KError = 2
ErrUnknownTopicOrPartition KError = 3
ErrInvalidMessageSize KError = 4
ErrLeaderNotAvailable KError = 5
ErrNotLeaderForPartition KError = 6
ErrRequestTimedOut KError = 7
ErrBrokerNotAvailable KError = 8
ErrReplicaNotAvailable KError = 9
ErrMessageSizeTooLarge KError = 10
ErrStaleControllerEpochCode KError = 11
ErrOffsetMetadataTooLarge KError = 12
ErrNetworkException KError = 13
ErrOffsetsLoadInProgress KError = 14
ErrConsumerCoordinatorNotAvailable KError = 15
ErrNotCoordinatorForConsumer KError = 16
ErrInvalidTopic KError = 17
ErrMessageSetSizeTooLarge KError = 18
ErrNotEnoughReplicas KError = 19
ErrNotEnoughReplicasAfterAppend KError = 20
ErrInvalidRequiredAcks KError = 21
ErrIllegalGeneration KError = 22
ErrInconsistentGroupProtocol KError = 23
ErrInvalidGroupId KError = 24
ErrUnknownMemberId KError = 25
ErrInvalidSessionTimeout KError = 26
ErrRebalanceInProgress KError = 27
ErrInvalidCommitOffsetSize KError = 28
ErrTopicAuthorizationFailed KError = 29
ErrGroupAuthorizationFailed KError = 30
ErrClusterAuthorizationFailed KError = 31
ErrInvalidTimestamp KError = 32
ErrUnsupportedSASLMechanism KError = 33
ErrIllegalSASLState KError = 34
ErrUnsupportedVersion KError = 35
ErrTopicAlreadyExists KError = 36
ErrInvalidPartitions KError = 37
ErrInvalidReplicationFactor KError = 38
ErrInvalidReplicaAssignment KError = 39
ErrInvalidConfig KError = 40
ErrNotController KError = 41
ErrInvalidRequest KError = 42
ErrUnsupportedForMessageFormat KError = 43
ErrPolicyViolation KError = 44
ErrOutOfOrderSequenceNumber KError = 45
ErrDuplicateSequenceNumber KError = 46
ErrInvalidProducerEpoch KError = 47
ErrInvalidTxnState KError = 48
ErrInvalidProducerIDMapping KError = 49
ErrInvalidTransactionTimeout KError = 50
ErrConcurrentTransactions KError = 51
ErrTransactionCoordinatorFenced KError = 52
ErrTransactionalIDAuthorizationFailed KError = 53
ErrSecurityDisabled KError = 54
ErrOperationNotAttempted KError = 55
ErrKafkaStorageError KError = 56
ErrLogDirNotFound KError = 57
ErrSASLAuthenticationFailed KError = 58
ErrUnknownProducerID KError = 59
ErrReassignmentInProgress KError = 60
)
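// Editor's sketch, not part of the vendored source: KError values compare
// directly, so broker errors can be matched with a plain switch, mirroring
// the retry handling in the consumer-group session code above.
func isRetriableGroupError(err KError) bool {
	switch err {
	case ErrRebalanceInProgress, ErrUnknownMemberId, ErrIllegalGeneration:
		return true
	default:
		return false
	}
}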
func (err KError) Error() string {
// Error messages stolen/adapted from
// https://kafka.apache.org/protocol#protocol_error_codes
switch err {
case ErrNoError:
return "kafka server: Not an error, why are you printing me?"
case ErrUnknown:
return "kafka server: Unexpected (unknown?) server error."
case ErrOffsetOutOfRange:
return "kafka server: The requested offset is outside the range of offsets maintained by the server for the given topic/partition."
case ErrInvalidMessage:
return "kafka server: Message contents does not match its CRC."
case ErrUnknownTopicOrPartition:
return "kafka server: Request was for a topic or partition that does not exist on this broker."
case ErrInvalidMessageSize:
return "kafka server: The message has a negative size."
case ErrLeaderNotAvailable:
return "kafka server: In the middle of a leadership election, there is currently no leader for this partition and hence it is unavailable for writes."
case ErrNotLeaderForPartition:
return "kafka server: Tried to send a message to a replica that is not the leader for some partition. Your metadata is out of date."
case ErrRequestTimedOut:
return "kafka server: Request exceeded the user-specified time limit in the request."
case ErrBrokerNotAvailable:
return "kafka server: Broker not available. Not a client facing error, we should never receive this!!!"
case ErrReplicaNotAvailable:
return "kafka server: Replica information not available, one or more brokers are down."
case ErrMessageSizeTooLarge:
return "kafka server: Message was too large, server rejected it to avoid allocation error."
case ErrStaleControllerEpochCode:
return "kafka server: StaleControllerEpochCode (internal error code for broker-to-broker communication)."
case ErrOffsetMetadataTooLarge:
return "kafka server: Specified a string larger than the configured maximum for offset metadata."
case ErrNetworkException:
return "kafka server: The server disconnected before a response was received."
case ErrOffsetsLoadInProgress:
return "kafka server: The broker is still loading offsets after a leader change for that offset's topic partition."
case ErrConsumerCoordinatorNotAvailable:
return "kafka server: Offset's topic has not yet been created."
case ErrNotCoordinatorForConsumer:
return "kafka server: Request was for a consumer group that is not coordinated by this broker."
case ErrInvalidTopic:
return "kafka server: The request attempted to perform an operation on an invalid topic."
case ErrMessageSetSizeTooLarge:
return "kafka server: The request included message batch larger than the configured segment size on the server."
case ErrNotEnoughReplicas:
return "kafka server: Messages are rejected since there are fewer in-sync replicas than required."
case ErrNotEnoughReplicasAfterAppend:
return "kafka server: Messages are written to the log, but to fewer in-sync replicas than required."
case ErrInvalidRequiredAcks:
return "kafka server: The number of required acks is invalid (should be either -1, 0, or 1)."
case ErrIllegalGeneration:
return "kafka server: The provided generation id is not the current generation."
case ErrInconsistentGroupProtocol:
return "kafka server: The provider group protocol type is incompatible with the other members."
case ErrInvalidGroupId:
return "kafka server: The provided group id was empty."
case ErrUnknownMemberId:
return "kafka server: The provided member is not known in the current generation."
case ErrInvalidSessionTimeout:
return "kafka server: The provided session timeout is outside the allowed range."
case ErrRebalanceInProgress:
return "kafka server: A rebalance for the group is in progress. Please re-join the group."
case ErrInvalidCommitOffsetSize:
return "kafka server: The provided commit metadata was too large."
case ErrTopicAuthorizationFailed:
return "kafka server: The client is not authorized to access this topic."
case ErrGroupAuthorizationFailed:
return "kafka server: The client is not authorized to access this group."
case ErrClusterAuthorizationFailed:
return "kafka server: The client is not authorized to send this request type."
case ErrInvalidTimestamp:
return "kafka server: The timestamp of the message is out of acceptable range."
case ErrUnsupportedSASLMechanism:
return "kafka server: The broker does not support the requested SASL mechanism."
case ErrIllegalSASLState:
return "kafka server: Request is not valid given the current SASL state."
case ErrUnsupportedVersion:
return "kafka server: The version of API is not supported."
case ErrTopicAlreadyExists:
return "kafka server: Topic with this name already exists."
case ErrInvalidPartitions:
return "kafka server: Number of partitions is invalid."
case ErrInvalidReplicationFactor:
return "kafka server: Replication-factor is invalid."
case ErrInvalidReplicaAssignment:
return "kafka server: Replica assignment is invalid."
case ErrInvalidConfig:
return "kafka server: Configuration is invalid."
case ErrNotController:
return "kafka server: This is not the correct controller for this cluster."
case ErrInvalidRequest:
return "kafka server: This most likely occurs because of a request being malformed by the client library or the message was sent to an incompatible broker. See the broker logs for more details."
case ErrUnsupportedForMessageFormat:
return "kafka server: The requested operation is not supported by the message format version."
case ErrPolicyViolation:
return "kafka server: Request parameters do not satisfy the configured policy."
case ErrOutOfOrderSequenceNumber:
return "kafka server: The broker received an out of order sequence number."
case ErrDuplicateSequenceNumber:
return "kafka server: The broker received a duplicate sequence number."
case ErrInvalidProducerEpoch:
return "kafka server: Producer attempted an operation with an old epoch."
case ErrInvalidTxnState:
return "kafka server: The producer attempted a transactional operation in an invalid state."
case ErrInvalidProducerIDMapping:
return "kafka server: The producer attempted to use a producer id which is not currently assigned to its transactional id."
case ErrInvalidTransactionTimeout:
return "kafka server: The transaction timeout is larger than the maximum value allowed by the broker (as configured by max.transaction.timeout.ms)."
case ErrConcurrentTransactions:
return "kafka server: The producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing."
case ErrTransactionCoordinatorFenced:
return "kafka server: The transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer."
case ErrTransactionalIDAuthorizationFailed:
return "kafka server: Transactional ID authorization failed."
case ErrSecurityDisabled:
return "kafka server: Security features are disabled."
case ErrOperationNotAttempted:
return "kafka server: The broker did not attempt to execute this operation."
case ErrKafkaStorageError:
return "kafka server: Disk error when trying to access log file on the disk."
case ErrLogDirNotFound:
return "kafka server: The specified log directory is not found in the broker config."
case ErrSASLAuthenticationFailed:
return "kafka server: SASL Authentication failed."
case ErrUnknownProducerID:
return "kafka server: The broker could not locate the producer metadata associated with the Producer ID."
case ErrReassignmentInProgress:
return "kafka server: A partition reassignment is in progress."
}
return fmt.Sprintf("Unknown error, how did this happen? Error code = %d", err)
}
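Since KError is just a typed int16 with an Error method, a broker error code decoded from a response can be compared directly against the named constants above. A minimal sketch (the surrounding decode plumbing is assumed):

	kerr := KError(3) // e.g. a code just decoded from a broker response
	if kerr != ErrNoError {
		if kerr == ErrUnknownTopicOrPartition {
			// refresh metadata or create the topic before retrying
		}
		fmt.Println(kerr.Error())
	}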

vendor/github.com/Shopify/sarama/fetch_request.go generated vendored Normal file

@ -0,0 +1,170 @@
package sarama
type fetchRequestBlock struct {
fetchOffset int64
maxBytes int32
}
func (b *fetchRequestBlock) encode(pe packetEncoder) error {
pe.putInt64(b.fetchOffset)
pe.putInt32(b.maxBytes)
return nil
}
func (b *fetchRequestBlock) decode(pd packetDecoder) (err error) {
if b.fetchOffset, err = pd.getInt64(); err != nil {
return err
}
if b.maxBytes, err = pd.getInt32(); err != nil {
return err
}
return nil
}
// FetchRequest (API key 1) will fetch Kafka messages. Version 3 introduced the MaxBytes field. See
// https://issues.apache.org/jira/browse/KAFKA-2063 for a discussion of the issues leading up to that. The KIP is at
// https://cwiki.apache.org/confluence/display/KAFKA/KIP-74%3A+Add+Fetch+Response+Size+Limit+in+Bytes
type FetchRequest struct {
MaxWaitTime int32
MinBytes int32
MaxBytes int32
Version int16
Isolation IsolationLevel
blocks map[string]map[int32]*fetchRequestBlock
}
type IsolationLevel int8
const (
ReadUncommitted IsolationLevel = 0
ReadCommitted IsolationLevel = 1
)
func (r *FetchRequest) encode(pe packetEncoder) (err error) {
pe.putInt32(-1) // replica ID is always -1 for clients
pe.putInt32(r.MaxWaitTime)
pe.putInt32(r.MinBytes)
if r.Version >= 3 {
pe.putInt32(r.MaxBytes)
}
if r.Version >= 4 {
pe.putInt8(int8(r.Isolation))
}
err = pe.putArrayLength(len(r.blocks))
if err != nil {
return err
}
for topic, blocks := range r.blocks {
err = pe.putString(topic)
if err != nil {
return err
}
err = pe.putArrayLength(len(blocks))
if err != nil {
return err
}
for partition, block := range blocks {
pe.putInt32(partition)
err = block.encode(pe)
if err != nil {
return err
}
}
}
return nil
}
func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) {
r.Version = version
if _, err = pd.getInt32(); err != nil {
return err
}
if r.MaxWaitTime, err = pd.getInt32(); err != nil {
return err
}
if r.MinBytes, err = pd.getInt32(); err != nil {
return err
}
if r.Version >= 3 {
if r.MaxBytes, err = pd.getInt32(); err != nil {
return err
}
}
if r.Version >= 4 {
isolation, err := pd.getInt8()
if err != nil {
return err
}
r.Isolation = IsolationLevel(isolation)
}
topicCount, err := pd.getArrayLength()
if err != nil {
return err
}
if topicCount == 0 {
return nil
}
r.blocks = make(map[string]map[int32]*fetchRequestBlock)
for i := 0; i < topicCount; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
partitionCount, err := pd.getArrayLength()
if err != nil {
return err
}
r.blocks[topic] = make(map[int32]*fetchRequestBlock)
for j := 0; j < partitionCount; j++ {
partition, err := pd.getInt32()
if err != nil {
return err
}
fetchBlock := &fetchRequestBlock{}
if err = fetchBlock.decode(pd); err != nil {
return err
}
r.blocks[topic][partition] = fetchBlock
}
}
return nil
}
func (r *FetchRequest) key() int16 {
return 1
}
func (r *FetchRequest) version() int16 {
return r.Version
}
func (r *FetchRequest) requiredVersion() KafkaVersion {
switch r.Version {
case 1:
return V0_9_0_0
case 2:
return V0_10_0_0
case 3:
return V0_10_1_0
case 4:
return V0_11_0_0
default:
return MinVersion
}
}
func (r *FetchRequest) AddBlock(topic string, partitionID int32, fetchOffset int64, maxBytes int32) {
if r.blocks == nil {
r.blocks = make(map[string]map[int32]*fetchRequestBlock)
}
if r.blocks[topic] == nil {
r.blocks[topic] = make(map[int32]*fetchRequestBlock)
}
tmp := new(fetchRequestBlock)
tmp.maxBytes = maxBytes
tmp.fetchOffset = fetchOffset
r.blocks[topic][partitionID] = tmp
}
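For illustration, assembling a version-3 fetch request for a single partition might look like this sketch (topic name, offset, and sizes are made-up values):

	req := &FetchRequest{
		Version:     3,
		MaxWaitTime: 500,     // ms the broker may wait to accumulate MinBytes
		MinBytes:    1,
		MaxBytes:    1 << 20, // total response size cap, honoured on v3+ only
	}
	// topic, partition, fetchOffset, per-partition maxBytes
	req.AddBlock("events", 0, 42, 32*1024)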

vendor/github.com/Shopify/sarama/fetch_response.go generated vendored Normal file

@ -0,0 +1,396 @@
package sarama
import (
"time"
)
type AbortedTransaction struct {
ProducerID int64
FirstOffset int64
}
func (t *AbortedTransaction) decode(pd packetDecoder) (err error) {
if t.ProducerID, err = pd.getInt64(); err != nil {
return err
}
if t.FirstOffset, err = pd.getInt64(); err != nil {
return err
}
return nil
}
func (t *AbortedTransaction) encode(pe packetEncoder) (err error) {
pe.putInt64(t.ProducerID)
pe.putInt64(t.FirstOffset)
return nil
}
type FetchResponseBlock struct {
Err KError
HighWaterMarkOffset int64
LastStableOffset int64
AbortedTransactions []*AbortedTransaction
	Records *Records // deprecated: use FetchResponseBlock.RecordsSet
RecordsSet []*Records
Partial bool
}
func (b *FetchResponseBlock) decode(pd packetDecoder, version int16) (err error) {
tmp, err := pd.getInt16()
if err != nil {
return err
}
b.Err = KError(tmp)
b.HighWaterMarkOffset, err = pd.getInt64()
if err != nil {
return err
}
if version >= 4 {
b.LastStableOffset, err = pd.getInt64()
if err != nil {
return err
}
numTransact, err := pd.getArrayLength()
if err != nil {
return err
}
if numTransact >= 0 {
b.AbortedTransactions = make([]*AbortedTransaction, numTransact)
}
for i := 0; i < numTransact; i++ {
transact := new(AbortedTransaction)
if err = transact.decode(pd); err != nil {
return err
}
b.AbortedTransactions[i] = transact
}
}
recordsSize, err := pd.getInt32()
if err != nil {
return err
}
recordsDecoder, err := pd.getSubset(int(recordsSize))
if err != nil {
return err
}
b.RecordsSet = []*Records{}
for recordsDecoder.remaining() > 0 {
records := &Records{}
if err := records.decode(recordsDecoder); err != nil {
			// If we have decoded at least one record set, this is not an error
if err == ErrInsufficientData {
if len(b.RecordsSet) == 0 {
b.Partial = true
}
break
}
return err
}
partial, err := records.isPartial()
if err != nil {
return err
}
n, err := records.numRecords()
if err != nil {
return err
}
if n > 0 || (partial && len(b.RecordsSet) == 0) {
b.RecordsSet = append(b.RecordsSet, records)
if b.Records == nil {
b.Records = records
}
}
overflow, err := records.isOverflow()
if err != nil {
return err
}
if partial || overflow {
break
}
}
return nil
}
func (b *FetchResponseBlock) numRecords() (int, error) {
sum := 0
for _, records := range b.RecordsSet {
count, err := records.numRecords()
if err != nil {
return 0, err
}
sum += count
}
return sum, nil
}
func (b *FetchResponseBlock) isPartial() (bool, error) {
if b.Partial {
return true, nil
}
if len(b.RecordsSet) == 1 {
return b.RecordsSet[0].isPartial()
}
return false, nil
}
func (b *FetchResponseBlock) encode(pe packetEncoder, version int16) (err error) {
pe.putInt16(int16(b.Err))
pe.putInt64(b.HighWaterMarkOffset)
if version >= 4 {
pe.putInt64(b.LastStableOffset)
if err = pe.putArrayLength(len(b.AbortedTransactions)); err != nil {
return err
}
for _, transact := range b.AbortedTransactions {
if err = transact.encode(pe); err != nil {
return err
}
}
}
pe.push(&lengthField{})
for _, records := range b.RecordsSet {
err = records.encode(pe)
if err != nil {
return err
}
}
return pe.pop()
}
type FetchResponse struct {
Blocks map[string]map[int32]*FetchResponseBlock
ThrottleTime time.Duration
Version int16 // v1 requires 0.9+, v2 requires 0.10+
}
func (r *FetchResponse) decode(pd packetDecoder, version int16) (err error) {
r.Version = version
if r.Version >= 1 {
throttle, err := pd.getInt32()
if err != nil {
return err
}
r.ThrottleTime = time.Duration(throttle) * time.Millisecond
}
numTopics, err := pd.getArrayLength()
if err != nil {
return err
}
r.Blocks = make(map[string]map[int32]*FetchResponseBlock, numTopics)
for i := 0; i < numTopics; i++ {
name, err := pd.getString()
if err != nil {
return err
}
numBlocks, err := pd.getArrayLength()
if err != nil {
return err
}
r.Blocks[name] = make(map[int32]*FetchResponseBlock, numBlocks)
for j := 0; j < numBlocks; j++ {
id, err := pd.getInt32()
if err != nil {
return err
}
block := new(FetchResponseBlock)
err = block.decode(pd, version)
if err != nil {
return err
}
r.Blocks[name][id] = block
}
}
return nil
}
func (r *FetchResponse) encode(pe packetEncoder) (err error) {
if r.Version >= 1 {
pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
}
err = pe.putArrayLength(len(r.Blocks))
if err != nil {
return err
}
for topic, partitions := range r.Blocks {
err = pe.putString(topic)
if err != nil {
return err
}
err = pe.putArrayLength(len(partitions))
if err != nil {
return err
}
for id, block := range partitions {
pe.putInt32(id)
err = block.encode(pe, r.Version)
if err != nil {
return err
}
}
}
return nil
}
func (r *FetchResponse) key() int16 {
return 1
}
func (r *FetchResponse) version() int16 {
return r.Version
}
func (r *FetchResponse) requiredVersion() KafkaVersion {
switch r.Version {
case 1:
return V0_9_0_0
case 2:
return V0_10_0_0
case 3:
return V0_10_1_0
case 4:
return V0_11_0_0
default:
return MinVersion
}
}
func (r *FetchResponse) GetBlock(topic string, partition int32) *FetchResponseBlock {
if r.Blocks == nil {
return nil
}
if r.Blocks[topic] == nil {
return nil
}
return r.Blocks[topic][partition]
}
func (r *FetchResponse) AddError(topic string, partition int32, err KError) {
if r.Blocks == nil {
r.Blocks = make(map[string]map[int32]*FetchResponseBlock)
}
partitions, ok := r.Blocks[topic]
if !ok {
partitions = make(map[int32]*FetchResponseBlock)
r.Blocks[topic] = partitions
}
frb, ok := partitions[partition]
if !ok {
frb = new(FetchResponseBlock)
partitions[partition] = frb
}
frb.Err = err
}
func (r *FetchResponse) getOrCreateBlock(topic string, partition int32) *FetchResponseBlock {
if r.Blocks == nil {
r.Blocks = make(map[string]map[int32]*FetchResponseBlock)
}
partitions, ok := r.Blocks[topic]
if !ok {
partitions = make(map[int32]*FetchResponseBlock)
r.Blocks[topic] = partitions
}
frb, ok := partitions[partition]
if !ok {
frb = new(FetchResponseBlock)
partitions[partition] = frb
}
return frb
}
func encodeKV(key, value Encoder) ([]byte, []byte) {
var kb []byte
var vb []byte
if key != nil {
kb, _ = key.Encode()
}
if value != nil {
vb, _ = value.Encode()
}
return kb, vb
}
func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) {
frb := r.getOrCreateBlock(topic, partition)
kb, vb := encodeKV(key, value)
msg := &Message{Key: kb, Value: vb}
msgBlock := &MessageBlock{Msg: msg, Offset: offset}
if len(frb.RecordsSet) == 0 {
records := newLegacyRecords(&MessageSet{})
frb.RecordsSet = []*Records{&records}
}
set := frb.RecordsSet[0].MsgSet
set.Messages = append(set.Messages, msgBlock)
}
func (r *FetchResponse) AddRecord(topic string, partition int32, key, value Encoder, offset int64) {
frb := r.getOrCreateBlock(topic, partition)
kb, vb := encodeKV(key, value)
rec := &Record{Key: kb, Value: vb, OffsetDelta: offset}
if len(frb.RecordsSet) == 0 {
records := newDefaultRecords(&RecordBatch{Version: 2})
frb.RecordsSet = []*Records{&records}
}
batch := frb.RecordsSet[0].RecordBatch
batch.addRecord(rec)
}
func (r *FetchResponse) SetLastOffsetDelta(topic string, partition int32, offset int32) {
frb := r.getOrCreateBlock(topic, partition)
if len(frb.RecordsSet) == 0 {
records := newDefaultRecords(&RecordBatch{Version: 2})
frb.RecordsSet = []*Records{&records}
}
batch := frb.RecordsSet[0].RecordBatch
batch.LastOffsetDelta = offset
}
func (r *FetchResponse) SetLastStableOffset(topic string, partition int32, offset int64) {
frb := r.getOrCreateBlock(topic, partition)
frb.LastStableOffset = offset
}
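The helpers above exist mainly so tests can fabricate broker responses without a real cluster. A sketch, assuming the package's StringEncoder for the value:

	res := new(FetchResponse)
	// key=nil, value="hello", offset=7 on partition 0 of "events"
	res.AddMessage("events", 0, nil, StringEncoder("hello"), 7)
	block := res.GetBlock("events", 0) // non-nil: the block fabricated above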

vendor/github.com/Shopify/sarama/find_coordinator_request.go generated vendored Normal file

@ -0,0 +1,61 @@
package sarama
type CoordinatorType int8
const (
CoordinatorGroup CoordinatorType = 0
CoordinatorTransaction CoordinatorType = 1
)
type FindCoordinatorRequest struct {
Version int16
CoordinatorKey string
CoordinatorType CoordinatorType
}
func (f *FindCoordinatorRequest) encode(pe packetEncoder) error {
if err := pe.putString(f.CoordinatorKey); err != nil {
return err
}
if f.Version >= 1 {
pe.putInt8(int8(f.CoordinatorType))
}
return nil
}
func (f *FindCoordinatorRequest) decode(pd packetDecoder, version int16) (err error) {
if f.CoordinatorKey, err = pd.getString(); err != nil {
return err
}
if version >= 1 {
f.Version = version
coordinatorType, err := pd.getInt8()
if err != nil {
return err
}
f.CoordinatorType = CoordinatorType(coordinatorType)
}
return nil
}
func (f *FindCoordinatorRequest) key() int16 {
return 10
}
func (f *FindCoordinatorRequest) version() int16 {
return f.Version
}
func (f *FindCoordinatorRequest) requiredVersion() KafkaVersion {
switch f.Version {
case 1:
return V0_11_0_0
default:
return V0_8_2_0
}
}
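Looking up the coordinator for a consumer group (version 1 of the request can also target transaction coordinators) might be sketched as:

	req := &FindCoordinatorRequest{
		Version:         1,
		CoordinatorKey:  "my-group", // group id, or a transactional id with CoordinatorTransaction
		CoordinatorType: CoordinatorGroup,
	}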

vendor/github.com/Shopify/sarama/find_coordinator_response.go generated vendored Normal file

@ -0,0 +1,92 @@
package sarama
import (
"time"
)
var NoNode = &Broker{id: -1, addr: ":-1"}
type FindCoordinatorResponse struct {
Version int16
ThrottleTime time.Duration
Err KError
ErrMsg *string
Coordinator *Broker
}
func (f *FindCoordinatorResponse) decode(pd packetDecoder, version int16) (err error) {
if version >= 1 {
f.Version = version
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
f.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
}
tmp, err := pd.getInt16()
if err != nil {
return err
}
f.Err = KError(tmp)
if version >= 1 {
if f.ErrMsg, err = pd.getNullableString(); err != nil {
return err
}
}
coordinator := new(Broker)
	// The version is hardcoded to 0, as version 1 of the broker decode
	// includes the rack field, which is not present in the FindCoordinatorResponse.
if err := coordinator.decode(pd, 0); err != nil {
return err
}
if coordinator.addr == ":0" {
return nil
}
f.Coordinator = coordinator
return nil
}
func (f *FindCoordinatorResponse) encode(pe packetEncoder) error {
if f.Version >= 1 {
pe.putInt32(int32(f.ThrottleTime / time.Millisecond))
}
pe.putInt16(int16(f.Err))
if f.Version >= 1 {
if err := pe.putNullableString(f.ErrMsg); err != nil {
return err
}
}
coordinator := f.Coordinator
if coordinator == nil {
coordinator = NoNode
}
if err := coordinator.encode(pe, 0); err != nil {
return err
}
return nil
}
func (f *FindCoordinatorResponse) key() int16 {
return 10
}
func (f *FindCoordinatorResponse) version() int16 {
return f.Version
}
func (f *FindCoordinatorResponse) requiredVersion() KafkaVersion {
switch f.Version {
case 1:
return V0_11_0_0
default:
return V0_8_2_0
}
}

vendor/github.com/Shopify/sarama/heartbeat_request.go generated vendored Normal file

@ -0,0 +1,47 @@
package sarama
type HeartbeatRequest struct {
GroupId string
GenerationId int32
MemberId string
}
func (r *HeartbeatRequest) encode(pe packetEncoder) error {
if err := pe.putString(r.GroupId); err != nil {
return err
}
pe.putInt32(r.GenerationId)
if err := pe.putString(r.MemberId); err != nil {
return err
}
return nil
}
func (r *HeartbeatRequest) decode(pd packetDecoder, version int16) (err error) {
if r.GroupId, err = pd.getString(); err != nil {
return
}
if r.GenerationId, err = pd.getInt32(); err != nil {
return
}
if r.MemberId, err = pd.getString(); err != nil {
return
}
return nil
}
func (r *HeartbeatRequest) key() int16 {
return 12
}
func (r *HeartbeatRequest) version() int16 {
return 0
}
func (r *HeartbeatRequest) requiredVersion() KafkaVersion {
return V0_9_0_0
}

vendor/github.com/Shopify/sarama/heartbeat_response.go generated vendored Normal file

@ -0,0 +1,32 @@
package sarama
type HeartbeatResponse struct {
Err KError
}
func (r *HeartbeatResponse) encode(pe packetEncoder) error {
pe.putInt16(int16(r.Err))
return nil
}
func (r *HeartbeatResponse) decode(pd packetDecoder, version int16) error {
kerr, err := pd.getInt16()
if err != nil {
return err
}
r.Err = KError(kerr)
return nil
}
func (r *HeartbeatResponse) key() int16 {
return 12
}
func (r *HeartbeatResponse) version() int16 {
return 0
}
func (r *HeartbeatResponse) requiredVersion() KafkaVersion {
return V0_9_0_0
}

vendor/github.com/Shopify/sarama/init_producer_id_request.go generated vendored Normal file

@ -0,0 +1,43 @@
package sarama
import "time"
type InitProducerIDRequest struct {
TransactionalID *string
TransactionTimeout time.Duration
}
func (i *InitProducerIDRequest) encode(pe packetEncoder) error {
if err := pe.putNullableString(i.TransactionalID); err != nil {
return err
}
pe.putInt32(int32(i.TransactionTimeout / time.Millisecond))
return nil
}
func (i *InitProducerIDRequest) decode(pd packetDecoder, version int16) (err error) {
if i.TransactionalID, err = pd.getNullableString(); err != nil {
return err
}
timeout, err := pd.getInt32()
if err != nil {
return err
}
i.TransactionTimeout = time.Duration(timeout) * time.Millisecond
return nil
}
func (i *InitProducerIDRequest) key() int16 {
return 22
}
func (i *InitProducerIDRequest) version() int16 {
return 0
}
func (i *InitProducerIDRequest) requiredVersion() KafkaVersion {
return V0_11_0_0
}
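A sketch of initialising a producer id for a transactional producer (the transactional id is a made-up example; use a nil TransactionalID for an idempotent-only producer):

	txnID := "my-transactional-id"
	req := &InitProducerIDRequest{
		TransactionalID:    &txnID,
		TransactionTimeout: 60 * time.Second, // encoded as milliseconds on the wire
	}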

vendor/github.com/Shopify/sarama/init_producer_id_response.go generated vendored Normal file

@ -0,0 +1,55 @@
package sarama
import "time"
type InitProducerIDResponse struct {
ThrottleTime time.Duration
Err KError
ProducerID int64
ProducerEpoch int16
}
func (i *InitProducerIDResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(i.ThrottleTime / time.Millisecond))
pe.putInt16(int16(i.Err))
pe.putInt64(i.ProducerID)
pe.putInt16(i.ProducerEpoch)
return nil
}
func (i *InitProducerIDResponse) decode(pd packetDecoder, version int16) (err error) {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
i.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
kerr, err := pd.getInt16()
if err != nil {
return err
}
i.Err = KError(kerr)
if i.ProducerID, err = pd.getInt64(); err != nil {
return err
}
if i.ProducerEpoch, err = pd.getInt16(); err != nil {
return err
}
return nil
}
func (i *InitProducerIDResponse) key() int16 {
return 22
}
func (i *InitProducerIDResponse) version() int16 {
return 0
}
func (i *InitProducerIDResponse) requiredVersion() KafkaVersion {
return V0_11_0_0
}

vendor/github.com/Shopify/sarama/join_group_request.go generated vendored Normal file

@ -0,0 +1,163 @@
package sarama
type GroupProtocol struct {
Name string
Metadata []byte
}
func (p *GroupProtocol) decode(pd packetDecoder) (err error) {
p.Name, err = pd.getString()
if err != nil {
return err
}
p.Metadata, err = pd.getBytes()
return err
}
func (p *GroupProtocol) encode(pe packetEncoder) (err error) {
if err := pe.putString(p.Name); err != nil {
return err
}
if err := pe.putBytes(p.Metadata); err != nil {
return err
}
return nil
}
type JoinGroupRequest struct {
Version int16
GroupId string
SessionTimeout int32
RebalanceTimeout int32
MemberId string
ProtocolType string
GroupProtocols map[string][]byte // deprecated; use OrderedGroupProtocols
OrderedGroupProtocols []*GroupProtocol
}
func (r *JoinGroupRequest) encode(pe packetEncoder) error {
if err := pe.putString(r.GroupId); err != nil {
return err
}
pe.putInt32(r.SessionTimeout)
if r.Version >= 1 {
pe.putInt32(r.RebalanceTimeout)
}
if err := pe.putString(r.MemberId); err != nil {
return err
}
if err := pe.putString(r.ProtocolType); err != nil {
return err
}
if len(r.GroupProtocols) > 0 {
if len(r.OrderedGroupProtocols) > 0 {
return PacketDecodingError{"cannot specify both GroupProtocols and OrderedGroupProtocols on JoinGroupRequest"}
}
if err := pe.putArrayLength(len(r.GroupProtocols)); err != nil {
return err
}
for name, metadata := range r.GroupProtocols {
if err := pe.putString(name); err != nil {
return err
}
if err := pe.putBytes(metadata); err != nil {
return err
}
}
} else {
if err := pe.putArrayLength(len(r.OrderedGroupProtocols)); err != nil {
return err
}
for _, protocol := range r.OrderedGroupProtocols {
if err := protocol.encode(pe); err != nil {
return err
}
}
}
return nil
}
func (r *JoinGroupRequest) decode(pd packetDecoder, version int16) (err error) {
r.Version = version
if r.GroupId, err = pd.getString(); err != nil {
return
}
if r.SessionTimeout, err = pd.getInt32(); err != nil {
return
}
if version >= 1 {
if r.RebalanceTimeout, err = pd.getInt32(); err != nil {
return err
}
}
if r.MemberId, err = pd.getString(); err != nil {
return
}
if r.ProtocolType, err = pd.getString(); err != nil {
return
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
if n == 0 {
return nil
}
r.GroupProtocols = make(map[string][]byte)
for i := 0; i < n; i++ {
protocol := &GroupProtocol{}
if err := protocol.decode(pd); err != nil {
return err
}
r.GroupProtocols[protocol.Name] = protocol.Metadata
r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, protocol)
}
return nil
}
func (r *JoinGroupRequest) key() int16 {
return 11
}
func (r *JoinGroupRequest) version() int16 {
return r.Version
}
func (r *JoinGroupRequest) requiredVersion() KafkaVersion {
switch r.Version {
case 2:
return V0_11_0_0
case 1:
return V0_10_1_0
default:
return V0_9_0_0
}
}
func (r *JoinGroupRequest) AddGroupProtocol(name string, metadata []byte) {
r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, &GroupProtocol{
Name: name,
Metadata: metadata,
})
}
func (r *JoinGroupRequest) AddGroupProtocolMetadata(name string, metadata *ConsumerGroupMemberMetadata) error {
bin, err := encode(metadata, nil)
if err != nil {
return err
}
r.AddGroupProtocol(name, bin)
return nil
}
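Joining a consumer group with a single "range" protocol could be sketched as follows (ConsumerGroupMemberMetadata is defined elsewhere in the package; the group and topic names are made up):

	req := &JoinGroupRequest{
		Version:        1,
		GroupId:        "my-group",
		SessionTimeout: 30000, // ms
		MemberId:       "",    // empty on first join; the broker assigns one
		ProtocolType:   "consumer",
	}
	meta := &ConsumerGroupMemberMetadata{Topics: []string{"events"}}
	if err := req.AddGroupProtocolMetadata("range", meta); err != nil {
		// handle the metadata encoding error
	}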

vendor/github.com/Shopify/sarama/join_group_response.go generated vendored Normal file

@ -0,0 +1,135 @@
package sarama
type JoinGroupResponse struct {
Version int16
ThrottleTime int32
Err KError
GenerationId int32
GroupProtocol string
LeaderId string
MemberId string
Members map[string][]byte
}
func (r *JoinGroupResponse) GetMembers() (map[string]ConsumerGroupMemberMetadata, error) {
members := make(map[string]ConsumerGroupMemberMetadata, len(r.Members))
for id, bin := range r.Members {
meta := new(ConsumerGroupMemberMetadata)
if err := decode(bin, meta); err != nil {
return nil, err
}
members[id] = *meta
}
return members, nil
}
func (r *JoinGroupResponse) encode(pe packetEncoder) error {
if r.Version >= 2 {
pe.putInt32(r.ThrottleTime)
}
pe.putInt16(int16(r.Err))
pe.putInt32(r.GenerationId)
if err := pe.putString(r.GroupProtocol); err != nil {
return err
}
if err := pe.putString(r.LeaderId); err != nil {
return err
}
if err := pe.putString(r.MemberId); err != nil {
return err
}
if err := pe.putArrayLength(len(r.Members)); err != nil {
return err
}
for memberId, memberMetadata := range r.Members {
if err := pe.putString(memberId); err != nil {
return err
}
if err := pe.putBytes(memberMetadata); err != nil {
return err
}
}
return nil
}
func (r *JoinGroupResponse) decode(pd packetDecoder, version int16) (err error) {
r.Version = version
if version >= 2 {
if r.ThrottleTime, err = pd.getInt32(); err != nil {
return
}
}
kerr, err := pd.getInt16()
if err != nil {
return err
}
r.Err = KError(kerr)
if r.GenerationId, err = pd.getInt32(); err != nil {
return
}
if r.GroupProtocol, err = pd.getString(); err != nil {
return
}
if r.LeaderId, err = pd.getString(); err != nil {
return
}
if r.MemberId, err = pd.getString(); err != nil {
return
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
if n == 0 {
return nil
}
r.Members = make(map[string][]byte)
for i := 0; i < n; i++ {
memberId, err := pd.getString()
if err != nil {
return err
}
memberMetadata, err := pd.getBytes()
if err != nil {
return err
}
r.Members[memberId] = memberMetadata
}
return nil
}
func (r *JoinGroupResponse) key() int16 {
return 11
}
func (r *JoinGroupResponse) version() int16 {
return r.Version
}
func (r *JoinGroupResponse) requiredVersion() KafkaVersion {
switch r.Version {
case 2:
return V0_11_0_0
case 1:
return V0_10_1_0
default:
return V0_9_0_0
}
}
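On the member elected leader, the opaque metadata blobs in Members are typically decoded with GetMembers before computing an assignment; a sketch (assignTopics is a hypothetical assignment step):

	members, err := resp.GetMembers() // resp is a *JoinGroupResponse from the broker
	if err != nil {
		return err
	}
	for id, meta := range members {
		assignTopics(id, meta.Topics) // hypothetical: record each member's subscription
	}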

vendor/github.com/Shopify/sarama/leave_group_request.go generated vendored Normal file

@ -0,0 +1,40 @@
package sarama
type LeaveGroupRequest struct {
GroupId string
MemberId string
}
func (r *LeaveGroupRequest) encode(pe packetEncoder) error {
if err := pe.putString(r.GroupId); err != nil {
return err
}
if err := pe.putString(r.MemberId); err != nil {
return err
}
return nil
}
func (r *LeaveGroupRequest) decode(pd packetDecoder, version int16) (err error) {
if r.GroupId, err = pd.getString(); err != nil {
return
}
if r.MemberId, err = pd.getString(); err != nil {
return
}
return nil
}
func (r *LeaveGroupRequest) key() int16 {
return 13
}
func (r *LeaveGroupRequest) version() int16 {
return 0
}
func (r *LeaveGroupRequest) requiredVersion() KafkaVersion {
return V0_9_0_0
}

vendor/github.com/Shopify/sarama/leave_group_response.go generated vendored Normal file

@ -0,0 +1,32 @@
package sarama
type LeaveGroupResponse struct {
Err KError
}
func (r *LeaveGroupResponse) encode(pe packetEncoder) error {
pe.putInt16(int16(r.Err))
return nil
}
func (r *LeaveGroupResponse) decode(pd packetDecoder, version int16) (err error) {
kerr, err := pd.getInt16()
if err != nil {
return err
}
r.Err = KError(kerr)
return nil
}
func (r *LeaveGroupResponse) key() int16 {
return 13
}
func (r *LeaveGroupResponse) version() int16 {
return 0
}
func (r *LeaveGroupResponse) requiredVersion() KafkaVersion {
return V0_9_0_0
}

vendor/github.com/Shopify/sarama/length_field.go generated vendored Normal file

@ -0,0 +1,82 @@
package sarama
import "encoding/binary"
// lengthField implements the PushEncoder and PushDecoder interfaces for calculating 4-byte lengths.
type lengthField struct {
startOffset int
length int32
}
func (l *lengthField) decode(pd packetDecoder) error {
var err error
l.length, err = pd.getInt32()
if err != nil {
return err
}
if l.length > int32(pd.remaining()) {
return ErrInsufficientData
}
return nil
}
func (l *lengthField) saveOffset(in int) {
l.startOffset = in
}
func (l *lengthField) reserveLength() int {
return 4
}
func (l *lengthField) run(curOffset int, buf []byte) error {
binary.BigEndian.PutUint32(buf[l.startOffset:], uint32(curOffset-l.startOffset-4))
return nil
}
func (l *lengthField) check(curOffset int, buf []byte) error {
if int32(curOffset-l.startOffset-4) != l.length {
return PacketDecodingError{"length field invalid"}
}
return nil
}
type varintLengthField struct {
startOffset int
length int64
}
func (l *varintLengthField) decode(pd packetDecoder) error {
var err error
l.length, err = pd.getVarint()
return err
}
func (l *varintLengthField) saveOffset(in int) {
l.startOffset = in
}
func (l *varintLengthField) adjustLength(currOffset int) int {
oldFieldSize := l.reserveLength()
l.length = int64(currOffset - l.startOffset - oldFieldSize)
return l.reserveLength() - oldFieldSize
}
func (l *varintLengthField) reserveLength() int {
var tmp [binary.MaxVarintLen64]byte
return binary.PutVarint(tmp[:], l.length)
}
func (l *varintLengthField) run(curOffset int, buf []byte) error {
binary.PutVarint(buf[l.startOffset:], l.length)
return nil
}
func (l *varintLengthField) check(curOffset int, buf []byte) error {
if int64(curOffset-l.startOffset-l.reserveLength()) != l.length {
return PacketDecodingError{"length field invalid"}
}
return nil
}
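The push/pop machinery back-fills a 4-byte big-endian length once the wrapped payload has been written. Outside the encoder abstraction, the same pattern reduces to this standalone sketch:

	buf := make([]byte, 4, 4+len(payload)) // reserve 4 bytes for the length prefix
	buf = append(buf, payload...)          // write the wrapped content
	binary.BigEndian.PutUint32(buf, uint32(len(buf)-4))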

vendor/github.com/Shopify/sarama/list_groups_request.go generated vendored Normal file

@ -0,0 +1,24 @@
package sarama
type ListGroupsRequest struct {
}
func (r *ListGroupsRequest) encode(pe packetEncoder) error {
return nil
}
func (r *ListGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
return nil
}
func (r *ListGroupsRequest) key() int16 {
return 16
}
func (r *ListGroupsRequest) version() int16 {
return 0
}
func (r *ListGroupsRequest) requiredVersion() KafkaVersion {
return V0_9_0_0
}

vendor/github.com/Shopify/sarama/list_groups_response.go generated vendored Normal file

@ -0,0 +1,69 @@
package sarama
type ListGroupsResponse struct {
Err KError
Groups map[string]string
}
func (r *ListGroupsResponse) encode(pe packetEncoder) error {
pe.putInt16(int16(r.Err))
if err := pe.putArrayLength(len(r.Groups)); err != nil {
return err
}
for groupId, protocolType := range r.Groups {
if err := pe.putString(groupId); err != nil {
return err
}
if err := pe.putString(protocolType); err != nil {
return err
}
}
return nil
}
func (r *ListGroupsResponse) decode(pd packetDecoder, version int16) error {
kerr, err := pd.getInt16()
if err != nil {
return err
}
r.Err = KError(kerr)
n, err := pd.getArrayLength()
if err != nil {
return err
}
if n == 0 {
return nil
}
r.Groups = make(map[string]string)
for i := 0; i < n; i++ {
groupId, err := pd.getString()
if err != nil {
return err
}
protocolType, err := pd.getString()
if err != nil {
return err
}
r.Groups[groupId] = protocolType
}
return nil
}
func (r *ListGroupsResponse) key() int16 {
return 16
}
func (r *ListGroupsResponse) version() int16 {
return 0
}
func (r *ListGroupsResponse) requiredVersion() KafkaVersion {
return V0_9_0_0
}

vendor/github.com/Shopify/sarama/message.go generated vendored Normal file

@ -0,0 +1,223 @@
package sarama
import (
"bytes"
"compress/gzip"
"fmt"
"io/ioutil"
"time"
"github.com/eapache/go-xerial-snappy"
"github.com/pierrec/lz4"
)
// CompressionCodec represents the various compression codecs recognized by Kafka in messages.
type CompressionCodec int8
// only the last two bits are really used
const compressionCodecMask int8 = 0x03
const (
CompressionNone CompressionCodec = 0
CompressionGZIP CompressionCodec = 1
CompressionSnappy CompressionCodec = 2
CompressionLZ4 CompressionCodec = 3
)
func (cc CompressionCodec) String() string {
return []string{
"none",
"gzip",
"snappy",
"lz4",
}[int(cc)]
}
// CompressionLevelDefault is the constant to use in CompressionLevel
// to select the default compression level for any codec. The value is
// chosen so that it does not collide with any existing compression level.
const CompressionLevelDefault = -1000
type Message struct {
Codec CompressionCodec // codec used to compress the message contents
CompressionLevel int // compression level
Key []byte // the message key, may be nil
Value []byte // the message contents
Set *MessageSet // the message set a message might wrap
Version int8 // v1 requires Kafka 0.10
Timestamp time.Time // the timestamp of the message (version 1+ only)
compressedCache []byte
compressedSize int // used for computing the compression ratio metrics
}
func (m *Message) encode(pe packetEncoder) error {
pe.push(newCRC32Field(crcIEEE))
pe.putInt8(m.Version)
attributes := int8(m.Codec) & compressionCodecMask
pe.putInt8(attributes)
if m.Version >= 1 {
if err := (Timestamp{&m.Timestamp}).encode(pe); err != nil {
return err
}
}
err := pe.putBytes(m.Key)
if err != nil {
return err
}
var payload []byte
if m.compressedCache != nil {
payload = m.compressedCache
m.compressedCache = nil
} else if m.Value != nil {
switch m.Codec {
case CompressionNone:
payload = m.Value
case CompressionGZIP:
var buf bytes.Buffer
var writer *gzip.Writer
if m.CompressionLevel != CompressionLevelDefault {
writer, err = gzip.NewWriterLevel(&buf, m.CompressionLevel)
if err != nil {
return err
}
} else {
writer = gzip.NewWriter(&buf)
}
if _, err = writer.Write(m.Value); err != nil {
return err
}
if err = writer.Close(); err != nil {
return err
}
m.compressedCache = buf.Bytes()
payload = m.compressedCache
case CompressionSnappy:
tmp := snappy.Encode(m.Value)
m.compressedCache = tmp
payload = m.compressedCache
case CompressionLZ4:
var buf bytes.Buffer
writer := lz4.NewWriter(&buf)
if _, err = writer.Write(m.Value); err != nil {
return err
}
if err = writer.Close(); err != nil {
return err
}
m.compressedCache = buf.Bytes()
payload = m.compressedCache
default:
return PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", m.Codec)}
}
// Keep in mind the compressed payload size for metric gathering
m.compressedSize = len(payload)
}
if err = pe.putBytes(payload); err != nil {
return err
}
return pe.pop()
}
func (m *Message) decode(pd packetDecoder) (err error) {
err = pd.push(newCRC32Field(crcIEEE))
if err != nil {
return err
}
m.Version, err = pd.getInt8()
if err != nil {
return err
}
if m.Version > 1 {
return PacketDecodingError{fmt.Sprintf("unknown magic byte (%v)", m.Version)}
}
attribute, err := pd.getInt8()
if err != nil {
return err
}
m.Codec = CompressionCodec(attribute & compressionCodecMask)
if m.Version == 1 {
if err := (Timestamp{&m.Timestamp}).decode(pd); err != nil {
return err
}
}
m.Key, err = pd.getBytes()
if err != nil {
return err
}
m.Value, err = pd.getBytes()
if err != nil {
return err
}
// Required for deep equal assertion during tests but might be useful
// for future metrics about the compression ratio in fetch requests
m.compressedSize = len(m.Value)
switch m.Codec {
case CompressionNone:
// nothing to do
case CompressionGZIP:
if m.Value == nil {
break
}
reader, err := gzip.NewReader(bytes.NewReader(m.Value))
if err != nil {
return err
}
if m.Value, err = ioutil.ReadAll(reader); err != nil {
return err
}
if err := m.decodeSet(); err != nil {
return err
}
case CompressionSnappy:
if m.Value == nil {
break
}
if m.Value, err = snappy.Decode(m.Value); err != nil {
return err
}
if err := m.decodeSet(); err != nil {
return err
}
case CompressionLZ4:
if m.Value == nil {
break
}
reader := lz4.NewReader(bytes.NewReader(m.Value))
if m.Value, err = ioutil.ReadAll(reader); err != nil {
return err
}
if err := m.decodeSet(); err != nil {
return err
}
default:
return PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", m.Codec)}
}
return pd.pop()
}
// decodes a message set from a previously encoded bulk message
func (m *Message) decodeSet() (err error) {
pd := realDecoder{raw: m.Value}
m.Set = &MessageSet{}
return m.Set.decode(&pd)
}
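Each compression branch in encode/decode is a plain round-trip through the codec in question; the gzip path, for instance, is equivalent to this standalone sketch (error handling elided for brevity):

	var buf bytes.Buffer
	w := gzip.NewWriter(&buf)
	_, _ = w.Write(value) // compress the message value
	_ = w.Close()         // flush; compressed bytes are now in buf
	r, _ := gzip.NewReader(bytes.NewReader(buf.Bytes()))
	plain, _ := ioutil.ReadAll(r) // plain is byte-equal to value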

vendor/github.com/Shopify/sarama/message_set.go generated vendored Normal file

@ -0,0 +1,108 @@
package sarama
type MessageBlock struct {
Offset int64
Msg *Message
}
// Messages is a convenience helper which returns all the messages
// wrapped in this block, recursing into a nested message set if present.
func (msb *MessageBlock) Messages() []*MessageBlock {
if msb.Msg.Set != nil {
return msb.Msg.Set.Messages
}
return []*MessageBlock{msb}
}
func (msb *MessageBlock) encode(pe packetEncoder) error {
pe.putInt64(msb.Offset)
pe.push(&lengthField{})
err := msb.Msg.encode(pe)
if err != nil {
return err
}
return pe.pop()
}
func (msb *MessageBlock) decode(pd packetDecoder) (err error) {
if msb.Offset, err = pd.getInt64(); err != nil {
return err
}
if err = pd.push(&lengthField{}); err != nil {
return err
}
msb.Msg = new(Message)
if err = msb.Msg.decode(pd); err != nil {
return err
}
if err = pd.pop(); err != nil {
return err
}
return nil
}
type MessageSet struct {
PartialTrailingMessage bool // whether the set on the wire contained an incomplete trailing MessageBlock
OverflowMessage bool // whether the set on the wire contained an overflow message
Messages []*MessageBlock
}
func (ms *MessageSet) encode(pe packetEncoder) error {
for i := range ms.Messages {
err := ms.Messages[i].encode(pe)
if err != nil {
return err
}
}
return nil
}
func (ms *MessageSet) decode(pd packetDecoder) (err error) {
ms.Messages = nil
for pd.remaining() > 0 {
magic, err := magicValue(pd)
if err != nil {
if err == ErrInsufficientData {
ms.PartialTrailingMessage = true
return nil
}
return err
}
if magic > 1 {
return nil
}
msb := new(MessageBlock)
err = msb.decode(pd)
switch err {
case nil:
ms.Messages = append(ms.Messages, msb)
case ErrInsufficientData:
// As an optimization the server is allowed to return a partial message at the
// end of the message set. Clients should handle this case. So we just ignore such things.
if msb.Offset == -1 {
// This is an overflow message caused by chunked down conversion
ms.OverflowMessage = true
} else {
ms.PartialTrailingMessage = true
}
return nil
default:
return err
}
}
return nil
}
func (ms *MessageSet) addMessage(msg *Message) {
block := new(MessageBlock)
block.Msg = msg
ms.Messages = append(ms.Messages, block)
}

vendor/github.com/Shopify/sarama/metadata_request.go generated vendored Normal file

@ -0,0 +1,88 @@
package sarama
type MetadataRequest struct {
Version int16
Topics []string
AllowAutoTopicCreation bool
}
func (r *MetadataRequest) encode(pe packetEncoder) error {
if r.Version < 0 || r.Version > 5 {
return PacketEncodingError{"invalid or unsupported MetadataRequest version field"}
}
if r.Version == 0 || len(r.Topics) > 0 {
err := pe.putArrayLength(len(r.Topics))
if err != nil {
return err
}
for i := range r.Topics {
err = pe.putString(r.Topics[i])
if err != nil {
return err
}
}
} else {
pe.putInt32(-1)
}
if r.Version > 3 {
pe.putBool(r.AllowAutoTopicCreation)
}
return nil
}
func (r *MetadataRequest) decode(pd packetDecoder, version int16) error {
r.Version = version
size, err := pd.getInt32()
if err != nil {
return err
}
	if size <= 0 {
		return nil
	}
	r.Topics = make([]string, size)
	for i := range r.Topics {
		topic, err := pd.getString()
		if err != nil {
			return err
		}
		r.Topics[i] = topic
	}
if r.Version > 3 {
autoCreation, err := pd.getBool()
if err != nil {
return err
}
r.AllowAutoTopicCreation = autoCreation
}
return nil
}
func (r *MetadataRequest) key() int16 {
return 3
}
func (r *MetadataRequest) version() int16 {
return r.Version
}
func (r *MetadataRequest) requiredVersion() KafkaVersion {
switch r.Version {
case 1:
return V0_10_0_0
case 2:
return V0_10_1_0
case 3, 4:
return V0_11_0_0
case 5:
return V1_0_0_0
default:
return MinVersion
}
}
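Requesting metadata for specific topics on a modern broker, with auto-creation enabled, might be sketched as (on v1+ an empty Topics slice means "all topics"):

	req := &MetadataRequest{
		Version:                5,
		Topics:                 []string{"events"},
		AllowAutoTopicCreation: true, // only encoded for Version > 3
	}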

vendor/github.com/Shopify/sarama/metadata_response.go generated vendored Normal file

@ -0,0 +1,321 @@
package sarama
type PartitionMetadata struct {
Err KError
ID int32
Leader int32
Replicas []int32
Isr []int32
OfflineReplicas []int32
}
func (pm *PartitionMetadata) decode(pd packetDecoder, version int16) (err error) {
tmp, err := pd.getInt16()
if err != nil {
return err
}
pm.Err = KError(tmp)
pm.ID, err = pd.getInt32()
if err != nil {
return err
}
pm.Leader, err = pd.getInt32()
if err != nil {
return err
}
pm.Replicas, err = pd.getInt32Array()
if err != nil {
return err
}
pm.Isr, err = pd.getInt32Array()
if err != nil {
return err
}
if version >= 5 {
pm.OfflineReplicas, err = pd.getInt32Array()
if err != nil {
return err
}
}
return nil
}
func (pm *PartitionMetadata) encode(pe packetEncoder, version int16) (err error) {
pe.putInt16(int16(pm.Err))
pe.putInt32(pm.ID)
pe.putInt32(pm.Leader)
err = pe.putInt32Array(pm.Replicas)
if err != nil {
return err
}
err = pe.putInt32Array(pm.Isr)
if err != nil {
return err
}
if version >= 5 {
err = pe.putInt32Array(pm.OfflineReplicas)
if err != nil {
return err
}
}
return nil
}
type TopicMetadata struct {
Err KError
Name string
IsInternal bool // Only valid for Version >= 1
Partitions []*PartitionMetadata
}
func (tm *TopicMetadata) decode(pd packetDecoder, version int16) (err error) {
tmp, err := pd.getInt16()
if err != nil {
return err
}
tm.Err = KError(tmp)
tm.Name, err = pd.getString()
if err != nil {
return err
}
if version >= 1 {
tm.IsInternal, err = pd.getBool()
if err != nil {
return err
}
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
tm.Partitions = make([]*PartitionMetadata, n)
for i := 0; i < n; i++ {
tm.Partitions[i] = new(PartitionMetadata)
err = tm.Partitions[i].decode(pd, version)
if err != nil {
return err
}
}
return nil
}
func (tm *TopicMetadata) encode(pe packetEncoder, version int16) (err error) {
pe.putInt16(int16(tm.Err))
err = pe.putString(tm.Name)
if err != nil {
return err
}
if version >= 1 {
pe.putBool(tm.IsInternal)
}
err = pe.putArrayLength(len(tm.Partitions))
if err != nil {
return err
}
for _, pm := range tm.Partitions {
err = pm.encode(pe, version)
if err != nil {
return err
}
}
return nil
}
type MetadataResponse struct {
Version int16
ThrottleTimeMs int32
Brokers []*Broker
ClusterID *string
ControllerID int32
Topics []*TopicMetadata
}
func (r *MetadataResponse) decode(pd packetDecoder, version int16) (err error) {
r.Version = version
if version >= 3 {
r.ThrottleTimeMs, err = pd.getInt32()
if err != nil {
return err
}
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
r.Brokers = make([]*Broker, n)
for i := 0; i < n; i++ {
r.Brokers[i] = new(Broker)
err = r.Brokers[i].decode(pd, version)
if err != nil {
return err
}
}
if version >= 2 {
r.ClusterID, err = pd.getNullableString()
if err != nil {
return err
}
}
if version >= 1 {
r.ControllerID, err = pd.getInt32()
if err != nil {
return err
}
} else {
r.ControllerID = -1
}
n, err = pd.getArrayLength()
if err != nil {
return err
}
r.Topics = make([]*TopicMetadata, n)
for i := 0; i < n; i++ {
r.Topics[i] = new(TopicMetadata)
err = r.Topics[i].decode(pd, version)
if err != nil {
return err
}
}
return nil
}
func (r *MetadataResponse) encode(pe packetEncoder) error {
if r.Version >= 3 {
pe.putInt32(r.ThrottleTimeMs)
}
err := pe.putArrayLength(len(r.Brokers))
if err != nil {
return err
}
for _, broker := range r.Brokers {
err = broker.encode(pe, r.Version)
if err != nil {
return err
}
}
if r.Version >= 2 {
err := pe.putNullableString(r.ClusterID)
if err != nil {
return err
}
}
if r.Version >= 1 {
pe.putInt32(r.ControllerID)
}
err = pe.putArrayLength(len(r.Topics))
if err != nil {
return err
}
for _, tm := range r.Topics {
err = tm.encode(pe, r.Version)
if err != nil {
return err
}
}
return nil
}
func (r *MetadataResponse) key() int16 {
return 3
}
func (r *MetadataResponse) version() int16 {
return r.Version
}
func (r *MetadataResponse) requiredVersion() KafkaVersion {
switch r.Version {
case 1:
return V0_10_0_0
case 2:
return V0_10_1_0
case 3, 4:
return V0_11_0_0
case 5:
return V1_0_0_0
default:
return MinVersion
}
}
// testing API
func (r *MetadataResponse) AddBroker(addr string, id int32) {
r.Brokers = append(r.Brokers, &Broker{id: id, addr: addr})
}
func (r *MetadataResponse) AddTopic(topic string, err KError) *TopicMetadata {
var tmatch *TopicMetadata
for _, tm := range r.Topics {
if tm.Name == topic {
tmatch = tm
goto foundTopic
}
}
tmatch = new(TopicMetadata)
tmatch.Name = topic
r.Topics = append(r.Topics, tmatch)
foundTopic:
tmatch.Err = err
return tmatch
}
func (r *MetadataResponse) AddTopicPartition(topic string, partition, brokerID int32, replicas, isr []int32, err KError) {
tmatch := r.AddTopic(topic, ErrNoError)
var pmatch *PartitionMetadata
for _, pm := range tmatch.Partitions {
if pm.ID == partition {
pmatch = pm
goto foundPartition
}
}
pmatch = new(PartitionMetadata)
pmatch.ID = partition
tmatch.Partitions = append(tmatch.Partitions, pmatch)
foundPartition:
pmatch.Leader = brokerID
pmatch.Replicas = replicas
pmatch.Isr = isr
pmatch.Err = err
}
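Together these testing helpers let a test paint an arbitrary cluster picture, for example:

	meta := new(MetadataResponse)
	meta.AddBroker("localhost:9092", 1)
	meta.AddTopicPartition("events", 0, 1, []int32{1}, []int32{1}, ErrNoError)
	meta.AddTopicPartition("events", 1, 1, []int32{1}, []int32{1}, ErrLeaderNotAvailable)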

vendor/github.com/Shopify/sarama/metrics.go generated vendored Normal file

@ -0,0 +1,51 @@
package sarama
import (
"fmt"
"strings"
"github.com/rcrowley/go-metrics"
)
// Use exponentially decaying reservoir for sampling histograms with the same defaults as the Java library:
// 1028 elements, which offers a 99.9% confidence level with a 5% margin of error assuming a normal distribution,
// and an alpha factor of 0.015, which heavily biases the reservoir to the past 5 minutes of measurements.
// See https://github.com/dropwizard/metrics/blob/v3.1.0/metrics-core/src/main/java/com/codahale/metrics/ExponentiallyDecayingReservoir.java#L38
const (
metricsReservoirSize = 1028
metricsAlphaFactor = 0.015
)
func getOrRegisterHistogram(name string, r metrics.Registry) metrics.Histogram {
return r.GetOrRegister(name, func() metrics.Histogram {
return metrics.NewHistogram(metrics.NewExpDecaySample(metricsReservoirSize, metricsAlphaFactor))
}).(metrics.Histogram)
}
func getMetricNameForBroker(name string, broker *Broker) string {
// Use broker id like the Java client as it does not contain '.' or ':' characters that
	// can be interpreted as special characters by monitoring tools (e.g. Graphite)
return fmt.Sprintf(name+"-for-broker-%d", broker.ID())
}
func getOrRegisterBrokerMeter(name string, broker *Broker, r metrics.Registry) metrics.Meter {
return metrics.GetOrRegisterMeter(getMetricNameForBroker(name, broker), r)
}
func getOrRegisterBrokerHistogram(name string, broker *Broker, r metrics.Registry) metrics.Histogram {
return getOrRegisterHistogram(getMetricNameForBroker(name, broker), r)
}
func getMetricNameForTopic(name string, topic string) string {
// Convert dot to _ since reporters like Graphite typically use dot to represent hierarchy
// cf. KAFKA-1902 and KAFKA-2337
return fmt.Sprintf(name+"-for-topic-%s", strings.Replace(topic, ".", "_", -1))
}
func getOrRegisterTopicMeter(name string, topic string, r metrics.Registry) metrics.Meter {
return metrics.GetOrRegisterMeter(getMetricNameForTopic(name, topic), r)
}
func getOrRegisterTopicHistogram(name string, topic string, r metrics.Registry) metrics.Histogram {
return getOrRegisterHistogram(getMetricNameForTopic(name, topic), r)
}

vendor/github.com/Shopify/sarama/mockbroker.go generated vendored Normal file

@ -0,0 +1,330 @@
package sarama
import (
"bytes"
"encoding/binary"
"fmt"
"io"
"net"
"reflect"
"strconv"
"sync"
"time"
"github.com/davecgh/go-spew/spew"
)
const (
expectationTimeout = 500 * time.Millisecond
)
type requestHandlerFunc func(req *request) (res encoder)
// RequestNotifierFunc is invoked when a mock broker processes a request successfully
// and provides the number of bytes read and written.
type RequestNotifierFunc func(bytesRead, bytesWritten int)
// MockBroker is a mock Kafka broker that is used in unit tests. It is exposed
// to facilitate testing of higher level or specialized consumers and producers
// built on top of Sarama. Note that it does not 'mimic' the Kafka API protocol,
// but rather provides a facility to do that. It takes care of the TCP
// transport, request unmarshaling and response marshaling, and leaves it to
// the test writer to program MockBroker behaviour that is correct according
// to the Kafka API protocol.
//
// MockBroker is implemented as a TCP server listening on a kernel-selected
// localhost port that can accept many connections. It reads Kafka requests
// from that connection and returns responses programmed by the SetHandlerByMap
// function. If a MockBroker receives a request that it has no programmed
// response for, then it returns nothing and the request times out.
//
// A set of MockRequest builders to define mappings used by MockBroker is
// provided by Sarama. But users can develop MockRequests of their own and use
// them along with or instead of the standard ones.
//
// When running tests with MockBroker it is strongly recommended to specify
// a timeout to `go test` so that if the broker hangs waiting for a response,
// the test panics.
//
// It is not necessary to prefix message length or correlation ID to your
// response bytes; the server does that automatically as a convenience.
type MockBroker struct {
brokerID int32
port int32
closing chan none
stopper chan none
expectations chan encoder
listener net.Listener
t TestReporter
latency time.Duration
handler requestHandlerFunc
notifier RequestNotifierFunc
history []RequestResponse
lock sync.Mutex
}
// RequestResponse represents a Request/Response pair processed by MockBroker.
type RequestResponse struct {
Request protocolBody
Response encoder
}
// SetLatency makes the broker pause for the specified period before every
// reply.
func (b *MockBroker) SetLatency(latency time.Duration) {
b.latency = latency
}
// SetHandlerByMap defines mapping of Request types to MockResponses. When a
// request is received by the broker, it looks up the request type in the map
// and uses the found MockResponse instance to generate an appropriate reply.
// If the request type is not found in the map then nothing is sent.
func (b *MockBroker) SetHandlerByMap(handlerMap map[string]MockResponse) {
b.setHandler(func(req *request) (res encoder) {
reqTypeName := reflect.TypeOf(req.body).Elem().Name()
mockResponse := handlerMap[reqTypeName]
if mockResponse == nil {
return nil
}
return mockResponse.For(req.body)
})
}
// SetNotifier sets a function that will get invoked whenever a request has been
// processed successfully and will provide the number of bytes read and written.
func (b *MockBroker) SetNotifier(notifier RequestNotifierFunc) {
b.lock.Lock()
b.notifier = notifier
b.lock.Unlock()
}
// BrokerID returns broker ID assigned to the broker.
func (b *MockBroker) BrokerID() int32 {
return b.brokerID
}
// History returns a slice of RequestResponse pairs in the order they were
// processed by the broker. Note that in case of multiple connections to the
// broker the order expected by a test can be different from the order recorded
// in the history, unless some synchronization is implemented in the test.
func (b *MockBroker) History() []RequestResponse {
b.lock.Lock()
history := make([]RequestResponse, len(b.history))
copy(history, b.history)
b.lock.Unlock()
return history
}
// Port returns the TCP port number the broker is listening for requests on.
func (b *MockBroker) Port() int32 {
return b.port
}
// Addr returns the broker connection string in the form "<address>:<port>".
func (b *MockBroker) Addr() string {
return b.listener.Addr().String()
}
// Close terminates the broker blocking until it stops internal goroutines and
// releases all resources.
func (b *MockBroker) Close() {
close(b.expectations)
if len(b.expectations) > 0 {
buf := bytes.NewBufferString(fmt.Sprintf("mockbroker/%d: not all expectations were satisfied! Still waiting on:\n", b.BrokerID()))
for e := range b.expectations {
_, _ = buf.WriteString(spew.Sdump(e))
}
b.t.Error(buf.String())
}
close(b.closing)
<-b.stopper
}
// setHandler sets the specified function as the request handler. Whenever
// a mock broker reads a request from the wire it passes the request to the
// function and sends back whatever the handler function returns.
func (b *MockBroker) setHandler(handler requestHandlerFunc) {
b.lock.Lock()
b.handler = handler
b.lock.Unlock()
}
func (b *MockBroker) serverLoop() {
defer close(b.stopper)
var err error
var conn net.Conn
go func() {
<-b.closing
err := b.listener.Close()
if err != nil {
b.t.Error(err)
}
}()
wg := &sync.WaitGroup{}
i := 0
for conn, err = b.listener.Accept(); err == nil; conn, err = b.listener.Accept() {
wg.Add(1)
go b.handleRequests(conn, i, wg)
i++
}
wg.Wait()
Logger.Printf("*** mockbroker/%d: listener closed, err=%v", b.BrokerID(), err)
}
func (b *MockBroker) handleRequests(conn net.Conn, idx int, wg *sync.WaitGroup) {
defer wg.Done()
defer func() {
_ = conn.Close()
}()
Logger.Printf("*** mockbroker/%d/%d: connection opened", b.BrokerID(), idx)
var err error
abort := make(chan none)
defer close(abort)
go func() {
select {
case <-b.closing:
_ = conn.Close()
case <-abort:
}
}()
resHeader := make([]byte, 8)
for {
req, bytesRead, err := decodeRequest(conn)
if err != nil {
Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(req))
b.serverError(err)
break
}
if b.latency > 0 {
time.Sleep(b.latency)
}
b.lock.Lock()
res := b.handler(req)
b.history = append(b.history, RequestResponse{req.body, res})
b.lock.Unlock()
if res == nil {
Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(req))
continue
}
Logger.Printf("*** mockbroker/%d/%d: served %v -> %v", b.brokerID, idx, req, res)
encodedRes, err := encode(res, nil)
if err != nil {
b.serverError(err)
break
}
if len(encodedRes) == 0 {
b.lock.Lock()
if b.notifier != nil {
b.notifier(bytesRead, 0)
}
b.lock.Unlock()
continue
}
binary.BigEndian.PutUint32(resHeader, uint32(len(encodedRes)+4))
binary.BigEndian.PutUint32(resHeader[4:], uint32(req.correlationID))
if _, err = conn.Write(resHeader); err != nil {
b.serverError(err)
break
}
if _, err = conn.Write(encodedRes); err != nil {
b.serverError(err)
break
}
b.lock.Lock()
if b.notifier != nil {
b.notifier(bytesRead, len(resHeader)+len(encodedRes))
}
b.lock.Unlock()
}
Logger.Printf("*** mockbroker/%d/%d: connection closed, err=%v", b.BrokerID(), idx, err)
}
func (b *MockBroker) defaultRequestHandler(req *request) (res encoder) {
select {
case res, ok := <-b.expectations:
if !ok {
return nil
}
return res
case <-time.After(expectationTimeout):
return nil
}
}
func (b *MockBroker) serverError(err error) {
isConnectionClosedError := false
if _, ok := err.(*net.OpError); ok {
isConnectionClosedError = true
} else if err == io.EOF {
isConnectionClosedError = true
} else if err.Error() == "use of closed network connection" {
isConnectionClosedError = true
}
if isConnectionClosedError {
return
}
b.t.Error(err)
}
// NewMockBroker launches a fake Kafka broker. It takes a TestReporter as provided by the
// test framework and a broker ID, and listens on an ephemeral localhost port. If an error
// occurs it is simply logged to the TestReporter and the broker exits.
func NewMockBroker(t TestReporter, brokerID int32) *MockBroker {
return NewMockBrokerAddr(t, brokerID, "localhost:0")
}
// NewMockBrokerAddr behaves like NewMockBroker but listens on the address you give
// it rather than an ephemeral port.
func NewMockBrokerAddr(t TestReporter, brokerID int32, addr string) *MockBroker {
listener, err := net.Listen("tcp", addr)
if err != nil {
t.Fatal(err)
}
return NewMockBrokerListener(t, brokerID, listener)
}
// NewMockBrokerListener behaves like NewMockBrokerAddr but accepts connections on the specified listener.
func NewMockBrokerListener(t TestReporter, brokerID int32, listener net.Listener) *MockBroker {
var err error
broker := &MockBroker{
closing: make(chan none),
stopper: make(chan none),
t: t,
brokerID: brokerID,
expectations: make(chan encoder, 512),
listener: listener,
}
broker.handler = broker.defaultRequestHandler
Logger.Printf("*** mockbroker/%d listening on %s\n", brokerID, broker.listener.Addr().String())
_, portStr, err := net.SplitHostPort(broker.listener.Addr().String())
if err != nil {
t.Fatal(err)
}
tmp, err := strconv.ParseInt(portStr, 10, 32)
if err != nil {
t.Fatal(err)
}
broker.port = int32(tmp)
go broker.serverLoop()
return broker
}
func (b *MockBroker) Returns(e encoder) {
b.expectations <- e
}
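// Illustrative sketch (not part of the vendored file): one way a test could
// drive MockBroker. The *testing.T supplied by the test framework satisfies
// TestReporter; the single metadata expectation queued below is an arbitrary
// assumption.
func exampleMockBrokerUsage(t TestReporter) {
	mb := NewMockBroker(t, 1) // broker ID 1, ephemeral localhost port
	defer mb.Close()          // Close verifies all queued expectations were consumed

	// Queue one canned response; the next request read off the wire receives it.
	mb.Returns(new(MetadataResponse))

	// A real test would now point a client at mb.Addr() and exercise it.
	_ = mb.Addr()
}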

727
vendor/github.com/Shopify/sarama/mockresponses.go generated vendored Normal file

@ -0,0 +1,727 @@
package sarama
import (
"fmt"
)
// TestReporter has methods matching Go's testing.T to avoid importing
// `testing` in the main part of the library.
type TestReporter interface {
Error(...interface{})
Errorf(string, ...interface{})
Fatal(...interface{})
Fatalf(string, ...interface{})
}
// MockResponse is a response builder interface; it defines one method that
// generates a response based on a request body. MockResponses are used to
// program the behavior of MockBroker in tests.
type MockResponse interface {
For(reqBody versionedDecoder) (res encoder)
}
// MockWrapper is a mock response builder that returns a particular concrete
// response regardless of the actual request passed to the `For` method.
type MockWrapper struct {
res encoder
}
func (mw *MockWrapper) For(reqBody versionedDecoder) (res encoder) {
return mw.res
}
func NewMockWrapper(res encoder) *MockWrapper {
return &MockWrapper{res: res}
}
// MockSequence is a mock response builder that is created from a sequence of
// concrete responses. Each time a `MockBroker` calls its `For` method the
// next response from the sequence is returned. Once the end of the sequence
// is reached, the last element is returned for every subsequent call.
type MockSequence struct {
responses []MockResponse
}
func NewMockSequence(responses ...interface{}) *MockSequence {
ms := &MockSequence{}
ms.responses = make([]MockResponse, len(responses))
for i, res := range responses {
switch res := res.(type) {
case MockResponse:
ms.responses[i] = res
case encoder:
ms.responses[i] = NewMockWrapper(res)
default:
panic(fmt.Sprintf("Unexpected response type: %T", res))
}
}
return ms
}
func (mc *MockSequence) For(reqBody versionedDecoder) (res encoder) {
res = mc.responses[0].For(reqBody)
if len(mc.responses) > 1 {
mc.responses = mc.responses[1:]
}
return res
}
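// Illustrative sketch (not vendored code): NewMockSequence accepts a mix of
// MockResponse builders and bare encoders; bare encoders are wrapped in a
// MockWrapper automatically, as the type switch above shows.
func exampleMockSequence(t TestReporter) MockResponse {
	return NewMockSequence(
		NewMockMetadataResponse(t), // already a MockResponse
		new(MetadataResponse),      // a bare encoder, wrapped via NewMockWrapper
	)
}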
// MockMetadataResponse is a `MetadataResponse` builder.
type MockMetadataResponse struct {
controllerID int32
leaders map[string]map[int32]int32
brokers map[string]int32
t TestReporter
}
func NewMockMetadataResponse(t TestReporter) *MockMetadataResponse {
return &MockMetadataResponse{
leaders: make(map[string]map[int32]int32),
brokers: make(map[string]int32),
t: t,
}
}
func (mmr *MockMetadataResponse) SetLeader(topic string, partition, brokerID int32) *MockMetadataResponse {
partitions := mmr.leaders[topic]
if partitions == nil {
partitions = make(map[int32]int32)
mmr.leaders[topic] = partitions
}
partitions[partition] = brokerID
return mmr
}
func (mmr *MockMetadataResponse) SetBroker(addr string, brokerID int32) *MockMetadataResponse {
mmr.brokers[addr] = brokerID
return mmr
}
func (mmr *MockMetadataResponse) SetController(brokerID int32) *MockMetadataResponse {
mmr.controllerID = brokerID
return mmr
}
func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoder {
metadataRequest := reqBody.(*MetadataRequest)
metadataResponse := &MetadataResponse{
Version: metadataRequest.version(),
ControllerID: mmr.controllerID,
}
for addr, brokerID := range mmr.brokers {
metadataResponse.AddBroker(addr, brokerID)
}
if len(metadataRequest.Topics) == 0 {
for topic, partitions := range mmr.leaders {
for partition, brokerID := range partitions {
metadataResponse.AddTopicPartition(topic, partition, brokerID, nil, nil, ErrNoError)
}
}
return metadataResponse
}
for _, topic := range metadataRequest.Topics {
for partition, brokerID := range mmr.leaders[topic] {
metadataResponse.AddTopicPartition(topic, partition, brokerID, nil, nil, ErrNoError)
}
}
return metadataResponse
}
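// Illustrative sketch (not vendored code): the Set* methods return the
// receiver, so a metadata mock can be configured fluently. The topic name
// "my_topic" is an arbitrary assumption.
func exampleMetadataMock(t TestReporter, mb *MockBroker) *MockMetadataResponse {
	return NewMockMetadataResponse(t).
		SetBroker(mb.Addr(), mb.BrokerID()).
		SetLeader("my_topic", 0, mb.BrokerID()).
		SetController(mb.BrokerID())
}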
// MockOffsetResponse is an `OffsetResponse` builder.
type MockOffsetResponse struct {
offsets map[string]map[int32]map[int64]int64
t TestReporter
version int16
}
func NewMockOffsetResponse(t TestReporter) *MockOffsetResponse {
return &MockOffsetResponse{
offsets: make(map[string]map[int32]map[int64]int64),
t: t,
}
}
func (mor *MockOffsetResponse) SetVersion(version int16) *MockOffsetResponse {
mor.version = version
return mor
}
func (mor *MockOffsetResponse) SetOffset(topic string, partition int32, time, offset int64) *MockOffsetResponse {
partitions := mor.offsets[topic]
if partitions == nil {
partitions = make(map[int32]map[int64]int64)
mor.offsets[topic] = partitions
}
times := partitions[partition]
if times == nil {
times = make(map[int64]int64)
partitions[partition] = times
}
times[time] = offset
return mor
}
func (mor *MockOffsetResponse) For(reqBody versionedDecoder) encoder {
offsetRequest := reqBody.(*OffsetRequest)
offsetResponse := &OffsetResponse{Version: mor.version}
for topic, partitions := range offsetRequest.blocks {
for partition, block := range partitions {
offset := mor.getOffset(topic, partition, block.time)
offsetResponse.AddTopicPartition(topic, partition, offset)
}
}
return offsetResponse
}
func (mor *MockOffsetResponse) getOffset(topic string, partition int32, time int64) int64 {
partitions := mor.offsets[topic]
if partitions == nil {
mor.t.Errorf("missing topic: %s", topic)
}
times := partitions[partition]
if times == nil {
mor.t.Errorf("missing partition: %d", partition)
}
offset, ok := times[time]
if !ok {
mor.t.Errorf("missing time: %d", time)
}
return offset
}
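// Illustrative sketch (not vendored code): programming the offset mock so a
// v1 lookup for the newest offset of partition 0 answers 42. OffsetNewest is
// defined elsewhere in this package as the special time -1; all other values
// are assumptions.
func exampleOffsetMock(t TestReporter) *MockOffsetResponse {
	return NewMockOffsetResponse(t).
		SetVersion(1).
		SetOffset("my_topic", 0, OffsetNewest, 42)
}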
// MockFetchResponse is a `FetchResponse` builder.
type MockFetchResponse struct {
messages map[string]map[int32]map[int64]Encoder
highWaterMarks map[string]map[int32]int64
t TestReporter
batchSize int
version int16
}
func NewMockFetchResponse(t TestReporter, batchSize int) *MockFetchResponse {
return &MockFetchResponse{
messages: make(map[string]map[int32]map[int64]Encoder),
highWaterMarks: make(map[string]map[int32]int64),
t: t,
batchSize: batchSize,
}
}
func (mfr *MockFetchResponse) SetVersion(version int16) *MockFetchResponse {
mfr.version = version
return mfr
}
func (mfr *MockFetchResponse) SetMessage(topic string, partition int32, offset int64, msg Encoder) *MockFetchResponse {
partitions := mfr.messages[topic]
if partitions == nil {
partitions = make(map[int32]map[int64]Encoder)
mfr.messages[topic] = partitions
}
messages := partitions[partition]
if messages == nil {
messages = make(map[int64]Encoder)
partitions[partition] = messages
}
messages[offset] = msg
return mfr
}
func (mfr *MockFetchResponse) SetHighWaterMark(topic string, partition int32, offset int64) *MockFetchResponse {
partitions := mfr.highWaterMarks[topic]
if partitions == nil {
partitions = make(map[int32]int64)
mfr.highWaterMarks[topic] = partitions
}
partitions[partition] = offset
return mfr
}
func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoder {
fetchRequest := reqBody.(*FetchRequest)
res := &FetchResponse{
Version: mfr.version,
}
for topic, partitions := range fetchRequest.blocks {
for partition, block := range partitions {
initialOffset := block.fetchOffset
offset := initialOffset
maxOffset := initialOffset + int64(mfr.getMessageCount(topic, partition))
for i := 0; i < mfr.batchSize && offset < maxOffset; {
msg := mfr.getMessage(topic, partition, offset)
if msg != nil {
res.AddMessage(topic, partition, nil, msg, offset)
i++
}
offset++
}
fb := res.GetBlock(topic, partition)
if fb == nil {
res.AddError(topic, partition, ErrNoError)
fb = res.GetBlock(topic, partition)
}
fb.HighWaterMarkOffset = mfr.getHighWaterMark(topic, partition)
}
}
return res
}
func (mfr *MockFetchResponse) getMessage(topic string, partition int32, offset int64) Encoder {
partitions := mfr.messages[topic]
if partitions == nil {
return nil
}
messages := partitions[partition]
if messages == nil {
return nil
}
return messages[offset]
}
func (mfr *MockFetchResponse) getMessageCount(topic string, partition int32) int {
partitions := mfr.messages[topic]
if partitions == nil {
return 0
}
messages := partitions[partition]
if messages == nil {
return 0
}
return len(messages)
}
func (mfr *MockFetchResponse) getHighWaterMark(topic string, partition int32) int64 {
partitions := mfr.highWaterMarks[topic]
if partitions == nil {
return 0
}
return partitions[partition]
}
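// Illustrative sketch (not vendored code): seeding a fetch mock with two
// messages and the matching high water mark. StringEncoder is defined
// elsewhere in this package; the topic and offsets are assumptions.
func exampleFetchMock(t TestReporter) *MockFetchResponse {
	return NewMockFetchResponse(t, 10).
		SetMessage("my_topic", 0, 0, StringEncoder("hello")).
		SetMessage("my_topic", 0, 1, StringEncoder("world")).
		SetHighWaterMark("my_topic", 0, 2)
}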
// MockConsumerMetadataResponse is a `ConsumerMetadataResponse` builder.
type MockConsumerMetadataResponse struct {
coordinators map[string]interface{}
t TestReporter
}
func NewMockConsumerMetadataResponse(t TestReporter) *MockConsumerMetadataResponse {
return &MockConsumerMetadataResponse{
coordinators: make(map[string]interface{}),
t: t,
}
}
func (mr *MockConsumerMetadataResponse) SetCoordinator(group string, broker *MockBroker) *MockConsumerMetadataResponse {
mr.coordinators[group] = broker
return mr
}
func (mr *MockConsumerMetadataResponse) SetError(group string, kerror KError) *MockConsumerMetadataResponse {
mr.coordinators[group] = kerror
return mr
}
func (mr *MockConsumerMetadataResponse) For(reqBody versionedDecoder) encoder {
req := reqBody.(*ConsumerMetadataRequest)
group := req.ConsumerGroup
res := &ConsumerMetadataResponse{}
v := mr.coordinators[group]
switch v := v.(type) {
case *MockBroker:
res.Coordinator = &Broker{id: v.BrokerID(), addr: v.Addr()}
case KError:
res.Err = v
}
return res
}
// MockFindCoordinatorResponse is a `FindCoordinatorResponse` builder.
type MockFindCoordinatorResponse struct {
groupCoordinators map[string]interface{}
transCoordinators map[string]interface{}
t TestReporter
}
func NewMockFindCoordinatorResponse(t TestReporter) *MockFindCoordinatorResponse {
return &MockFindCoordinatorResponse{
groupCoordinators: make(map[string]interface{}),
transCoordinators: make(map[string]interface{}),
t: t,
}
}
func (mr *MockFindCoordinatorResponse) SetCoordinator(coordinatorType CoordinatorType, group string, broker *MockBroker) *MockFindCoordinatorResponse {
switch coordinatorType {
case CoordinatorGroup:
mr.groupCoordinators[group] = broker
case CoordinatorTransaction:
mr.transCoordinators[group] = broker
}
return mr
}
func (mr *MockFindCoordinatorResponse) SetError(coordinatorType CoordinatorType, group string, kerror KError) *MockFindCoordinatorResponse {
switch coordinatorType {
case CoordinatorGroup:
mr.groupCoordinators[group] = kerror
case CoordinatorTransaction:
mr.transCoordinators[group] = kerror
}
return mr
}
func (mr *MockFindCoordinatorResponse) For(reqBody versionedDecoder) encoder {
req := reqBody.(*FindCoordinatorRequest)
res := &FindCoordinatorResponse{}
var v interface{}
switch req.CoordinatorType {
case CoordinatorGroup:
v = mr.groupCoordinators[req.CoordinatorKey]
case CoordinatorTransaction:
v = mr.transCoordinators[req.CoordinatorKey]
}
switch v := v.(type) {
case *MockBroker:
res.Coordinator = &Broker{id: v.BrokerID(), addr: v.Addr()}
case KError:
res.Err = v
}
return res
}
// MockOffsetCommitResponse is an `OffsetCommitResponse` builder.
type MockOffsetCommitResponse struct {
errors map[string]map[string]map[int32]KError
t TestReporter
}
func NewMockOffsetCommitResponse(t TestReporter) *MockOffsetCommitResponse {
return &MockOffsetCommitResponse{t: t}
}
func (mr *MockOffsetCommitResponse) SetError(group, topic string, partition int32, kerror KError) *MockOffsetCommitResponse {
if mr.errors == nil {
mr.errors = make(map[string]map[string]map[int32]KError)
}
topics := mr.errors[group]
if topics == nil {
topics = make(map[string]map[int32]KError)
mr.errors[group] = topics
}
partitions := topics[topic]
if partitions == nil {
partitions = make(map[int32]KError)
topics[topic] = partitions
}
partitions[partition] = kerror
return mr
}
func (mr *MockOffsetCommitResponse) For(reqBody versionedDecoder) encoder {
req := reqBody.(*OffsetCommitRequest)
group := req.ConsumerGroup
res := &OffsetCommitResponse{}
for topic, partitions := range req.blocks {
for partition := range partitions {
res.AddError(topic, partition, mr.getError(group, topic, partition))
}
}
return res
}
func (mr *MockOffsetCommitResponse) getError(group, topic string, partition int32) KError {
topics := mr.errors[group]
if topics == nil {
return ErrNoError
}
partitions := topics[topic]
if partitions == nil {
return ErrNoError
}
kerror, ok := partitions[partition]
if !ok {
return ErrNoError
}
return kerror
}
// MockProduceResponse is a `ProduceResponse` builder.
type MockProduceResponse struct {
version int16
errors map[string]map[int32]KError
t TestReporter
}
func NewMockProduceResponse(t TestReporter) *MockProduceResponse {
return &MockProduceResponse{t: t}
}
func (mr *MockProduceResponse) SetVersion(version int16) *MockProduceResponse {
mr.version = version
return mr
}
func (mr *MockProduceResponse) SetError(topic string, partition int32, kerror KError) *MockProduceResponse {
if mr.errors == nil {
mr.errors = make(map[string]map[int32]KError)
}
partitions := mr.errors[topic]
if partitions == nil {
partitions = make(map[int32]KError)
mr.errors[topic] = partitions
}
partitions[partition] = kerror
return mr
}
func (mr *MockProduceResponse) For(reqBody versionedDecoder) encoder {
req := reqBody.(*ProduceRequest)
res := &ProduceResponse{
Version: mr.version,
}
for topic, partitions := range req.records {
for partition := range partitions {
res.AddTopicPartition(topic, partition, mr.getError(topic, partition))
}
}
return res
}
func (mr *MockProduceResponse) getError(topic string, partition int32) KError {
partitions := mr.errors[topic]
if partitions == nil {
return ErrNoError
}
kerror, ok := partitions[partition]
if !ok {
return ErrNoError
}
return kerror
}
// MockOffsetFetchResponse is an `OffsetFetchResponse` builder.
type MockOffsetFetchResponse struct {
offsets map[string]map[string]map[int32]*OffsetFetchResponseBlock
t TestReporter
}
func NewMockOffsetFetchResponse(t TestReporter) *MockOffsetFetchResponse {
return &MockOffsetFetchResponse{t: t}
}
func (mr *MockOffsetFetchResponse) SetOffset(group, topic string, partition int32, offset int64, metadata string, kerror KError) *MockOffsetFetchResponse {
if mr.offsets == nil {
mr.offsets = make(map[string]map[string]map[int32]*OffsetFetchResponseBlock)
}
topics := mr.offsets[group]
if topics == nil {
topics = make(map[string]map[int32]*OffsetFetchResponseBlock)
mr.offsets[group] = topics
}
partitions := topics[topic]
if partitions == nil {
partitions = make(map[int32]*OffsetFetchResponseBlock)
topics[topic] = partitions
}
partitions[partition] = &OffsetFetchResponseBlock{offset, metadata, kerror}
return mr
}
func (mr *MockOffsetFetchResponse) For(reqBody versionedDecoder) encoder {
req := reqBody.(*OffsetFetchRequest)
group := req.ConsumerGroup
res := &OffsetFetchResponse{}
for topic, partitions := range mr.offsets[group] {
for partition, block := range partitions {
res.AddBlock(topic, partition, block)
}
}
return res
}
type MockCreateTopicsResponse struct {
t TestReporter
}
func NewMockCreateTopicsResponse(t TestReporter) *MockCreateTopicsResponse {
return &MockCreateTopicsResponse{t: t}
}
func (mr *MockCreateTopicsResponse) For(reqBody versionedDecoder) encoder {
req := reqBody.(*CreateTopicsRequest)
res := &CreateTopicsResponse{}
res.TopicErrors = make(map[string]*TopicError)
for topic := range req.TopicDetails {
res.TopicErrors[topic] = &TopicError{Err: ErrNoError}
}
return res
}
type MockDeleteTopicsResponse struct {
t TestReporter
}
func NewMockDeleteTopicsResponse(t TestReporter) *MockDeleteTopicsResponse {
return &MockDeleteTopicsResponse{t: t}
}
func (mr *MockDeleteTopicsResponse) For(reqBody versionedDecoder) encoder {
req := reqBody.(*DeleteTopicsRequest)
res := &DeleteTopicsResponse{}
res.TopicErrorCodes = make(map[string]KError)
for _, topic := range req.Topics {
res.TopicErrorCodes[topic] = ErrNoError
}
return res
}
type MockCreatePartitionsResponse struct {
t TestReporter
}
func NewMockCreatePartitionsResponse(t TestReporter) *MockCreatePartitionsResponse {
return &MockCreatePartitionsResponse{t: t}
}
func (mr *MockCreatePartitionsResponse) For(reqBody versionedDecoder) encoder {
req := reqBody.(*CreatePartitionsRequest)
res := &CreatePartitionsResponse{}
res.TopicPartitionErrors = make(map[string]*TopicPartitionError)
for topic := range req.TopicPartitions {
res.TopicPartitionErrors[topic] = &TopicPartitionError{Err: ErrNoError}
}
return res
}
type MockDeleteRecordsResponse struct {
t TestReporter
}
func NewMockDeleteRecordsResponse(t TestReporter) *MockDeleteRecordsResponse {
return &MockDeleteRecordsResponse{t: t}
}
func (mr *MockDeleteRecordsResponse) For(reqBody versionedDecoder) encoder {
req := reqBody.(*DeleteRecordsRequest)
res := &DeleteRecordsResponse{}
res.Topics = make(map[string]*DeleteRecordsResponseTopic)
for topic, deleteRecordRequestTopic := range req.Topics {
partitions := make(map[int32]*DeleteRecordsResponsePartition)
for partition := range deleteRecordRequestTopic.PartitionOffsets {
partitions[partition] = &DeleteRecordsResponsePartition{Err: ErrNoError}
}
res.Topics[topic] = &DeleteRecordsResponseTopic{Partitions: partitions}
}
return res
}
type MockDescribeConfigsResponse struct {
t TestReporter
}
func NewMockDescribeConfigsResponse(t TestReporter) *MockDescribeConfigsResponse {
return &MockDescribeConfigsResponse{t: t}
}
func (mr *MockDescribeConfigsResponse) For(reqBody versionedDecoder) encoder {
req := reqBody.(*DescribeConfigsRequest)
res := &DescribeConfigsResponse{}
var configEntries []*ConfigEntry
configEntries = append(configEntries, &ConfigEntry{Name: "my_topic",
Value: "my_topic",
ReadOnly: true,
Default: true,
Sensitive: false,
})
for _, r := range req.Resources {
res.Resources = append(res.Resources, &ResourceResponse{Name: r.Name, Configs: configEntries})
}
return res
}
type MockAlterConfigsResponse struct {
t TestReporter
}
func NewMockAlterConfigsResponse(t TestReporter) *MockAlterConfigsResponse {
return &MockAlterConfigsResponse{t: t}
}
func (mr *MockAlterConfigsResponse) For(reqBody versionedDecoder) encoder {
req := reqBody.(*AlterConfigsRequest)
res := &AlterConfigsResponse{}
for _, r := range req.Resources {
res.Resources = append(res.Resources, &AlterConfigsResourceResponse{Name: r.Name,
Type: TopicResource,
ErrorMsg: "",
})
}
return res
}
type MockCreateAclsResponse struct {
t TestReporter
}
func NewMockCreateAclsResponse(t TestReporter) *MockCreateAclsResponse {
return &MockCreateAclsResponse{t: t}
}
func (mr *MockCreateAclsResponse) For(reqBody versionedDecoder) encoder {
req := reqBody.(*CreateAclsRequest)
res := &CreateAclsResponse{}
for range req.AclCreations {
res.AclCreationResponses = append(res.AclCreationResponses, &AclCreationResponse{Err: ErrNoError})
}
return res
}
type MockListAclsResponse struct {
t TestReporter
}
func NewMockListAclsResponse(t TestReporter) *MockListAclsResponse {
return &MockListAclsResponse{t: t}
}
func (mr *MockListAclsResponse) For(reqBody versionedDecoder) encoder {
req := reqBody.(*DescribeAclsRequest)
res := &DescribeAclsResponse{}
res.Err = ErrNoError
acl := &ResourceAcls{}
acl.Resource.ResourceName = *req.ResourceName
acl.Resource.ResourceType = req.ResourceType
acl.Acls = append(acl.Acls, &Acl{})
res.ResourceAcls = append(res.ResourceAcls, acl)
return res
}
type MockDeleteAclsResponse struct {
t TestReporter
}
func NewMockDeleteAclsResponse(t TestReporter) *MockDeleteAclsResponse {
return &MockDeleteAclsResponse{t: t}
}
func (mr *MockDeleteAclsResponse) For(reqBody versionedDecoder) encoder {
req := reqBody.(*DeleteAclsRequest)
res := &DeleteAclsResponse{}
for range req.Filters {
response := &FilterResponse{Err: ErrNoError}
response.MatchingAcls = append(response.MatchingAcls, &MatchingAcl{Err: ErrNoError})
res.FilterResponses = append(res.FilterResponses, response)
}
return res
}

204
vendor/github.com/Shopify/sarama/offset_commit_request.go generated vendored Normal file

@ -0,0 +1,204 @@
package sarama
import "errors"
// ReceiveTime is a special value for the timestamp field of Offset Commit Requests which
// tells the broker to set the timestamp to the time at which the request was received.
// The timestamp is only used if message version 1 is used, which requires kafka 0.8.2.
const ReceiveTime int64 = -1
// GroupGenerationUndefined is a special value for the group generation field of
// Offset Commit Requests that should be used when a consumer group does not rely
// on Kafka for partition management.
const GroupGenerationUndefined = -1
type offsetCommitRequestBlock struct {
offset int64
timestamp int64
metadata string
}
func (b *offsetCommitRequestBlock) encode(pe packetEncoder, version int16) error {
pe.putInt64(b.offset)
if version == 1 {
pe.putInt64(b.timestamp)
} else if b.timestamp != 0 {
Logger.Println("Non-zero timestamp specified for OffsetCommitRequest not v1, it will be ignored")
}
return pe.putString(b.metadata)
}
func (b *offsetCommitRequestBlock) decode(pd packetDecoder, version int16) (err error) {
if b.offset, err = pd.getInt64(); err != nil {
return err
}
if version == 1 {
if b.timestamp, err = pd.getInt64(); err != nil {
return err
}
}
b.metadata, err = pd.getString()
return err
}
type OffsetCommitRequest struct {
ConsumerGroup string
ConsumerGroupGeneration int32 // v1 or later
ConsumerID string // v1 or later
RetentionTime int64 // v2 or later
// Version can be:
// - 0 (kafka 0.8.1 and later)
// - 1 (kafka 0.8.2 and later)
// - 2 (kafka 0.9.0 and later)
Version int16
blocks map[string]map[int32]*offsetCommitRequestBlock
}
func (r *OffsetCommitRequest) encode(pe packetEncoder) error {
if r.Version < 0 || r.Version > 2 {
return PacketEncodingError{"invalid or unsupported OffsetCommitRequest version field"}
}
if err := pe.putString(r.ConsumerGroup); err != nil {
return err
}
if r.Version >= 1 {
pe.putInt32(r.ConsumerGroupGeneration)
if err := pe.putString(r.ConsumerID); err != nil {
return err
}
} else {
if r.ConsumerGroupGeneration != 0 {
Logger.Println("Non-zero ConsumerGroupGeneration specified for OffsetCommitRequest v0, it will be ignored")
}
if r.ConsumerID != "" {
Logger.Println("Non-empty ConsumerID specified for OffsetCommitRequest v0, it will be ignored")
}
}
if r.Version >= 2 {
pe.putInt64(r.RetentionTime)
} else if r.RetentionTime != 0 {
Logger.Println("Non-zero RetentionTime specified for OffsetCommitRequest version <2, it will be ignored")
}
if err := pe.putArrayLength(len(r.blocks)); err != nil {
return err
}
for topic, partitions := range r.blocks {
if err := pe.putString(topic); err != nil {
return err
}
if err := pe.putArrayLength(len(partitions)); err != nil {
return err
}
for partition, block := range partitions {
pe.putInt32(partition)
if err := block.encode(pe, r.Version); err != nil {
return err
}
}
}
return nil
}
func (r *OffsetCommitRequest) decode(pd packetDecoder, version int16) (err error) {
r.Version = version
if r.ConsumerGroup, err = pd.getString(); err != nil {
return err
}
if r.Version >= 1 {
if r.ConsumerGroupGeneration, err = pd.getInt32(); err != nil {
return err
}
if r.ConsumerID, err = pd.getString(); err != nil {
return err
}
}
if r.Version >= 2 {
if r.RetentionTime, err = pd.getInt64(); err != nil {
return err
}
}
topicCount, err := pd.getArrayLength()
if err != nil {
return err
}
if topicCount == 0 {
return nil
}
r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock)
for i := 0; i < topicCount; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
partitionCount, err := pd.getArrayLength()
if err != nil {
return err
}
r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock)
for j := 0; j < partitionCount; j++ {
partition, err := pd.getInt32()
if err != nil {
return err
}
block := &offsetCommitRequestBlock{}
if err := block.decode(pd, r.Version); err != nil {
return err
}
r.blocks[topic][partition] = block
}
}
return nil
}
func (r *OffsetCommitRequest) key() int16 {
return 8
}
func (r *OffsetCommitRequest) version() int16 {
return r.Version
}
func (r *OffsetCommitRequest) requiredVersion() KafkaVersion {
switch r.Version {
case 1:
return V0_8_2_0
case 2:
return V0_9_0_0
default:
return MinVersion
}
}
func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset int64, timestamp int64, metadata string) {
if r.blocks == nil {
r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock)
}
if r.blocks[topic] == nil {
r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock)
}
r.blocks[topic][partitionID] = &offsetCommitRequestBlock{offset, timestamp, metadata}
}
func (r *OffsetCommitRequest) Offset(topic string, partitionID int32) (int64, string, error) {
partitions := r.blocks[topic]
if partitions == nil {
return 0, "", errors.New("No such offset")
}
block := partitions[partitionID]
if block == nil {
return 0, "", errors.New("No such offset")
}
return block.offset, block.metadata, nil
}
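// Illustrative sketch (not vendored code): building a v2 commit request by
// hand. Group, topic, offset and retention values are arbitrary assumptions;
// the timestamp argument is zero because it is only encoded for v1.
func exampleOffsetCommitRequest() *OffsetCommitRequest {
	r := &OffsetCommitRequest{
		Version:       2,
		ConsumerGroup: "my_group",
		RetentionTime: 3600000, // milliseconds; v2 or later only
	}
	r.AddBlock("my_topic", 0, 42, 0, "")
	return r
}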

85
vendor/github.com/Shopify/sarama/offset_commit_response.go generated vendored Normal file

@ -0,0 +1,85 @@
package sarama
type OffsetCommitResponse struct {
Errors map[string]map[int32]KError
}
func (r *OffsetCommitResponse) AddError(topic string, partition int32, kerror KError) {
if r.Errors == nil {
r.Errors = make(map[string]map[int32]KError)
}
partitions := r.Errors[topic]
if partitions == nil {
partitions = make(map[int32]KError)
r.Errors[topic] = partitions
}
partitions[partition] = kerror
}
func (r *OffsetCommitResponse) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(r.Errors)); err != nil {
return err
}
for topic, partitions := range r.Errors {
if err := pe.putString(topic); err != nil {
return err
}
if err := pe.putArrayLength(len(partitions)); err != nil {
return err
}
for partition, kerror := range partitions {
pe.putInt32(partition)
pe.putInt16(int16(kerror))
}
}
return nil
}
func (r *OffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) {
numTopics, err := pd.getArrayLength()
if err != nil || numTopics == 0 {
return err
}
r.Errors = make(map[string]map[int32]KError, numTopics)
for i := 0; i < numTopics; i++ {
name, err := pd.getString()
if err != nil {
return err
}
numErrors, err := pd.getArrayLength()
if err != nil {
return err
}
r.Errors[name] = make(map[int32]KError, numErrors)
for j := 0; j < numErrors; j++ {
id, err := pd.getInt32()
if err != nil {
return err
}
tmp, err := pd.getInt16()
if err != nil {
return err
}
r.Errors[name][id] = KError(tmp)
}
}
return nil
}
func (r *OffsetCommitResponse) key() int16 {
return 8
}
func (r *OffsetCommitResponse) version() int16 {
return 0
}
func (r *OffsetCommitResponse) requiredVersion() KafkaVersion {
return MinVersion
}

81
vendor/github.com/Shopify/sarama/offset_fetch_request.go generated vendored Normal file

@ -0,0 +1,81 @@
package sarama
type OffsetFetchRequest struct {
ConsumerGroup string
Version int16
partitions map[string][]int32
}
func (r *OffsetFetchRequest) encode(pe packetEncoder) (err error) {
if r.Version < 0 || r.Version > 1 {
return PacketEncodingError{"invalid or unsupported OffsetFetchRequest version field"}
}
if err = pe.putString(r.ConsumerGroup); err != nil {
return err
}
if err = pe.putArrayLength(len(r.partitions)); err != nil {
return err
}
for topic, partitions := range r.partitions {
if err = pe.putString(topic); err != nil {
return err
}
if err = pe.putInt32Array(partitions); err != nil {
return err
}
}
return nil
}
func (r *OffsetFetchRequest) decode(pd packetDecoder, version int16) (err error) {
r.Version = version
if r.ConsumerGroup, err = pd.getString(); err != nil {
return err
}
partitionCount, err := pd.getArrayLength()
if err != nil {
return err
}
if partitionCount == 0 {
return nil
}
r.partitions = make(map[string][]int32)
for i := 0; i < partitionCount; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
partitions, err := pd.getInt32Array()
if err != nil {
return err
}
r.partitions[topic] = partitions
}
return nil
}
func (r *OffsetFetchRequest) key() int16 {
return 9
}
func (r *OffsetFetchRequest) version() int16 {
return r.Version
}
func (r *OffsetFetchRequest) requiredVersion() KafkaVersion {
switch r.Version {
case 1:
return V0_8_2_0
default:
return MinVersion
}
}
func (r *OffsetFetchRequest) AddPartition(topic string, partitionID int32) {
if r.partitions == nil {
r.partitions = make(map[string][]int32)
}
r.partitions[topic] = append(r.partitions[topic], partitionID)
}
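// Illustrative sketch (not vendored code): fetching the committed offsets of
// two partitions of one topic. The group and topic names are assumptions.
func exampleOffsetFetchRequest() *OffsetFetchRequest {
	r := &OffsetFetchRequest{ConsumerGroup: "my_group", Version: 1}
	r.AddPartition("my_topic", 0)
	r.AddPartition("my_topic", 1)
	return r
}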

143
vendor/github.com/Shopify/sarama/offset_fetch_response.go generated vendored Normal file

@ -0,0 +1,143 @@
package sarama
type OffsetFetchResponseBlock struct {
Offset int64
Metadata string
Err KError
}
func (b *OffsetFetchResponseBlock) decode(pd packetDecoder) (err error) {
b.Offset, err = pd.getInt64()
if err != nil {
return err
}
b.Metadata, err = pd.getString()
if err != nil {
return err
}
tmp, err := pd.getInt16()
if err != nil {
return err
}
b.Err = KError(tmp)
return nil
}
func (b *OffsetFetchResponseBlock) encode(pe packetEncoder) (err error) {
pe.putInt64(b.Offset)
err = pe.putString(b.Metadata)
if err != nil {
return err
}
pe.putInt16(int16(b.Err))
return nil
}
type OffsetFetchResponse struct {
Blocks map[string]map[int32]*OffsetFetchResponseBlock
}
func (r *OffsetFetchResponse) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(r.Blocks)); err != nil {
return err
}
for topic, partitions := range r.Blocks {
if err := pe.putString(topic); err != nil {
return err
}
if err := pe.putArrayLength(len(partitions)); err != nil {
return err
}
for partition, block := range partitions {
pe.putInt32(partition)
if err := block.encode(pe); err != nil {
return err
}
}
}
return nil
}
func (r *OffsetFetchResponse) decode(pd packetDecoder, version int16) (err error) {
numTopics, err := pd.getArrayLength()
if err != nil || numTopics == 0 {
return err
}
r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock, numTopics)
for i := 0; i < numTopics; i++ {
name, err := pd.getString()
if err != nil {
return err
}
numBlocks, err := pd.getArrayLength()
if err != nil {
return err
}
if numBlocks == 0 {
r.Blocks[name] = nil
continue
}
r.Blocks[name] = make(map[int32]*OffsetFetchResponseBlock, numBlocks)
for j := 0; j < numBlocks; j++ {
id, err := pd.getInt32()
if err != nil {
return err
}
block := new(OffsetFetchResponseBlock)
err = block.decode(pd)
if err != nil {
return err
}
r.Blocks[name][id] = block
}
}
return nil
}
func (r *OffsetFetchResponse) key() int16 {
return 9
}
func (r *OffsetFetchResponse) version() int16 {
return 0
}
func (r *OffsetFetchResponse) requiredVersion() KafkaVersion {
return MinVersion
}
func (r *OffsetFetchResponse) GetBlock(topic string, partition int32) *OffsetFetchResponseBlock {
if r.Blocks == nil {
return nil
}
if r.Blocks[topic] == nil {
return nil
}
return r.Blocks[topic][partition]
}
func (r *OffsetFetchResponse) AddBlock(topic string, partition int32, block *OffsetFetchResponseBlock) {
if r.Blocks == nil {
r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock)
}
partitions := r.Blocks[topic]
if partitions == nil {
partitions = make(map[int32]*OffsetFetchResponseBlock)
r.Blocks[topic] = partitions
}
partitions[partition] = block
}

572
vendor/github.com/Shopify/sarama/offset_manager.go generated vendored Normal file

@ -0,0 +1,572 @@
package sarama
import (
"sync"
"time"
)
// Offset Manager
// OffsetManager uses Kafka to store and fetch consumed partition offsets.
type OffsetManager interface {
// ManagePartition creates a PartitionOffsetManager on the given topic/partition.
// It will return an error if this OffsetManager is already managing the given
// topic/partition.
ManagePartition(topic string, partition int32) (PartitionOffsetManager, error)
// Close stops the OffsetManager from managing offsets. It is required to call
// this function before an OffsetManager object passes out of scope, as it
// will otherwise leak memory. You must call this after all the
// PartitionOffsetManagers are closed.
Close() error
}
type offsetManager struct {
client Client
conf *Config
group string
ticker *time.Ticker
memberID string
generation int32
broker *Broker
brokerLock sync.RWMutex
poms map[string]map[int32]*partitionOffsetManager
pomsLock sync.RWMutex
closeOnce sync.Once
closing chan none
closed chan none
}
// NewOffsetManagerFromClient creates a new OffsetManager from the given client.
// It is still necessary to call Close() on the underlying client when finished with the offset manager.
func NewOffsetManagerFromClient(group string, client Client) (OffsetManager, error) {
return newOffsetManagerFromClient(group, "", GroupGenerationUndefined, client)
}
func newOffsetManagerFromClient(group, memberID string, generation int32, client Client) (*offsetManager, error) {
// Check that we are not dealing with a closed Client before processing any other arguments
if client.Closed() {
return nil, ErrClosedClient
}
conf := client.Config()
om := &offsetManager{
client: client,
conf: conf,
group: group,
ticker: time.NewTicker(conf.Consumer.Offsets.CommitInterval),
poms: make(map[string]map[int32]*partitionOffsetManager),
memberID: memberID,
generation: generation,
closing: make(chan none),
closed: make(chan none),
}
go withRecover(om.mainLoop)
return om, nil
}
func (om *offsetManager) ManagePartition(topic string, partition int32) (PartitionOffsetManager, error) {
pom, err := om.newPartitionOffsetManager(topic, partition)
if err != nil {
return nil, err
}
om.pomsLock.Lock()
defer om.pomsLock.Unlock()
topicManagers := om.poms[topic]
if topicManagers == nil {
topicManagers = make(map[int32]*partitionOffsetManager)
om.poms[topic] = topicManagers
}
if topicManagers[partition] != nil {
return nil, ConfigurationError("That topic/partition is already being managed")
}
topicManagers[partition] = pom
return pom, nil
}
func (om *offsetManager) Close() error {
om.closeOnce.Do(func() {
// exit the mainLoop
close(om.closing)
<-om.closed
// mark all POMs as closed
om.asyncClosePOMs()
// flush one last time
for attempt := 0; attempt <= om.conf.Consumer.Offsets.Retry.Max; attempt++ {
om.flushToBroker()
if om.releasePOMs(false) == 0 {
break
}
}
om.releasePOMs(true)
om.brokerLock.Lock()
om.broker = nil
om.brokerLock.Unlock()
})
return nil
}
func (om *offsetManager) fetchInitialOffset(topic string, partition int32, retries int) (int64, string, error) {
broker, err := om.coordinator()
if err != nil {
if retries <= 0 {
return 0, "", err
}
return om.fetchInitialOffset(topic, partition, retries-1)
}
req := new(OffsetFetchRequest)
req.Version = 1
req.ConsumerGroup = om.group
req.AddPartition(topic, partition)
resp, err := broker.FetchOffset(req)
if err != nil {
if retries <= 0 {
return 0, "", err
}
om.releaseCoordinator(broker)
return om.fetchInitialOffset(topic, partition, retries-1)
}
block := resp.GetBlock(topic, partition)
if block == nil {
return 0, "", ErrIncompleteResponse
}
switch block.Err {
case ErrNoError:
return block.Offset, block.Metadata, nil
case ErrNotCoordinatorForConsumer:
if retries <= 0 {
return 0, "", block.Err
}
om.releaseCoordinator(broker)
return om.fetchInitialOffset(topic, partition, retries-1)
case ErrOffsetsLoadInProgress:
if retries <= 0 {
return 0, "", block.Err
}
select {
case <-om.closing:
return 0, "", block.Err
case <-time.After(om.conf.Metadata.Retry.Backoff):
}
return om.fetchInitialOffset(topic, partition, retries-1)
default:
return 0, "", block.Err
}
}
func (om *offsetManager) coordinator() (*Broker, error) {
om.brokerLock.RLock()
broker := om.broker
om.brokerLock.RUnlock()
if broker != nil {
return broker, nil
}
om.brokerLock.Lock()
defer om.brokerLock.Unlock()
if broker := om.broker; broker != nil {
return broker, nil
}
if err := om.client.RefreshCoordinator(om.group); err != nil {
return nil, err
}
broker, err := om.client.Coordinator(om.group)
if err != nil {
return nil, err
}
om.broker = broker
return broker, nil
}
func (om *offsetManager) releaseCoordinator(b *Broker) {
om.brokerLock.Lock()
if om.broker == b {
om.broker = nil
}
om.brokerLock.Unlock()
}
func (om *offsetManager) mainLoop() {
defer om.ticker.Stop()
defer close(om.closed)
for {
select {
case <-om.ticker.C:
om.flushToBroker()
om.releasePOMs(false)
case <-om.closing:
return
}
}
}
func (om *offsetManager) flushToBroker() {
req := om.constructRequest()
if req == nil {
return
}
broker, err := om.coordinator()
if err != nil {
om.handleError(err)
return
}
resp, err := broker.CommitOffset(req)
if err != nil {
om.handleError(err)
om.releaseCoordinator(broker)
_ = broker.Close()
return
}
om.handleResponse(broker, req, resp)
}
func (om *offsetManager) constructRequest() *OffsetCommitRequest {
var r *OffsetCommitRequest
var perPartitionTimestamp int64
if om.conf.Consumer.Offsets.Retention == 0 {
perPartitionTimestamp = ReceiveTime
r = &OffsetCommitRequest{
Version: 1,
ConsumerGroup: om.group,
ConsumerID: om.memberID,
ConsumerGroupGeneration: om.generation,
}
} else {
r = &OffsetCommitRequest{
Version: 2,
RetentionTime: int64(om.conf.Consumer.Offsets.Retention / time.Millisecond),
ConsumerGroup: om.group,
ConsumerID: om.memberID,
ConsumerGroupGeneration: om.generation,
}
}
om.pomsLock.RLock()
defer om.pomsLock.RUnlock()
for _, topicManagers := range om.poms {
for _, pom := range topicManagers {
pom.lock.Lock()
if pom.dirty {
r.AddBlock(pom.topic, pom.partition, pom.offset, perPartitionTimestamp, pom.metadata)
}
pom.lock.Unlock()
}
}
if len(r.blocks) > 0 {
return r
}
return nil
}
func (om *offsetManager) handleResponse(broker *Broker, req *OffsetCommitRequest, resp *OffsetCommitResponse) {
om.pomsLock.RLock()
defer om.pomsLock.RUnlock()
for _, topicManagers := range om.poms {
for _, pom := range topicManagers {
if req.blocks[pom.topic] == nil || req.blocks[pom.topic][pom.partition] == nil {
continue
}
var err KError
var ok bool
if resp.Errors[pom.topic] == nil {
pom.handleError(ErrIncompleteResponse)
continue
}
if err, ok = resp.Errors[pom.topic][pom.partition]; !ok {
pom.handleError(ErrIncompleteResponse)
continue
}
switch err {
case ErrNoError:
block := req.blocks[pom.topic][pom.partition]
pom.updateCommitted(block.offset, block.metadata)
case ErrNotLeaderForPartition, ErrLeaderNotAvailable,
ErrConsumerCoordinatorNotAvailable, ErrNotCoordinatorForConsumer:
// not a critical error, we just need to redispatch
om.releaseCoordinator(broker)
case ErrOffsetMetadataTooLarge, ErrInvalidCommitOffsetSize:
// nothing we can do about this, just tell the user and carry on
pom.handleError(err)
case ErrOffsetsLoadInProgress:
// nothing wrong but we didn't commit, we'll get it next time round
break
case ErrUnknownTopicOrPartition:
// let the user know *and* try redispatching - if topic-auto-create is
// enabled, redispatching should trigger a metadata req and create the
// topic; if not then re-dispatching won't help, but we've let the user
// know and it shouldn't hurt either (see https://github.com/Shopify/sarama/issues/706)
fallthrough
default:
// dunno, tell the user and try redispatching
pom.handleError(err)
om.releaseCoordinator(broker)
}
}
}
}
func (om *offsetManager) handleError(err error) {
om.pomsLock.RLock()
defer om.pomsLock.RUnlock()
for _, topicManagers := range om.poms {
for _, pom := range topicManagers {
pom.handleError(err)
}
}
}
func (om *offsetManager) asyncClosePOMs() {
om.pomsLock.RLock()
defer om.pomsLock.RUnlock()
for _, topicManagers := range om.poms {
for _, pom := range topicManagers {
pom.AsyncClose()
}
}
}
// Releases/removes closed POMs once they are clean (or when forced)
func (om *offsetManager) releasePOMs(force bool) (remaining int) {
om.pomsLock.Lock()
defer om.pomsLock.Unlock()
for topic, topicManagers := range om.poms {
for partition, pom := range topicManagers {
pom.lock.Lock()
releaseDue := pom.done && (force || !pom.dirty)
pom.lock.Unlock()
if releaseDue {
pom.release()
delete(om.poms[topic], partition)
if len(om.poms[topic]) == 0 {
delete(om.poms, topic)
}
}
}
remaining += len(om.poms[topic])
}
return
}
func (om *offsetManager) findPOM(topic string, partition int32) *partitionOffsetManager {
om.pomsLock.RLock()
defer om.pomsLock.RUnlock()
if partitions, ok := om.poms[topic]; ok {
if pom, ok := partitions[partition]; ok {
return pom
}
}
return nil
}
// Partition Offset Manager
// PartitionOffsetManager uses Kafka to store and fetch consumed partition offsets. You MUST call Close()
// on a partition offset manager to avoid leaks; it will not be garbage-collected automatically when it passes
// out of scope.
type PartitionOffsetManager interface {
// NextOffset returns the next offset that should be consumed for the managed
// partition, accompanied by metadata which can be used to reconstruct the state
// of the partition consumer when it resumes. NextOffset() will return
// `config.Consumer.Offsets.Initial` and an empty metadata string if no offset
// was committed for this partition yet.
NextOffset() (int64, string)
// MarkOffset marks the provided offset, alongside a metadata string
// that represents the state of the partition consumer at that point in time. The
// metadata string can be used by another consumer to restore that state, so it
// can resume consumption.
//
// To follow upstream conventions, you are expected to mark the offset of the
// next message to read, not the last message read. Thus, when calling `MarkOffset`
// you should typically add one to the offset of the last consumed message.
//
// Note: calling MarkOffset does not necessarily commit the offset to the backend
// store immediately for efficiency reasons, and it may never be committed if
// your application crashes. This means that you may end up processing the same
// message twice, and your processing should ideally be idempotent.
MarkOffset(offset int64, metadata string)
// ResetOffset resets to the provided offset, alongside a metadata string that
// represents the state of the partition consumer at that point in time. Reset
// acts as a counterpart to MarkOffset, the difference being that it allows
// resetting an offset to an earlier or smaller value, whereas MarkOffset only
// allows incrementing the offset. See MarkOffset for more details.
ResetOffset(offset int64, metadata string)
// Errors returns a read channel of errors that occur during offset management, if
// enabled. By default, errors are logged and not returned over this channel. If
// you want to implement any custom error handling, set your config's
// Consumer.Return.Errors setting to true, and read from this channel.
Errors() <-chan *ConsumerError
// AsyncClose initiates a shutdown of the PartitionOffsetManager. This method will
// return immediately, after which you should wait until the 'errors' channel has
// been drained and closed. It is required to call this function (or Close) before
// a consumer object passes out of scope, as it will otherwise leak memory. You
// must call this before calling Close on the underlying client.
AsyncClose()
// Close stops the PartitionOffsetManager from managing offsets. It is required to
// call this function (or AsyncClose) before a PartitionOffsetManager object
// passes out of scope, as it will otherwise leak memory. You must call this
// before calling Close on the underlying client.
Close() error
}
type partitionOffsetManager struct {
parent *offsetManager
topic string
partition int32
lock sync.Mutex
offset int64
metadata string
dirty bool
done bool
releaseOnce sync.Once
errors chan *ConsumerError
}
func (om *offsetManager) newPartitionOffsetManager(topic string, partition int32) (*partitionOffsetManager, error) {
offset, metadata, err := om.fetchInitialOffset(topic, partition, om.conf.Metadata.Retry.Max)
if err != nil {
return nil, err
}
return &partitionOffsetManager{
parent: om,
topic: topic,
partition: partition,
errors: make(chan *ConsumerError, om.conf.ChannelBufferSize),
offset: offset,
metadata: metadata,
}, nil
}
func (pom *partitionOffsetManager) Errors() <-chan *ConsumerError {
return pom.errors
}
func (pom *partitionOffsetManager) MarkOffset(offset int64, metadata string) {
pom.lock.Lock()
defer pom.lock.Unlock()
if offset > pom.offset {
pom.offset = offset
pom.metadata = metadata
pom.dirty = true
}
}
func (pom *partitionOffsetManager) ResetOffset(offset int64, metadata string) {
pom.lock.Lock()
defer pom.lock.Unlock()
if offset <= pom.offset {
pom.offset = offset
pom.metadata = metadata
pom.dirty = true
}
}
func (pom *partitionOffsetManager) updateCommitted(offset int64, metadata string) {
pom.lock.Lock()
defer pom.lock.Unlock()
if pom.offset == offset && pom.metadata == metadata {
pom.dirty = false
}
}
func (pom *partitionOffsetManager) NextOffset() (int64, string) {
pom.lock.Lock()
defer pom.lock.Unlock()
if pom.offset >= 0 {
return pom.offset, pom.metadata
}
return pom.parent.conf.Consumer.Offsets.Initial, ""
}
func (pom *partitionOffsetManager) AsyncClose() {
pom.lock.Lock()
pom.done = true
pom.lock.Unlock()
}
func (pom *partitionOffsetManager) Close() error {
pom.AsyncClose()
var errors ConsumerErrors
for err := range pom.errors {
errors = append(errors, err)
}
if len(errors) > 0 {
return errors
}
return nil
}
func (pom *partitionOffsetManager) handleError(err error) {
cErr := &ConsumerError{
Topic: pom.topic,
Partition: pom.partition,
Err: err,
}
if pom.parent.conf.Consumer.Return.Errors {
pom.errors <- cErr
} else {
Logger.Println(cErr)
}
}
func (pom *partitionOffsetManager) release() {
pom.releaseOnce.Do(func() {
go close(pom.errors)
})
}
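// Illustrative sketch (not vendored code): the intended lifecycle when using
// the offset manager: manage a partition, mark offsets as messages are
// consumed, then close the POM before the manager and the manager before the
// client. Names and values are assumptions; error handling is minimal.
func exampleOffsetManagerFlow(client Client) error {
	om, err := NewOffsetManagerFromClient("my_group", client)
	if err != nil {
		return err
	}
	pom, err := om.ManagePartition("my_topic", 0)
	if err != nil {
		return err
	}
	// Mark the offset of the *next* message to read, per the upstream convention.
	pom.MarkOffset(43, "")
	if err := pom.Close(); err != nil {
		return err
	}
	return om.Close()
}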

132
vendor/github.com/Shopify/sarama/offset_request.go generated vendored Normal file

@ -0,0 +1,132 @@
package sarama
type offsetRequestBlock struct {
time int64
maxOffsets int32 // Only used in version 0
}
func (b *offsetRequestBlock) encode(pe packetEncoder, version int16) error {
pe.putInt64(b.time)
if version == 0 {
pe.putInt32(b.maxOffsets)
}
return nil
}
func (b *offsetRequestBlock) decode(pd packetDecoder, version int16) (err error) {
if b.time, err = pd.getInt64(); err != nil {
return err
}
if version == 0 {
if b.maxOffsets, err = pd.getInt32(); err != nil {
return err
}
}
return nil
}
type OffsetRequest struct {
Version int16
blocks map[string]map[int32]*offsetRequestBlock
}
func (r *OffsetRequest) encode(pe packetEncoder) error {
pe.putInt32(-1) // replica ID is always -1 for clients
err := pe.putArrayLength(len(r.blocks))
if err != nil {
return err
}
for topic, partitions := range r.blocks {
err = pe.putString(topic)
if err != nil {
return err
}
err = pe.putArrayLength(len(partitions))
if err != nil {
return err
}
for partition, block := range partitions {
pe.putInt32(partition)
if err = block.encode(pe, r.Version); err != nil {
return err
}
}
}
return nil
}
func (r *OffsetRequest) decode(pd packetDecoder, version int16) error {
r.Version = version
// Ignore replica ID
if _, err := pd.getInt32(); err != nil {
return err
}
blockCount, err := pd.getArrayLength()
if err != nil {
return err
}
if blockCount == 0 {
return nil
}
r.blocks = make(map[string]map[int32]*offsetRequestBlock)
for i := 0; i < blockCount; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
partitionCount, err := pd.getArrayLength()
if err != nil {
return err
}
r.blocks[topic] = make(map[int32]*offsetRequestBlock)
for j := 0; j < partitionCount; j++ {
partition, err := pd.getInt32()
if err != nil {
return err
}
block := &offsetRequestBlock{}
if err := block.decode(pd, version); err != nil {
return err
}
r.blocks[topic][partition] = block
}
}
return nil
}
func (r *OffsetRequest) key() int16 {
return 2
}
func (r *OffsetRequest) version() int16 {
return r.Version
}
func (r *OffsetRequest) requiredVersion() KafkaVersion {
switch r.Version {
case 1:
return V0_10_1_0
default:
return MinVersion
}
}
func (r *OffsetRequest) AddBlock(topic string, partitionID int32, time int64, maxOffsets int32) {
if r.blocks == nil {
r.blocks = make(map[string]map[int32]*offsetRequestBlock)
}
if r.blocks[topic] == nil {
r.blocks[topic] = make(map[int32]*offsetRequestBlock)
}
tmp := new(offsetRequestBlock)
tmp.time = time
if r.Version == 0 {
tmp.maxOffsets = maxOffsets
}
r.blocks[topic][partitionID] = tmp
}
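// Illustrative sketch (not vendored code): asking for the newest available
// offset of a partition. OffsetNewest is defined elsewhere in this package as
// the special time value -1; maxOffsets is only encoded for version 0.
func exampleOffsetRequest() *OffsetRequest {
	r := &OffsetRequest{Version: 1}
	r.AddBlock("my_topic", 0, OffsetNewest, 1)
	return r
}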

174
vendor/github.com/Shopify/sarama/offset_response.go generated vendored Normal file

@ -0,0 +1,174 @@
package sarama
type OffsetResponseBlock struct {
Err KError
Offsets []int64 // Version 0
Offset int64 // Version 1
Timestamp int64 // Version 1
}
func (b *OffsetResponseBlock) decode(pd packetDecoder, version int16) (err error) {
tmp, err := pd.getInt16()
if err != nil {
return err
}
b.Err = KError(tmp)
if version == 0 {
b.Offsets, err = pd.getInt64Array()
return err
}
b.Timestamp, err = pd.getInt64()
if err != nil {
return err
}
b.Offset, err = pd.getInt64()
if err != nil {
return err
}
// For backwards compatibility put the offset in the offsets array too
b.Offsets = []int64{b.Offset}
return nil
}
func (b *OffsetResponseBlock) encode(pe packetEncoder, version int16) (err error) {
pe.putInt16(int16(b.Err))
if version == 0 {
return pe.putInt64Array(b.Offsets)
}
pe.putInt64(b.Timestamp)
pe.putInt64(b.Offset)
return nil
}
type OffsetResponse struct {
Version int16
Blocks map[string]map[int32]*OffsetResponseBlock
}
func (r *OffsetResponse) decode(pd packetDecoder, version int16) (err error) {
numTopics, err := pd.getArrayLength()
if err != nil {
return err
}
r.Blocks = make(map[string]map[int32]*OffsetResponseBlock, numTopics)
for i := 0; i < numTopics; i++ {
name, err := pd.getString()
if err != nil {
return err
}
numBlocks, err := pd.getArrayLength()
if err != nil {
return err
}
r.Blocks[name] = make(map[int32]*OffsetResponseBlock, numBlocks)
for j := 0; j < numBlocks; j++ {
id, err := pd.getInt32()
if err != nil {
return err
}
block := new(OffsetResponseBlock)
err = block.decode(pd, version)
if err != nil {
return err
}
r.Blocks[name][id] = block
}
}
return nil
}
func (r *OffsetResponse) GetBlock(topic string, partition int32) *OffsetResponseBlock {
if r.Blocks == nil {
return nil
}
if r.Blocks[topic] == nil {
return nil
}
return r.Blocks[topic][partition]
}
/*
Sample dump of an encoded OffsetResponse, kept as a debugging aid:
[0 0 0 1                            ntopics
 0 8 109 121 95 116 111 112 105 99  topic ("my_topic")
 0 0 0 1                            npartitions
 0 0 0 0                            id
 0 0
 0 0 0 1 0 0 0 0
 0 1 1 1 0 0 0 1
 0 8 109 121 95 116 111 112
 105 99 0 0 0 1 0 0
 0 0 0 0 0 0 0 1
 0 0 0 0 0 1 1 1] <nil>
*/
func (r *OffsetResponse) encode(pe packetEncoder) (err error) {
if err = pe.putArrayLength(len(r.Blocks)); err != nil {
return err
}
for topic, partitions := range r.Blocks {
if err = pe.putString(topic); err != nil {
return err
}
if err = pe.putArrayLength(len(partitions)); err != nil {
return err
}
for partition, block := range partitions {
pe.putInt32(partition)
if err = block.encode(pe, r.version()); err != nil {
return err
}
}
}
return nil
}
func (r *OffsetResponse) key() int16 {
return 2
}
func (r *OffsetResponse) version() int16 {
return r.Version
}
func (r *OffsetResponse) requiredVersion() KafkaVersion {
switch r.Version {
case 1:
return V0_10_1_0
default:
return MinVersion
}
}
// testing API
func (r *OffsetResponse) AddTopicPartition(topic string, partition int32, offset int64) {
if r.Blocks == nil {
r.Blocks = make(map[string]map[int32]*OffsetResponseBlock)
}
byTopic, ok := r.Blocks[topic]
if !ok {
byTopic = make(map[int32]*OffsetResponseBlock)
r.Blocks[topic] = byTopic
}
byTopic[partition] = &OffsetResponseBlock{Offsets: []int64{offset}, Offset: offset}
}

60
vendor/github.com/Shopify/sarama/packet_decoder.go generated vendored Normal file

@ -0,0 +1,60 @@
package sarama
// PacketDecoder is the interface providing helpers for reading with Kafka's encoding rules.
// Types implementing Decoder only need to worry about calling methods like GetString,
// not about how a string is represented in Kafka.
type packetDecoder interface {
// Primitives
getInt8() (int8, error)
getInt16() (int16, error)
getInt32() (int32, error)
getInt64() (int64, error)
getVarint() (int64, error)
getArrayLength() (int, error)
getBool() (bool, error)
// Collections
getBytes() ([]byte, error)
getVarintBytes() ([]byte, error)
getRawBytes(length int) ([]byte, error)
getString() (string, error)
getNullableString() (*string, error)
getInt32Array() ([]int32, error)
getInt64Array() ([]int64, error)
getStringArray() ([]string, error)
// Subsets
remaining() int
getSubset(length int) (packetDecoder, error)
peek(offset, length int) (packetDecoder, error) // similar to getSubset, but it doesn't advance the offset
// Stacks, see PushDecoder
push(in pushDecoder) error
pop() error
}
// PushDecoder is the interface for decoding fields like CRCs and lengths where the validity
// of the field depends on what is after it in the packet. Start them with PacketDecoder.Push() where
// the actual value is located in the packet, then PacketDecoder.Pop() them when all the bytes they
// depend upon have been decoded.
type pushDecoder interface {
// Saves the offset into the input buffer as the location to actually read the calculated value when able.
saveOffset(in int)
// Returns the length of data to reserve for the input of this encoder (eg 4 bytes for a CRC32).
reserveLength() int
// Indicates that all required data is now available to calculate and check the field.
// SaveOffset is guaranteed to have been called first. The implementation should read ReserveLength() bytes
// of data from the saved offset, and verify it based on the data between the saved offset and curOffset.
check(curOffset int, buf []byte) error
}
// dynamicPushDecoder extends the interface of pushDecoder for use cases where
// the length of the field itself is unknown until its value has been decoded
// (for instance, varint-encoded length fields).
// During push, the dynamicPushDecoder.decode() method will be called instead of reserveLength().
type dynamicPushDecoder interface {
pushDecoder
decoder
}
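// Illustrative sketch (not vendored code): a minimal pushDecoder that checks
// a 4-byte big-endian length prefix against the number of bytes decoded after
// it. PacketDecodingError is defined elsewhere in this package; the real
// implementations follow the same shape.
type exampleLengthCheck struct{ startOffset int }

func (l *exampleLengthCheck) saveOffset(in int) { l.startOffset = in }

func (l *exampleLengthCheck) reserveLength() int { return 4 }

func (l *exampleLengthCheck) check(curOffset int, buf []byte) error {
	want := uint32(curOffset - l.startOffset - 4)
	got := uint32(buf[l.startOffset])<<24 | uint32(buf[l.startOffset+1])<<16 |
		uint32(buf[l.startOffset+2])<<8 | uint32(buf[l.startOffset+3])
	if got != want {
		return PacketDecodingError{"length field check failed"}
	}
	return nil
}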

65
vendor/github.com/Shopify/sarama/packet_encoder.go generated vendored Normal file

@ -0,0 +1,65 @@
package sarama
import "github.com/rcrowley/go-metrics"
// PacketEncoder is the interface providing helpers for writing with Kafka's encoding rules.
// Types implementing Encoder only need to worry about calling methods like PutString,
// not about how a string is represented in Kafka.
type packetEncoder interface {
// Primitives
putInt8(in int8)
putInt16(in int16)
putInt32(in int32)
putInt64(in int64)
putVarint(in int64)
putArrayLength(in int) error
putBool(in bool)
// Collections
putBytes(in []byte) error
putVarintBytes(in []byte) error
putRawBytes(in []byte) error
putString(in string) error
putNullableString(in *string) error
putStringArray(in []string) error
putInt32Array(in []int32) error
putInt64Array(in []int64) error
// Provide the current offset to record the batch size metric
offset() int
// Stacks, see PushEncoder
push(in pushEncoder)
pop() error
// To record metrics when provided
metricRegistry() metrics.Registry
}
// PushEncoder is the interface for encoding fields like CRCs and lengths where the value
// of the field depends on what is encoded after it in the packet. Start them with PacketEncoder.Push() where
// the actual value is located in the packet, then PacketEncoder.Pop() them when all the bytes they
// depend upon have been written.
type pushEncoder interface {
// Saves the offset into the input buffer as the location to actually write the calculated value when able.
saveOffset(in int)
// Returns the length of data to reserve for the output of this encoder (eg 4 bytes for a CRC32).
reserveLength() int
// Indicates that all required data is now available to calculate and write the field.
// SaveOffset is guaranteed to have been called first. The implementation should write ReserveLength() bytes
// of data to the saved offset, based on the data between the saved offset and curOffset.
run(curOffset int, buf []byte) error
}
// dynamicPushEncoder extends the interface of pushEncoder for use cases where
// the length of the field itself is unknown until its value has been computed
// (for instance, varint-encoded length fields).
type dynamicPushEncoder interface {
pushEncoder
// Called during pop() to adjust the length of the field.
// It should return the difference in bytes between the last computed length and current length.
adjustLength(currOffset int) int
}
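The mirror image on the encode side: a sketch of a pushEncoder that reserves four bytes for an int32 length prefix and back-fills it at pop() time. The type name int32LengthField is hypothetical; in the real package, lengthField (pushed in produce_request.go below) plays this role.

package sarama

import "encoding/binary"

// int32LengthField reserves 4 bytes, then writes the byte count at pop() (sketch).
type int32LengthField struct {
	startOffset int
}

// saveOffset records where the reserved slot begins.
func (f *int32LengthField) saveOffset(in int) { f.startOffset = in }

// reserveLength leaves room for the int32 prefix.
func (f *int32LengthField) reserveLength() int { return 4 }

// run executes at pop(): everything written after the slot is the payload,
// so its size is back-filled into the slot.
func (f *int32LengthField) run(curOffset int, buf []byte) error {
	binary.BigEndian.PutUint32(buf[f.startOffset:], uint32(curOffset-f.startOffset-4))
	return nil
}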

217
vendor/github.com/Shopify/sarama/partitioner.go generated vendored Normal file

@ -0,0 +1,217 @@
package sarama
import (
"hash"
"hash/fnv"
"math/rand"
"time"
)
// Partitioner is anything that, given a Kafka message and a number of partitions indexed [0...numPartitions-1],
// decides to which partition to send the message. RandomPartitioner, RoundRobinPartitioner and HashPartitioner are provided
// as simple default implementations.
type Partitioner interface {
// Partition takes a message and partition count and chooses a partition
Partition(message *ProducerMessage, numPartitions int32) (int32, error)
// RequiresConsistency indicates to the user of the partitioner whether the
// mapping of key->partition is consistent or not. Specifically, if a
// partitioner requires consistency then it must be allowed to choose from all
// partitions (even ones known to be unavailable), and its choice must be
// respected by the caller. The obvious example is the HashPartitioner.
RequiresConsistency() bool
}
// DynamicConsistencyPartitioner can optionally be implemented by Partitioners
// in order to allow more flexibility than is originally allowed by the
// RequiresConsistency method in the Partitioner interface. This allows
// partitioners to require consistency sometimes, but not all times. It's useful
// for, e.g., the HashPartitioner, which does not require consistency if the
// message key is nil.
type DynamicConsistencyPartitioner interface {
Partitioner
// MessageRequiresConsistency is similar to Partitioner.RequiresConsistency,
// but takes in the message being partitioned so that the partitioner can
// make a per-message determination.
MessageRequiresConsistency(message *ProducerMessage) bool
}
// PartitionerConstructor is the type for a function capable of constructing new Partitioners.
type PartitionerConstructor func(topic string) Partitioner
type manualPartitioner struct{}
// HashPartitionerOption lets you modify the default values of the partitioner
type HashPartitionerOption func(*hashPartitioner)
// WithAbsFirst means that the partitioner handles absolute values
// in the same way as the reference Java implementation
func WithAbsFirst() HashPartitionerOption {
return func(hp *hashPartitioner) {
hp.referenceAbs = true
}
}
// WithCustomHashFunction lets you specify what hash function to use for the partitioning
func WithCustomHashFunction(hasher func() hash.Hash32) HashPartitionerOption {
return func(hp *hashPartitioner) {
hp.hasher = hasher()
}
}
// WithCustomFallbackPartitioner lets you specify what Partitioner should be used
// when the message key is nil
func WithCustomFallbackPartitioner(randomHP *hashPartitioner) HashPartitionerOption {
return func(hp *hashPartitioner) {
hp.random = randomHP
}
}
// NewManualPartitioner returns a Partitioner which uses the partition manually set in the provided
// ProducerMessage's Partition field as the partition to produce to.
func NewManualPartitioner(topic string) Partitioner {
return new(manualPartitioner)
}
func (p *manualPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
return message.Partition, nil
}
func (p *manualPartitioner) RequiresConsistency() bool {
return true
}
type randomPartitioner struct {
generator *rand.Rand
}
// NewRandomPartitioner returns a Partitioner which chooses a random partition each time.
func NewRandomPartitioner(topic string) Partitioner {
p := new(randomPartitioner)
p.generator = rand.New(rand.NewSource(time.Now().UTC().UnixNano()))
return p
}
func (p *randomPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
return int32(p.generator.Intn(int(numPartitions))), nil
}
func (p *randomPartitioner) RequiresConsistency() bool {
return false
}
type roundRobinPartitioner struct {
partition int32
}
// NewRoundRobinPartitioner returns a Partitioner which walks through the available partitions one at a time.
func NewRoundRobinPartitioner(topic string) Partitioner {
return &roundRobinPartitioner{}
}
func (p *roundRobinPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
if p.partition >= numPartitions {
p.partition = 0
}
ret := p.partition
p.partition++
return ret, nil
}
func (p *roundRobinPartitioner) RequiresConsistency() bool {
return false
}
type hashPartitioner struct {
random Partitioner
hasher hash.Hash32
referenceAbs bool
}
// NewCustomHashPartitioner is a wrapper around NewHashPartitioner, allowing the use of a custom hasher.
// The argument is a function providing the instance, implementing the hash.Hash32 interface. This is to ensure that
// each partition dispatcher gets its own hasher, to avoid concurrency issues caused by sharing an instance.
func NewCustomHashPartitioner(hasher func() hash.Hash32) PartitionerConstructor {
return func(topic string) Partitioner {
p := new(hashPartitioner)
p.random = NewRandomPartitioner(topic)
p.hasher = hasher()
p.referenceAbs = false
return p
}
}
// NewCustomPartitioner creates a default Partitioner but lets you specify the behavior of each component via options
func NewCustomPartitioner(options ...HashPartitionerOption) PartitionerConstructor {
return func(topic string) Partitioner {
p := new(hashPartitioner)
p.random = NewRandomPartitioner(topic)
p.hasher = fnv.New32a()
p.referenceAbs = false
for _, option := range options {
option(p)
}
return p
}
}
// NewHashPartitioner returns a Partitioner which behaves as follows. If the message's key is nil then a
// random partition is chosen. Otherwise the FNV-1a hash of the encoded bytes of the message key is used,
// modulo the number of partitions. This ensures that messages with the same key always end up on the
// same partition.
func NewHashPartitioner(topic string) Partitioner {
p := new(hashPartitioner)
p.random = NewRandomPartitioner(topic)
p.hasher = fnv.New32a()
p.referenceAbs = false
return p
}
// NewReferenceHashPartitioner is like NewHashPartitioner except that it handles absolute values
// in the same way as the reference Java implementation. NewHashPartitioner was supposed to do
// that but it had a mistake and now there are people depending on both behaviours. This will
// all go away on the next major version bump.
func NewReferenceHashPartitioner(topic string) Partitioner {
p := new(hashPartitioner)
p.random = NewRandomPartitioner(topic)
p.hasher = fnv.New32a()
p.referenceAbs = true
return p
}
func (p *hashPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
if message.Key == nil {
return p.random.Partition(message, numPartitions)
}
bytes, err := message.Key.Encode()
if err != nil {
return -1, err
}
p.hasher.Reset()
_, err = p.hasher.Write(bytes)
if err != nil {
return -1, err
}
var partition int32
// Turns out we were doing our absolute value in a subtly different way from the upstream
// implementation, but now we need to maintain backwards compatibility for people who started
// using the old version; if referenceAbs is set we are compatible with the reference Java
// client but not with past Sarama versions
if p.referenceAbs {
partition = (int32(p.hasher.Sum32()) & 0x7fffffff) % numPartitions
} else {
partition = int32(p.hasher.Sum32()) % numPartitions
if partition < 0 {
partition = -partition
}
}
return partition, nil
}
func (p *hashPartitioner) RequiresConsistency() bool {
return true
}
func (p *hashPartitioner) MessageRequiresConsistency(message *ProducerMessage) bool {
return message.Key != nil
}
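A usage sketch for the exported partitioners. It assumes sarama's ProducerMessage and StringEncoder types from elsewhere in the package; the topic name and partition count here are made up.

package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	// Same key always maps to the same partition (for a fixed count).
	p := sarama.NewHashPartitioner("events")
	msg := &sarama.ProducerMessage{
		Topic: "events",
		Key:   sarama.StringEncoder("user-42"),
	}
	partition, err := p.Partition(msg, 12)
	if err != nil {
		panic(err)
	}
	fmt.Println("partition:", partition)
	// Hash partitioning must be respected by the caller.
	fmt.Println("requires consistency:", p.RequiresConsistency()) // true
}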

153
vendor/github.com/Shopify/sarama/prep_encoder.go generated vendored Normal file

@ -0,0 +1,153 @@
package sarama
import (
"encoding/binary"
"fmt"
"math"
"github.com/rcrowley/go-metrics"
)
type prepEncoder struct {
stack []pushEncoder
length int
}
// primitives
func (pe *prepEncoder) putInt8(in int8) {
pe.length++
}
func (pe *prepEncoder) putInt16(in int16) {
pe.length += 2
}
func (pe *prepEncoder) putInt32(in int32) {
pe.length += 4
}
func (pe *prepEncoder) putInt64(in int64) {
pe.length += 8
}
func (pe *prepEncoder) putVarint(in int64) {
var buf [binary.MaxVarintLen64]byte
pe.length += binary.PutVarint(buf[:], in)
}
func (pe *prepEncoder) putArrayLength(in int) error {
if in > math.MaxInt32 {
return PacketEncodingError{fmt.Sprintf("array too long (%d)", in)}
}
pe.length += 4
return nil
}
func (pe *prepEncoder) putBool(in bool) {
pe.length++
}
// arrays
func (pe *prepEncoder) putBytes(in []byte) error {
pe.length += 4
if in == nil {
return nil
}
return pe.putRawBytes(in)
}
func (pe *prepEncoder) putVarintBytes(in []byte) error {
if in == nil {
pe.putVarint(-1)
return nil
}
pe.putVarint(int64(len(in)))
return pe.putRawBytes(in)
}
func (pe *prepEncoder) putRawBytes(in []byte) error {
if len(in) > math.MaxInt32 {
return PacketEncodingError{fmt.Sprintf("byteslice too long (%d)", len(in))}
}
pe.length += len(in)
return nil
}
func (pe *prepEncoder) putNullableString(in *string) error {
if in == nil {
pe.length += 2
return nil
}
return pe.putString(*in)
}
func (pe *prepEncoder) putString(in string) error {
pe.length += 2
if len(in) > math.MaxInt16 {
return PacketEncodingError{fmt.Sprintf("string too long (%d)", len(in))}
}
pe.length += len(in)
return nil
}
func (pe *prepEncoder) putStringArray(in []string) error {
err := pe.putArrayLength(len(in))
if err != nil {
return err
}
for _, str := range in {
if err := pe.putString(str); err != nil {
return err
}
}
return nil
}
func (pe *prepEncoder) putInt32Array(in []int32) error {
err := pe.putArrayLength(len(in))
if err != nil {
return err
}
pe.length += 4 * len(in)
return nil
}
func (pe *prepEncoder) putInt64Array(in []int64) error {
err := pe.putArrayLength(len(in))
if err != nil {
return err
}
pe.length += 8 * len(in)
return nil
}
func (pe *prepEncoder) offset() int {
return pe.length
}
// stackable
func (pe *prepEncoder) push(in pushEncoder) {
in.saveOffset(pe.length)
pe.length += in.reserveLength()
pe.stack = append(pe.stack, in)
}
func (pe *prepEncoder) pop() error {
in := pe.stack[len(pe.stack)-1]
pe.stack = pe.stack[:len(pe.stack)-1]
if dpe, ok := in.(dynamicPushEncoder); ok {
pe.length += dpe.adjustLength(pe.length)
}
return nil
}
// we do not record metrics during the prep encoder pass
func (pe *prepEncoder) metricRegistry() metrics.Registry {
return nil
}
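prepEncoder never writes a byte; it only tallies sizes so that a buffer of exactly the right length can be allocated before the real pass. A small sketch under that assumption; the helper name sizeOfString is hypothetical.

package sarama

// sizeOfString shows the sizing pass in isolation: a Kafka string costs
// a 2-byte length prefix plus its raw bytes.
func sizeOfString(s string) int {
	pe := &prepEncoder{}
	if err := pe.putString(s); err != nil {
		return -1 // only possible for strings longer than math.MaxInt16
	}
	return pe.length // == 2 + len(s)
}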

252
vendor/github.com/Shopify/sarama/produce_request.go generated vendored Normal file

@ -0,0 +1,252 @@
package sarama
import "github.com/rcrowley/go-metrics"
// RequiredAcks is used in Produce Requests to tell the broker how many replica acknowledgements
// it must see before responding. Any of the constants defined here are valid. On broker versions
// prior to 0.8.2.0 any other positive int16 is also valid (the broker will wait for that many
// acknowledgements) but in 0.8.2.0 and later this will raise an exception (it has been replaced
// by setting the `min.insync.replicas` value in the broker's configuration).
type RequiredAcks int16
const (
// NoResponse doesn't send any response, the TCP ACK is all you get.
NoResponse RequiredAcks = 0
// WaitForLocal waits for only the local commit to succeed before responding.
WaitForLocal RequiredAcks = 1
// WaitForAll waits for all in-sync replicas to commit before responding.
// The minimum number of in-sync replicas is configured on the broker via
// the `min.insync.replicas` configuration key.
WaitForAll RequiredAcks = -1
)
type ProduceRequest struct {
TransactionalID *string
RequiredAcks RequiredAcks
Timeout int32
Version int16 // v1 requires Kafka 0.9, v2 requires Kafka 0.10, v3 requires Kafka 0.11
records map[string]map[int32]Records
}
func updateMsgSetMetrics(msgSet *MessageSet, compressionRatioMetric metrics.Histogram,
topicCompressionRatioMetric metrics.Histogram) int64 {
var topicRecordCount int64
for _, messageBlock := range msgSet.Messages {
// Is this a fake "message" wrapping real messages?
if messageBlock.Msg.Set != nil {
topicRecordCount += int64(len(messageBlock.Msg.Set.Messages))
} else {
// A single uncompressed message
topicRecordCount++
}
// Better safe than sorry when computing the compression ratio
if messageBlock.Msg.compressedSize != 0 {
compressionRatio := float64(len(messageBlock.Msg.Value)) /
float64(messageBlock.Msg.compressedSize)
// Histograms do not support decimal values, so multiply by 100 for better precision
intCompressionRatio := int64(100 * compressionRatio)
compressionRatioMetric.Update(intCompressionRatio)
topicCompressionRatioMetric.Update(intCompressionRatio)
}
}
return topicRecordCount
}
func updateBatchMetrics(recordBatch *RecordBatch, compressionRatioMetric metrics.Histogram,
topicCompressionRatioMetric metrics.Histogram) int64 {
if recordBatch.compressedRecords != nil {
compressionRatio := int64(float64(recordBatch.recordsLen) / float64(len(recordBatch.compressedRecords)) * 100)
compressionRatioMetric.Update(compressionRatio)
topicCompressionRatioMetric.Update(compressionRatio)
}
return int64(len(recordBatch.Records))
}
func (r *ProduceRequest) encode(pe packetEncoder) error {
if r.Version >= 3 {
if err := pe.putNullableString(r.TransactionalID); err != nil {
return err
}
}
pe.putInt16(int16(r.RequiredAcks))
pe.putInt32(r.Timeout)
metricRegistry := pe.metricRegistry()
var batchSizeMetric metrics.Histogram
var compressionRatioMetric metrics.Histogram
if metricRegistry != nil {
batchSizeMetric = getOrRegisterHistogram("batch-size", metricRegistry)
compressionRatioMetric = getOrRegisterHistogram("compression-ratio", metricRegistry)
}
totalRecordCount := int64(0)
err := pe.putArrayLength(len(r.records))
if err != nil {
return err
}
for topic, partitions := range r.records {
err = pe.putString(topic)
if err != nil {
return err
}
err = pe.putArrayLength(len(partitions))
if err != nil {
return err
}
topicRecordCount := int64(0)
var topicCompressionRatioMetric metrics.Histogram
if metricRegistry != nil {
topicCompressionRatioMetric = getOrRegisterTopicHistogram("compression-ratio", topic, metricRegistry)
}
for id, records := range partitions {
startOffset := pe.offset()
pe.putInt32(id)
pe.push(&lengthField{})
err = records.encode(pe)
if err != nil {
return err
}
err = pe.pop()
if err != nil {
return err
}
if metricRegistry != nil {
if r.Version >= 3 {
topicRecordCount += updateBatchMetrics(records.RecordBatch, compressionRatioMetric, topicCompressionRatioMetric)
} else {
topicRecordCount += updateMsgSetMetrics(records.MsgSet, compressionRatioMetric, topicCompressionRatioMetric)
}
batchSize := int64(pe.offset() - startOffset)
batchSizeMetric.Update(batchSize)
getOrRegisterTopicHistogram("batch-size", topic, metricRegistry).Update(batchSize)
}
}
if topicRecordCount > 0 {
getOrRegisterTopicMeter("record-send-rate", topic, metricRegistry).Mark(topicRecordCount)
getOrRegisterTopicHistogram("records-per-request", topic, metricRegistry).Update(topicRecordCount)
totalRecordCount += topicRecordCount
}
}
if totalRecordCount > 0 {
metrics.GetOrRegisterMeter("record-send-rate", metricRegistry).Mark(totalRecordCount)
getOrRegisterHistogram("records-per-request", metricRegistry).Update(totalRecordCount)
}
return nil
}
func (r *ProduceRequest) decode(pd packetDecoder, version int16) error {
r.Version = version
if version >= 3 {
id, err := pd.getNullableString()
if err != nil {
return err
}
r.TransactionalID = id
}
requiredAcks, err := pd.getInt16()
if err != nil {
return err
}
r.RequiredAcks = RequiredAcks(requiredAcks)
if r.Timeout, err = pd.getInt32(); err != nil {
return err
}
topicCount, err := pd.getArrayLength()
if err != nil {
return err
}
if topicCount == 0 {
return nil
}
r.records = make(map[string]map[int32]Records)
for i := 0; i < topicCount; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
partitionCount, err := pd.getArrayLength()
if err != nil {
return err
}
r.records[topic] = make(map[int32]Records)
for j := 0; j < partitionCount; j++ {
partition, err := pd.getInt32()
if err != nil {
return err
}
size, err := pd.getInt32()
if err != nil {
return err
}
recordsDecoder, err := pd.getSubset(int(size))
if err != nil {
return err
}
var records Records
if err := records.decode(recordsDecoder); err != nil {
return err
}
r.records[topic][partition] = records
}
}
return nil
}
func (r *ProduceRequest) key() int16 {
return 0
}
func (r *ProduceRequest) version() int16 {
return r.Version
}
func (r *ProduceRequest) requiredVersion() KafkaVersion {
switch r.Version {
case 1:
return V0_9_0_0
case 2:
return V0_10_0_0
case 3:
return V0_11_0_0
default:
return MinVersion
}
}
func (r *ProduceRequest) ensureRecords(topic string, partition int32) {
if r.records == nil {
r.records = make(map[string]map[int32]Records)
}
if r.records[topic] == nil {
r.records[topic] = make(map[int32]Records)
}
}
func (r *ProduceRequest) AddMessage(topic string, partition int32, msg *Message) {
r.ensureRecords(topic, partition)
set := r.records[topic][partition].MsgSet
if set == nil {
set = new(MessageSet)
r.records[topic][partition] = newLegacyRecords(set)
}
set.addMessage(msg)
}
func (r *ProduceRequest) AddSet(topic string, partition int32, set *MessageSet) {
r.ensureRecords(topic, partition)
r.records[topic][partition] = newLegacyRecords(set)
}
func (r *ProduceRequest) AddBatch(topic string, partition int32, batch *RecordBatch) {
r.ensureRecords(topic, partition)
r.records[topic][partition] = newDefaultRecords(batch)
}
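A sketch of assembling a ProduceRequest by hand with the helpers above, roughly what the producer does for pre-0.11 brokers. The Message fields used here (Codec, Value) are the same ones produce_set.go below sets; the function name is hypothetical.

package sarama

// buildExampleRequest fills a v0 ProduceRequest with one legacy message.
func buildExampleRequest() *ProduceRequest {
	req := &ProduceRequest{
		RequiredAcks: WaitForAll,
		Timeout:      10000, // milliseconds the broker may wait for acks
	}
	req.AddMessage("events", 0, &Message{
		Codec: CompressionNone,
		Value: []byte("hello"),
	})
	return req
}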

183
vendor/github.com/Shopify/sarama/produce_response.go generated vendored Normal file

@ -0,0 +1,183 @@
package sarama
import (
"fmt"
"time"
)
type ProduceResponseBlock struct {
Err KError
Offset int64
// only provided if Version >= 2 and the broker is configured with `LogAppendTime`
Timestamp time.Time
}
func (b *ProduceResponseBlock) decode(pd packetDecoder, version int16) (err error) {
tmp, err := pd.getInt16()
if err != nil {
return err
}
b.Err = KError(tmp)
b.Offset, err = pd.getInt64()
if err != nil {
return err
}
if version >= 2 {
if millis, err := pd.getInt64(); err != nil {
return err
} else if millis != -1 {
b.Timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond))
}
}
return nil
}
func (b *ProduceResponseBlock) encode(pe packetEncoder, version int16) (err error) {
pe.putInt16(int16(b.Err))
pe.putInt64(b.Offset)
if version >= 2 {
timestamp := int64(-1)
if !b.Timestamp.Before(time.Unix(0, 0)) {
timestamp = b.Timestamp.UnixNano() / int64(time.Millisecond)
} else if !b.Timestamp.IsZero() {
return PacketEncodingError{fmt.Sprintf("invalid timestamp (%v)", b.Timestamp)}
}
pe.putInt64(timestamp)
}
return nil
}
type ProduceResponse struct {
Blocks map[string]map[int32]*ProduceResponseBlock
Version int16
ThrottleTime time.Duration // only provided if Version >= 1
}
func (r *ProduceResponse) decode(pd packetDecoder, version int16) (err error) {
r.Version = version
numTopics, err := pd.getArrayLength()
if err != nil {
return err
}
r.Blocks = make(map[string]map[int32]*ProduceResponseBlock, numTopics)
for i := 0; i < numTopics; i++ {
name, err := pd.getString()
if err != nil {
return err
}
numBlocks, err := pd.getArrayLength()
if err != nil {
return err
}
r.Blocks[name] = make(map[int32]*ProduceResponseBlock, numBlocks)
for j := 0; j < numBlocks; j++ {
id, err := pd.getInt32()
if err != nil {
return err
}
block := new(ProduceResponseBlock)
err = block.decode(pd, version)
if err != nil {
return err
}
r.Blocks[name][id] = block
}
}
if r.Version >= 1 {
millis, err := pd.getInt32()
if err != nil {
return err
}
r.ThrottleTime = time.Duration(millis) * time.Millisecond
}
return nil
}
func (r *ProduceResponse) encode(pe packetEncoder) error {
err := pe.putArrayLength(len(r.Blocks))
if err != nil {
return err
}
for topic, partitions := range r.Blocks {
err = pe.putString(topic)
if err != nil {
return err
}
err = pe.putArrayLength(len(partitions))
if err != nil {
return err
}
for id, prb := range partitions {
pe.putInt32(id)
err = prb.encode(pe, r.Version)
if err != nil {
return err
}
}
}
if r.Version >= 1 {
pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
}
return nil
}
func (r *ProduceResponse) key() int16 {
return 0
}
func (r *ProduceResponse) version() int16 {
return r.Version
}
func (r *ProduceResponse) requiredVersion() KafkaVersion {
switch r.Version {
case 1:
return V0_9_0_0
case 2:
return V0_10_0_0
case 3:
return V0_11_0_0
default:
return MinVersion
}
}
func (r *ProduceResponse) GetBlock(topic string, partition int32) *ProduceResponseBlock {
if r.Blocks == nil {
return nil
}
if r.Blocks[topic] == nil {
return nil
}
return r.Blocks[topic][partition]
}
// Testing API
func (r *ProduceResponse) AddTopicPartition(topic string, partition int32, err KError) {
if r.Blocks == nil {
r.Blocks = make(map[string]map[int32]*ProduceResponseBlock)
}
byTopic, ok := r.Blocks[topic]
if !ok {
byTopic = make(map[int32]*ProduceResponseBlock)
r.Blocks[topic] = byTopic
}
byTopic[partition] = &ProduceResponseBlock{Err: err}
}
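How a caller typically inspects a ProduceResponse, sketched assuming sarama's ErrIncompleteResponse and ErrNoError values from errors.go (not shown in this diff); the function name is hypothetical.

package sarama

// checkProduceResponse surfaces the per-partition error, if any.
func checkProduceResponse(resp *ProduceResponse) (int64, error) {
	block := resp.GetBlock("events", 0)
	if block == nil {
		return -1, ErrIncompleteResponse // broker omitted the partition
	}
	if block.Err != ErrNoError {
		return -1, block.Err // KError implements the error interface
	}
	return block.Offset, nil // offset assigned to the first message
}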

252
vendor/github.com/Shopify/sarama/produce_set.go generated vendored Normal file

@ -0,0 +1,252 @@
package sarama
import (
"encoding/binary"
"time"
)
type partitionSet struct {
msgs []*ProducerMessage
recordsToSend Records
bufferBytes int
}
type produceSet struct {
parent *asyncProducer
msgs map[string]map[int32]*partitionSet
bufferBytes int
bufferCount int
}
func newProduceSet(parent *asyncProducer) *produceSet {
return &produceSet{
msgs: make(map[string]map[int32]*partitionSet),
parent: parent,
}
}
func (ps *produceSet) add(msg *ProducerMessage) error {
var err error
var key, val []byte
if msg.Key != nil {
if key, err = msg.Key.Encode(); err != nil {
return err
}
}
if msg.Value != nil {
if val, err = msg.Value.Encode(); err != nil {
return err
}
}
timestamp := msg.Timestamp
if msg.Timestamp.IsZero() {
timestamp = time.Now()
}
partitions := ps.msgs[msg.Topic]
if partitions == nil {
partitions = make(map[int32]*partitionSet)
ps.msgs[msg.Topic] = partitions
}
var size int
set := partitions[msg.Partition]
if set == nil {
if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
batch := &RecordBatch{
FirstTimestamp: timestamp,
Version: 2,
ProducerID: -1, /* No producer id */
Codec: ps.parent.conf.Producer.Compression,
CompressionLevel: ps.parent.conf.Producer.CompressionLevel,
}
set = &partitionSet{recordsToSend: newDefaultRecords(batch)}
size = recordBatchOverhead
} else {
set = &partitionSet{recordsToSend: newLegacyRecords(new(MessageSet))}
}
partitions[msg.Partition] = set
}
set.msgs = append(set.msgs, msg)
if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
// We are being conservative here to avoid having to prep encode the record
size += maximumRecordOverhead
rec := &Record{
Key: key,
Value: val,
TimestampDelta: timestamp.Sub(set.recordsToSend.RecordBatch.FirstTimestamp),
}
size += len(key) + len(val)
if len(msg.Headers) > 0 {
rec.Headers = make([]*RecordHeader, len(msg.Headers))
for i := range msg.Headers {
rec.Headers[i] = &msg.Headers[i]
size += len(rec.Headers[i].Key) + len(rec.Headers[i].Value) + 2*binary.MaxVarintLen32
}
}
set.recordsToSend.RecordBatch.addRecord(rec)
} else {
msgToSend := &Message{Codec: CompressionNone, Key: key, Value: val}
if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
msgToSend.Timestamp = timestamp
msgToSend.Version = 1
}
set.recordsToSend.MsgSet.addMessage(msgToSend)
size = producerMessageOverhead + len(key) + len(val)
}
set.bufferBytes += size
ps.bufferBytes += size
ps.bufferCount++
return nil
}
func (ps *produceSet) buildRequest() *ProduceRequest {
req := &ProduceRequest{
RequiredAcks: ps.parent.conf.Producer.RequiredAcks,
Timeout: int32(ps.parent.conf.Producer.Timeout / time.Millisecond),
}
if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
req.Version = 2
}
if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
req.Version = 3
}
for topic, partitionSet := range ps.msgs {
for partition, set := range partitionSet {
if req.Version >= 3 {
// If the API version we're hitting is 3 or greater, we need to calculate
// offsets for each record in the batch relative to FirstOffset.
// Additionally, we must set LastOffsetDelta to the value of the last offset
// in the batch. Since the OffsetDelta of the first record is 0, we know that the
// final record of any batch will have an offset of (# of records in batch) - 1.
// (See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-Messagesets
// under the RecordBatch section for details.)
rb := set.recordsToSend.RecordBatch
if len(rb.Records) > 0 {
rb.LastOffsetDelta = int32(len(rb.Records) - 1)
for i, record := range rb.Records {
record.OffsetDelta = int64(i)
}
}
req.AddBatch(topic, partition, rb)
continue
}
if ps.parent.conf.Producer.Compression == CompressionNone {
req.AddSet(topic, partition, set.recordsToSend.MsgSet)
} else {
// When compression is enabled, the entire set for each partition is compressed
// and sent as the payload of a single fake "message" with the appropriate codec
// set and no key. When the server sees a message with a compression codec, it
// decompresses the payload and treats the result as its message set.
if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
// If our version is 0.10 or later, assign relative offsets
// to the inner messages. This lets the broker avoid
// recompressing the message set.
// (See https://cwiki.apache.org/confluence/display/KAFKA/KIP-31+-+Move+to+relative+offsets+in+compressed+message+sets
// for details on relative offsets.)
for i, msg := range set.recordsToSend.MsgSet.Messages {
msg.Offset = int64(i)
}
}
payload, err := encode(set.recordsToSend.MsgSet, ps.parent.conf.MetricRegistry)
if err != nil {
Logger.Println(err) // if this happens, it's basically our fault.
panic(err)
}
compMsg := &Message{
Codec: ps.parent.conf.Producer.Compression,
CompressionLevel: ps.parent.conf.Producer.CompressionLevel,
Key: nil,
Value: payload,
Set: set.recordsToSend.MsgSet, // Provide the underlying message set for accurate metrics
}
if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
compMsg.Version = 1
compMsg.Timestamp = set.recordsToSend.MsgSet.Messages[0].Msg.Timestamp
}
req.AddMessage(topic, partition, compMsg)
}
}
}
return req
}
func (ps *produceSet) eachPartition(cb func(topic string, partition int32, msgs []*ProducerMessage)) {
for topic, partitionSet := range ps.msgs {
for partition, set := range partitionSet {
cb(topic, partition, set.msgs)
}
}
}
func (ps *produceSet) dropPartition(topic string, partition int32) []*ProducerMessage {
if ps.msgs[topic] == nil {
return nil
}
set := ps.msgs[topic][partition]
if set == nil {
return nil
}
ps.bufferBytes -= set.bufferBytes
ps.bufferCount -= len(set.msgs)
delete(ps.msgs[topic], partition)
return set.msgs
}
func (ps *produceSet) wouldOverflow(msg *ProducerMessage) bool {
version := 1
if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
version = 2
}
switch {
// Would we overflow our maximum possible size-on-the-wire? 10KiB is arbitrary overhead for safety.
case ps.bufferBytes+msg.byteSize(version) >= int(MaxRequestSize-(10*1024)):
return true
// Would we overflow the size-limit of a compressed message-batch for this partition?
case ps.parent.conf.Producer.Compression != CompressionNone &&
ps.msgs[msg.Topic] != nil && ps.msgs[msg.Topic][msg.Partition] != nil &&
ps.msgs[msg.Topic][msg.Partition].bufferBytes+msg.byteSize(version) >= ps.parent.conf.Producer.MaxMessageBytes:
return true
// Would we overflow simply in number of messages?
case ps.parent.conf.Producer.Flush.MaxMessages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.MaxMessages:
return true
default:
return false
}
}
func (ps *produceSet) readyToFlush() bool {
switch {
// If we don't have any messages, nothing else matters
case ps.empty():
return false
// If all three config values are 0, we always flush as-fast-as-possible
case ps.parent.conf.Producer.Flush.Frequency == 0 && ps.parent.conf.Producer.Flush.Bytes == 0 && ps.parent.conf.Producer.Flush.Messages == 0:
return true
// If we've passed the message trigger-point
case ps.parent.conf.Producer.Flush.Messages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.Messages:
return true
// If we've passed the byte trigger-point
case ps.parent.conf.Producer.Flush.Bytes > 0 && ps.bufferBytes >= ps.parent.conf.Producer.Flush.Bytes:
return true
default:
return false
}
}
func (ps *produceSet) empty() bool {
return ps.bufferCount == 0
}
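The flush triggers in readyToFlush are worth restating in isolation. The sketch below is not sarama API; flushConfig is a hypothetical stand-in for the conf.Producer.Flush values the real code reads.

package main

import "fmt"

// flushConfig mirrors the three trigger knobs read by readyToFlush.
type flushConfig struct {
	Frequency int // time trigger; 0 means disabled
	Bytes     int // byte trigger; 0 means disabled
	Messages  int // message-count trigger; 0 means disabled
}

func readyToFlush(conf flushConfig, bufferCount, bufferBytes int) bool {
	switch {
	case bufferCount == 0:
		return false // nothing buffered, nothing to flush
	case conf.Frequency == 0 && conf.Bytes == 0 && conf.Messages == 0:
		return true // no triggers configured: flush as fast as possible
	case conf.Messages > 0 && bufferCount >= conf.Messages:
		return true
	case conf.Bytes > 0 && bufferBytes >= conf.Bytes:
		return true
	default:
		return false
	}
}

func main() {
	conf := flushConfig{Messages: 100, Bytes: 1 << 20}
	fmt.Println(readyToFlush(conf, 100, 512))  // true: count trigger hit
	fmt.Println(readyToFlush(conf, 99, 1<<19)) // false: neither trigger hit
}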

324
vendor/github.com/Shopify/sarama/real_decoder.go generated vendored Normal file

@ -0,0 +1,324 @@
package sarama
import (
"encoding/binary"
"math"
)
var errInvalidArrayLength = PacketDecodingError{"invalid array length"}
var errInvalidByteSliceLength = PacketDecodingError{"invalid byteslice length"}
var errInvalidByteSliceLengthType = PacketDecodingError{"invalid byteslice length type"}
var errInvalidStringLength = PacketDecodingError{"invalid string length"}
var errInvalidSubsetSize = PacketDecodingError{"invalid subset size"}
var errVarintOverflow = PacketDecodingError{"varint overflow"}
var errInvalidBool = PacketDecodingError{"invalid bool"}
type realDecoder struct {
raw []byte
off int
stack []pushDecoder
}
// primitives
func (rd *realDecoder) getInt8() (int8, error) {
if rd.remaining() < 1 {
rd.off = len(rd.raw)
return -1, ErrInsufficientData
}
tmp := int8(rd.raw[rd.off])
rd.off++
return tmp, nil
}
func (rd *realDecoder) getInt16() (int16, error) {
if rd.remaining() < 2 {
rd.off = len(rd.raw)
return -1, ErrInsufficientData
}
tmp := int16(binary.BigEndian.Uint16(rd.raw[rd.off:]))
rd.off += 2
return tmp, nil
}
func (rd *realDecoder) getInt32() (int32, error) {
if rd.remaining() < 4 {
rd.off = len(rd.raw)
return -1, ErrInsufficientData
}
tmp := int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))
rd.off += 4
return tmp, nil
}
func (rd *realDecoder) getInt64() (int64, error) {
if rd.remaining() < 8 {
rd.off = len(rd.raw)
return -1, ErrInsufficientData
}
tmp := int64(binary.BigEndian.Uint64(rd.raw[rd.off:]))
rd.off += 8
return tmp, nil
}
func (rd *realDecoder) getVarint() (int64, error) {
tmp, n := binary.Varint(rd.raw[rd.off:])
if n == 0 {
rd.off = len(rd.raw)
return -1, ErrInsufficientData
}
if n < 0 {
rd.off -= n
return -1, errVarintOverflow
}
rd.off += n
return tmp, nil
}
func (rd *realDecoder) getArrayLength() (int, error) {
if rd.remaining() < 4 {
rd.off = len(rd.raw)
return -1, ErrInsufficientData
}
tmp := int(int32(binary.BigEndian.Uint32(rd.raw[rd.off:])))
rd.off += 4
if tmp > rd.remaining() {
rd.off = len(rd.raw)
return -1, ErrInsufficientData
} else if tmp > 2*math.MaxUint16 {
return -1, errInvalidArrayLength
}
return tmp, nil
}
func (rd *realDecoder) getBool() (bool, error) {
b, err := rd.getInt8()
if err != nil || b == 0 {
return false, err
}
if b != 1 {
return false, errInvalidBool
}
return true, nil
}
// collections
func (rd *realDecoder) getBytes() ([]byte, error) {
tmp, err := rd.getInt32()
if err != nil {
return nil, err
}
if tmp == -1 {
return nil, nil
}
return rd.getRawBytes(int(tmp))
}
func (rd *realDecoder) getVarintBytes() ([]byte, error) {
tmp, err := rd.getVarint()
if err != nil {
return nil, err
}
if tmp == -1 {
return nil, nil
}
return rd.getRawBytes(int(tmp))
}
func (rd *realDecoder) getStringLength() (int, error) {
length, err := rd.getInt16()
if err != nil {
return 0, err
}
n := int(length)
switch {
case n < -1:
return 0, errInvalidStringLength
case n > rd.remaining():
rd.off = len(rd.raw)
return 0, ErrInsufficientData
}
return n, nil
}
func (rd *realDecoder) getString() (string, error) {
n, err := rd.getStringLength()
if err != nil || n == -1 {
return "", err
}
tmpStr := string(rd.raw[rd.off : rd.off+n])
rd.off += n
return tmpStr, nil
}
func (rd *realDecoder) getNullableString() (*string, error) {
n, err := rd.getStringLength()
if err != nil || n == -1 {
return nil, err
}
tmpStr := string(rd.raw[rd.off : rd.off+n])
rd.off += n
return &tmpStr, err
}
func (rd *realDecoder) getInt32Array() ([]int32, error) {
if rd.remaining() < 4 {
rd.off = len(rd.raw)
return nil, ErrInsufficientData
}
n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
rd.off += 4
if rd.remaining() < 4*n {
rd.off = len(rd.raw)
return nil, ErrInsufficientData
}
if n == 0 {
return nil, nil
}
if n < 0 {
return nil, errInvalidArrayLength
}
ret := make([]int32, n)
for i := range ret {
ret[i] = int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))
rd.off += 4
}
return ret, nil
}
func (rd *realDecoder) getInt64Array() ([]int64, error) {
if rd.remaining() < 4 {
rd.off = len(rd.raw)
return nil, ErrInsufficientData
}
n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
rd.off += 4
if rd.remaining() < 8*n {
rd.off = len(rd.raw)
return nil, ErrInsufficientData
}
if n == 0 {
return nil, nil
}
if n < 0 {
return nil, errInvalidArrayLength
}
ret := make([]int64, n)
for i := range ret {
ret[i] = int64(binary.BigEndian.Uint64(rd.raw[rd.off:]))
rd.off += 8
}
return ret, nil
}
func (rd *realDecoder) getStringArray() ([]string, error) {
if rd.remaining() < 4 {
rd.off = len(rd.raw)
return nil, ErrInsufficientData
}
n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
rd.off += 4
if n == 0 {
return nil, nil
}
if n < 0 {
return nil, errInvalidArrayLength
}
ret := make([]string, n)
for i := range ret {
str, err := rd.getString()
if err != nil {
return nil, err
}
ret[i] = str
}
return ret, nil
}
// subsets
func (rd *realDecoder) remaining() int {
return len(rd.raw) - rd.off
}
func (rd *realDecoder) getSubset(length int) (packetDecoder, error) {
buf, err := rd.getRawBytes(length)
if err != nil {
return nil, err
}
return &realDecoder{raw: buf}, nil
}
func (rd *realDecoder) getRawBytes(length int) ([]byte, error) {
if length < 0 {
return nil, errInvalidByteSliceLength
} else if length > rd.remaining() {
rd.off = len(rd.raw)
return nil, ErrInsufficientData
}
start := rd.off
rd.off += length
return rd.raw[start:rd.off], nil
}
func (rd *realDecoder) peek(offset, length int) (packetDecoder, error) {
if rd.remaining() < offset+length {
return nil, ErrInsufficientData
}
off := rd.off + offset
return &realDecoder{raw: rd.raw[off : off+length]}, nil
}
// stacks
func (rd *realDecoder) push(in pushDecoder) error {
in.saveOffset(rd.off)
var reserve int
if dpd, ok := in.(dynamicPushDecoder); ok {
if err := dpd.decode(rd); err != nil {
return err
}
} else {
reserve = in.reserveLength()
if rd.remaining() < reserve {
rd.off = len(rd.raw)
return ErrInsufficientData
}
}
rd.stack = append(rd.stack, in)
rd.off += reserve
return nil
}
func (rd *realDecoder) pop() error {
// this is Go's ugly pop pattern (the inverse of append)
in := rd.stack[len(rd.stack)-1]
rd.stack = rd.stack[:len(rd.stack)-1]
return in.check(rd.off, rd.raw)
}

156
vendor/github.com/Shopify/sarama/real_encoder.go generated vendored Normal file

@ -0,0 +1,156 @@
package sarama
import (
"encoding/binary"
"github.com/rcrowley/go-metrics"
)
type realEncoder struct {
raw []byte
off int
stack []pushEncoder
registry metrics.Registry
}
// primitives
func (re *realEncoder) putInt8(in int8) {
re.raw[re.off] = byte(in)
re.off++
}
func (re *realEncoder) putInt16(in int16) {
binary.BigEndian.PutUint16(re.raw[re.off:], uint16(in))
re.off += 2
}
func (re *realEncoder) putInt32(in int32) {
binary.BigEndian.PutUint32(re.raw[re.off:], uint32(in))
re.off += 4
}
func (re *realEncoder) putInt64(in int64) {
binary.BigEndian.PutUint64(re.raw[re.off:], uint64(in))
re.off += 8
}
func (re *realEncoder) putVarint(in int64) {
re.off += binary.PutVarint(re.raw[re.off:], in)
}
func (re *realEncoder) putArrayLength(in int) error {
re.putInt32(int32(in))
return nil
}
func (re *realEncoder) putBool(in bool) {
if in {
re.putInt8(1)
return
}
re.putInt8(0)
}
// collections
func (re *realEncoder) putRawBytes(in []byte) error {
copy(re.raw[re.off:], in)
re.off += len(in)
return nil
}
func (re *realEncoder) putBytes(in []byte) error {
if in == nil {
re.putInt32(-1)
return nil
}
re.putInt32(int32(len(in)))
return re.putRawBytes(in)
}
func (re *realEncoder) putVarintBytes(in []byte) error {
if in == nil {
re.putVarint(-1)
return nil
}
re.putVarint(int64(len(in)))
return re.putRawBytes(in)
}
func (re *realEncoder) putString(in string) error {
re.putInt16(int16(len(in)))
copy(re.raw[re.off:], in)
re.off += len(in)
return nil
}
func (re *realEncoder) putNullableString(in *string) error {
if in == nil {
re.putInt16(-1)
return nil
}
return re.putString(*in)
}
func (re *realEncoder) putStringArray(in []string) error {
err := re.putArrayLength(len(in))
if err != nil {
return err
}
for _, val := range in {
if err := re.putString(val); err != nil {
return err
}
}
return nil
}
func (re *realEncoder) putInt32Array(in []int32) error {
err := re.putArrayLength(len(in))
if err != nil {
return err
}
for _, val := range in {
re.putInt32(val)
}
return nil
}
func (re *realEncoder) putInt64Array(in []int64) error {
err := re.putArrayLength(len(in))
if err != nil {
return err
}
for _, val := range in {
re.putInt64(val)
}
return nil
}
func (re *realEncoder) offset() int {
return re.off
}
// stacks
func (re *realEncoder) push(in pushEncoder) {
in.saveOffset(re.off)
re.off += in.reserveLength()
re.stack = append(re.stack, in)
}
func (re *realEncoder) pop() error {
// this is Go's ugly pop pattern (the inverse of append)
in := re.stack[len(re.stack)-1]
re.stack = re.stack[:len(re.stack)-1]
return in.run(re.off, re.raw)
}
// we do record metrics during the real encoder pass
func (re *realEncoder) metricRegistry() metrics.Registry {
return re.registry
}
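Together, prepEncoder and realEncoder form a two-pass pipeline: size first, then write into an exactly-sized buffer. A sketch of that flow, assuming sarama's unexported encoder interface (a single encode(packetEncoder) error method); the package-level encode() helper used by produce_set.go does essentially this, plus metrics wiring.

package sarama

// twoPassEncode sizes a payload with prepEncoder, then serializes it
// with realEncoder into a buffer of exactly that size.
func twoPassEncode(e encoder) ([]byte, error) {
	prep := &prepEncoder{}
	if err := e.encode(prep); err != nil { // pass 1: count bytes only
		return nil, err
	}
	re := &realEncoder{raw: make([]byte, prep.length)}
	if err := e.encode(re); err != nil { // pass 2: actually write
		return nil, err
	}
	return re.raw, nil
}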

113
vendor/github.com/Shopify/sarama/record.go generated vendored Normal file

@ -0,0 +1,113 @@
package sarama
import (
"encoding/binary"
"time"
)
const (
controlMask = 0x20
maximumRecordOverhead = 5*binary.MaxVarintLen32 + binary.MaxVarintLen64 + 1
)
type RecordHeader struct {
Key []byte
Value []byte
}
func (h *RecordHeader) encode(pe packetEncoder) error {
if err := pe.putVarintBytes(h.Key); err != nil {
return err
}
return pe.putVarintBytes(h.Value)
}
func (h *RecordHeader) decode(pd packetDecoder) (err error) {
if h.Key, err = pd.getVarintBytes(); err != nil {
return err
}
if h.Value, err = pd.getVarintBytes(); err != nil {
return err
}
return nil
}
type Record struct {
Attributes int8
TimestampDelta time.Duration
OffsetDelta int64
Key []byte
Value []byte
Headers []*RecordHeader
length varintLengthField
}
func (r *Record) encode(pe packetEncoder) error {
pe.push(&r.length)
pe.putInt8(r.Attributes)
pe.putVarint(int64(r.TimestampDelta / time.Millisecond))
pe.putVarint(r.OffsetDelta)
if err := pe.putVarintBytes(r.Key); err != nil {
return err
}
if err := pe.putVarintBytes(r.Value); err != nil {
return err
}
pe.putVarint(int64(len(r.Headers)))
for _, h := range r.Headers {
if err := h.encode(pe); err != nil {
return err
}
}
return pe.pop()
}
func (r *Record) decode(pd packetDecoder) (err error) {
if err = pd.push(&r.length); err != nil {
return err
}
if r.Attributes, err = pd.getInt8(); err != nil {
return err
}
timestamp, err := pd.getVarint()
if err != nil {
return err
}
r.TimestampDelta = time.Duration(timestamp) * time.Millisecond
if r.OffsetDelta, err = pd.getVarint(); err != nil {
return err
}
if r.Key, err = pd.getVarintBytes(); err != nil {
return err
}
if r.Value, err = pd.getVarintBytes(); err != nil {
return err
}
numHeaders, err := pd.getVarint()
if err != nil {
return err
}
if numHeaders >= 0 {
r.Headers = make([]*RecordHeader, numHeaders)
}
for i := int64(0); i < numHeaders; i++ {
hdr := new(RecordHeader)
if err := hdr.decode(pd); err != nil {
return err
}
r.Headers[i] = hdr
}
return pd.pop()
}

268
vendor/github.com/Shopify/sarama/record_batch.go generated vendored Normal file

@ -0,0 +1,268 @@
package sarama
import (
"bytes"
"compress/gzip"
"fmt"
"io/ioutil"
"time"
"github.com/eapache/go-xerial-snappy"
"github.com/pierrec/lz4"
)
const recordBatchOverhead = 49
type recordsArray []*Record
func (e recordsArray) encode(pe packetEncoder) error {
for _, r := range e {
if err := r.encode(pe); err != nil {
return err
}
}
return nil
}
func (e recordsArray) decode(pd packetDecoder) error {
for i := range e {
rec := &Record{}
if err := rec.decode(pd); err != nil {
return err
}
e[i] = rec
}
return nil
}
type RecordBatch struct {
FirstOffset int64
PartitionLeaderEpoch int32
Version int8
Codec CompressionCodec
CompressionLevel int
Control bool
LastOffsetDelta int32
FirstTimestamp time.Time
MaxTimestamp time.Time
ProducerID int64
ProducerEpoch int16
FirstSequence int32
Records []*Record
PartialTrailingRecord bool
compressedRecords []byte
recordsLen int // uncompressed records size
}
func (b *RecordBatch) encode(pe packetEncoder) error {
if b.Version != 2 {
return PacketEncodingError{fmt.Sprintf("unsupported record batch version (%d)", b.Version)}
}
pe.putInt64(b.FirstOffset)
pe.push(&lengthField{})
pe.putInt32(b.PartitionLeaderEpoch)
pe.putInt8(b.Version)
pe.push(newCRC32Field(crcCastagnoli))
pe.putInt16(b.computeAttributes())
pe.putInt32(b.LastOffsetDelta)
if err := (Timestamp{&b.FirstTimestamp}).encode(pe); err != nil {
return err
}
if err := (Timestamp{&b.MaxTimestamp}).encode(pe); err != nil {
return err
}
pe.putInt64(b.ProducerID)
pe.putInt16(b.ProducerEpoch)
pe.putInt32(b.FirstSequence)
if err := pe.putArrayLength(len(b.Records)); err != nil {
return err
}
if b.compressedRecords == nil {
if err := b.encodeRecords(pe); err != nil {
return err
}
}
if err := pe.putRawBytes(b.compressedRecords); err != nil {
return err
}
if err := pe.pop(); err != nil {
return err
}
return pe.pop()
}
func (b *RecordBatch) decode(pd packetDecoder) (err error) {
if b.FirstOffset, err = pd.getInt64(); err != nil {
return err
}
batchLen, err := pd.getInt32()
if err != nil {
return err
}
if b.PartitionLeaderEpoch, err = pd.getInt32(); err != nil {
return err
}
if b.Version, err = pd.getInt8(); err != nil {
return err
}
if err = pd.push(&crc32Field{polynomial: crcCastagnoli}); err != nil {
return err
}
attributes, err := pd.getInt16()
if err != nil {
return err
}
b.Codec = CompressionCodec(int8(attributes) & compressionCodecMask)
b.Control = attributes&controlMask == controlMask
if b.LastOffsetDelta, err = pd.getInt32(); err != nil {
return err
}
if err = (Timestamp{&b.FirstTimestamp}).decode(pd); err != nil {
return err
}
if err = (Timestamp{&b.MaxTimestamp}).decode(pd); err != nil {
return err
}
if b.ProducerID, err = pd.getInt64(); err != nil {
return err
}
if b.ProducerEpoch, err = pd.getInt16(); err != nil {
return err
}
if b.FirstSequence, err = pd.getInt32(); err != nil {
return err
}
numRecs, err := pd.getArrayLength()
if err != nil {
return err
}
if numRecs >= 0 {
b.Records = make([]*Record, numRecs)
}
bufSize := int(batchLen) - recordBatchOverhead
recBuffer, err := pd.getRawBytes(bufSize)
if err != nil {
if err == ErrInsufficientData {
b.PartialTrailingRecord = true
b.Records = nil
return nil
}
return err
}
if err = pd.pop(); err != nil {
return err
}
switch b.Codec {
case CompressionNone:
case CompressionGZIP:
reader, err := gzip.NewReader(bytes.NewReader(recBuffer))
if err != nil {
return err
}
if recBuffer, err = ioutil.ReadAll(reader); err != nil {
return err
}
case CompressionSnappy:
if recBuffer, err = snappy.Decode(recBuffer); err != nil {
return err
}
case CompressionLZ4:
reader := lz4.NewReader(bytes.NewReader(recBuffer))
if recBuffer, err = ioutil.ReadAll(reader); err != nil {
return err
}
default:
return PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", b.Codec)}
}
b.recordsLen = len(recBuffer)
err = decode(recBuffer, recordsArray(b.Records))
if err == ErrInsufficientData {
b.PartialTrailingRecord = true
b.Records = nil
return nil
}
return err
}
func (b *RecordBatch) encodeRecords(pe packetEncoder) error {
var raw []byte
var err error
if raw, err = encode(recordsArray(b.Records), pe.metricRegistry()); err != nil {
return err
}
b.recordsLen = len(raw)
switch b.Codec {
case CompressionNone:
b.compressedRecords = raw
case CompressionGZIP:
var buf bytes.Buffer
var writer *gzip.Writer
if b.CompressionLevel != CompressionLevelDefault {
writer, err = gzip.NewWriterLevel(&buf, b.CompressionLevel)
if err != nil {
return err
}
} else {
writer = gzip.NewWriter(&buf)
}
if _, err := writer.Write(raw); err != nil {
return err
}
if err := writer.Close(); err != nil {
return err
}
b.compressedRecords = buf.Bytes()
case CompressionSnappy:
b.compressedRecords = snappy.Encode(raw)
case CompressionLZ4:
var buf bytes.Buffer
writer := lz4.NewWriter(&buf)
if _, err := writer.Write(raw); err != nil {
return err
}
if err := writer.Close(); err != nil {
return err
}
b.compressedRecords = buf.Bytes()
default:
return PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", b.Codec)}
}
return nil
}
func (b *RecordBatch) computeAttributes() int16 {
attr := int16(b.Codec) & int16(compressionCodecMask)
if b.Control {
attr |= controlMask
}
return attr
}
func (b *RecordBatch) addRecord(r *Record) {
b.Records = append(b.Records, r)
}

194
vendor/github.com/Shopify/sarama/records.go generated vendored Normal file

@ -0,0 +1,194 @@
package sarama
import "fmt"
const (
unknownRecords = iota
legacyRecords
defaultRecords
magicOffset = 16
magicLength = 1
)
// Records implements a union type containing either a RecordBatch or a legacy MessageSet.
type Records struct {
recordsType int
MsgSet *MessageSet
RecordBatch *RecordBatch
}
func newLegacyRecords(msgSet *MessageSet) Records {
return Records{recordsType: legacyRecords, MsgSet: msgSet}
}
func newDefaultRecords(batch *RecordBatch) Records {
return Records{recordsType: defaultRecords, RecordBatch: batch}
}
// setTypeFromFields sets the type of the Records depending on which of MsgSet or RecordBatch is not nil.
// The first return value indicates whether both fields are nil (and the type is not set).
// If both fields are not nil, it returns an error.
func (r *Records) setTypeFromFields() (bool, error) {
if r.MsgSet == nil && r.RecordBatch == nil {
return true, nil
}
if r.MsgSet != nil && r.RecordBatch != nil {
return false, fmt.Errorf("both MsgSet and RecordBatch are set, but record type is unknown")
}
r.recordsType = defaultRecords
if r.MsgSet != nil {
r.recordsType = legacyRecords
}
return false, nil
}
func (r *Records) encode(pe packetEncoder) error {
if r.recordsType == unknownRecords {
if empty, err := r.setTypeFromFields(); err != nil || empty {
return err
}
}
switch r.recordsType {
case legacyRecords:
if r.MsgSet == nil {
return nil
}
return r.MsgSet.encode(pe)
case defaultRecords:
if r.RecordBatch == nil {
return nil
}
return r.RecordBatch.encode(pe)
}
return fmt.Errorf("unknown records type: %v", r.recordsType)
}
func (r *Records) setTypeFromMagic(pd packetDecoder) error {
magic, err := magicValue(pd)
if err != nil {
return err
}
r.recordsType = defaultRecords
if magic < 2 {
r.recordsType = legacyRecords
}
return nil
}
func (r *Records) decode(pd packetDecoder) error {
if r.recordsType == unknownRecords {
if err := r.setTypeFromMagic(pd); err != nil {
return err
}
}
switch r.recordsType {
case legacyRecords:
r.MsgSet = &MessageSet{}
return r.MsgSet.decode(pd)
case defaultRecords:
r.RecordBatch = &RecordBatch{}
return r.RecordBatch.decode(pd)
}
return fmt.Errorf("unknown records type: %v", r.recordsType)
}
func (r *Records) numRecords() (int, error) {
if r.recordsType == unknownRecords {
if empty, err := r.setTypeFromFields(); err != nil || empty {
return 0, err
}
}
switch r.recordsType {
case legacyRecords:
if r.MsgSet == nil {
return 0, nil
}
return len(r.MsgSet.Messages), nil
case defaultRecords:
if r.RecordBatch == nil {
return 0, nil
}
return len(r.RecordBatch.Records), nil
}
return 0, fmt.Errorf("unknown records type: %v", r.recordsType)
}
func (r *Records) isPartial() (bool, error) {
if r.recordsType == unknownRecords {
if empty, err := r.setTypeFromFields(); err != nil || empty {
return false, err
}
}
switch r.recordsType {
case unknownRecords:
return false, nil
case legacyRecords:
if r.MsgSet == nil {
return false, nil
}
return r.MsgSet.PartialTrailingMessage, nil
case defaultRecords:
if r.RecordBatch == nil {
return false, nil
}
return r.RecordBatch.PartialTrailingRecord, nil
}
return false, fmt.Errorf("unknown records type: %v", r.recordsType)
}
func (r *Records) isControl() (bool, error) {
if r.recordsType == unknownRecords {
if empty, err := r.setTypeFromFields(); err != nil || empty {
return false, err
}
}
switch r.recordsType {
case legacyRecords:
return false, nil
case defaultRecords:
if r.RecordBatch == nil {
return false, nil
}
return r.RecordBatch.Control, nil
}
return false, fmt.Errorf("unknown records type: %v", r.recordsType)
}
func (r *Records) isOverflow() (bool, error) {
if r.recordsType == unknownRecords {
if empty, err := r.setTypeFromFields(); err != nil || empty {
return false, err
}
}
switch r.recordsType {
case unknownRecords:
return false, nil
case legacyRecords:
if r.MsgSet == nil {
return false, nil
}
return r.MsgSet.OverflowMessage, nil
case defaultRecords:
return false, nil
}
return false, fmt.Errorf("unknown records type: %v", r.recordsType)
}
func magicValue(pd packetDecoder) (int8, error) {
dec, err := pd.peek(magicOffset, magicLength)
if err != nil {
return 0, err
}
return dec.getInt8()
}
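A sketch of the Records union from both directions, assuming it lives in package sarama; the function name is hypothetical.

package sarama

// recordsUnionExample shows how a Records value acquires its type.
func recordsUnionExample() {
	// Producer side: the constructor fixes the type explicitly.
	legacy := newLegacyRecords(&MessageSet{})
	modern := newDefaultRecords(&RecordBatch{Version: 2})
	n1, _ := legacy.numRecords() // counts MsgSet.Messages
	n2, _ := modern.numRecords() // counts RecordBatch.Records
	_, _ = n1, n2

	// Consumer side: decode() peeks at the magic byte (offset 16 into
	// the raw data) and picks legacyRecords for magic < 2, otherwise
	// defaultRecords; no type needs to be set up front.
}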

149
vendor/github.com/Shopify/sarama/request.go generated vendored Normal file

@ -0,0 +1,149 @@
package sarama
import (
"encoding/binary"
"fmt"
"io"
)
type protocolBody interface {
encoder
versionedDecoder
key() int16
version() int16
requiredVersion() KafkaVersion
}
type request struct {
correlationID int32
clientID string
body protocolBody
}
func (r *request) encode(pe packetEncoder) (err error) {
pe.push(&lengthField{})
pe.putInt16(r.body.key())
pe.putInt16(r.body.version())
pe.putInt32(r.correlationID)
err = pe.putString(r.clientID)
if err != nil {
return err
}
err = r.body.encode(pe)
if err != nil {
return err
}
return pe.pop()
}
func (r *request) decode(pd packetDecoder) (err error) {
var key int16
if key, err = pd.getInt16(); err != nil {
return err
}
var version int16
if version, err = pd.getInt16(); err != nil {
return err
}
if r.correlationID, err = pd.getInt32(); err != nil {
return err
}
if r.clientID, err = pd.getString(); err != nil {
return err
}
r.body = allocateBody(key, version)
if r.body == nil {
return PacketDecodingError{fmt.Sprintf("unknown request key (%d)", key)}
}
return r.body.decode(pd, version)
}
func decodeRequest(r io.Reader) (req *request, bytesRead int, err error) {
lengthBytes := make([]byte, 4)
if _, err := io.ReadFull(r, lengthBytes); err != nil {
return nil, bytesRead, err
}
bytesRead += len(lengthBytes)
length := int32(binary.BigEndian.Uint32(lengthBytes))
if length <= 4 || length > MaxRequestSize {
return nil, bytesRead, PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", length)}
}
encodedReq := make([]byte, length)
if _, err := io.ReadFull(r, encodedReq); err != nil {
return nil, bytesRead, err
}
bytesRead += len(encodedReq)
req = &request{}
if err := decode(encodedReq, req); err != nil {
return nil, bytesRead, err
}
return req, bytesRead, nil
}
func allocateBody(key, version int16) protocolBody {
switch key {
case 0:
return &ProduceRequest{}
case 1:
return &FetchRequest{}
case 2:
return &OffsetRequest{Version: version}
case 3:
return &MetadataRequest{}
case 8:
return &OffsetCommitRequest{Version: version}
case 9:
return &OffsetFetchRequest{}
case 10:
return &FindCoordinatorRequest{}
case 11:
return &JoinGroupRequest{}
case 12:
return &HeartbeatRequest{}
case 13:
return &LeaveGroupRequest{}
case 14:
return &SyncGroupRequest{}
case 15:
return &DescribeGroupsRequest{}
case 16:
return &ListGroupsRequest{}
case 17:
return &SaslHandshakeRequest{}
case 18:
return &ApiVersionsRequest{}
case 19:
return &CreateTopicsRequest{}
case 20:
return &DeleteTopicsRequest{}
case 21:
return &DeleteRecordsRequest{}
case 22:
return &InitProducerIDRequest{}
case 24:
return &AddPartitionsToTxnRequest{}
case 25:
return &AddOffsetsToTxnRequest{}
case 26:
return &EndTxnRequest{}
case 28:
return &TxnOffsetCommitRequest{}
case 29:
return &DescribeAclsRequest{}
case 30:
return &CreateAclsRequest{}
case 31:
return &DeleteAclsRequest{}
case 32:
return &DescribeConfigsRequest{}
case 33:
return &AlterConfigsRequest{}
case 37:
return &CreatePartitionsRequest{}
case 42:
return &DeleteGroupsRequest{}
}
return nil
}
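A round-trip sketch of the request framing: encode() pushes the 4-byte length prefix up front, and decodeRequest enforces it on the way back in. This assumes sarama's package-level encode() helper from encoder_decoder.go (not shown in this diff); the function name is hypothetical.

package sarama

import "bytes"

// requestRoundTrip frames a request and reads it back.
func requestRoundTrip() (*request, error) {
	req := &request{
		correlationID: 1,
		clientID:      "example-client",
		body:          &ProduceRequest{RequiredAcks: WaitForLocal},
	}
	buf, err := encode(req, nil) // length prefix back-filled at pop()
	if err != nil {
		return nil, err
	}
	decoded, _, err := decodeRequest(bytes.NewReader(buf))
	return decoded, err // decoded.body is a *ProduceRequest (key 0)
}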

21
vendor/github.com/Shopify/sarama/response_header.go generated vendored Normal file

@ -0,0 +1,21 @@
package sarama
import "fmt"
type responseHeader struct {
length int32
correlationID int32
}
func (r *responseHeader) decode(pd packetDecoder) (err error) {
r.length, err = pd.getInt32()
if err != nil {
return err
}
if r.length <= 4 || r.length > MaxResponseSize {
return PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", r.length)}
}
r.correlationID, err = pd.getInt32()
return err
}
