Add dependency directories

parent d84a334ae5
commit 8dc428e54f
@@ -12,9 +12,6 @@
*.out
coverage.txt

# Dependency directories (remove the comment below to include it)
vendor/

# buf for protobuf
buf.lock
@@ -1,4 +1,4 @@
FROM golang:1.20.2-alpine3.17 AS builder
FROM golang:1.21.2-alpine3.18 AS builder

WORKDIR /app

@@ -9,11 +9,10 @@ ENV GOPROXY https://goproxy.cn,direct

COPY . .
COPY etc/ /app/
RUN go mod download
RUN go build -o /app/pcm-ac /app/hpcac.go

FROM alpine:3.16.2
FROM alpine:3.18
WORKDIR /app

# Switch the Alpine package mirror to Shanghai Jiao Tong University
@@ -0,0 +1 @@
squirrel.test
@@ -0,0 +1,30 @@
language: go

go:
  - 1.11.x
  - 1.12.x
  - 1.13.x

services:
  - mysql
  - postgresql

# Setting sudo access to false will let Travis CI use containers rather than
# VMs to run the tests. For more details see:
# - http://docs.travis-ci.com/user/workers/container-based-infrastructure/
# - http://docs.travis-ci.com/user/workers/standard-infrastructure/
sudo: false

before_script:
  - mysql -e 'CREATE DATABASE squirrel;'
  - psql -c 'CREATE DATABASE squirrel;' -U postgres

script:
  - go test
  - cd integration
  - go test -args -driver sqlite3
  - go test -args -driver mysql -dataSource travis@/squirrel
  - go test -args -driver postgres -dataSource 'postgres://postgres@localhost/squirrel?sslmode=disable'

notifications:
  irc: "irc.freenode.net#masterminds"
@@ -0,0 +1,23 @@
MIT License

Squirrel: The Masterminds
Copyright (c) 2014-2015, Lann Martin. Copyright (C) 2015-2016, Google. Copyright (C) 2015, Matt Farina and Matt Butcher.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
@@ -0,0 +1,142 @@
[![Stability: Maintenance](https://masterminds.github.io/stability/maintenance.svg)](https://masterminds.github.io/stability/maintenance.html)
### Squirrel is "complete".
Bug fixes will still be merged (slowly). Bug reports are welcome, but I will not necessarily respond to them. If another fork (or substantially similar project) actively improves on what Squirrel does, let me know and I may link to it here.

# Squirrel - fluent SQL generator for Go

```go
import "github.com/Masterminds/squirrel"
```

[![GoDoc](https://godoc.org/github.com/Masterminds/squirrel?status.png)](https://godoc.org/github.com/Masterminds/squirrel)
[![Build Status](https://api.travis-ci.org/Masterminds/squirrel.svg?branch=master)](https://travis-ci.org/Masterminds/squirrel)

**Squirrel is not an ORM.** For an application of Squirrel, check out
[structable, a table-struct mapper](https://github.com/Masterminds/structable)

Squirrel helps you build SQL queries from composable parts:

```go
import sq "github.com/Masterminds/squirrel"

users := sq.Select("*").From("users").Join("emails USING (email_id)")

active := users.Where(sq.Eq{"deleted_at": nil})

sql, args, err := active.ToSql()

sql == "SELECT * FROM users JOIN emails USING (email_id) WHERE deleted_at IS NULL"
```

```go
sql, args, err := sq.
    Insert("users").Columns("name", "age").
    Values("moe", 13).Values("larry", sq.Expr("? + 5", 12)).
    ToSql()

sql == "INSERT INTO users (name,age) VALUES (?,?),(?,? + 5)"
```

Squirrel can also execute queries directly:

```go
stooges := users.Where(sq.Eq{"username": []string{"moe", "larry", "curly", "shemp"}})
three_stooges := stooges.Limit(3)
rows, err := three_stooges.RunWith(db).Query()

// Behaves like:
rows, err := db.Query("SELECT * FROM users WHERE username IN (?,?,?,?) LIMIT 3",
    "moe", "larry", "curly", "shemp")
```

Squirrel makes conditional query building a breeze:

```go
if len(q) > 0 {
    users = users.Where("name LIKE ?", fmt.Sprint("%", q, "%"))
}
```

Squirrel wants to make your life easier:

```go
// StmtCache caches Prepared Stmts for you
dbCache := sq.NewStmtCache(db)

// StatementBuilder keeps your syntax neat
mydb := sq.StatementBuilder.RunWith(dbCache)
select_users := mydb.Select("*").From("users")
```

Squirrel loves PostgreSQL:

```go
psql := sq.StatementBuilder.PlaceholderFormat(sq.Dollar)

// You use question marks for placeholders...
sql, _, _ := psql.Select("*").From("elephants").Where("name IN (?,?)", "Dumbo", "Verna").ToSql()

// ...squirrel replaces them using PlaceholderFormat.
sql == "SELECT * FROM elephants WHERE name IN ($1,$2)"

// You can retrieve id ...
query := sq.Insert("nodes").
    Columns("uuid", "type", "data").
    Values(node.Uuid, node.Type, node.Data).
    Suffix("RETURNING \"id\"").
    RunWith(m.db).
    PlaceholderFormat(sq.Dollar)

query.QueryRow().Scan(&node.id)
```

You can escape question marks by inserting two question marks:

```sql
SELECT * FROM nodes WHERE meta->'format' ??| array[?,?]
```

will generate with the Dollar Placeholder:

```sql
SELECT * FROM nodes WHERE meta->'format' ?| array[$1,$2]
```
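
For example, combined with the `psql` builder configured above (a sketch; the argument values are illustrative):

```go
sql, args, _ := psql.Select("*").
    From("nodes").
    Where("meta->'format' ??| array[?,?]", "gif", "png").
    ToSql()

// sql == "SELECT * FROM nodes WHERE meta->'format' ?| array[$1,$2]"
// args == []interface{}{"gif", "png"}
```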

## FAQ

* **How can I build an IN query on composite keys / tuples, e.g. `WHERE (col1, col2) IN ((1,2),(3,4))`? ([#104](https://github.com/Masterminds/squirrel/issues/104))**

Squirrel does not explicitly support tuples, but you can get the same effect with e.g.:

```go
sq.Or{
  sq.Eq{"col1": 1, "col2": 2},
  sq.Eq{"col1": 3, "col2": 4}}
```

```sql
WHERE (col1 = 1 AND col2 = 2) OR (col1 = 3 AND col2 = 4)
```

(which should produce the same query plan as the tuple version)

* **Why doesn't `Eq{"mynumber": []uint8{1,2,3}}` turn into an `IN` query? ([#114](https://github.com/Masterminds/squirrel/issues/114))**

Values of type `[]byte` are handled specially by `database/sql`. In Go, [`byte` is just an alias of `uint8`](https://golang.org/pkg/builtin/#byte), so there is no way to distinguish `[]uint8` from `[]byte`.
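
One workaround (a sketch, not from the original README) is to widen the values to a non-byte element type before passing them to `Eq`:

```go
ids := []uint8{1, 2, 3}

// Use a slice type other than []uint8 so Eq builds an IN clause
// instead of treating the value as []byte.
in := make([]int, len(ids))
for i, v := range ids {
    in[i] = int(v)
}

sql, args, _ := sq.Select("*").From("users").Where(sq.Eq{"mynumber": in}).ToSql()
// sql == "SELECT * FROM users WHERE mynumber IN (?,?,?)"
```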

* **Some features are poorly documented!**

This isn't a frequent complaints section!

* **Some features are poorly documented?**

Yes. The tests should be considered a part of the documentation; take a look at those for ideas on how to express more complex queries.

## License

Squirrel is released under the
[MIT License](http://www.opensource.org/licenses/MIT).
@@ -0,0 +1,128 @@
|
|||
package squirrel
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
|
||||
"github.com/lann/builder"
|
||||
)
|
||||
|
||||
func init() {
|
||||
builder.Register(CaseBuilder{}, caseData{})
|
||||
}
|
||||
|
||||
// sqlizerBuffer is a helper that allows writing many Sqlizers one by one
|
||||
// without constant checks for errors that may come from Sqlizer
|
||||
type sqlizerBuffer struct {
|
||||
bytes.Buffer
|
||||
args []interface{}
|
||||
err error
|
||||
}
|
||||
|
||||
// WriteSql converts a Sqlizer to SQL and writes it to the buffer
|
||||
func (b *sqlizerBuffer) WriteSql(item Sqlizer) {
|
||||
if b.err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var str string
|
||||
var args []interface{}
|
||||
str, args, b.err = nestedToSql(item)
|
||||
|
||||
if b.err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
b.WriteString(str)
|
||||
b.WriteByte(' ')
|
||||
b.args = append(b.args, args...)
|
||||
}
|
||||
|
||||
func (b *sqlizerBuffer) ToSql() (string, []interface{}, error) {
|
||||
return b.String(), b.args, b.err
|
||||
}
|
||||
|
||||
// whenPart is a helper structure to describe SQL's "WHEN ... THEN ..." expression
|
||||
type whenPart struct {
|
||||
when Sqlizer
|
||||
then Sqlizer
|
||||
}
|
||||
|
||||
func newWhenPart(when interface{}, then interface{}) whenPart {
|
||||
return whenPart{newPart(when), newPart(then)}
|
||||
}
|
||||
|
||||
// caseData holds all the data required to build a CASE SQL construct
|
||||
type caseData struct {
|
||||
What Sqlizer
|
||||
WhenParts []whenPart
|
||||
Else Sqlizer
|
||||
}
|
||||
|
||||
// ToSql implements Sqlizer
|
||||
func (d *caseData) ToSql() (sqlStr string, args []interface{}, err error) {
|
||||
if len(d.WhenParts) == 0 {
|
||||
err = errors.New("case expression must contain at lease one WHEN clause")
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
sql := sqlizerBuffer{}
|
||||
|
||||
sql.WriteString("CASE ")
|
||||
if d.What != nil {
|
||||
sql.WriteSql(d.What)
|
||||
}
|
||||
|
||||
for _, p := range d.WhenParts {
|
||||
sql.WriteString("WHEN ")
|
||||
sql.WriteSql(p.when)
|
||||
sql.WriteString("THEN ")
|
||||
sql.WriteSql(p.then)
|
||||
}
|
||||
|
||||
if d.Else != nil {
|
||||
sql.WriteString("ELSE ")
|
||||
sql.WriteSql(d.Else)
|
||||
}
|
||||
|
||||
sql.WriteString("END")
|
||||
|
||||
return sql.ToSql()
|
||||
}
|
||||
|
||||
// CaseBuilder builds a SQL CASE construct which can be used as part of a query.
|
||||
type CaseBuilder builder.Builder
|
||||
|
||||
// ToSql builds the query into a SQL string and bound args.
|
||||
func (b CaseBuilder) ToSql() (string, []interface{}, error) {
|
||||
data := builder.GetStruct(b).(caseData)
|
||||
return data.ToSql()
|
||||
}
|
||||
|
||||
// MustSql builds the query into a SQL string and bound args.
|
||||
// It panics if there are any errors.
|
||||
func (b CaseBuilder) MustSql() (string, []interface{}) {
|
||||
sql, args, err := b.ToSql()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return sql, args
|
||||
}
|
||||
|
||||
// what sets optional value for CASE construct "CASE [value] ..."
|
||||
func (b CaseBuilder) what(expr interface{}) CaseBuilder {
|
||||
return builder.Set(b, "What", newPart(expr)).(CaseBuilder)
|
||||
}
|
||||
|
||||
// When adds "WHEN ... THEN ..." part to CASE construct
|
||||
func (b CaseBuilder) When(when interface{}, then interface{}) CaseBuilder {
|
||||
// TODO: performance hint: replace slice of WhenPart with just slice of parts
|
||||
// where even indices of the slice belong to "when"s and odd indices belong to "then"s
|
||||
return builder.Append(b, "WhenParts", newWhenPart(when, then)).(CaseBuilder)
|
||||
}
|
||||
|
||||
// Else sets the optional "ELSE ..." part for the CASE construct
|
||||
func (b CaseBuilder) Else(expr interface{}) CaseBuilder {
|
||||
return builder.Set(b, "Else", newPart(expr)).(CaseBuilder)
|
||||
}
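
For context, a minimal CaseBuilder sketch (assuming the package-level `sq.Case` constructor and `sq.Select`/`sq.Alias` provided elsewhere in the library; values are illustrative):

```go
caseStmt := sq.Case("number").
    When("1", "one").
    When("2", "two").
    Else("'many'")

sql, args, err := sq.Select().
    Column(sq.Alias(caseStmt, "name")).
    From("atable").
    ToSql()

// sql is roughly:
// "SELECT (CASE number WHEN 1 THEN one WHEN 2 THEN two ELSE 'many' END) AS name FROM atable"
```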
|
|
@@ -0,0 +1,191 @@
|
|||
package squirrel
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/lann/builder"
|
||||
)
|
||||
|
||||
type deleteData struct {
|
||||
PlaceholderFormat PlaceholderFormat
|
||||
RunWith BaseRunner
|
||||
Prefixes []Sqlizer
|
||||
From string
|
||||
WhereParts []Sqlizer
|
||||
OrderBys []string
|
||||
Limit string
|
||||
Offset string
|
||||
Suffixes []Sqlizer
|
||||
}
|
||||
|
||||
func (d *deleteData) Exec() (sql.Result, error) {
|
||||
if d.RunWith == nil {
|
||||
return nil, RunnerNotSet
|
||||
}
|
||||
return ExecWith(d.RunWith, d)
|
||||
}
|
||||
|
||||
func (d *deleteData) ToSql() (sqlStr string, args []interface{}, err error) {
|
||||
if len(d.From) == 0 {
|
||||
err = fmt.Errorf("delete statements must specify a From table")
|
||||
return
|
||||
}
|
||||
|
||||
sql := &bytes.Buffer{}
|
||||
|
||||
if len(d.Prefixes) > 0 {
|
||||
args, err = appendToSql(d.Prefixes, sql, " ", args)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
sql.WriteString(" ")
|
||||
}
|
||||
|
||||
sql.WriteString("DELETE FROM ")
|
||||
sql.WriteString(d.From)
|
||||
|
||||
if len(d.WhereParts) > 0 {
|
||||
sql.WriteString(" WHERE ")
|
||||
args, err = appendToSql(d.WhereParts, sql, " AND ", args)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if len(d.OrderBys) > 0 {
|
||||
sql.WriteString(" ORDER BY ")
|
||||
sql.WriteString(strings.Join(d.OrderBys, ", "))
|
||||
}
|
||||
|
||||
if len(d.Limit) > 0 {
|
||||
sql.WriteString(" LIMIT ")
|
||||
sql.WriteString(d.Limit)
|
||||
}
|
||||
|
||||
if len(d.Offset) > 0 {
|
||||
sql.WriteString(" OFFSET ")
|
||||
sql.WriteString(d.Offset)
|
||||
}
|
||||
|
||||
if len(d.Suffixes) > 0 {
|
||||
sql.WriteString(" ")
|
||||
args, err = appendToSql(d.Suffixes, sql, " ", args)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
sqlStr, err = d.PlaceholderFormat.ReplacePlaceholders(sql.String())
|
||||
return
|
||||
}
|
||||
|
||||
// Builder
|
||||
|
||||
// DeleteBuilder builds SQL DELETE statements.
|
||||
type DeleteBuilder builder.Builder
|
||||
|
||||
func init() {
|
||||
builder.Register(DeleteBuilder{}, deleteData{})
|
||||
}
|
||||
|
||||
// Format methods
|
||||
|
||||
// PlaceholderFormat sets PlaceholderFormat (e.g. Question or Dollar) for the
|
||||
// query.
|
||||
func (b DeleteBuilder) PlaceholderFormat(f PlaceholderFormat) DeleteBuilder {
|
||||
return builder.Set(b, "PlaceholderFormat", f).(DeleteBuilder)
|
||||
}
|
||||
|
||||
// Runner methods
|
||||
|
||||
// RunWith sets a Runner (like database/sql.DB) to be used with e.g. Exec.
|
||||
func (b DeleteBuilder) RunWith(runner BaseRunner) DeleteBuilder {
|
||||
return setRunWith(b, runner).(DeleteBuilder)
|
||||
}
|
||||
|
||||
// Exec builds and Execs the query with the Runner set by RunWith.
|
||||
func (b DeleteBuilder) Exec() (sql.Result, error) {
|
||||
data := builder.GetStruct(b).(deleteData)
|
||||
return data.Exec()
|
||||
}
|
||||
|
||||
// SQL methods
|
||||
|
||||
// ToSql builds the query into a SQL string and bound args.
|
||||
func (b DeleteBuilder) ToSql() (string, []interface{}, error) {
|
||||
data := builder.GetStruct(b).(deleteData)
|
||||
return data.ToSql()
|
||||
}
|
||||
|
||||
// MustSql builds the query into a SQL string and bound args.
|
||||
// It panics if there are any errors.
|
||||
func (b DeleteBuilder) MustSql() (string, []interface{}) {
|
||||
sql, args, err := b.ToSql()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return sql, args
|
||||
}
|
||||
|
||||
// Prefix adds an expression to the beginning of the query
|
||||
func (b DeleteBuilder) Prefix(sql string, args ...interface{}) DeleteBuilder {
|
||||
return b.PrefixExpr(Expr(sql, args...))
|
||||
}
|
||||
|
||||
// PrefixExpr adds an expression to the very beginning of the query
|
||||
func (b DeleteBuilder) PrefixExpr(expr Sqlizer) DeleteBuilder {
|
||||
return builder.Append(b, "Prefixes", expr).(DeleteBuilder)
|
||||
}
|
||||
|
||||
// From sets the table to be deleted from.
|
||||
func (b DeleteBuilder) From(from string) DeleteBuilder {
|
||||
return builder.Set(b, "From", from).(DeleteBuilder)
|
||||
}
|
||||
|
||||
// Where adds WHERE expressions to the query.
|
||||
//
|
||||
// See SelectBuilder.Where for more information.
|
||||
func (b DeleteBuilder) Where(pred interface{}, args ...interface{}) DeleteBuilder {
|
||||
return builder.Append(b, "WhereParts", newWherePart(pred, args...)).(DeleteBuilder)
|
||||
}
|
||||
|
||||
// OrderBy adds ORDER BY expressions to the query.
|
||||
func (b DeleteBuilder) OrderBy(orderBys ...string) DeleteBuilder {
|
||||
return builder.Extend(b, "OrderBys", orderBys).(DeleteBuilder)
|
||||
}
|
||||
|
||||
// Limit sets a LIMIT clause on the query.
|
||||
func (b DeleteBuilder) Limit(limit uint64) DeleteBuilder {
|
||||
return builder.Set(b, "Limit", fmt.Sprintf("%d", limit)).(DeleteBuilder)
|
||||
}
|
||||
|
||||
// Offset sets a OFFSET clause on the query.
|
||||
func (b DeleteBuilder) Offset(offset uint64) DeleteBuilder {
|
||||
return builder.Set(b, "Offset", fmt.Sprintf("%d", offset)).(DeleteBuilder)
|
||||
}
|
||||
|
||||
// Suffix adds an expression to the end of the query
|
||||
func (b DeleteBuilder) Suffix(sql string, args ...interface{}) DeleteBuilder {
|
||||
return b.SuffixExpr(Expr(sql, args...))
|
||||
}
|
||||
|
||||
// SuffixExpr adds an expression to the end of the query
|
||||
func (b DeleteBuilder) SuffixExpr(expr Sqlizer) DeleteBuilder {
|
||||
return builder.Append(b, "Suffixes", expr).(DeleteBuilder)
|
||||
}
|
||||
|
||||
func (b DeleteBuilder) Query() (*sql.Rows, error) {
|
||||
data := builder.GetStruct(b).(deleteData)
|
||||
return data.Query()
|
||||
}
|
||||
|
||||
func (d *deleteData) Query() (*sql.Rows, error) {
|
||||
if d.RunWith == nil {
|
||||
return nil, RunnerNotSet
|
||||
}
|
||||
return QueryWith(d.RunWith, d)
|
||||
}
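
A short DeleteBuilder sketch (assuming the package-level `sq.Delete` constructor defined elsewhere in the package):

```go
sql, args, err := sq.Delete("users").
    Where(sq.Eq{"deleted_at": nil}).
    Limit(10).
    ToSql()

// sql == "DELETE FROM users WHERE deleted_at IS NULL LIMIT 10"
// args is empty here: Eq with a nil value renders as IS NULL without a placeholder.
```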
|
|
@@ -0,0 +1,69 @@
|
|||
// +build go1.8
|
||||
|
||||
package squirrel
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
|
||||
"github.com/lann/builder"
|
||||
)
|
||||
|
||||
func (d *deleteData) ExecContext(ctx context.Context) (sql.Result, error) {
|
||||
if d.RunWith == nil {
|
||||
return nil, RunnerNotSet
|
||||
}
|
||||
ctxRunner, ok := d.RunWith.(ExecerContext)
|
||||
if !ok {
|
||||
return nil, NoContextSupport
|
||||
}
|
||||
return ExecContextWith(ctx, ctxRunner, d)
|
||||
}
|
||||
|
||||
func (d *deleteData) QueryContext(ctx context.Context) (*sql.Rows, error) {
|
||||
if d.RunWith == nil {
|
||||
return nil, RunnerNotSet
|
||||
}
|
||||
ctxRunner, ok := d.RunWith.(QueryerContext)
|
||||
if !ok {
|
||||
return nil, NoContextSupport
|
||||
}
|
||||
return QueryContextWith(ctx, ctxRunner, d)
|
||||
}
|
||||
|
||||
func (d *deleteData) QueryRowContext(ctx context.Context) RowScanner {
|
||||
if d.RunWith == nil {
|
||||
return &Row{err: RunnerNotSet}
|
||||
}
|
||||
queryRower, ok := d.RunWith.(QueryRowerContext)
|
||||
if !ok {
|
||||
if _, ok := d.RunWith.(QueryerContext); !ok {
|
||||
return &Row{err: RunnerNotQueryRunner}
|
||||
}
|
||||
return &Row{err: NoContextSupport}
|
||||
}
|
||||
return QueryRowContextWith(ctx, queryRower, d)
|
||||
}
|
||||
|
||||
// ExecContext builds and ExecContexts the query with the Runner set by RunWith.
|
||||
func (b DeleteBuilder) ExecContext(ctx context.Context) (sql.Result, error) {
|
||||
data := builder.GetStruct(b).(deleteData)
|
||||
return data.ExecContext(ctx)
|
||||
}
|
||||
|
||||
// QueryContext builds and QueryContexts the query with the Runner set by RunWith.
|
||||
func (b DeleteBuilder) QueryContext(ctx context.Context) (*sql.Rows, error) {
|
||||
data := builder.GetStruct(b).(deleteData)
|
||||
return data.QueryContext(ctx)
|
||||
}
|
||||
|
||||
// QueryRowContext builds and QueryRowContexts the query with the Runner set by RunWith.
|
||||
func (b DeleteBuilder) QueryRowContext(ctx context.Context) RowScanner {
|
||||
data := builder.GetStruct(b).(deleteData)
|
||||
return data.QueryRowContext(ctx)
|
||||
}
|
||||
|
||||
// ScanContext is a shortcut for QueryRowContext().Scan.
|
||||
func (b DeleteBuilder) ScanContext(ctx context.Context, dest ...interface{}) error {
|
||||
return b.QueryRowContext(ctx).Scan(dest...)
|
||||
}
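
A hedged sketch of the context-aware API, assuming `db` is a `*sql.DB` (or another runner satisfying the context interfaces):

```go
// Cancel the DELETE if it runs longer than two seconds.
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
defer cancel()

res, err := sq.Delete("sessions").
    Where("expires_at < NOW()").
    RunWith(db).
    ExecContext(ctx)
```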
|
|
@@ -0,0 +1,419 @@
|
|||
package squirrel
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"database/sql/driver"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
// Portable true/false literals.
|
||||
sqlTrue = "(1=1)"
|
||||
sqlFalse = "(1=0)"
|
||||
)
|
||||
|
||||
type expr struct {
|
||||
sql string
|
||||
args []interface{}
|
||||
}
|
||||
|
||||
// Expr builds an expression from a SQL fragment and arguments.
|
||||
//
|
||||
// Ex:
|
||||
// Expr("FROM_UNIXTIME(?)", t)
|
||||
func Expr(sql string, args ...interface{}) Sqlizer {
|
||||
return expr{sql: sql, args: args}
|
||||
}
|
||||
|
||||
func (e expr) ToSql() (sql string, args []interface{}, err error) {
|
||||
simple := true
|
||||
for _, arg := range e.args {
|
||||
if _, ok := arg.(Sqlizer); ok {
|
||||
simple = false
|
||||
}
|
||||
}
|
||||
if simple {
|
||||
return e.sql, e.args, nil
|
||||
}
|
||||
|
||||
buf := &bytes.Buffer{}
|
||||
ap := e.args
|
||||
sp := e.sql
|
||||
|
||||
var isql string
|
||||
var iargs []interface{}
|
||||
|
||||
for err == nil && len(ap) > 0 && len(sp) > 0 {
|
||||
i := strings.Index(sp, "?")
|
||||
if i < 0 {
|
||||
// no more placeholders
|
||||
break
|
||||
}
|
||||
if len(sp) > i+1 && sp[i+1:i+2] == "?" {
|
||||
// escaped "??"; append it and step past
|
||||
buf.WriteString(sp[:i+2])
|
||||
sp = sp[i+2:]
|
||||
continue
|
||||
}
|
||||
|
||||
if as, ok := ap[0].(Sqlizer); ok {
|
||||
// sqlizer argument; expand it and append the result
|
||||
isql, iargs, err = as.ToSql()
|
||||
buf.WriteString(sp[:i])
|
||||
buf.WriteString(isql)
|
||||
args = append(args, iargs...)
|
||||
} else {
|
||||
// normal argument; append it and the placeholder
|
||||
buf.WriteString(sp[:i+1])
|
||||
args = append(args, ap[0])
|
||||
}
|
||||
|
||||
// step past the argument and placeholder
|
||||
ap = ap[1:]
|
||||
sp = sp[i+1:]
|
||||
}
|
||||
|
||||
// append the remaining sql and arguments
|
||||
buf.WriteString(sp)
|
||||
return buf.String(), append(args, ap...), err
|
||||
}
|
||||
|
||||
type concatExpr []interface{}
|
||||
|
||||
func (ce concatExpr) ToSql() (sql string, args []interface{}, err error) {
|
||||
for _, part := range ce {
|
||||
switch p := part.(type) {
|
||||
case string:
|
||||
sql += p
|
||||
case Sqlizer:
|
||||
pSql, pArgs, err := p.ToSql()
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
sql += pSql
|
||||
args = append(args, pArgs...)
|
||||
default:
|
||||
return "", nil, fmt.Errorf("%#v is not a string or Sqlizer", part)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// ConcatExpr builds an expression by concatenating strings and other expressions.
|
||||
//
|
||||
// Ex:
|
||||
// name_expr := Expr("CONCAT(?, ' ', ?)", firstName, lastName)
|
||||
// ConcatExpr("COALESCE(full_name,", name_expr, ")")
|
||||
func ConcatExpr(parts ...interface{}) concatExpr {
|
||||
return concatExpr(parts)
|
||||
}
|
||||
|
||||
// aliasExpr helps to alias part of SQL query generated with underlying "expr"
|
||||
type aliasExpr struct {
|
||||
expr Sqlizer
|
||||
alias string
|
||||
}
|
||||
|
||||
// Alias allows defining an alias for a column in a SelectBuilder. Useful when a column is
// defined as a complex expression like IF or CASE
|
||||
// Ex:
|
||||
// .Column(Alias(caseStmt, "case_column"))
|
||||
func Alias(expr Sqlizer, alias string) aliasExpr {
|
||||
return aliasExpr{expr, alias}
|
||||
}
|
||||
|
||||
func (e aliasExpr) ToSql() (sql string, args []interface{}, err error) {
|
||||
sql, args, err = e.expr.ToSql()
|
||||
if err == nil {
|
||||
sql = fmt.Sprintf("(%s) AS %s", sql, e.alias)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Eq is syntactic sugar for use with Where/Having/Set methods.
|
||||
type Eq map[string]interface{}
|
||||
|
||||
func (eq Eq) toSQL(useNotOpr bool) (sql string, args []interface{}, err error) {
|
||||
if len(eq) == 0 {
|
||||
// Empty Eq{} evaluates to true.
|
||||
sql = sqlTrue
|
||||
return
|
||||
}
|
||||
|
||||
var (
|
||||
exprs []string
|
||||
equalOpr = "="
|
||||
inOpr = "IN"
|
||||
nullOpr = "IS"
|
||||
inEmptyExpr = sqlFalse
|
||||
)
|
||||
|
||||
if useNotOpr {
|
||||
equalOpr = "<>"
|
||||
inOpr = "NOT IN"
|
||||
nullOpr = "IS NOT"
|
||||
inEmptyExpr = sqlTrue
|
||||
}
|
||||
|
||||
sortedKeys := getSortedKeys(eq)
|
||||
for _, key := range sortedKeys {
|
||||
var expr string
|
||||
val := eq[key]
|
||||
|
||||
switch v := val.(type) {
|
||||
case driver.Valuer:
|
||||
if val, err = v.Value(); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
r := reflect.ValueOf(val)
|
||||
if r.Kind() == reflect.Ptr {
|
||||
if r.IsNil() {
|
||||
val = nil
|
||||
} else {
|
||||
val = r.Elem().Interface()
|
||||
}
|
||||
}
|
||||
|
||||
if val == nil {
|
||||
expr = fmt.Sprintf("%s %s NULL", key, nullOpr)
|
||||
} else {
|
||||
if isListType(val) {
|
||||
valVal := reflect.ValueOf(val)
|
||||
if valVal.Len() == 0 {
|
||||
expr = inEmptyExpr
|
||||
if args == nil {
|
||||
args = []interface{}{}
|
||||
}
|
||||
} else {
|
||||
for i := 0; i < valVal.Len(); i++ {
|
||||
args = append(args, valVal.Index(i).Interface())
|
||||
}
|
||||
expr = fmt.Sprintf("%s %s (%s)", key, inOpr, Placeholders(valVal.Len()))
|
||||
}
|
||||
} else {
|
||||
expr = fmt.Sprintf("%s %s ?", key, equalOpr)
|
||||
args = append(args, val)
|
||||
}
|
||||
}
|
||||
exprs = append(exprs, expr)
|
||||
}
|
||||
sql = strings.Join(exprs, " AND ")
|
||||
return
|
||||
}
|
||||
|
||||
func (eq Eq) ToSql() (sql string, args []interface{}, err error) {
|
||||
return eq.toSQL(false)
|
||||
}
|
||||
|
||||
// NotEq is syntactic sugar for use with Where/Having/Set methods.
|
||||
// Ex:
|
||||
// .Where(NotEq{"id": 1}) == "id <> 1"
|
||||
type NotEq Eq
|
||||
|
||||
func (neq NotEq) ToSql() (sql string, args []interface{}, err error) {
|
||||
return Eq(neq).toSQL(true)
|
||||
}
|
||||
|
||||
// Like is syntactic sugar for use with LIKE conditions.
|
||||
// Ex:
|
||||
// .Where(Like{"name": "%irrel"})
|
||||
type Like map[string]interface{}
|
||||
|
||||
func (lk Like) toSql(opr string) (sql string, args []interface{}, err error) {
|
||||
var exprs []string
|
||||
for key, val := range lk {
|
||||
expr := ""
|
||||
|
||||
switch v := val.(type) {
|
||||
case driver.Valuer:
|
||||
if val, err = v.Value(); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if val == nil {
|
||||
err = fmt.Errorf("cannot use null with like operators")
|
||||
return
|
||||
} else {
|
||||
if isListType(val) {
|
||||
err = fmt.Errorf("cannot use array or slice with like operators")
|
||||
return
|
||||
} else {
|
||||
expr = fmt.Sprintf("%s %s ?", key, opr)
|
||||
args = append(args, val)
|
||||
}
|
||||
}
|
||||
exprs = append(exprs, expr)
|
||||
}
|
||||
sql = strings.Join(exprs, " AND ")
|
||||
return
|
||||
}
|
||||
|
||||
func (lk Like) ToSql() (sql string, args []interface{}, err error) {
|
||||
return lk.toSql("LIKE")
|
||||
}
|
||||
|
||||
// NotLike is syntactic sugar for use with LIKE conditions.
|
||||
// Ex:
|
||||
// .Where(NotLike{"name": "%irrel"})
|
||||
type NotLike Like
|
||||
|
||||
func (nlk NotLike) ToSql() (sql string, args []interface{}, err error) {
|
||||
return Like(nlk).toSql("NOT LIKE")
|
||||
}
|
||||
|
||||
// ILike is syntactic sugar for use with ILIKE conditions.
|
||||
// Ex:
|
||||
// .Where(ILike{"name": "sq%"})
|
||||
type ILike Like
|
||||
|
||||
func (ilk ILike) ToSql() (sql string, args []interface{}, err error) {
|
||||
return Like(ilk).toSql("ILIKE")
|
||||
}
|
||||
|
||||
// NotILike is syntactic sugar for use with ILIKE conditions.
|
||||
// Ex:
|
||||
// .Where(NotILike{"name": "sq%"})
|
||||
type NotILike Like
|
||||
|
||||
func (nilk NotILike) ToSql() (sql string, args []interface{}, err error) {
|
||||
return Like(nilk).toSql("NOT ILIKE")
|
||||
}
|
||||
|
||||
// Lt is syntactic sugar for use with Where/Having/Set methods.
|
||||
// Ex:
|
||||
// .Where(Lt{"id": 1})
|
||||
type Lt map[string]interface{}
|
||||
|
||||
func (lt Lt) toSql(opposite, orEq bool) (sql string, args []interface{}, err error) {
|
||||
var (
|
||||
exprs []string
|
||||
opr = "<"
|
||||
)
|
||||
|
||||
if opposite {
|
||||
opr = ">"
|
||||
}
|
||||
|
||||
if orEq {
|
||||
opr = fmt.Sprintf("%s%s", opr, "=")
|
||||
}
|
||||
|
||||
sortedKeys := getSortedKeys(lt)
|
||||
for _, key := range sortedKeys {
|
||||
var expr string
|
||||
val := lt[key]
|
||||
|
||||
switch v := val.(type) {
|
||||
case driver.Valuer:
|
||||
if val, err = v.Value(); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if val == nil {
|
||||
err = fmt.Errorf("cannot use null with less than or greater than operators")
|
||||
return
|
||||
}
|
||||
if isListType(val) {
|
||||
err = fmt.Errorf("cannot use array or slice with less than or greater than operators")
|
||||
return
|
||||
}
|
||||
expr = fmt.Sprintf("%s %s ?", key, opr)
|
||||
args = append(args, val)
|
||||
|
||||
exprs = append(exprs, expr)
|
||||
}
|
||||
sql = strings.Join(exprs, " AND ")
|
||||
return
|
||||
}
|
||||
|
||||
func (lt Lt) ToSql() (sql string, args []interface{}, err error) {
|
||||
return lt.toSql(false, false)
|
||||
}
|
||||
|
||||
// LtOrEq is syntactic sugar for use with Where/Having/Set methods.
|
||||
// Ex:
|
||||
// .Where(LtOrEq{"id": 1}) == "id <= 1"
|
||||
type LtOrEq Lt
|
||||
|
||||
func (ltOrEq LtOrEq) ToSql() (sql string, args []interface{}, err error) {
|
||||
return Lt(ltOrEq).toSql(false, true)
|
||||
}
|
||||
|
||||
// Gt is syntactic sugar for use with Where/Having/Set methods.
|
||||
// Ex:
|
||||
// .Where(Gt{"id": 1}) == "id > 1"
|
||||
type Gt Lt
|
||||
|
||||
func (gt Gt) ToSql() (sql string, args []interface{}, err error) {
|
||||
return Lt(gt).toSql(true, false)
|
||||
}
|
||||
|
||||
// GtOrEq is syntactic sugar for use with Where/Having/Set methods.
|
||||
// Ex:
|
||||
// .Where(GtOrEq{"id": 1}) == "id >= 1"
|
||||
type GtOrEq Lt
|
||||
|
||||
func (gtOrEq GtOrEq) ToSql() (sql string, args []interface{}, err error) {
|
||||
return Lt(gtOrEq).toSql(true, true)
|
||||
}
|
||||
|
||||
type conj []Sqlizer
|
||||
|
||||
func (c conj) join(sep, defaultExpr string) (sql string, args []interface{}, err error) {
|
||||
if len(c) == 0 {
|
||||
return defaultExpr, []interface{}{}, nil
|
||||
}
|
||||
var sqlParts []string
|
||||
for _, sqlizer := range c {
|
||||
partSQL, partArgs, err := nestedToSql(sqlizer)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
if partSQL != "" {
|
||||
sqlParts = append(sqlParts, partSQL)
|
||||
args = append(args, partArgs...)
|
||||
}
|
||||
}
|
||||
if len(sqlParts) > 0 {
|
||||
sql = fmt.Sprintf("(%s)", strings.Join(sqlParts, sep))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// And conjunction Sqlizers
|
||||
type And conj
|
||||
|
||||
func (a And) ToSql() (string, []interface{}, error) {
|
||||
return conj(a).join(" AND ", sqlTrue)
|
||||
}
|
||||
|
||||
// Or conjunction Sqlizers
|
||||
type Or conj
|
||||
|
||||
func (o Or) ToSql() (string, []interface{}, error) {
|
||||
return conj(o).join(" OR ", sqlFalse)
|
||||
}
|
||||
|
||||
func getSortedKeys(exp map[string]interface{}) []string {
|
||||
sortedKeys := make([]string, 0, len(exp))
|
||||
for k := range exp {
|
||||
sortedKeys = append(sortedKeys, k)
|
||||
}
|
||||
sort.Strings(sortedKeys)
|
||||
return sortedKeys
|
||||
}
|
||||
|
||||
func isListType(val interface{}) bool {
|
||||
if driver.IsValue(val) {
|
||||
return false
|
||||
}
|
||||
valVal := reflect.ValueOf(val)
|
||||
return valVal.Kind() == reflect.Array || valVal.Kind() == reflect.Slice
|
||||
}
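
To illustrate how these helpers compose (a sketch; table and column names are illustrative):

```go
sql, args, _ := sq.Select("id").From("users").
    Where(sq.And{
        sq.Eq{"status": []string{"active", "pending"}},
        sq.NotEq{"email": nil},
        sq.GtOrEq{"age": 21},
    }).ToSql()

// sql == "SELECT id FROM users WHERE (status IN (?,?) AND email IS NOT NULL AND age >= ?)"
// args == []interface{}{"active", "pending", 21}
```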
|
|
@@ -0,0 +1,298 @@
|
|||
package squirrel
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/lann/builder"
|
||||
)
|
||||
|
||||
type insertData struct {
|
||||
PlaceholderFormat PlaceholderFormat
|
||||
RunWith BaseRunner
|
||||
Prefixes []Sqlizer
|
||||
StatementKeyword string
|
||||
Options []string
|
||||
Into string
|
||||
Columns []string
|
||||
Values [][]interface{}
|
||||
Suffixes []Sqlizer
|
||||
Select *SelectBuilder
|
||||
}
|
||||
|
||||
func (d *insertData) Exec() (sql.Result, error) {
|
||||
if d.RunWith == nil {
|
||||
return nil, RunnerNotSet
|
||||
}
|
||||
return ExecWith(d.RunWith, d)
|
||||
}
|
||||
|
||||
func (d *insertData) Query() (*sql.Rows, error) {
|
||||
if d.RunWith == nil {
|
||||
return nil, RunnerNotSet
|
||||
}
|
||||
return QueryWith(d.RunWith, d)
|
||||
}
|
||||
|
||||
func (d *insertData) QueryRow() RowScanner {
|
||||
if d.RunWith == nil {
|
||||
return &Row{err: RunnerNotSet}
|
||||
}
|
||||
queryRower, ok := d.RunWith.(QueryRower)
|
||||
if !ok {
|
||||
return &Row{err: RunnerNotQueryRunner}
|
||||
}
|
||||
return QueryRowWith(queryRower, d)
|
||||
}
|
||||
|
||||
func (d *insertData) ToSql() (sqlStr string, args []interface{}, err error) {
|
||||
if len(d.Into) == 0 {
|
||||
err = errors.New("insert statements must specify a table")
|
||||
return
|
||||
}
|
||||
if len(d.Values) == 0 && d.Select == nil {
|
||||
err = errors.New("insert statements must have at least one set of values or select clause")
|
||||
return
|
||||
}
|
||||
|
||||
sql := &bytes.Buffer{}
|
||||
|
||||
if len(d.Prefixes) > 0 {
|
||||
args, err = appendToSql(d.Prefixes, sql, " ", args)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
sql.WriteString(" ")
|
||||
}
|
||||
|
||||
if d.StatementKeyword == "" {
|
||||
sql.WriteString("INSERT ")
|
||||
} else {
|
||||
sql.WriteString(d.StatementKeyword)
|
||||
sql.WriteString(" ")
|
||||
}
|
||||
|
||||
if len(d.Options) > 0 {
|
||||
sql.WriteString(strings.Join(d.Options, " "))
|
||||
sql.WriteString(" ")
|
||||
}
|
||||
|
||||
sql.WriteString("INTO ")
|
||||
sql.WriteString(d.Into)
|
||||
sql.WriteString(" ")
|
||||
|
||||
if len(d.Columns) > 0 {
|
||||
sql.WriteString("(")
|
||||
sql.WriteString(strings.Join(d.Columns, ","))
|
||||
sql.WriteString(") ")
|
||||
}
|
||||
|
||||
if d.Select != nil {
|
||||
args, err = d.appendSelectToSQL(sql, args)
|
||||
} else {
|
||||
args, err = d.appendValuesToSQL(sql, args)
|
||||
}
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if len(d.Suffixes) > 0 {
|
||||
sql.WriteString(" ")
|
||||
args, err = appendToSql(d.Suffixes, sql, " ", args)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
sqlStr, err = d.PlaceholderFormat.ReplacePlaceholders(sql.String())
|
||||
return
|
||||
}
|
||||
|
||||
func (d *insertData) appendValuesToSQL(w io.Writer, args []interface{}) ([]interface{}, error) {
|
||||
if len(d.Values) == 0 {
|
||||
return args, errors.New("values for insert statements are not set")
|
||||
}
|
||||
|
||||
io.WriteString(w, "VALUES ")
|
||||
|
||||
valuesStrings := make([]string, len(d.Values))
|
||||
for r, row := range d.Values {
|
||||
valueStrings := make([]string, len(row))
|
||||
for v, val := range row {
|
||||
if vs, ok := val.(Sqlizer); ok {
|
||||
vsql, vargs, err := vs.ToSql()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
valueStrings[v] = vsql
|
||||
args = append(args, vargs...)
|
||||
} else {
|
||||
valueStrings[v] = "?"
|
||||
args = append(args, val)
|
||||
}
|
||||
}
|
||||
valuesStrings[r] = fmt.Sprintf("(%s)", strings.Join(valueStrings, ","))
|
||||
}
|
||||
|
||||
io.WriteString(w, strings.Join(valuesStrings, ","))
|
||||
|
||||
return args, nil
|
||||
}
|
||||
|
||||
func (d *insertData) appendSelectToSQL(w io.Writer, args []interface{}) ([]interface{}, error) {
|
||||
if d.Select == nil {
|
||||
return args, errors.New("select clause for insert statements are not set")
|
||||
}
|
||||
|
||||
selectClause, sArgs, err := d.Select.ToSql()
|
||||
if err != nil {
|
||||
return args, err
|
||||
}
|
||||
|
||||
io.WriteString(w, selectClause)
|
||||
args = append(args, sArgs...)
|
||||
|
||||
return args, nil
|
||||
}
|
||||
|
||||
// Builder
|
||||
|
||||
// InsertBuilder builds SQL INSERT statements.
|
||||
type InsertBuilder builder.Builder
|
||||
|
||||
func init() {
|
||||
builder.Register(InsertBuilder{}, insertData{})
|
||||
}
|
||||
|
||||
// Format methods
|
||||
|
||||
// PlaceholderFormat sets PlaceholderFormat (e.g. Question or Dollar) for the
|
||||
// query.
|
||||
func (b InsertBuilder) PlaceholderFormat(f PlaceholderFormat) InsertBuilder {
|
||||
return builder.Set(b, "PlaceholderFormat", f).(InsertBuilder)
|
||||
}
|
||||
|
||||
// Runner methods
|
||||
|
||||
// RunWith sets a Runner (like database/sql.DB) to be used with e.g. Exec.
|
||||
func (b InsertBuilder) RunWith(runner BaseRunner) InsertBuilder {
|
||||
return setRunWith(b, runner).(InsertBuilder)
|
||||
}
|
||||
|
||||
// Exec builds and Execs the query with the Runner set by RunWith.
|
||||
func (b InsertBuilder) Exec() (sql.Result, error) {
|
||||
data := builder.GetStruct(b).(insertData)
|
||||
return data.Exec()
|
||||
}
|
||||
|
||||
// Query builds and Querys the query with the Runner set by RunWith.
|
||||
func (b InsertBuilder) Query() (*sql.Rows, error) {
|
||||
data := builder.GetStruct(b).(insertData)
|
||||
return data.Query()
|
||||
}
|
||||
|
||||
// QueryRow builds and QueryRows the query with the Runner set by RunWith.
|
||||
func (b InsertBuilder) QueryRow() RowScanner {
|
||||
data := builder.GetStruct(b).(insertData)
|
||||
return data.QueryRow()
|
||||
}
|
||||
|
||||
// Scan is a shortcut for QueryRow().Scan.
|
||||
func (b InsertBuilder) Scan(dest ...interface{}) error {
|
||||
return b.QueryRow().Scan(dest...)
|
||||
}
|
||||
|
||||
// SQL methods
|
||||
|
||||
// ToSql builds the query into a SQL string and bound args.
|
||||
func (b InsertBuilder) ToSql() (string, []interface{}, error) {
|
||||
data := builder.GetStruct(b).(insertData)
|
||||
return data.ToSql()
|
||||
}
|
||||
|
||||
// MustSql builds the query into a SQL string and bound args.
|
||||
// It panics if there are any errors.
|
||||
func (b InsertBuilder) MustSql() (string, []interface{}) {
|
||||
sql, args, err := b.ToSql()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return sql, args
|
||||
}
|
||||
|
||||
// Prefix adds an expression to the beginning of the query
|
||||
func (b InsertBuilder) Prefix(sql string, args ...interface{}) InsertBuilder {
|
||||
return b.PrefixExpr(Expr(sql, args...))
|
||||
}
|
||||
|
||||
// PrefixExpr adds an expression to the very beginning of the query
|
||||
func (b InsertBuilder) PrefixExpr(expr Sqlizer) InsertBuilder {
|
||||
return builder.Append(b, "Prefixes", expr).(InsertBuilder)
|
||||
}
|
||||
|
||||
// Options adds keyword options before the INTO clause of the query.
|
||||
func (b InsertBuilder) Options(options ...string) InsertBuilder {
|
||||
return builder.Extend(b, "Options", options).(InsertBuilder)
|
||||
}
|
||||
|
||||
// Into sets the INTO clause of the query.
|
||||
func (b InsertBuilder) Into(from string) InsertBuilder {
|
||||
return builder.Set(b, "Into", from).(InsertBuilder)
|
||||
}
|
||||
|
||||
// Columns adds insert columns to the query.
|
||||
func (b InsertBuilder) Columns(columns ...string) InsertBuilder {
|
||||
return builder.Extend(b, "Columns", columns).(InsertBuilder)
|
||||
}
|
||||
|
||||
// Values adds a single row's values to the query.
|
||||
func (b InsertBuilder) Values(values ...interface{}) InsertBuilder {
|
||||
return builder.Append(b, "Values", values).(InsertBuilder)
|
||||
}
|
||||
|
||||
// Suffix adds an expression to the end of the query
|
||||
func (b InsertBuilder) Suffix(sql string, args ...interface{}) InsertBuilder {
|
||||
return b.SuffixExpr(Expr(sql, args...))
|
||||
}
|
||||
|
||||
// SuffixExpr adds an expression to the end of the query
|
||||
func (b InsertBuilder) SuffixExpr(expr Sqlizer) InsertBuilder {
|
||||
return builder.Append(b, "Suffixes", expr).(InsertBuilder)
|
||||
}
|
||||
|
||||
// SetMap sets columns and values for the insert builder from a map of column names and values.
// Note that it will reset any columns and values that were previously set.
|
||||
func (b InsertBuilder) SetMap(clauses map[string]interface{}) InsertBuilder {
|
||||
// Keep the columns in a consistent order by sorting the column key string.
|
||||
cols := make([]string, 0, len(clauses))
|
||||
for col := range clauses {
|
||||
cols = append(cols, col)
|
||||
}
|
||||
sort.Strings(cols)
|
||||
|
||||
vals := make([]interface{}, 0, len(clauses))
|
||||
for _, col := range cols {
|
||||
vals = append(vals, clauses[col])
|
||||
}
|
||||
|
||||
b = builder.Set(b, "Columns", cols).(InsertBuilder)
|
||||
b = builder.Set(b, "Values", [][]interface{}{vals}).(InsertBuilder)
|
||||
|
||||
return b
|
||||
}
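
A small SetMap sketch (assuming the package-level `sq.Insert` constructor):

```go
sql, args, _ := sq.Insert("users").
    SetMap(map[string]interface{}{
        "name": "moe",
        "age":  13,
    }).
    ToSql()

// Columns are sorted by name for a deterministic statement:
// sql == "INSERT INTO users (age,name) VALUES (?,?)"
// args == []interface{}{13, "moe"}
```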
|
||||
|
||||
// Select sets a Select clause for the insert query.
// If both Values and Select are used, then Select has higher priority.
|
||||
func (b InsertBuilder) Select(sb SelectBuilder) InsertBuilder {
|
||||
return builder.Set(b, "Select", &sb).(InsertBuilder)
|
||||
}
|
||||
|
||||
func (b InsertBuilder) statementKeyword(keyword string) InsertBuilder {
|
||||
return builder.Set(b, "StatementKeyword", keyword).(InsertBuilder)
|
||||
}
|
|
@@ -0,0 +1,69 @@
|
|||
// +build go1.8
|
||||
|
||||
package squirrel
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
|
||||
"github.com/lann/builder"
|
||||
)
|
||||
|
||||
func (d *insertData) ExecContext(ctx context.Context) (sql.Result, error) {
|
||||
if d.RunWith == nil {
|
||||
return nil, RunnerNotSet
|
||||
}
|
||||
ctxRunner, ok := d.RunWith.(ExecerContext)
|
||||
if !ok {
|
||||
return nil, NoContextSupport
|
||||
}
|
||||
return ExecContextWith(ctx, ctxRunner, d)
|
||||
}
|
||||
|
||||
func (d *insertData) QueryContext(ctx context.Context) (*sql.Rows, error) {
|
||||
if d.RunWith == nil {
|
||||
return nil, RunnerNotSet
|
||||
}
|
||||
ctxRunner, ok := d.RunWith.(QueryerContext)
|
||||
if !ok {
|
||||
return nil, NoContextSupport
|
||||
}
|
||||
return QueryContextWith(ctx, ctxRunner, d)
|
||||
}
|
||||
|
||||
func (d *insertData) QueryRowContext(ctx context.Context) RowScanner {
|
||||
if d.RunWith == nil {
|
||||
return &Row{err: RunnerNotSet}
|
||||
}
|
||||
queryRower, ok := d.RunWith.(QueryRowerContext)
|
||||
if !ok {
|
||||
if _, ok := d.RunWith.(QueryerContext); !ok {
|
||||
return &Row{err: RunnerNotQueryRunner}
|
||||
}
|
||||
return &Row{err: NoContextSupport}
|
||||
}
|
||||
return QueryRowContextWith(ctx, queryRower, d)
|
||||
}
|
||||
|
||||
// ExecContext builds and ExecContexts the query with the Runner set by RunWith.
|
||||
func (b InsertBuilder) ExecContext(ctx context.Context) (sql.Result, error) {
|
||||
data := builder.GetStruct(b).(insertData)
|
||||
return data.ExecContext(ctx)
|
||||
}
|
||||
|
||||
// QueryContext builds and QueryContexts the query with the Runner set by RunWith.
|
||||
func (b InsertBuilder) QueryContext(ctx context.Context) (*sql.Rows, error) {
|
||||
data := builder.GetStruct(b).(insertData)
|
||||
return data.QueryContext(ctx)
|
||||
}
|
||||
|
||||
// QueryRowContext builds and QueryRowContexts the query with the Runner set by RunWith.
|
||||
func (b InsertBuilder) QueryRowContext(ctx context.Context) RowScanner {
|
||||
data := builder.GetStruct(b).(insertData)
|
||||
return data.QueryRowContext(ctx)
|
||||
}
|
||||
|
||||
// ScanContext is a shortcut for QueryRowContext().Scan.
|
||||
func (b InsertBuilder) ScanContext(ctx context.Context, dest ...interface{}) error {
|
||||
return b.QueryRowContext(ctx).Scan(dest...)
|
||||
}
|
|
@@ -0,0 +1,63 @@
|
|||
package squirrel
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
type part struct {
|
||||
pred interface{}
|
||||
args []interface{}
|
||||
}
|
||||
|
||||
func newPart(pred interface{}, args ...interface{}) Sqlizer {
|
||||
return &part{pred, args}
|
||||
}
|
||||
|
||||
func (p part) ToSql() (sql string, args []interface{}, err error) {
|
||||
switch pred := p.pred.(type) {
|
||||
case nil:
|
||||
// no-op
|
||||
case Sqlizer:
|
||||
sql, args, err = nestedToSql(pred)
|
||||
case string:
|
||||
sql = pred
|
||||
args = p.args
|
||||
default:
|
||||
err = fmt.Errorf("expected string or Sqlizer, not %T", pred)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func nestedToSql(s Sqlizer) (string, []interface{}, error) {
|
||||
if raw, ok := s.(rawSqlizer); ok {
|
||||
return raw.toSqlRaw()
|
||||
} else {
|
||||
return s.ToSql()
|
||||
}
|
||||
}
|
||||
|
||||
func appendToSql(parts []Sqlizer, w io.Writer, sep string, args []interface{}) ([]interface{}, error) {
|
||||
for i, p := range parts {
|
||||
partSql, partArgs, err := nestedToSql(p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
} else if len(partSql) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
if i > 0 {
|
||||
_, err := io.WriteString(w, sep)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
_, err = io.WriteString(w, partSql)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
args = append(args, partArgs...)
|
||||
}
|
||||
return args, nil
|
||||
}
|
|
@@ -0,0 +1,114 @@
|
|||
package squirrel
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// PlaceholderFormat is the interface that wraps the ReplacePlaceholders method.
|
||||
//
|
||||
// ReplacePlaceholders takes a SQL statement and replaces each question mark
|
||||
// placeholder with a (possibly different) SQL placeholder.
|
||||
type PlaceholderFormat interface {
|
||||
ReplacePlaceholders(sql string) (string, error)
|
||||
}
|
||||
|
||||
type placeholderDebugger interface {
|
||||
debugPlaceholder() string
|
||||
}
|
||||
|
||||
var (
|
||||
// Question is a PlaceholderFormat instance that leaves placeholders as
|
||||
// question marks.
|
||||
Question = questionFormat{}
|
||||
|
||||
// Dollar is a PlaceholderFormat instance that replaces placeholders with
|
||||
// dollar-prefixed positional placeholders (e.g. $1, $2, $3).
|
||||
Dollar = dollarFormat{}
|
||||
|
||||
// Colon is a PlaceholderFormat instance that replaces placeholders with
|
||||
// colon-prefixed positional placeholders (e.g. :1, :2, :3).
|
||||
Colon = colonFormat{}
|
||||
|
||||
// AtP is a PlaceholderFormat instance that replaces placeholders with
|
||||
// "@p"-prefixed positional placeholders (e.g. @p1, @p2, @p3).
|
||||
AtP = atpFormat{}
|
||||
)
|
||||
|
||||
type questionFormat struct{}
|
||||
|
||||
func (questionFormat) ReplacePlaceholders(sql string) (string, error) {
|
||||
return sql, nil
|
||||
}
|
||||
|
||||
func (questionFormat) debugPlaceholder() string {
|
||||
return "?"
|
||||
}
|
||||
|
||||
type dollarFormat struct{}
|
||||
|
||||
func (dollarFormat) ReplacePlaceholders(sql string) (string, error) {
|
||||
return replacePositionalPlaceholders(sql, "$")
|
||||
}
|
||||
|
||||
func (dollarFormat) debugPlaceholder() string {
|
||||
return "$"
|
||||
}
|
||||
|
||||
type colonFormat struct{}
|
||||
|
||||
func (colonFormat) ReplacePlaceholders(sql string) (string, error) {
|
||||
return replacePositionalPlaceholders(sql, ":")
|
||||
}
|
||||
|
||||
func (colonFormat) debugPlaceholder() string {
|
||||
return ":"
|
||||
}
|
||||
|
||||
type atpFormat struct{}
|
||||
|
||||
func (atpFormat) ReplacePlaceholders(sql string) (string, error) {
|
||||
return replacePositionalPlaceholders(sql, "@p")
|
||||
}
|
||||
|
||||
func (atpFormat) debugPlaceholder() string {
|
||||
return "@p"
|
||||
}
|
||||
|
||||
// Placeholders returns a string with count ? placeholders joined with commas.
|
||||
func Placeholders(count int) string {
|
||||
if count < 1 {
|
||||
return ""
|
||||
}
|
||||
|
||||
return strings.Repeat(",?", count)[1:]
|
||||
}
|
||||
|
||||
func replacePositionalPlaceholders(sql, prefix string) (string, error) {
|
||||
buf := &bytes.Buffer{}
|
||||
i := 0
|
||||
for {
|
||||
p := strings.Index(sql, "?")
|
||||
if p == -1 {
|
||||
break
|
||||
}
|
||||
|
||||
if len(sql[p:]) > 1 && sql[p:p+2] == "??" { // escape ?? => ?
|
||||
buf.WriteString(sql[:p])
|
||||
buf.WriteString("?")
|
||||
if len(sql[p:]) == 1 {
|
||||
break
|
||||
}
|
||||
sql = sql[p+2:]
|
||||
} else {
|
||||
i++
|
||||
buf.WriteString(sql[:p])
|
||||
fmt.Fprintf(buf, "%s%d", prefix, i)
|
||||
sql = sql[p+1:]
|
||||
}
|
||||
}
|
||||
|
||||
buf.WriteString(sql)
|
||||
return buf.String(), nil
|
||||
}
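
A quick sketch of the two helpers above:

```go
sq.Placeholders(3)
// "?,?,?"

s, _ := sq.Dollar.ReplacePlaceholders("SELECT * FROM t WHERE a = ? AND b ?? c AND d = ?")
// s == "SELECT * FROM t WHERE a = $1 AND b ? c AND d = $2"
```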
|
|
@@ -0,0 +1,22 @@
|
|||
package squirrel
|
||||
|
||||
// RowScanner is the interface that wraps the Scan method.
|
||||
//
|
||||
// Scan behaves like database/sql.Row.Scan.
|
||||
type RowScanner interface {
|
||||
Scan(...interface{}) error
|
||||
}
|
||||
|
||||
// Row wraps database/sql.Row to let squirrel return new errors on Scan.
|
||||
type Row struct {
|
||||
RowScanner
|
||||
err error
|
||||
}
|
||||
|
||||
// Scan returns Row.err or calls RowScanner.Scan.
|
||||
func (r *Row) Scan(dest ...interface{}) error {
|
||||
if r.err != nil {
|
||||
return r.err
|
||||
}
|
||||
return r.RowScanner.Scan(dest...)
|
||||
}
|
|
@@ -0,0 +1,403 @@
|
|||
package squirrel
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/lann/builder"
|
||||
)
|
||||
|
||||
type selectData struct {
|
||||
PlaceholderFormat PlaceholderFormat
|
||||
RunWith BaseRunner
|
||||
Prefixes []Sqlizer
|
||||
Options []string
|
||||
Columns []Sqlizer
|
||||
From Sqlizer
|
||||
Joins []Sqlizer
|
||||
WhereParts []Sqlizer
|
||||
GroupBys []string
|
||||
HavingParts []Sqlizer
|
||||
OrderByParts []Sqlizer
|
||||
Limit string
|
||||
Offset string
|
||||
Suffixes []Sqlizer
|
||||
}
|
||||
|
||||
func (d *selectData) Exec() (sql.Result, error) {
|
||||
if d.RunWith == nil {
|
||||
return nil, RunnerNotSet
|
||||
}
|
||||
return ExecWith(d.RunWith, d)
|
||||
}
|
||||
|
||||
func (d *selectData) Query() (*sql.Rows, error) {
|
||||
if d.RunWith == nil {
|
||||
return nil, RunnerNotSet
|
||||
}
|
||||
return QueryWith(d.RunWith, d)
|
||||
}
|
||||
|
||||
func (d *selectData) QueryRow() RowScanner {
|
||||
if d.RunWith == nil {
|
||||
return &Row{err: RunnerNotSet}
|
||||
}
|
||||
queryRower, ok := d.RunWith.(QueryRower)
|
||||
if !ok {
|
||||
return &Row{err: RunnerNotQueryRunner}
|
||||
}
|
||||
return QueryRowWith(queryRower, d)
|
||||
}
|
||||
|
||||
func (d *selectData) ToSql() (sqlStr string, args []interface{}, err error) {
|
||||
sqlStr, args, err = d.toSqlRaw()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
sqlStr, err = d.PlaceholderFormat.ReplacePlaceholders(sqlStr)
|
||||
return
|
||||
}
|
||||
|
||||
func (d *selectData) toSqlRaw() (sqlStr string, args []interface{}, err error) {
|
||||
if len(d.Columns) == 0 {
|
||||
err = fmt.Errorf("select statements must have at least one result column")
|
||||
return
|
||||
}
|
||||
|
||||
sql := &bytes.Buffer{}
|
||||
|
||||
if len(d.Prefixes) > 0 {
|
||||
args, err = appendToSql(d.Prefixes, sql, " ", args)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
sql.WriteString(" ")
|
||||
}
|
||||
|
||||
sql.WriteString("SELECT ")
|
||||
|
||||
if len(d.Options) > 0 {
|
||||
sql.WriteString(strings.Join(d.Options, " "))
|
||||
sql.WriteString(" ")
|
||||
}
|
||||
|
||||
if len(d.Columns) > 0 {
|
||||
args, err = appendToSql(d.Columns, sql, ", ", args)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if d.From != nil {
|
||||
sql.WriteString(" FROM ")
|
||||
args, err = appendToSql([]Sqlizer{d.From}, sql, "", args)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if len(d.Joins) > 0 {
|
||||
sql.WriteString(" ")
|
||||
args, err = appendToSql(d.Joins, sql, " ", args)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if len(d.WhereParts) > 0 {
|
||||
sql.WriteString(" WHERE ")
|
||||
args, err = appendToSql(d.WhereParts, sql, " AND ", args)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if len(d.GroupBys) > 0 {
|
||||
sql.WriteString(" GROUP BY ")
|
||||
sql.WriteString(strings.Join(d.GroupBys, ", "))
|
||||
}
|
||||
|
||||
if len(d.HavingParts) > 0 {
|
||||
sql.WriteString(" HAVING ")
|
||||
args, err = appendToSql(d.HavingParts, sql, " AND ", args)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if len(d.OrderByParts) > 0 {
|
||||
sql.WriteString(" ORDER BY ")
|
||||
args, err = appendToSql(d.OrderByParts, sql, ", ", args)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if len(d.Limit) > 0 {
|
||||
sql.WriteString(" LIMIT ")
|
||||
sql.WriteString(d.Limit)
|
||||
}
|
||||
|
||||
if len(d.Offset) > 0 {
|
||||
sql.WriteString(" OFFSET ")
|
||||
sql.WriteString(d.Offset)
|
||||
}
|
||||
|
||||
if len(d.Suffixes) > 0 {
|
||||
sql.WriteString(" ")
|
||||
|
||||
args, err = appendToSql(d.Suffixes, sql, " ", args)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
sqlStr = sql.String()
|
||||
return
|
||||
}
|
||||
|
||||
// Builder
|
||||
|
||||
// SelectBuilder builds SQL SELECT statements.
|
||||
type SelectBuilder builder.Builder
|
||||
|
||||
func init() {
|
||||
builder.Register(SelectBuilder{}, selectData{})
|
||||
}
|
||||
|
||||
// Format methods
|
||||
|
||||
// PlaceholderFormat sets PlaceholderFormat (e.g. Question or Dollar) for the
|
||||
// query.
|
||||
func (b SelectBuilder) PlaceholderFormat(f PlaceholderFormat) SelectBuilder {
|
||||
return builder.Set(b, "PlaceholderFormat", f).(SelectBuilder)
|
||||
}
|
||||
|
||||
// Runner methods
|
||||
|
||||
// RunWith sets a Runner (like database/sql.DB) to be used with e.g. Exec.
|
||||
// For most cases runner will be a database connection.
|
||||
//
|
||||
// Internally we use this to mock out the database connection for testing.
|
||||
func (b SelectBuilder) RunWith(runner BaseRunner) SelectBuilder {
|
||||
return setRunWith(b, runner).(SelectBuilder)
|
||||
}
|
||||
|
||||
// Exec builds and Execs the query with the Runner set by RunWith.
|
||||
func (b SelectBuilder) Exec() (sql.Result, error) {
|
||||
data := builder.GetStruct(b).(selectData)
|
||||
return data.Exec()
|
||||
}
|
||||
|
||||
// Query builds and Querys the query with the Runner set by RunWith.
|
||||
func (b SelectBuilder) Query() (*sql.Rows, error) {
|
||||
data := builder.GetStruct(b).(selectData)
|
||||
return data.Query()
|
||||
}
|
||||
|
||||
// QueryRow builds and QueryRows the query with the Runner set by RunWith.
|
||||
func (b SelectBuilder) QueryRow() RowScanner {
|
||||
data := builder.GetStruct(b).(selectData)
|
||||
return data.QueryRow()
|
||||
}
|
||||
|
||||
// Scan is a shortcut for QueryRow().Scan.
|
||||
func (b SelectBuilder) Scan(dest ...interface{}) error {
|
||||
return b.QueryRow().Scan(dest...)
|
||||
}
|
||||
|
||||
// SQL methods
|
||||
|
||||
// ToSql builds the query into a SQL string and bound args.
|
||||
func (b SelectBuilder) ToSql() (string, []interface{}, error) {
|
||||
data := builder.GetStruct(b).(selectData)
|
||||
return data.ToSql()
|
||||
}
|
||||
|
||||
func (b SelectBuilder) toSqlRaw() (string, []interface{}, error) {
|
||||
data := builder.GetStruct(b).(selectData)
|
||||
return data.toSqlRaw()
|
||||
}
|
||||
|
||||
// MustSql builds the query into a SQL string and bound args.
|
||||
// It panics if there are any errors.
|
||||
func (b SelectBuilder) MustSql() (string, []interface{}) {
|
||||
sql, args, err := b.ToSql()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return sql, args
|
||||
}
|
||||
|
||||
// Prefix adds an expression to the beginning of the query
|
||||
func (b SelectBuilder) Prefix(sql string, args ...interface{}) SelectBuilder {
|
||||
return b.PrefixExpr(Expr(sql, args...))
|
||||
}
|
||||
|
||||
// PrefixExpr adds an expression to the very beginning of the query
|
||||
func (b SelectBuilder) PrefixExpr(expr Sqlizer) SelectBuilder {
|
||||
return builder.Append(b, "Prefixes", expr).(SelectBuilder)
|
||||
}
|
||||
|
||||
// Distinct adds a DISTINCT clause to the query.
|
||||
func (b SelectBuilder) Distinct() SelectBuilder {
|
||||
return b.Options("DISTINCT")
|
||||
}
|
||||
|
||||
// Options adds select options to the query
|
||||
func (b SelectBuilder) Options(options ...string) SelectBuilder {
|
||||
return builder.Extend(b, "Options", options).(SelectBuilder)
|
||||
}
|
||||
|
||||
// Columns adds result columns to the query.
|
||||
func (b SelectBuilder) Columns(columns ...string) SelectBuilder {
|
||||
parts := make([]interface{}, 0, len(columns))
|
||||
for _, str := range columns {
|
||||
parts = append(parts, newPart(str))
|
||||
}
|
||||
return builder.Extend(b, "Columns", parts).(SelectBuilder)
|
||||
}
|
||||
|
||||
// RemoveColumns removes all columns from the query.
// You must add new columns with the Column or Columns methods, otherwise
// ToSql returns an error.
|
||||
func (b SelectBuilder) RemoveColumns() SelectBuilder {
|
||||
return builder.Delete(b, "Columns").(SelectBuilder)
|
||||
}
|
||||
|
||||
// Column adds a result column to the query.
|
||||
// Unlike Columns, Column accepts args which will be bound to placeholders in
|
||||
// the columns string, for example:
|
||||
// Column("IF(col IN ("+squirrel.Placeholders(3)+"), 1, 0) as col", 1, 2, 3)
|
||||
func (b SelectBuilder) Column(column interface{}, args ...interface{}) SelectBuilder {
|
||||
return builder.Append(b, "Columns", newPart(column, args...)).(SelectBuilder)
|
||||
}
|
||||
|
||||
// From sets the FROM clause of the query.
|
||||
func (b SelectBuilder) From(from string) SelectBuilder {
|
||||
return builder.Set(b, "From", newPart(from)).(SelectBuilder)
|
||||
}
|
||||
|
||||
// FromSelect sets a subquery into the FROM clause of the query.
|
||||
func (b SelectBuilder) FromSelect(from SelectBuilder, alias string) SelectBuilder {
|
||||
// Prevent misnumbered parameters in nested selects (#183).
|
||||
from = from.PlaceholderFormat(Question)
|
||||
return builder.Set(b, "From", Alias(from, alias)).(SelectBuilder)
|
||||
}
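// Illustrative usage sketch, assuming the library is imported as
// sq "github.com/Masterminds/squirrel"; the table and column names are
// hypothetical. It shows how FromSelect wraps a subquery as an aliased FROM
// source while forcing the subquery to Question placeholders so the outer
// builder can renumber them consistently:
//
//	sub := sq.Select("user_id").From("orders").Where(sq.Eq{"status": "paid"})
//	q := sq.Select("paid.user_id").
//		FromSelect(sub, "paid").
//		PlaceholderFormat(sq.Dollar)
//	sqlStr, args, err := q.ToSql()
//	// sqlStr is roughly:
//	//   SELECT paid.user_id FROM (SELECT user_id FROM orders WHERE status = $1) AS paid
//	// args contains ["paid"], err is nil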
|
||||
|
||||
// JoinClause adds a join clause to the query.
|
||||
func (b SelectBuilder) JoinClause(pred interface{}, args ...interface{}) SelectBuilder {
|
||||
return builder.Append(b, "Joins", newPart(pred, args...)).(SelectBuilder)
|
||||
}
|
||||
|
||||
// Join adds a JOIN clause to the query.
|
||||
func (b SelectBuilder) Join(join string, rest ...interface{}) SelectBuilder {
|
||||
return b.JoinClause("JOIN "+join, rest...)
|
||||
}
|
||||
|
||||
// LeftJoin adds a LEFT JOIN clause to the query.
|
||||
func (b SelectBuilder) LeftJoin(join string, rest ...interface{}) SelectBuilder {
|
||||
return b.JoinClause("LEFT JOIN "+join, rest...)
|
||||
}
|
||||
|
||||
// RightJoin adds a RIGHT JOIN clause to the query.
|
||||
func (b SelectBuilder) RightJoin(join string, rest ...interface{}) SelectBuilder {
|
||||
return b.JoinClause("RIGHT JOIN "+join, rest...)
|
||||
}
|
||||
|
||||
// InnerJoin adds an INNER JOIN clause to the query.
|
||||
func (b SelectBuilder) InnerJoin(join string, rest ...interface{}) SelectBuilder {
|
||||
return b.JoinClause("INNER JOIN "+join, rest...)
|
||||
}
|
||||
|
||||
// CrossJoin adds a CROSS JOIN clause to the query.
|
||||
func (b SelectBuilder) CrossJoin(join string, rest ...interface{}) SelectBuilder {
|
||||
return b.JoinClause("CROSS JOIN "+join, rest...)
|
||||
}
|
||||
|
||||
// Where adds an expression to the WHERE clause of the query.
|
||||
//
|
||||
// Expressions are ANDed together in the generated SQL.
|
||||
//
|
||||
// Where accepts several types for its pred argument:
|
||||
//
|
||||
// nil OR "" - ignored.
|
||||
//
|
||||
// string - SQL expression.
|
||||
// If the expression has SQL placeholders then a set of arguments must be passed
|
||||
// as well, one for each placeholder.
|
||||
//
|
||||
// map[string]interface{} OR Eq - map of SQL expressions to values. Each key is
|
||||
// transformed into an expression like "<key> = ?", with the corresponding value
|
||||
// bound to the placeholder. If the value is nil, the expression will be "<key>
|
||||
// IS NULL". If the value is an array or slice, the expression will be "<key> IN
|
||||
// (?,?,...)", with one placeholder for each item in the value. These expressions
|
||||
// are ANDed together.
|
||||
//
|
||||
// Where will panic if pred isn't any of the above types.
|
||||
func (b SelectBuilder) Where(pred interface{}, args ...interface{}) SelectBuilder {
|
||||
if pred == nil || pred == "" {
|
||||
return b
|
||||
}
|
||||
return builder.Append(b, "WhereParts", newWherePart(pred, args...)).(SelectBuilder)
|
||||
}
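// Illustrative usage sketch, assuming the library is imported as
// sq "github.com/Masterminds/squirrel" and cutoff is a caller-supplied value;
// all identifiers are hypothetical. Successive Where calls are ANDed, an Eq map
// turns nil into IS NULL and a slice into IN (...):
//
//	q := sq.Select("id", "email").
//		From("users").
//		Where(sq.Eq{"deleted_at": nil, "status": []string{"active", "pending"}}).
//		Where("created_at > ?", cutoff)
//	sqlStr, args, err := q.ToSql()
//	// sqlStr is roughly:
//	//   SELECT id, email FROM users
//	//   WHERE deleted_at IS NULL AND status IN (?,?) AND created_at > ?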
|
||||
|
||||
// GroupBy adds GROUP BY expressions to the query.
|
||||
func (b SelectBuilder) GroupBy(groupBys ...string) SelectBuilder {
|
||||
return builder.Extend(b, "GroupBys", groupBys).(SelectBuilder)
|
||||
}
|
||||
|
||||
// Having adds an expression to the HAVING clause of the query.
|
||||
//
|
||||
// See Where.
|
||||
func (b SelectBuilder) Having(pred interface{}, rest ...interface{}) SelectBuilder {
|
||||
return builder.Append(b, "HavingParts", newWherePart(pred, rest...)).(SelectBuilder)
|
||||
}
|
||||
|
||||
// OrderByClause adds ORDER BY clause to the query.
|
||||
func (b SelectBuilder) OrderByClause(pred interface{}, args ...interface{}) SelectBuilder {
|
||||
return builder.Append(b, "OrderByParts", newPart(pred, args...)).(SelectBuilder)
|
||||
}
|
||||
|
||||
// OrderBy adds ORDER BY expressions to the query.
|
||||
func (b SelectBuilder) OrderBy(orderBys ...string) SelectBuilder {
|
||||
for _, orderBy := range orderBys {
|
||||
b = b.OrderByClause(orderBy)
|
||||
}
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
// Limit sets a LIMIT clause on the query.
|
||||
func (b SelectBuilder) Limit(limit uint64) SelectBuilder {
|
||||
return builder.Set(b, "Limit", fmt.Sprintf("%d", limit)).(SelectBuilder)
|
||||
}
|
||||
|
||||
// RemoveLimit removes the LIMIT clause, so all records can be accessed.
|
||||
func (b SelectBuilder) RemoveLimit() SelectBuilder {
|
||||
return builder.Delete(b, "Limit").(SelectBuilder)
|
||||
}
|
||||
|
||||
// Offset sets an OFFSET clause on the query.
|
||||
func (b SelectBuilder) Offset(offset uint64) SelectBuilder {
|
||||
return builder.Set(b, "Offset", fmt.Sprintf("%d", offset)).(SelectBuilder)
|
||||
}
|
||||
|
||||
// RemoveOffset removes the OFFSET clause from the query.
|
||||
func (b SelectBuilder) RemoveOffset() SelectBuilder {
|
||||
return builder.Delete(b, "Offset").(SelectBuilder)
|
||||
}
|
||||
|
||||
// Suffix adds an expression to the end of the query
|
||||
func (b SelectBuilder) Suffix(sql string, args ...interface{}) SelectBuilder {
|
||||
return b.SuffixExpr(Expr(sql, args...))
|
||||
}
|
||||
|
||||
// SuffixExpr adds an expression to the end of the query
|
||||
func (b SelectBuilder) SuffixExpr(expr Sqlizer) SelectBuilder {
|
||||
return builder.Append(b, "Suffixes", expr).(SelectBuilder)
|
||||
}
|
|
@ -0,0 +1,69 @@
|
|||
// +build go1.8
|
||||
|
||||
package squirrel
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
|
||||
"github.com/lann/builder"
|
||||
)
|
||||
|
||||
func (d *selectData) ExecContext(ctx context.Context) (sql.Result, error) {
|
||||
if d.RunWith == nil {
|
||||
return nil, RunnerNotSet
|
||||
}
|
||||
ctxRunner, ok := d.RunWith.(ExecerContext)
|
||||
if !ok {
|
||||
return nil, NoContextSupport
|
||||
}
|
||||
return ExecContextWith(ctx, ctxRunner, d)
|
||||
}
|
||||
|
||||
func (d *selectData) QueryContext(ctx context.Context) (*sql.Rows, error) {
|
||||
if d.RunWith == nil {
|
||||
return nil, RunnerNotSet
|
||||
}
|
||||
ctxRunner, ok := d.RunWith.(QueryerContext)
|
||||
if !ok {
|
||||
return nil, NoContextSupport
|
||||
}
|
||||
return QueryContextWith(ctx, ctxRunner, d)
|
||||
}
|
||||
|
||||
func (d *selectData) QueryRowContext(ctx context.Context) RowScanner {
|
||||
if d.RunWith == nil {
|
||||
return &Row{err: RunnerNotSet}
|
||||
}
|
||||
queryRower, ok := d.RunWith.(QueryRowerContext)
|
||||
if !ok {
|
||||
if _, ok := d.RunWith.(QueryerContext); !ok {
|
||||
return &Row{err: RunnerNotQueryRunner}
|
||||
}
|
||||
return &Row{err: NoContextSupport}
|
||||
}
|
||||
return QueryRowContextWith(ctx, queryRower, d)
|
||||
}
|
||||
|
||||
// ExecContext builds and ExecContexts the query with the Runner set by RunWith.
|
||||
func (b SelectBuilder) ExecContext(ctx context.Context) (sql.Result, error) {
|
||||
data := builder.GetStruct(b).(selectData)
|
||||
return data.ExecContext(ctx)
|
||||
}
|
||||
|
||||
// QueryContext builds and QueryContexts the query with the Runner set by RunWith.
|
||||
func (b SelectBuilder) QueryContext(ctx context.Context) (*sql.Rows, error) {
|
||||
data := builder.GetStruct(b).(selectData)
|
||||
return data.QueryContext(ctx)
|
||||
}
|
||||
|
||||
// QueryRowContext builds and QueryRowContexts the query with the Runner set by RunWith.
|
||||
func (b SelectBuilder) QueryRowContext(ctx context.Context) RowScanner {
|
||||
data := builder.GetStruct(b).(selectData)
|
||||
return data.QueryRowContext(ctx)
|
||||
}
|
||||
|
||||
// ScanContext is a shortcut for QueryRowContext().Scan.
|
||||
func (b SelectBuilder) ScanContext(ctx context.Context, dest ...interface{}) error {
|
||||
return b.QueryRowContext(ctx).Scan(dest...)
|
||||
}
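// Illustrative usage sketch, assuming the library is imported as
// sq "github.com/Masterminds/squirrel", db is an open *sql.DB (which supports
// the Context-aware interfaces), and the standard context and time packages are
// imported. The *Context variants thread a deadline or cancellation through the
// query:
//
//	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
//	defer cancel()
//	var n int
//	err := sq.Select("COUNT(*)").From("users").
//		RunWith(db).
//		QueryRowContext(ctx).
//		Scan(&n)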
|
|
@ -0,0 +1,183 @@
|
|||
// Package squirrel provides a fluent SQL generator.
|
||||
//
|
||||
// See https://github.com/Masterminds/squirrel for examples.
|
||||
package squirrel
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/lann/builder"
|
||||
)
|
||||
|
||||
// Sqlizer is the interface that wraps the ToSql method.
|
||||
//
|
||||
// ToSql returns a SQL representation of the Sqlizer, along with a slice of args
|
||||
// as passed to e.g. database/sql.Exec. It can also return an error.
|
||||
type Sqlizer interface {
|
||||
ToSql() (string, []interface{}, error)
|
||||
}
|
||||
|
||||
// rawSqlizer is expected to do what Sqlizer does, but without finalizing placeholders.
|
||||
// This is useful for nested queries.
|
||||
type rawSqlizer interface {
|
||||
toSqlRaw() (string, []interface{}, error)
|
||||
}
|
||||
|
||||
// Execer is the interface that wraps the Exec method.
|
||||
//
|
||||
// Exec executes the given query as implemented by database/sql.Exec.
|
||||
type Execer interface {
|
||||
Exec(query string, args ...interface{}) (sql.Result, error)
|
||||
}
|
||||
|
||||
// Queryer is the interface that wraps the Query method.
|
||||
//
|
||||
// Query executes the given query as implemented by database/sql.Query.
|
||||
type Queryer interface {
|
||||
Query(query string, args ...interface{}) (*sql.Rows, error)
|
||||
}
|
||||
|
||||
// QueryRower is the interface that wraps the QueryRow method.
|
||||
//
|
||||
// QueryRow executes the given query as implemented by database/sql.QueryRow.
|
||||
type QueryRower interface {
|
||||
QueryRow(query string, args ...interface{}) RowScanner
|
||||
}
|
||||
|
||||
// BaseRunner groups the Execer and Queryer interfaces.
|
||||
type BaseRunner interface {
|
||||
Execer
|
||||
Queryer
|
||||
}
|
||||
|
||||
// Runner groups the Execer, Queryer, and QueryRower interfaces.
|
||||
type Runner interface {
|
||||
Execer
|
||||
Queryer
|
||||
QueryRower
|
||||
}
|
||||
|
||||
// WrapStdSql wraps a type implementing the standard SQL interface with methods that
|
||||
// squirrel expects.
|
||||
func WrapStdSql(stdSql StdSql) Runner {
|
||||
return &stdsqlRunner{stdSql}
|
||||
}
|
||||
|
||||
// StdSql encompasses the standard methods of the *sql.DB type, and other types that
|
||||
// wrap these methods.
|
||||
type StdSql interface {
|
||||
Query(string, ...interface{}) (*sql.Rows, error)
|
||||
QueryRow(string, ...interface{}) *sql.Row
|
||||
Exec(string, ...interface{}) (sql.Result, error)
|
||||
}
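// Illustrative usage sketch, assuming the library is imported as
// sq "github.com/Masterminds/squirrel" and db is an open *sql.DB; table and
// column names are hypothetical. *sql.DB satisfies StdSql (and StdSqlCtx), so it
// can be passed straight to RunWith and setRunWith wraps it so QueryRow returns
// squirrel's RowScanner:
//
//	var name string
//	err := sq.Select("name").From("users").
//		Where(sq.Eq{"id": 1}).
//		RunWith(db).
//		QueryRow().
//		Scan(&name)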
|
||||
|
||||
type stdsqlRunner struct {
|
||||
StdSql
|
||||
}
|
||||
|
||||
func (r *stdsqlRunner) QueryRow(query string, args ...interface{}) RowScanner {
|
||||
return r.StdSql.QueryRow(query, args...)
|
||||
}
|
||||
|
||||
func setRunWith(b interface{}, runner BaseRunner) interface{} {
|
||||
switch r := runner.(type) {
|
||||
case StdSqlCtx:
|
||||
runner = WrapStdSqlCtx(r)
|
||||
case StdSql:
|
||||
runner = WrapStdSql(r)
|
||||
}
|
||||
return builder.Set(b, "RunWith", runner)
|
||||
}
|
||||
|
||||
// RunnerNotSet is returned by methods that need a Runner if it isn't set.
|
||||
var RunnerNotSet = fmt.Errorf("cannot run; no Runner set (RunWith)")
|
||||
|
||||
// RunnerNotQueryRunner is returned by QueryRow if the RunWith value doesn't implement QueryRower.
|
||||
var RunnerNotQueryRunner = fmt.Errorf("cannot QueryRow; Runner is not a QueryRower")
|
||||
|
||||
// ExecWith Execs the SQL returned by s with db.
|
||||
func ExecWith(db Execer, s Sqlizer) (res sql.Result, err error) {
|
||||
query, args, err := s.ToSql()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return db.Exec(query, args...)
|
||||
}
|
||||
|
||||
// QueryWith Querys the SQL returned by s with db.
|
||||
func QueryWith(db Queryer, s Sqlizer) (rows *sql.Rows, err error) {
|
||||
query, args, err := s.ToSql()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return db.Query(query, args...)
|
||||
}
|
||||
|
||||
// QueryRowWith QueryRows the SQL returned by s with db.
|
||||
func QueryRowWith(db QueryRower, s Sqlizer) RowScanner {
|
||||
query, args, err := s.ToSql()
|
||||
return &Row{RowScanner: db.QueryRow(query, args...), err: err}
|
||||
}
|
||||
|
||||
// DebugSqlizer calls ToSql on s and shows the approximate SQL to be executed
|
||||
//
|
||||
// If ToSql returns an error, the result of this method will look like:
|
||||
// "[ToSql error: %s]" or "[DebugSqlizer error: %s]"
|
||||
//
|
||||
// IMPORTANT: As its name suggests, this function should only be used for
|
||||
// debugging. While the string result *might* be valid SQL, this function does
|
||||
// not try very hard to ensure it. Additionally, executing the output of this
|
||||
// function with any untrusted user input is certainly insecure.
|
||||
func DebugSqlizer(s Sqlizer) string {
|
||||
sql, args, err := s.ToSql()
|
||||
if err != nil {
|
||||
return fmt.Sprintf("[ToSql error: %s]", err)
|
||||
}
|
||||
|
||||
var placeholder string
|
||||
downCast, ok := s.(placeholderDebugger)
|
||||
if !ok {
|
||||
placeholder = "?"
|
||||
} else {
|
||||
placeholder = downCast.debugPlaceholder()
|
||||
}
|
||||
// TODO: dedupe this with placeholder.go
|
||||
buf := &bytes.Buffer{}
|
||||
i := 0
|
||||
for {
|
||||
p := strings.Index(sql, placeholder)
|
||||
if p == -1 {
|
||||
break
|
||||
}
|
||||
if len(sql[p:]) > 1 && sql[p:p+2] == "??" { // escape ?? => ?
|
||||
buf.WriteString(sql[:p])
|
||||
buf.WriteString("?")
|
||||
if len(sql[p:]) == 1 {
|
||||
break
|
||||
}
|
||||
sql = sql[p+2:]
|
||||
} else {
|
||||
if i+1 > len(args) {
|
||||
return fmt.Sprintf(
|
||||
"[DebugSqlizer error: too many placeholders in %#v for %d args]",
|
||||
sql, len(args))
|
||||
}
|
||||
buf.WriteString(sql[:p])
|
||||
fmt.Fprintf(buf, "'%v'", args[i])
|
||||
// advance our sql string "cursor" beyond the arg we placed
|
||||
sql = sql[p+1:]
|
||||
i++
|
||||
}
|
||||
}
|
||||
if i < len(args) {
|
||||
return fmt.Sprintf(
|
||||
"[DebugSqlizer error: not enough placeholders in %#v for %d args]",
|
||||
sql, len(args))
|
||||
}
|
||||
// "append" any remaning sql that won't need interpolating
|
||||
buf.WriteString(sql)
|
||||
return buf.String()
|
||||
}
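// Illustrative usage sketch, assuming the library is imported as
// sq "github.com/Masterminds/squirrel" and the standard log package; the query
// is hypothetical. DebugSqlizer is for logging only, since it interpolates args
// into the SQL without proper escaping:
//
//	q := sq.Select("*").From("events").Where(sq.Eq{"kind": "click"}).Limit(10)
//	log.Println(sq.DebugSqlizer(q))
//	// prints something like: SELECT * FROM events WHERE kind = 'click' LIMIT 10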
|
|
@ -0,0 +1,93 @@
|
|||
// +build go1.8
|
||||
|
||||
package squirrel
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"errors"
|
||||
)
|
||||
|
||||
// NoContextSupport is returned if a db doesn't support Context.
|
||||
var NoContextSupport = errors.New("DB does not support Context")
|
||||
|
||||
// ExecerContext is the interface that wraps the ExecContext method.
|
||||
//
|
||||
// Exec executes the given query as implemented by database/sql.ExecContext.
|
||||
type ExecerContext interface {
|
||||
ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error)
|
||||
}
|
||||
|
||||
// QueryerContext is the interface that wraps the QueryContext method.
|
||||
//
|
||||
// QueryContext executes the given query as implemented by database/sql.QueryContext.
|
||||
type QueryerContext interface {
|
||||
QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error)
|
||||
}
|
||||
|
||||
// QueryRowerContext is the interface that wraps the QueryRowContext method.
|
||||
//
|
||||
// QueryRowContext executes the given query as implemented by database/sql.QueryRowContext.
|
||||
type QueryRowerContext interface {
|
||||
QueryRowContext(ctx context.Context, query string, args ...interface{}) RowScanner
|
||||
}
|
||||
|
||||
// RunnerContext groups the Runner interface, along with the Context versions of each of
|
||||
// its methods
|
||||
type RunnerContext interface {
|
||||
Runner
|
||||
QueryerContext
|
||||
QueryRowerContext
|
||||
ExecerContext
|
||||
}
|
||||
|
||||
// WrapStdSqlCtx wraps a type implementing the standard SQL interface plus the context
|
||||
// versions of the methods with methods that squirrel expects.
|
||||
func WrapStdSqlCtx(stdSqlCtx StdSqlCtx) RunnerContext {
|
||||
return &stdsqlCtxRunner{stdSqlCtx}
|
||||
}
|
||||
|
||||
// StdSqlCtx encompasses the standard methods of the *sql.DB type, along with the Context
|
||||
// versions of those methods, and other types that wrap these methods.
|
||||
type StdSqlCtx interface {
|
||||
StdSql
|
||||
QueryContext(context.Context, string, ...interface{}) (*sql.Rows, error)
|
||||
QueryRowContext(context.Context, string, ...interface{}) *sql.Row
|
||||
ExecContext(context.Context, string, ...interface{}) (sql.Result, error)
|
||||
}
|
||||
|
||||
type stdsqlCtxRunner struct {
|
||||
StdSqlCtx
|
||||
}
|
||||
|
||||
func (r *stdsqlCtxRunner) QueryRow(query string, args ...interface{}) RowScanner {
|
||||
return r.StdSqlCtx.QueryRow(query, args...)
|
||||
}
|
||||
|
||||
func (r *stdsqlCtxRunner) QueryRowContext(ctx context.Context, query string, args ...interface{}) RowScanner {
|
||||
return r.StdSqlCtx.QueryRowContext(ctx, query, args...)
|
||||
}
|
||||
|
||||
// ExecContextWith ExecContexts the SQL returned by s with db.
|
||||
func ExecContextWith(ctx context.Context, db ExecerContext, s Sqlizer) (res sql.Result, err error) {
|
||||
query, args, err := s.ToSql()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return db.ExecContext(ctx, query, args...)
|
||||
}
|
||||
|
||||
// QueryContextWith QueryContexts the SQL returned by s with db.
|
||||
func QueryContextWith(ctx context.Context, db QueryerContext, s Sqlizer) (rows *sql.Rows, err error) {
|
||||
query, args, err := s.ToSql()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return db.QueryContext(ctx, query, args...)
|
||||
}
|
||||
|
||||
// QueryRowContextWith QueryRowContexts the SQL returned by s with db.
|
||||
func QueryRowContextWith(ctx context.Context, db QueryRowerContext, s Sqlizer) RowScanner {
|
||||
query, args, err := s.ToSql()
|
||||
return &Row{RowScanner: db.QueryRowContext(ctx, query, args...), err: err}
|
||||
}
|
|
@ -0,0 +1,104 @@
|
|||
package squirrel
|
||||
|
||||
import "github.com/lann/builder"
|
||||
|
||||
// StatementBuilderType is the type of StatementBuilder.
|
||||
type StatementBuilderType builder.Builder
|
||||
|
||||
// Select returns a SelectBuilder for this StatementBuilderType.
|
||||
func (b StatementBuilderType) Select(columns ...string) SelectBuilder {
|
||||
return SelectBuilder(b).Columns(columns...)
|
||||
}
|
||||
|
||||
// Insert returns an InsertBuilder for this StatementBuilderType.
|
||||
func (b StatementBuilderType) Insert(into string) InsertBuilder {
|
||||
return InsertBuilder(b).Into(into)
|
||||
}
|
||||
|
||||
// Replace returns an InsertBuilder for this StatementBuilderType with the
|
||||
// statement keyword set to "REPLACE".
|
||||
func (b StatementBuilderType) Replace(into string) InsertBuilder {
|
||||
return InsertBuilder(b).statementKeyword("REPLACE").Into(into)
|
||||
}
|
||||
|
||||
// Update returns an UpdateBuilder for this StatementBuilderType.
|
||||
func (b StatementBuilderType) Update(table string) UpdateBuilder {
|
||||
return UpdateBuilder(b).Table(table)
|
||||
}
|
||||
|
||||
// Delete returns a DeleteBuilder for this StatementBuilderType.
|
||||
func (b StatementBuilderType) Delete(from string) DeleteBuilder {
|
||||
return DeleteBuilder(b).From(from)
|
||||
}
|
||||
|
||||
// PlaceholderFormat sets the PlaceholderFormat field for any child builders.
|
||||
func (b StatementBuilderType) PlaceholderFormat(f PlaceholderFormat) StatementBuilderType {
|
||||
return builder.Set(b, "PlaceholderFormat", f).(StatementBuilderType)
|
||||
}
|
||||
|
||||
// RunWith sets the RunWith field for any child builders.
|
||||
func (b StatementBuilderType) RunWith(runner BaseRunner) StatementBuilderType {
|
||||
return setRunWith(b, runner).(StatementBuilderType)
|
||||
}
|
||||
|
||||
// Where adds WHERE expressions to the query.
|
||||
//
|
||||
// See SelectBuilder.Where for more information.
|
||||
func (b StatementBuilderType) Where(pred interface{}, args ...interface{}) StatementBuilderType {
|
||||
return builder.Append(b, "WhereParts", newWherePart(pred, args...)).(StatementBuilderType)
|
||||
}
|
||||
|
||||
// StatementBuilder is a parent builder for other builders, e.g. SelectBuilder.
|
||||
var StatementBuilder = StatementBuilderType(builder.EmptyBuilder).PlaceholderFormat(Question)
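// Illustrative usage sketch, assuming the library is imported as
// sq "github.com/Masterminds/squirrel" and db is an open *sql.DB; names are
// hypothetical. A customized StatementBuilderType carries its placeholder
// format and runner into every child builder it creates:
//
//	psql := sq.StatementBuilder.PlaceholderFormat(sq.Dollar).RunWith(db)
//	sqlStr, args, err := psql.Select("id").
//		From("users").
//		Where(sq.Eq{"email": "a@example.com"}).
//		ToSql()
//	// sqlStr is roughly: SELECT id FROM users WHERE email = $1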
|
||||
|
||||
// Select returns a new SelectBuilder, optionally setting some result columns.
|
||||
//
|
||||
// See SelectBuilder.Columns.
|
||||
func Select(columns ...string) SelectBuilder {
|
||||
return StatementBuilder.Select(columns...)
|
||||
}
|
||||
|
||||
// Insert returns a new InsertBuilder with the given table name.
|
||||
//
|
||||
// See InsertBuilder.Into.
|
||||
func Insert(into string) InsertBuilder {
|
||||
return StatementBuilder.Insert(into)
|
||||
}
|
||||
|
||||
// Replace returns a new InsertBuilder with the statement keyword set to
|
||||
// "REPLACE" and with the given table name.
|
||||
//
|
||||
// See InsertBuilder.Into.
|
||||
func Replace(into string) InsertBuilder {
|
||||
return StatementBuilder.Replace(into)
|
||||
}
|
||||
|
||||
// Update returns a new UpdateBuilder with the given table name.
|
||||
//
|
||||
// See UpdateBuilder.Table.
|
||||
func Update(table string) UpdateBuilder {
|
||||
return StatementBuilder.Update(table)
|
||||
}
|
||||
|
||||
// Delete returns a new DeleteBuilder with the given table name.
|
||||
//
|
||||
// See DeleteBuilder.From.
|
||||
func Delete(from string) DeleteBuilder {
|
||||
return StatementBuilder.Delete(from)
|
||||
}
|
||||
|
||||
// Case returns a new CaseBuilder.
// "what" is the optional value expression that follows CASE.
|
||||
func Case(what ...interface{}) CaseBuilder {
|
||||
b := CaseBuilder(builder.EmptyBuilder)
|
||||
|
||||
switch len(what) {
|
||||
case 0:
|
||||
case 1:
|
||||
b = b.what(what[0])
|
||||
default:
|
||||
b = b.what(newPart(what[0], what[1:]...))
|
||||
|
||||
}
|
||||
return b
|
||||
}
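// Illustrative usage sketch, assuming the library is imported as
// sq "github.com/Masterminds/squirrel"; the WHEN/THEN literals are hypothetical
// raw SQL. A CaseBuilder can be used as a result column via
// SelectBuilder.Column:
//
//	caseExpr := sq.Case("number").
//		When("1", "'one'").
//		When("2", "'two'").
//		Else("'many'")
//	sqlStr, args, err := sq.Select().Column(caseExpr).From("numbers").ToSql()
//	// sqlStr is roughly:
//	//   SELECT CASE number WHEN 1 THEN 'one' WHEN 2 THEN 'two' ELSE 'many' END FROM numbers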
|
|
@ -0,0 +1,121 @@
|
|||
package squirrel
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Preparer is the interface that wraps the Prepare method.
|
||||
//
|
||||
// Prepare executes the given query as implemented by database/sql.Prepare.
|
||||
type Preparer interface {
|
||||
Prepare(query string) (*sql.Stmt, error)
|
||||
}
|
||||
|
||||
// DBProxy groups the Execer, Queryer, QueryRower, and Preparer interfaces.
|
||||
type DBProxy interface {
|
||||
Execer
|
||||
Queryer
|
||||
QueryRower
|
||||
Preparer
|
||||
}
|
||||
|
||||
// NOTE: NewStmtCache is defined in stmtcacher_ctx.go (Go >= 1.8) or stmtcacher_noctx.go (Go < 1.8).
|
||||
|
||||
// StmtCache wraps and delegates down to a Preparer type.
//
// It automatically prepares every statement sent to the underlying Preparer via
// Exec, Query and QueryRow, and caches the returned *sql.Stmt keyed by the query
// string so that it can be reused on subsequent calls.
|
||||
type StmtCache struct {
|
||||
prep Preparer
|
||||
cache map[string]*sql.Stmt
|
||||
mu sync.Mutex
|
||||
}
|
||||
|
||||
// Prepare delegates down to the underlying Preparer and caches the result
|
||||
// using the provided query as a key
|
||||
func (sc *StmtCache) Prepare(query string) (*sql.Stmt, error) {
|
||||
sc.mu.Lock()
|
||||
defer sc.mu.Unlock()
|
||||
|
||||
stmt, ok := sc.cache[query]
|
||||
if ok {
|
||||
return stmt, nil
|
||||
}
|
||||
stmt, err := sc.prep.Prepare(query)
|
||||
if err == nil {
|
||||
sc.cache[query] = stmt
|
||||
}
|
||||
return stmt, err
|
||||
}
|
||||
|
||||
// Exec delegates down to the underlying Preparer using a prepared statement
|
||||
func (sc *StmtCache) Exec(query string, args ...interface{}) (res sql.Result, err error) {
|
||||
stmt, err := sc.Prepare(query)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return stmt.Exec(args...)
|
||||
}
|
||||
|
||||
// Query delegates down to the underlying Preparer using a prepared statement
|
||||
func (sc *StmtCache) Query(query string, args ...interface{}) (rows *sql.Rows, err error) {
|
||||
stmt, err := sc.Prepare(query)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return stmt.Query(args...)
|
||||
}
|
||||
|
||||
// QueryRow delegates down to the underlying Preparer using a prepared statement
|
||||
func (sc *StmtCache) QueryRow(query string, args ...interface{}) RowScanner {
|
||||
stmt, err := sc.Prepare(query)
|
||||
if err != nil {
|
||||
return &Row{err: err}
|
||||
}
|
||||
return stmt.QueryRow(args...)
|
||||
}
|
||||
|
||||
// Clear removes and closes all the currently cached prepared statements
|
||||
func (sc *StmtCache) Clear() (err error) {
|
||||
sc.mu.Lock()
|
||||
defer sc.mu.Unlock()
|
||||
|
||||
for key, stmt := range sc.cache {
|
||||
delete(sc.cache, key)
|
||||
|
||||
if stmt == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if cerr := stmt.Close(); cerr != nil {
|
||||
err = cerr
|
||||
}
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("one or more Stmt.Close failed; last error: %v", err)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
type DBProxyBeginner interface {
|
||||
DBProxy
|
||||
Begin() (*sql.Tx, error)
|
||||
}
|
||||
|
||||
type stmtCacheProxy struct {
|
||||
DBProxy
|
||||
db *sql.DB
|
||||
}
|
||||
|
||||
func NewStmtCacheProxy(db *sql.DB) DBProxyBeginner {
|
||||
return &stmtCacheProxy{DBProxy: NewStmtCache(db), db: db}
|
||||
}
|
||||
|
||||
func (sp *stmtCacheProxy) Begin() (*sql.Tx, error) {
|
||||
return sp.db.Begin()
|
||||
}
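// Illustrative usage sketch, assuming the library is imported as
// sq "github.com/Masterminds/squirrel" and db is an open *sql.DB; table and
// column names are hypothetical. A StmtCache can be used as the RunWith target
// so repeated queries reuse prepared statements:
//
//	cache := sq.NewStmtCache(db)
//	defer cache.Clear()
//	users := sq.Select("id", "name").From("users").RunWith(cache)
//	rows, err := users.Where(sq.Eq{"active": true}).Query()
//	// remember to call rows.Close() when err is nil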
|
|
@ -0,0 +1,86 @@
|
|||
// +build go1.8
|
||||
|
||||
package squirrel
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
)
|
||||
|
||||
// PreparerContext is the interface that wraps the Prepare and PrepareContext methods.
|
||||
//
|
||||
// Prepare executes the given query as implemented by database/sql.Prepare.
|
||||
// PrepareContext executes the given query as implemented by database/sql.PrepareContext.
|
||||
type PreparerContext interface {
|
||||
Preparer
|
||||
PrepareContext(ctx context.Context, query string) (*sql.Stmt, error)
|
||||
}
|
||||
|
||||
// DBProxyContext groups the Execer, Queryer, QueryRower and PreparerContext interfaces.
|
||||
type DBProxyContext interface {
|
||||
Execer
|
||||
Queryer
|
||||
QueryRower
|
||||
PreparerContext
|
||||
}
|
||||
|
||||
// NewStmtCache returns a *StmtCache wrapping a PreparerContext that caches Prepared Stmts.
|
||||
//
|
||||
// Stmts are cached based on the string value of their queries.
|
||||
func NewStmtCache(prep PreparerContext) *StmtCache {
|
||||
return &StmtCache{prep: prep, cache: make(map[string]*sql.Stmt)}
|
||||
}
|
||||
|
||||
// NewStmtCacher is deprecated
|
||||
//
|
||||
// Use NewStmtCache instead
|
||||
func NewStmtCacher(prep PreparerContext) DBProxyContext {
|
||||
return NewStmtCache(prep)
|
||||
}
|
||||
|
||||
// PrepareContext delegates down to the underlying PreparerContext and caches the result
|
||||
// using the provided query as a key
|
||||
func (sc *StmtCache) PrepareContext(ctx context.Context, query string) (*sql.Stmt, error) {
|
||||
ctxPrep, ok := sc.prep.(PreparerContext)
|
||||
if !ok {
|
||||
return nil, NoContextSupport
|
||||
}
|
||||
sc.mu.Lock()
|
||||
defer sc.mu.Unlock()
|
||||
stmt, ok := sc.cache[query]
|
||||
if ok {
|
||||
return stmt, nil
|
||||
}
|
||||
stmt, err := ctxPrep.PrepareContext(ctx, query)
|
||||
if err == nil {
|
||||
sc.cache[query] = stmt
|
||||
}
|
||||
return stmt, err
|
||||
}
|
||||
|
||||
// ExecContext delegates down to the underlying PreparerContext using a prepared statement
|
||||
func (sc *StmtCache) ExecContext(ctx context.Context, query string, args ...interface{}) (res sql.Result, err error) {
|
||||
stmt, err := sc.PrepareContext(ctx, query)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return stmt.ExecContext(ctx, args...)
|
||||
}
|
||||
|
||||
// QueryContext delegates down to the underlying PreparerContext using a prepared statement
|
||||
func (sc *StmtCache) QueryContext(ctx context.Context, query string, args ...interface{}) (rows *sql.Rows, err error) {
|
||||
stmt, err := sc.PrepareContext(ctx, query)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return stmt.QueryContext(ctx, args...)
|
||||
}
|
||||
|
||||
// QueryRowContext delegates down to the underlying PreparerContext using a prepared statement
|
||||
func (sc *StmtCache) QueryRowContext(ctx context.Context, query string, args ...interface{}) RowScanner {
|
||||
stmt, err := sc.PrepareContext(ctx, query)
|
||||
if err != nil {
|
||||
return &Row{err: err}
|
||||
}
|
||||
return stmt.QueryRowContext(ctx, args...)
|
||||
}
|
|
@ -0,0 +1,21 @@
|
|||
// +build !go1.8
|
||||
|
||||
package squirrel
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
)
|
||||
|
||||
// NewStmtCache returns a *StmtCache wrapping prep that caches prepared Stmts.
|
||||
//
|
||||
// Stmts are cached based on the string value of their queries.
|
||||
func NewStmtCache(prep Preparer) *StmtCache {
|
||||
	return &StmtCache{prep: prep, cache: make(map[string]*sql.Stmt)}
|
||||
}
|
||||
|
||||
// NewStmtCacher is deprecated
|
||||
//
|
||||
// Use NewStmtCache instead
|
||||
func NewStmtCacher(prep Preparer) DBProxy {
|
||||
return NewStmtCache(prep)
|
||||
}
|
|
@ -0,0 +1,288 @@
|
|||
package squirrel
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/lann/builder"
|
||||
)
|
||||
|
||||
type updateData struct {
|
||||
PlaceholderFormat PlaceholderFormat
|
||||
RunWith BaseRunner
|
||||
Prefixes []Sqlizer
|
||||
Table string
|
||||
SetClauses []setClause
|
||||
From Sqlizer
|
||||
WhereParts []Sqlizer
|
||||
OrderBys []string
|
||||
Limit string
|
||||
Offset string
|
||||
Suffixes []Sqlizer
|
||||
}
|
||||
|
||||
type setClause struct {
|
||||
column string
|
||||
value interface{}
|
||||
}
|
||||
|
||||
func (d *updateData) Exec() (sql.Result, error) {
|
||||
if d.RunWith == nil {
|
||||
return nil, RunnerNotSet
|
||||
}
|
||||
return ExecWith(d.RunWith, d)
|
||||
}
|
||||
|
||||
func (d *updateData) Query() (*sql.Rows, error) {
|
||||
if d.RunWith == nil {
|
||||
return nil, RunnerNotSet
|
||||
}
|
||||
return QueryWith(d.RunWith, d)
|
||||
}
|
||||
|
||||
func (d *updateData) QueryRow() RowScanner {
|
||||
if d.RunWith == nil {
|
||||
return &Row{err: RunnerNotSet}
|
||||
}
|
||||
queryRower, ok := d.RunWith.(QueryRower)
|
||||
if !ok {
|
||||
return &Row{err: RunnerNotQueryRunner}
|
||||
}
|
||||
return QueryRowWith(queryRower, d)
|
||||
}
|
||||
|
||||
func (d *updateData) ToSql() (sqlStr string, args []interface{}, err error) {
|
||||
if len(d.Table) == 0 {
|
||||
err = fmt.Errorf("update statements must specify a table")
|
||||
return
|
||||
}
|
||||
if len(d.SetClauses) == 0 {
|
||||
err = fmt.Errorf("update statements must have at least one Set clause")
|
||||
return
|
||||
}
|
||||
|
||||
sql := &bytes.Buffer{}
|
||||
|
||||
if len(d.Prefixes) > 0 {
|
||||
args, err = appendToSql(d.Prefixes, sql, " ", args)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
sql.WriteString(" ")
|
||||
}
|
||||
|
||||
sql.WriteString("UPDATE ")
|
||||
sql.WriteString(d.Table)
|
||||
|
||||
sql.WriteString(" SET ")
|
||||
setSqls := make([]string, len(d.SetClauses))
|
||||
for i, setClause := range d.SetClauses {
|
||||
var valSql string
|
||||
if vs, ok := setClause.value.(Sqlizer); ok {
|
||||
vsql, vargs, err := vs.ToSql()
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
if _, ok := vs.(SelectBuilder); ok {
|
||||
valSql = fmt.Sprintf("(%s)", vsql)
|
||||
} else {
|
||||
valSql = vsql
|
||||
}
|
||||
args = append(args, vargs...)
|
||||
} else {
|
||||
valSql = "?"
|
||||
args = append(args, setClause.value)
|
||||
}
|
||||
setSqls[i] = fmt.Sprintf("%s = %s", setClause.column, valSql)
|
||||
}
|
||||
sql.WriteString(strings.Join(setSqls, ", "))
|
||||
|
||||
if d.From != nil {
|
||||
sql.WriteString(" FROM ")
|
||||
args, err = appendToSql([]Sqlizer{d.From}, sql, "", args)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if len(d.WhereParts) > 0 {
|
||||
sql.WriteString(" WHERE ")
|
||||
args, err = appendToSql(d.WhereParts, sql, " AND ", args)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if len(d.OrderBys) > 0 {
|
||||
sql.WriteString(" ORDER BY ")
|
||||
sql.WriteString(strings.Join(d.OrderBys, ", "))
|
||||
}
|
||||
|
||||
if len(d.Limit) > 0 {
|
||||
sql.WriteString(" LIMIT ")
|
||||
sql.WriteString(d.Limit)
|
||||
}
|
||||
|
||||
if len(d.Offset) > 0 {
|
||||
sql.WriteString(" OFFSET ")
|
||||
sql.WriteString(d.Offset)
|
||||
}
|
||||
|
||||
if len(d.Suffixes) > 0 {
|
||||
sql.WriteString(" ")
|
||||
args, err = appendToSql(d.Suffixes, sql, " ", args)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
sqlStr, err = d.PlaceholderFormat.ReplacePlaceholders(sql.String())
|
||||
return
|
||||
}
|
||||
|
||||
// Builder
|
||||
|
||||
// UpdateBuilder builds SQL UPDATE statements.
|
||||
type UpdateBuilder builder.Builder
|
||||
|
||||
func init() {
|
||||
builder.Register(UpdateBuilder{}, updateData{})
|
||||
}
|
||||
|
||||
// Format methods
|
||||
|
||||
// PlaceholderFormat sets PlaceholderFormat (e.g. Question or Dollar) for the
|
||||
// query.
|
||||
func (b UpdateBuilder) PlaceholderFormat(f PlaceholderFormat) UpdateBuilder {
|
||||
return builder.Set(b, "PlaceholderFormat", f).(UpdateBuilder)
|
||||
}
|
||||
|
||||
// Runner methods
|
||||
|
||||
// RunWith sets a Runner (like database/sql.DB) to be used with e.g. Exec.
|
||||
func (b UpdateBuilder) RunWith(runner BaseRunner) UpdateBuilder {
|
||||
return setRunWith(b, runner).(UpdateBuilder)
|
||||
}
|
||||
|
||||
// Exec builds and Execs the query with the Runner set by RunWith.
|
||||
func (b UpdateBuilder) Exec() (sql.Result, error) {
|
||||
data := builder.GetStruct(b).(updateData)
|
||||
return data.Exec()
|
||||
}
|
||||
|
||||
func (b UpdateBuilder) Query() (*sql.Rows, error) {
|
||||
data := builder.GetStruct(b).(updateData)
|
||||
return data.Query()
|
||||
}
|
||||
|
||||
func (b UpdateBuilder) QueryRow() RowScanner {
|
||||
data := builder.GetStruct(b).(updateData)
|
||||
return data.QueryRow()
|
||||
}
|
||||
|
||||
func (b UpdateBuilder) Scan(dest ...interface{}) error {
|
||||
return b.QueryRow().Scan(dest...)
|
||||
}
|
||||
|
||||
// SQL methods
|
||||
|
||||
// ToSql builds the query into a SQL string and bound args.
|
||||
func (b UpdateBuilder) ToSql() (string, []interface{}, error) {
|
||||
data := builder.GetStruct(b).(updateData)
|
||||
return data.ToSql()
|
||||
}
|
||||
|
||||
// MustSql builds the query into a SQL string and bound args.
|
||||
// It panics if there are any errors.
|
||||
func (b UpdateBuilder) MustSql() (string, []interface{}) {
|
||||
sql, args, err := b.ToSql()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return sql, args
|
||||
}
|
||||
|
||||
// Prefix adds an expression to the beginning of the query
|
||||
func (b UpdateBuilder) Prefix(sql string, args ...interface{}) UpdateBuilder {
|
||||
return b.PrefixExpr(Expr(sql, args...))
|
||||
}
|
||||
|
||||
// PrefixExpr adds an expression to the very beginning of the query
|
||||
func (b UpdateBuilder) PrefixExpr(expr Sqlizer) UpdateBuilder {
|
||||
return builder.Append(b, "Prefixes", expr).(UpdateBuilder)
|
||||
}
|
||||
|
||||
// Table sets the table to be updated.
|
||||
func (b UpdateBuilder) Table(table string) UpdateBuilder {
|
||||
return builder.Set(b, "Table", table).(UpdateBuilder)
|
||||
}
|
||||
|
||||
// Set adds SET clauses to the query.
|
||||
func (b UpdateBuilder) Set(column string, value interface{}) UpdateBuilder {
|
||||
return builder.Append(b, "SetClauses", setClause{column: column, value: value}).(UpdateBuilder)
|
||||
}
|
||||
|
||||
// SetMap is a convenience method which calls .Set for each key/value pair in clauses.
|
||||
func (b UpdateBuilder) SetMap(clauses map[string]interface{}) UpdateBuilder {
|
||||
keys := make([]string, len(clauses))
|
||||
i := 0
|
||||
for key := range clauses {
|
||||
keys[i] = key
|
||||
i++
|
||||
}
|
||||
sort.Strings(keys)
|
||||
for _, key := range keys {
|
||||
		val := clauses[key]
|
||||
b = b.Set(key, val)
|
||||
}
|
||||
return b
|
||||
}
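// Illustrative usage sketch, assuming the library is imported as
// sq "github.com/Masterminds/squirrel"; table and column names are hypothetical.
// SetMap sorts its keys before calling Set, so the generated SET list and the
// argument order are deterministic:
//
//	q := sq.Update("users").
//		SetMap(map[string]interface{}{"name": "Alice", "active": true}).
//		Where(sq.Eq{"id": 42})
//	sqlStr, args, err := q.ToSql()
//	// sqlStr is roughly: UPDATE users SET active = ?, name = ? WHERE id = ?
//	// args contains [true, "Alice", 42]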
|
||||
|
||||
// From adds a FROM clause to the query.
//
// FROM is a valid construct in an UPDATE statement in PostgreSQL only.
|
||||
func (b UpdateBuilder) From(from string) UpdateBuilder {
|
||||
return builder.Set(b, "From", newPart(from)).(UpdateBuilder)
|
||||
}
|
||||
|
||||
// FromSelect sets a subquery into the FROM clause of the query.
|
||||
func (b UpdateBuilder) FromSelect(from SelectBuilder, alias string) UpdateBuilder {
|
||||
// Prevent misnumbered parameters in nested selects (#183).
|
||||
from = from.PlaceholderFormat(Question)
|
||||
return builder.Set(b, "From", Alias(from, alias)).(UpdateBuilder)
|
||||
}
|
||||
|
||||
// Where adds WHERE expressions to the query.
|
||||
//
|
||||
// See SelectBuilder.Where for more information.
|
||||
func (b UpdateBuilder) Where(pred interface{}, args ...interface{}) UpdateBuilder {
|
||||
return builder.Append(b, "WhereParts", newWherePart(pred, args...)).(UpdateBuilder)
|
||||
}
|
||||
|
||||
// OrderBy adds ORDER BY expressions to the query.
|
||||
func (b UpdateBuilder) OrderBy(orderBys ...string) UpdateBuilder {
|
||||
return builder.Extend(b, "OrderBys", orderBys).(UpdateBuilder)
|
||||
}
|
||||
|
||||
// Limit sets a LIMIT clause on the query.
|
||||
func (b UpdateBuilder) Limit(limit uint64) UpdateBuilder {
|
||||
return builder.Set(b, "Limit", fmt.Sprintf("%d", limit)).(UpdateBuilder)
|
||||
}
|
||||
|
||||
// Offset sets an OFFSET clause on the query.
|
||||
func (b UpdateBuilder) Offset(offset uint64) UpdateBuilder {
|
||||
return builder.Set(b, "Offset", fmt.Sprintf("%d", offset)).(UpdateBuilder)
|
||||
}
|
||||
|
||||
// Suffix adds an expression to the end of the query
|
||||
func (b UpdateBuilder) Suffix(sql string, args ...interface{}) UpdateBuilder {
|
||||
return b.SuffixExpr(Expr(sql, args...))
|
||||
}
|
||||
|
||||
// SuffixExpr adds an expression to the end of the query
|
||||
func (b UpdateBuilder) SuffixExpr(expr Sqlizer) UpdateBuilder {
|
||||
return builder.Append(b, "Suffixes", expr).(UpdateBuilder)
|
||||
}
|
|
@ -0,0 +1,69 @@
|
|||
// +build go1.8
|
||||
|
||||
package squirrel
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
|
||||
"github.com/lann/builder"
|
||||
)
|
||||
|
||||
func (d *updateData) ExecContext(ctx context.Context) (sql.Result, error) {
|
||||
if d.RunWith == nil {
|
||||
return nil, RunnerNotSet
|
||||
}
|
||||
ctxRunner, ok := d.RunWith.(ExecerContext)
|
||||
if !ok {
|
||||
return nil, NoContextSupport
|
||||
}
|
||||
return ExecContextWith(ctx, ctxRunner, d)
|
||||
}
|
||||
|
||||
func (d *updateData) QueryContext(ctx context.Context) (*sql.Rows, error) {
|
||||
if d.RunWith == nil {
|
||||
return nil, RunnerNotSet
|
||||
}
|
||||
ctxRunner, ok := d.RunWith.(QueryerContext)
|
||||
if !ok {
|
||||
return nil, NoContextSupport
|
||||
}
|
||||
return QueryContextWith(ctx, ctxRunner, d)
|
||||
}
|
||||
|
||||
func (d *updateData) QueryRowContext(ctx context.Context) RowScanner {
|
||||
if d.RunWith == nil {
|
||||
return &Row{err: RunnerNotSet}
|
||||
}
|
||||
queryRower, ok := d.RunWith.(QueryRowerContext)
|
||||
if !ok {
|
||||
if _, ok := d.RunWith.(QueryerContext); !ok {
|
||||
return &Row{err: RunnerNotQueryRunner}
|
||||
}
|
||||
return &Row{err: NoContextSupport}
|
||||
}
|
||||
return QueryRowContextWith(ctx, queryRower, d)
|
||||
}
|
||||
|
||||
// ExecContext builds and ExecContexts the query with the Runner set by RunWith.
|
||||
func (b UpdateBuilder) ExecContext(ctx context.Context) (sql.Result, error) {
|
||||
data := builder.GetStruct(b).(updateData)
|
||||
return data.ExecContext(ctx)
|
||||
}
|
||||
|
||||
// QueryContext builds and QueryContexts the query with the Runner set by RunWith.
|
||||
func (b UpdateBuilder) QueryContext(ctx context.Context) (*sql.Rows, error) {
|
||||
data := builder.GetStruct(b).(updateData)
|
||||
return data.QueryContext(ctx)
|
||||
}
|
||||
|
||||
// QueryRowContext builds and QueryRowContexts the query with the Runner set by RunWith.
|
||||
func (b UpdateBuilder) QueryRowContext(ctx context.Context) RowScanner {
|
||||
data := builder.GetStruct(b).(updateData)
|
||||
return data.QueryRowContext(ctx)
|
||||
}
|
||||
|
||||
// ScanContext is a shortcut for QueryRowContext().Scan.
|
||||
func (b UpdateBuilder) ScanContext(ctx context.Context, dest ...interface{}) error {
|
||||
return b.QueryRowContext(ctx).Scan(dest...)
|
||||
}
|
|
@ -0,0 +1,30 @@
|
|||
package squirrel
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
type wherePart part
|
||||
|
||||
func newWherePart(pred interface{}, args ...interface{}) Sqlizer {
|
||||
return &wherePart{pred: pred, args: args}
|
||||
}
|
||||
|
||||
func (p wherePart) ToSql() (sql string, args []interface{}, err error) {
|
||||
switch pred := p.pred.(type) {
|
||||
case nil:
|
||||
// no-op
|
||||
case rawSqlizer:
|
||||
return pred.toSqlRaw()
|
||||
case Sqlizer:
|
||||
return pred.ToSql()
|
||||
case map[string]interface{}:
|
||||
return Eq(pred).ToSql()
|
||||
case string:
|
||||
sql = pred
|
||||
args = p.args
|
||||
default:
|
||||
err = fmt.Errorf("expected string-keyed map or string, not %T", pred)
|
||||
}
|
||||
return
|
||||
}
|
|
@ -0,0 +1,20 @@
|
|||
Copyright (C) 2013 Blake Mizerany
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
File diff suppressed because it is too large
|
@ -0,0 +1,316 @@
|
|||
// Package quantile computes approximate quantiles over an unbounded data
|
||||
// stream within low memory and CPU bounds.
|
||||
//
|
||||
// A small amount of accuracy is traded to achieve the above properties.
|
||||
//
|
||||
// Multiple streams can be merged before calling Query to generate a single set
|
||||
// of results. This is meaningful when the streams represent the same type of
|
||||
// data. See Merge and Samples.
|
||||
//
|
||||
// For more detailed information about the algorithm used, see:
|
||||
//
|
||||
// Effective Computation of Biased Quantiles over Data Streams
|
||||
//
|
||||
// http://www.cs.rutgers.edu/~muthu/bquant.pdf
|
||||
package quantile
|
||||
|
||||
import (
|
||||
"math"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// Sample holds an observed value and meta information for compression. JSON
|
||||
// tags have been added for convenience.
|
||||
type Sample struct {
|
||||
Value float64 `json:",string"`
|
||||
Width float64 `json:",string"`
|
||||
Delta float64 `json:",string"`
|
||||
}
|
||||
|
||||
// Samples represents a slice of samples. It implements sort.Interface.
|
||||
type Samples []Sample
|
||||
|
||||
func (a Samples) Len() int { return len(a) }
|
||||
func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
|
||||
func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
|
||||
type invariant func(s *stream, r float64) float64
|
||||
|
||||
// NewLowBiased returns an initialized Stream for low-biased quantiles
|
||||
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
|
||||
// error guarantees can still be given even for the lower ranks of the data
|
||||
// distribution.
|
||||
//
|
||||
// The provided epsilon is a relative error, i.e. the true quantile of a value
|
||||
// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
|
||||
//
|
||||
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
|
||||
// properties.
|
||||
func NewLowBiased(epsilon float64) *Stream {
|
||||
ƒ := func(s *stream, r float64) float64 {
|
||||
return 2 * epsilon * r
|
||||
}
|
||||
return newStream(ƒ)
|
||||
}
|
||||
|
||||
// NewHighBiased returns an initialized Stream for high-biased quantiles
|
||||
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
|
||||
// error guarantees can still be given even for the higher ranks of the data
|
||||
// distribution.
|
||||
//
|
||||
// The provided epsilon is a relative error, i.e. the true quantile of a value
|
||||
// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
|
||||
//
|
||||
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
|
||||
// properties.
|
||||
func NewHighBiased(epsilon float64) *Stream {
|
||||
ƒ := func(s *stream, r float64) float64 {
|
||||
return 2 * epsilon * (s.n - r)
|
||||
}
|
||||
return newStream(ƒ)
|
||||
}
|
||||
|
||||
// NewTargeted returns an initialized Stream concerned with a particular set of
|
||||
// quantile values that are supplied a priori. Knowing these a priori reduces
|
||||
// space and computation time. The targets map maps the desired quantiles to
|
||||
// their absolute errors, i.e. the true quantile of a value returned by a query
|
||||
// is guaranteed to be within (Quantile±Epsilon).
|
||||
//
|
||||
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
|
||||
func NewTargeted(targetMap map[float64]float64) *Stream {
|
||||
// Convert map to slice to avoid slow iterations on a map.
|
||||
// ƒ is called on the hot path, so converting the map to a slice
|
||||
// beforehand results in significant CPU savings.
|
||||
targets := targetMapToSlice(targetMap)
|
||||
|
||||
ƒ := func(s *stream, r float64) float64 {
|
||||
var m = math.MaxFloat64
|
||||
var f float64
|
||||
for _, t := range targets {
|
||||
if t.quantile*s.n <= r {
|
||||
f = (2 * t.epsilon * r) / t.quantile
|
||||
} else {
|
||||
f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile)
|
||||
}
|
||||
if f < m {
|
||||
m = f
|
||||
}
|
||||
}
|
||||
return m
|
||||
}
|
||||
return newStream(ƒ)
|
||||
}
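// Illustrative usage sketch; the import path (the vendored
// github.com/beorn7/perks/quantile package) and the latenciesMs sample slice are
// assumptions. A targeted stream tracks only the quantiles named up front,
// trading generality for memory:
//
//	targets := map[float64]float64{0.50: 0.05, 0.90: 0.01, 0.99: 0.001}
//	q := quantile.NewTargeted(targets)
//	for _, v := range latenciesMs {
//		q.Insert(v)
//	}
//	p99 := q.Query(0.99) // within the 0.001 absolute error configured for the 0.99 target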
|
||||
|
||||
type target struct {
|
||||
quantile float64
|
||||
epsilon float64
|
||||
}
|
||||
|
||||
func targetMapToSlice(targetMap map[float64]float64) []target {
|
||||
targets := make([]target, 0, len(targetMap))
|
||||
|
||||
for quantile, epsilon := range targetMap {
|
||||
t := target{
|
||||
quantile: quantile,
|
||||
epsilon: epsilon,
|
||||
}
|
||||
targets = append(targets, t)
|
||||
}
|
||||
|
||||
return targets
|
||||
}
|
||||
|
||||
// Stream computes quantiles for a stream of float64s. It is not thread-safe by
|
||||
// design. Take care when using across multiple goroutines.
|
||||
type Stream struct {
|
||||
*stream
|
||||
b Samples
|
||||
sorted bool
|
||||
}
|
||||
|
||||
func newStream(ƒ invariant) *Stream {
|
||||
x := &stream{ƒ: ƒ}
|
||||
return &Stream{x, make(Samples, 0, 500), true}
|
||||
}
|
||||
|
||||
// Insert inserts v into the stream.
|
||||
func (s *Stream) Insert(v float64) {
|
||||
s.insert(Sample{Value: v, Width: 1})
|
||||
}
|
||||
|
||||
func (s *Stream) insert(sample Sample) {
|
||||
s.b = append(s.b, sample)
|
||||
s.sorted = false
|
||||
if len(s.b) == cap(s.b) {
|
||||
s.flush()
|
||||
}
|
||||
}
|
||||
|
||||
// Query returns the computed qth percentile value. If s was created with
|
||||
// NewTargeted, and q is not in the set of quantiles provided a priori, Query
|
||||
// will return an unspecified result.
|
||||
func (s *Stream) Query(q float64) float64 {
|
||||
if !s.flushed() {
|
||||
// Fast path when there hasn't been enough data for a flush;
|
||||
// this also yields better accuracy for small sets of data.
|
||||
l := len(s.b)
|
||||
if l == 0 {
|
||||
return 0
|
||||
}
|
||||
i := int(math.Ceil(float64(l) * q))
|
||||
if i > 0 {
|
||||
i -= 1
|
||||
}
|
||||
s.maybeSort()
|
||||
return s.b[i].Value
|
||||
}
|
||||
s.flush()
|
||||
return s.stream.query(q)
|
||||
}
|
||||
|
||||
// Merge merges samples into the underlying stream's samples. This is handy when
|
||||
// merging multiple streams from separate threads, database shards, etc.
|
||||
//
|
||||
// ATTENTION: This method is broken and does not yield correct results. The
|
||||
// underlying algorithm is not capable of merging streams correctly.
|
||||
func (s *Stream) Merge(samples Samples) {
|
||||
sort.Sort(samples)
|
||||
s.stream.merge(samples)
|
||||
}
|
||||
|
||||
// Reset reinitializes and clears the list reusing the samples buffer memory.
|
||||
func (s *Stream) Reset() {
|
||||
s.stream.reset()
|
||||
s.b = s.b[:0]
|
||||
}
|
||||
|
||||
// Samples returns stream samples held by s.
|
||||
func (s *Stream) Samples() Samples {
|
||||
if !s.flushed() {
|
||||
return s.b
|
||||
}
|
||||
s.flush()
|
||||
return s.stream.samples()
|
||||
}
|
||||
|
||||
// Count returns the total number of samples observed in the stream
|
||||
// since initialization.
|
||||
func (s *Stream) Count() int {
|
||||
return len(s.b) + s.stream.count()
|
||||
}
|
||||
|
||||
func (s *Stream) flush() {
|
||||
s.maybeSort()
|
||||
s.stream.merge(s.b)
|
||||
s.b = s.b[:0]
|
||||
}
|
||||
|
||||
func (s *Stream) maybeSort() {
|
||||
if !s.sorted {
|
||||
s.sorted = true
|
||||
sort.Sort(s.b)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Stream) flushed() bool {
|
||||
return len(s.stream.l) > 0
|
||||
}
|
||||
|
||||
type stream struct {
|
||||
n float64
|
||||
l []Sample
|
||||
ƒ invariant
|
||||
}
|
||||
|
||||
func (s *stream) reset() {
|
||||
s.l = s.l[:0]
|
||||
s.n = 0
|
||||
}
|
||||
|
||||
func (s *stream) insert(v float64) {
|
||||
s.merge(Samples{{v, 1, 0}})
|
||||
}
|
||||
|
||||
func (s *stream) merge(samples Samples) {
|
||||
// TODO(beorn7): This tries to merge not only individual samples, but
|
||||
// whole summaries. The paper doesn't mention merging summaries at
|
||||
// all. Unittests show that the merging is inaccurate. Find out how to
|
||||
// do merges properly.
|
||||
var r float64
|
||||
i := 0
|
||||
for _, sample := range samples {
|
||||
for ; i < len(s.l); i++ {
|
||||
c := s.l[i]
|
||||
if c.Value > sample.Value {
|
||||
// Insert at position i.
|
||||
s.l = append(s.l, Sample{})
|
||||
copy(s.l[i+1:], s.l[i:])
|
||||
s.l[i] = Sample{
|
||||
sample.Value,
|
||||
sample.Width,
|
||||
math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
|
||||
// TODO(beorn7): How to calculate delta correctly?
|
||||
}
|
||||
i++
|
||||
goto inserted
|
||||
}
|
||||
r += c.Width
|
||||
}
|
||||
s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
|
||||
i++
|
||||
inserted:
|
||||
s.n += sample.Width
|
||||
r += sample.Width
|
||||
}
|
||||
s.compress()
|
||||
}
|
||||
|
||||
func (s *stream) count() int {
|
||||
return int(s.n)
|
||||
}
|
||||
|
||||
func (s *stream) query(q float64) float64 {
|
||||
t := math.Ceil(q * s.n)
|
||||
t += math.Ceil(s.ƒ(s, t) / 2)
|
||||
p := s.l[0]
|
||||
var r float64
|
||||
for _, c := range s.l[1:] {
|
||||
r += p.Width
|
||||
if r+c.Width+c.Delta > t {
|
||||
return p.Value
|
||||
}
|
||||
p = c
|
||||
}
|
||||
return p.Value
|
||||
}
|
||||
|
||||
func (s *stream) compress() {
|
||||
if len(s.l) < 2 {
|
||||
return
|
||||
}
|
||||
x := s.l[len(s.l)-1]
|
||||
xi := len(s.l) - 1
|
||||
r := s.n - 1 - x.Width
|
||||
|
||||
for i := len(s.l) - 2; i >= 0; i-- {
|
||||
c := s.l[i]
|
||||
if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
|
||||
x.Width += c.Width
|
||||
s.l[xi] = x
|
||||
// Remove element at i.
|
||||
copy(s.l[i:], s.l[i+1:])
|
||||
s.l = s.l[:len(s.l)-1]
|
||||
xi -= 1
|
||||
} else {
|
||||
x = c
|
||||
xi = i
|
||||
}
|
||||
r -= c.Width
|
||||
}
|
||||
}
|
||||
|
||||
func (s *stream) samples() Samples {
|
||||
samples := make(Samples, len(s.l))
|
||||
copy(samples, s.l)
|
||||
return samples
|
||||
}
|
|
@ -0,0 +1,17 @@
|
|||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
|
@ -0,0 +1,16 @@
|
|||
### go-simplejson
|
||||
|
||||
a Go package to interact with arbitrary JSON
|
||||
|
||||
[![Build Status](https://github.com/bitly/go-simplejson/actions/workflows/ci.yaml/badge.svg)](https://github.com/bitly/go-simplejson/actions)
|
||||
[![GoDoc](https://pkg.go.dev/badge/github.com/bitly/go-simplejson)](https://pkg.go.dev/github.com/bitly/go-simplejson)
|
||||
[![GitHub release](https://img.shields.io/github/release/bitly/go-simplejson.svg)](https://github.com/bitly/go-simplejson/releases/latest)
|
||||
|
||||
|
||||
### Importing
|
||||
|
||||
import github.com/bitly/go-simplejson
|
||||
|
||||
### Documentation
|
||||
|
||||
Visit the docs on [Go package discovery & docs](https://pkg.go.dev/github.com/bitly/go-simplejson)
|
|
@ -0,0 +1,458 @@
|
|||
package simplejson
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"log"
|
||||
)
|
||||
|
||||
// Version returns the current implementation version
|
||||
func Version() string {
|
||||
return "0.5.1"
|
||||
}
|
||||
|
||||
type Json struct {
|
||||
data interface{}
|
||||
}
|
||||
|
||||
// NewJson returns a pointer to a new `Json` object
|
||||
// after unmarshaling `body` bytes
|
||||
func NewJson(body []byte) (*Json, error) {
|
||||
j := new(Json)
|
||||
err := j.UnmarshalJSON(body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return j, nil
|
||||
}
|
||||
|
||||
// New returns a pointer to a new, empty `Json` object
|
||||
func New() *Json {
|
||||
return &Json{
|
||||
data: make(map[string]interface{}),
|
||||
}
|
||||
}
|
||||
|
||||
// Interface returns the underlying data
|
||||
func (j *Json) Interface() interface{} {
|
||||
return j.data
|
||||
}
|
||||
|
||||
// Encode returns its marshaled data as `[]byte`
|
||||
func (j *Json) Encode() ([]byte, error) {
|
||||
return j.MarshalJSON()
|
||||
}
|
||||
|
||||
// EncodePretty returns its marshaled data as `[]byte` with indentation
|
||||
func (j *Json) EncodePretty() ([]byte, error) {
|
||||
return json.MarshalIndent(&j.data, "", " ")
|
||||
}
|
||||
|
||||
// Implements the json.Marshaler interface.
|
||||
func (j *Json) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(&j.data)
|
||||
}
|
||||
|
||||
// Set modifies `Json` map by `key` and `value`
|
||||
// Useful for changing a single key/value in a `Json` object easily.
|
||||
func (j *Json) Set(key string, val interface{}) {
|
||||
m, err := j.Map()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
m[key] = val
|
||||
}
|
||||
|
||||
// SetPath modifies `Json`, recursively checking/creating map keys for the supplied path,
|
||||
// and then finally writing in the value
|
||||
func (j *Json) SetPath(branch []string, val interface{}) {
|
||||
if len(branch) == 0 {
|
||||
j.data = val
|
||||
return
|
||||
}
|
||||
|
||||
// in order to insert our branch, we need map[string]interface{}
|
||||
if _, ok := (j.data).(map[string]interface{}); !ok {
|
||||
// have to replace with something suitable
|
||||
j.data = make(map[string]interface{})
|
||||
}
|
||||
curr := j.data.(map[string]interface{})
|
||||
|
||||
for i := 0; i < len(branch)-1; i++ {
|
||||
b := branch[i]
|
||||
// key exists?
|
||||
if _, ok := curr[b]; !ok {
|
||||
n := make(map[string]interface{})
|
||||
curr[b] = n
|
||||
curr = n
|
||||
continue
|
||||
}
|
||||
|
||||
// make sure the value is the right sort of thing
|
||||
if _, ok := curr[b].(map[string]interface{}); !ok {
|
||||
// have to replace with something suitable
|
||||
n := make(map[string]interface{})
|
||||
curr[b] = n
|
||||
}
|
||||
|
||||
curr = curr[b].(map[string]interface{})
|
||||
}
|
||||
|
||||
// add remaining k/v
|
||||
curr[branch[len(branch)-1]] = val
|
||||
}
|
||||
|
||||
// Del modifies `Json` map by deleting `key` if it is present.
|
||||
func (j *Json) Del(key string) {
|
||||
m, err := j.Map()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
delete(m, key)
|
||||
}
|
||||
|
||||
// Get returns a pointer to a new `Json` object
|
||||
// for `key` in its `map` representation
|
||||
//
|
||||
// useful for chaining operations (to traverse a nested JSON):
|
||||
//
|
||||
// js.Get("top_level").Get("dict").Get("value").Int()
|
||||
func (j *Json) Get(key string) *Json {
|
||||
m, err := j.Map()
|
||||
if err == nil {
|
||||
if val, ok := m[key]; ok {
|
||||
return &Json{val}
|
||||
}
|
||||
}
|
||||
return &Json{nil}
|
||||
}
|
||||
|
||||
// GetPath searches for the item as specified by the branch
|
||||
// without the need to deep dive using Get()'s.
|
||||
//
|
||||
// js.GetPath("top_level", "dict")
|
||||
func (j *Json) GetPath(branch ...string) *Json {
|
||||
jin := j
|
||||
for _, p := range branch {
|
||||
jin = jin.Get(p)
|
||||
}
|
||||
return jin
|
||||
}
|
||||
|
||||
// GetIndex returns a pointer to a new `Json` object
|
||||
// for `index` in its `array` representation
|
||||
//
|
||||
// this is the analog to Get when accessing elements of
|
||||
// a json array instead of a json object:
|
||||
//
|
||||
// js.Get("top_level").Get("array").GetIndex(1).Get("key").Int()
|
||||
func (j *Json) GetIndex(index int) *Json {
|
||||
a, err := j.Array()
|
||||
if err == nil {
|
||||
if len(a) > index {
|
||||
return &Json{a[index]}
|
||||
}
|
||||
}
|
||||
return &Json{nil}
|
||||
}
|
||||
|
||||
// CheckGet returns a pointer to a new `Json` object and
|
||||
// a `bool` identifying success or failure
|
||||
//
|
||||
// useful for chained operations when success is important:
|
||||
//
|
||||
// if data, ok := js.Get("top_level").CheckGet("inner"); ok {
|
||||
// log.Println(data)
|
||||
// }
|
||||
func (j *Json) CheckGet(key string) (*Json, bool) {
|
||||
m, err := j.Map()
|
||||
if err == nil {
|
||||
if val, ok := m[key]; ok {
|
||||
return &Json{val}, true
|
||||
}
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// Map type asserts to `map`
|
||||
func (j *Json) Map() (map[string]interface{}, error) {
|
||||
if m, ok := (j.data).(map[string]interface{}); ok {
|
||||
return m, nil
|
||||
}
|
||||
return nil, errors.New("type assertion to map[string]interface{} failed")
|
||||
}
|
||||
|
||||
// Array type asserts to an `array`
|
||||
func (j *Json) Array() ([]interface{}, error) {
|
||||
if a, ok := (j.data).([]interface{}); ok {
|
||||
return a, nil
|
||||
}
|
||||
return nil, errors.New("type assertion to []interface{} failed")
|
||||
}
|
||||
|
||||
// Bool type asserts to `bool`
|
||||
func (j *Json) Bool() (bool, error) {
|
||||
if s, ok := (j.data).(bool); ok {
|
||||
return s, nil
|
||||
}
|
||||
return false, errors.New("type assertion to bool failed")
|
||||
}
|
||||
|
||||
// String type asserts to `string`
|
||||
func (j *Json) String() (string, error) {
|
||||
if s, ok := (j.data).(string); ok {
|
||||
return s, nil
|
||||
}
|
||||
return "", errors.New("type assertion to string failed")
|
||||
}
|
||||
|
||||
// Bytes type asserts to `[]byte`
|
||||
func (j *Json) Bytes() ([]byte, error) {
|
||||
if s, ok := (j.data).(string); ok {
|
||||
return []byte(s), nil
|
||||
}
|
||||
return nil, errors.New("type assertion to []byte failed")
|
||||
}
|
||||
|
||||
// StringArray type asserts to an `array` of `string`
|
||||
func (j *Json) StringArray() ([]string, error) {
|
||||
arr, err := j.Array()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
retArr := make([]string, 0, len(arr))
|
||||
for _, a := range arr {
|
||||
if a == nil {
|
||||
retArr = append(retArr, "")
|
||||
continue
|
||||
}
|
||||
s, ok := a.(string)
|
||||
if !ok {
|
||||
return nil, errors.New("type assertion to []string failed")
|
||||
}
|
||||
retArr = append(retArr, s)
|
||||
}
|
||||
return retArr, nil
|
||||
}
|
||||
|
||||
// MustArray guarantees the return of a `[]interface{}` (with optional default)
|
||||
//
|
||||
// useful when you want to iterate over array values in a succinct manner:
|
||||
//
|
||||
// for i, v := range js.Get("results").MustArray() {
|
||||
// fmt.Println(i, v)
|
||||
// }
|
||||
func (j *Json) MustArray(args ...[]interface{}) []interface{} {
|
||||
var def []interface{}
|
||||
|
||||
switch len(args) {
|
||||
case 0:
|
||||
case 1:
|
||||
def = args[0]
|
||||
default:
|
||||
log.Panicf("MustArray() received too many arguments %d", len(args))
|
||||
}
|
||||
|
||||
a, err := j.Array()
|
||||
if err == nil {
|
||||
return a
|
||||
}
|
||||
|
||||
return def
|
||||
}
|
||||
|
||||
// MustMap guarantees the return of a `map[string]interface{}` (with optional default)
|
||||
//
|
||||
// useful when you want to iterate over map values in a succinct manner:
|
||||
//
|
||||
// for k, v := range js.Get("dictionary").MustMap() {
|
||||
// fmt.Println(k, v)
|
||||
// }
|
||||
func (j *Json) MustMap(args ...map[string]interface{}) map[string]interface{} {
|
||||
var def map[string]interface{}
|
||||
|
||||
switch len(args) {
|
||||
case 0:
|
||||
case 1:
|
||||
def = args[0]
|
||||
default:
|
||||
log.Panicf("MustMap() received too many arguments %d", len(args))
|
||||
}
|
||||
|
||||
a, err := j.Map()
|
||||
if err == nil {
|
||||
return a
|
||||
}
|
||||
|
||||
return def
|
||||
}
|
||||
|
||||
// MustString guarantees the return of a `string` (with optional default)
|
||||
//
|
||||
// useful when you explicitly want a `string` in a single value return context:
|
||||
//
|
||||
// myFunc(js.Get("param1").MustString(), js.Get("optional_param").MustString("my_default"))
|
||||
func (j *Json) MustString(args ...string) string {
|
||||
var def string
|
||||
|
||||
switch len(args) {
|
||||
case 0:
|
||||
case 1:
|
||||
def = args[0]
|
||||
default:
|
||||
log.Panicf("MustString() received too many arguments %d", len(args))
|
||||
}
|
||||
|
||||
s, err := j.String()
|
||||
if err == nil {
|
||||
return s
|
||||
}
|
||||
|
||||
return def
|
||||
}
|
||||
|
||||
// MustStringArray guarantees the return of a `[]string` (with optional default)
|
||||
//
|
||||
// useful when you want to iterate over array values in a succinct manner:
|
||||
//
|
||||
// for i, s := range js.Get("results").MustStringArray() {
|
||||
// fmt.Println(i, s)
|
||||
// }
|
||||
func (j *Json) MustStringArray(args ...[]string) []string {
|
||||
var def []string
|
||||
|
||||
switch len(args) {
|
||||
case 0:
|
||||
case 1:
|
||||
def = args[0]
|
||||
default:
|
||||
log.Panicf("MustStringArray() received too many arguments %d", len(args))
|
||||
}
|
||||
|
||||
a, err := j.StringArray()
|
||||
if err == nil {
|
||||
return a
|
||||
}
|
||||
|
||||
return def
|
||||
}
|
||||
|
||||
// MustInt guarantees the return of an `int` (with optional default)
|
||||
//
|
||||
// useful when you explicitly want an `int` in a single value return context:
|
||||
//
|
||||
// myFunc(js.Get("param1").MustInt(), js.Get("optional_param").MustInt(5150))
|
||||
func (j *Json) MustInt(args ...int) int {
|
||||
var def int
|
||||
|
||||
switch len(args) {
|
||||
case 0:
|
||||
case 1:
|
||||
def = args[0]
|
||||
default:
|
||||
log.Panicf("MustInt() received too many arguments %d", len(args))
|
||||
}
|
||||
|
||||
i, err := j.Int()
|
||||
if err == nil {
|
||||
return i
|
||||
}
|
||||
|
||||
return def
|
||||
}
|
||||
|
||||
// MustFloat64 guarantees the return of a `float64` (with optional default)
|
||||
//
|
||||
// useful when you explicitly want a `float64` in a single value return context:
|
||||
//
|
||||
// myFunc(js.Get("param1").MustFloat64(), js.Get("optional_param").MustFloat64(5.150))
|
||||
func (j *Json) MustFloat64(args ...float64) float64 {
|
||||
var def float64
|
||||
|
||||
switch len(args) {
|
||||
case 0:
|
||||
case 1:
|
||||
def = args[0]
|
||||
default:
|
||||
log.Panicf("MustFloat64() received too many arguments %d", len(args))
|
||||
}
|
||||
|
||||
f, err := j.Float64()
|
||||
if err == nil {
|
||||
return f
|
||||
}
|
||||
|
||||
return def
|
||||
}
|
||||
|
||||
// MustBool guarantees the return of a `bool` (with optional default)
|
||||
//
|
||||
// useful when you explicitly want a `bool` in a single value return context:
|
||||
//
|
||||
// myFunc(js.Get("param1").MustBool(), js.Get("optional_param").MustBool(true))
|
||||
func (j *Json) MustBool(args ...bool) bool {
|
||||
var def bool
|
||||
|
||||
switch len(args) {
|
||||
case 0:
|
||||
case 1:
|
||||
def = args[0]
|
||||
default:
|
||||
log.Panicf("MustBool() received too many arguments %d", len(args))
|
||||
}
|
||||
|
||||
b, err := j.Bool()
|
||||
if err == nil {
|
||||
return b
|
||||
}
|
||||
|
||||
return def
|
||||
}
|
||||
|
||||
// MustInt64 guarantees the return of an `int64` (with optional default)
|
||||
//
|
||||
// useful when you explicitly want an `int64` in a single value return context:
|
||||
//
|
||||
// myFunc(js.Get("param1").MustInt64(), js.Get("optional_param").MustInt64(5150))
|
||||
func (j *Json) MustInt64(args ...int64) int64 {
|
||||
var def int64
|
||||
|
||||
switch len(args) {
|
||||
case 0:
|
||||
case 1:
|
||||
def = args[0]
|
||||
default:
|
||||
log.Panicf("MustInt64() received too many arguments %d", len(args))
|
||||
}
|
||||
|
||||
i, err := j.Int64()
|
||||
if err == nil {
|
||||
return i
|
||||
}
|
||||
|
||||
return def
|
||||
}
|
||||
|
||||
// MustUint64 guarantees the return of a `uint64` (with optional default)
|
||||
//
|
||||
// useful when you explicitly want an `uint64` in a single value return context:
|
||||
//
|
||||
// myFunc(js.Get("param1").MustUint64(), js.Get("optional_param").MustUint64(5150))
|
||||
func (j *Json) MustUint64(args ...uint64) uint64 {
|
||||
var def uint64
|
||||
|
||||
switch len(args) {
|
||||
case 0:
|
||||
case 1:
|
||||
def = args[0]
|
||||
default:
|
||||
log.Panicf("MustUint64() received too many arguments %d", len(args))
|
||||
}
|
||||
|
||||
i, err := j.Uint64()
|
||||
if err == nil {
|
||||
return i
|
||||
}
|
||||
|
||||
return def
|
||||
}
|
|
@ -0,0 +1,87 @@
|
|||
package simplejson
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"io"
|
||||
"reflect"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// UnmarshalJSON implements the json.Unmarshaler interface.
|
||||
func (j *Json) UnmarshalJSON(p []byte) error {
|
||||
dec := json.NewDecoder(bytes.NewBuffer(p))
|
||||
dec.UseNumber()
|
||||
return dec.Decode(&j.data)
|
||||
}
|
||||
|
||||
// NewFromReader returns a *Json by decoding from an io.Reader
|
||||
func NewFromReader(r io.Reader) (*Json, error) {
|
||||
j := new(Json)
|
||||
dec := json.NewDecoder(r)
|
||||
dec.UseNumber()
|
||||
err := dec.Decode(&j.data)
|
||||
return j, err
|
||||
}
|
||||
|
||||
// Float64 coerces into a float64
|
||||
func (j *Json) Float64() (float64, error) {
|
||||
switch j.data.(type) {
|
||||
case json.Number:
|
||||
return j.data.(json.Number).Float64()
|
||||
case float32, float64:
|
||||
return reflect.ValueOf(j.data).Float(), nil
|
||||
case int, int8, int16, int32, int64:
|
||||
return float64(reflect.ValueOf(j.data).Int()), nil
|
||||
case uint, uint8, uint16, uint32, uint64:
|
||||
return float64(reflect.ValueOf(j.data).Uint()), nil
|
||||
}
|
||||
return 0, errors.New("invalid value type")
|
||||
}
|
||||
|
||||
// Int coerces into an int
|
||||
func (j *Json) Int() (int, error) {
|
||||
switch j.data.(type) {
|
||||
case json.Number:
|
||||
i, err := j.data.(json.Number).Int64()
|
||||
return int(i), err
|
||||
case float32, float64:
|
||||
return int(reflect.ValueOf(j.data).Float()), nil
|
||||
case int, int8, int16, int32, int64:
|
||||
return int(reflect.ValueOf(j.data).Int()), nil
|
||||
case uint, uint8, uint16, uint32, uint64:
|
||||
return int(reflect.ValueOf(j.data).Uint()), nil
|
||||
}
|
||||
return 0, errors.New("invalid value type")
|
||||
}
|
||||
|
||||
// Int64 coerces into an int64
|
||||
func (j *Json) Int64() (int64, error) {
|
||||
switch j.data.(type) {
|
||||
case json.Number:
|
||||
return j.data.(json.Number).Int64()
|
||||
case float32, float64:
|
||||
return int64(reflect.ValueOf(j.data).Float()), nil
|
||||
case int, int8, int16, int32, int64:
|
||||
return reflect.ValueOf(j.data).Int(), nil
|
||||
case uint, uint8, uint16, uint32, uint64:
|
||||
return int64(reflect.ValueOf(j.data).Uint()), nil
|
||||
}
|
||||
return 0, errors.New("invalid value type")
|
||||
}
|
||||
|
||||
// Uint64 coerces into an uint64
|
||||
func (j *Json) Uint64() (uint64, error) {
|
||||
switch j.data.(type) {
|
||||
case json.Number:
|
||||
return strconv.ParseUint(j.data.(json.Number).String(), 10, 64)
|
||||
case float32, float64:
|
||||
return uint64(reflect.ValueOf(j.data).Float()), nil
|
||||
case int, int8, int16, int32, int64:
|
||||
return uint64(reflect.ValueOf(j.data).Int()), nil
|
||||
case uint, uint8, uint16, uint32, uint64:
|
||||
return reflect.ValueOf(j.data).Uint(), nil
|
||||
}
|
||||
return 0, errors.New("invalid value type")
|
||||
}
|
|
@ -0,0 +1,12 @@
|
|||
language: go
|
||||
go:
|
||||
- 1.11.x
|
||||
- 1.12.x
|
||||
install:
|
||||
- go get -v .
|
||||
- go get -v golang.org/x/lint/golint
|
||||
script:
|
||||
- diff <(gofmt -d .) <(echo -n)
|
||||
- go vet -x ./...
|
||||
- golint -set_exit_status ./...
|
||||
- go test -v -race ./...
|
|
@ -0,0 +1,23 @@
|
|||
Copyright (c) 2016, Bruce
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
|
||||
* Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
@ -0,0 +1,143 @@
|
|||
snowflake
|
||||
====
|
||||
[![GoDoc](https://godoc.org/github.com/bwmarrin/snowflake?status.svg)](https://godoc.org/github.com/bwmarrin/snowflake) [![Go report](http://goreportcard.com/badge/bwmarrin/snowflake)](http://goreportcard.com/report/bwmarrin/snowflake) [![Coverage](http://gocover.io/_badge/github.com/bwmarrin/snowflake)](https://gocover.io/github.com/bwmarrin/snowflake) [![Build Status](https://travis-ci.org/bwmarrin/snowflake.svg?branch=master)](https://travis-ci.org/bwmarrin/snowflake) [![Discord Gophers](https://img.shields.io/badge/Discord%20Gophers-%23info-blue.svg)](https://discord.gg/0f1SbxBZjYq9jLBk)
|
||||
|
||||
snowflake is a [Go](https://golang.org/) package that provides
|
||||
* A very simple Twitter snowflake generator.
|
||||
* Methods to parse existing snowflake IDs.
|
||||
* Methods to convert a snowflake ID into several other data types and back.
|
||||
* JSON Marshal/Unmarshal functions to easily use snowflake IDs within a JSON API.
|
||||
* Monotonic Clock calculations protect from clock drift.
|
||||
|
||||
**For help with this package or general Go discussion, please join the [Discord
|
||||
Gophers](https://discord.gg/0f1SbxBZjYq9jLBk) chat server.**
|
||||
|
||||
## Status
|
||||
This package should be considered stable and completed. Any additions in the
|
||||
future will strongly avoid API changes to existing functions.
|
||||
|
||||
### ID Format
|
||||
By default, the ID format follows the original Twitter snowflake format.
|
||||
* The ID as a whole is a 63 bit integer stored in an int64
|
||||
* 41 bits are used to store a timestamp with millisecond precision, using a custom epoch.
|
||||
* 10 bits are used to store a node id - a range from 0 through 1023.
|
||||
* 12 bits are used to store a sequence number - a range from 0 through 4095.
|
||||
|
||||
### Custom Format
|
||||
You can alter the number of bits used for the node id and step number (sequence)
|
||||
by setting the snowflake.NodeBits and snowflake.StepBits values. Remember that
|
||||
there is a maximum of 22 bits available that can be shared between these two
|
||||
values. You do not have to use all 22 bits.
|
||||
|
||||
### Custom Epoch
|
||||
By default this package uses the Twitter Epoch of 1288834974657 or Nov 04 2010 01:42:54.
|
||||
You can set your own epoch value by setting snowflake.Epoch to a time in milliseconds
|
||||
to use as the epoch.
|
||||
|
||||
### Custom Notes
|
||||
When setting custom epoch or bit values you need to set them prior to calling
|
||||
any functions on the snowflake package, including NewNode(). Otherwise the
|
||||
custom values you set will not be applied correctly.
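
A minimal sketch of that ordering, using the package-level variables named above; the epoch value and bit split here are illustrative, not recommendations.

```go
package main

import (
	"fmt"

	"github.com/bwmarrin/snowflake"
)

func main() {
	// Configure the package BEFORE constructing any nodes.
	snowflake.Epoch = 1577836800000 // example: 2020-01-01 00:00:00 UTC, in milliseconds
	snowflake.NodeBits = 8          // up to 256 node IDs
	snowflake.StepBits = 14         // up to 16384 IDs per node per millisecond (8 + 14 = 22 bits)

	node, err := snowflake.NewNode(42)
	if err != nil {
		panic(err)
	}
	fmt.Println(node.Generate().Int64())
}
```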
|
||||
|
||||
### How it Works.
|
||||
Each time you generate an ID, it works like this.
|
||||
* A timestamp with millisecond precision is stored using 41 bits of the ID.
|
||||
* Then the NodeID is added in subsequent bits.
|
||||
* Then the Sequence Number is added, starting at 0 and incrementing for each ID generated in the same millisecond. If you generate enough IDs in the same millisecond that the sequence would roll over or overfill then the generate function will pause until the next millisecond.
|
||||
|
||||
The default Twitter format is shown below.
|
||||
```
|
||||
+--------------------------------------------------------------------------+
|
||||
| 1 Bit Unused | 41 Bit Timestamp | 10 Bit NodeID | 12 Bit Sequence ID |
|
||||
+--------------------------------------------------------------------------+
|
||||
```
|
||||
|
||||
Using the default settings, this allows for 4096 unique IDs to be generated every millisecond, per Node ID.
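
To make the layout concrete, the sketch below unpacks an ID by hand using the default 41/10/12 bit widths; the ID value is arbitrary, and the package's own `Time()`, `Node()`, and `Step()` methods already do this for you.

```go
package main

import "fmt"

// Default snowflake layout: 1 unused bit | 41-bit timestamp | 10-bit node | 12-bit step.
const (
	stepBits = 12
	nodeBits = 10
	epoch    = 1288834974657 // default Twitter epoch, in milliseconds
)

// unpack splits an ID into its three fields purely to illustrate the format.
func unpack(id int64) (timestampMs, node, step int64) {
	step = id & ((1 << stepBits) - 1)
	node = (id >> stepBits) & ((1 << nodeBits) - 1)
	timestampMs = (id >> (stepBits + nodeBits)) + epoch
	return timestampMs, node, step
}

func main() {
	ts, node, step := unpack(1541815603606036480) // arbitrary example ID
	fmt.Println(ts, node, step)
}
```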
|
||||
## Getting Started
|
||||
|
||||
### Installing
|
||||
|
||||
This assumes you already have a working Go environment, if not please see
|
||||
[this page](https://golang.org/doc/install) first.
|
||||
|
||||
```sh
|
||||
go get github.com/bwmarrin/snowflake
|
||||
```
|
||||
|
||||
|
||||
### Usage
|
||||
|
||||
Import the package into your project then construct a new snowflake Node using a
|
||||
unique node number. The default settings permit a node number range from 0 to 1023.
|
||||
If you have set a custom NodeBits value, you will need to calculate what your
|
||||
node number range will be. With the node object, call the Generate() method to
|
||||
generate and return a unique snowflake ID.
|
||||
|
||||
Keep in mind that each node you create must have a unique node number, even
|
||||
across multiple servers. If you do not keep node numbers unique the generator
|
||||
cannot guarantee unique IDs across all nodes.
|
||||
|
||||
|
||||
**Example Program:**
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/bwmarrin/snowflake"
|
||||
)
|
||||
|
||||
func main() {
|
||||
|
||||
// Create a new Node with a Node number of 1
|
||||
node, err := snowflake.NewNode(1)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return
|
||||
}
|
||||
|
||||
// Generate a snowflake ID.
|
||||
id := node.Generate()
|
||||
|
||||
// Print out the ID in a few different ways.
|
||||
fmt.Printf("Int64 ID: %d\n", id)
|
||||
fmt.Printf("String ID: %s\n", id)
|
||||
fmt.Printf("Base2 ID: %s\n", id.Base2())
|
||||
fmt.Printf("Base64 ID: %s\n", id.Base64())
|
||||
|
||||
// Print out the ID's timestamp
|
||||
fmt.Printf("ID Time : %d\n", id.Time())
|
||||
|
||||
// Print out the ID's node number
|
||||
fmt.Printf("ID Node : %d\n", id.Node())
|
||||
|
||||
// Print out the ID's sequence number
|
||||
fmt.Printf("ID Step : %d\n", id.Step())
|
||||
|
||||
// Generate and print, all in one.
|
||||
fmt.Printf("ID : %d\n", node.Generate().Int64())
|
||||
}
|
||||
```
|
||||
|
||||
### Performance
|
||||
|
||||
With default settings, this snowflake generator should be fast
|
||||
enough on most systems to generate 4096 unique IDs per millisecond. This is
|
||||
the maximum that the snowflake ID format supports. That is, around 243-244
|
||||
nanoseconds per operation.
|
||||
|
||||
Since the snowflake generator is single threaded the primary limitation will be
|
||||
the maximum speed of a single processor on your system.
|
||||
|
||||
To benchmark the generator on your system run the following command inside the
|
||||
snowflake package directory.
|
||||
|
||||
```sh
|
||||
go test -run=^$ -bench=.
|
||||
```
|
||||
|
||||
If you're curious, check out this commit that shows benchmarks comparing a few
|
||||
different ways of implementing a snowflake generator in Go.
|
||||
* https://github.com/bwmarrin/snowflake/tree/9befef8908df13f4102ed21f42b083dd862b5036
|
|
@ -0,0 +1,365 @@
|
|||
// Package snowflake provides a very simple Twitter snowflake generator and parser.
|
||||
package snowflake
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
// Epoch is set to the twitter snowflake epoch of Nov 04 2010 01:42:54 UTC in milliseconds
|
||||
// You may customize this to set a different epoch for your application.
|
||||
Epoch int64 = 1288834974657
|
||||
|
||||
// NodeBits holds the number of bits to use for Node
|
||||
// Remember, you have a total 22 bits to share between Node/Step
|
||||
NodeBits uint8 = 10
|
||||
|
||||
// StepBits holds the number of bits to use for Step
|
||||
// Remember, you have a total 22 bits to share between Node/Step
|
||||
StepBits uint8 = 12
|
||||
|
||||
// DEPRECATED: the below four variables will be removed in a future release.
|
||||
mu sync.Mutex
|
||||
nodeMax int64 = -1 ^ (-1 << NodeBits)
|
||||
nodeMask = nodeMax << StepBits
|
||||
stepMask int64 = -1 ^ (-1 << StepBits)
|
||||
timeShift = NodeBits + StepBits
|
||||
nodeShift = StepBits
|
||||
)
|
||||
|
||||
const encodeBase32Map = "ybndrfg8ejkmcpqxot1uwisza345h769"
|
||||
|
||||
var decodeBase32Map [256]byte
|
||||
|
||||
const encodeBase58Map = "123456789abcdefghijkmnopqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ"
|
||||
|
||||
var decodeBase58Map [256]byte
|
||||
|
||||
// A JSONSyntaxError is returned from UnmarshalJSON if an invalid ID is provided.
|
||||
type JSONSyntaxError struct{ original []byte }
|
||||
|
||||
func (j JSONSyntaxError) Error() string {
|
||||
return fmt.Sprintf("invalid snowflake ID %q", string(j.original))
|
||||
}
|
||||
|
||||
// ErrInvalidBase58 is returned by ParseBase58 when given an invalid []byte
|
||||
var ErrInvalidBase58 = errors.New("invalid base58")
|
||||
|
||||
// ErrInvalidBase32 is returned by ParseBase32 when given an invalid []byte
|
||||
var ErrInvalidBase32 = errors.New("invalid base32")
|
||||
|
||||
// Create maps for decoding Base58/Base32.
|
||||
// This speeds up the process tremendously.
|
||||
func init() {
|
||||
|
||||
for i := 0; i < len(encodeBase58Map); i++ {
|
||||
decodeBase58Map[i] = 0xFF
|
||||
}
|
||||
|
||||
for i := 0; i < len(encodeBase58Map); i++ {
|
||||
decodeBase58Map[encodeBase58Map[i]] = byte(i)
|
||||
}
|
||||
|
||||
for i := 0; i < len(encodeBase32Map); i++ {
|
||||
decodeBase32Map[i] = 0xFF
|
||||
}
|
||||
|
||||
for i := 0; i < len(encodeBase32Map); i++ {
|
||||
decodeBase32Map[encodeBase32Map[i]] = byte(i)
|
||||
}
|
||||
}
|
||||
|
||||
// A Node struct holds the basic information needed for a snowflake generator
|
||||
// node
|
||||
type Node struct {
|
||||
mu sync.Mutex
|
||||
epoch time.Time
|
||||
time int64
|
||||
node int64
|
||||
step int64
|
||||
|
||||
nodeMax int64
|
||||
nodeMask int64
|
||||
stepMask int64
|
||||
timeShift uint8
|
||||
nodeShift uint8
|
||||
}
|
||||
|
||||
// An ID is a custom type used for a snowflake ID. This is used so we can
|
||||
// attach methods onto the ID.
|
||||
type ID int64
|
||||
|
||||
// NewNode returns a new snowflake node that can be used to generate snowflake
|
||||
// IDs
|
||||
func NewNode(node int64) (*Node, error) {
|
||||
|
||||
// re-calc in case custom NodeBits or StepBits were set
|
||||
// DEPRECATED: the below block will be removed in a future release.
|
||||
mu.Lock()
|
||||
nodeMax = -1 ^ (-1 << NodeBits)
|
||||
nodeMask = nodeMax << StepBits
|
||||
stepMask = -1 ^ (-1 << StepBits)
|
||||
timeShift = NodeBits + StepBits
|
||||
nodeShift = StepBits
|
||||
mu.Unlock()
|
||||
|
||||
n := Node{}
|
||||
n.node = node
|
||||
n.nodeMax = -1 ^ (-1 << NodeBits)
|
||||
n.nodeMask = n.nodeMax << StepBits
|
||||
n.stepMask = -1 ^ (-1 << StepBits)
|
||||
n.timeShift = NodeBits + StepBits
|
||||
n.nodeShift = StepBits
|
||||
|
||||
if n.node < 0 || n.node > n.nodeMax {
|
||||
return nil, errors.New("Node number must be between 0 and " + strconv.FormatInt(n.nodeMax, 10))
|
||||
}
|
||||
|
||||
var curTime = time.Now()
|
||||
// add time.Duration to curTime to make sure we use the monotonic clock if available
|
||||
n.epoch = curTime.Add(time.Unix(Epoch/1000, (Epoch%1000)*1000000).Sub(curTime))
|
||||
|
||||
return &n, nil
|
||||
}
|
||||
|
||||
// Generate creates and returns a unique snowflake ID
|
||||
// To help guarantee uniqueness
|
||||
// - Make sure your system is keeping accurate system time
|
||||
// - Make sure you never have multiple nodes running with the same node ID
|
||||
func (n *Node) Generate() ID {
|
||||
|
||||
n.mu.Lock()
|
||||
|
||||
now := time.Since(n.epoch).Nanoseconds() / 1000000
|
||||
|
||||
if now == n.time {
|
||||
n.step = (n.step + 1) & n.stepMask
|
||||
|
||||
if n.step == 0 {
|
||||
for now <= n.time {
|
||||
now = time.Since(n.epoch).Nanoseconds() / 1000000
|
||||
}
|
||||
}
|
||||
} else {
|
||||
n.step = 0
|
||||
}
|
||||
|
||||
n.time = now
|
||||
|
||||
r := ID((now)<<n.timeShift |
|
||||
(n.node << n.nodeShift) |
|
||||
(n.step),
|
||||
)
|
||||
|
||||
n.mu.Unlock()
|
||||
return r
|
||||
}
|
||||
|
||||
// Int64 returns an int64 of the snowflake ID
|
||||
func (f ID) Int64() int64 {
|
||||
return int64(f)
|
||||
}
|
||||
|
||||
// ParseInt64 converts an int64 into a snowflake ID
|
||||
func ParseInt64(id int64) ID {
|
||||
return ID(id)
|
||||
}
|
||||
|
||||
// String returns a string of the snowflake ID
|
||||
func (f ID) String() string {
|
||||
return strconv.FormatInt(int64(f), 10)
|
||||
}
|
||||
|
||||
// ParseString converts a string into a snowflake ID
|
||||
func ParseString(id string) (ID, error) {
|
||||
i, err := strconv.ParseInt(id, 10, 64)
|
||||
return ID(i), err
|
||||
|
||||
}
|
||||
|
||||
// Base2 returns a string base2 of the snowflake ID
|
||||
func (f ID) Base2() string {
|
||||
return strconv.FormatInt(int64(f), 2)
|
||||
}
|
||||
|
||||
// ParseBase2 converts a Base2 string into a snowflake ID
|
||||
func ParseBase2(id string) (ID, error) {
|
||||
i, err := strconv.ParseInt(id, 2, 64)
|
||||
return ID(i), err
|
||||
}
|
||||
|
||||
// Base32 uses the z-base-32 character set but encodes and decodes similar
|
||||
// to base58, allowing it to create an even smaller result string.
|
||||
// NOTE: There are many different base32 implementations so be careful when
|
||||
// doing any interoperation.
|
||||
func (f ID) Base32() string {
|
||||
|
||||
if f < 32 {
|
||||
return string(encodeBase32Map[f])
|
||||
}
|
||||
|
||||
b := make([]byte, 0, 12)
|
||||
for f >= 32 {
|
||||
b = append(b, encodeBase32Map[f%32])
|
||||
f /= 32
|
||||
}
|
||||
b = append(b, encodeBase32Map[f])
|
||||
|
||||
for x, y := 0, len(b)-1; x < y; x, y = x+1, y-1 {
|
||||
b[x], b[y] = b[y], b[x]
|
||||
}
|
||||
|
||||
return string(b)
|
||||
}
|
||||
|
||||
// ParseBase32 parses a base32 []byte into a snowflake ID
|
||||
// NOTE: There are many different base32 implementations so be careful when
|
||||
// doing any interoperation.
|
||||
func ParseBase32(b []byte) (ID, error) {
|
||||
|
||||
var id int64
|
||||
|
||||
for i := range b {
|
||||
if decodeBase32Map[b[i]] == 0xFF {
|
||||
return -1, ErrInvalidBase32
|
||||
}
|
||||
id = id*32 + int64(decodeBase32Map[b[i]])
|
||||
}
|
||||
|
||||
return ID(id), nil
|
||||
}
|
||||
|
||||
// Base36 returns a base36 string of the snowflake ID
|
||||
func (f ID) Base36() string {
|
||||
return strconv.FormatInt(int64(f), 36)
|
||||
}
|
||||
|
||||
// ParseBase36 converts a Base36 string into a snowflake ID
|
||||
func ParseBase36(id string) (ID, error) {
|
||||
i, err := strconv.ParseInt(id, 36, 64)
|
||||
return ID(i), err
|
||||
}
|
||||
|
||||
// Base58 returns a base58 string of the snowflake ID
|
||||
func (f ID) Base58() string {
|
||||
|
||||
if f < 58 {
|
||||
return string(encodeBase58Map[f])
|
||||
}
|
||||
|
||||
b := make([]byte, 0, 11)
|
||||
for f >= 58 {
|
||||
b = append(b, encodeBase58Map[f%58])
|
||||
f /= 58
|
||||
}
|
||||
b = append(b, encodeBase58Map[f])
|
||||
|
||||
for x, y := 0, len(b)-1; x < y; x, y = x+1, y-1 {
|
||||
b[x], b[y] = b[y], b[x]
|
||||
}
|
||||
|
||||
return string(b)
|
||||
}
|
||||
|
||||
// ParseBase58 parses a base58 []byte into a snowflake ID
|
||||
func ParseBase58(b []byte) (ID, error) {
|
||||
|
||||
var id int64
|
||||
|
||||
for i := range b {
|
||||
if decodeBase58Map[b[i]] == 0xFF {
|
||||
return -1, ErrInvalidBase58
|
||||
}
|
||||
id = id*58 + int64(decodeBase58Map[b[i]])
|
||||
}
|
||||
|
||||
return ID(id), nil
|
||||
}
|
||||
|
||||
// Base64 returns a base64 string of the snowflake ID
|
||||
func (f ID) Base64() string {
|
||||
return base64.StdEncoding.EncodeToString(f.Bytes())
|
||||
}
|
||||
|
||||
// ParseBase64 converts a base64 string into a snowflake ID
|
||||
func ParseBase64(id string) (ID, error) {
|
||||
b, err := base64.StdEncoding.DecodeString(id)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
return ParseBytes(b)
|
||||
|
||||
}
|
||||
|
||||
// Bytes returns a byte slice of the snowflake ID
|
||||
func (f ID) Bytes() []byte {
|
||||
return []byte(f.String())
|
||||
}
|
||||
|
||||
// ParseBytes converts a byte slice into a snowflake ID
|
||||
func ParseBytes(id []byte) (ID, error) {
|
||||
i, err := strconv.ParseInt(string(id), 10, 64)
|
||||
return ID(i), err
|
||||
}
|
||||
|
||||
// IntBytes returns an array of bytes of the snowflake ID, encoded as a
|
||||
// big endian integer.
|
||||
func (f ID) IntBytes() [8]byte {
|
||||
var b [8]byte
|
||||
binary.BigEndian.PutUint64(b[:], uint64(f))
|
||||
return b
|
||||
}
|
||||
|
||||
// ParseIntBytes converts an array of bytes encoded as a big endian integer into
|
||||
// a snowflake ID
|
||||
func ParseIntBytes(id [8]byte) ID {
|
||||
return ID(int64(binary.BigEndian.Uint64(id[:])))
|
||||
}
|
||||
|
||||
// Time returns an int64 unix timestamp in milliseconds of the snowflake ID time
|
||||
// DEPRECATED: the below function will be removed in a future release.
|
||||
func (f ID) Time() int64 {
|
||||
return (int64(f) >> timeShift) + Epoch
|
||||
}
|
||||
|
||||
// Node returns an int64 of the snowflake ID node number
|
||||
// DEPRECATED: the below function will be removed in a future release.
|
||||
func (f ID) Node() int64 {
|
||||
return int64(f) & nodeMask >> nodeShift
|
||||
}
|
||||
|
||||
// Step returns an int64 of the snowflake step (or sequence) number
|
||||
// DEPRECATED: the below function will be removed in a future release.
|
||||
func (f ID) Step() int64 {
|
||||
return int64(f) & stepMask
|
||||
}
|
||||
|
||||
// MarshalJSON returns a json byte array string of the snowflake ID.
|
||||
func (f ID) MarshalJSON() ([]byte, error) {
|
||||
buff := make([]byte, 0, 22)
|
||||
buff = append(buff, '"')
|
||||
buff = strconv.AppendInt(buff, int64(f), 10)
|
||||
buff = append(buff, '"')
|
||||
return buff, nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON converts a json byte array of a snowflake ID into an ID type.
|
||||
func (f *ID) UnmarshalJSON(b []byte) error {
|
||||
if len(b) < 3 || b[0] != '"' || b[len(b)-1] != '"' {
|
||||
return JSONSyntaxError{b}
|
||||
}
|
||||
|
||||
i, err := strconv.ParseInt(string(b[1:len(b)-1]), 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*f = ID(i)
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,21 @@
|
|||
MIT License
|
||||
|
||||
Copyright (c) 2021 Carl Johnson
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
|
@ -0,0 +1,312 @@
|
|||
# Requests [![GoDoc](https://godoc.org/github.com/carlmjohnson/requests?status.svg)](https://godoc.org/github.com/carlmjohnson/requests) [![Go Report Card](https://goreportcard.com/badge/github.com/carlmjohnson/requests)](https://goreportcard.com/report/github.com/carlmjohnson/requests) [![Coverage Status](https://coveralls.io/repos/github/carlmjohnson/requests/badge.svg)](https://coveralls.io/github/carlmjohnson/requests) [![Mentioned in Awesome Go](https://awesome.re/mentioned-badge.svg)](https://github.com/avelino/awesome-go)
|
||||
|
||||
![Requests logo](/img/gopher-web.png)
|
||||
|
||||
## _HTTP requests for Gophers._
|
||||
|
||||
**The problem**: Go's net/http is powerful and versatile, but using it correctly for client requests can be extremely verbose.
|
||||
|
||||
**The solution**: The requests.Builder type is a convenient way to build, send, and handle HTTP requests. Builder has a fluent API with methods returning a pointer to the same struct, which allows for declaratively describing a request by method chaining.
|
||||
|
||||
Requests also comes with tools for building custom http transports, including a request recorder and replayer for testing.
|
||||
|
||||
## Features
|
||||
|
||||
- Simplifies HTTP client usage compared to net/http
|
||||
- Can't forget to close response body
|
||||
- Checks status codes by default
|
||||
- Supports context.Context
|
||||
- JSON serialization and deserialization helpers
|
||||
- Easily manipulate URLs and query parameters
|
||||
- Request recording and replaying for tests
|
||||
- Customizable transports and validators that are compatible with the standard library and third party libraries
|
||||
- No third party dependencies
|
||||
- Good test coverage
|
||||
|
||||
## Examples
|
||||
### Simple GET into a string
|
||||
|
||||
<table>
|
||||
<thead>
|
||||
<tr>
|
||||
<th><strong>code with net/http</strong></th>
|
||||
<th><strong>code with requests</strong></th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td>
|
||||
|
||||
```go
|
||||
req, err := http.NewRequestWithContext(ctx,
|
||||
http.MethodGet, "http://example.com", nil)
|
||||
if err != nil {
|
||||
// ...
|
||||
}
|
||||
res, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
// ...
|
||||
}
|
||||
defer res.Body.Close()
|
||||
b, err := io.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
// ...
|
||||
}
|
||||
s := string(b)
|
||||
```
|
||||
</td>
|
||||
<td>
|
||||
|
||||
```go
|
||||
var s string
|
||||
err := requests.
|
||||
URL("http://example.com").
|
||||
ToString(&s).
|
||||
Fetch(ctx)
|
||||
```
|
||||
|
||||
</td>
|
||||
</tr>
|
||||
<tr><td>11+ lines</td><td>5 lines</td></tr>
|
||||
</tbody>
|
||||
</table>
|
||||
|
||||
|
||||
### POST a raw body
|
||||
|
||||
<table>
|
||||
<thead>
|
||||
<tr>
|
||||
<th><strong>code with net/http</strong></th>
|
||||
<th><strong>code with requests</strong></th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td>
|
||||
|
||||
```go
|
||||
body := bytes.NewReader([]byte(`hello, world`))
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPost,
|
||||
"https://postman-echo.com/post", body)
|
||||
if err != nil {
|
||||
// ...
|
||||
}
|
||||
req.Header.Set("Content-Type", "text/plain")
|
||||
res, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
// ...
|
||||
}
|
||||
defer res.Body.Close()
|
||||
_, err = io.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
// ...
|
||||
}
|
||||
```
|
||||
|
||||
</td>
|
||||
<td>
|
||||
|
||||
```go
|
||||
err := requests.
|
||||
URL("https://postman-echo.com/post").
|
||||
BodyBytes([]byte(`hello, world`)).
|
||||
ContentType("text/plain").
|
||||
Fetch(ctx)
|
||||
```
|
||||
|
||||
</td>
|
||||
</tr>
|
||||
<tr><td>12+ lines</td><td>5 lines</td></tr></tbody></table>
|
||||
|
||||
### GET a JSON object
|
||||
|
||||
<table>
|
||||
<thead>
|
||||
<tr>
|
||||
<th><strong>code with net/http</strong></th>
|
||||
<th><strong>code with requests</strong></th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td>
|
||||
|
||||
```go
|
||||
var post placeholder
|
||||
u, err := url.Parse("https://jsonplaceholder.typicode.com")
|
||||
if err != nil {
|
||||
// ...
|
||||
}
|
||||
u.Path = fmt.Sprintf("/posts/%d", 1)
|
||||
req, err := http.NewRequestWithContext(ctx,
|
||||
http.MethodGet, u.String(), nil)
|
||||
if err != nil {
|
||||
// ...
|
||||
}
|
||||
res, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
// ...
|
||||
}
|
||||
defer res.Body.Close()
|
||||
b, err := io.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
// ...
|
||||
}
|
||||
err = json.Unmarshal(b, &post)
|
||||
if err != nil {
|
||||
// ...
|
||||
}
|
||||
```
|
||||
</td><td>
|
||||
|
||||
```go
|
||||
var post placeholder
|
||||
err := requests.
|
||||
URL("https://jsonplaceholder.typicode.com").
|
||||
Pathf("/posts/%d", 1).
|
||||
ToJSON(&post).
|
||||
Fetch(ctx)
|
||||
```
|
||||
|
||||
</td>
|
||||
</tr>
|
||||
<tr><td>18+ lines</td><td>7 lines</td></tr></tbody></table>
|
||||
|
||||
### POST a JSON object and parse the response
|
||||
|
||||
```go
|
||||
var res placeholder
|
||||
req := placeholder{
|
||||
Title: "foo",
|
||||
Body: "baz",
|
||||
UserID: 1,
|
||||
}
|
||||
err := requests.
|
||||
URL("/posts").
|
||||
Host("jsonplaceholder.typicode.com").
|
||||
BodyJSON(&req).
|
||||
ToJSON(&res).
|
||||
Fetch(ctx)
|
||||
// net/http equivalent left as an exercise for the reader
|
||||
```
|
||||
|
||||
### Set custom headers for a request
|
||||
|
||||
```go
|
||||
// Set headers
|
||||
var headers postman
|
||||
err := requests.
|
||||
URL("https://postman-echo.com/get").
|
||||
UserAgent("bond/james-bond").
|
||||
ContentType("secret").
|
||||
Header("martini", "shaken").
|
||||
Fetch(ctx)
|
||||
```
|
||||
|
||||
### Easily manipulate URLs and query parameters
|
||||
|
||||
```go
|
||||
u, err := requests.
|
||||
URL("https://prod.example.com/get?a=1&b=2").
|
||||
Hostf("%s.example.com", "dev1").
|
||||
Param("b", "3").
|
||||
ParamInt("c", 4).
|
||||
URL()
|
||||
if err != nil { /* ... */ }
|
||||
fmt.Println(u.String()) // https://dev1.example.com/get?a=1&b=3&c=4
|
||||
```
|
||||
|
||||
### Record and replay responses
|
||||
|
||||
```go
|
||||
// record a request to the file system
|
||||
var s1, s2 string
|
||||
err := requests.URL("http://example.com").
|
||||
Transport(requests.Record(nil, "somedir")).
|
||||
ToString(&s1).
|
||||
Fetch(ctx)
|
||||
check(err)
|
||||
|
||||
// now replay the request in tests
|
||||
err = requests.URL("http://example.com").
|
||||
Transport(requests.Replay("somedir")).
|
||||
ToString(&s2).
|
||||
Fetch(ctx)
|
||||
check(err)
|
||||
assert(s1 == s2) // true
|
||||
```
|
||||
|
||||
## FAQs
|
||||
|
||||
[See wiki](https://github.com/carlmjohnson/requests/wiki) for more details.
|
||||
|
||||
### Why not just use the standard library HTTP client?
|
||||
|
||||
Brad Fitzpatrick, long time maintainer of the net/http package, [wrote an extensive list of problems with the standard library HTTP client](https://github.com/bradfitz/exp-httpclient/blob/master/problems.md). His four main points (ignoring issues that can't be resolved by a wrapper around the standard library) are:
|
||||
|
||||
> - Too easy to not call Response.Body.Close.
|
||||
> - Too easy to not check return status codes
|
||||
> - Context support is oddly bolted on
|
||||
> - Proper usage is too many lines of boilerplate
|
||||
|
||||
Requests solves these issues by always closing the response body, checking status codes by default, always requiring a `context.Context`, and simplifying the boilerplate with a descriptive UI based on fluent method chaining.
|
||||
|
||||
### Why requests and not some other helper library?
|
||||
|
||||
There are two major flaws in other libraries as I see it. First, support for `context.Context` tends to be bolted on, if it exists at all. Second, many hide the underlying `http.Client` in such a way that it is difficult or impossible to replace or mock out. Beyond that, I believe that none have achieved the same core simplicity that the requests library has.
|
||||
|
||||
### How do I just get some JSON?
|
||||
|
||||
```go
|
||||
var data SomeDataType
|
||||
err := requests.
|
||||
URL("https://example.com/my-json").
|
||||
ToJSON(&data).
|
||||
Fetch(ctx)
|
||||
```
|
||||
|
||||
### How do I post JSON and read the response JSON?
|
||||
|
||||
```go
|
||||
body := MyRequestType{}
|
||||
var resp MyResponseType
|
||||
err := requests.
|
||||
URL("https://example.com/my-json").
|
||||
BodyJSON(&body).
|
||||
ToJSON(&resp).
|
||||
Fetch(ctx)
|
||||
```
|
||||
|
||||
### How do I just save a file to disk?
|
||||
|
||||
It depends on exactly what you need in terms of file atomicity and buffering, but this will work for most cases:
|
||||
|
||||
```go
|
||||
err := requests.
|
||||
URL("http://example.com").
|
||||
ToFile("myfile.txt").
|
||||
Fetch(ctx)
|
||||
```
|
||||
|
||||
For more advanced use cases, use `ToWriter`.
|
||||
|
||||
### How do I save a response to a string?
|
||||
|
||||
```go
|
||||
var s string
|
||||
err := requests.
|
||||
URL("http://example.com").
|
||||
ToString(&s).
|
||||
Fetch(ctx)
|
||||
```
|
||||
|
||||
### How do I validate the response status?
|
||||
|
||||
By default, if no other validators are added to a builder, requests will check that the response is in the 2XX range. If you add another validator, you can add `builder.CheckStatus(200)` or `builder.AddValidator(requests.DefaultValidator)` to the validation stack.
|
||||
|
||||
To disable all response validation, run `builder.AddValidator(nil)`.
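
For example, a hedged sketch of pinning the accepted status explicitly (the URL is illustrative; `CheckStatus`, `ToString`, and `Fetch` are the builder methods described above):

```go
// Accept only 200; any other status surfaces as a validation error from Fetch.
var s string
err := requests.
	URL("https://example.com/endpoint").
	CheckStatus(200).
	ToString(&s).
	Fetch(ctx)
if err != nil {
	// handle the transport or validation error
}
```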
|
||||
|
||||
## Contributing
|
||||
|
||||
Please [create a discussion](https://github.com/carlmjohnson/requests/discussions) before submitting a pull request for a new feature.
|
|
@ -0,0 +1,9 @@
|
|||
# Security Policy
|
||||
|
||||
## Supported Versions
|
||||
|
||||
Only the most recently tagged version of this repository is supported.
|
||||
|
||||
## Reporting a Vulnerability
|
||||
|
||||
Email me@carlmjohnson.net with any security concerns or vulnerabilities.
|
|
@ -0,0 +1,77 @@
|
|||
package requests
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"net/url"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// BodyGetter provides a Builder with a source for a request body.
|
||||
type BodyGetter = func() (io.ReadCloser, error)
|
||||
|
||||
// BodyReader is a BodyGetter that returns an io.Reader.
|
||||
func BodyReader(r io.Reader) BodyGetter {
|
||||
return func() (io.ReadCloser, error) {
|
||||
if rc, ok := r.(io.ReadCloser); ok {
|
||||
return rc, nil
|
||||
}
|
||||
return rc(r), nil
|
||||
}
|
||||
}
|
||||
|
||||
// BodyWriter is a BodyGetter that pipes writes into a request body.
|
||||
func BodyWriter(f func(w io.Writer) error) BodyGetter {
|
||||
return func() (io.ReadCloser, error) {
|
||||
r, w := io.Pipe()
|
||||
go func() {
|
||||
var err error
|
||||
defer func() {
|
||||
w.CloseWithError(err)
|
||||
}()
|
||||
err = f(w)
|
||||
}()
|
||||
return r, nil
|
||||
}
|
||||
}
|
||||
|
||||
// BodyBytes is a BodyGetter that returns the provided raw bytes.
|
||||
func BodyBytes(b []byte) BodyGetter {
|
||||
return func() (io.ReadCloser, error) {
|
||||
return rc(bytes.NewReader(b)), nil
|
||||
}
|
||||
}
|
||||
|
||||
// BodySerializer is a BodyGetter
|
||||
// that uses the provided [Serializer]
|
||||
// to build the body of a request from v.
|
||||
func BodySerializer(s Serializer, v any) BodyGetter {
|
||||
return func() (io.ReadCloser, error) {
|
||||
b, err := s(v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return rc(bytes.NewReader(b)), nil
|
||||
}
|
||||
}
|
||||
|
||||
// BodyJSON is a [BodySerializer]
|
||||
// that uses [JSONSerializer] to marshal the object.
|
||||
func BodyJSON(v any) BodyGetter {
|
||||
return BodySerializer(JSONSerializer, v)
|
||||
}
|
||||
|
||||
// BodyForm is a BodyGetter that builds an encoded form body.
|
||||
func BodyForm(data url.Values) BodyGetter {
|
||||
return func() (r io.ReadCloser, err error) {
|
||||
return rc(strings.NewReader(data.Encode())), nil
|
||||
}
|
||||
}
|
||||
|
||||
// BodyFile is a BodyGetter that reads the provided file path.
|
||||
func BodyFile(name string) BodyGetter {
|
||||
return func() (r io.ReadCloser, err error) {
|
||||
return os.Open(name)
|
||||
}
|
||||
}
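
As a usage sketch, a `BodyGetter` is normally handed to `Builder.Body`; the endpoint and form fields below are illustrative only.

```go
// Sends a URL-encoded form body; setting a body implicitly switches the method to POST.
err := requests.
	URL("https://example.com/login").
	Body(requests.BodyForm(url.Values{"user": []string{"gopher"}})).
	Fetch(ctx)
```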
|
|
@ -0,0 +1,260 @@
|
|||
package requests
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
|
||||
"github.com/carlmjohnson/requests/internal/minitrue"
|
||||
"github.com/carlmjohnson/requests/internal/slicex"
|
||||
)
|
||||
|
||||
// Builder is a convenient way to build, send, and handle HTTP requests.
|
||||
// Builder has a fluent API with methods returning a pointer to the same
|
||||
// struct, which allows for declaratively describing a request by method chaining.
|
||||
//
|
||||
// Builder can build a url.URL,
|
||||
// build an http.Request,
|
||||
// or handle a full http.Client request and response with validation.
|
||||
//
|
||||
// # Build a url.URL with Builder.URL
|
||||
//
|
||||
// Set the base URL by creating a new Builder with [requests.URL]
|
||||
// or by calling [Builder.BaseURL]
|
||||
// then customize it with
|
||||
// [Builder.Scheme], [Builder.Host], [Builder.Hostf], [Builder.Path],
|
||||
// [Builder.Pathf], [Builder.Param], and [Builder.ParamInt].
|
||||
//
|
||||
// # Build an http.Request with Builder.Request
|
||||
//
|
||||
// Set the method for a request with [Builder.Method]
|
||||
// or use the [Builder.Delete], [Builder.Head], [Builder.Patch], [Builder.Post], and [Builder.Put] methods.
|
||||
// By default, requests without a body are GET,
|
||||
// and those with a body are POST.
|
||||
//
|
||||
// Set headers with [Builder.Header]
|
||||
// or set conventional header keys with
|
||||
// [Builder.Accept], [Builder.BasicAuth], [Builder.Bearer], [Builder.CacheControl],
|
||||
// [Builder.ContentType], [Builder.Cookie], and [Builder.UserAgent].
|
||||
//
|
||||
// Set the body of the request, if any, with [Builder.Body]
|
||||
// or use built in [Builder.BodyBytes], [Builder.BodyFile], [Builder.BodyForm],
|
||||
// [Builder.BodyJSON], [Builder.BodyReader], or [Builder.BodyWriter].
|
||||
//
|
||||
// # Handle a request and response with Builder.Do or Builder.Fetch
|
||||
//
|
||||
// Set the http.Client to use for a request with [Builder.Client]
|
||||
// and/or set an http.RoundTripper with [Builder.Transport].
|
||||
//
|
||||
// Add a response validator to the Builder with [Builder.AddValidator]
|
||||
// or use the built in [Builder.CheckStatus], [Builder.CheckContentType],
|
||||
// [Builder.CheckPeek], [Builder.CopyHeaders], and [Builder.ErrorJSON].
|
||||
// If no validator has been added, Builder will use [DefaultValidator].
|
||||
//
|
||||
// Set a handler for a response with [Builder.Handle]
|
||||
// or use the built in [Builder.ToHeaders], [Builder.ToJSON], [Builder.ToString],
|
||||
// [Builder.ToBytesBuffer], or [Builder.ToWriter].
|
||||
//
|
||||
// [Builder.Fetch] creates an http.Request with [Builder.Request]
|
||||
// and validates and handles it with [Builder.Do].
|
||||
//
|
||||
// # Other methods
|
||||
//
|
||||
// [Builder.Config] can be used to set several options on a Builder at once.
|
||||
// [New] creates a new Builder and applies [Config] options to it.
|
||||
//
|
||||
// In many cases, it will be possible to set most options for an API endpoint
|
||||
// in a Builder at the package or struct level
|
||||
// and then call [Builder.Clone] in a function
|
||||
// to add request specific details for the URL, parameters, headers, body, or handler.
|
||||
//
|
||||
// Errors returned by Builder methods will have an [ErrorKind] indicating their origin.
|
||||
//
|
||||
// The zero value of Builder is usable.
|
||||
type Builder struct {
|
||||
ub urlBuilder
|
||||
rb requestBuilder
|
||||
cl *http.Client
|
||||
rt http.RoundTripper
|
||||
validators []ResponseHandler
|
||||
handler ResponseHandler
|
||||
}
|
||||
|
||||
// BaseURL sets the base URL that other URL methods modify.
|
||||
// It is usually more convenient to use [URL] instead.
|
||||
func (rb *Builder) BaseURL(baseurl string) *Builder {
|
||||
rb.ub.BaseURL(baseurl)
|
||||
return rb
|
||||
}
|
||||
|
||||
// Scheme sets the scheme for a Builder's URL.
|
||||
// It overrides the scheme set by BaseURL.
|
||||
func (rb *Builder) Scheme(scheme string) *Builder {
|
||||
rb.ub.Scheme(scheme)
|
||||
return rb
|
||||
}
|
||||
|
||||
// Host sets the host for a Builder's URL.
|
||||
// It overrides the host set by BaseURL.
|
||||
func (rb *Builder) Host(host string) *Builder {
|
||||
rb.ub.Host(host)
|
||||
return rb
|
||||
}
|
||||
|
||||
// Path joins a path to a Builder's URL per the path joining rules of RFC 3986.
|
||||
// If the path begins with /, it overrides any existing path.
|
||||
// If the path begins with ./ or ../, the final path will be rewritten in its absolute form when creating a request.
|
||||
func (rb *Builder) Path(path string) *Builder {
|
||||
rb.ub.Path(path)
|
||||
return rb
|
||||
}
|
||||
|
||||
// Param sets a query parameter on a Builder's URL.
|
||||
// It overwrites the existing values of a key.
|
||||
func (rb *Builder) Param(key string, values ...string) *Builder {
|
||||
rb.ub.Param(key, values...)
|
||||
return rb
|
||||
}
|
||||
|
||||
// Header sets a header on a request. It overwrites the existing values of a key.
|
||||
func (rb *Builder) Header(key string, values ...string) *Builder {
|
||||
rb.rb.Header(key, values...)
|
||||
return rb
|
||||
}
|
||||
|
||||
// Cookie adds a cookie to a request.
|
||||
// Unlike other headers, adding a cookie does not overwrite existing values.
|
||||
func (rb *Builder) Cookie(name, value string) *Builder {
|
||||
rb.rb.Cookie(name, value)
|
||||
return rb
|
||||
}
|
||||
|
||||
// Method sets the HTTP method for a request.
|
||||
// By default, requests without a body are GET,
|
||||
// and those with a body are POST.
|
||||
func (rb *Builder) Method(method string) *Builder {
|
||||
rb.rb.Method(method)
|
||||
return rb
|
||||
}
|
||||
|
||||
// Body sets the BodyGetter to use to build the body of a request.
|
||||
// The provided BodyGetter is used as an http.Request.GetBody func.
|
||||
// It implicitly sets method to POST.
|
||||
func (rb *Builder) Body(src BodyGetter) *Builder {
|
||||
rb.rb.Body(src)
|
||||
return rb
|
||||
}
|
||||
|
||||
// Client sets the http.Client to use for requests. If nil, it uses http.DefaultClient.
|
||||
func (rb *Builder) Client(cl *http.Client) *Builder {
|
||||
rb.cl = cl
|
||||
return rb
|
||||
}
|
||||
|
||||
// Transport sets the http.RoundTripper to use for requests.
|
||||
// If set, it makes a shallow copy of the http.Client before modifying it.
|
||||
func (rb *Builder) Transport(rt http.RoundTripper) *Builder {
|
||||
rb.rt = rt
|
||||
return rb
|
||||
}
|
||||
|
||||
// AddValidator adds a response validator to the Builder.
|
||||
// Adding a validator disables DefaultValidator.
|
||||
// To disable all validation, just add nil.
|
||||
func (rb *Builder) AddValidator(h ResponseHandler) *Builder {
|
||||
rb.validators = append(rb.validators, h)
|
||||
return rb
|
||||
}
|
||||
|
||||
// Handle sets the response handler for a Builder.
|
||||
// To use multiple handlers, use ChainHandlers.
|
||||
func (rb *Builder) Handle(h ResponseHandler) *Builder {
|
||||
rb.handler = h
|
||||
return rb
|
||||
}
|
||||
|
||||
// Config allows Builder to be extended by functions that set several options at once.
|
||||
func (rb *Builder) Config(cfgs ...Config) *Builder {
|
||||
for _, cfg := range cfgs {
|
||||
cfg(rb)
|
||||
}
|
||||
return rb
|
||||
}
|
||||
|
||||
// Clone creates a new Builder suitable for independent mutation.
|
||||
func (rb *Builder) Clone() *Builder {
|
||||
rb2 := *rb
|
||||
rb2.ub = *rb.ub.Clone()
|
||||
rb2.rb = *rb.rb.Clone()
|
||||
slicex.Clip(&rb2.validators)
|
||||
return &rb2
|
||||
}
|
||||
|
||||
func joinerrs(a, b error) error {
|
||||
return fmt.Errorf("%w: %w", a, b)
|
||||
}
|
||||
|
||||
// URL builds a *url.URL from the base URL and options set on the Builder.
|
||||
// If a valid url.URL cannot be built,
|
||||
// URL() nevertheless returns a new url.URL,
|
||||
// so it is always safe to call u.String().
|
||||
func (rb *Builder) URL() (u *url.URL, err error) {
|
||||
u, err = rb.ub.URL()
|
||||
if err != nil {
|
||||
return u, joinerrs(ErrURL, err)
|
||||
}
|
||||
return u, nil
|
||||
}
|
||||
|
||||
// Request builds a new http.Request with its context set.
|
||||
func (rb *Builder) Request(ctx context.Context) (req *http.Request, err error) {
|
||||
u, err := rb.URL()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err = rb.rb.Request(ctx, u)
|
||||
if err != nil {
|
||||
return nil, joinerrs(ErrRequest, err)
|
||||
}
|
||||
return req, nil
|
||||
}
|
||||
|
||||
// Do calls the underlying http.Client and validates and handles any resulting response. The response body is closed after all validators and the handler run.
|
||||
func (rb *Builder) Do(req *http.Request) (err error) {
|
||||
cl := minitrue.Or(rb.cl, http.DefaultClient)
|
||||
if rb.rt != nil {
|
||||
cl2 := *cl
|
||||
cl2.Transport = rb.rt
|
||||
cl = &cl2
|
||||
}
|
||||
validators := rb.validators
|
||||
if len(validators) == 0 {
|
||||
validators = []ResponseHandler{DefaultValidator}
|
||||
}
|
||||
h := minitrue.Cond(rb.handler != nil,
|
||||
rb.handler,
|
||||
consumeBody)
|
||||
|
||||
code, err := do(cl, req, validators, h)
|
||||
switch code {
|
||||
case doOK:
|
||||
return nil
|
||||
case doConnect:
|
||||
err = joinerrs(ErrTransport, err)
|
||||
case doValidate:
|
||||
err = joinerrs(ErrValidator, err)
|
||||
case doHandle:
|
||||
err = joinerrs(ErrHandler, err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Fetch builds a request, sends it, and handles the response.
|
||||
func (rb *Builder) Fetch(ctx context.Context) (err error) {
|
||||
req, err := rb.Request(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return rb.Do(req)
|
||||
}
|
|
@ -0,0 +1,238 @@
|
|||
package requests
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// URL creates a new Builder suitable for method chaining.
|
||||
// It is equivalent to calling BaseURL on an empty Builder.
|
||||
func URL(baseurl string) *Builder {
|
||||
return (&Builder{}).BaseURL(baseurl)
|
||||
}
|
||||
|
||||
// New creates a new Builder suitable for method chaining by applying the specified Configs.
|
||||
// It is equivalent to calling Config on an empty Builder.
|
||||
// The zero value of Builder is usable,
|
||||
// so it is not necessary to call New
|
||||
// when you do not have any Configs to apply.
|
||||
func New(cfgs ...Config) *Builder {
|
||||
return (&Builder{}).Config(cfgs...)
|
||||
}
|
||||
|
||||
// Head sets HTTP method to HEAD.
|
||||
func (rb *Builder) Head() *Builder {
|
||||
return rb.Method(http.MethodHead)
|
||||
}
|
||||
|
||||
// Post sets HTTP method to POST.
|
||||
//
|
||||
// Note that setting a Body causes a request to be POST by default.
|
||||
func (rb *Builder) Post() *Builder {
|
||||
return rb.Method(http.MethodPost)
|
||||
}
|
||||
|
||||
// Put sets HTTP method to PUT.
|
||||
func (rb *Builder) Put() *Builder {
|
||||
return rb.Method(http.MethodPut)
|
||||
}
|
||||
|
||||
// Patch sets HTTP method to PATCH.
|
||||
func (rb *Builder) Patch() *Builder {
|
||||
return rb.Method(http.MethodPatch)
|
||||
}
|
||||
|
||||
// Delete sets HTTP method to DELETE.
|
||||
func (rb *Builder) Delete() *Builder {
|
||||
return rb.Method(http.MethodDelete)
|
||||
}
|
||||
|
||||
// Hostf calls Host with fmt.Sprintf.
|
||||
func (rb *Builder) Hostf(format string, a ...any) *Builder {
|
||||
return rb.Host(fmt.Sprintf(format, a...))
|
||||
}
|
||||
|
||||
// Pathf calls Path with fmt.Sprintf.
|
||||
//
|
||||
// Note that for security reasons, you must not use %s
|
||||
// with a user provided string!
|
||||
func (rb *Builder) Pathf(format string, a ...any) *Builder {
|
||||
return rb.Path(fmt.Sprintf(format, a...))
|
||||
}
|
||||
|
||||
// ParamInt converts value to a string and calls Param.
|
||||
func (rb *Builder) ParamInt(key string, value int) *Builder {
|
||||
return rb.Param(key, strconv.Itoa(value))
|
||||
}
|
||||
|
||||
// Params calls Param with all the members of m.
|
||||
func (rb *Builder) Params(m map[string][]string) *Builder {
|
||||
for k, vv := range m {
|
||||
rb.Param(k, vv...)
|
||||
}
|
||||
return rb
|
||||
}
|
||||
|
||||
// Headers calls Header with all the members of m.
|
||||
func (rb *Builder) Headers(m map[string][]string) *Builder {
|
||||
for k, vv := range m {
|
||||
rb.Header(k, vv...)
|
||||
}
|
||||
return rb
|
||||
}
|
||||
|
||||
// Accept sets the Accept header for a request.
|
||||
func (rb *Builder) Accept(contentTypes string) *Builder {
|
||||
return rb.Header("Accept", contentTypes)
|
||||
}
|
||||
|
||||
// CacheControl sets the client-side Cache-Control directive for a request.
|
||||
func (rb *Builder) CacheControl(directive string) *Builder {
|
||||
return rb.Header("Cache-Control", directive)
|
||||
}
|
||||
|
||||
// ContentType sets the Content-Type header on a request.
|
||||
func (rb *Builder) ContentType(ct string) *Builder {
|
||||
return rb.Header("Content-Type", ct)
|
||||
}
|
||||
|
||||
// UserAgent sets the User-Agent header.
|
||||
func (rb *Builder) UserAgent(s string) *Builder {
|
||||
return rb.Header("User-Agent", s)
|
||||
}
|
||||
|
||||
// BasicAuth sets the Authorization header to a basic auth credential.
|
||||
func (rb *Builder) BasicAuth(username, password string) *Builder {
|
||||
auth := username + ":" + password
|
||||
v := base64.StdEncoding.EncodeToString([]byte(auth))
|
||||
return rb.Header("Authorization", "Basic "+v)
|
||||
}
|
||||
|
||||
// Bearer sets the Authorization header to a bearer token.
|
||||
func (rb *Builder) Bearer(token string) *Builder {
|
||||
return rb.Header("Authorization", "Bearer "+token)
|
||||
}
|
||||
|
||||
// BodyReader sets the Builder's request body to r.
|
||||
func (rb *Builder) BodyReader(r io.Reader) *Builder {
|
||||
return rb.Body(BodyReader(r))
|
||||
}
|
||||
|
||||
// BodyWriter pipes writes from w to the Builder's request body.
|
||||
func (rb *Builder) BodyWriter(f func(w io.Writer) error) *Builder {
|
||||
return rb.Body(BodyWriter(f))
|
||||
}
|
||||
|
||||
// BodyBytes sets the Builder's request body to b.
|
||||
func (rb *Builder) BodyBytes(b []byte) *Builder {
|
||||
return rb.Body(BodyBytes(b))
|
||||
}
|
||||
|
||||
// BodySerializer sets the Builder's request body
|
||||
// to the serialized object.
|
||||
func (rb *Builder) BodySerializer(s Serializer, v any) *Builder {
|
||||
return rb.
|
||||
Body(BodySerializer(s, v))
|
||||
}
|
||||
|
||||
// BodyJSON sets the Builder's request body to the marshaled JSON.
|
||||
// It uses [JSONSerializer] to marshal the object.
|
||||
// It also sets ContentType to "application/json".
|
||||
func (rb *Builder) BodyJSON(v any) *Builder {
|
||||
return rb.
|
||||
Body(BodyJSON(v)).
|
||||
ContentType("application/json")
|
||||
}
|
||||
|
||||
// BodyForm sets the Builder's request body to the encoded form.
|
||||
// It also sets the ContentType to "application/x-www-form-urlencoded".
|
||||
func (rb *Builder) BodyForm(data url.Values) *Builder {
|
||||
return rb.
|
||||
Body(BodyForm(data)).
|
||||
ContentType("application/x-www-form-urlencoded")
|
||||
}
|
||||
|
||||
// BodyFile sets the Builder's request body to read from the given file path.
|
||||
func (rb *Builder) BodyFile(name string) *Builder {
|
||||
return rb.Body(BodyFile(name))
|
||||
}
|
||||
|
||||
// CheckStatus adds a validator for status code of a response.
|
||||
func (rb *Builder) CheckStatus(acceptStatuses ...int) *Builder {
|
||||
return rb.AddValidator(CheckStatus(acceptStatuses...))
|
||||
}
|
||||
|
||||
// CheckContentType adds a validator for the content type header of a response.
|
||||
func (rb *Builder) CheckContentType(cts ...string) *Builder {
|
||||
return rb.AddValidator(CheckContentType(cts...))
|
||||
}
|
||||
|
||||
// CheckPeek adds a validator that peeks at the first n bytes of a response body.
|
||||
func (rb *Builder) CheckPeek(n int, f func([]byte) error) *Builder {
|
||||
return rb.AddValidator(CheckPeek(n, f))
|
||||
}
|
||||
|
||||
// ToDeserializer sets the Builder to decode a response into v
|
||||
// using a [Deserializer].
|
||||
func (rb *Builder) ToDeserializer(d Deserializer, v any) *Builder {
|
||||
return rb.
|
||||
Handle(ToDeserializer(d, v))
|
||||
}
|
||||
|
||||
// ToJSON sets the Builder to decode a response as a JSON object.
|
||||
//
|
||||
// It uses [JSONDeserializer] to unmarshal the object.
|
||||
func (rb *Builder) ToJSON(v any) *Builder {
|
||||
return rb.Handle(ToJSON(v))
|
||||
}
|
||||
|
||||
// ToString sets the Builder to write the response body to the provided string pointer.
|
||||
func (rb *Builder) ToString(sp *string) *Builder {
|
||||
return rb.Handle(ToString(sp))
|
||||
}
|
||||
|
||||
// ToBytesBuffer sets the Builder to write the response body to the provided bytes.Buffer.
|
||||
func (rb *Builder) ToBytesBuffer(buf *bytes.Buffer) *Builder {
|
||||
return rb.Handle(ToBytesBuffer(buf))
|
||||
}
|
||||
|
||||
// ToWriter sets the Builder to copy the response body into w.
|
||||
func (rb *Builder) ToWriter(w io.Writer) *Builder {
|
||||
return rb.Handle(ToWriter(w))
|
||||
}
|
||||
|
||||
// ToFile sets the Builder to write the response body to the given file name.
|
||||
// The file and its parent directories are created automatically.
|
||||
// For more advanced use cases, use ToWriter.
|
||||
func (rb *Builder) ToFile(name string) *Builder {
|
||||
return rb.Handle(ToFile(name))
|
||||
}
|
||||
|
||||
// CopyHeaders adds a validator which copies the response headers to h.
|
||||
// Note that because CopyHeaders adds a validator,
|
||||
// the DefaultValidator is disabled and must be added back manually
|
||||
// if status code validation is desired.
|
||||
func (rb *Builder) CopyHeaders(h map[string][]string) *Builder {
|
||||
return rb.
|
||||
AddValidator(CopyHeaders(h))
|
||||
}
|
||||
|
||||
// ToHeaders sets the method to HEAD and adds a handler which copies the response headers to h.
|
||||
// To just copy headers, see Builder.CopyHeaders.
|
||||
func (rb *Builder) ToHeaders(h map[string][]string) *Builder {
|
||||
return rb.
|
||||
Head().
|
||||
Handle(ChainHandlers(CopyHeaders(h), consumeBody))
|
||||
}
|
||||
|
||||
// ErrorJSON adds a validator that applies DefaultValidator
|
||||
// and decodes the response as a JSON object
|
||||
// if the DefaultValidator check fails.
|
||||
func (rb *Builder) ErrorJSON(v any) *Builder {
|
||||
return rb.AddValidator(ErrorJSON(v))
|
||||
}
|
|
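The extras in this file are thin wrappers over Method, Header, Body, AddValidator, and Handle. A hedged sketch of a POST that combines several of them; the URL, bearer token, and payload are placeholders:

package main

import (
	"context"
	"log"
	"net/http"

	"github.com/carlmjohnson/requests"
)

func main() {
	payload := map[string]string{"name": "example"} // placeholder body
	var created struct {
		ID int `json:"id"`
	}
	// BodyJSON implies POST and sets Content-Type; CheckStatus replaces DefaultValidator.
	err := requests.
		URL("https://example.com/api/things").
		Bearer("TOKEN"). // placeholder token
		BodyJSON(payload).
		CheckStatus(http.StatusCreated).
		ToJSON(&created).
		Fetch(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	log.Println("created id:", created.ID)
}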
@ -0,0 +1,43 @@
|
|||
package requests
|
||||
|
||||
import (
|
||||
"compress/gzip"
|
||||
"io"
|
||||
"net/http/httptest"
|
||||
)
|
||||
|
||||
// Config allows Builder to be extended by setting several options at once.
|
||||
// For example, a Config might set a Body and its ContentType.
|
||||
type Config = func(rb *Builder)
|
||||
|
||||
// GzipConfig writes a gzip stream to its request body using a callback.
|
||||
// It also sets the appropriate Content-Encoding header and automatically
|
||||
// closes the stream when the callback returns.
|
||||
func GzipConfig(level int, h func(gw *gzip.Writer) error) Config {
|
||||
return func(rb *Builder) {
|
||||
rb.
|
||||
Header("Content-Encoding", "gzip").
|
||||
BodyWriter(func(w io.Writer) error {
|
||||
gw, err := gzip.NewWriterLevel(w, level)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err = h(gw); err != nil {
|
||||
gw.Close()
|
||||
return err
|
||||
}
|
||||
return gw.Close()
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestServerConfig returns a Config
|
||||
// which sets the Builder's BaseURL to s.URL
|
||||
// and the Builder's Client to s.Client().
|
||||
func TestServerConfig(s *httptest.Server) Config {
|
||||
return func(rb *Builder) {
|
||||
rb.
|
||||
BaseURL(s.URL).
|
||||
Client(s.Client())
|
||||
}
|
||||
}
|
|
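TestServerConfig pairs naturally with httptest in unit tests. A sketch under that assumption; the test name and handler are made up:

package requests_test

import (
	"context"
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/carlmjohnson/requests"
)

func TestServerConfigSketch(t *testing.T) {
	// Hypothetical handler that always returns a fixed body.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello"))
	}))
	defer srv.Close()

	var body string
	// The Config sets both BaseURL and Client from the test server.
	err := requests.
		New(requests.TestServerConfig(srv)).
		ToString(&body).
		Fetch(context.Background())
	if err != nil || body != "hello" {
		t.Fatalf("got %q, %v", body, err)
	}
}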
@ -0,0 +1,18 @@
|
|||
package requests
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/http/cookiejar"
|
||||
|
||||
"golang.org/x/net/publicsuffix"
|
||||
)
|
||||
|
||||
// NewCookieJar returns a cookie jar using the standard public suffix list.
|
||||
func NewCookieJar() http.CookieJar {
|
||||
jar, err := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List})
|
||||
// As of Go 1.16, cookiejar.New err is hardcoded nil
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return jar
|
||||
}
|
|
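NewCookieJar is meant to be dropped into an http.Client so cookies persist across Builder calls. A minimal sketch; the base URL and paths are placeholders:

package main

import (
	"context"
	"log"
	"net/http"

	"github.com/carlmjohnson/requests"
)

func main() {
	// One client with a jar, shared by every request built from rb.
	cl := &http.Client{Jar: requests.NewCookieJar()}
	rb := requests.
		URL("https://example.com"). // placeholder base URL
		Client(cl)

	// Cookies set by the first response are replayed on the second request.
	if err := rb.Clone().Path("/login").Fetch(context.Background()); err != nil {
		log.Fatal(err)
	}
	if err := rb.Clone().Path("/profile").Fetch(context.Background()); err != nil {
		log.Fatal(err)
	}
}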
@ -0,0 +1,36 @@
|
|||
package requests
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
)
|
||||
|
||||
type doResponse int
|
||||
|
||||
const (
|
||||
doOK doResponse = iota
|
||||
doConnect
|
||||
doValidate
|
||||
doHandle
|
||||
)
|
||||
|
||||
func do(cl *http.Client, req *http.Request, validators []ResponseHandler, h ResponseHandler) (doResponse, error) {
|
||||
res, err := cl.Do(req)
|
||||
if err != nil {
|
||||
return doConnect, err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
|
||||
for _, v := range validators {
|
||||
if v == nil {
|
||||
continue
|
||||
}
|
||||
if err = v(res); err != nil {
|
||||
return doValidate, err
|
||||
}
|
||||
}
|
||||
if err = h(res); err != nil {
|
||||
return doHandle, err
|
||||
}
|
||||
|
||||
return doOK, nil
|
||||
}
|
|
@ -0,0 +1,92 @@
|
|||
package requests
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
|
||||
"github.com/carlmjohnson/requests/internal/minitrue"
|
||||
"github.com/carlmjohnson/requests/internal/slicex"
|
||||
)
|
||||
|
||||
// nopCloser is like io.NopCloser(),
|
||||
// but it is a concrete type so we can strip it out
|
||||
// before setting a body on a request.
|
||||
// See https://github.com/carlmjohnson/requests/discussions/49
|
||||
type nopCloser struct {
|
||||
io.Reader
|
||||
}
|
||||
|
||||
func rc(r io.Reader) nopCloser {
|
||||
return nopCloser{r}
|
||||
}
|
||||
|
||||
func (nopCloser) Close() error { return nil }
|
||||
|
||||
var _ io.ReadCloser = nopCloser{}
|
||||
|
||||
type requestBuilder struct {
|
||||
headers []multimap
|
||||
cookies []kvpair
|
||||
getBody BodyGetter
|
||||
method string
|
||||
}
|
||||
|
||||
func (rb *requestBuilder) Header(key string, values ...string) {
|
||||
rb.headers = append(rb.headers, multimap{key, values})
|
||||
}
|
||||
|
||||
func (rb *requestBuilder) Cookie(name, value string) {
|
||||
rb.cookies = append(rb.cookies, kvpair{name, value})
|
||||
}
|
||||
|
||||
func (rb *requestBuilder) Method(method string) {
|
||||
rb.method = method
|
||||
}
|
||||
|
||||
func (rb *requestBuilder) Body(src BodyGetter) {
|
||||
rb.getBody = src
|
||||
}
|
||||
|
||||
// Clone creates a new Builder suitable for independent mutation.
|
||||
func (rb *requestBuilder) Clone() *requestBuilder {
|
||||
rb2 := *rb
|
||||
slicex.Clip(&rb2.headers)
|
||||
slicex.Clip(&rb2.cookies)
|
||||
return &rb2
|
||||
}
|
||||
|
||||
// Request builds a new http.Request with its context set.
|
||||
func (rb *requestBuilder) Request(ctx context.Context, u *url.URL) (req *http.Request, err error) {
|
||||
var body io.Reader
|
||||
if rb.getBody != nil {
|
||||
if body, err = rb.getBody(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if nopper, ok := body.(nopCloser); ok {
|
||||
body = nopper.Reader
|
||||
}
|
||||
}
|
||||
method := minitrue.Or(rb.method,
|
||||
minitrue.Cond(rb.getBody == nil,
|
||||
http.MethodGet,
|
||||
http.MethodPost))
|
||||
|
||||
req, err = http.NewRequestWithContext(ctx, method, u.String(), body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.GetBody = rb.getBody
|
||||
|
||||
for _, kv := range rb.headers {
|
||||
req.Header[http.CanonicalHeaderKey(kv.key)] = kv.values
|
||||
}
|
||||
for _, kv := range rb.cookies {
|
||||
req.AddCookie(&http.Cookie{
|
||||
Name: kv.key,
|
||||
Value: kv.value,
|
||||
})
|
||||
}
|
||||
return req, nil
|
||||
}
|
|
@ -0,0 +1,81 @@
|
|||
package requests
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
|
||||
"github.com/carlmjohnson/requests/internal/minitrue"
|
||||
"github.com/carlmjohnson/requests/internal/slicex"
|
||||
)
|
||||
|
||||
type multimap struct {
|
||||
key string
|
||||
values []string
|
||||
}
|
||||
|
||||
type kvpair struct {
|
||||
key, value string
|
||||
}
|
||||
|
||||
type urlBuilder struct {
|
||||
baseurl string
|
||||
scheme, host string
|
||||
paths []string
|
||||
params []multimap
|
||||
}
|
||||
|
||||
func (ub *urlBuilder) BaseURL(baseurl string) {
|
||||
ub.baseurl = baseurl
|
||||
}
|
||||
|
||||
func (ub *urlBuilder) Scheme(scheme string) {
|
||||
ub.scheme = scheme
|
||||
}
|
||||
|
||||
func (ub *urlBuilder) Host(host string) {
|
||||
ub.host = host
|
||||
|
||||
}
|
||||
|
||||
func (ub *urlBuilder) Path(path string) {
|
||||
ub.paths = append(ub.paths, path)
|
||||
}
|
||||
|
||||
func (ub *urlBuilder) Param(key string, values ...string) {
|
||||
ub.params = append(ub.params, multimap{key, values})
|
||||
}
|
||||
|
||||
func (ub *urlBuilder) Clone() *urlBuilder {
|
||||
ub2 := *ub
|
||||
slicex.Clip(&ub2.paths)
|
||||
slicex.Clip(&ub2.params)
|
||||
return &ub2
|
||||
}
|
||||
|
||||
func (ub *urlBuilder) URL() (u *url.URL, err error) {
|
||||
u, err = url.Parse(ub.baseurl)
|
||||
if err != nil {
|
||||
return new(url.URL), err
|
||||
}
|
||||
u.Scheme = minitrue.Or(
|
||||
ub.scheme,
|
||||
u.Scheme,
|
||||
"https",
|
||||
)
|
||||
u.Host = minitrue.Or(ub.host, u.Host)
|
||||
for _, p := range ub.paths {
|
||||
u.Path = u.ResolveReference(&url.URL{Path: p}).Path
|
||||
}
|
||||
if len(ub.params) > 0 {
|
||||
q := u.Query()
|
||||
for _, kv := range ub.params {
|
||||
q[kv.key] = kv.values
|
||||
}
|
||||
u.RawQuery = q.Encode()
|
||||
}
|
||||
// Reparsing, in case the path rewriting broke the URL
|
||||
u, err = url.Parse(u.String())
|
||||
if err != nil {
|
||||
return new(url.URL), err
|
||||
}
|
||||
return u, nil
|
||||
}
|
|
@ -0,0 +1,3 @@
|
|||
// Package requests is a convenience wrapper around net/http to make it faster
|
||||
// and easier to build requests and custom transports.
|
||||
package requests
|
|
@ -0,0 +1,20 @@
|
|||
package requests
|
||||
|
||||
// ErrorKind indicates where an error was returned in the process of building, validating, and handling a request.
|
||||
// Errors returned by Builder can be tested for their ErrorKind using errors.Is or errors.As.
|
||||
type ErrorKind int8
|
||||
|
||||
//go:generate stringer -type=ErrorKind
|
||||
|
||||
// Enum values for type ErrorKind
|
||||
const (
|
||||
ErrURL ErrorKind = iota // error building URL
|
||||
ErrRequest // error building the request
|
||||
ErrTransport // error connecting
|
||||
ErrValidator // validator error
|
||||
ErrHandler // handler error
|
||||
)
|
||||
|
||||
func (ek ErrorKind) Error() string {
|
||||
return ek.String()
|
||||
}
|
|
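Because Builder wraps every failure with one of these sentinels via joinerrs, callers can classify errors with errors.Is. A sketch; the unresolvable host is a placeholder:

package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/carlmjohnson/requests"
)

func main() {
	err := requests.
		URL("https://example.invalid/"). // placeholder host that will not resolve
		Fetch(context.Background())
	switch {
	case err == nil:
		fmt.Println("ok")
	case errors.Is(err, requests.ErrTransport):
		fmt.Println("could not connect:", err)
	case errors.Is(err, requests.ErrValidator):
		fmt.Println("unexpected status or content type:", err)
	default:
		fmt.Println("other error:", err)
	}
}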
@ -0,0 +1,27 @@
|
|||
// Code generated by "stringer -type=ErrorKind"; DO NOT EDIT.
|
||||
|
||||
package requests
|
||||
|
||||
import "strconv"
|
||||
|
||||
func _() {
|
||||
// An "invalid array index" compiler error signifies that the constant values have changed.
|
||||
// Re-run the stringer command to generate them again.
|
||||
var x [1]struct{}
|
||||
_ = x[ErrURL-0]
|
||||
_ = x[ErrRequest-1]
|
||||
_ = x[ErrTransport-2]
|
||||
_ = x[ErrValidator-3]
|
||||
_ = x[ErrHandler-4]
|
||||
}
|
||||
|
||||
const _ErrorKind_name = "ErrURLErrRequestErrTransportErrValidatorErrHandler"
|
||||
|
||||
var _ErrorKind_index = [...]uint8{0, 6, 16, 28, 40, 50}
|
||||
|
||||
func (i ErrorKind) String() string {
|
||||
if i < 0 || i >= ErrorKind(len(_ErrorKind_index)-1) {
|
||||
return "ErrorKind(" + strconv.FormatInt(int64(i), 10) + ")"
|
||||
}
|
||||
return _ErrorKind_name[_ErrorKind_index[i]:_ErrorKind_index[i+1]]
|
||||
}
|
|
@ -0,0 +1,135 @@
|
|||
package requests
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/net/html"
|
||||
)
|
||||
|
||||
// ResponseHandler is used to validate or handle the response to a request.
|
||||
type ResponseHandler = func(*http.Response) error
|
||||
|
||||
// ChainHandlers allows for the composing of validators or response handlers.
|
||||
func ChainHandlers(handlers ...ResponseHandler) ResponseHandler {
|
||||
return func(r *http.Response) error {
|
||||
for _, h := range handlers {
|
||||
if h == nil {
|
||||
continue
|
||||
}
|
||||
if err := h(r); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func consumeBody(res *http.Response) (err error) {
|
||||
const maxDiscardSize = 640 * 1 << 10
|
||||
if _, err = io.CopyN(io.Discard, res.Body, maxDiscardSize); err == io.EOF {
|
||||
err = nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// ToDeserializer decodes a response into v using a [Deserializer].
|
||||
func ToDeserializer(d Deserializer, v any) ResponseHandler {
|
||||
return func(res *http.Response) error {
|
||||
data, err := io.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err = d(data, v); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// ToJSON decodes a response as a JSON object.
|
||||
//
|
||||
// It uses [JSONDeserializer] to unmarshal the object.
|
||||
func ToJSON(v any) ResponseHandler {
|
||||
return ToDeserializer(JSONDeserializer, v)
|
||||
}
|
||||
|
||||
// ToString writes the response body to the provided string pointer.
|
||||
func ToString(sp *string) ResponseHandler {
|
||||
return func(res *http.Response) error {
|
||||
var buf strings.Builder
|
||||
_, err := io.Copy(&buf, res.Body)
|
||||
if err == nil {
|
||||
*sp = buf.String()
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// ToBytesBuffer writes the response body to the provided bytes.Buffer.
|
||||
func ToBytesBuffer(buf *bytes.Buffer) ResponseHandler {
|
||||
return func(res *http.Response) error {
|
||||
_, err := io.Copy(buf, res.Body)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// ToBufioReader takes a callback which wraps the response body in a bufio.Reader.
|
||||
func ToBufioReader(f func(r *bufio.Reader) error) ResponseHandler {
|
||||
return func(res *http.Response) error {
|
||||
return f(bufio.NewReader(res.Body))
|
||||
}
|
||||
}
|
||||
|
||||
// ToBufioScanner takes a callback which wraps the response body in a bufio.Scanner.
|
||||
func ToBufioScanner(f func(r *bufio.Scanner) error) ResponseHandler {
|
||||
return func(res *http.Response) error {
|
||||
return f(bufio.NewScanner(res.Body))
|
||||
}
|
||||
}
|
||||
|
||||
// ToHTML parses the page with x/net/html.Parse.
|
||||
func ToHTML(n *html.Node) ResponseHandler {
|
||||
return ToBufioReader(func(r *bufio.Reader) error {
|
||||
n2, err := html.Parse(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*n = *n2
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// ToWriter copies the response body to w.
|
||||
func ToWriter(w io.Writer) ResponseHandler {
|
||||
return ToBufioReader(func(r *bufio.Reader) error {
|
||||
_, err := io.Copy(w, r)
|
||||
|
||||
return err
|
||||
})
|
||||
}
|
||||
|
||||
// ToFile writes the response body at the provided file path.
|
||||
// The file and its parent directories are created automatically.
|
||||
func ToFile(name string) ResponseHandler {
|
||||
return func(res *http.Response) error {
|
||||
_ = os.MkdirAll(filepath.Dir(name), 0777)
|
||||
|
||||
f, err := os.Create(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
_, err = io.Copy(f, res.Body)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// ToHeaders is an alias for backwards compatibility.
|
||||
var ToHeaders = CopyHeaders
|
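Since ResponseHandler is just func(*http.Response) error, custom handlers compose with the built-ins through ChainHandlers. A sketch that records the status code while still reading the body into a string; the URL and handler name are illustrative:

package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/carlmjohnson/requests"
)

func main() {
	var (
		status int
		body   string
	)
	// A tiny custom handler plus the stock ToString handler, run in order.
	recordStatus := func(res *http.Response) error {
		status = res.StatusCode
		return nil
	}
	err := requests.
		URL("https://example.com"). // placeholder URL
		Handle(requests.ChainHandlers(recordStatus, requests.ToString(&body))).
		Fetch(context.Background())
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(status, len(body))
}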
20
vendor/github.com/carlmjohnson/requests/internal/minitrue/minitrue.go
generated
vendored
Normal file
|
@ -0,0 +1,20 @@
|
|||
// Package minitrue - Whatever the Package holds to be the truth, is truth.
|
||||
package minitrue
|
||||
|
||||
func Cond[T any](val bool, a, b T) T {
|
||||
if val {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// Or returns the first non-empty argument it receives
|
||||
// or the zero value for T.
|
||||
func Or[T comparable](vals ...T) T {
|
||||
for _, val := range vals {
|
||||
if val != *new(T) {
|
||||
return val
|
||||
}
|
||||
}
|
||||
return *new(T)
|
||||
}
|
|
@ -0,0 +1,6 @@
|
|||
package slicex
|
||||
|
||||
func Clip[T any](sp *[]T) {
|
||||
s := *sp
|
||||
*sp = s[:len(s):len(s)]
|
||||
}
|
|
@ -0,0 +1,123 @@
|
|||
package requests
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"crypto/md5"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"net/http"
|
||||
"net/http/httputil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// Record returns an http.RoundTripper that writes out its
|
||||
// requests and their responses to text files in basepath.
|
||||
// Requests are named according to a hash of their contents.
|
||||
// Responses are named according to the request that made them.
|
||||
func Record(rt http.RoundTripper, basepath string) Transport {
|
||||
if rt == nil {
|
||||
rt = http.DefaultTransport
|
||||
}
|
||||
return RoundTripFunc(func(req *http.Request) (res *http.Response, err error) {
|
||||
defer func() {
|
||||
if err != nil {
|
||||
err = fmt.Errorf("problem while recording transport: %w", err)
|
||||
}
|
||||
}()
|
||||
_ = os.MkdirAll(basepath, 0755)
|
||||
b, err := httputil.DumpRequest(req, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
reqname, resname := buildName(b)
|
||||
name := filepath.Join(basepath, reqname)
|
||||
if err = os.WriteFile(name, b, 0644); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if res, err = rt.RoundTrip(req); err != nil {
|
||||
return
|
||||
}
|
||||
b, err = httputil.DumpResponse(res, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
name = filepath.Join(basepath, resname)
|
||||
if err = os.WriteFile(name, b, 0644); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return
|
||||
})
|
||||
}
|
||||
|
||||
// Replay returns an http.RoundTripper that reads its
|
||||
// responses from text files in basepath.
|
||||
// Responses are looked up according to a hash of the request.
|
||||
func Replay(basepath string) Transport {
|
||||
return ReplayFS(os.DirFS(basepath))
|
||||
}
|
||||
|
||||
var errNotFound = errors.New("response not found")
|
||||
|
||||
// ReplayFS returns an http.RoundTripper that reads its
|
||||
// responses from text files in the fs.FS.
|
||||
// Responses are looked up according to a hash of the request.
|
||||
// Response file names may optionally be prefixed with comments for better human organization.
|
||||
func ReplayFS(fsys fs.FS) Transport {
|
||||
return RoundTripFunc(func(req *http.Request) (res *http.Response, err error) {
|
||||
defer func() {
|
||||
if err != nil {
|
||||
err = fmt.Errorf("problem while replaying transport: %w", err)
|
||||
}
|
||||
}()
|
||||
b, err := httputil.DumpRequest(req, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, name := buildName(b)
|
||||
glob := "*" + name
|
||||
matches, err := fs.Glob(fsys, glob)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(matches) == 0 {
|
||||
return nil, fmt.Errorf("%w: no replay file matches %q", errNotFound, glob)
|
||||
}
|
||||
if len(matches) > 1 {
|
||||
return nil, fmt.Errorf("ambiguous response: multiple replay files match %q", glob)
|
||||
}
|
||||
b, err = fs.ReadFile(fsys, matches[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
r := bufio.NewReader(bytes.NewReader(b))
|
||||
return http.ReadResponse(r, req)
|
||||
})
|
||||
}
|
||||
|
||||
func buildName(b []byte) (reqname, resname string) {
|
||||
h := md5.New()
|
||||
h.Write(b)
|
||||
s := base64.URLEncoding.EncodeToString(h.Sum(nil))
|
||||
return s[:8] + ".req.txt", s[:8] + ".res.txt"
|
||||
}
|
||||
|
||||
// Caching returns an http.RoundTripper that attempts to read its
|
||||
// responses from text files in basepath. If the response is absent,
|
||||
// it caches the result of issuing the request with rt in basepath.
|
||||
// Requests are named according to a hash of their contents.
|
||||
// Responses are named according to the request that made them.
|
||||
func Caching(rt http.RoundTripper, basepath string) Transport {
|
||||
replay := Replay(basepath).RoundTrip
|
||||
record := Record(rt, basepath).RoundTrip
|
||||
return RoundTripFunc(func(req *http.Request) (res *http.Response, err error) {
|
||||
res, err = replay(req)
|
||||
if errors.Is(err, errNotFound) {
|
||||
res, err = record(req)
|
||||
}
|
||||
return
|
||||
})
|
||||
}
|
|
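Record, Replay, and Caching are aimed at tests: record fixtures once against the network, then replay them offline. A sketch under that assumption; the fixture directory and URL are placeholders:

package requests_test

import (
	"context"
	"testing"

	"github.com/carlmjohnson/requests"
)

func TestWithCachedFixtures(t *testing.T) {
	var body string
	// The first run hits the network and writes .req.txt/.res.txt files under testdata;
	// later runs are served from those files without a network connection.
	err := requests.
		URL("https://example.com"). // placeholder URL
		Transport(requests.Caching(nil, "testdata")).
		ToString(&body).
		Fetch(context.Background())
	if err != nil {
		t.Fatal(err)
	}
}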
@ -0,0 +1,19 @@
|
|||
package requests
|
||||
|
||||
import "net/http"
|
||||
|
||||
// CheckRedirectPolicy is a function suitable for use as CheckRedirect on an http.Client.
|
||||
type CheckRedirectPolicy = func(req *http.Request, via []*http.Request) error
|
||||
|
||||
// MaxFollow returns a CheckRedirectPolicy that follows a maximum of n redirects.
|
||||
func MaxFollow(n int) CheckRedirectPolicy {
|
||||
return func(req *http.Request, via []*http.Request) error {
|
||||
if len(via) > n {
|
||||
return http.ErrUseLastResponse
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// NoFollow is a CheckRedirectPolicy that does not follow redirects.
|
||||
var NoFollow CheckRedirectPolicy = MaxFollow(0)
|
|
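A CheckRedirectPolicy plugs straight into http.Client.CheckRedirect. A sketch that stops the client from following any redirect; the client setup and URL are illustrative:

package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/carlmjohnson/requests"
)

func main() {
	// NoFollow makes the client return the first response instead of following Location.
	cl := &http.Client{CheckRedirect: requests.NoFollow}

	var body string
	err := requests.
		URL("https://example.com/redirecting-page"). // placeholder URL
		Client(cl).
		CheckStatus(http.StatusMovedPermanently, http.StatusFound, http.StatusOK).
		ToString(&body).
		Fetch(context.Background())
	fmt.Println(err)
}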
@ -0,0 +1,24 @@
|
|||
package requests
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
)
|
||||
|
||||
// Serializer is a function
|
||||
// that can convert arbitrary data
|
||||
// to bytes in some format.
|
||||
type Serializer = func(v any) ([]byte, error)
|
||||
|
||||
// Deserializer is a function
|
||||
// that can read data in some format
|
||||
// and store the result in v.
|
||||
type Deserializer = func(data []byte, v any) error
|
||||
|
||||
var (
|
||||
// JSONSerializer is used by BodyJSON and Builder.BodyJSON.
|
||||
// The default serializer may be changed in a future version of requests.
|
||||
JSONSerializer Serializer = json.Marshal
|
||||
// JSONDeserializer is used by ToJSON and Builder.ToJSON.
|
||||
// The default deserializer may be changed in a future version of requests.
|
||||
JSONDeserializer Deserializer = json.Unmarshal
|
||||
)
|
|
@ -0,0 +1,117 @@
|
|||
package requests
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Transport is an alias of http.RoundTripper for documentation purposes.
|
||||
type Transport = http.RoundTripper
|
||||
|
||||
// RoundTripFunc is an adaptor to use a function as an http.RoundTripper.
|
||||
type RoundTripFunc func(req *http.Request) (res *http.Response, err error)
|
||||
|
||||
// RoundTrip implements http.RoundTripper.
|
||||
func (rtf RoundTripFunc) RoundTrip(r *http.Request) (*http.Response, error) {
|
||||
return rtf(r)
|
||||
}
|
||||
|
||||
var _ Transport = RoundTripFunc(nil)
|
||||
|
||||
// ReplayString returns an http.RoundTripper that always responds with a
|
||||
// response built from rawResponse. It is intended for use in one-off tests.
|
||||
func ReplayString(rawResponse string) Transport {
|
||||
return RoundTripFunc(func(req *http.Request) (res *http.Response, err error) {
|
||||
r := bufio.NewReader(strings.NewReader(rawResponse))
|
||||
res, err = http.ReadResponse(r, req)
|
||||
return
|
||||
})
|
||||
}
|
||||
|
||||
// UserAgentTransport returns a wrapped http.RoundTripper that sets the User-Agent header on requests to s.
|
||||
func UserAgentTransport(rt http.RoundTripper, s string) Transport {
|
||||
if rt == nil {
|
||||
rt = http.DefaultTransport
|
||||
}
|
||||
return RoundTripFunc(func(req *http.Request) (res *http.Response, err error) {
|
||||
r2 := *req
|
||||
r2.Header = r2.Header.Clone()
|
||||
r2.Header.Set("User-Agent", s)
|
||||
return rt.RoundTrip(&r2)
|
||||
})
|
||||
}
|
||||
|
||||
// PermitURLTransport returns a wrapped http.RoundTripper that rejects any requests whose URL doesn't match the provided regular expression string.
|
||||
//
|
||||
// PermitURLTransport will panic if the regexp does not compile.
|
||||
func PermitURLTransport(rt http.RoundTripper, regex string) Transport {
|
||||
if rt == nil {
|
||||
rt = http.DefaultTransport
|
||||
}
|
||||
re := regexp.MustCompile(regex)
|
||||
reErr := fmt.Errorf("requested URL not permitted by regexp: %s", regex)
|
||||
return RoundTripFunc(func(req *http.Request) (res *http.Response, err error) {
|
||||
if u := req.URL.String(); !re.MatchString(u) {
|
||||
return nil, reErr
|
||||
}
|
||||
return rt.RoundTrip(req)
|
||||
})
|
||||
}
|
||||
|
||||
// LogTransport returns a wrapped http.RoundTripper
|
||||
// that calls fn with details when a response has finished.
|
||||
// A response is considered finished
|
||||
// when the wrapped http.RoundTripper returns an error
|
||||
// or the Response.Body is closed,
|
||||
// whichever comes first.
|
||||
// To simplify logging code,
|
||||
// a nil *http.Response is replaced with a new http.Response.
|
||||
func LogTransport(rt http.RoundTripper, fn func(req *http.Request, res *http.Response, err error, duration time.Duration)) Transport {
|
||||
if rt == nil {
|
||||
rt = http.DefaultTransport
|
||||
}
|
||||
return RoundTripFunc(func(req *http.Request) (res *http.Response, err error) {
|
||||
start := time.Now()
|
||||
res, err = rt.RoundTrip(req)
|
||||
if err != nil {
|
||||
res2 := res
|
||||
if res == nil {
|
||||
res2 = new(http.Response)
|
||||
}
|
||||
fn(req, res2, err, time.Since(start))
|
||||
return
|
||||
}
|
||||
|
||||
res.Body = closeLogger{res.Body, func() {
|
||||
fn(req, res, err, time.Since(start))
|
||||
}}
|
||||
return
|
||||
})
|
||||
}
|
||||
|
||||
type closeLogger struct {
|
||||
io.ReadCloser
|
||||
fn func()
|
||||
}
|
||||
|
||||
func (cl closeLogger) Close() error {
|
||||
cl.fn()
|
||||
return cl.ReadCloser.Close()
|
||||
}
|
||||
|
||||
// DoerTransport converts a Doer into a Transport.
|
||||
// It exists for compatibility with other libraries.
|
||||
// A Doer is an interface with a Do method.
|
||||
// Users should prefer Transport,
|
||||
// because Do is the interface of http.Client
|
||||
// which has higher level concerns.
|
||||
func DoerTransport(cl interface {
|
||||
Do(req *http.Request) (*http.Response, error)
|
||||
}) Transport {
|
||||
return RoundTripFunc(cl.Do)
|
||||
}
|
|
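ReplayString is handy for one-off tests where a single canned HTTP response is enough. A sketch; the raw response text is a made-up fixture:

package requests_test

import (
	"context"
	"testing"

	"github.com/carlmjohnson/requests"
)

func TestReplayStringSketch(t *testing.T) {
	// A minimal raw HTTP/1.1 response used as a fixture.
	const raw = "HTTP/1.1 200 OK\r\n" +
		"Content-Type: text/plain\r\n" +
		"Content-Length: 2\r\n\r\n" +
		"hi"

	var body string
	err := requests.
		URL("https://example.com"). // never contacted; the transport replays raw
		Transport(requests.ReplayString(raw)).
		ToString(&body).
		Fetch(context.Background())
	if err != nil || body != "hi" {
		t.Fatalf("got %q, %v", body, err)
	}
}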
@ -0,0 +1,110 @@
|
|||
package requests
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"mime"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// DefaultValidator is the validator applied by Builder unless otherwise specified.
|
||||
var DefaultValidator ResponseHandler = CheckStatus(
|
||||
http.StatusOK,
|
||||
http.StatusCreated,
|
||||
http.StatusAccepted,
|
||||
http.StatusNonAuthoritativeInfo,
|
||||
http.StatusNoContent,
|
||||
)
|
||||
|
||||
// ResponseError is the error type produced by CheckStatus and CheckContentType.
|
||||
type ResponseError http.Response
|
||||
|
||||
// Error fulfills the error interface.
|
||||
func (se *ResponseError) Error() string {
|
||||
return fmt.Sprintf("response error for %s", se.Request.URL.Redacted())
|
||||
}
|
||||
|
||||
// CheckStatus validates the response has an acceptable status code.
|
||||
func CheckStatus(acceptStatuses ...int) ResponseHandler {
|
||||
return func(res *http.Response) error {
|
||||
for _, code := range acceptStatuses {
|
||||
if res.StatusCode == code {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("%w: unexpected status: %d",
|
||||
(*ResponseError)(res), res.StatusCode)
|
||||
}
|
||||
}
|
||||
|
||||
// HasStatusErr returns true if err is a ResponseError caused by any of the codes given.
|
||||
func HasStatusErr(err error, codes ...int) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
if se := new(ResponseError); errors.As(err, &se) {
|
||||
for _, code := range codes {
|
||||
if se.StatusCode == code {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// CheckContentType validates that a response has one of the given content type headers.
|
||||
func CheckContentType(cts ...string) ResponseHandler {
|
||||
return func(res *http.Response) error {
|
||||
mt, _, err := mime.ParseMediaType(res.Header.Get("Content-Type"))
|
||||
if err != nil {
|
||||
return fmt.Errorf("%w: problem matching Content-Type",
|
||||
(*ResponseError)(res))
|
||||
}
|
||||
for _, ct := range cts {
|
||||
if mt == ct {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("%w: unexpected Content-Type: %s",
|
||||
(*ResponseError)(res), mt)
|
||||
}
|
||||
}
|
||||
|
||||
type bufioCloser struct {
|
||||
*bufio.Reader
|
||||
io.Closer
|
||||
}
|
||||
|
||||
// CheckPeek wraps the body of a response in a bufio.Reader and
|
||||
// gives f a peek at the first n bytes for validation.
|
||||
func CheckPeek(n int, f func([]byte) error) ResponseHandler {
|
||||
return func(res *http.Response) error {
|
||||
// ensure buffer is at least minimum size
|
||||
buf := bufio.NewReader(res.Body)
|
||||
// ensure large peeks will fit in the buffer
|
||||
buf = bufio.NewReaderSize(buf, n)
|
||||
res.Body = &bufioCloser{
|
||||
buf,
|
||||
res.Body,
|
||||
}
|
||||
b, err := buf.Peek(n)
|
||||
if err != nil && err != io.EOF {
|
||||
return err
|
||||
}
|
||||
return f(b)
|
||||
}
|
||||
}
|
||||
|
||||
// CopyHeaders copies the response headers to h.
|
||||
func CopyHeaders(h map[string][]string) ResponseHandler {
|
||||
return func(res *http.Response) error {
|
||||
for k, v := range res.Header {
|
||||
h[k] = v
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
|
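Because CheckStatus reports failures as a *ResponseError, HasStatusErr lets callers branch on specific status codes after the fact. A sketch; the URL is a placeholder:

package main

import (
	"context"
	"fmt"

	"github.com/carlmjohnson/requests"
)

func main() {
	var body string
	err := requests.
		URL("https://example.com/maybe-missing"). // placeholder URL
		ToString(&body).
		Fetch(context.Background())
	switch {
	case err == nil:
		fmt.Println("found:", len(body), "bytes")
	case requests.HasStatusErr(err, 404):
		fmt.Println("not found; treat as empty")
	default:
		fmt.Println("unexpected error:", err)
	}
}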
@ -0,0 +1,34 @@
|
|||
package requests
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// ValidatorHandler composes a Validator and a Handler.
|
||||
// If the validation check fails, it triggers the handler.
|
||||
// Any errors from validator or handler will be joined to the error returned.
|
||||
// If the handler succeeds, the error will match ErrInvalidHandled.
|
||||
func ValidatorHandler(v, h ResponseHandler) ResponseHandler {
|
||||
return func(res *http.Response) error {
|
||||
err1 := v(res)
|
||||
if err1 == nil { // passes validation
|
||||
return nil
|
||||
}
|
||||
err2 := h(res)
|
||||
if err2 == nil { // successfully handled
|
||||
return fmt.Errorf("%w: %w", ErrInvalidHandled, err1)
|
||||
}
|
||||
return errors.Join(err1, err2)
|
||||
}
|
||||
}
|
||||
|
||||
var ErrInvalidHandled = errors.New("handled recovery from invalid response")
|
||||
|
||||
// ErrorJSON is a ValidatorHandler that applies DefaultValidator
|
||||
// and decodes the response as a JSON object
|
||||
// if the DefaultValidator check fails.
|
||||
func ErrorJSON(v any) ResponseHandler {
|
||||
return ValidatorHandler(DefaultValidator, ToJSON(v))
|
||||
}
|
|
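ErrorJSON is useful against APIs that return a JSON error document on failure: the normal target is decoded on success and the error target on failure. A sketch; the URL and both struct shapes are placeholders:

package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/carlmjohnson/requests"
)

func main() {
	var ok struct {
		Data string `json:"data"`
	}
	var apiErr struct {
		Message string `json:"message"`
	}
	err := requests.
		URL("https://example.com/api/resource"). // placeholder URL
		ToJSON(&ok).
		ErrorJSON(&apiErr). // decoded only when DefaultValidator rejects the status
		Fetch(context.Background())
	switch {
	case err == nil:
		fmt.Println("data:", ok.Data)
	case errors.Is(err, requests.ErrInvalidHandled):
		fmt.Println("API error:", apiErr.Message)
	default:
		fmt.Println("request failed:", err)
	}
}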
@ -0,0 +1,25 @@
|
|||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
||||
|
||||
# IDEs
|
||||
.idea/
|
|
@ -0,0 +1,20 @@
|
|||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2014 Cenk Altı
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software is furnished to do so,
|
||||
subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|
@ -0,0 +1,32 @@
|
|||
# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Build Status][travis image]][travis] [![Coverage Status][coveralls image]][coveralls]
|
||||
|
||||
This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client].
|
||||
|
||||
[Exponential backoff][exponential backoff wiki]
|
||||
is an algorithm that uses feedback to multiplicatively decrease the rate of some process,
|
||||
in order to gradually find an acceptable rate.
|
||||
The retries exponentially increase and stop increasing when a certain threshold is met.
|
||||
|
||||
## Usage
|
||||
|
||||
Import path is `github.com/cenkalti/backoff/v4`. Please note the version part at the end.
|
||||
|
||||
Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation.
|
||||
|
||||
## Contributing
|
||||
|
||||
* I would like to keep this library as small as possible.
|
||||
* Please don't send a PR without opening an issue and discussing it first.
|
||||
* If proposed change is not a common use case, I will probably not accept it.
|
||||
|
||||
[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v4
|
||||
[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png
|
||||
[travis]: https://travis-ci.org/cenkalti/backoff
|
||||
[travis image]: https://travis-ci.org/cenkalti/backoff.png?branch=master
|
||||
[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master
|
||||
[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master
|
||||
|
||||
[google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java
|
||||
[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff
|
||||
|
||||
[advanced example]: https://pkg.go.dev/github.com/cenkalti/backoff/v4?tab=doc#pkg-examples
|
|
@ -0,0 +1,66 @@
|
|||
// Package backoff implements backoff algorithms for retrying operations.
|
||||
//
|
||||
// Use Retry function for retrying operations that may fail.
|
||||
// If Retry does not meet your needs,
|
||||
// copy/paste the function into your project and modify as you wish.
|
||||
//
|
||||
// There is also Ticker type similar to time.Ticker.
|
||||
// You can use it if you need to work with channels.
|
||||
//
|
||||
// See Examples section below for usage examples.
|
||||
package backoff
|
||||
|
||||
import "time"
|
||||
|
||||
// BackOff is a backoff policy for retrying an operation.
|
||||
type BackOff interface {
|
||||
// NextBackOff returns the duration to wait before retrying the operation,
|
||||
// or backoff.Stop to indicate that no more retries should be made.
|
||||
//
|
||||
// Example usage:
|
||||
//
|
||||
// duration := backoff.NextBackOff();
|
||||
// if (duration == backoff.Stop) {
|
||||
// // Do not retry operation.
|
||||
// } else {
|
||||
// // Sleep for duration and retry operation.
|
||||
// }
|
||||
//
|
||||
NextBackOff() time.Duration
|
||||
|
||||
// Reset to initial state.
|
||||
Reset()
|
||||
}
|
||||
|
||||
// Stop indicates that no more retries should be made for use in NextBackOff().
|
||||
const Stop time.Duration = -1
|
||||
|
||||
// ZeroBackOff is a fixed backoff policy whose backoff time is always zero,
|
||||
// meaning that the operation is retried immediately without waiting, indefinitely.
|
||||
type ZeroBackOff struct{}
|
||||
|
||||
func (b *ZeroBackOff) Reset() {}
|
||||
|
||||
func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 }
|
||||
|
||||
// StopBackOff is a fixed backoff policy that always returns backoff.Stop for
|
||||
// NextBackOff(), meaning that the operation should never be retried.
|
||||
type StopBackOff struct{}
|
||||
|
||||
func (b *StopBackOff) Reset() {}
|
||||
|
||||
func (b *StopBackOff) NextBackOff() time.Duration { return Stop }
|
||||
|
||||
// ConstantBackOff is a backoff policy that always returns the same backoff delay.
|
||||
// This is in contrast to an exponential backoff policy,
|
||||
// which returns a delay that grows longer as you call NextBackOff() over and over again.
|
||||
type ConstantBackOff struct {
|
||||
Interval time.Duration
|
||||
}
|
||||
|
||||
func (b *ConstantBackOff) Reset() {}
|
||||
func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval }
|
||||
|
||||
func NewConstantBackOff(d time.Duration) *ConstantBackOff {
|
||||
return &ConstantBackOff{Interval: d}
|
||||
}
|
|
@ -0,0 +1,62 @@
|
|||
package backoff
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
)
|
||||
|
||||
// BackOffContext is a backoff policy that stops retrying after the context
|
||||
// is canceled.
|
||||
type BackOffContext interface { // nolint: golint
|
||||
BackOff
|
||||
Context() context.Context
|
||||
}
|
||||
|
||||
type backOffContext struct {
|
||||
BackOff
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
// WithContext returns a BackOffContext with context ctx
|
||||
//
|
||||
// ctx must not be nil
|
||||
func WithContext(b BackOff, ctx context.Context) BackOffContext { // nolint: golint
|
||||
if ctx == nil {
|
||||
panic("nil context")
|
||||
}
|
||||
|
||||
if b, ok := b.(*backOffContext); ok {
|
||||
return &backOffContext{
|
||||
BackOff: b.BackOff,
|
||||
ctx: ctx,
|
||||
}
|
||||
}
|
||||
|
||||
return &backOffContext{
|
||||
BackOff: b,
|
||||
ctx: ctx,
|
||||
}
|
||||
}
|
||||
|
||||
func getContext(b BackOff) context.Context {
|
||||
if cb, ok := b.(BackOffContext); ok {
|
||||
return cb.Context()
|
||||
}
|
||||
if tb, ok := b.(*backOffTries); ok {
|
||||
return getContext(tb.delegate)
|
||||
}
|
||||
return context.Background()
|
||||
}
|
||||
|
||||
func (b *backOffContext) Context() context.Context {
|
||||
return b.ctx
|
||||
}
|
||||
|
||||
func (b *backOffContext) NextBackOff() time.Duration {
|
||||
select {
|
||||
case <-b.ctx.Done():
|
||||
return Stop
|
||||
default:
|
||||
return b.BackOff.NextBackOff()
|
||||
}
|
||||
}
|
|
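WithContext wraps any BackOff so that cancellation ends the retry loop. A sketch pairing it with Retry from the retry file below; the always-failing operation is a stand-in:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// Stand-in operation that always fails, so retries continue
	// until the context deadline stops them.
	op := func() error { return errors.New("still failing") }

	b := backoff.WithContext(backoff.NewExponentialBackOff(), ctx)
	err := backoff.Retry(op, b)
	fmt.Println(err) // context deadline exceeded, or the last operation error
}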
@ -0,0 +1,161 @@
|
|||
package backoff
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"time"
|
||||
)
|
||||
|
||||
/*
|
||||
ExponentialBackOff is a backoff implementation that increases the backoff
|
||||
period for each retry attempt using a randomization function that grows exponentially.
|
||||
|
||||
NextBackOff() is calculated using the following formula:
|
||||
|
||||
randomized interval =
|
||||
RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor])
|
||||
|
||||
In other words NextBackOff() will range between the randomization factor
|
||||
percentage below and above the retry interval.
|
||||
|
||||
For example, given the following parameters:
|
||||
|
||||
RetryInterval = 2
|
||||
RandomizationFactor = 0.5
|
||||
Multiplier = 2
|
||||
|
||||
the actual backoff period used in the next retry attempt will range between 1 and 3 seconds,
|
||||
multiplied by the exponential, that is, between 2 and 6 seconds.
|
||||
|
||||
Note: MaxInterval caps the RetryInterval and not the randomized interval.
|
||||
|
||||
If the time elapsed since an ExponentialBackOff instance is created goes past the
|
||||
MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop.
|
||||
|
||||
The elapsed time can be reset by calling Reset().
|
||||
|
||||
Example: Given the following default arguments, for 10 tries the sequence will be,
|
||||
and assuming we go over the MaxElapsedTime on the 10th try:
|
||||
|
||||
Request # RetryInterval (seconds) Randomized Interval (seconds)
|
||||
|
||||
1 0.5 [0.25, 0.75]
|
||||
2 0.75 [0.375, 1.125]
|
||||
3 1.125 [0.562, 1.687]
|
||||
4 1.687 [0.8435, 2.53]
|
||||
5 2.53 [1.265, 3.795]
|
||||
6 3.795 [1.897, 5.692]
|
||||
7 5.692 [2.846, 8.538]
|
||||
8 8.538 [4.269, 12.807]
|
||||
9 12.807 [6.403, 19.210]
|
||||
10 19.210 backoff.Stop
|
||||
|
||||
Note: Implementation is not thread-safe.
|
||||
*/
|
||||
type ExponentialBackOff struct {
|
||||
InitialInterval time.Duration
|
||||
RandomizationFactor float64
|
||||
Multiplier float64
|
||||
MaxInterval time.Duration
|
||||
// After MaxElapsedTime the ExponentialBackOff returns Stop.
|
||||
// It never stops if MaxElapsedTime == 0.
|
||||
MaxElapsedTime time.Duration
|
||||
Stop time.Duration
|
||||
Clock Clock
|
||||
|
||||
currentInterval time.Duration
|
||||
startTime time.Time
|
||||
}
|
||||
|
||||
// Clock is an interface that returns current time for BackOff.
|
||||
type Clock interface {
|
||||
Now() time.Time
|
||||
}
|
||||
|
||||
// Default values for ExponentialBackOff.
|
||||
const (
|
||||
DefaultInitialInterval = 500 * time.Millisecond
|
||||
DefaultRandomizationFactor = 0.5
|
||||
DefaultMultiplier = 1.5
|
||||
DefaultMaxInterval = 60 * time.Second
|
||||
DefaultMaxElapsedTime = 15 * time.Minute
|
||||
)
|
||||
|
||||
// NewExponentialBackOff creates an instance of ExponentialBackOff using default values.
|
||||
func NewExponentialBackOff() *ExponentialBackOff {
|
||||
b := &ExponentialBackOff{
|
||||
InitialInterval: DefaultInitialInterval,
|
||||
RandomizationFactor: DefaultRandomizationFactor,
|
||||
Multiplier: DefaultMultiplier,
|
||||
MaxInterval: DefaultMaxInterval,
|
||||
MaxElapsedTime: DefaultMaxElapsedTime,
|
||||
Stop: Stop,
|
||||
Clock: SystemClock,
|
||||
}
|
||||
b.Reset()
|
||||
return b
|
||||
}
|
||||
|
||||
type systemClock struct{}
|
||||
|
||||
func (t systemClock) Now() time.Time {
|
||||
return time.Now()
|
||||
}
|
||||
|
||||
// SystemClock implements Clock interface that uses time.Now().
|
||||
var SystemClock = systemClock{}
|
||||
|
||||
// Reset the interval back to the initial retry interval and restarts the timer.
|
||||
// Reset must be called before using b.
|
||||
func (b *ExponentialBackOff) Reset() {
|
||||
b.currentInterval = b.InitialInterval
|
||||
b.startTime = b.Clock.Now()
|
||||
}
|
||||
|
||||
// NextBackOff calculates the next backoff interval using the formula:
|
||||
// Randomized interval = RetryInterval * (1 ± RandomizationFactor)
|
||||
func (b *ExponentialBackOff) NextBackOff() time.Duration {
|
||||
// Make sure we have not gone over the maximum elapsed time.
|
||||
elapsed := b.GetElapsedTime()
|
||||
next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval)
|
||||
b.incrementCurrentInterval()
|
||||
if b.MaxElapsedTime != 0 && elapsed+next > b.MaxElapsedTime {
|
||||
return b.Stop
|
||||
}
|
||||
return next
|
||||
}
|
||||
|
||||
// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance
|
||||
// is created and is reset when Reset() is called.
|
||||
//
|
||||
// The elapsed time is computed using time.Now().UnixNano(). It is
|
||||
// safe to call even while the backoff policy is used by a running
|
||||
// ticker.
|
||||
func (b *ExponentialBackOff) GetElapsedTime() time.Duration {
|
||||
return b.Clock.Now().Sub(b.startTime)
|
||||
}
|
||||
|
||||
// Increments the current interval by multiplying it with the multiplier.
|
||||
func (b *ExponentialBackOff) incrementCurrentInterval() {
|
||||
// Check for overflow, if overflow is detected set the current interval to the max interval.
|
||||
if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier {
|
||||
b.currentInterval = b.MaxInterval
|
||||
} else {
|
||||
b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier)
|
||||
}
|
||||
}
|
||||
|
||||
// Returns a random value from the following interval:
|
||||
// [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval].
|
||||
func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration {
|
||||
if randomizationFactor == 0 {
|
||||
return currentInterval // make sure no randomness is used when randomizationFactor is 0.
|
||||
}
|
||||
var delta = randomizationFactor * float64(currentInterval)
|
||||
var minInterval = float64(currentInterval) - delta
|
||||
var maxInterval = float64(currentInterval) + delta
|
||||
|
||||
// Get a random value from the range [minInterval, maxInterval].
|
||||
// The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then
|
||||
// we want a 33% chance for selecting either 1, 2 or 3.
|
||||
return time.Duration(minInterval + (random * (maxInterval - minInterval + 1)))
|
||||
}
|
|
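To make the documented formula concrete, the randomized interval for the defaults (InitialInterval 500ms, RandomizationFactor 0.5) can be computed directly. getRandomValueFromInterval is unexported, so this sketch re-implements its arithmetic purely for illustration:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// randomized reproduces the arithmetic of getRandomValueFromInterval
// for illustration: interval * (random value in [1-f, 1+f]).
func randomized(f, random float64, current time.Duration) time.Duration {
	delta := f * float64(current)
	lower := float64(current) - delta
	upper := float64(current) + delta
	return time.Duration(lower + random*(upper-lower+1))
}

func main() {
	current := 500 * time.Millisecond // DefaultInitialInterval
	const factor = 0.5                // DefaultRandomizationFactor
	// With factor 0.5 the result falls in [250ms, 750ms], matching the first row
	// of the table in the ExponentialBackOff documentation above.
	fmt.Println(randomized(factor, rand.Float64(), current))
}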
@ -0,0 +1,146 @@
|
|||
package backoff
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"time"
|
||||
)
|
||||
|
||||
// An OperationWithData is executed by RetryWithData() or RetryNotifyWithData().
|
||||
// The operation will be retried using a backoff policy if it returns an error.
|
||||
type OperationWithData[T any] func() (T, error)
|
||||
|
||||
// An Operation is executed by Retry() or RetryNotify().
|
||||
// The operation will be retried using a backoff policy if it returns an error.
|
||||
type Operation func() error
|
||||
|
||||
func (o Operation) withEmptyData() OperationWithData[struct{}] {
|
||||
return func() (struct{}, error) {
|
||||
return struct{}{}, o()
|
||||
}
|
||||
}
|
||||
|
||||
// Notify is a notify-on-error function. It receives an operation error and
|
||||
// backoff delay if the operation failed (with an error).
|
||||
//
|
||||
// NOTE that if the backoff policy decides to stop retrying,
|
||||
// the notify function isn't called.
|
||||
type Notify func(error, time.Duration)
|
||||
|
||||
// Retry the operation o until it does not return error or BackOff stops.
|
||||
// o is guaranteed to be run at least once.
|
||||
//
|
||||
// If o returns a *PermanentError, the operation is not retried, and the
|
||||
// wrapped error is returned.
|
||||
//
|
||||
// Retry sleeps the goroutine for the duration returned by BackOff after a
|
||||
// failed operation returns.
|
||||
func Retry(o Operation, b BackOff) error {
|
||||
return RetryNotify(o, b, nil)
|
||||
}
|
||||
|
||||
// RetryWithData is like Retry but returns data in the response too.
|
||||
func RetryWithData[T any](o OperationWithData[T], b BackOff) (T, error) {
|
||||
return RetryNotifyWithData(o, b, nil)
|
||||
}
|
||||
|
||||
// RetryNotify calls notify function with the error and wait duration
|
||||
// for each failed attempt before sleep.
|
||||
func RetryNotify(operation Operation, b BackOff, notify Notify) error {
|
||||
return RetryNotifyWithTimer(operation, b, notify, nil)
|
||||
}
|
||||
|
||||
// RetryNotifyWithData is like RetryNotify but returns data in the response too.
|
||||
func RetryNotifyWithData[T any](operation OperationWithData[T], b BackOff, notify Notify) (T, error) {
|
||||
return doRetryNotify(operation, b, notify, nil)
|
||||
}
|
||||
|
||||
// RetryNotifyWithTimer calls notify function with the error and wait duration using the given Timer
|
||||
// for each failed attempt before sleep.
|
||||
// A default timer that uses the system timer is used when nil is passed.
|
||||
func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer) error {
|
||||
_, err := doRetryNotify(operation.withEmptyData(), b, notify, t)
|
||||
return err
|
||||
}
|
||||
|
||||
// RetryNotifyWithTimerAndData is like RetryNotifyWithTimer but returns data in the response too.
|
||||
func RetryNotifyWithTimerAndData[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) {
|
||||
return doRetryNotify(operation, b, notify, t)
|
||||
}
|
||||
|
||||
func doRetryNotify[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) {
|
||||
var (
|
||||
err error
|
||||
next time.Duration
|
||||
res T
|
||||
)
|
||||
if t == nil {
|
||||
t = &defaultTimer{}
|
||||
}
|
||||
|
||||
defer func() {
|
||||
t.Stop()
|
||||
}()
|
||||
|
||||
ctx := getContext(b)
|
||||
|
||||
b.Reset()
|
||||
for {
|
||||
res, err = operation()
|
||||
if err == nil {
|
||||
return res, nil
|
||||
}
|
||||
|
||||
var permanent *PermanentError
|
||||
if errors.As(err, &permanent) {
|
||||
return res, permanent.Err
|
||||
}
|
||||
|
||||
if next = b.NextBackOff(); next == Stop {
|
||||
if cerr := ctx.Err(); cerr != nil {
|
||||
return res, cerr
|
||||
}
|
||||
|
||||
return res, err
|
||||
}
|
||||
|
||||
if notify != nil {
|
||||
notify(err, next)
|
||||
}
|
||||
|
||||
t.Start(next)
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return res, ctx.Err()
|
||||
case <-t.C():
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// PermanentError signals that the operation should not be retried.
|
||||
type PermanentError struct {
|
||||
Err error
|
||||
}
|
||||
|
||||
func (e *PermanentError) Error() string {
|
||||
return e.Err.Error()
|
||||
}
|
||||
|
||||
func (e *PermanentError) Unwrap() error {
|
||||
return e.Err
|
||||
}
|
||||
|
||||
func (e *PermanentError) Is(target error) bool {
|
||||
_, ok := target.(*PermanentError)
|
||||
return ok
|
||||
}
|
||||
|
||||
// Permanent wraps the given err in a *PermanentError.
|
||||
func Permanent(err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
return &PermanentError{
|
||||
Err: err,
|
||||
}
|
||||
}
|
|
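A typical pairing of Retry with the exponential policy, the WithMaxRetries wrapper defined later in this vendor tree, and Permanent for non-retryable failures. The flaky operation below is a stand-in:

package main

import (
	"errors"
	"fmt"

	"github.com/cenkalti/backoff/v4"
)

var errNotReady = errors.New("not ready yet")

func main() {
	attempts := 0
	// Stand-in operation: fails twice, then succeeds.
	op := func() error {
		attempts++
		if attempts < 3 {
			return errNotReady
		}
		return nil
	}

	// At most 5 retries with exponentially growing, jittered delays.
	policy := backoff.WithMaxRetries(backoff.NewExponentialBackOff(), 5)
	if err := backoff.Retry(op, policy); err != nil {
		fmt.Println("gave up:", err)
		return
	}
	fmt.Println("succeeded after", attempts, "attempts")

	// Wrapping an error with backoff.Permanent stops retries immediately.
	_ = backoff.Retry(func() error {
		return backoff.Permanent(errors.New("fatal, do not retry"))
	}, backoff.NewExponentialBackOff())
}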
@@ -0,0 +1,97 @@
package backoff

import (
    "context"
    "sync"
    "time"
)

// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff.
//
// Ticks will continue to arrive when the previous operation is still running,
// so operations that take a while to fail could run in quick succession.
type Ticker struct {
    C        <-chan time.Time
    c        chan time.Time
    b        BackOff
    ctx      context.Context
    timer    Timer
    stop     chan struct{}
    stopOnce sync.Once
}

// NewTicker returns a new Ticker containing a channel that will send
// the time at times specified by the BackOff argument. Ticker is
// guaranteed to tick at least once. The channel is closed when Stop
// method is called or BackOff stops. It is not safe to manipulate the
// provided backoff policy (notably calling NextBackOff or Reset)
// while the ticker is running.
func NewTicker(b BackOff) *Ticker {
    return NewTickerWithTimer(b, &defaultTimer{})
}

// NewTickerWithTimer returns a new Ticker with a custom timer.
// A default timer that uses system timer is used when nil is passed.
func NewTickerWithTimer(b BackOff, timer Timer) *Ticker {
    if timer == nil {
        timer = &defaultTimer{}
    }
    c := make(chan time.Time)
    t := &Ticker{
        C:     c,
        c:     c,
        b:     b,
        ctx:   getContext(b),
        timer: timer,
        stop:  make(chan struct{}),
    }
    t.b.Reset()
    go t.run()
    return t
}

// Stop turns off a ticker. After Stop, no more ticks will be sent.
func (t *Ticker) Stop() {
    t.stopOnce.Do(func() { close(t.stop) })
}

func (t *Ticker) run() {
    c := t.c
    defer close(c)

    // Ticker is guaranteed to tick at least once.
    afterC := t.send(time.Now())

    for {
        if afterC == nil {
            return
        }

        select {
        case tick := <-afterC:
            afterC = t.send(tick)
        case <-t.stop:
            t.c = nil // Prevent future ticks from being sent to the channel.
            return
        case <-t.ctx.Done():
            return
        }
    }
}

func (t *Ticker) send(tick time.Time) <-chan time.Time {
    select {
    case t.c <- tick:
    case <-t.stop:
        return nil
    }

    next := t.b.NextBackOff()
    if next == Stop {
        t.Stop()
        return nil
    }

    t.timer.Start(next)
    return t.timer.C()
}
@@ -0,0 +1,35 @@
package backoff

import "time"

type Timer interface {
    Start(duration time.Duration)
    Stop()
    C() <-chan time.Time
}

// defaultTimer implements Timer interface using time.Timer
type defaultTimer struct {
    timer *time.Timer
}

// C returns the timers channel which receives the current time when the timer fires.
func (t *defaultTimer) C() <-chan time.Time {
    return t.timer.C
}

// Start starts the timer to fire after the given duration
func (t *defaultTimer) Start(duration time.Duration) {
    if t.timer == nil {
        t.timer = time.NewTimer(duration)
    } else {
        t.timer.Reset(duration)
    }
}

// Stop is called when the timer is not used anymore and resources may be freed.
func (t *defaultTimer) Stop() {
    if t.timer != nil {
        t.timer.Stop()
    }
}
@@ -0,0 +1,38 @@
package backoff

import "time"

/*
WithMaxRetries creates a wrapper around another BackOff, which will
return Stop if NextBackOff() has been called too many times since
the last time Reset() was called

Note: Implementation is not thread-safe.
*/
func WithMaxRetries(b BackOff, max uint64) BackOff {
    return &backOffTries{delegate: b, maxTries: max}
}

type backOffTries struct {
    delegate BackOff
    maxTries uint64
    numTries uint64
}

func (b *backOffTries) NextBackOff() time.Duration {
    if b.maxTries == 0 {
        return Stop
    }
    if b.maxTries > 0 {
        if b.maxTries <= b.numTries {
            return Stop
        }
        b.numTries++
    }
    return b.delegate.NextBackOff()
}

func (b *backOffTries) Reset() {
    b.numTries = 0
    b.delegate.Reset()
}
@@ -0,0 +1,22 @@
Copyright (c) 2016 Caleb Spare

MIT License

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
@@ -0,0 +1,72 @@
# xxhash

[![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2)
[![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml)

xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a
high-quality hashing algorithm that is much faster than anything in the Go
standard library.

This package provides a straightforward API:

```
func Sum64(b []byte) uint64
func Sum64String(s string) uint64
type Digest struct{ ... }
func New() *Digest
```

The `Digest` type implements hash.Hash64. Its key methods are:

```
func (*Digest) Write([]byte) (int, error)
func (*Digest) WriteString(string) (int, error)
func (*Digest) Sum64() uint64
```
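
To make the API above concrete, here is a minimal usage sketch (an illustrative addition, not part of the upstream README); it assumes the package is imported by its module path `github.com/cespare/xxhash/v2`, which is how this vendored copy is laid out:

```
package main

import (
    "fmt"

    xxhash "github.com/cespare/xxhash/v2"
)

func main() {
    // One-shot hashing of a []byte or a string.
    fmt.Println(xxhash.Sum64([]byte("hello")))
    fmt.Println(xxhash.Sum64String("hello"))

    // Streaming: a Digest accumulates writes and yields the same value
    // as hashing the concatenated input in one call.
    d := xxhash.New()
    d.WriteString("hel")
    d.WriteString("lo")
    fmt.Println(d.Sum64()) // same value as Sum64String("hello")
}
```

Write and WriteString always return len(input), nil, so their error results can safely be ignored here.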

The package is written with optimized pure Go and also contains even faster
assembly implementations for amd64 and arm64. If desired, the `purego` build tag
opts into using the Go code even on those architectures.

[xxHash]: http://cyan4973.github.io/xxHash/

## Compatibility

This package is in a module and the latest code is in version 2 of the module.
You need a version of Go with at least "minimal module compatibility" to use
github.com/cespare/xxhash/v2:

* 1.9.7+ for Go 1.9
* 1.10.3+ for Go 1.10
* Go 1.11 or later

I recommend using the latest release of Go.

## Benchmarks

Here are some quick benchmarks comparing the pure-Go and assembly
implementations of Sum64.

| input size | purego    | asm       |
| ---------- | --------- | --------- |
| 4 B        | 1.3 GB/s  | 1.2 GB/s  |
| 16 B       | 2.9 GB/s  | 3.5 GB/s  |
| 100 B      | 6.9 GB/s  | 8.1 GB/s  |
| 4 KB       | 11.7 GB/s | 16.7 GB/s |
| 10 MB      | 12.0 GB/s | 17.3 GB/s |

These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C
CPU using the following commands under Go 1.19.2:

```
benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$')
benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$')
```

## Projects using this package

- [InfluxDB](https://github.com/influxdata/influxdb)
- [Prometheus](https://github.com/prometheus/prometheus)
- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
- [FreeCache](https://github.com/coocood/freecache)
- [FastCache](https://github.com/VictoriaMetrics/fastcache)
|
@@ -0,0 +1,10 @@
#!/bin/bash
set -eu -o pipefail

# Small convenience script for running the tests with various combinations of
# arch/tags. This assumes we're running on amd64 and have qemu available.

go test ./...
go test -tags purego ./...
GOARCH=arm64 go test
GOARCH=arm64 go test -tags purego
@@ -0,0 +1,228 @@
|
|||
// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
|
||||
// at http://cyan4973.github.io/xxHash/.
|
||||
package xxhash
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"math/bits"
|
||||
)
|
||||
|
||||
const (
|
||||
prime1 uint64 = 11400714785074694791
|
||||
prime2 uint64 = 14029467366897019727
|
||||
prime3 uint64 = 1609587929392839161
|
||||
prime4 uint64 = 9650029242287828579
|
||||
prime5 uint64 = 2870177450012600261
|
||||
)
|
||||
|
||||
// Store the primes in an array as well.
|
||||
//
|
||||
// The consts are used when possible in Go code to avoid MOVs but we need a
|
||||
// contiguous array of the assembly code.
|
||||
var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5}
|
||||
|
||||
// Digest implements hash.Hash64.
|
||||
type Digest struct {
|
||||
v1 uint64
|
||||
v2 uint64
|
||||
v3 uint64
|
||||
v4 uint64
|
||||
total uint64
|
||||
mem [32]byte
|
||||
n int // how much of mem is used
|
||||
}
|
||||
|
||||
// New creates a new Digest that computes the 64-bit xxHash algorithm.
|
||||
func New() *Digest {
|
||||
var d Digest
|
||||
d.Reset()
|
||||
return &d
|
||||
}
|
||||
|
||||
// Reset clears the Digest's state so that it can be reused.
|
||||
func (d *Digest) Reset() {
|
||||
d.v1 = primes[0] + prime2
|
||||
d.v2 = prime2
|
||||
d.v3 = 0
|
||||
d.v4 = -primes[0]
|
||||
d.total = 0
|
||||
d.n = 0
|
||||
}
|
||||
|
||||
// Size always returns 8 bytes.
|
||||
func (d *Digest) Size() int { return 8 }
|
||||
|
||||
// BlockSize always returns 32 bytes.
|
||||
func (d *Digest) BlockSize() int { return 32 }
|
||||
|
||||
// Write adds more data to d. It always returns len(b), nil.
|
||||
func (d *Digest) Write(b []byte) (n int, err error) {
|
||||
n = len(b)
|
||||
d.total += uint64(n)
|
||||
|
||||
memleft := d.mem[d.n&(len(d.mem)-1):]
|
||||
|
||||
if d.n+n < 32 {
|
||||
// This new data doesn't even fill the current block.
|
||||
copy(memleft, b)
|
||||
d.n += n
|
||||
return
|
||||
}
|
||||
|
||||
if d.n > 0 {
|
||||
// Finish off the partial block.
|
||||
c := copy(memleft, b)
|
||||
d.v1 = round(d.v1, u64(d.mem[0:8]))
|
||||
d.v2 = round(d.v2, u64(d.mem[8:16]))
|
||||
d.v3 = round(d.v3, u64(d.mem[16:24]))
|
||||
d.v4 = round(d.v4, u64(d.mem[24:32]))
|
||||
b = b[c:]
|
||||
d.n = 0
|
||||
}
|
||||
|
||||
if len(b) >= 32 {
|
||||
// One or more full blocks left.
|
||||
nw := writeBlocks(d, b)
|
||||
b = b[nw:]
|
||||
}
|
||||
|
||||
// Store any remaining partial block.
|
||||
copy(d.mem[:], b)
|
||||
d.n = len(b)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Sum appends the current hash to b and returns the resulting slice.
|
||||
func (d *Digest) Sum(b []byte) []byte {
|
||||
s := d.Sum64()
|
||||
return append(
|
||||
b,
|
||||
byte(s>>56),
|
||||
byte(s>>48),
|
||||
byte(s>>40),
|
||||
byte(s>>32),
|
||||
byte(s>>24),
|
||||
byte(s>>16),
|
||||
byte(s>>8),
|
||||
byte(s),
|
||||
)
|
||||
}
|
||||
|
||||
// Sum64 returns the current hash.
|
||||
func (d *Digest) Sum64() uint64 {
|
||||
var h uint64
|
||||
|
||||
if d.total >= 32 {
|
||||
v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
|
||||
h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
|
||||
h = mergeRound(h, v1)
|
||||
h = mergeRound(h, v2)
|
||||
h = mergeRound(h, v3)
|
||||
h = mergeRound(h, v4)
|
||||
} else {
|
||||
h = d.v3 + prime5
|
||||
}
|
||||
|
||||
h += d.total
|
||||
|
||||
b := d.mem[:d.n&(len(d.mem)-1)]
|
||||
for ; len(b) >= 8; b = b[8:] {
|
||||
k1 := round(0, u64(b[:8]))
|
||||
h ^= k1
|
||||
h = rol27(h)*prime1 + prime4
|
||||
}
|
||||
if len(b) >= 4 {
|
||||
h ^= uint64(u32(b[:4])) * prime1
|
||||
h = rol23(h)*prime2 + prime3
|
||||
b = b[4:]
|
||||
}
|
||||
for ; len(b) > 0; b = b[1:] {
|
||||
h ^= uint64(b[0]) * prime5
|
||||
h = rol11(h) * prime1
|
||||
}
|
||||
|
||||
h ^= h >> 33
|
||||
h *= prime2
|
||||
h ^= h >> 29
|
||||
h *= prime3
|
||||
h ^= h >> 32
|
||||
|
||||
return h
|
||||
}
|
||||
|
||||
const (
|
||||
magic = "xxh\x06"
|
||||
marshaledSize = len(magic) + 8*5 + 32
|
||||
)
|
||||
|
||||
// MarshalBinary implements the encoding.BinaryMarshaler interface.
|
||||
func (d *Digest) MarshalBinary() ([]byte, error) {
|
||||
b := make([]byte, 0, marshaledSize)
|
||||
b = append(b, magic...)
|
||||
b = appendUint64(b, d.v1)
|
||||
b = appendUint64(b, d.v2)
|
||||
b = appendUint64(b, d.v3)
|
||||
b = appendUint64(b, d.v4)
|
||||
b = appendUint64(b, d.total)
|
||||
b = append(b, d.mem[:d.n]...)
|
||||
b = b[:len(b)+len(d.mem)-d.n]
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
|
||||
func (d *Digest) UnmarshalBinary(b []byte) error {
|
||||
if len(b) < len(magic) || string(b[:len(magic)]) != magic {
|
||||
return errors.New("xxhash: invalid hash state identifier")
|
||||
}
|
||||
if len(b) != marshaledSize {
|
||||
return errors.New("xxhash: invalid hash state size")
|
||||
}
|
||||
b = b[len(magic):]
|
||||
b, d.v1 = consumeUint64(b)
|
||||
b, d.v2 = consumeUint64(b)
|
||||
b, d.v3 = consumeUint64(b)
|
||||
b, d.v4 = consumeUint64(b)
|
||||
b, d.total = consumeUint64(b)
|
||||
copy(d.mem[:], b)
|
||||
d.n = int(d.total % uint64(len(d.mem)))
|
||||
return nil
|
||||
}
|
||||
|
||||
func appendUint64(b []byte, x uint64) []byte {
|
||||
var a [8]byte
|
||||
binary.LittleEndian.PutUint64(a[:], x)
|
||||
return append(b, a[:]...)
|
||||
}
|
||||
|
||||
func consumeUint64(b []byte) ([]byte, uint64) {
|
||||
x := u64(b)
|
||||
return b[8:], x
|
||||
}
|
||||
|
||||
func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
|
||||
func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }
|
||||
|
||||
func round(acc, input uint64) uint64 {
|
||||
acc += input * prime2
|
||||
acc = rol31(acc)
|
||||
acc *= prime1
|
||||
return acc
|
||||
}
|
||||
|
||||
func mergeRound(acc, val uint64) uint64 {
|
||||
val = round(0, val)
|
||||
acc ^= val
|
||||
acc = acc*prime1 + prime4
|
||||
return acc
|
||||
}
|
||||
|
||||
func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) }
|
||||
func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) }
|
||||
func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
|
||||
func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
|
||||
func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
|
||||
func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
|
||||
func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
|
||||
func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }
|
|
@@ -0,0 +1,209 @@
|
|||
//go:build !appengine && gc && !purego
|
||||
// +build !appengine
|
||||
// +build gc
|
||||
// +build !purego
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
// Registers:
|
||||
#define h AX
|
||||
#define d AX
|
||||
#define p SI // pointer to advance through b
|
||||
#define n DX
|
||||
#define end BX // loop end
|
||||
#define v1 R8
|
||||
#define v2 R9
|
||||
#define v3 R10
|
||||
#define v4 R11
|
||||
#define x R12
|
||||
#define prime1 R13
|
||||
#define prime2 R14
|
||||
#define prime4 DI
|
||||
|
||||
#define round(acc, x) \
|
||||
IMULQ prime2, x \
|
||||
ADDQ x, acc \
|
||||
ROLQ $31, acc \
|
||||
IMULQ prime1, acc
|
||||
|
||||
// round0 performs the operation x = round(0, x).
|
||||
#define round0(x) \
|
||||
IMULQ prime2, x \
|
||||
ROLQ $31, x \
|
||||
IMULQ prime1, x
|
||||
|
||||
// mergeRound applies a merge round on the two registers acc and x.
|
||||
// It assumes that prime1, prime2, and prime4 have been loaded.
|
||||
#define mergeRound(acc, x) \
|
||||
round0(x) \
|
||||
XORQ x, acc \
|
||||
IMULQ prime1, acc \
|
||||
ADDQ prime4, acc
|
||||
|
||||
// blockLoop processes as many 32-byte blocks as possible,
|
||||
// updating v1, v2, v3, and v4. It assumes that there is at least one block
|
||||
// to process.
|
||||
#define blockLoop() \
|
||||
loop: \
|
||||
MOVQ +0(p), x \
|
||||
round(v1, x) \
|
||||
MOVQ +8(p), x \
|
||||
round(v2, x) \
|
||||
MOVQ +16(p), x \
|
||||
round(v3, x) \
|
||||
MOVQ +24(p), x \
|
||||
round(v4, x) \
|
||||
ADDQ $32, p \
|
||||
CMPQ p, end \
|
||||
JLE loop
|
||||
|
||||
// func Sum64(b []byte) uint64
|
||||
TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
|
||||
// Load fixed primes.
|
||||
MOVQ ·primes+0(SB), prime1
|
||||
MOVQ ·primes+8(SB), prime2
|
||||
MOVQ ·primes+24(SB), prime4
|
||||
|
||||
// Load slice.
|
||||
MOVQ b_base+0(FP), p
|
||||
MOVQ b_len+8(FP), n
|
||||
LEAQ (p)(n*1), end
|
||||
|
||||
// The first loop limit will be len(b)-32.
|
||||
SUBQ $32, end
|
||||
|
||||
// Check whether we have at least one block.
|
||||
CMPQ n, $32
|
||||
JLT noBlocks
|
||||
|
||||
// Set up initial state (v1, v2, v3, v4).
|
||||
MOVQ prime1, v1
|
||||
ADDQ prime2, v1
|
||||
MOVQ prime2, v2
|
||||
XORQ v3, v3
|
||||
XORQ v4, v4
|
||||
SUBQ prime1, v4
|
||||
|
||||
blockLoop()
|
||||
|
||||
MOVQ v1, h
|
||||
ROLQ $1, h
|
||||
MOVQ v2, x
|
||||
ROLQ $7, x
|
||||
ADDQ x, h
|
||||
MOVQ v3, x
|
||||
ROLQ $12, x
|
||||
ADDQ x, h
|
||||
MOVQ v4, x
|
||||
ROLQ $18, x
|
||||
ADDQ x, h
|
||||
|
||||
mergeRound(h, v1)
|
||||
mergeRound(h, v2)
|
||||
mergeRound(h, v3)
|
||||
mergeRound(h, v4)
|
||||
|
||||
JMP afterBlocks
|
||||
|
||||
noBlocks:
|
||||
MOVQ ·primes+32(SB), h
|
||||
|
||||
afterBlocks:
|
||||
ADDQ n, h
|
||||
|
||||
ADDQ $24, end
|
||||
CMPQ p, end
|
||||
JG try4
|
||||
|
||||
loop8:
|
||||
MOVQ (p), x
|
||||
ADDQ $8, p
|
||||
round0(x)
|
||||
XORQ x, h
|
||||
ROLQ $27, h
|
||||
IMULQ prime1, h
|
||||
ADDQ prime4, h
|
||||
|
||||
CMPQ p, end
|
||||
JLE loop8
|
||||
|
||||
try4:
|
||||
ADDQ $4, end
|
||||
CMPQ p, end
|
||||
JG try1
|
||||
|
||||
MOVL (p), x
|
||||
ADDQ $4, p
|
||||
IMULQ prime1, x
|
||||
XORQ x, h
|
||||
|
||||
ROLQ $23, h
|
||||
IMULQ prime2, h
|
||||
ADDQ ·primes+16(SB), h
|
||||
|
||||
try1:
|
||||
ADDQ $4, end
|
||||
CMPQ p, end
|
||||
JGE finalize
|
||||
|
||||
loop1:
|
||||
MOVBQZX (p), x
|
||||
ADDQ $1, p
|
||||
IMULQ ·primes+32(SB), x
|
||||
XORQ x, h
|
||||
ROLQ $11, h
|
||||
IMULQ prime1, h
|
||||
|
||||
CMPQ p, end
|
||||
JL loop1
|
||||
|
||||
finalize:
|
||||
MOVQ h, x
|
||||
SHRQ $33, x
|
||||
XORQ x, h
|
||||
IMULQ prime2, h
|
||||
MOVQ h, x
|
||||
SHRQ $29, x
|
||||
XORQ x, h
|
||||
IMULQ ·primes+16(SB), h
|
||||
MOVQ h, x
|
||||
SHRQ $32, x
|
||||
XORQ x, h
|
||||
|
||||
MOVQ h, ret+24(FP)
|
||||
RET
|
||||
|
||||
// func writeBlocks(d *Digest, b []byte) int
|
||||
TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
|
||||
// Load fixed primes needed for round.
|
||||
MOVQ ·primes+0(SB), prime1
|
||||
MOVQ ·primes+8(SB), prime2
|
||||
|
||||
// Load slice.
|
||||
MOVQ b_base+8(FP), p
|
||||
MOVQ b_len+16(FP), n
|
||||
LEAQ (p)(n*1), end
|
||||
SUBQ $32, end
|
||||
|
||||
// Load vN from d.
|
||||
MOVQ s+0(FP), d
|
||||
MOVQ 0(d), v1
|
||||
MOVQ 8(d), v2
|
||||
MOVQ 16(d), v3
|
||||
MOVQ 24(d), v4
|
||||
|
||||
// We don't need to check the loop condition here; this function is
|
||||
// always called with at least one block of data to process.
|
||||
blockLoop()
|
||||
|
||||
// Copy vN back to d.
|
||||
MOVQ v1, 0(d)
|
||||
MOVQ v2, 8(d)
|
||||
MOVQ v3, 16(d)
|
||||
MOVQ v4, 24(d)
|
||||
|
||||
// The number of bytes written is p minus the old base pointer.
|
||||
SUBQ b_base+8(FP), p
|
||||
MOVQ p, ret+32(FP)
|
||||
|
||||
RET
|
|
@@ -0,0 +1,183 @@
|
|||
//go:build !appengine && gc && !purego
|
||||
// +build !appengine
|
||||
// +build gc
|
||||
// +build !purego
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
// Registers:
|
||||
#define digest R1
|
||||
#define h R2 // return value
|
||||
#define p R3 // input pointer
|
||||
#define n R4 // input length
|
||||
#define nblocks R5 // n / 32
|
||||
#define prime1 R7
|
||||
#define prime2 R8
|
||||
#define prime3 R9
|
||||
#define prime4 R10
|
||||
#define prime5 R11
|
||||
#define v1 R12
|
||||
#define v2 R13
|
||||
#define v3 R14
|
||||
#define v4 R15
|
||||
#define x1 R20
|
||||
#define x2 R21
|
||||
#define x3 R22
|
||||
#define x4 R23
|
||||
|
||||
#define round(acc, x) \
|
||||
MADD prime2, acc, x, acc \
|
||||
ROR $64-31, acc \
|
||||
MUL prime1, acc
|
||||
|
||||
// round0 performs the operation x = round(0, x).
|
||||
#define round0(x) \
|
||||
MUL prime2, x \
|
||||
ROR $64-31, x \
|
||||
MUL prime1, x
|
||||
|
||||
#define mergeRound(acc, x) \
|
||||
round0(x) \
|
||||
EOR x, acc \
|
||||
MADD acc, prime4, prime1, acc
|
||||
|
||||
// blockLoop processes as many 32-byte blocks as possible,
|
||||
// updating v1, v2, v3, and v4. It assumes that n >= 32.
|
||||
#define blockLoop() \
|
||||
LSR $5, n, nblocks \
|
||||
PCALIGN $16 \
|
||||
loop: \
|
||||
LDP.P 16(p), (x1, x2) \
|
||||
LDP.P 16(p), (x3, x4) \
|
||||
round(v1, x1) \
|
||||
round(v2, x2) \
|
||||
round(v3, x3) \
|
||||
round(v4, x4) \
|
||||
SUB $1, nblocks \
|
||||
CBNZ nblocks, loop
|
||||
|
||||
// func Sum64(b []byte) uint64
|
||||
TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
|
||||
LDP b_base+0(FP), (p, n)
|
||||
|
||||
LDP ·primes+0(SB), (prime1, prime2)
|
||||
LDP ·primes+16(SB), (prime3, prime4)
|
||||
MOVD ·primes+32(SB), prime5
|
||||
|
||||
CMP $32, n
|
||||
CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 }
|
||||
BLT afterLoop
|
||||
|
||||
ADD prime1, prime2, v1
|
||||
MOVD prime2, v2
|
||||
MOVD $0, v3
|
||||
NEG prime1, v4
|
||||
|
||||
blockLoop()
|
||||
|
||||
ROR $64-1, v1, x1
|
||||
ROR $64-7, v2, x2
|
||||
ADD x1, x2
|
||||
ROR $64-12, v3, x3
|
||||
ROR $64-18, v4, x4
|
||||
ADD x3, x4
|
||||
ADD x2, x4, h
|
||||
|
||||
mergeRound(h, v1)
|
||||
mergeRound(h, v2)
|
||||
mergeRound(h, v3)
|
||||
mergeRound(h, v4)
|
||||
|
||||
afterLoop:
|
||||
ADD n, h
|
||||
|
||||
TBZ $4, n, try8
|
||||
LDP.P 16(p), (x1, x2)
|
||||
|
||||
round0(x1)
|
||||
|
||||
// NOTE: here and below, sequencing the EOR after the ROR (using a
|
||||
// rotated register) is worth a small but measurable speedup for small
|
||||
// inputs.
|
||||
ROR $64-27, h
|
||||
EOR x1 @> 64-27, h, h
|
||||
MADD h, prime4, prime1, h
|
||||
|
||||
round0(x2)
|
||||
ROR $64-27, h
|
||||
EOR x2 @> 64-27, h, h
|
||||
MADD h, prime4, prime1, h
|
||||
|
||||
try8:
|
||||
TBZ $3, n, try4
|
||||
MOVD.P 8(p), x1
|
||||
|
||||
round0(x1)
|
||||
ROR $64-27, h
|
||||
EOR x1 @> 64-27, h, h
|
||||
MADD h, prime4, prime1, h
|
||||
|
||||
try4:
|
||||
TBZ $2, n, try2
|
||||
MOVWU.P 4(p), x2
|
||||
|
||||
MUL prime1, x2
|
||||
ROR $64-23, h
|
||||
EOR x2 @> 64-23, h, h
|
||||
MADD h, prime3, prime2, h
|
||||
|
||||
try2:
|
||||
TBZ $1, n, try1
|
||||
MOVHU.P 2(p), x3
|
||||
AND $255, x3, x1
|
||||
LSR $8, x3, x2
|
||||
|
||||
MUL prime5, x1
|
||||
ROR $64-11, h
|
||||
EOR x1 @> 64-11, h, h
|
||||
MUL prime1, h
|
||||
|
||||
MUL prime5, x2
|
||||
ROR $64-11, h
|
||||
EOR x2 @> 64-11, h, h
|
||||
MUL prime1, h
|
||||
|
||||
try1:
|
||||
TBZ $0, n, finalize
|
||||
MOVBU (p), x4
|
||||
|
||||
MUL prime5, x4
|
||||
ROR $64-11, h
|
||||
EOR x4 @> 64-11, h, h
|
||||
MUL prime1, h
|
||||
|
||||
finalize:
|
||||
EOR h >> 33, h
|
||||
MUL prime2, h
|
||||
EOR h >> 29, h
|
||||
MUL prime3, h
|
||||
EOR h >> 32, h
|
||||
|
||||
MOVD h, ret+24(FP)
|
||||
RET
|
||||
|
||||
// func writeBlocks(d *Digest, b []byte) int
|
||||
TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
|
||||
LDP ·primes+0(SB), (prime1, prime2)
|
||||
|
||||
// Load state. Assume v[1-4] are stored contiguously.
|
||||
MOVD d+0(FP), digest
|
||||
LDP 0(digest), (v1, v2)
|
||||
LDP 16(digest), (v3, v4)
|
||||
|
||||
LDP b_base+8(FP), (p, n)
|
||||
|
||||
blockLoop()
|
||||
|
||||
// Store updated state.
|
||||
STP (v1, v2), 0(digest)
|
||||
STP (v3, v4), 16(digest)
|
||||
|
||||
BIC $31, n
|
||||
MOVD n, ret+32(FP)
|
||||
RET
|
|
@@ -0,0 +1,15 @@
//go:build (amd64 || arm64) && !appengine && gc && !purego
// +build amd64 arm64
// +build !appengine
// +build gc
// +build !purego

package xxhash

// Sum64 computes the 64-bit xxHash digest of b.
//
//go:noescape
func Sum64(b []byte) uint64

//go:noescape
func writeBlocks(d *Digest, b []byte) int
@@ -0,0 +1,76 @@
|
|||
//go:build (!amd64 && !arm64) || appengine || !gc || purego
|
||||
// +build !amd64,!arm64 appengine !gc purego
|
||||
|
||||
package xxhash
|
||||
|
||||
// Sum64 computes the 64-bit xxHash digest of b.
|
||||
func Sum64(b []byte) uint64 {
|
||||
// A simpler version would be
|
||||
// d := New()
|
||||
// d.Write(b)
|
||||
// return d.Sum64()
|
||||
// but this is faster, particularly for small inputs.
|
||||
|
||||
n := len(b)
|
||||
var h uint64
|
||||
|
||||
if n >= 32 {
|
||||
v1 := primes[0] + prime2
|
||||
v2 := prime2
|
||||
v3 := uint64(0)
|
||||
v4 := -primes[0]
|
||||
for len(b) >= 32 {
|
||||
v1 = round(v1, u64(b[0:8:len(b)]))
|
||||
v2 = round(v2, u64(b[8:16:len(b)]))
|
||||
v3 = round(v3, u64(b[16:24:len(b)]))
|
||||
v4 = round(v4, u64(b[24:32:len(b)]))
|
||||
b = b[32:len(b):len(b)]
|
||||
}
|
||||
h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
|
||||
h = mergeRound(h, v1)
|
||||
h = mergeRound(h, v2)
|
||||
h = mergeRound(h, v3)
|
||||
h = mergeRound(h, v4)
|
||||
} else {
|
||||
h = prime5
|
||||
}
|
||||
|
||||
h += uint64(n)
|
||||
|
||||
for ; len(b) >= 8; b = b[8:] {
|
||||
k1 := round(0, u64(b[:8]))
|
||||
h ^= k1
|
||||
h = rol27(h)*prime1 + prime4
|
||||
}
|
||||
if len(b) >= 4 {
|
||||
h ^= uint64(u32(b[:4])) * prime1
|
||||
h = rol23(h)*prime2 + prime3
|
||||
b = b[4:]
|
||||
}
|
||||
for ; len(b) > 0; b = b[1:] {
|
||||
h ^= uint64(b[0]) * prime5
|
||||
h = rol11(h) * prime1
|
||||
}
|
||||
|
||||
h ^= h >> 33
|
||||
h *= prime2
|
||||
h ^= h >> 29
|
||||
h *= prime3
|
||||
h ^= h >> 32
|
||||
|
||||
return h
|
||||
}
|
||||
|
||||
func writeBlocks(d *Digest, b []byte) int {
|
||||
v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
|
||||
n := len(b)
|
||||
for len(b) >= 32 {
|
||||
v1 = round(v1, u64(b[0:8:len(b)]))
|
||||
v2 = round(v2, u64(b[8:16:len(b)]))
|
||||
v3 = round(v3, u64(b[16:24:len(b)]))
|
||||
v4 = round(v4, u64(b[24:32:len(b)]))
|
||||
b = b[32:len(b):len(b)]
|
||||
}
|
||||
d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4
|
||||
return n - len(b)
|
||||
}
|
|
@@ -0,0 +1,16 @@
//go:build appengine
// +build appengine

// This file contains the safe implementations of otherwise unsafe-using code.

package xxhash

// Sum64String computes the 64-bit xxHash digest of s.
func Sum64String(s string) uint64 {
    return Sum64([]byte(s))
}

// WriteString adds more data to d. It always returns len(s), nil.
func (d *Digest) WriteString(s string) (n int, err error) {
    return d.Write([]byte(s))
}
@@ -0,0 +1,58 @@
|
|||
//go:build !appengine
|
||||
// +build !appengine
|
||||
|
||||
// This file encapsulates usage of unsafe.
|
||||
// xxhash_safe.go contains the safe implementations.
|
||||
|
||||
package xxhash
|
||||
|
||||
import (
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// In the future it's possible that compiler optimizations will make these
|
||||
// XxxString functions unnecessary by realizing that calls such as
|
||||
// Sum64([]byte(s)) don't need to copy s. See https://go.dev/issue/2205.
|
||||
// If that happens, even if we keep these functions they can be replaced with
|
||||
// the trivial safe code.
|
||||
|
||||
// NOTE: The usual way of doing an unsafe string-to-[]byte conversion is:
|
||||
//
|
||||
// var b []byte
|
||||
// bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
|
||||
// bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
|
||||
// bh.Len = len(s)
|
||||
// bh.Cap = len(s)
|
||||
//
|
||||
// Unfortunately, as of Go 1.15.3 the inliner's cost model assigns a high enough
|
||||
// weight to this sequence of expressions that any function that uses it will
|
||||
// not be inlined. Instead, the functions below use a different unsafe
|
||||
// conversion designed to minimize the inliner weight and allow both to be
|
||||
// inlined. There is also a test (TestInlining) which verifies that these are
|
||||
// inlined.
|
||||
//
|
||||
// See https://github.com/golang/go/issues/42739 for discussion.
|
||||
|
||||
// Sum64String computes the 64-bit xxHash digest of s.
|
||||
// It may be faster than Sum64([]byte(s)) by avoiding a copy.
|
||||
func Sum64String(s string) uint64 {
|
||||
b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))
|
||||
return Sum64(b)
|
||||
}
|
||||
|
||||
// WriteString adds more data to d. It always returns len(s), nil.
|
||||
// It may be faster than Write([]byte(s)) by avoiding a copy.
|
||||
func (d *Digest) WriteString(s string) (n int, err error) {
|
||||
d.Write(*(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})))
|
||||
// d.Write always returns len(s), nil.
|
||||
// Ignoring the return output and returning these fixed values buys a
|
||||
// savings of 6 in the inliner's cost model.
|
||||
return len(s), nil
|
||||
}
|
||||
|
||||
// sliceHeader is similar to reflect.SliceHeader, but it assumes that the layout
|
||||
// of the first two words is the same as the layout of a string.
|
||||
type sliceHeader struct {
|
||||
s string
|
||||
cap int
|
||||
}
|
|
@@ -0,0 +1,202 @@
|
|||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
|
@@ -0,0 +1,5 @@
CoreOS Project
Copyright 2018 CoreOS, Inc

This product includes software developed at CoreOS, Inc.
(http://www.coreos.com/).
@@ -0,0 +1,296 @@
|
|||
// Copyright 2013-2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Semantic Versions http://semver.org
|
||||
package semver
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type Version struct {
|
||||
Major int64
|
||||
Minor int64
|
||||
Patch int64
|
||||
PreRelease PreRelease
|
||||
Metadata string
|
||||
}
|
||||
|
||||
type PreRelease string
|
||||
|
||||
func splitOff(input *string, delim string) (val string) {
|
||||
parts := strings.SplitN(*input, delim, 2)
|
||||
|
||||
if len(parts) == 2 {
|
||||
*input = parts[0]
|
||||
val = parts[1]
|
||||
}
|
||||
|
||||
return val
|
||||
}
|
||||
|
||||
func New(version string) *Version {
|
||||
return Must(NewVersion(version))
|
||||
}
|
||||
|
||||
func NewVersion(version string) (*Version, error) {
|
||||
v := Version{}
|
||||
|
||||
if err := v.Set(version); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &v, nil
|
||||
}
|
||||
|
||||
// Must is a helper for wrapping NewVersion and will panic if err is not nil.
|
||||
func Must(v *Version, err error) *Version {
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// Set parses and updates v from the given version string. Implements flag.Value
|
||||
func (v *Version) Set(version string) error {
|
||||
metadata := splitOff(&version, "+")
|
||||
preRelease := PreRelease(splitOff(&version, "-"))
|
||||
dotParts := strings.SplitN(version, ".", 3)
|
||||
|
||||
if len(dotParts) != 3 {
|
||||
return fmt.Errorf("%s is not in dotted-tri format", version)
|
||||
}
|
||||
|
||||
if err := validateIdentifier(string(preRelease)); err != nil {
|
||||
return fmt.Errorf("failed to validate pre-release: %v", err)
|
||||
}
|
||||
|
||||
if err := validateIdentifier(metadata); err != nil {
|
||||
return fmt.Errorf("failed to validate metadata: %v", err)
|
||||
}
|
||||
|
||||
parsed := make([]int64, 3)
|
||||
|
||||
for i, v := range dotParts[:3] {
|
||||
val, err := strconv.ParseInt(v, 10, 64)
|
||||
parsed[i] = val
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
v.Metadata = metadata
|
||||
v.PreRelease = preRelease
|
||||
v.Major = parsed[0]
|
||||
v.Minor = parsed[1]
|
||||
v.Patch = parsed[2]
|
||||
return nil
|
||||
}
|
||||
|
||||
func (v Version) String() string {
|
||||
var buffer bytes.Buffer
|
||||
|
||||
fmt.Fprintf(&buffer, "%d.%d.%d", v.Major, v.Minor, v.Patch)
|
||||
|
||||
if v.PreRelease != "" {
|
||||
fmt.Fprintf(&buffer, "-%s", v.PreRelease)
|
||||
}
|
||||
|
||||
if v.Metadata != "" {
|
||||
fmt.Fprintf(&buffer, "+%s", v.Metadata)
|
||||
}
|
||||
|
||||
return buffer.String()
|
||||
}
|
||||
|
||||
func (v *Version) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||
var data string
|
||||
if err := unmarshal(&data); err != nil {
|
||||
return err
|
||||
}
|
||||
return v.Set(data)
|
||||
}
|
||||
|
||||
func (v Version) MarshalJSON() ([]byte, error) {
|
||||
return []byte(`"` + v.String() + `"`), nil
|
||||
}
|
||||
|
||||
func (v *Version) UnmarshalJSON(data []byte) error {
|
||||
l := len(data)
|
||||
if l == 0 || string(data) == `""` {
|
||||
return nil
|
||||
}
|
||||
if l < 2 || data[0] != '"' || data[l-1] != '"' {
|
||||
return errors.New("invalid semver string")
|
||||
}
|
||||
return v.Set(string(data[1 : l-1]))
|
||||
}
|
||||
|
||||
// Compare tests if v is less than, equal to, or greater than versionB,
|
||||
// returning -1, 0, or +1 respectively.
|
||||
func (v Version) Compare(versionB Version) int {
|
||||
if cmp := recursiveCompare(v.Slice(), versionB.Slice()); cmp != 0 {
|
||||
return cmp
|
||||
}
|
||||
return preReleaseCompare(v, versionB)
|
||||
}
|
||||
|
||||
// Equal tests if v is equal to versionB.
|
||||
func (v Version) Equal(versionB Version) bool {
|
||||
return v.Compare(versionB) == 0
|
||||
}
|
||||
|
||||
// LessThan tests if v is less than versionB.
|
||||
func (v Version) LessThan(versionB Version) bool {
|
||||
return v.Compare(versionB) < 0
|
||||
}
|
||||
|
||||
// Slice converts the comparable parts of the semver into a slice of integers.
|
||||
func (v Version) Slice() []int64 {
|
||||
return []int64{v.Major, v.Minor, v.Patch}
|
||||
}
|
||||
|
||||
func (p PreRelease) Slice() []string {
|
||||
preRelease := string(p)
|
||||
return strings.Split(preRelease, ".")
|
||||
}
|
||||
|
||||
func preReleaseCompare(versionA Version, versionB Version) int {
|
||||
a := versionA.PreRelease
|
||||
b := versionB.PreRelease
|
||||
|
||||
/* Handle the case where if two versions are otherwise equal it is the
|
||||
* one without a PreRelease that is greater */
|
||||
if len(a) == 0 && (len(b) > 0) {
|
||||
return 1
|
||||
} else if len(b) == 0 && (len(a) > 0) {
|
||||
return -1
|
||||
}
|
||||
|
||||
// If there is a prerelease, check and compare each part.
|
||||
return recursivePreReleaseCompare(a.Slice(), b.Slice())
|
||||
}
|
||||
|
||||
func recursiveCompare(versionA []int64, versionB []int64) int {
|
||||
if len(versionA) == 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
a := versionA[0]
|
||||
b := versionB[0]
|
||||
|
||||
if a > b {
|
||||
return 1
|
||||
} else if a < b {
|
||||
return -1
|
||||
}
|
||||
|
||||
return recursiveCompare(versionA[1:], versionB[1:])
|
||||
}
|
||||
|
||||
func recursivePreReleaseCompare(versionA []string, versionB []string) int {
|
||||
// A larger set of pre-release fields has a higher precedence than a smaller set,
|
||||
// if all of the preceding identifiers are equal.
|
||||
if len(versionA) == 0 {
|
||||
if len(versionB) > 0 {
|
||||
return -1
|
||||
}
|
||||
return 0
|
||||
} else if len(versionB) == 0 {
|
||||
// We're longer than versionB so return 1.
|
||||
return 1
|
||||
}
|
||||
|
||||
a := versionA[0]
|
||||
b := versionB[0]
|
||||
|
||||
aInt := false
|
||||
bInt := false
|
||||
|
||||
aI, err := strconv.Atoi(versionA[0])
|
||||
if err == nil {
|
||||
aInt = true
|
||||
}
|
||||
|
||||
bI, err := strconv.Atoi(versionB[0])
|
||||
if err == nil {
|
||||
bInt = true
|
||||
}
|
||||
|
||||
// Numeric identifiers always have lower precedence than non-numeric identifiers.
|
||||
if aInt && !bInt {
|
||||
return -1
|
||||
} else if !aInt && bInt {
|
||||
return 1
|
||||
}
|
||||
|
||||
// Handle Integer Comparison
|
||||
if aInt && bInt {
|
||||
if aI > bI {
|
||||
return 1
|
||||
} else if aI < bI {
|
||||
return -1
|
||||
}
|
||||
}
|
||||
|
||||
// Handle String Comparison
|
||||
if a > b {
|
||||
return 1
|
||||
} else if a < b {
|
||||
return -1
|
||||
}
|
||||
|
||||
return recursivePreReleaseCompare(versionA[1:], versionB[1:])
|
||||
}
|
||||
|
||||
// BumpMajor increments the Major field by 1 and resets all other fields to their default values
|
||||
func (v *Version) BumpMajor() {
|
||||
v.Major += 1
|
||||
v.Minor = 0
|
||||
v.Patch = 0
|
||||
v.PreRelease = PreRelease("")
|
||||
v.Metadata = ""
|
||||
}
|
||||
|
||||
// BumpMinor increments the Minor field by 1 and resets all other fields to their default values
|
||||
func (v *Version) BumpMinor() {
|
||||
v.Minor += 1
|
||||
v.Patch = 0
|
||||
v.PreRelease = PreRelease("")
|
||||
v.Metadata = ""
|
||||
}
|
||||
|
||||
// BumpPatch increments the Patch field by 1 and resets all other fields to their default values
|
||||
func (v *Version) BumpPatch() {
|
||||
v.Patch += 1
|
||||
v.PreRelease = PreRelease("")
|
||||
v.Metadata = ""
|
||||
}
|
||||
|
||||
// validateIdentifier makes sure the provided identifier satisfies semver spec
|
||||
func validateIdentifier(id string) error {
|
||||
if id != "" && !reIdentifier.MatchString(id) {
|
||||
return fmt.Errorf("%s is not a valid semver identifier", id)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// reIdentifier is a regular expression used to check that pre-release and metadata
|
||||
// identifiers satisfy the spec requirements
|
||||
var reIdentifier = regexp.MustCompile(`^[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*$`)
|
|
@@ -0,0 +1,38 @@
// Copyright 2013-2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package semver

import (
    "sort"
)

type Versions []*Version

func (s Versions) Len() int {
    return len(s)
}

func (s Versions) Swap(i, j int) {
    s[i], s[j] = s[j], s[i]
}

func (s Versions) Less(i, j int) bool {
    return s[i].LessThan(*s[j])
}

// Sort sorts the given slice of Version
func Sort(versions []*Version) {
    sort.Sort(Versions(versions))
}
@@ -0,0 +1,191 @@
|
|||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction, and
|
||||
distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by the copyright
|
||||
owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all other entities
|
||||
that control, are controlled by, or are under common control with that entity.
|
||||
For the purposes of this definition, "control" means (i) the power, direct or
|
||||
indirect, to cause the direction or management of such entity, whether by
|
||||
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity exercising
|
||||
permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications, including
|
||||
but not limited to software source code, documentation source, and configuration
|
||||
files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical transformation or
|
||||
translation of a Source form, including but not limited to compiled object code,
|
||||
generated documentation, and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or Object form, made
|
||||
available under the License, as indicated by a copyright notice that is included
|
||||
in or attached to the work (an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object form, that
|
||||
is based on (or derived from) the Work and for which the editorial revisions,
|
||||
annotations, elaborations, or other modifications represent, as a whole, an
|
||||
original work of authorship. For the purposes of this License, Derivative Works
|
||||
shall not include works that remain separable from, or merely link (or bind by
|
||||
name) to the interfaces of, the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including the original version
|
||||
of the Work and any modifications or additions to that Work or Derivative Works
|
||||
thereof, that is intentionally submitted to Licensor for inclusion in the Work
|
||||
by the copyright owner or by an individual or Legal Entity authorized to submit
|
||||
on behalf of the copyright owner. For the purposes of this definition,
|
||||
"submitted" means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems, and
|
||||
issue tracking systems that are managed by, or on behalf of, the Licensor for
|
||||
the purpose of discussing and improving the Work, but excluding communication
|
||||
that is conspicuously marked or otherwise designated in writing by the copyright
|
||||
owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
|
||||
of whom a Contribution has been received by Licensor and subsequently
|
||||
incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License.
|
||||
|
||||
Subject to the terms and conditions of this License, each Contributor hereby
|
||||
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
|
||||
irrevocable copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the Work and such
|
||||
Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License.
|
||||
|
||||
Subject to the terms and conditions of this License, each Contributor hereby
|
||||
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
|
||||
irrevocable (except as stated in this section) patent license to make, have
|
||||
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
|
||||
such license applies only to those patent claims licensable by such Contributor
|
||||
that are necessarily infringed by their Contribution(s) alone or by combination
|
||||
of their Contribution(s) with the Work to which such Contribution(s) was
|
||||
submitted. If You institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
|
||||
Contribution incorporated within the Work constitutes direct or contributory
|
||||
patent infringement, then any patent licenses granted to You under this License
|
||||
for that Work shall terminate as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution.
|
||||
|
||||
You may reproduce and distribute copies of the Work or Derivative Works thereof
|
||||
in any medium, with or without modifications, and in Source or Object form,
|
||||
provided that You meet the following conditions:
|
||||
|
||||
You must give any other recipients of the Work or Derivative Works a copy of
|
||||
this License; and
|
||||
You must cause any modified files to carry prominent notices stating that You
|
||||
changed the files; and
|
||||
You must retain, in the Source form of any Derivative Works that You distribute,
|
||||
all copyright, patent, trademark, and attribution notices from the Source form
|
||||
of the Work, excluding those notices that do not pertain to any part of the
|
||||
Derivative Works; and
|
||||
If the Work includes a "NOTICE" text file as part of its distribution, then any
|
||||
Derivative Works that You distribute must include a readable copy of the
|
||||
attribution notices contained within such NOTICE file, excluding those notices
|
||||
that do not pertain to any part of the Derivative Works, in at least one of the
|
||||
following places: within a NOTICE text file distributed as part of the
|
||||
Derivative Works; within the Source form or documentation, if provided along
|
||||
with the Derivative Works; or, within a display generated by the Derivative
|
||||
Works, if and wherever such third-party notices normally appear. The contents of
|
||||
the NOTICE file are for informational purposes only and do not modify the
|
||||
License. You may add Your own attribution notices within Derivative Works that
|
||||
You distribute, alongside or as an addendum to the NOTICE text from the Work,
|
||||
provided that such additional attribution notices cannot be construed as
|
||||
modifying the License.
|
||||
You may add Your own copyright statement to Your modifications and may provide
|
||||
additional or different license terms and conditions for use, reproduction, or
|
||||
distribution of Your modifications, or for any such Derivative Works as a whole,
|
||||
provided Your use, reproduction, and distribution of the Work otherwise complies
|
||||
with the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions.
|
||||
|
||||
Unless You explicitly state otherwise, any Contribution intentionally submitted
|
||||
for inclusion in the Work by You to the Licensor shall be under the terms and
|
||||
conditions of this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify the terms of
|
||||
any separate license agreement you may have executed with Licensor regarding
|
||||
such Contributions.
|
||||
|
||||
6. Trademarks.
|
||||
|
||||
This License does not grant permission to use the trade names, trademarks,
|
||||
service marks, or product names of the Licensor, except as required for
|
||||
reasonable and customary use in describing the origin of the Work and
|
||||
reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty.
|
||||
|
||||
Unless required by applicable law or agreed to in writing, Licensor provides the
|
||||
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
|
||||
including, without limitation, any warranties or conditions of TITLE,
|
||||
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
|
||||
solely responsible for determining the appropriateness of using or
|
||||
redistributing the Work and assume any risks associated with Your exercise of
|
||||
permissions under this License.
|
||||
|
||||
8. Limitation of Liability.
|
||||
|
||||
In no event and under no legal theory, whether in tort (including negligence),
|
||||
contract, or otherwise, unless required by applicable law (such as deliberate
|
||||
and grossly negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special, incidental,
|
||||
or consequential damages of any character arising as a result of this License or
|
||||
out of the use or inability to use the Work (including but not limited to
|
||||
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
|
||||
any and all other commercial damages or losses), even if such Contributor has
|
||||
been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability.
|
||||
|
||||
While redistributing the Work or Derivative Works thereof, You may choose to
|
||||
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
|
||||
other liability obligations and/or rights consistent with this License. However,
|
||||
in accepting such obligations, You may act only on Your own behalf and on Your
|
||||
sole responsibility, not on behalf of any other Contributor, and only if You
|
||||
agree to indemnify, defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason of your
|
||||
accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work
|
||||
|
||||
To apply the Apache License to your work, attach the following boilerplate
|
||||
notice, with the fields enclosed by brackets "[]" replaced with your own
|
||||
identifying information. (Don't include the brackets!) The text should be
|
||||
enclosed in the appropriate comment syntax for the file format. We also
|
||||
recommend that a file or class name and description of purpose be included on
|
||||
the same "printed page" as the copyright notice for easier identification within
|
||||
third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
|
@@ -0,0 +1,5 @@
|
|||
CoreOS Project
|
||||
Copyright 2018 CoreOS, Inc
|
||||
|
||||
This product includes software developed at CoreOS, Inc.
|
||||
(http://www.coreos.com/).
|
|
@@ -0,0 +1,46 @@
|
|||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package journal provides write bindings to the local systemd journal.
|
||||
// It is implemented in pure Go and connects to the journal directly over its
|
||||
// unix socket.
|
||||
//
|
||||
// To read from the journal, see the "sdjournal" package, which wraps the
|
||||
// sd-journal C API.
|
||||
//
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html
|
||||
package journal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Priority of a journal message
|
||||
type Priority int
|
||||
|
||||
const (
|
||||
PriEmerg Priority = iota
|
||||
PriAlert
|
||||
PriCrit
|
||||
PriErr
|
||||
PriWarning
|
||||
PriNotice
|
||||
PriInfo
|
||||
PriDebug
|
||||
)
|
||||
|
||||
// Print prints a message to the local systemd journal using Send().
|
||||
func Print(priority Priority, format string, a ...interface{}) error {
|
||||
return Send(fmt.Sprintf(format, a...), priority, nil)
|
||||
}
|
|
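The package's public surface is small: Enabled reports whether the journald socket is reachable, Send writes a message plus arbitrary uppercase fields, and Print is the fmt-style wrapper above. A minimal usage sketch (editorial illustration, not part of the vendored files; the import path follows the vendor directory added in this commit, and the field name and values are made up):

package main

import (
	"fmt"
	"os"

	"github.com/coreos/go-systemd/v22/journal"
)

func main() {
	if !journal.Enabled() {
		fmt.Fprintln(os.Stderr, "journald socket not available, falling back to stderr")
		return
	}
	// Send adds PRIORITY and MESSAGE itself; extra fields must be uppercase
	// and must not begin with an underscore (see validVarName below).
	_ = journal.Send("backup finished", journal.PriInfo, map[string]string{
		"BACKUP_TARGET": "/srv/data", // illustrative field name and value
	})
	// Print formats like fmt.Sprintf and forwards to Send with nil fields.
	_ = journal.Print(journal.PriDebug, "copied %d files", 128)
}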
@@ -0,0 +1,267 @@
|
|||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build !windows
|
||||
// +build !windows
|
||||
|
||||
// Package journal provides write bindings to the local systemd journal.
|
||||
// It is implemented in pure Go and connects to the journal directly over its
|
||||
// unix socket.
|
||||
//
|
||||
// To read from the journal, see the "sdjournal" package, which wraps the
|
||||
// sd-journal C API.
|
||||
//
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html
|
||||
package journal
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
var (
|
||||
// This can be overridden at build-time:
|
||||
// https://github.com/golang/go/wiki/GcToolchainTricks#including-build-information-in-the-executable
|
||||
journalSocket = "/run/systemd/journal/socket"
|
||||
|
||||
// unixConnPtr atomically holds the local unconnected Unix-domain socket.
|
||||
// Concrete safe pointer type: *net.UnixConn
|
||||
unixConnPtr unsafe.Pointer
|
||||
// onceConn ensures that unixConnPtr is initialized exactly once.
|
||||
onceConn sync.Once
|
||||
)
|
||||
|
||||
// Enabled checks whether the local systemd journal is available for logging.
|
||||
func Enabled() bool {
|
||||
if c := getOrInitConn(); c == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
conn, err := net.Dial("unixgram", journalSocket)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// StderrIsJournalStream returns whether the process stderr is connected
|
||||
// to the Journal's stream transport.
|
||||
//
|
||||
// This can be used for automatic protocol upgrading described in [Journal Native Protocol].
|
||||
//
|
||||
// Returns true if JOURNAL_STREAM environment variable is present,
|
||||
// and stderr's device and inode numbers match it.
|
||||
//
|
||||
// Error is returned if unexpected error occurs: e.g. if JOURNAL_STREAM environment variable
|
||||
// is present, but malformed, fstat syscall fails, etc.
|
||||
//
|
||||
// [Journal Native Protocol]: https://systemd.io/JOURNAL_NATIVE_PROTOCOL/#automatic-protocol-upgrading
|
||||
func StderrIsJournalStream() (bool, error) {
|
||||
return fdIsJournalStream(syscall.Stderr)
|
||||
}
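A sketch of the automatic protocol upgrading the comment refers to (editorial illustration, not part of the vendored file; the message text and VERSION field are made up): if stderr already points at the journal's stream transport, switch to the native protocol so structured fields are preserved.

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/coreos/go-systemd/v22/journal"
)

func main() {
	ok, err := journal.StderrIsJournalStream()
	if err != nil {
		// JOURNAL_STREAM was present but malformed, or fstat failed.
		log.Fatal(err)
	}
	if ok {
		// stderr already reaches journald: upgrade to the native protocol.
		_ = journal.Send("starting up", journal.PriInfo, map[string]string{"VERSION": "1.2.3"})
	} else {
		fmt.Fprintln(os.Stderr, "starting up")
	}
}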
|
||||
|
||||
// StdoutIsJournalStream returns whether the process stdout is connected
|
||||
// to the Journal's stream transport.
|
||||
//
|
||||
// Returns true if JOURNAL_STREAM environment variable is present,
|
||||
// and stdout's device and inode numbers match it.
|
||||
//
|
||||
// Error is returned if unexpected error occurs: e.g. if JOURNAL_STREAM environment variable
|
||||
// is present, but malformed, fstat syscall fails, etc.
|
||||
//
|
||||
// Most users should probably use [StderrIsJournalStream].
|
||||
func StdoutIsJournalStream() (bool, error) {
|
||||
return fdIsJournalStream(syscall.Stdout)
|
||||
}
|
||||
|
||||
func fdIsJournalStream(fd int) (bool, error) {
|
||||
journalStream := os.Getenv("JOURNAL_STREAM")
|
||||
if journalStream == "" {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
var expectedStat syscall.Stat_t
|
||||
_, err := fmt.Sscanf(journalStream, "%d:%d", &expectedStat.Dev, &expectedStat.Ino)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to parse JOURNAL_STREAM=%q: %v", journalStream, err)
|
||||
}
|
||||
|
||||
var stat syscall.Stat_t
|
||||
err = syscall.Fstat(fd, &stat)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
match := stat.Dev == expectedStat.Dev && stat.Ino == expectedStat.Ino
|
||||
return match, nil
|
||||
}
|
||||
|
||||
// Send a message to the local systemd journal. vars is a map of journald
|
||||
// fields to values. Fields must be composed of uppercase letters, numbers,
|
||||
// and underscores, but must not start with an underscore. Within these
|
||||
// restrictions, any arbitrary field name may be used. Some names have special
|
||||
// significance: see the journalctl documentation
|
||||
// (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html)
|
||||
// for more details. vars may be nil.
|
||||
func Send(message string, priority Priority, vars map[string]string) error {
|
||||
conn := getOrInitConn()
|
||||
if conn == nil {
|
||||
return errors.New("could not initialize socket to journald")
|
||||
}
|
||||
|
||||
socketAddr := &net.UnixAddr{
|
||||
Name: journalSocket,
|
||||
Net: "unixgram",
|
||||
}
|
||||
|
||||
data := new(bytes.Buffer)
|
||||
appendVariable(data, "PRIORITY", strconv.Itoa(int(priority)))
|
||||
appendVariable(data, "MESSAGE", message)
|
||||
for k, v := range vars {
|
||||
appendVariable(data, k, v)
|
||||
}
|
||||
|
||||
_, _, err := conn.WriteMsgUnix(data.Bytes(), nil, socketAddr)
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
if !isSocketSpaceError(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
// Large log entry, send it via tempfile and ancillary-fd.
|
||||
file, err := tempFd()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer file.Close()
|
||||
_, err = io.Copy(file, data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rights := syscall.UnixRights(int(file.Fd()))
|
||||
_, _, err = conn.WriteMsgUnix([]byte{}, rights, socketAddr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// getOrInitConn attempts to get the global `unixConnPtr` socket, initializing if necessary
|
||||
func getOrInitConn() *net.UnixConn {
|
||||
conn := (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr))
|
||||
if conn != nil {
|
||||
return conn
|
||||
}
|
||||
onceConn.Do(initConn)
|
||||
return (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr))
|
||||
}
|
||||
|
||||
func appendVariable(w io.Writer, name, value string) {
|
||||
if err := validVarName(name); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "variable name %s contains invalid character, ignoring\n", name)
|
||||
}
|
||||
if strings.ContainsRune(value, '\n') {
|
||||
/* When the value contains a newline, we write:
|
||||
* - the variable name, followed by a newline
|
||||
* - the size (in 64bit little endian format)
|
||||
* - the data, followed by a newline
|
||||
*/
|
||||
fmt.Fprintln(w, name)
|
||||
binary.Write(w, binary.LittleEndian, uint64(len(value)))
|
||||
fmt.Fprintln(w, value)
|
||||
} else {
|
||||
/* just write the variable and value all on one line */
|
||||
fmt.Fprintf(w, "%s=%s\n", name, value)
|
||||
}
|
||||
}
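The two framings appendVariable produces can be seen with a short standalone sketch (editorial illustration; it mirrors the logic above rather than calling the unexported function, and the names frame/MESSAGE values are made up):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"strings"
)

// frame mirrors appendVariable: NAME=VALUE\n for single-line values,
// NAME\n + 64-bit little-endian length + VALUE\n otherwise.
func frame(name, value string) []byte {
	var b bytes.Buffer
	if strings.ContainsRune(value, '\n') {
		fmt.Fprintln(&b, name)
		binary.Write(&b, binary.LittleEndian, uint64(len(value)))
		fmt.Fprintln(&b, value)
	} else {
		fmt.Fprintf(&b, "%s=%s\n", name, value)
	}
	return b.Bytes()
}

func main() {
	fmt.Printf("%q\n", frame("MESSAGE", "all good"))     // single-line form
	fmt.Printf("%q\n", frame("MESSAGE", "line1\nline2")) // length-prefixed form
}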
|
||||
|
||||
// validVarName validates a variable name to make sure journald will accept it.
|
||||
// The variable name must be in uppercase and consist only of characters,
|
||||
// numbers and underscores, and may not begin with an underscore:
|
||||
// https://www.freedesktop.org/software/systemd/man/sd_journal_print.html
|
||||
func validVarName(name string) error {
|
||||
if name == "" {
|
||||
return errors.New("Empty variable name")
|
||||
} else if name[0] == '_' {
|
||||
return errors.New("Variable name begins with an underscore")
|
||||
}
|
||||
|
||||
for _, c := range name {
|
||||
if !(('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_') {
|
||||
return errors.New("Variable name contains invalid characters")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// isSocketSpaceError checks whether the error is signaling
|
||||
// an "overlarge message" condition.
|
||||
func isSocketSpaceError(err error) bool {
|
||||
opErr, ok := err.(*net.OpError)
|
||||
if !ok || opErr == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
sysErr, ok := opErr.Err.(*os.SyscallError)
|
||||
if !ok || sysErr == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return sysErr.Err == syscall.EMSGSIZE || sysErr.Err == syscall.ENOBUFS
|
||||
}
|
||||
|
||||
// tempFd creates a temporary, unlinked file under `/dev/shm`.
|
||||
func tempFd() (*os.File, error) {
|
||||
file, err := ioutil.TempFile("/dev/shm/", "journal.XXXXX")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = syscall.Unlink(file.Name())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return file, nil
|
||||
}
|
||||
|
||||
// initConn initializes the global `unixConnPtr` socket.
|
||||
// It is automatically called when needed.
|
||||
func initConn() {
|
||||
autobind, err := net.ResolveUnixAddr("unixgram", "")
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
sock, err := net.ListenUnixgram("unixgram", autobind)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
atomic.StorePointer(&unixConnPtr, unsafe.Pointer(sock))
|
||||
}
|
43
vendor/github.com/coreos/go-systemd/v22/journal/journal_windows.go
generated
vendored
Normal file
|
@@ -0,0 +1,43 @@
|
|||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package journal provides write bindings to the local systemd journal.
|
||||
// It is implemented in pure Go and connects to the journal directly over its
|
||||
// unix socket.
|
||||
//
|
||||
// To read from the journal, see the "sdjournal" package, which wraps the
|
||||
// sd-journal C API.
|
||||
//
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html
|
||||
package journal
|
||||
|
||||
import (
|
||||
"errors"
|
||||
)
|
||||
|
||||
func Enabled() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func Send(message string, priority Priority, vars map[string]string) error {
|
||||
return errors.New("could not initialize socket to journald")
|
||||
}
|
||||
|
||||
func StderrIsJournalStream() (bool, error) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func StdoutIsJournalStream() (bool, error) {
|
||||
return false, nil
|
||||
}
|
|
@@ -0,0 +1,15 @@
|
|||
ISC License
|
||||
|
||||
Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
|
||||
|
||||
Permission to use, copy, modify, and/or distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
|
@@ -0,0 +1,145 @@
|
|||
// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
|
||||
//
|
||||
// Permission to use, copy, modify, and distribute this software for any
|
||||
// purpose with or without fee is hereby granted, provided that the above
|
||||
// copyright notice and this permission notice appear in all copies.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
// NOTE: Due to the following build constraints, this file will only be compiled
|
||||
// when the code is not running on Google App Engine, compiled by GopherJS, and
|
||||
// "-tags safe" is not added to the go build command line. The "disableunsafe"
|
||||
// tag is deprecated and thus should not be used.
|
||||
// Go versions prior to 1.4 are disabled because they use a different layout
|
||||
// for interfaces which make the implementation of unsafeReflectValue more complex.
|
||||
// +build !js,!appengine,!safe,!disableunsafe,go1.4
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const (
|
||||
// UnsafeDisabled is a build-time constant which specifies whether or
|
||||
// not access to the unsafe package is available.
|
||||
UnsafeDisabled = false
|
||||
|
||||
// ptrSize is the size of a pointer on the current arch.
|
||||
ptrSize = unsafe.Sizeof((*byte)(nil))
|
||||
)
|
||||
|
||||
type flag uintptr
|
||||
|
||||
var (
|
||||
// flagRO indicates whether the value field of a reflect.Value
|
||||
// is read-only.
|
||||
flagRO flag
|
||||
|
||||
// flagAddr indicates whether the address of the reflect.Value's
|
||||
// value may be taken.
|
||||
flagAddr flag
|
||||
)
|
||||
|
||||
// flagKindMask holds the bits that make up the kind
|
||||
// part of the flags field. In all the supported versions,
|
||||
// it is in the lower 5 bits.
|
||||
const flagKindMask = flag(0x1f)
|
||||
|
||||
// Different versions of Go have used different
|
||||
// bit layouts for the flags type. This table
|
||||
// records the known combinations.
|
||||
var okFlags = []struct {
|
||||
ro, addr flag
|
||||
}{{
|
||||
// From Go 1.4 to 1.5
|
||||
ro: 1 << 5,
|
||||
addr: 1 << 7,
|
||||
}, {
|
||||
// Up to Go tip.
|
||||
ro: 1<<5 | 1<<6,
|
||||
addr: 1 << 8,
|
||||
}}
|
||||
|
||||
var flagValOffset = func() uintptr {
|
||||
field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
|
||||
if !ok {
|
||||
panic("reflect.Value has no flag field")
|
||||
}
|
||||
return field.Offset
|
||||
}()
|
||||
|
||||
// flagField returns a pointer to the flag field of a reflect.Value.
|
||||
func flagField(v *reflect.Value) *flag {
|
||||
return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))
|
||||
}
|
||||
|
||||
// unsafeReflectValue converts the passed reflect.Value into one that bypasses
|
||||
// the typical safety restrictions preventing access to unaddressable and
|
||||
// unexported data. It works by digging the raw pointer to the underlying
|
||||
// value out of the protected value and generating a new unprotected (unsafe)
|
||||
// reflect.Value to it.
|
||||
//
|
||||
// This allows us to check for implementations of the Stringer and error
|
||||
// interfaces to be used for pretty printing ordinarily unaddressable and
|
||||
// inaccessible values such as unexported struct fields.
|
||||
func unsafeReflectValue(v reflect.Value) reflect.Value {
|
||||
if !v.IsValid() || (v.CanInterface() && v.CanAddr()) {
|
||||
return v
|
||||
}
|
||||
flagFieldPtr := flagField(&v)
|
||||
*flagFieldPtr &^= flagRO
|
||||
*flagFieldPtr |= flagAddr
|
||||
return v
|
||||
}
|
||||
|
||||
// Sanity checks against future reflect package changes
|
||||
// to the type or semantics of the Value.flag field.
|
||||
func init() {
|
||||
field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
|
||||
if !ok {
|
||||
panic("reflect.Value has no flag field")
|
||||
}
|
||||
if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {
|
||||
panic("reflect.Value flag field has changed kind")
|
||||
}
|
||||
type t0 int
|
||||
var t struct {
|
||||
A t0
|
||||
// t0 will have flagEmbedRO set.
|
||||
t0
|
||||
// a will have flagStickyRO set
|
||||
a t0
|
||||
}
|
||||
vA := reflect.ValueOf(t).FieldByName("A")
|
||||
va := reflect.ValueOf(t).FieldByName("a")
|
||||
vt0 := reflect.ValueOf(t).FieldByName("t0")
|
||||
|
||||
// Infer flagRO from the difference between the flags
|
||||
// for the (otherwise identical) fields in t.
|
||||
flagPublic := *flagField(&vA)
|
||||
flagWithRO := *flagField(&va) | *flagField(&vt0)
|
||||
flagRO = flagPublic ^ flagWithRO
|
||||
|
||||
// Infer flagAddr from the difference between a value
|
||||
// taken from a pointer and not.
|
||||
vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A")
|
||||
flagNoPtr := *flagField(&vA)
|
||||
flagPtr := *flagField(&vPtrA)
|
||||
flagAddr = flagNoPtr ^ flagPtr
|
||||
|
||||
// Check that the inferred flags tally with one of the known versions.
|
||||
for _, f := range okFlags {
|
||||
if flagRO == f.ro && flagAddr == f.addr {
|
||||
return
|
||||
}
|
||||
}
|
||||
panic("reflect.Value read-only flag has changed semantics")
|
||||
}
|
|
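What the bypass buys in practice (editorial sketch, not part of the vendored file): values behind unexported struct fields can still have their Stringer consulted when dumping. The spew import path, the id/record types, and the value 42 are assumptions for illustration.

package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

type id int

func (i id) String() string { return fmt.Sprintf("id-%d", int(i)) }

type record struct {
	key id // unexported: fmt cannot call String() here; spew, with unsafe available, can
}

func main() {
	spew.Dump(record{key: 42})
}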
@@ -0,0 +1,38 @@
|
|||
// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
|
||||
//
|
||||
// Permission to use, copy, modify, and distribute this software for any
|
||||
// purpose with or without fee is hereby granted, provided that the above
|
||||
// copyright notice and this permission notice appear in all copies.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
// NOTE: Due to the following build constraints, this file will only be compiled
|
||||
// when the code is running on Google App Engine, compiled by GopherJS, or
|
||||
// "-tags safe" is added to the go build command line. The "disableunsafe"
|
||||
// tag is deprecated and thus should not be used.
|
||||
// +build js appengine safe disableunsafe !go1.4
|
||||
|
||||
package spew
|
||||
|
||||
import "reflect"
|
||||
|
||||
const (
|
||||
// UnsafeDisabled is a build-time constant which specifies whether or
|
||||
// not access to the unsafe package is available.
|
||||
UnsafeDisabled = true
|
||||
)
|
||||
|
||||
// unsafeReflectValue typically converts the passed reflect.Value into one
|
||||
// that bypasses the typical safety restrictions preventing access to
|
||||
// unaddressable and unexported data. However, doing this relies on access to
|
||||
// the unsafe package. This is a stub version which simply returns the passed
|
||||
// reflect.Value when the unsafe package is not available.
|
||||
func unsafeReflectValue(v reflect.Value) reflect.Value {
|
||||
return v
|
||||
}
|
|
@@ -0,0 +1,341 @@
|
|||
/*
|
||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// Some constants in the form of bytes to avoid string overhead. This mirrors
|
||||
// the technique used in the fmt package.
|
||||
var (
|
||||
panicBytes = []byte("(PANIC=")
|
||||
plusBytes = []byte("+")
|
||||
iBytes = []byte("i")
|
||||
trueBytes = []byte("true")
|
||||
falseBytes = []byte("false")
|
||||
interfaceBytes = []byte("(interface {})")
|
||||
commaNewlineBytes = []byte(",\n")
|
||||
newlineBytes = []byte("\n")
|
||||
openBraceBytes = []byte("{")
|
||||
openBraceNewlineBytes = []byte("{\n")
|
||||
closeBraceBytes = []byte("}")
|
||||
asteriskBytes = []byte("*")
|
||||
colonBytes = []byte(":")
|
||||
colonSpaceBytes = []byte(": ")
|
||||
openParenBytes = []byte("(")
|
||||
closeParenBytes = []byte(")")
|
||||
spaceBytes = []byte(" ")
|
||||
pointerChainBytes = []byte("->")
|
||||
nilAngleBytes = []byte("<nil>")
|
||||
maxNewlineBytes = []byte("<max depth reached>\n")
|
||||
maxShortBytes = []byte("<max>")
|
||||
circularBytes = []byte("<already shown>")
|
||||
circularShortBytes = []byte("<shown>")
|
||||
invalidAngleBytes = []byte("<invalid>")
|
||||
openBracketBytes = []byte("[")
|
||||
closeBracketBytes = []byte("]")
|
||||
percentBytes = []byte("%")
|
||||
precisionBytes = []byte(".")
|
||||
openAngleBytes = []byte("<")
|
||||
closeAngleBytes = []byte(">")
|
||||
openMapBytes = []byte("map[")
|
||||
closeMapBytes = []byte("]")
|
||||
lenEqualsBytes = []byte("len=")
|
||||
capEqualsBytes = []byte("cap=")
|
||||
)
|
||||
|
||||
// hexDigits is used to map a decimal value to a hex digit.
|
||||
var hexDigits = "0123456789abcdef"
|
||||
|
||||
// catchPanic handles any panics that might occur during the handleMethods
|
||||
// calls.
|
||||
func catchPanic(w io.Writer, v reflect.Value) {
|
||||
if err := recover(); err != nil {
|
||||
w.Write(panicBytes)
|
||||
fmt.Fprintf(w, "%v", err)
|
||||
w.Write(closeParenBytes)
|
||||
}
|
||||
}
|
||||
|
||||
// handleMethods attempts to call the Error and String methods on the underlying
|
||||
// type the passed reflect.Value represents and outputs the result to Writer w.
|
||||
//
|
||||
// It handles panics in any called methods by catching and displaying the error
|
||||
// as the formatted value.
|
||||
func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
|
||||
// We need an interface to check if the type implements the error or
|
||||
// Stringer interface. However, the reflect package won't give us an
|
||||
// interface on certain things like unexported struct fields in order
|
||||
// to enforce visibility rules. We use unsafe, when it's available,
|
||||
// to bypass these restrictions since this package does not mutate the
|
||||
// values.
|
||||
if !v.CanInterface() {
|
||||
if UnsafeDisabled {
|
||||
return false
|
||||
}
|
||||
|
||||
v = unsafeReflectValue(v)
|
||||
}
|
||||
|
||||
// Choose whether or not to do error and Stringer interface lookups against
|
||||
// the base type or a pointer to the base type depending on settings.
|
||||
// Technically calling one of these methods with a pointer receiver can
|
||||
// mutate the value, however, types which choose to satisfy an error or
|
||||
// Stringer interface with a pointer receiver should not be mutating their
|
||||
// state inside these interface methods.
|
||||
if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
|
||||
v = unsafeReflectValue(v)
|
||||
}
|
||||
if v.CanAddr() {
|
||||
v = v.Addr()
|
||||
}
|
||||
|
||||
// Is it an error or Stringer?
|
||||
switch iface := v.Interface().(type) {
|
||||
case error:
|
||||
defer catchPanic(w, v)
|
||||
if cs.ContinueOnMethod {
|
||||
w.Write(openParenBytes)
|
||||
w.Write([]byte(iface.Error()))
|
||||
w.Write(closeParenBytes)
|
||||
w.Write(spaceBytes)
|
||||
return false
|
||||
}
|
||||
|
||||
w.Write([]byte(iface.Error()))
|
||||
return true
|
||||
|
||||
case fmt.Stringer:
|
||||
defer catchPanic(w, v)
|
||||
if cs.ContinueOnMethod {
|
||||
w.Write(openParenBytes)
|
||||
w.Write([]byte(iface.String()))
|
||||
w.Write(closeParenBytes)
|
||||
w.Write(spaceBytes)
|
||||
return false
|
||||
}
|
||||
w.Write([]byte(iface.String()))
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// printBool outputs a boolean value as true or false to Writer w.
|
||||
func printBool(w io.Writer, val bool) {
|
||||
if val {
|
||||
w.Write(trueBytes)
|
||||
} else {
|
||||
w.Write(falseBytes)
|
||||
}
|
||||
}
|
||||
|
||||
// printInt outputs a signed integer value to Writer w.
|
||||
func printInt(w io.Writer, val int64, base int) {
|
||||
w.Write([]byte(strconv.FormatInt(val, base)))
|
||||
}
|
||||
|
||||
// printUint outputs an unsigned integer value to Writer w.
|
||||
func printUint(w io.Writer, val uint64, base int) {
|
||||
w.Write([]byte(strconv.FormatUint(val, base)))
|
||||
}
|
||||
|
||||
// printFloat outputs a floating point value using the specified precision,
|
||||
// which is expected to be 32 or 64bit, to Writer w.
|
||||
func printFloat(w io.Writer, val float64, precision int) {
|
||||
w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
|
||||
}
|
||||
|
||||
// printComplex outputs a complex value using the specified float precision
|
||||
// for the real and imaginary parts to Writer w.
|
||||
func printComplex(w io.Writer, c complex128, floatPrecision int) {
|
||||
r := real(c)
|
||||
w.Write(openParenBytes)
|
||||
w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
|
||||
i := imag(c)
|
||||
if i >= 0 {
|
||||
w.Write(plusBytes)
|
||||
}
|
||||
w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
|
||||
w.Write(iBytes)
|
||||
w.Write(closeParenBytes)
|
||||
}
|
||||
|
||||
// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
|
||||
// prefix to Writer w.
|
||||
func printHexPtr(w io.Writer, p uintptr) {
|
||||
// Null pointer.
|
||||
num := uint64(p)
|
||||
if num == 0 {
|
||||
w.Write(nilAngleBytes)
|
||||
return
|
||||
}
|
||||
|
||||
// Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
|
||||
buf := make([]byte, 18)
|
||||
|
||||
// It's simpler to construct the hex string right to left.
|
||||
base := uint64(16)
|
||||
i := len(buf) - 1
|
||||
for num >= base {
|
||||
buf[i] = hexDigits[num%base]
|
||||
num /= base
|
||||
i--
|
||||
}
|
||||
buf[i] = hexDigits[num]
|
||||
|
||||
// Add '0x' prefix.
|
||||
i--
|
||||
buf[i] = 'x'
|
||||
i--
|
||||
buf[i] = '0'
|
||||
|
||||
// Strip unused leading bytes.
|
||||
buf = buf[i:]
|
||||
w.Write(buf)
|
||||
}
|
||||
|
||||
// valuesSorter implements sort.Interface to allow a slice of reflect.Value
|
||||
// elements to be sorted.
|
||||
type valuesSorter struct {
|
||||
values []reflect.Value
|
||||
strings []string // either nil or same len as values
|
||||
cs *ConfigState
|
||||
}
|
||||
|
||||
// newValuesSorter initializes a valuesSorter instance, which holds a set of
|
||||
// surrogate keys on which the data should be sorted. It uses flags in
|
||||
// ConfigState to decide if and how to populate those surrogate keys.
|
||||
func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
|
||||
vs := &valuesSorter{values: values, cs: cs}
|
||||
if canSortSimply(vs.values[0].Kind()) {
|
||||
return vs
|
||||
}
|
||||
if !cs.DisableMethods {
|
||||
vs.strings = make([]string, len(values))
|
||||
for i := range vs.values {
|
||||
b := bytes.Buffer{}
|
||||
if !handleMethods(cs, &b, vs.values[i]) {
|
||||
vs.strings = nil
|
||||
break
|
||||
}
|
||||
vs.strings[i] = b.String()
|
||||
}
|
||||
}
|
||||
if vs.strings == nil && cs.SpewKeys {
|
||||
vs.strings = make([]string, len(values))
|
||||
for i := range vs.values {
|
||||
vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
|
||||
}
|
||||
}
|
||||
return vs
|
||||
}
|
||||
|
||||
// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
|
||||
// directly, or whether it should be considered for sorting by surrogate keys
|
||||
// (if the ConfigState allows it).
|
||||
func canSortSimply(kind reflect.Kind) bool {
|
||||
// This switch parallels valueSortLess, except for the default case.
|
||||
switch kind {
|
||||
case reflect.Bool:
|
||||
return true
|
||||
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
|
||||
return true
|
||||
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
||||
return true
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return true
|
||||
case reflect.String:
|
||||
return true
|
||||
case reflect.Uintptr:
|
||||
return true
|
||||
case reflect.Array:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Len returns the number of values in the slice. It is part of the
|
||||
// sort.Interface implementation.
|
||||
func (s *valuesSorter) Len() int {
|
||||
return len(s.values)
|
||||
}
|
||||
|
||||
// Swap swaps the values at the passed indices. It is part of the
|
||||
// sort.Interface implementation.
|
||||
func (s *valuesSorter) Swap(i, j int) {
|
||||
s.values[i], s.values[j] = s.values[j], s.values[i]
|
||||
if s.strings != nil {
|
||||
s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
|
||||
}
|
||||
}
|
||||
|
||||
// valueSortLess returns whether the first value should sort before the second
|
||||
// value. It is used by valuesSorter.Less as part of the sort.Interface
|
||||
// implementation.
|
||||
func valueSortLess(a, b reflect.Value) bool {
|
||||
switch a.Kind() {
|
||||
case reflect.Bool:
|
||||
return !a.Bool() && b.Bool()
|
||||
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
|
||||
return a.Int() < b.Int()
|
||||
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
||||
return a.Uint() < b.Uint()
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return a.Float() < b.Float()
|
||||
case reflect.String:
|
||||
return a.String() < b.String()
|
||||
case reflect.Uintptr:
|
||||
return a.Uint() < b.Uint()
|
||||
case reflect.Array:
|
||||
// Compare the contents of both arrays.
|
||||
l := a.Len()
|
||||
for i := 0; i < l; i++ {
|
||||
av := a.Index(i)
|
||||
bv := b.Index(i)
|
||||
if av.Interface() == bv.Interface() {
|
||||
continue
|
||||
}
|
||||
return valueSortLess(av, bv)
|
||||
}
|
||||
}
|
||||
return a.String() < b.String()
|
||||
}
|
||||
|
||||
// Less returns whether the value at index i should sort before the
|
||||
// value at index j. It is part of the sort.Interface implementation.
|
||||
func (s *valuesSorter) Less(i, j int) bool {
|
||||
if s.strings == nil {
|
||||
return valueSortLess(s.values[i], s.values[j])
|
||||
}
|
||||
return s.strings[i] < s.strings[j]
|
||||
}
|
||||
|
||||
// sortValues is a sort function that handles both native types and any type that
|
||||
// can be converted to error or Stringer. Other inputs are sorted according to
|
||||
// their Value.String() value to ensure display stability.
|
||||
func sortValues(values []reflect.Value, cs *ConfigState) {
|
||||
if len(values) == 0 {
|
||||
return
|
||||
}
|
||||
sort.Sort(newValuesSorter(values, cs))
|
||||
}
|
|
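How this sorter is reached in practice (editorial sketch, not part of the vendored file): SortKeys covers primitive map keys directly, while SpewKeys supplies surrogate strings for key types that are neither primitives nor error/Stringer implementations. The spew import path, the pair type, and the sample maps are assumptions for illustration.

package main

import "github.com/davecgh/go-spew/spew"

type pair struct{ A, B int } // not a primitive kind and not a Stringer

func main() {
	cs := spew.ConfigState{Indent: " ", SortKeys: true, SpewKeys: true}

	// Primitive keys: valueSortLess orders them directly.
	cs.Dump(map[string]int{"b": 2, "a": 1, "c": 3})

	// Struct keys: SpewKeys falls back to sorting by their spewed strings.
	cs.Dump(map[pair]string{{2, 0}: "second", {1, 0}: "first"})
}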
@@ -0,0 +1,306 @@
|
|||
/*
|
||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
// ConfigState houses the configuration options used by spew to format and
|
||||
// display values. There is a global instance, Config, that is used to control
|
||||
// all top-level Formatter and Dump functionality. Each ConfigState instance
|
||||
// provides methods equivalent to the top-level functions.
|
||||
//
|
||||
// The zero value for ConfigState provides no indentation. You would typically
|
||||
// want to set it to a space or a tab.
|
||||
//
|
||||
// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
|
||||
// with default settings. See the documentation of NewDefaultConfig for default
|
||||
// values.
|
||||
type ConfigState struct {
|
||||
// Indent specifies the string to use for each indentation level. The
|
||||
// global config instance that all top-level functions use set this to a
|
||||
// single space by default. If you would like more indentation, you might
|
||||
// set this to a tab with "\t" or perhaps two spaces with " ".
|
||||
Indent string
|
||||
|
||||
// MaxDepth controls the maximum number of levels to descend into nested
|
||||
// data structures. The default, 0, means there is no limit.
|
||||
//
|
||||
// NOTE: Circular data structures are properly detected, so it is not
|
||||
// necessary to set this value unless you specifically want to limit deeply
|
||||
// nested data structures.
|
||||
MaxDepth int
|
||||
|
||||
// DisableMethods specifies whether or not error and Stringer interfaces are
|
||||
// invoked for types that implement them.
|
||||
DisableMethods bool
|
||||
|
||||
// DisablePointerMethods specifies whether or not to check for and invoke
|
||||
// error and Stringer interfaces on types which only accept a pointer
|
||||
// receiver when the current type is not a pointer.
|
||||
//
|
||||
// NOTE: This might be an unsafe action since calling one of these methods
|
||||
// with a pointer receiver could technically mutate the value, however,
|
||||
// in practice, types which choose to satisfy an error or Stringer
|
||||
// interface with a pointer receiver should not be mutating their state
|
||||
// inside these interface methods. As a result, this option relies on
|
||||
// access to the unsafe package, so it will not have any effect when
|
||||
// running in environments without access to the unsafe package such as
|
||||
// Google App Engine or with the "safe" build tag specified.
|
||||
DisablePointerMethods bool
|
||||
|
||||
// DisablePointerAddresses specifies whether to disable the printing of
|
||||
// pointer addresses. This is useful when diffing data structures in tests.
|
||||
DisablePointerAddresses bool
|
||||
|
||||
// DisableCapacities specifies whether to disable the printing of capacities
|
||||
// for arrays, slices, maps and channels. This is useful when diffing
|
||||
// data structures in tests.
|
||||
DisableCapacities bool
|
||||
|
||||
// ContinueOnMethod specifies whether or not recursion should continue once
|
||||
// a custom error or Stringer interface is invoked. The default, false,
|
||||
// means it will print the results of invoking the custom error or Stringer
|
||||
// interface and return immediately instead of continuing to recurse into
|
||||
// the internals of the data type.
|
||||
//
|
||||
// NOTE: This flag does not have any effect if method invocation is disabled
|
||||
// via the DisableMethods or DisablePointerMethods options.
|
||||
ContinueOnMethod bool
|
||||
|
||||
// SortKeys specifies map keys should be sorted before being printed. Use
|
||||
// this to have a more deterministic, diffable output. Note that only
|
||||
// native types (bool, int, uint, floats, uintptr and string) and types
|
||||
// that support the error or Stringer interfaces (if methods are
|
||||
// enabled) are supported, with other types sorted according to the
|
||||
// reflect.Value.String() output which guarantees display stability.
|
||||
SortKeys bool
|
||||
|
||||
// SpewKeys specifies that, as a last resort attempt, map keys should
|
||||
// be spewed to strings and sorted by those strings. This is only
|
||||
// considered if SortKeys is true.
|
||||
SpewKeys bool
|
||||
}
|
||||
|
||||
// Config is the active configuration of the top-level functions.
|
||||
// The configuration can be changed by modifying the contents of spew.Config.
|
||||
var Config = ConfigState{Indent: " "}
|
||||
|
||||
// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the formatted string as a value that satisfies error. See NewFormatter
|
||||
// for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
|
||||
return fmt.Errorf(format, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
|
||||
return fmt.Fprint(w, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
|
||||
return fmt.Fprintf(w, format, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
|
||||
// passed with a Formatter interface returned by c.NewFormatter. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
|
||||
return fmt.Fprintln(w, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Print is a wrapper for fmt.Print that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
|
||||
return fmt.Print(c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Printf is a wrapper for fmt.Printf that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
|
||||
return fmt.Printf(format, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Println is a wrapper for fmt.Println that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
|
||||
return fmt.Println(c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the resulting string. See NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Sprint(a ...interface{}) string {
|
||||
return fmt.Sprint(c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the resulting string. See NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
|
||||
return fmt.Sprintf(format, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
|
||||
// were passed with a Formatter interface returned by c.NewFormatter. It
|
||||
// returns the resulting string. See NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Sprintln(a ...interface{}) string {
|
||||
return fmt.Sprintln(c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
/*
|
||||
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
|
||||
interface. As a result, it integrates cleanly with standard fmt package
|
||||
printing functions. The formatter is useful for inline printing of smaller data
|
||||
types similar to the standard %v format specifier.
|
||||
|
||||
The custom formatter only responds to the %v (most compact), %+v (adds pointer
|
||||
addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
|
||||
combinations. Any other verbs such as %x and %q will be sent to the
|
||||
standard fmt package for formatting. In addition, the custom formatter ignores
|
||||
the width and precision arguments (however they will still work on the format
|
||||
specifiers not handled by the custom formatter).
|
||||
|
||||
Typically this function shouldn't be called directly. It is much easier to make
|
||||
use of the custom formatter by calling one of the convenience functions such as
|
||||
c.Printf, c.Println, or c.Sprintf.
|
||||
*/
|
||||
func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
|
||||
return newFormatter(c, v)
|
||||
}
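A short illustration of the Formatter in use (editorial sketch, not part of the vendored file): the value returned by NewFormatter is handed straight to the standard fmt verbs described above. The spew import path and the point type are assumptions for illustration.

package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

type point struct{ X, Y int }

func main() {
	cs := spew.NewDefaultConfig()
	p := &point{X: 1, Y: 2}
	// %v stays compact, %+v adds pointer addresses, %#v adds types.
	fmt.Printf("%v\n%+v\n%#v\n", cs.NewFormatter(p), cs.NewFormatter(p), cs.NewFormatter(p))
}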
|
||||
|
||||
// Fdump formats and displays the passed arguments to io.Writer w. It formats
|
||||
// exactly the same as Dump.
|
||||
func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
|
||||
fdump(c, w, a...)
|
||||
}
|
||||
|
||||
/*
|
||||
Dump displays the passed parameters to standard out with newlines, customizable
|
||||
indentation, and additional debug information such as complete types and all
|
||||
pointer addresses used to indirect to the final value. It provides the
|
||||
following features over the built-in printing facilities provided by the fmt
|
||||
package:
|
||||
|
||||
* Pointers are dereferenced and followed
|
||||
* Circular data structures are detected and handled properly
|
||||
* Custom Stringer/error interfaces are optionally invoked, including
|
||||
on unexported types
|
||||
* Custom types which only implement the Stringer/error interfaces via
|
||||
a pointer receiver are optionally invoked when passing non-pointer
|
||||
variables
|
||||
* Byte arrays and slices are dumped like the hexdump -C command which
|
||||
includes offsets, byte values in hex, and ASCII output
|
||||
|
||||
The configuration options are controlled by modifying the public members
|
||||
of c. See ConfigState for options documentation.
|
||||
|
||||
See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
|
||||
get the formatted result as a string.
|
||||
*/
|
||||
func (c *ConfigState) Dump(a ...interface{}) {
|
||||
fdump(c, os.Stdout, a...)
|
||||
}
|
||||
|
||||
// Sdump returns a string with the passed arguments formatted exactly the same
|
||||
// as Dump.
|
||||
func (c *ConfigState) Sdump(a ...interface{}) string {
|
||||
var buf bytes.Buffer
|
||||
fdump(c, &buf, a...)
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// convertArgs accepts a slice of arguments and returns a slice of the same
|
||||
// length with each argument converted to a spew Formatter interface using
|
||||
// the ConfigState associated with c.
|
||||
func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
|
||||
formatters = make([]interface{}, len(args))
|
||||
for index, arg := range args {
|
||||
formatters[index] = newFormatter(c, arg)
|
||||
}
|
||||
return formatters
|
||||
}
|
||||
|
||||
// NewDefaultConfig returns a ConfigState with the following default settings.
|
||||
//
|
||||
// Indent: " "
|
||||
// MaxDepth: 0
|
||||
// DisableMethods: false
|
||||
// DisablePointerMethods: false
|
||||
// ContinueOnMethod: false
|
||||
// SortKeys: false
|
||||
func NewDefaultConfig() *ConfigState {
|
||||
return &ConfigState{Indent: " "}
|
||||
}
|
|
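Putting the options together (editorial sketch, not part of the vendored file): a local ConfigState keeps test-oriented settings out of the global spew.Config. The import path and the sample map are assumptions for illustration.

package main

import "github.com/davecgh/go-spew/spew"

func main() {
	cs := spew.ConfigState{
		Indent:                  "\t", // tab indentation instead of the default single space
		SortKeys:                true, // deterministic map output
		DisablePointerAddresses: true, // keep test diffs stable across runs
		DisableCapacities:       true,
	}
	cs.Dump(map[string]int{"b": 2, "a": 1})
}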
@@ -0,0 +1,211 @@
|
|||
/*
|
||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
/*
|
||||
Package spew implements a deep pretty printer for Go data structures to aid in
|
||||
debugging.
|
||||
|
||||
A quick overview of the additional features spew provides over the built-in
|
||||
printing facilities for Go data types are as follows:
|
||||
|
||||
* Pointers are dereferenced and followed
|
||||
* Circular data structures are detected and handled properly
|
||||
* Custom Stringer/error interfaces are optionally invoked, including
|
||||
on unexported types
|
||||
* Custom types which only implement the Stringer/error interfaces via
|
||||
a pointer receiver are optionally invoked when passing non-pointer
|
||||
variables
|
||||
* Byte arrays and slices are dumped like the hexdump -C command which
|
||||
includes offsets, byte values in hex, and ASCII output (only when using
|
||||
Dump style)
|
||||
|
||||
There are two different approaches spew allows for dumping Go data structures:
|
||||
|
||||
* Dump style which prints with newlines, customizable indentation,
|
||||
and additional debug information such as types and all pointer addresses
|
||||
used to indirect to the final value
|
||||
* A custom Formatter interface that integrates cleanly with the standard fmt
|
||||
package and replaces %v, %+v, %#v, and %#+v to provide inline printing
|
||||
similar to the default %v while providing the additional functionality
|
||||
outlined above and passing unsupported format verbs such as %x and %q
|
||||
along to fmt
|
||||
|
||||
Quick Start
|
||||
|
||||
This section demonstrates how to quickly get started with spew. See the
|
||||
sections below for further details on formatting and configuration options.
|
||||
|
||||
To dump a variable with full newlines, indentation, type, and pointer
|
||||
information use Dump, Fdump, or Sdump:
|
||||
spew.Dump(myVar1, myVar2, ...)
|
||||
spew.Fdump(someWriter, myVar1, myVar2, ...)
|
||||
str := spew.Sdump(myVar1, myVar2, ...)
|
||||
|
||||
Alternatively, if you would prefer to use format strings with a compacted inline
|
||||
printing style, use the convenience wrappers Printf, Fprintf, etc with
|
||||
%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
|
||||
%#+v (adds types and pointer addresses):
|
||||
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||
spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||
spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||
|
||||
Configuration Options
|
||||
|
||||
Configuration of spew is handled by fields in the ConfigState type. For
|
||||
convenience, all of the top-level functions use a global state available
|
||||
via the spew.Config global.
|
||||
|
||||
It is also possible to create a ConfigState instance that provides methods
|
||||
equivalent to the top-level functions. This allows concurrent configuration
|
||||
options. See the ConfigState documentation for more details.
|
||||
|
||||
The following configuration options are available:
|
||||
* Indent
|
||||
String to use for each indentation level for Dump functions.
|
||||
It is a single space by default. A popular alternative is "\t".
|
||||
|
||||
* MaxDepth
|
||||
Maximum number of levels to descend into nested data structures.
|
||||
There is no limit by default.
|
||||
|
||||
* DisableMethods
|
||||
Disables invocation of error and Stringer interface methods.
|
||||
Method invocation is enabled by default.
|
||||
|
||||
* DisablePointerMethods
|
||||
Disables invocation of error and Stringer interface methods on types
|
||||
which only accept pointer receivers from non-pointer variables.
|
||||
Pointer method invocation is enabled by default.
|
||||
|
||||
* DisablePointerAddresses
|
||||
DisablePointerAddresses specifies whether to disable the printing of
|
||||
pointer addresses. This is useful when diffing data structures in tests.
|
||||
|
||||
* DisableCapacities
|
||||
DisableCapacities specifies whether to disable the printing of
|
||||
capacities for arrays, slices, maps and channels. This is useful when
|
||||
diffing data structures in tests.
|
||||
|
||||
* ContinueOnMethod
|
||||
Enables recursion into types after invoking error and Stringer interface
|
||||
methods. Recursion after method invocation is disabled by default.
|
||||
|
||||
* SortKeys
|
||||
Specifies map keys should be sorted before being printed. Use
|
||||
this to have a more deterministic, diffable output. Note that
|
||||
only native types (bool, int, uint, floats, uintptr and string)
|
||||
and types which implement error or Stringer interfaces are
|
||||
supported with other types sorted according to the
|
||||
reflect.Value.String() output which guarantees display
|
||||
stability. Natural map order is used by default.
|
||||
|
||||
* SpewKeys
|
||||
Specifies that, as a last resort attempt, map keys should be
|
||||
spewed to strings and sorted by those strings. This is only
|
||||
considered if SortKeys is true.
|
||||
|
||||
Dump Usage
|
||||
|
||||
Simply call spew.Dump with a list of variables you want to dump:
|
||||
|
||||
spew.Dump(myVar1, myVar2, ...)
|
||||
|
||||
You may also call spew.Fdump if you would prefer to output to an arbitrary
|
||||
io.Writer. For example, to dump to standard error:
|
||||
|
||||
spew.Fdump(os.Stderr, myVar1, myVar2, ...)
|
||||
|
||||
A third option is to call spew.Sdump to get the formatted output as a string:
|
||||
|
||||
str := spew.Sdump(myVar1, myVar2, ...)
|
||||
|
||||
Sample Dump Output
|
||||
|
||||
See the Dump example for details on the setup of the types and variables being
|
||||
shown here.
|
||||
|
||||
(main.Foo) {
|
||||
unexportedField: (*main.Bar)(0xf84002e210)({
|
||||
flag: (main.Flag) flagTwo,
|
||||
data: (uintptr) <nil>
|
||||
}),
|
||||
ExportedField: (map[interface {}]interface {}) (len=1) {
|
||||
(string) (len=3) "one": (bool) true
|
||||
}
|
||||
}
|
||||
|
||||
Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
|
||||
command as shown.
|
||||
([]uint8) (len=32 cap=32) {
|
||||
00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
|
||||
00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
|
||||
00000020 31 32 |12|
|
||||
}
|
||||
|
||||
Custom Formatter
|
||||
|
||||
Spew provides a custom formatter that implements the fmt.Formatter interface
|
||||
so that it integrates cleanly with standard fmt package printing functions. The
|
||||
formatter is useful for inline printing of smaller data types similar to the
|
||||
standard %v format specifier.
|
||||
|
||||
The custom formatter only responds to the %v (most compact), %+v (adds pointer
|
||||
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
|
||||
combinations. Any other verbs such as %x and %q will be sent to the the
|
||||
standard fmt package for formatting. In addition, the custom formatter ignores
|
||||
the width and precision arguments (however they will still work on the format
|
||||
specifiers not handled by the custom formatter).
|
||||
|
||||
Custom Formatter Usage
|
||||
|
||||
The simplest way to make use of the spew custom formatter is to call one of the
|
||||
convenience functions such as spew.Printf, spew.Println, or spew.Printf. The
|
||||
functions have syntax you are most likely already familiar with:
|
||||
|
||||
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||
spew.Println(myVar, myVar2)
|
||||
spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||
spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||
|
||||
See the Index for the full list convenience functions.
|
||||
|
||||
Sample Formatter Output
|
||||
|
||||
Double pointer to a uint8:
|
||||
%v: <**>5
|
||||
%+v: <**>(0xf8400420d0->0xf8400420c8)5
|
||||
%#v: (**uint8)5
|
||||
%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
|
||||
|
||||
Pointer to circular struct with a uint8 field and a pointer to itself:
|
||||
%v: <*>{1 <*><shown>}
|
||||
%+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
|
||||
%#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
|
||||
%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
|
||||
|
||||
See the Printf example for details on the setup of variables being shown
|
||||
here.
|
||||
|
||||
Errors
|
||||
|
||||
Since it is possible for custom Stringer/error interfaces to panic, spew
|
||||
detects them and handles them internally by printing the panic information
|
||||
inline with the output. Since spew is intended to provide deep pretty printing
|
||||
capabilities on structures, it intentionally does not return any errors.
|
||||
*/
|
||||
package spew
|
|
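A minimal, self-contained sketch of the API documented above (the variable and type names here are illustrative only, not part of the vendored file, and the import path is the one normally used for this package):

	package main

	import "github.com/davecgh/go-spew/spew"

	type pair struct {
		Name  string
		Score int
	}

	func main() {
		v := map[string]pair{"a": {Name: "alpha", Score: 1}}
		spew.Dump(v)                    // multi-line dump with types to stdout
		s := spew.Sdump(v)              // same output captured as a string
		_ = s
		spew.Config.SortKeys = true     // global config: deterministic map order
		spew.Printf("inline: %+v\n", v) // compact inline form via the custom formatter
	}
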
@@ -0,0 +1,509 @@
/*
 * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

package spew

import (
	"bytes"
	"encoding/hex"
	"fmt"
	"io"
	"os"
	"reflect"
	"regexp"
	"strconv"
	"strings"
)

var (
	// uint8Type is a reflect.Type representing a uint8. It is used to
	// convert cgo types to uint8 slices for hexdumping.
	uint8Type = reflect.TypeOf(uint8(0))

	// cCharRE is a regular expression that matches a cgo char.
	// It is used to detect character arrays to hexdump them.
	cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)

	// cUnsignedCharRE is a regular expression that matches a cgo unsigned
	// char. It is used to detect unsigned character arrays to hexdump
	// them.
	cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)

	// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
	// It is used to detect uint8_t arrays to hexdump them.
	cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
)

// dumpState contains information about the state of a dump operation.
type dumpState struct {
	w                io.Writer
	depth            int
	pointers         map[uintptr]int
	ignoreNextType   bool
	ignoreNextIndent bool
	cs               *ConfigState
}

// indent performs indentation according to the depth level and cs.Indent
// option.
func (d *dumpState) indent() {
	if d.ignoreNextIndent {
		d.ignoreNextIndent = false
		return
	}
	d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
}

// unpackValue returns values inside of non-nil interfaces when possible.
// This is useful for data types like structs, arrays, slices, and maps which
// can contain varying types packed inside an interface.
func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
	if v.Kind() == reflect.Interface && !v.IsNil() {
		v = v.Elem()
	}
	return v
}

// dumpPtr handles formatting of pointers by indirecting them as necessary.
func (d *dumpState) dumpPtr(v reflect.Value) {
	// Remove pointers at or below the current depth from map used to detect
	// circular refs.
	for k, depth := range d.pointers {
		if depth >= d.depth {
			delete(d.pointers, k)
		}
	}

	// Keep list of all dereferenced pointers to show later.
	pointerChain := make([]uintptr, 0)

	// Figure out how many levels of indirection there are by dereferencing
	// pointers and unpacking interfaces down the chain while detecting circular
	// references.
	nilFound := false
	cycleFound := false
	indirects := 0
	ve := v
	for ve.Kind() == reflect.Ptr {
		if ve.IsNil() {
			nilFound = true
			break
		}
		indirects++
		addr := ve.Pointer()
		pointerChain = append(pointerChain, addr)
		if pd, ok := d.pointers[addr]; ok && pd < d.depth {
			cycleFound = true
			indirects--
			break
		}
		d.pointers[addr] = d.depth

		ve = ve.Elem()
		if ve.Kind() == reflect.Interface {
			if ve.IsNil() {
				nilFound = true
				break
			}
			ve = ve.Elem()
		}
	}

	// Display type information.
	d.w.Write(openParenBytes)
	d.w.Write(bytes.Repeat(asteriskBytes, indirects))
	d.w.Write([]byte(ve.Type().String()))
	d.w.Write(closeParenBytes)

	// Display pointer information.
	if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
		d.w.Write(openParenBytes)
		for i, addr := range pointerChain {
			if i > 0 {
				d.w.Write(pointerChainBytes)
			}
			printHexPtr(d.w, addr)
		}
		d.w.Write(closeParenBytes)
	}

	// Display dereferenced value.
	d.w.Write(openParenBytes)
	switch {
	case nilFound:
		d.w.Write(nilAngleBytes)

	case cycleFound:
		d.w.Write(circularBytes)

	default:
		d.ignoreNextType = true
		d.dump(ve)
	}
	d.w.Write(closeParenBytes)
}

// dumpSlice handles formatting of arrays and slices. Byte (uint8 under
// reflection) arrays and slices are dumped in hexdump -C fashion.
func (d *dumpState) dumpSlice(v reflect.Value) {
	// Determine whether this type should be hex dumped or not. Also,
	// for types which should be hexdumped, try to use the underlying data
	// first, then fall back to trying to convert them to a uint8 slice.
	var buf []uint8
	doConvert := false
	doHexDump := false
	numEntries := v.Len()
	if numEntries > 0 {
		vt := v.Index(0).Type()
		vts := vt.String()
		switch {
		// C types that need to be converted.
		case cCharRE.MatchString(vts):
			fallthrough
		case cUnsignedCharRE.MatchString(vts):
			fallthrough
		case cUint8tCharRE.MatchString(vts):
			doConvert = true

		// Try to use existing uint8 slices and fall back to converting
		// and copying if that fails.
		case vt.Kind() == reflect.Uint8:
			// We need an addressable interface to convert the type
			// to a byte slice. However, the reflect package won't
			// give us an interface on certain things like
			// unexported struct fields in order to enforce
			// visibility rules. We use unsafe, when available, to
			// bypass these restrictions since this package does not
			// mutate the values.
			vs := v
			if !vs.CanInterface() || !vs.CanAddr() {
				vs = unsafeReflectValue(vs)
			}
			if !UnsafeDisabled {
				vs = vs.Slice(0, numEntries)

				// Use the existing uint8 slice if it can be
				// type asserted.
				iface := vs.Interface()
				if slice, ok := iface.([]uint8); ok {
					buf = slice
					doHexDump = true
					break
				}
			}

			// The underlying data needs to be converted if it can't
			// be type asserted to a uint8 slice.
			doConvert = true
		}

		// Copy and convert the underlying type if needed.
		if doConvert && vt.ConvertibleTo(uint8Type) {
			// Convert and copy each element into a uint8 byte
			// slice.
			buf = make([]uint8, numEntries)
			for i := 0; i < numEntries; i++ {
				vv := v.Index(i)
				buf[i] = uint8(vv.Convert(uint8Type).Uint())
			}
			doHexDump = true
		}
	}

	// Hexdump the entire slice as needed.
	if doHexDump {
		indent := strings.Repeat(d.cs.Indent, d.depth)
		str := indent + hex.Dump(buf)
		str = strings.Replace(str, "\n", "\n"+indent, -1)
		str = strings.TrimRight(str, d.cs.Indent)
		d.w.Write([]byte(str))
		return
	}

	// Recursively call dump for each item.
	for i := 0; i < numEntries; i++ {
		d.dump(d.unpackValue(v.Index(i)))
		if i < (numEntries - 1) {
			d.w.Write(commaNewlineBytes)
		} else {
			d.w.Write(newlineBytes)
		}
	}
}

// dump is the main workhorse for dumping a value. It uses the passed reflect
// value to figure out what kind of object we are dealing with and formats it
// appropriately. It is a recursive function, however circular data structures
// are detected and handled properly.
func (d *dumpState) dump(v reflect.Value) {
	// Handle invalid reflect values immediately.
	kind := v.Kind()
	if kind == reflect.Invalid {
		d.w.Write(invalidAngleBytes)
		return
	}

	// Handle pointers specially.
	if kind == reflect.Ptr {
		d.indent()
		d.dumpPtr(v)
		return
	}

	// Print type information unless already handled elsewhere.
	if !d.ignoreNextType {
		d.indent()
		d.w.Write(openParenBytes)
		d.w.Write([]byte(v.Type().String()))
		d.w.Write(closeParenBytes)
		d.w.Write(spaceBytes)
	}
	d.ignoreNextType = false

	// Display length and capacity if the built-in len and cap functions
	// work with the value's kind and the len/cap itself is non-zero.
	valueLen, valueCap := 0, 0
	switch v.Kind() {
	case reflect.Array, reflect.Slice, reflect.Chan:
		valueLen, valueCap = v.Len(), v.Cap()
	case reflect.Map, reflect.String:
		valueLen = v.Len()
	}
	if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
		d.w.Write(openParenBytes)
		if valueLen != 0 {
			d.w.Write(lenEqualsBytes)
			printInt(d.w, int64(valueLen), 10)
		}
		if !d.cs.DisableCapacities && valueCap != 0 {
			if valueLen != 0 {
				d.w.Write(spaceBytes)
			}
			d.w.Write(capEqualsBytes)
			printInt(d.w, int64(valueCap), 10)
		}
		d.w.Write(closeParenBytes)
		d.w.Write(spaceBytes)
	}

	// Call Stringer/error interfaces if they exist and the handle methods flag
	// is enabled
	if !d.cs.DisableMethods {
		if (kind != reflect.Invalid) && (kind != reflect.Interface) {
			if handled := handleMethods(d.cs, d.w, v); handled {
				return
			}
		}
	}

	switch kind {
	case reflect.Invalid:
		// Do nothing. We should never get here since invalid has already
		// been handled above.

	case reflect.Bool:
		printBool(d.w, v.Bool())

	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
		printInt(d.w, v.Int(), 10)

	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
		printUint(d.w, v.Uint(), 10)

	case reflect.Float32:
		printFloat(d.w, v.Float(), 32)

	case reflect.Float64:
		printFloat(d.w, v.Float(), 64)

	case reflect.Complex64:
		printComplex(d.w, v.Complex(), 32)

	case reflect.Complex128:
		printComplex(d.w, v.Complex(), 64)

	case reflect.Slice:
		if v.IsNil() {
			d.w.Write(nilAngleBytes)
			break
		}
		fallthrough

	case reflect.Array:
		d.w.Write(openBraceNewlineBytes)
		d.depth++
		if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
			d.indent()
			d.w.Write(maxNewlineBytes)
		} else {
			d.dumpSlice(v)
		}
		d.depth--
		d.indent()
		d.w.Write(closeBraceBytes)

	case reflect.String:
		d.w.Write([]byte(strconv.Quote(v.String())))

	case reflect.Interface:
		// The only time we should get here is for nil interfaces due to
		// unpackValue calls.
		if v.IsNil() {
			d.w.Write(nilAngleBytes)
		}

	case reflect.Ptr:
		// Do nothing. We should never get here since pointers have already
		// been handled above.

	case reflect.Map:
		// nil maps should be indicated as different than empty maps
		if v.IsNil() {
			d.w.Write(nilAngleBytes)
			break
		}

		d.w.Write(openBraceNewlineBytes)
		d.depth++
		if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
			d.indent()
			d.w.Write(maxNewlineBytes)
		} else {
			numEntries := v.Len()
			keys := v.MapKeys()
			if d.cs.SortKeys {
				sortValues(keys, d.cs)
			}
			for i, key := range keys {
				d.dump(d.unpackValue(key))
				d.w.Write(colonSpaceBytes)
				d.ignoreNextIndent = true
				d.dump(d.unpackValue(v.MapIndex(key)))
				if i < (numEntries - 1) {
					d.w.Write(commaNewlineBytes)
				} else {
					d.w.Write(newlineBytes)
				}
			}
		}
		d.depth--
		d.indent()
		d.w.Write(closeBraceBytes)

	case reflect.Struct:
		d.w.Write(openBraceNewlineBytes)
		d.depth++
		if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
			d.indent()
			d.w.Write(maxNewlineBytes)
		} else {
			vt := v.Type()
			numFields := v.NumField()
			for i := 0; i < numFields; i++ {
				d.indent()
				vtf := vt.Field(i)
				d.w.Write([]byte(vtf.Name))
				d.w.Write(colonSpaceBytes)
				d.ignoreNextIndent = true
				d.dump(d.unpackValue(v.Field(i)))
				if i < (numFields - 1) {
					d.w.Write(commaNewlineBytes)
				} else {
					d.w.Write(newlineBytes)
				}
			}
		}
		d.depth--
		d.indent()
		d.w.Write(closeBraceBytes)

	case reflect.Uintptr:
		printHexPtr(d.w, uintptr(v.Uint()))

	case reflect.UnsafePointer, reflect.Chan, reflect.Func:
		printHexPtr(d.w, v.Pointer())

	// There were not any other types at the time this code was written, but
	// fall back to letting the default fmt package handle it in case any new
	// types are added.
	default:
		if v.CanInterface() {
			fmt.Fprintf(d.w, "%v", v.Interface())
		} else {
			fmt.Fprintf(d.w, "%v", v.String())
		}
	}
}

// fdump is a helper function to consolidate the logic from the various public
// methods which take varying writers and config states.
func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
	for _, arg := range a {
		if arg == nil {
			w.Write(interfaceBytes)
			w.Write(spaceBytes)
			w.Write(nilAngleBytes)
			w.Write(newlineBytes)
			continue
		}

		d := dumpState{w: w, cs: cs}
		d.pointers = make(map[uintptr]int)
		d.dump(reflect.ValueOf(arg))
		d.w.Write(newlineBytes)
	}
}

// Fdump formats and displays the passed arguments to io.Writer w. It formats
// exactly the same as Dump.
func Fdump(w io.Writer, a ...interface{}) {
	fdump(&Config, w, a...)
}

// Sdump returns a string with the passed arguments formatted exactly the same
// as Dump.
func Sdump(a ...interface{}) string {
	var buf bytes.Buffer
	fdump(&Config, &buf, a...)
	return buf.String()
}

/*
Dump displays the passed parameters to standard out with newlines, customizable
indentation, and additional debug information such as complete types and all
pointer addresses used to indirect to the final value. It provides the
following features over the built-in printing facilities provided by the fmt
package:

	* Pointers are dereferenced and followed
	* Circular data structures are detected and handled properly
	* Custom Stringer/error interfaces are optionally invoked, including
	  on unexported types
	* Custom types which only implement the Stringer/error interfaces via
	  a pointer receiver are optionally invoked when passing non-pointer
	  variables
	* Byte arrays and slices are dumped like the hexdump -C command which
	  includes offsets, byte values in hex, and ASCII output

The configuration options are controlled by an exported package global,
spew.Config. See ConfigState for options documentation.

See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
get the formatted result as a string.
*/
func Dump(a ...interface{}) {
	fdump(&Config, os.Stdout, a...)
}

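As a hedged illustration of how the ConfigState options threaded through dumpState above change the output, the sketch below builds a local configuration instead of touching the spew.Config global (the value v and the os/spew imports are assumed from the earlier sketch; the option choices are just an example for test-friendly, diffable dumps):

	cs := spew.ConfigState{
		Indent:                  "\t",
		SortKeys:                true, // deterministic map ordering
		DisablePointerAddresses: true, // stable output across runs
		DisableCapacities:       true, // omit cap= noise for slices/maps/chans
	}
	cs.Fdump(os.Stderr, v) // same engine as spew.Dump, local settings
	got := cs.Sdump(v)     // capture as a string, e.g. for comparisons in tests
	_ = got
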
@@ -0,0 +1,419 @@
/*
 * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

package spew

import (
	"bytes"
	"fmt"
	"reflect"
	"strconv"
	"strings"
)

// supportedFlags is a list of all the character flags supported by fmt package.
const supportedFlags = "0-+# "

// formatState implements the fmt.Formatter interface and contains information
// about the state of a formatting operation. The NewFormatter function can
// be used to get a new Formatter which can be used directly as arguments
// in standard fmt package printing calls.
type formatState struct {
	value          interface{}
	fs             fmt.State
	depth          int
	pointers       map[uintptr]int
	ignoreNextType bool
	cs             *ConfigState
}

// buildDefaultFormat recreates the original format string without precision
// and width information to pass in to fmt.Sprintf in the case of an
// unrecognized type. Unless new types are added to the language, this
// function won't ever be called.
func (f *formatState) buildDefaultFormat() (format string) {
	buf := bytes.NewBuffer(percentBytes)

	for _, flag := range supportedFlags {
		if f.fs.Flag(int(flag)) {
			buf.WriteRune(flag)
		}
	}

	buf.WriteRune('v')

	format = buf.String()
	return format
}

// constructOrigFormat recreates the original format string including precision
// and width information to pass along to the standard fmt package. This allows
// automatic deferral of all format strings this package doesn't support.
func (f *formatState) constructOrigFormat(verb rune) (format string) {
	buf := bytes.NewBuffer(percentBytes)

	for _, flag := range supportedFlags {
		if f.fs.Flag(int(flag)) {
			buf.WriteRune(flag)
		}
	}

	if width, ok := f.fs.Width(); ok {
		buf.WriteString(strconv.Itoa(width))
	}

	if precision, ok := f.fs.Precision(); ok {
		buf.Write(precisionBytes)
		buf.WriteString(strconv.Itoa(precision))
	}

	buf.WriteRune(verb)

	format = buf.String()
	return format
}

// unpackValue returns values inside of non-nil interfaces when possible and
// ensures that types for values which have been unpacked from an interface
// are displayed when the show types flag is also set.
// This is useful for data types like structs, arrays, slices, and maps which
// can contain varying types packed inside an interface.
func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
	if v.Kind() == reflect.Interface {
		f.ignoreNextType = false
		if !v.IsNil() {
			v = v.Elem()
		}
	}
	return v
}

// formatPtr handles formatting of pointers by indirecting them as necessary.
func (f *formatState) formatPtr(v reflect.Value) {
	// Display nil if top level pointer is nil.
	showTypes := f.fs.Flag('#')
	if v.IsNil() && (!showTypes || f.ignoreNextType) {
		f.fs.Write(nilAngleBytes)
		return
	}

	// Remove pointers at or below the current depth from map used to detect
	// circular refs.
	for k, depth := range f.pointers {
		if depth >= f.depth {
			delete(f.pointers, k)
		}
	}

	// Keep list of all dereferenced pointers to possibly show later.
	pointerChain := make([]uintptr, 0)

	// Figure out how many levels of indirection there are by dereferencing
	// pointers and unpacking interfaces down the chain while detecting circular
	// references.
	nilFound := false
	cycleFound := false
	indirects := 0
	ve := v
	for ve.Kind() == reflect.Ptr {
		if ve.IsNil() {
			nilFound = true
			break
		}
		indirects++
		addr := ve.Pointer()
		pointerChain = append(pointerChain, addr)
		if pd, ok := f.pointers[addr]; ok && pd < f.depth {
			cycleFound = true
			indirects--
			break
		}
		f.pointers[addr] = f.depth

		ve = ve.Elem()
		if ve.Kind() == reflect.Interface {
			if ve.IsNil() {
				nilFound = true
				break
			}
			ve = ve.Elem()
		}
	}

	// Display type or indirection level depending on flags.
	if showTypes && !f.ignoreNextType {
		f.fs.Write(openParenBytes)
		f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
		f.fs.Write([]byte(ve.Type().String()))
		f.fs.Write(closeParenBytes)
	} else {
		if nilFound || cycleFound {
			indirects += strings.Count(ve.Type().String(), "*")
		}
		f.fs.Write(openAngleBytes)
		f.fs.Write([]byte(strings.Repeat("*", indirects)))
		f.fs.Write(closeAngleBytes)
	}

	// Display pointer information depending on flags.
	if f.fs.Flag('+') && (len(pointerChain) > 0) {
		f.fs.Write(openParenBytes)
		for i, addr := range pointerChain {
			if i > 0 {
				f.fs.Write(pointerChainBytes)
			}
			printHexPtr(f.fs, addr)
		}
		f.fs.Write(closeParenBytes)
	}

	// Display dereferenced value.
	switch {
	case nilFound:
		f.fs.Write(nilAngleBytes)

	case cycleFound:
		f.fs.Write(circularShortBytes)

	default:
		f.ignoreNextType = true
		f.format(ve)
	}
}

// format is the main workhorse for providing the Formatter interface. It
// uses the passed reflect value to figure out what kind of object we are
// dealing with and formats it appropriately. It is a recursive function,
// however circular data structures are detected and handled properly.
func (f *formatState) format(v reflect.Value) {
	// Handle invalid reflect values immediately.
	kind := v.Kind()
	if kind == reflect.Invalid {
		f.fs.Write(invalidAngleBytes)
		return
	}

	// Handle pointers specially.
	if kind == reflect.Ptr {
		f.formatPtr(v)
		return
	}

	// Print type information unless already handled elsewhere.
	if !f.ignoreNextType && f.fs.Flag('#') {
		f.fs.Write(openParenBytes)
		f.fs.Write([]byte(v.Type().String()))
		f.fs.Write(closeParenBytes)
	}
	f.ignoreNextType = false

	// Call Stringer/error interfaces if they exist and the handle methods
	// flag is enabled.
	if !f.cs.DisableMethods {
		if (kind != reflect.Invalid) && (kind != reflect.Interface) {
			if handled := handleMethods(f.cs, f.fs, v); handled {
				return
			}
		}
	}

	switch kind {
	case reflect.Invalid:
		// Do nothing. We should never get here since invalid has already
		// been handled above.

	case reflect.Bool:
		printBool(f.fs, v.Bool())

	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
		printInt(f.fs, v.Int(), 10)

	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
		printUint(f.fs, v.Uint(), 10)

	case reflect.Float32:
		printFloat(f.fs, v.Float(), 32)

	case reflect.Float64:
		printFloat(f.fs, v.Float(), 64)

	case reflect.Complex64:
		printComplex(f.fs, v.Complex(), 32)

	case reflect.Complex128:
		printComplex(f.fs, v.Complex(), 64)

	case reflect.Slice:
		if v.IsNil() {
			f.fs.Write(nilAngleBytes)
			break
		}
		fallthrough

	case reflect.Array:
		f.fs.Write(openBracketBytes)
		f.depth++
		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
			f.fs.Write(maxShortBytes)
		} else {
			numEntries := v.Len()
			for i := 0; i < numEntries; i++ {
				if i > 0 {
					f.fs.Write(spaceBytes)
				}
				f.ignoreNextType = true
				f.format(f.unpackValue(v.Index(i)))
			}
		}
		f.depth--
		f.fs.Write(closeBracketBytes)

	case reflect.String:
		f.fs.Write([]byte(v.String()))

	case reflect.Interface:
		// The only time we should get here is for nil interfaces due to
		// unpackValue calls.
		if v.IsNil() {
			f.fs.Write(nilAngleBytes)
		}

	case reflect.Ptr:
		// Do nothing. We should never get here since pointers have already
		// been handled above.

	case reflect.Map:
		// nil maps should be indicated as different than empty maps
		if v.IsNil() {
			f.fs.Write(nilAngleBytes)
			break
		}

		f.fs.Write(openMapBytes)
		f.depth++
		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
			f.fs.Write(maxShortBytes)
		} else {
			keys := v.MapKeys()
			if f.cs.SortKeys {
				sortValues(keys, f.cs)
			}
			for i, key := range keys {
				if i > 0 {
					f.fs.Write(spaceBytes)
				}
				f.ignoreNextType = true
				f.format(f.unpackValue(key))
				f.fs.Write(colonBytes)
				f.ignoreNextType = true
				f.format(f.unpackValue(v.MapIndex(key)))
			}
		}
		f.depth--
		f.fs.Write(closeMapBytes)

	case reflect.Struct:
		numFields := v.NumField()
		f.fs.Write(openBraceBytes)
		f.depth++
		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
			f.fs.Write(maxShortBytes)
		} else {
			vt := v.Type()
			for i := 0; i < numFields; i++ {
				if i > 0 {
					f.fs.Write(spaceBytes)
				}
				vtf := vt.Field(i)
				if f.fs.Flag('+') || f.fs.Flag('#') {
					f.fs.Write([]byte(vtf.Name))
					f.fs.Write(colonBytes)
				}
				f.format(f.unpackValue(v.Field(i)))
			}
		}
		f.depth--
		f.fs.Write(closeBraceBytes)

	case reflect.Uintptr:
		printHexPtr(f.fs, uintptr(v.Uint()))

	case reflect.UnsafePointer, reflect.Chan, reflect.Func:
		printHexPtr(f.fs, v.Pointer())

	// There were not any other types at the time this code was written, but
	// fall back to letting the default fmt package handle it if any get added.
	default:
		format := f.buildDefaultFormat()
		if v.CanInterface() {
			fmt.Fprintf(f.fs, format, v.Interface())
		} else {
			fmt.Fprintf(f.fs, format, v.String())
		}
	}
}

// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
// details.
func (f *formatState) Format(fs fmt.State, verb rune) {
	f.fs = fs

	// Use standard formatting for verbs that are not v.
	if verb != 'v' {
		format := f.constructOrigFormat(verb)
		fmt.Fprintf(fs, format, f.value)
		return
	}

	if f.value == nil {
		if fs.Flag('#') {
			fs.Write(interfaceBytes)
		}
		fs.Write(nilAngleBytes)
		return
	}

	f.format(reflect.ValueOf(f.value))
}

// newFormatter is a helper function to consolidate the logic from the various
// public methods which take varying config states.
func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
	fs := &formatState{value: v, cs: cs}
	fs.pointers = make(map[uintptr]int)
	return fs
}

/*
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
interface. As a result, it integrates cleanly with standard fmt package
printing functions. The formatter is useful for inline printing of smaller data
types similar to the standard %v format specifier.

The custom formatter only responds to the %v (most compact), %+v (adds pointer
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
combinations. Any other verbs such as %x and %q will be sent to the
standard fmt package for formatting. In addition, the custom formatter ignores
the width and precision arguments (however they will still work on the format
specifiers not handled by the custom formatter).

Typically this function shouldn't be called directly. It is much easier to make
use of the custom formatter by calling one of the convenience functions such as
Printf, Println, or Fprintf.
*/
func NewFormatter(v interface{}) fmt.Formatter {
	return newFormatter(&Config, v)
}

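A small usage sketch for the Formatter defined above (the node type and values are made up for illustration; the fmt and spew imports from the first sketch are assumed). It shows the wrapped value riding through standard fmt verbs, including an unsupported verb being deferred back to fmt:

	type node struct {
		val  int
		next *node
	}
	n := &node{val: 1}
	n.next = n // circular; spew marks the repeat instead of recursing forever
	fmt.Printf("%v\n", spew.NewFormatter(n))   // compact, e.g. <*>{1 <*><shown>}
	fmt.Printf("%#+v\n", spew.NewFormatter(n)) // adds types and pointer addresses
	fmt.Printf("%x\n", spew.NewFormatter(7))   // %x is not handled, so fmt prints: 7
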
@@ -0,0 +1,148 @@
/*
 * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

package spew

import (
	"fmt"
	"io"
)

// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the formatted string as a value that satisfies error. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
func Errorf(format string, a ...interface{}) (err error) {
	return fmt.Errorf(format, convertArgs(a)...)
}

// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
	return fmt.Fprint(w, convertArgs(a)...)
}

// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
	return fmt.Fprintf(w, format, convertArgs(a)...)
}

// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
// were passed with a default Formatter interface returned by NewFormatter. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
	return fmt.Fprintln(w, convertArgs(a)...)
}

// Print is a wrapper for fmt.Print that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
func Print(a ...interface{}) (n int, err error) {
	return fmt.Print(convertArgs(a)...)
}

// Printf is a wrapper for fmt.Printf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
func Printf(format string, a ...interface{}) (n int, err error) {
	return fmt.Printf(format, convertArgs(a)...)
}

// Println is a wrapper for fmt.Println that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
func Println(a ...interface{}) (n int, err error) {
	return fmt.Println(convertArgs(a)...)
}

// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
func Sprint(a ...interface{}) string {
	return fmt.Sprint(convertArgs(a)...)
}

// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
func Sprintf(format string, a ...interface{}) string {
	return fmt.Sprintf(format, convertArgs(a)...)
}

// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
// were passed with a default Formatter interface returned by NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
func Sprintln(a ...interface{}) string {
	return fmt.Sprintln(convertArgs(a)...)
}

// convertArgs accepts a slice of arguments and returns a slice of the same
// length with each argument converted to a default spew Formatter interface.
func convertArgs(args []interface{}) (formatters []interface{}) {
	formatters = make([]interface{}, len(args))
	for index, arg := range args {
		formatters[index] = NewFormatter(arg)
	}
	return formatters
}

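The wrappers above simply run every argument through convertArgs before handing off to fmt, so they can replace an existing fmt call without other changes; a brief hedged sketch (cfg and w are placeholders for any value and io.Writer in scope):

	spew.Println(cfg)                          // like fmt.Println, but deep-printed
	spew.Fprintf(w, "state: %+v\n", cfg)       // like fmt.Fprintf with spew formatting
	err := spew.Errorf("bad config: %#v", cfg) // error text carries the typed dump
	_ = err
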
@@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2017-2020 Damian Gryski <damian@gryski.com>

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
Some files were not shown because too many files have changed in this diff.