From: Paladz Date: Mon, 8 Jul 2019 02:54:24 +0000 (+0800) Subject: Small edit (#246) X-Git-Tag: v1.0.5~176 X-Git-Url: http://git.osdn.net/view?p=bytom%2Fvapor.git;a=commitdiff_plain;h=b72647ba6580dbedd557fa5b5b99a129438a6fc6 Small edit (#246) * replace the useless id generate * remove unused code --- diff --git a/account/accounts.go b/account/accounts.go index befbe509..e26833b0 100644 --- a/account/accounts.go +++ b/account/accounts.go @@ -9,6 +9,7 @@ import ( "sync" "github.com/golang/groupcache/lru" + "github.com/google/uuid" log "github.com/sirupsen/logrus" "github.com/vapor/blockchain/signers" @@ -147,7 +148,7 @@ func CreateAccount(xpubs []chainkd.XPub, quorum int, alias string, acctIndex uin return nil, errors.Wrap(err) } - id := signers.IDGenerate() + id := uuid.New().String() return &Account{Signer: signer, ID: id, Alias: strings.ToLower(strings.TrimSpace(alias))}, nil } diff --git a/blockchain/query/filter/expr.go b/blockchain/query/filter/expr.go deleted file mode 100644 index 143f0fd5..00000000 --- a/blockchain/query/filter/expr.go +++ /dev/null @@ -1,95 +0,0 @@ -package filter - -import "fmt" - -type expr interface { - String() string -} - -type binaryExpr struct { - op *binaryOp - l, r expr -} - -func (e binaryExpr) String() string { - return e.l.String() + " " + e.op.name + " " + e.r.String() -} - -type attrExpr struct { - attr string -} - -func (e attrExpr) String() string { - return e.attr -} - -type selectorExpr struct { - ident string - objExpr expr -} - -func (e selectorExpr) String() string { - return e.objExpr.String() + "." + e.ident -} - -type parenExpr struct { - inner expr -} - -func (e parenExpr) String() string { - return "(" + e.inner.String() + ")" -} - -type valueExpr struct { - typ token - value string -} - -func (e valueExpr) String() string { - return e.value -} - -type envExpr struct { - ident string - expr expr -} - -func (e envExpr) String() string { - return e.ident + "(" + e.expr.String() + ")" -} - -type placeholderExpr struct { - num int -} - -func (e placeholderExpr) String() string { - return fmt.Sprintf("$%d", e.num) -} - -// Type defines the value types in filter expressions. -type Type int - -//defines the value types in filter expressions. -const ( - Any Type = iota - Bool - String - Integer - Object -) - -func (t Type) String() string { - switch t { - case Any: - return "any" - case Bool: - return "bool" - case String: - return "string" - case Integer: - return "integer" - case Object: - return "object" - } - panic("unknown type") -} diff --git a/blockchain/query/filter/operators.go b/blockchain/query/filter/operators.go deleted file mode 100644 index a46be47b..00000000 --- a/blockchain/query/filter/operators.go +++ /dev/null @@ -1,13 +0,0 @@ -package filter - -type binaryOp struct { - precedence int - name string // AND, =, etc. - sqlOp string -} - -var binaryOps = map[string]*binaryOp{ - "OR": {1, "OR", "OR"}, - "AND": {2, "AND", "AND"}, - "=": {3, "=", "="}, -} diff --git a/blockchain/query/filter/parser.go b/blockchain/query/filter/parser.go deleted file mode 100644 index eb2fc18a..00000000 --- a/blockchain/query/filter/parser.go +++ /dev/null @@ -1,260 +0,0 @@ -package filter - -import ( - "fmt" - "strconv" - - "github.com/vapor/errors" -) - -// ErrBadFilter is returned from Parse when -// it encounters an invalid filter expression. -var ErrBadFilter = errors.New("invalid query filter") - -// Predicate represents a parsed filter predicate. 
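For context, a minimal sketch of how the filter API deleted below was driven (written as if inside package query, using the annotated_txs columns defined further down in transactions.go; the predicate and values are illustrative):

// Sketch only: Parse type-checks a predicate string against a table
// description and the supplied placeholder values.
vals := []interface{}{"a1b2c3", int64(1000)}
p, err := filter.Parse("asset_id = $1 AND amount_lower_limit = $2", &filterTable, vals)
fmt.Println(p.String(), p.Parameters, err) // canonical predicate, 2 placeholders, <nil>

// A value of the wrong type fails the static type check:
_, err = filter.Parse("asset_id = $1", &filterTable, []interface{}{true})
fmt.Println(err) // non-nil ErrBadFilter: "=" expects integer or string operands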
-type Predicate struct { - expr expr - selectorTypes map[string]Type - Parameters int -} - -// String returns a cleaned, canonical representation of the -// predicate. -func (p Predicate) String() string { - if p.expr == nil { - return "" - } - return p.expr.String() -} - -// MarshalText implements the encoding.TextMarshaler interface and -// returns a cleaned, canonical representation of the predicate. -func (p Predicate) MarshalText() ([]byte, error) { - return []byte(p.expr.String()), nil -} - -// Parse parses a predicate and returns an internal representation of the -// predicate or an error if it fails to parse. -func Parse(predicate string, tbl *Table, vals []interface{}) (p Predicate, err error) { - expr, parser, err := parse(predicate) - if err != nil { - return p, errors.WithDetail(ErrBadFilter, err.Error()) - } - selectorTypes, err := typeCheck(expr, tbl, vals) - if err != nil { - return p, errors.WithDetail(ErrBadFilter, err.Error()) - } - - return Predicate{ - expr: expr, - selectorTypes: selectorTypes, - Parameters: parser.maxPlaceholder, - }, nil -} - -// Field is a type for simple expressions that simply access an attribute of -// the queried object. They're used for GROUP BYs. -type Field struct { - expr expr -} - -func (f Field) String() string { - return f.expr.String() -} - -// ParseField parses a field expression (either an attrExpr or a selectorExpr). -func ParseField(s string) (f Field, err error) { - expr, _, err := parse(s) - if err != nil { - return f, errors.WithDetail(ErrBadFilter, err.Error()) - } - if expr == nil { - return f, errors.WithDetail(ErrBadFilter, "empty field expression") - } - - switch expr.(type) { - case attrExpr, selectorExpr: - return Field{expr: expr}, nil - default: - return f, errors.WithDetailf(ErrBadFilter, "%q is not a valid field expression", s) - } -} - -func parse(exprString string) (expr expr, parser *parser, err error) { - defer func() { - r := recover() - if perr, ok := r.(parseError); ok { - err = perr - } else if r != nil { - panic(r) - } - }() - parser = newParser([]byte(exprString)) - - // An empty expression is a valid predicate. - if parser.tok == tokEOF { - return nil, parser, nil - } - - expr = parseExpr(parser) - parser.parseTok(tokEOF) - return expr, parser, err -} - -func newParser(src []byte) *parser { - p := new(parser) - p.scanner.init(src) - p.next() // advance onto the first input token - return p -} - -// The parser structure holds the parser's internal state. -type parser struct { - scanner scanner - - maxPlaceholder int - - // Current token - pos int // token position - tok token // one token look-ahead - lit string // token literal -} - -func determineBinaryOp(p *parser, minPrecedence int) (op *binaryOp, ok bool) { - op, ok = binaryOps[p.lit] - return op, ok && op.precedence >= minPrecedence -} - -// next advances to the next token. 
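A small sketch of the two shapes ParseField accepts, a bare attribute or a dotted selector; the field names here are illustrative:

// Sketch only: ParseField is for GROUP BY-style field expressions.
f1, err1 := filter.ParseField("asset_id")      // attrExpr: a plain column attribute
f2, err2 := filter.ParseField("account.alias") // selectorExpr: attribute "account", field "alias"
_, err3 := filter.ParseField("asset_id = $1")  // not a field expression, so ErrBadFilter
fmt.Println(f1, f2, err1, err2, err3)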
-func (p *parser) next() { - p.pos, p.tok, p.lit = p.scanner.Scan() -} - -func (p *parser) parseLit(lit string) { - if p.lit != lit { - p.errorf("got %s, expected %s", p.lit, lit) - } - p.next() -} - -func (p *parser) parseTok(tok token) { - if p.tok != tok { - p.errorf("got %s, expected %s", p.lit, tok.String()) - } - p.next() -} - -func parseExpr(p *parser) expr { - // Uses the precedence-climbing algorithm: - // https://en.wikipedia.org/wiki/Operator-precedence_parser#Precedence_climbing_method - expr := parsePrimaryExpr(p) - return parseExprCont(p, expr, 0) -} - -func parseExprCont(p *parser, lhs expr, minPrecedence int) expr { - for { - op, ok := determineBinaryOp(p, minPrecedence) - if !ok { - break - } - p.next() - - rhs := parsePrimaryExpr(p) - - for { - op2, ok := determineBinaryOp(p, op.precedence+1) - if !ok { - break - } - rhs = parseExprCont(p, rhs, op2.precedence) - } - lhs = binaryExpr{l: lhs, r: rhs, op: op} - } - return lhs -} - -func parsePrimaryExpr(p *parser) expr { - x := parseOperand(p) - for p.lit == "." { - x = parseSelectorExpr(p, x) - } - return x -} - -func parseOperand(p *parser) expr { - switch { - case p.lit == "(": - p.next() - expr := parseExpr(p) - p.parseLit(")") - return parenExpr{inner: expr} - case p.tok == tokString: - v := valueExpr{typ: p.tok, value: p.lit} - p.next() - return v - case p.tok == tokInteger: - // Parse the literal into an integer so that we store the string - // representation of the *decimal* value, never the hex. - integer, err := strconv.ParseInt(p.lit, 0, 64) - if err != nil { - // can't happen; scanner guarantees it - p.errorf("invalid integer: %q", p.lit) - } - v := valueExpr{typ: p.tok, value: strconv.Itoa(int(integer))} - p.next() - return v - case p.tok == tokPlaceholder: - num, err := strconv.Atoi(p.lit[1:]) - if err != nil || num <= 0 { - p.errorf("invalid placeholder: %q", p.lit) - } - v := placeholderExpr{num: num} - p.next() - - if num > p.maxPlaceholder { - p.maxPlaceholder = num - } - return v - default: - return parseEnvironmentExpr(p) - } -} - -func parseSelectorExpr(p *parser, objExpr expr) expr { - p.next() // move past the '.' 
- - ident := p.lit - p.parseTok(tokIdent) - return selectorExpr{ - ident: ident, - objExpr: objExpr, - } -} - -func parseEnvironmentExpr(p *parser) expr { - name := p.lit - p.parseTok(tokIdent) - if p.lit != "(" { - return attrExpr{attr: name} - } - p.next() - expr := parseExpr(p) - p.parseLit(")") - return envExpr{ - ident: name, - expr: expr, - } -} - -type parseError struct { - pos int - msg string -} - -func (err parseError) Error() string { - return fmt.Sprintf("col %d: %s", err.pos, err.msg) -} - -func (p *parser) errorf(format string, args ...interface{}) { - panic(parseError{pos: p.pos, msg: fmt.Sprintf(format, args...)}) -} diff --git a/blockchain/query/filter/scanner.go b/blockchain/query/filter/scanner.go deleted file mode 100644 index 7ef4eb7e..00000000 --- a/blockchain/query/filter/scanner.go +++ /dev/null @@ -1,220 +0,0 @@ -package filter - -import ( - "fmt" - "unicode" - "unicode/utf8" -) - -type token int - -const ( - tokInvalid token = iota - tokEOF - tokKeyword - tokIdent - tokString - tokInteger - tokPunct - tokPlaceholder -) - -func (t token) String() string { - switch t { - case tokInvalid: - return "invalid" - case tokEOF: - return "EOF" - case tokKeyword: - return "keyword" - case tokIdent: - return "identifier" - case tokString: - return "string" - case tokInteger: - return "integer" - case tokPunct: - return "punctuation" - case tokPlaceholder: - return "placeholder" - } - return "unknown token" -} - -// A scanner holds the scanner's internal state while processing -// a given text. -type scanner struct { - // immutable state - src []byte // source - - // scanning state - ch rune // current character - offset int // character offset - rdOffset int // reading offset (position after current character) -} - -func (s *scanner) init(src []byte) { - s.rdOffset = 0 - s.offset = -1 - s.src = src - s.next() // advance onto the first input rune -} - -const bom = 0xFEFF // byte order mark, always prohibited - -// next reads the next Unicode char into s.ch. -// s.ch < 0 means end-of-file. 
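As an inside-the-package sketch of the token stream this scanner produces (the input string is made up):

// Sketch only: scan "asset_id = 'abc' AND amount_lower_limit = $1".
var s scanner
s.init([]byte("asset_id = 'abc' AND amount_lower_limit = $1"))
for {
        pos, tok, lit := s.Scan()
        fmt.Println(pos, tok, lit) // identifier, punctuation, string, keyword,
                                   // identifier, punctuation, placeholder, then EOF
        if tok == tokEOF {
                break
        }
}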
-func (s *scanner) next() { - if s.rdOffset < len(s.src) { - s.offset = s.rdOffset - r, w := rune(s.src[s.rdOffset]), 1 - switch { - case r == 0: - s.error(s.offset+1, "illegal character NUL") - case r >= utf8.RuneSelf: - // not ASCII - r, w = utf8.DecodeRune(s.src[s.rdOffset:]) - if r == utf8.RuneError && w == 1 { - s.error(s.offset, "illegal UTF-8 encoding") - } else if r == bom { - s.error(s.offset, "illegal byte order mark") - } - } - s.rdOffset += w - s.ch = r - } else { - s.offset = len(s.src) - s.ch = -1 // eof - } -} - -func (s *scanner) error(offs int, msg string) { - panic(parseError{pos: offs, msg: msg}) -} - -func isLetter(ch rune) bool { - return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= utf8.RuneSelf && unicode.IsLetter(ch) -} - -func isDigit(ch rune) bool { - return '0' <= ch && ch <= '9' || ch >= utf8.RuneSelf && unicode.IsDigit(ch) -} - -func (s *scanner) scanIdentifier() string { - offs := s.offset - for isLetter(s.ch) || isDigit(s.ch) { - s.next() - } - return string(s.src[offs:s.offset]) -} - -func digitVal(ch rune) int { - switch { - case '0' <= ch && ch <= '9': - return int(ch - '0') - case 'a' <= ch && ch <= 'f': - return int(ch - 'a' + 10) - case 'A' <= ch && ch <= 'F': - return int(ch - 'A' + 10) - } - return 16 // larger than any legal digit val -} - -func (s *scanner) scanMantissa(base int) { - for digitVal(s.ch) < base { - s.next() - } -} - -func (s *scanner) scanNumber() { - // digitVal(s.ch) < 10 - if s.ch == '0' { - // int - offs := s.offset - s.next() - if s.ch == 'x' || s.ch == 'X' { - // hexadecimal int - s.next() - s.scanMantissa(16) - if s.offset-offs <= 2 { - // only scanned "0x" or "0X" - s.error(offs, "illegal hexadecimal number") - } - } else if digitVal(s.ch) < 10 { - s.error(offs, "illegal leading 0 in number") - } - } else { - // decimal int - s.scanMantissa(10) - } -} - -func (s *scanner) scanString() { - // "'" opening already consumed - offs := s.offset - 1 - - for { - ch := s.ch - if ch < 0 { - s.error(offs, "string literal not terminated") - break - } - s.next() - if ch == '\'' { - break - } - if ch == '\\' { - s.error(offs, "illegal backslash in string literal") - } - } -} - -func (s *scanner) skipWhitespace() { - for s.ch == ' ' || s.ch == '\t' { - s.next() - } -} - -func (s *scanner) Scan() (pos int, tok token, lit string) { - s.skipWhitespace() - - // current token start - pos = s.offset - - // determine token value - switch ch := s.ch; { - case isLetter(ch): - lit = s.scanIdentifier() - switch lit { - case "AND", "OR": - tok = tokKeyword - default: - tok = tokIdent - } - return pos, tok, lit - case '0' <= ch && ch <= '9': - s.scanNumber() - tok = tokInteger - default: - s.next() // always make progress - switch ch { - case -1: - return pos, tokEOF, "" - case '\'': - tok = tokString - s.scanString() - case '.', '(', ')', '=': - tok = tokPunct - case '$': - s.scanMantissa(10) - if s.offset-pos <= 1 { - s.error(pos, "illegal $ character") - } - tok = tokPlaceholder - default: - s.error(pos, fmt.Sprintf("illegal character %q", ch)) - } - } - lit = string(s.src[pos:s.offset]) - return -} diff --git a/blockchain/query/filter/typecheck.go b/blockchain/query/filter/typecheck.go deleted file mode 100644 index 301e37f3..00000000 --- a/blockchain/query/filter/typecheck.go +++ /dev/null @@ -1,241 +0,0 @@ -package filter - -import ( - "fmt" - - "github.com/vapor/errors" -) - -//Column describe a column -type Column struct { - Name string - Type Type -} - -//Table describe a table -type Table struct { - Name string - Alias 
string - Columns map[string]*Column - ForeignKeys map[string]*ForeignKey -} - -//ForeignKey describe a foreign key -type ForeignKey struct { - Table *Table - LocalColumn string - ForeignColumn string -} - -func isType(got Type, want Type) bool { - return got == want || got == Any -} - -func knownType(t Type) bool { - return t == Bool || t == String || t == Integer || t == Object -} - -func valueTypes(vals []interface{}) ([]Type, error) { - valTypes := make([]Type, len(vals)) - for i, val := range vals { - switch val.(type) { - case int, uint, int32, uint32, int64, uint64: - valTypes[i] = Integer - case string: - valTypes[i] = String - case bool: - valTypes[i] = Bool - default: - return nil, fmt.Errorf("unsupported value type %T", val) - } - } - return valTypes, nil -} - -// typeCheck will statically type check expr with vals as the parameters -// and using tbl to determine available attributes and environments. It -// returns the inferred types of arbitrary json keys as a map. -func typeCheck(expr expr, tbl *Table, vals []interface{}) (map[string]Type, error) { - valTypes, err := valueTypes(vals) - if err != nil { - return nil, err - } - selectorTypes := make(map[string]Type) - typ, err := typeCheckExpr(expr, tbl, valTypes, selectorTypes) - if err != nil { - return nil, err - } - ok, err := assertType(expr, typ, Bool, selectorTypes) - if err != nil { - return nil, err - } - if !ok { - return nil, fmt.Errorf("filter predicate must evaluate to bool, got %s", typ) - } - return selectorTypes, nil -} - -func typeCheckExpr(expr expr, tbl *Table, valTypes []Type, selectorTypes map[string]Type) (typ Type, err error) { - if expr == nil { // no expr is a valid, bool type - return Bool, nil - } - - switch e := expr.(type) { - case parenExpr: - return typeCheckExpr(e.inner, tbl, valTypes, selectorTypes) - case binaryExpr: - leftTyp, err := typeCheckExpr(e.l, tbl, valTypes, selectorTypes) - if err != nil { - return leftTyp, err - } - rightTyp, err := typeCheckExpr(e.r, tbl, valTypes, selectorTypes) - if err != nil { - return rightTyp, err - } - - switch e.op.name { - case "OR", "AND": - ok, err := assertType(e.l, leftTyp, Bool, selectorTypes) - if err != nil { - return typ, err - } - if !ok { - return typ, fmt.Errorf("%s expects bool operands", e.op.name) - } - - ok, err = assertType(e.r, rightTyp, Bool, selectorTypes) - if err != nil { - return typ, err - } - if !ok { - return typ, fmt.Errorf("%s expects bool operands", e.op.name) - } - return Bool, nil - case "=": - // The = operand requires left and right types to be equal. If - // one of our types is known but the other is not, we need to - // coerce the untyped one to a matching type. 
- if !knownType(leftTyp) && knownType(rightTyp) { - err := setType(e.l, rightTyp, selectorTypes) - if err != nil { - return leftTyp, err - } - leftTyp = rightTyp - } - if !knownType(rightTyp) && knownType(leftTyp) { - err := setType(e.r, leftTyp, selectorTypes) - if err != nil { - return leftTyp, err - } - rightTyp = leftTyp - } - if !isType(leftTyp, String) && !isType(leftTyp, Integer) { - return typ, fmt.Errorf("%s expects integer or string operands", e.op.name) - } - if !isType(rightTyp, String) && !isType(rightTyp, Integer) { - return typ, fmt.Errorf("%s expects integer or string operands", e.op.name) - } - if knownType(rightTyp) && knownType(leftTyp) && leftTyp != rightTyp { - return typ, fmt.Errorf("%s expects operands of matching types", e.op.name) - } - return Bool, nil - default: - panic(fmt.Errorf("unsupported operator: %s", e.op.name)) - } - case placeholderExpr: - if len(valTypes) == 0 { - return Any, nil - } - if e.num <= 0 || e.num > len(valTypes) { - return typ, fmt.Errorf("unbound placeholder: $%d", e.num) - } - return valTypes[e.num-1], nil - case attrExpr: - col, ok := tbl.Columns[e.attr] - if !ok { - return typ, fmt.Errorf("invalid attribute: %s", e.attr) - } - return col.Type, nil - case valueExpr: - switch e.typ { - case tokString: - return String, nil - case tokInteger: - return Integer, nil - default: - panic(fmt.Errorf("value expr with invalid token type: %s", e.typ)) - } - case selectorExpr: - typ, err = typeCheckExpr(e.objExpr, tbl, valTypes, selectorTypes) - if err != nil { - return typ, err - } - ok, err := assertType(e.objExpr, typ, Object, selectorTypes) - if err != nil { - return typ, err - } - if !ok { - return typ, errors.New("selector `.` can only be used on objects") - } - - // Unfortunately, we can't know the type of the field within the - // object yet. Depending on the context, we might be able to assign it - // a type later in setType. - return Any, nil - case envExpr: - fk, ok := tbl.ForeignKeys[e.ident] - if !ok { - return typ, fmt.Errorf("invalid environment `%s`", e.ident) - } - typ, err = typeCheckExpr(e.expr, fk.Table, valTypes, selectorTypes) - if err != nil { - return typ, err - } - ok, err = assertType(e.expr, typ, Bool, selectorTypes) - if err != nil { - return typ, err - } - if !ok { - return typ, errors.New(e.ident + "(...) body must have type bool") - } - return Bool, nil - default: - panic(fmt.Errorf("unrecognized expr type %T", expr)) - } -} - -func assertType(expr expr, got, want Type, selectorTypes map[string]Type) (bool, error) { - if !isType(got, want) { // type does not match - return false, nil - } - if got != Any { // matching type *and* it's a concrete type - return true, nil - } - // got is `Any`. we should restrict expr to be `want`. - err := setType(expr, want, selectorTypes) - return true, err -} - -func setType(expr expr, typ Type, selectorTypes map[string]Type) error { - switch e := expr.(type) { - case parenExpr: - return setType(e.inner, typ, selectorTypes) - case placeholderExpr: - // This is a special case for when we parse a txfeed filter at - // txfeed creation time. We don't have access to concrete values - // yet, so the parameters are untyped. - return nil - case selectorExpr: - /*path := strings.Join(jsonbPath(expr), ".") - boundTyp, ok := selectorTypes[path] - if ok && boundTyp != typ { - return fmt.Errorf("%q used as both %s and %s", path, boundTyp, typ) - } - selectorTypes[path] = typ*/ - return nil - default: - // This should be impossible because all other expressions are - // strongly typed. 
- panic(fmt.Errorf("unexpected setType on %T", expr)) - } -} diff --git a/blockchain/query/transactions.go b/blockchain/query/transactions.go deleted file mode 100644 index 8b0c091e..00000000 --- a/blockchain/query/transactions.go +++ /dev/null @@ -1,30 +0,0 @@ -package query - -import ( - "github.com/vapor/blockchain/query/filter" - "github.com/vapor/errors" -) - -var filterTable = filter.Table{ - Name: "annotated_txs", - Alias: "txs", - Columns: map[string]*filter.Column{ - "asset_id": {Name: "assetid", Type: filter.String}, - "amount_lower_limit": {Name: "amountlower", Type: filter.Integer}, - "amount_upper_limit": {Name: "amountupper", Type: filter.Integer}, - "trans_type": {Name: "transtype", Type: filter.String}, - }, -} - -var ( - //ErrBadAfter means malformed pagination parameter. - ErrBadAfter = errors.New("malformed pagination parameter after") - //ErrParameterCountMismatch means wrong number of parameters to query. - ErrParameterCountMismatch = errors.New("wrong number of parameters to query") -) - -//ValidateTransactionFilter verify txfeed filter validity. -func ValidateTransactionFilter(filt string) error { - _, err := filter.Parse(filt, &filterTable, nil) - return err -} diff --git a/blockchain/signers/idgenerate.go b/blockchain/signers/idgenerate.go deleted file mode 100644 index 61d5546b..00000000 --- a/blockchain/signers/idgenerate.go +++ /dev/null @@ -1,41 +0,0 @@ -package signers - -import ( - "encoding/binary" - "sync/atomic" - "time" - - "github.com/vapor/encoding/base32" -) - -//1 0xff { - panic("invalid padding") - } - - for i := 0; i < len(enc.encode); i++ { - if rune(enc.encode[i]) == padding { - panic("padding contained in alphabet") - } - } - - enc.padChar = padding - return &enc -} - -/* - * Encoder - */ - -// Encode encodes src using the encoding enc, writing -// EncodedLen(len(src)) bytes to dst. -// -// The encoding pads the output to a multiple of 8 bytes, -// so Encode is not appropriate for use on individual blocks -// of a large data stream. Use NewEncoder() instead. 
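A short sketch of the padded output Encode produces; the expected strings are the RFC 4648 vectors used by the tests below, and the same results come from Go's standard encoding/base32:

// Sketch only: every 5 input bytes become 8 output bytes; the final
// quantum is padded with '=' up to an 8-byte boundary.
fmt.Println(base32.StdEncoding.EncodeToString([]byte("f")))      // MY======
fmt.Println(base32.StdEncoding.EncodeToString([]byte("fooba")))  // MZXW6YTB (one full quantum, no padding)
fmt.Println(base32.StdEncoding.EncodeToString([]byte("foobar"))) // MZXW6YTBOI======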
-func (enc *Encoding) Encode(dst, src []byte) { - if len(src) == 0 { - return - } - - for len(src) > 0 { - var b [8]byte - - // Unpack 8x 5-bit source blocks into a 5 byte - // destination quantum - switch len(src) { - default: - b[7] = src[4] & 0x1F - b[6] = src[4] >> 5 - fallthrough - case 4: - b[6] |= (src[3] << 3) & 0x1F - b[5] = (src[3] >> 2) & 0x1F - b[4] = src[3] >> 7 - fallthrough - case 3: - b[4] |= (src[2] << 1) & 0x1F - b[3] = (src[2] >> 4) & 0x1F - fallthrough - case 2: - b[3] |= (src[1] << 4) & 0x1F - b[2] = (src[1] >> 1) & 0x1F - b[1] = (src[1] >> 6) & 0x1F - fallthrough - case 1: - b[1] |= (src[0] << 2) & 0x1F - b[0] = src[0] >> 3 - } - - // Encode 5-bit blocks using the base32 alphabet - size := len(dst) - if size >= 8 { - // Common case, unrolled for extra performance - dst[0] = enc.encode[b[0]] - dst[1] = enc.encode[b[1]] - dst[2] = enc.encode[b[2]] - dst[3] = enc.encode[b[3]] - dst[4] = enc.encode[b[4]] - dst[5] = enc.encode[b[5]] - dst[6] = enc.encode[b[6]] - dst[7] = enc.encode[b[7]] - } else { - for i := 0; i < size; i++ { - dst[i] = enc.encode[b[i]] - } - } - - // Pad the final quantum - if len(src) < 5 { - if enc.padChar == NoPadding { - break - } - - dst[7] = byte(enc.padChar) - if len(src) < 4 { - dst[6] = byte(enc.padChar) - dst[5] = byte(enc.padChar) - if len(src) < 3 { - dst[4] = byte(enc.padChar) - if len(src) < 2 { - dst[3] = byte(enc.padChar) - dst[2] = byte(enc.padChar) - } - } - } - - break - } - - src = src[5:] - dst = dst[8:] - } -} - -// EncodeToString returns the base32 encoding of src. -func (enc *Encoding) EncodeToString(src []byte) string { - buf := make([]byte, enc.EncodedLen(len(src))) - enc.Encode(buf, src) - return string(buf) -} - -type encoder struct { - err error - enc *Encoding - w io.Writer - buf [5]byte // buffered data waiting to be encoded - nbuf int // number of bytes in buf - out [1024]byte // output buffer -} - -func (e *encoder) Write(p []byte) (n int, err error) { - if e.err != nil { - return 0, e.err - } - - // Leading fringe. - if e.nbuf > 0 { - var i int - for i = 0; i < len(p) && e.nbuf < 5; i++ { - e.buf[e.nbuf] = p[i] - e.nbuf++ - } - n += i - p = p[i:] - if e.nbuf < 5 { - return - } - e.enc.Encode(e.out[0:], e.buf[0:]) - if _, e.err = e.w.Write(e.out[0:8]); e.err != nil { - return n, e.err - } - e.nbuf = 0 - } - - // Large interior chunks. - for len(p) >= 5 { - nn := len(e.out) / 8 * 5 - if nn > len(p) { - nn = len(p) - nn -= nn % 5 - } - e.enc.Encode(e.out[0:], p[0:nn]) - if _, e.err = e.w.Write(e.out[0 : nn/5*8]); e.err != nil { - return n, e.err - } - n += nn - p = p[nn:] - } - - // Trailing fringe. - for i := 0; i < len(p); i++ { - e.buf[i] = p[i] - } - e.nbuf = len(p) - n += len(p) - return -} - -// Close flushes any pending output from the encoder. -// It is an error to call Write after calling Close. -func (e *encoder) Close() error { - // If there's anything left in the buffer, flush it out - if e.err == nil && e.nbuf > 0 { - e.enc.Encode(e.out[0:], e.buf[0:e.nbuf]) - e.nbuf = 0 - _, e.err = e.w.Write(e.out[0:8]) - } - return e.err -} - -// NewEncoder returns a new base32 stream encoder. Data written to -// the returned writer will be encoded using enc and then written to w. -// Base32 encodings operate in 5-byte blocks; when finished -// writing, the caller must Close the returned encoder to flush any -// partially written blocks. 
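For streaming input, a minimal sketch of the encoder returned here (mirroring ExampleNewEncoder further down; assumes bytes and fmt are imported):

// Sketch only: write in arbitrary chunks, then Close to flush the final
// partially filled 5-byte block.
var buf bytes.Buffer
enc := base32.NewEncoder(base32.StdEncoding, &buf)
enc.Write([]byte("foo"))
enc.Write([]byte("bar"))
enc.Close()               // without Close, the buffered trailing "r" would never be encoded
fmt.Println(buf.String()) // MZXW6YTBOI======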
-func NewEncoder(enc *Encoding, w io.Writer) io.WriteCloser { - return &encoder{enc: enc, w: w} -} - -// EncodedLen returns the length in bytes of the base32 encoding -// of an input buffer of length n. -func (enc *Encoding) EncodedLen(n int) int { - if enc.padChar == NoPadding { - return (n*8 + 4) / 5 - } - return (n + 4) / 5 * 8 -} - -/* - * Decoder - */ - -type CorruptInputError int64 - -func (e CorruptInputError) Error() string { - return "illegal base32 data at input byte " + strconv.FormatInt(int64(e), 10) -} - -// decode is like Decode but returns an additional 'end' value, which -// indicates if end-of-message padding was encountered and thus any -// additional data is an error. This method assumes that src has been -// stripped of all supported whitespace ('\r' and '\n'). -func (enc *Encoding) decode(dst, src []byte) (n int, end bool, err error) { - olen := len(src) - for len(src) > 0 && !end { - // Decode quantum using the base32 alphabet - var dbuf [8]byte - dlen := 8 - - for j := 0; j < 8; { - - // We have reached the end and are missing padding - if len(src) == 0 && enc.padChar != NoPadding { - return n, false, CorruptInputError(olen - len(src) - j) - } - - // We have reached the end and are not expecing any padding - if len(src) == 0 && enc.padChar == NoPadding { - dlen, end = j, true - break - } - - in := src[0] - src = src[1:] - if in == byte(enc.padChar) && j >= 2 && len(src) < 8 { - // We've reached the end and there's padding - if len(src)+j < 8-1 { - // not enough padding - return n, false, CorruptInputError(olen) - } - for k := 0; k < 8-1-j; k++ { - if len(src) > k && src[k] != byte(enc.padChar) { - // incorrect padding - return n, false, CorruptInputError(olen - len(src) + k - 1) - } - } - dlen, end = j, true - // 7, 5 and 2 are not valid padding lengths, and so 1, 3 and 6 are not - // valid dlen values. See RFC 4648 Section 6 "Base 32 Encoding" listing - // the five valid padding lengths, and Section 9 "Illustrations and - // Examples" for an illustration for how the 1st, 3rd and 6th base32 - // src bytes do not yield enough information to decode a dst byte. - if dlen == 1 || dlen == 3 || dlen == 6 { - return n, false, CorruptInputError(olen - len(src) - 1) - } - break - } - dbuf[j] = enc.decodeMap[in] - if dbuf[j] == 0xFF { - return n, false, CorruptInputError(olen - len(src) - 1) - } - j++ - } - - // Pack 8x 5-bit source blocks into 5 byte destination - // quantum - switch dlen { - case 8: - dst[4] = dbuf[6]<<5 | dbuf[7] - fallthrough - case 7: - dst[3] = dbuf[4]<<7 | dbuf[5]<<2 | dbuf[6]>>3 - fallthrough - case 5: - dst[2] = dbuf[3]<<4 | dbuf[4]>>1 - fallthrough - case 4: - dst[1] = dbuf[1]<<6 | dbuf[2]<<1 | dbuf[3]>>4 - fallthrough - case 2: - dst[0] = dbuf[0]<<3 | dbuf[1]>>2 - } - - if !end { - dst = dst[5:] - } - - switch dlen { - case 2: - n += 1 - case 4: - n += 2 - case 5: - n += 3 - case 7: - n += 4 - case 8: - n += 5 - } - } - return n, end, nil -} - -// Decode decodes src using the encoding enc. It writes at most -// DecodedLen(len(src)) bytes to dst and returns the number of bytes -// written. If src contains invalid base32 data, it will return the -// number of bytes successfully written and CorruptInputError. -// New line characters (\r and \n) are ignored. -func (enc *Encoding) Decode(dst, src []byte) (n int, err error) { - src = bytes.Map(removeNewlinesMapper, src) - n, _, err = enc.decode(dst, src) - return -} - -// DecodeString returns the bytes represented by the base32 string s. 
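A small usage sketch for the decoding entry points around this point; the inputs are the RFC 4648 vectors used by the tests:

// Sketch only: DecodeString is the string counterpart of Decode; newlines
// in the input are ignored, and an illegal symbol reports its offset.
b, err := base32.StdEncoding.DecodeString("ON2XEZI=")
fmt.Println(string(b), err) // sure <nil>
_, err = base32.StdEncoding.DecodeString("x===")
if cerr, ok := err.(base32.CorruptInputError); ok {
        fmt.Println(int64(cerr)) // 0: offset of the first illegal byte
}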
-func (enc *Encoding) DecodeString(s string) ([]byte, error) { - s = strings.Map(removeNewlinesMapper, s) - dbuf := make([]byte, enc.DecodedLen(len(s))) - n, _, err := enc.decode(dbuf, []byte(s)) - return dbuf[:n], err -} - -type decoder struct { - err error - enc *Encoding - r io.Reader - end bool // saw end of message - buf [1024]byte // leftover input - nbuf int - out []byte // leftover decoded output - outbuf [1024 / 8 * 5]byte -} - -func readEncodedData(r io.Reader, buf []byte, min int) (n int, err error) { - for n < min && err == nil { - var nn int - nn, err = r.Read(buf[n:]) - n += nn - } - if n < min && n > 0 && err == io.EOF { - err = io.ErrUnexpectedEOF - } - return -} - -func (d *decoder) Read(p []byte) (n int, err error) { - // Use leftover decoded output from last read. - if len(d.out) > 0 { - n = copy(p, d.out) - d.out = d.out[n:] - if len(d.out) == 0 { - return n, d.err - } - return n, nil - } - - if d.err != nil { - return 0, d.err - } - - // Read a chunk. - nn := len(p) / 5 * 8 - if nn < 8 { - nn = 8 - } - if nn > len(d.buf) { - nn = len(d.buf) - } - - nn, d.err = readEncodedData(d.r, d.buf[d.nbuf:nn], 8-d.nbuf) - d.nbuf += nn - if d.nbuf < 8 { - return 0, d.err - } - - // Decode chunk into p, or d.out and then p if p is too small. - nr := d.nbuf / 8 * 8 - nw := d.nbuf / 8 * 5 - if nw > len(p) { - nw, d.end, err = d.enc.decode(d.outbuf[0:], d.buf[0:nr]) - d.out = d.outbuf[0:nw] - n = copy(p, d.out) - d.out = d.out[n:] - } else { - n, d.end, err = d.enc.decode(p, d.buf[0:nr]) - } - d.nbuf -= nr - for i := 0; i < d.nbuf; i++ { - d.buf[i] = d.buf[i+nr] - } - - if err != nil && (d.err == nil || d.err == io.EOF) { - d.err = err - } - - if len(d.out) > 0 { - // We cannot return all the decoded bytes to the caller in this - // invocation of Read, so we return a nil error to ensure that Read - // will be called again. The error stored in d.err, if any, will be - // returned with the last set of decoded bytes. - return n, nil - } - - return n, d.err -} - -type newlineFilteringReader struct { - wrapped io.Reader -} - -func (r *newlineFilteringReader) Read(p []byte) (int, error) { - n, err := r.wrapped.Read(p) - for n > 0 { - offset := 0 - for i, b := range p[0:n] { - if b != '\r' && b != '\n' { - if i != offset { - p[offset] = b - } - offset++ - } - } - if err != nil || offset > 0 { - return offset, err - } - // Previous buffer entirely whitespace, read again - n, err = r.wrapped.Read(p) - } - return n, err -} - -// NewDecoder constructs a new base32 stream decoder. -func NewDecoder(enc *Encoding, r io.Reader) io.Reader { - return &decoder{enc: enc, r: &newlineFilteringReader{r}} -} - -// DecodedLen returns the maximum length in bytes of the decoded data -// corresponding to n bytes of base32-encoded data. -func (enc *Encoding) DecodedLen(n int) int { - if enc.padChar == NoPadding { - return n * 5 / 8 - } - - return n / 8 * 5 -} diff --git a/encoding/base32/base32_test.go b/encoding/base32/base32_test.go deleted file mode 100644 index 50837798..00000000 --- a/encoding/base32/base32_test.go +++ /dev/null @@ -1,580 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
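Worked numbers for the EncodedLen and DecodedLen formulas above, matching the TestEncodedDecodedLen cases further down in this diff:

// Sketch only: with padding, output is always whole 8-byte quanta;
// without padding it is ceil(n*8/5) characters.
fmt.Println(base32.StdEncoding.EncodedLen(6))  // (6+4)/5*8 = 16
fmt.Println(base32.StdEncoding.DecodedLen(16)) // 16/8*5    = 10
np := base32.StdEncoding.WithPadding(base32.NoPadding)
fmt.Println(np.EncodedLen(6))                  // (6*8+4)/5 = 10
fmt.Println(np.DecodedLen(10))                 // 10*5/8    = 6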
- -package base32 - -import ( - "bytes" - "errors" - "io" - "io/ioutil" - "strings" - "testing" -) - -type testpair struct { - decoded, encoded string -} - -var pairs = []testpair{ - // RFC 4648 examples - {"", ""}, - {"f", "MY======"}, - {"fo", "MZXQ===="}, - {"foo", "MZXW6==="}, - {"foob", "MZXW6YQ="}, - {"fooba", "MZXW6YTB"}, - {"foobar", "MZXW6YTBOI======"}, - - // Wikipedia examples, converted to base32 - {"sure.", "ON2XEZJO"}, - {"sure", "ON2XEZI="}, - {"sur", "ON2XE==="}, - {"su", "ON2Q===="}, - {"leasure.", "NRSWC43VOJSS4==="}, - {"easure.", "MVQXG5LSMUXA===="}, - {"asure.", "MFZXK4TFFY======"}, - {"sure.", "ON2XEZJO"}, -} - -var bigtest = testpair{ - "Twas brillig, and the slithy toves", - "KR3WC4ZAMJZGS3DMNFTSYIDBNZSCA5DIMUQHG3DJORUHSIDUN53GK4Y=", -} - -func testEqual(t *testing.T, msg string, args ...interface{}) bool { - //t.Helper() - if args[len(args)-2] != args[len(args)-1] { - t.Errorf(msg, args...) - return false - } - return true -} - -func TestEncode(t *testing.T) { - for _, p := range pairs { - got := StdEncoding.EncodeToString([]byte(p.decoded)) - testEqual(t, "Encode(%q) = %q, want %q", p.decoded, got, p.encoded) - } -} - -func TestEncoder(t *testing.T) { - for _, p := range pairs { - bb := &bytes.Buffer{} - encoder := NewEncoder(StdEncoding, bb) - encoder.Write([]byte(p.decoded)) - encoder.Close() - testEqual(t, "Encode(%q) = %q, want %q", p.decoded, bb.String(), p.encoded) - } -} - -func TestEncoderBuffering(t *testing.T) { - input := []byte(bigtest.decoded) - for bs := 1; bs <= 12; bs++ { - bb := &bytes.Buffer{} - encoder := NewEncoder(StdEncoding, bb) - for pos := 0; pos < len(input); pos += bs { - end := pos + bs - if end > len(input) { - end = len(input) - } - n, err := encoder.Write(input[pos:end]) - testEqual(t, "Write(%q) gave error %v, want %v", input[pos:end], err, error(nil)) - testEqual(t, "Write(%q) gave length %v, want %v", input[pos:end], n, end-pos) - } - err := encoder.Close() - testEqual(t, "Close gave error %v, want %v", err, error(nil)) - testEqual(t, "Encoding/%d of %q = %q, want %q", bs, bigtest.decoded, bb.String(), bigtest.encoded) - } -} - -func TestDecode(t *testing.T) { - for _, p := range pairs { - dbuf := make([]byte, StdEncoding.DecodedLen(len(p.encoded))) - count, end, err := StdEncoding.decode(dbuf, []byte(p.encoded)) - testEqual(t, "Decode(%q) = error %v, want %v", p.encoded, err, error(nil)) - testEqual(t, "Decode(%q) = length %v, want %v", p.encoded, count, len(p.decoded)) - if len(p.encoded) > 0 { - testEqual(t, "Decode(%q) = end %v, want %v", p.encoded, end, (p.encoded[len(p.encoded)-1] == '=')) - } - testEqual(t, "Decode(%q) = %q, want %q", p.encoded, - string(dbuf[0:count]), - p.decoded) - - dbuf, err = StdEncoding.DecodeString(p.encoded) - testEqual(t, "DecodeString(%q) = error %v, want %v", p.encoded, err, error(nil)) - testEqual(t, "DecodeString(%q) = %q, want %q", p.encoded, string(dbuf), p.decoded) - } -} - -func TestDecoder(t *testing.T) { - for _, p := range pairs { - decoder := NewDecoder(StdEncoding, strings.NewReader(p.encoded)) - dbuf := make([]byte, StdEncoding.DecodedLen(len(p.encoded))) - count, err := decoder.Read(dbuf) - if err != nil && err != io.EOF { - t.Fatal("Read failed", err) - } - testEqual(t, "Read from %q = length %v, want %v", p.encoded, count, len(p.decoded)) - testEqual(t, "Decoding of %q = %q, want %q", p.encoded, string(dbuf[0:count]), p.decoded) - if err != io.EOF { - count, err = decoder.Read(dbuf) - } - testEqual(t, "Read from %q = %v, want %v", p.encoded, err, io.EOF) - } -} - -type badReader 
struct { - data []byte - errs []error - called int - limit int -} - -// Populates p with data, returns a count of the bytes written and an -// error. The error returned is taken from badReader.errs, with each -// invocation of Read returning the next error in this slice, or io.EOF, -// if all errors from the slice have already been returned. The -// number of bytes returned is determined by the size of the input buffer -// the test passes to decoder.Read and will be a multiple of 8, unless -// badReader.limit is non zero. -func (b *badReader) Read(p []byte) (int, error) { - lim := len(p) - if b.limit != 0 && b.limit < lim { - lim = b.limit - } - if len(b.data) < lim { - lim = len(b.data) - } - for i := range p[:lim] { - p[i] = b.data[i] - } - b.data = b.data[lim:] - err := io.EOF - if b.called < len(b.errs) { - err = b.errs[b.called] - } - b.called++ - return lim, err -} - -// TestIssue20044 tests that decoder.Read behaves correctly when the caller -// supplied reader returns an error. -func TestIssue20044(t *testing.T) { - badErr := errors.New("bad reader error") - testCases := []struct { - r badReader - res string - err error - dbuflen int - }{ - // Check valid input data accompanied by an error is processed and the error is propagated. - {r: badReader{data: []byte("MY======"), errs: []error{badErr}}, - res: "f", err: badErr}, - // Check a read error accompanied by input data consisting of newlines only is propagated. - {r: badReader{data: []byte("\n\n\n\n\n\n\n\n"), errs: []error{badErr, nil}}, - res: "", err: badErr}, - // Reader will be called twice. The first time it will return 8 newline characters. The - // second time valid base32 encoded data and an error. The data should be decoded - // correctly and the error should be propagated. - {r: badReader{data: []byte("\n\n\n\n\n\n\n\nMY======"), errs: []error{nil, badErr}}, - res: "f", err: badErr, dbuflen: 8}, - // Reader returns invalid input data (too short) and an error. Verify the reader - // error is returned. - {r: badReader{data: []byte("MY====="), errs: []error{badErr}}, - res: "", err: badErr}, - // Reader returns invalid input data (too short) but no error. Verify io.ErrUnexpectedEOF - // is returned. - {r: badReader{data: []byte("MY====="), errs: []error{nil}}, - res: "", err: io.ErrUnexpectedEOF}, - // Reader returns invalid input data and an error. Verify the reader and not the - // decoder error is returned. - {r: badReader{data: []byte("Ma======"), errs: []error{badErr}}, - res: "", err: badErr}, - // Reader returns valid data and io.EOF. Check data is decoded and io.EOF is propagated. - {r: badReader{data: []byte("MZXW6YTB"), errs: []error{io.EOF}}, - res: "fooba", err: io.EOF}, - // Check errors are properly reported when decoder.Read is called multiple times. - // decoder.Read will be called 8 times, badReader.Read will be called twice, returning - // valid data both times but an error on the second call. - {r: badReader{data: []byte("NRSWC43VOJSS4==="), errs: []error{nil, badErr}}, - res: "leasure.", err: badErr, dbuflen: 1}, - // Check io.EOF is properly reported when decoder.Read is called multiple times. - // decoder.Read will be called 8 times, badReader.Read will be called twice, returning - // valid data both times but io.EOF on the second call. - {r: badReader{data: []byte("NRSWC43VOJSS4==="), errs: []error{nil, io.EOF}}, - res: "leasure.", err: io.EOF, dbuflen: 1}, - // The following two test cases check that errors are propagated correctly when more than - // 8 bytes are read at a time. 
- {r: badReader{data: []byte("NRSWC43VOJSS4==="), errs: []error{io.EOF}}, - res: "leasure.", err: io.EOF, dbuflen: 11}, - {r: badReader{data: []byte("NRSWC43VOJSS4==="), errs: []error{badErr}}, - res: "leasure.", err: badErr, dbuflen: 11}, - // Check that errors are correctly propagated when the reader returns valid bytes in - // groups that are not divisible by 8. The first read will return 11 bytes and no - // error. The second will return 7 and an error. The data should be decoded correctly - // and the error should be propagated. - {r: badReader{data: []byte("NRSWC43VOJSS4==="), errs: []error{nil, badErr}, limit: 11}, - res: "leasure.", err: badErr}, - } - - for _, tc := range testCases { - input := tc.r.data - decoder := NewDecoder(StdEncoding, &tc.r) - var dbuflen int - if tc.dbuflen > 0 { - dbuflen = tc.dbuflen - } else { - dbuflen = StdEncoding.DecodedLen(len(input)) - } - dbuf := make([]byte, dbuflen) - var err error - var res []byte - for err == nil { - var n int - n, err = decoder.Read(dbuf) - if n > 0 { - res = append(res, dbuf[:n]...) - } - } - - testEqual(t, "Decoding of %q = %q, want %q", string(input), string(res), tc.res) - testEqual(t, "Decoding of %q err = %v, expected %v", string(input), err, tc.err) - } -} - -// TestDecoderError verifies decode errors are propagated when there are no read -// errors. -func TestDecoderError(t *testing.T) { - for _, readErr := range []error{io.EOF, nil} { - input := "MZXW6YTb" - dbuf := make([]byte, StdEncoding.DecodedLen(len(input))) - br := badReader{data: []byte(input), errs: []error{readErr}} - decoder := NewDecoder(StdEncoding, &br) - n, err := decoder.Read(dbuf) - testEqual(t, "Read after EOF, n = %d, expected %d", n, 0) - if _, ok := err.(CorruptInputError); !ok { - t.Errorf("Corrupt input error expected. Found %T", err) - } - } -} - -// TestReaderEOF ensures decoder.Read behaves correctly when input data is -// exhausted. -func TestReaderEOF(t *testing.T) { - for _, readErr := range []error{io.EOF, nil} { - input := "MZXW6YTB" - br := badReader{data: []byte(input), errs: []error{nil, readErr}} - decoder := NewDecoder(StdEncoding, &br) - dbuf := make([]byte, StdEncoding.DecodedLen(len(input))) - n, err := decoder.Read(dbuf) - testEqual(t, "Decoding of %q err = %v, expected %v", string(input), err, error(nil)) - n, err = decoder.Read(dbuf) - testEqual(t, "Read after EOF, n = %d, expected %d", n, 0) - testEqual(t, "Read after EOF, err = %v, expected %v", err, io.EOF) - n, err = decoder.Read(dbuf) - testEqual(t, "Read after EOF, n = %d, expected %d", n, 0) - testEqual(t, "Read after EOF, err = %v, expected %v", err, io.EOF) - } -} - -func TestDecoderBuffering(t *testing.T) { - for bs := 1; bs <= 12; bs++ { - decoder := NewDecoder(StdEncoding, strings.NewReader(bigtest.encoded)) - buf := make([]byte, len(bigtest.decoded)+12) - var total int - var n int - var err error - for total = 0; total < len(bigtest.decoded) && err == nil; { - n, err = decoder.Read(buf[total : total+bs]) - total += n - } - if err != nil && err != io.EOF { - t.Errorf("Read from %q at pos %d = %d, unexpected error %v", bigtest.encoded, total, n, err) - } - testEqual(t, "Decoding/%d of %q = %q, want %q", bs, bigtest.encoded, string(buf[0:total]), bigtest.decoded) - } -} - -func TestDecodeCorrupt(t *testing.T) { - testCases := []struct { - input string - offset int // -1 means no corruption. 
- }{ - {"", -1}, - {"!!!!", 0}, - {"x===", 0}, - {"AA=A====", 2}, - {"AAA=AAAA", 3}, - {"MMMMMMMMM", 8}, - {"MMMMMM", 0}, - {"A=", 1}, - {"AA=", 3}, - {"AA==", 4}, - {"AA===", 5}, - {"AAAA=", 5}, - {"AAAA==", 6}, - {"AAAAA=", 6}, - {"AAAAA==", 7}, - {"A=======", 1}, - {"AA======", -1}, - {"AAA=====", 3}, - {"AAAA====", -1}, - {"AAAAA===", -1}, - {"AAAAAA==", 6}, - {"AAAAAAA=", -1}, - {"AAAAAAAA", -1}, - } - for _, tc := range testCases { - dbuf := make([]byte, StdEncoding.DecodedLen(len(tc.input))) - _, err := StdEncoding.Decode(dbuf, []byte(tc.input)) - if tc.offset == -1 { - if err != nil { - t.Error("Decoder wrongly detected corruption in", tc.input) - } - continue - } - switch err := err.(type) { - case CorruptInputError: - testEqual(t, "Corruption in %q at offset %v, want %v", tc.input, int(err), tc.offset) - default: - t.Error("Decoder failed to detect corruption in", tc) - } - } -} - -func TestBig(t *testing.T) { - n := 3*1000 + 1 - raw := make([]byte, n) - const alpha = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" - for i := 0; i < n; i++ { - raw[i] = alpha[i%len(alpha)] - } - encoded := new(bytes.Buffer) - w := NewEncoder(StdEncoding, encoded) - nn, err := w.Write(raw) - if nn != n || err != nil { - t.Fatalf("Encoder.Write(raw) = %d, %v want %d, nil", nn, err, n) - } - err = w.Close() - if err != nil { - t.Fatalf("Encoder.Close() = %v want nil", err) - } - decoded, err := ioutil.ReadAll(NewDecoder(StdEncoding, encoded)) - if err != nil { - t.Fatalf("ioutil.ReadAll(NewDecoder(...)): %v", err) - } - - if !bytes.Equal(raw, decoded) { - var i int - for i = 0; i < len(decoded) && i < len(raw); i++ { - if decoded[i] != raw[i] { - break - } - } - t.Errorf("Decode(Encode(%d-byte string)) failed at offset %d", n, i) - } -} - -func testStringEncoding(t *testing.T, expected string, examples []string) { - for _, e := range examples { - buf, err := StdEncoding.DecodeString(e) - if err != nil { - t.Errorf("Decode(%q) failed: %v", e, err) - continue - } - if s := string(buf); s != expected { - t.Errorf("Decode(%q) = %q, want %q", e, s, expected) - } - } -} - -func TestNewLineCharacters(t *testing.T) { - // Each of these should decode to the string "sure", without errors. - examples := []string{ - "ON2XEZI=", - "ON2XEZI=\r", - "ON2XEZI=\n", - "ON2XEZI=\r\n", - "ON2XEZ\r\nI=", - "ON2X\rEZ\nI=", - "ON2X\nEZ\rI=", - "ON2XEZ\nI=", - "ON2XEZI\n=", - } - testStringEncoding(t, "sure", examples) - - // Each of these should decode to the string "foobar", without errors. 
- examples = []string{ - "MZXW6YTBOI======", - "MZXW6YTBOI=\r\n=====", - } - testStringEncoding(t, "foobar", examples) -} - -func TestDecoderIssue4779(t *testing.T) { - encoded := `JRXXEZLNEBUXA43VNUQGI33MN5ZCA43JOQQGC3LFOQWCAY3PNZZWKY3UMV2HK4 -RAMFSGS4DJONUWG2LOM4QGK3DJOQWCA43FMQQGI3YKMVUXK43NN5SCA5DFNVYG64RANFXGG2LENFSH -K3TUEB2XIIDMMFRG64TFEBSXIIDEN5WG64TFEBWWCZ3OMEQGC3DJOF2WCLRAKV2CAZLONFWQUYLEEB -WWS3TJNUQHMZLONFQW2LBAOF2WS4ZANZXXG5DSOVSCAZLYMVZGG2LUMF2GS33OEB2WY3DBNVRW6IDM -MFRG64TJOMQG42LTNEQHK5AKMFWGS4LVNFYCAZLYEBSWCIDDN5WW233EN4QGG33OONSXC5LBOQXCAR -DVNFZSAYLVORSSA2LSOVZGKIDEN5WG64RANFXAU4TFOBZGK2DFNZSGK4TJOQQGS3RAOZXWY5LQORQX -IZJAOZSWY2LUEBSXG43FEBRWS3DMOVWSAZDPNRXXEZJAMV2SAZTVM5UWC5BANZ2WY3DBBJYGC4TJMF -2HK4ROEBCXQY3FOB2GK5LSEBZWS3TUEBXWGY3BMVRWC5BAMN2XA2LEMF2GC5BANZXW4IDQOJXWSZDF -NZ2CYIDTOVXHIIDJNYFGG5LMOBQSA4LVNEQG6ZTGNFRWSYJAMRSXGZLSOVXHIIDNN5WGY2LUEBQW42 -LNEBUWIIDFON2CA3DBMJXXE5LNFY== -====` - encodedShort := strings.Replace(encoded, "\n", "", -1) - - dec := NewDecoder(StdEncoding, strings.NewReader(encoded)) - res1, err := ioutil.ReadAll(dec) - if err != nil { - t.Errorf("ReadAll failed: %v", err) - } - - dec = NewDecoder(StdEncoding, strings.NewReader(encodedShort)) - var res2 []byte - res2, err = ioutil.ReadAll(dec) - if err != nil { - t.Errorf("ReadAll failed: %v", err) - } - - if !bytes.Equal(res1, res2) { - t.Error("Decoded results not equal") - } -} - -func BenchmarkEncodeToString(b *testing.B) { - data := make([]byte, 8192) - b.SetBytes(int64(len(data))) - for i := 0; i < b.N; i++ { - StdEncoding.EncodeToString(data) - } -} - -func BenchmarkDecodeString(b *testing.B) { - data := StdEncoding.EncodeToString(make([]byte, 8192)) - b.SetBytes(int64(len(data))) - for i := 0; i < b.N; i++ { - StdEncoding.DecodeString(data) - } -} - -func TestWithCustomPadding(t *testing.T) { - for _, testcase := range pairs { - defaultPadding := StdEncoding.EncodeToString([]byte(testcase.decoded)) - customPadding := StdEncoding.WithPadding('@').EncodeToString([]byte(testcase.decoded)) - expected := strings.Replace(defaultPadding, "=", "@", -1) - - if expected != customPadding { - t.Errorf("Expected custom %s, got %s", expected, customPadding) - } - if testcase.encoded != defaultPadding { - t.Errorf("Expected %s, got %s", testcase.encoded, defaultPadding) - } - } -} - -func TestWithoutPadding(t *testing.T) { - for _, testcase := range pairs { - defaultPadding := StdEncoding.EncodeToString([]byte(testcase.decoded)) - customPadding := StdEncoding.WithPadding(NoPadding).EncodeToString([]byte(testcase.decoded)) - expected := strings.TrimRight(defaultPadding, "=") - - if expected != customPadding { - t.Errorf("Expected custom %s, got %s", expected, customPadding) - } - if testcase.encoded != defaultPadding { - t.Errorf("Expected %s, got %s", testcase.encoded, defaultPadding) - } - } -} - -func TestDecodeWithPadding(t *testing.T) { - encodings := []*Encoding{ - StdEncoding, - StdEncoding.WithPadding('-'), - StdEncoding.WithPadding(NoPadding), - } - - for i, enc := range encodings { - for _, pair := range pairs { - - input := pair.decoded - encoded := enc.EncodeToString([]byte(input)) - - decoded, err := enc.DecodeString(encoded) - if err != nil { - t.Errorf("DecodeString Error for encoding %d (%q): %v", i, input, err) - } - - if input != string(decoded) { - t.Errorf("Unexpected result for encoding %d: got %q; want %q", i, decoded, input) - } - } - } -} - -func TestDecodeWithWrongPadding(t *testing.T) { - encoded := StdEncoding.EncodeToString([]byte("foobar")) - - _, err := 
StdEncoding.WithPadding('-').DecodeString(encoded) - if err == nil { - t.Error("expected error") - } - - _, err = StdEncoding.WithPadding(NoPadding).DecodeString(encoded) - if err == nil { - t.Error("expected error") - } -} - -func TestEncodedDecodedLen(t *testing.T) { - type test struct { - in int - wantEnc int - wantDec int - } - data := bytes.Repeat([]byte("x"), 100) - for _, test := range []struct { - name string - enc *Encoding - cases []test - }{ - {"StdEncoding", StdEncoding, []test{ - {0, 0, 0}, - {1, 8, 5}, - {5, 8, 5}, - {6, 16, 10}, - {10, 16, 10}, - }}, - {"NoPadding", StdEncoding.WithPadding(NoPadding), []test{ - {0, 0, 0}, - {1, 2, 1}, - {2, 4, 2}, - {5, 8, 5}, - {6, 10, 6}, - {7, 12, 7}, - {10, 16, 10}, - {11, 18, 11}, - }}, - } { - t.Run(test.name, func(t *testing.T) { - for _, tc := range test.cases { - encLen := test.enc.EncodedLen(tc.in) - decLen := test.enc.DecodedLen(encLen) - enc := test.enc.EncodeToString(data[:tc.in]) - if len(enc) != encLen { - t.Fatalf("EncodedLen(%d) = %d but encoded to %q (%d)", tc.in, encLen, enc, len(enc)) - } - if encLen != tc.wantEnc { - t.Fatalf("EncodedLen(%d) = %d; want %d", tc.in, encLen, tc.wantEnc) - } - if decLen != tc.wantDec { - t.Fatalf("DecodedLen(%d) = %d; want %d", encLen, decLen, tc.wantDec) - } - } - }) - } -} diff --git a/encoding/base32/example_test.go b/encoding/base32/example_test.go deleted file mode 100644 index 2a302d88..00000000 --- a/encoding/base32/example_test.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Keep in sync with ../base64/example_test.go. - -package base32_test - -import ( - "encoding/base32" - "fmt" - "os" -) - -func ExampleEncoding_EncodeToString() { - data := []byte("any + old & data") - str := base32.StdEncoding.EncodeToString(data) - fmt.Println(str) - // Output: - // MFXHSIBLEBXWYZBAEYQGIYLUME====== -} - -func ExampleEncoding_DecodeString() { - str := "ONXW2ZJAMRQXIYJAO5UXI2BAAAQGC3TEEDX3XPY=" - data, err := base32.StdEncoding.DecodeString(str) - if err != nil { - fmt.Println("error:", err) - return - } - fmt.Printf("%q\n", data) - // Output: - // "some data with \x00 and \ufeff" -} - -func ExampleNewEncoder() { - input := []byte("foo\x00bar") - encoder := base32.NewEncoder(base32.StdEncoding, os.Stdout) - encoder.Write(input) - // Must close the encoder when finished to flush any partial blocks. - // If you comment out the following line, the last partial block "r" - // won't be encoded. - encoder.Close() - // Output: - // MZXW6ADCMFZA==== -} diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md new file mode 100644 index 00000000..04fdf09f --- /dev/null +++ b/vendor/github.com/google/uuid/CONTRIBUTING.md @@ -0,0 +1,10 @@ +# How to contribute + +We definitely welcome patches and contribution to this project! + +### Legal requirements + +In order to protect both you and ourselves, you will need to sign the +[Contributor License Agreement](https://cla.developers.google.com/clas). + +You may have already signed it for other Google projects. 
diff --git a/vendor/github.com/google/uuid/CONTRIBUTORS b/vendor/github.com/google/uuid/CONTRIBUTORS new file mode 100644 index 00000000..b4bb97f6 --- /dev/null +++ b/vendor/github.com/google/uuid/CONTRIBUTORS @@ -0,0 +1,9 @@ +Paul Borman +bmatsuo +shawnps +theory +jboverfelt +dsymonds +cd1 +wallclockbuilder +dansouza diff --git a/vendor/github.com/google/uuid/LICENSE b/vendor/github.com/google/uuid/LICENSE new file mode 100644 index 00000000..5dc68268 --- /dev/null +++ b/vendor/github.com/google/uuid/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009,2014 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md new file mode 100644 index 00000000..9d92c11f --- /dev/null +++ b/vendor/github.com/google/uuid/README.md @@ -0,0 +1,19 @@ +# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master) +The uuid package generates and inspects UUIDs based on +[RFC 4122](http://tools.ietf.org/html/rfc4122) +and DCE 1.1: Authentication and Security Services. + +This package is based on the github.com/pborman/uuid package (previously named +code.google.com/p/go-uuid). It differs from these earlier packages in that +a UUID is a 16 byte array rather than a byte slice. One loss due to this +change is the ability to represent an invalid UUID (vs a NIL UUID). + +###### Install +`go get github.com/google/uuid` + +###### Documentation +[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid) + +Full `go doc` style documentation for the package can be viewed online without +installing this package by using the GoDoc site here: +http://godoc.org/github.com/google/uuid diff --git a/vendor/github.com/google/uuid/dce.go b/vendor/github.com/google/uuid/dce.go new file mode 100644 index 00000000..fa820b9d --- /dev/null +++ b/vendor/github.com/google/uuid/dce.go @@ -0,0 +1,80 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
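To connect this vendored package back to the accounts.go change at the top of this commit, a minimal usage sketch (New returns a random, Version 4 UUID as documented upstream; that constructor lives in files not shown in this excerpt):

// Sketch only: this is what CreateAccount now does for the account ID.
id := uuid.New().String() // 36-character canonical xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx form
u, err := uuid.Parse(id)  // round-trips back into the 16-byte array type
fmt.Println(id, u, err)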
+ +package uuid + +import ( + "encoding/binary" + "fmt" + "os" +) + +// A Domain represents a Version 2 domain +type Domain byte + +// Domain constants for DCE Security (Version 2) UUIDs. +const ( + Person = Domain(0) + Group = Domain(1) + Org = Domain(2) +) + +// NewDCESecurity returns a DCE Security (Version 2) UUID. +// +// The domain should be one of Person, Group or Org. +// On a POSIX system the id should be the users UID for the Person +// domain and the users GID for the Group. The meaning of id for +// the domain Org or on non-POSIX systems is site defined. +// +// For a given domain/id pair the same token may be returned for up to +// 7 minutes and 10 seconds. +func NewDCESecurity(domain Domain, id uint32) (UUID, error) { + uuid, err := NewUUID() + if err == nil { + uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 + uuid[9] = byte(domain) + binary.BigEndian.PutUint32(uuid[0:], id) + } + return uuid, err +} + +// NewDCEPerson returns a DCE Security (Version 2) UUID in the person +// domain with the id returned by os.Getuid. +// +// NewDCESecurity(Person, uint32(os.Getuid())) +func NewDCEPerson() (UUID, error) { + return NewDCESecurity(Person, uint32(os.Getuid())) +} + +// NewDCEGroup returns a DCE Security (Version 2) UUID in the group +// domain with the id returned by os.Getgid. +// +// NewDCESecurity(Group, uint32(os.Getgid())) +func NewDCEGroup() (UUID, error) { + return NewDCESecurity(Group, uint32(os.Getgid())) +} + +// Domain returns the domain for a Version 2 UUID. Domains are only defined +// for Version 2 UUIDs. +func (uuid UUID) Domain() Domain { + return Domain(uuid[9]) +} + +// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2 +// UUIDs. +func (uuid UUID) ID() uint32 { + return binary.BigEndian.Uint32(uuid[0:4]) +} + +func (d Domain) String() string { + switch d { + case Person: + return "Person" + case Group: + return "Group" + case Org: + return "Org" + } + return fmt.Sprintf("Domain%d", int(d)) +} diff --git a/vendor/github.com/google/uuid/doc.go b/vendor/github.com/google/uuid/doc.go new file mode 100644 index 00000000..5b8a4b9a --- /dev/null +++ b/vendor/github.com/google/uuid/doc.go @@ -0,0 +1,12 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package uuid generates and inspects UUIDs. +// +// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security +// Services. +// +// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to +// maps or compared directly. +package uuid diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go new file mode 100644 index 00000000..b1746163 --- /dev/null +++ b/vendor/github.com/google/uuid/hash.go @@ -0,0 +1,53 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "crypto/md5" + "crypto/sha1" + "hash" +) + +// Well known namespace IDs and UUIDs +var ( + NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) + Nil UUID // empty UUID, all zeros +) + +// NewHash returns a new UUID derived from the hash of space concatenated with +// data generated by h. 
The hash should be at least 16 byte in length. The +// first 16 bytes of the hash are used to form the UUID. The version of the +// UUID will be the lower 4 bits of version. NewHash is used to implement +// NewMD5 and NewSHA1. +func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { + h.Reset() + h.Write(space[:]) + h.Write(data) + s := h.Sum(nil) + var uuid UUID + copy(uuid[:], s) + uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) + uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant + return uuid +} + +// NewMD5 returns a new MD5 (Version 3) UUID based on the +// supplied name space and data. It is the same as calling: +// +// NewHash(md5.New(), space, data, 3) +func NewMD5(space UUID, data []byte) UUID { + return NewHash(md5.New(), space, data, 3) +} + +// NewSHA1 returns a new SHA1 (Version 5) UUID based on the +// supplied name space and data. It is the same as calling: +// +// NewHash(sha1.New(), space, data, 5) +func NewSHA1(space UUID, data []byte) UUID { + return NewHash(sha1.New(), space, data, 5) +} diff --git a/vendor/github.com/google/uuid/marshal.go b/vendor/github.com/google/uuid/marshal.go new file mode 100644 index 00000000..7f9e0c6c --- /dev/null +++ b/vendor/github.com/google/uuid/marshal.go @@ -0,0 +1,37 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "fmt" + +// MarshalText implements encoding.TextMarshaler. +func (uuid UUID) MarshalText() ([]byte, error) { + var js [36]byte + encodeHex(js[:], uuid) + return js[:], nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (uuid *UUID) UnmarshalText(data []byte) error { + id, err := ParseBytes(data) + if err == nil { + *uuid = id + } + return err +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (uuid UUID) MarshalBinary() ([]byte, error) { + return uuid[:], nil +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (uuid *UUID) UnmarshalBinary(data []byte) error { + if len(data) != 16 { + return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) + } + copy(uuid[:], data) + return nil +} diff --git a/vendor/github.com/google/uuid/node.go b/vendor/github.com/google/uuid/node.go new file mode 100644 index 00000000..3e4e90dc --- /dev/null +++ b/vendor/github.com/google/uuid/node.go @@ -0,0 +1,89 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "sync" +) + +var ( + nodeMu sync.Mutex + ifname string // name of interface being used + nodeID [6]byte // hardware for version 1 UUIDs + zeroID [6]byte // nodeID with only 0's +) + +// NodeInterface returns the name of the interface from which the NodeID was +// derived. The interface "user" is returned if the NodeID was set by +// SetNodeID. +func NodeInterface() string { + defer nodeMu.Unlock() + nodeMu.Lock() + return ifname +} + +// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. +// If name is "" then the first usable interface found will be used or a random +// Node ID will be generated. If a named interface cannot be found then false +// is returned. +// +// SetNodeInterface never fails when name is "". 
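As a hedged usage sketch of the name-based constructors in hash.go (not part of the vendored files): NewSHA1 and NewMD5 are deterministic, so the same namespace and name always yield the same UUID. The namespace and name below are example inputs only.

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Derive a Version 5 UUID from the DNS namespace and an example name;
	// repeating the call with the same inputs gives the same result.
	a := uuid.NewSHA1(uuid.NameSpaceDNS, []byte("example.com"))
	b := uuid.NewSHA1(uuid.NameSpaceDNS, []byte("example.com"))
	fmt.Println(a == b, a.Version()) // true VERSION_5
}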
+func SetNodeInterface(name string) bool { + defer nodeMu.Unlock() + nodeMu.Lock() + return setNodeInterface(name) +} + +func setNodeInterface(name string) bool { + iname, addr := getHardwareInterface(name) // null implementation for js + if iname != "" && addr != nil { + ifname = iname + copy(nodeID[:], addr) + return true + } + + // We found no interfaces with a valid hardware address. If name + // does not specify a specific interface generate a random Node ID + // (section 4.1.6) + if name == "" { + randomBits(nodeID[:]) + return true + } + return false +} + +// NodeID returns a slice of a copy of the current Node ID, setting the Node ID +// if not already set. +func NodeID() []byte { + defer nodeMu.Unlock() + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + nid := nodeID + return nid[:] +} + +// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes +// of id are used. If id is less than 6 bytes then false is returned and the +// Node ID is not set. +func SetNodeID(id []byte) bool { + if len(id) < 6 { + return false + } + defer nodeMu.Unlock() + nodeMu.Lock() + copy(nodeID[:], id) + ifname = "user" + return true +} + +// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is +// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) NodeID() []byte { + var node [6]byte + copy(node[:], uuid[10:]) + return node[:] +} diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go new file mode 100644 index 00000000..24b78edc --- /dev/null +++ b/vendor/github.com/google/uuid/node_js.go @@ -0,0 +1,12 @@ +// Copyright 2017 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build js + +package uuid + +// getHardwareInterface returns nil values for the JS version of the code. +// This remvoves the "net" dependency, because it is not used in the browser. +// Using the "net" library inflates the size of the transpiled JS code by 673k bytes. +func getHardwareInterface(name string) (string, []byte) { return "", nil } diff --git a/vendor/github.com/google/uuid/node_net.go b/vendor/github.com/google/uuid/node_net.go new file mode 100644 index 00000000..0cbbcddb --- /dev/null +++ b/vendor/github.com/google/uuid/node_net.go @@ -0,0 +1,33 @@ +// Copyright 2017 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !js + +package uuid + +import "net" + +var interfaces []net.Interface // cached list of interfaces + +// getHardwareInterface returns the name and hardware address of interface name. +// If name is "" then the name and hardware address of one of the system's +// interfaces is returned. If no interfaces are found (name does not exist or +// there are no interfaces) then "", nil is returned. +// +// Only addresses of at least 6 bytes are returned. 
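A short sketch of the node-ID controls in node.go, for illustration; the 6-byte address used here is a made-up example value.

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// SetNodeID pins the node portion used by Version 1 UUIDs to the first
	// 6 bytes of id; it returns false and changes nothing on shorter input.
	ok := uuid.SetNodeID([]byte{0x00, 0x11, 0x22, 0x33, 0x44, 0x55})
	fmt.Println(ok, uuid.NodeInterface()) // true user

	// NodeID reports the 6 bytes that will be embedded in new Version 1 UUIDs.
	fmt.Printf("% x\n", uuid.NodeID())
}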
+func getHardwareInterface(name string) (string, []byte) { + if interfaces == nil { + var err error + interfaces, err = net.Interfaces() + if err != nil { + return "", nil + } + } + for _, ifs := range interfaces { + if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { + return ifs.Name, ifs.HardwareAddr + } + } + return "", nil +} diff --git a/vendor/github.com/google/uuid/sql.go b/vendor/github.com/google/uuid/sql.go new file mode 100644 index 00000000..f326b54d --- /dev/null +++ b/vendor/github.com/google/uuid/sql.go @@ -0,0 +1,59 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "database/sql/driver" + "fmt" +) + +// Scan implements sql.Scanner so UUIDs can be read from databases transparently +// Currently, database types that map to string and []byte are supported. Please +// consult database-specific driver documentation for matching types. +func (uuid *UUID) Scan(src interface{}) error { + switch src := src.(type) { + case nil: + return nil + + case string: + // if an empty UUID comes from a table, we return a null UUID + if src == "" { + return nil + } + + // see Parse for required string format + u, err := Parse(src) + if err != nil { + return fmt.Errorf("Scan: %v", err) + } + + *uuid = u + + case []byte: + // if an empty UUID comes from a table, we return a null UUID + if len(src) == 0 { + return nil + } + + // assumes a simple slice of bytes if 16 bytes + // otherwise attempts to parse + if len(src) != 16 { + return uuid.Scan(string(src)) + } + copy((*uuid)[:], src) + + default: + return fmt.Errorf("Scan: unable to scan type %T into UUID", src) + } + + return nil +} + +// Value implements sql.Valuer so that UUIDs can be written to databases +// transparently. Currently, UUIDs map to strings. Please consult +// database-specific driver documentation for matching types. +func (uuid UUID) Value() (driver.Value, error) { + return uuid.String(), nil +} diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go new file mode 100644 index 00000000..e6ef06cd --- /dev/null +++ b/vendor/github.com/google/uuid/time.go @@ -0,0 +1,123 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "sync" + "time" +) + +// A Time represents a time as the number of 100's of nanoseconds since 15 Oct +// 1582. +type Time int64 + +const ( + lillian = 2299160 // Julian day of 15 Oct 1582 + unix = 2440587 // Julian day of 1 Jan 1970 + epoch = unix - lillian // Days between epochs + g1582 = epoch * 86400 // seconds between epochs + g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs +) + +var ( + timeMu sync.Mutex + lasttime uint64 // last time we returned + clockSeq uint16 // clock sequence for this run + + timeNow = time.Now // for testing +) + +// UnixTime converts t the number of seconds and nanoseconds using the Unix +// epoch of 1 Jan 1970. +func (t Time) UnixTime() (sec, nsec int64) { + sec = int64(t - g1582ns100) + nsec = (sec % 10000000) * 100 + sec /= 10000000 + return sec, nsec +} + +// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and +// clock sequence as well as adjusting the clock sequence as needed. An error +// is returned if the current time cannot be determined. 
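A minimal sketch of the database round trip that sql.go enables, shown here without a real database: Value renders the UUID as a string and Scan accepts that string (or a raw 16-byte slice) back. Illustrative only.

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	id := uuid.New()

	// Value is what a SQL driver would receive when writing the UUID.
	v, err := id.Value()
	if err != nil {
		panic(err)
	}

	// Scan is what runs when reading the column back into a UUID.
	var out uuid.UUID
	if err := out.Scan(v); err != nil {
		panic(err)
	}
	fmt.Println(out == id) // true
}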
+func GetTime() (Time, uint16, error) { + defer timeMu.Unlock() + timeMu.Lock() + return getTime() +} + +func getTime() (Time, uint16, error) { + t := timeNow() + + // If we don't have a clock sequence already, set one. + if clockSeq == 0 { + setClockSequence(-1) + } + now := uint64(t.UnixNano()/100) + g1582ns100 + + // If time has gone backwards with this clock sequence then we + // increment the clock sequence + if now <= lasttime { + clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000 + } + lasttime = now + return Time(now), clockSeq, nil +} + +// ClockSequence returns the current clock sequence, generating one if not +// already set. The clock sequence is only used for Version 1 UUIDs. +// +// The uuid package does not use global static storage for the clock sequence or +// the last time a UUID was generated. Unless SetClockSequence is used, a new +// random clock sequence is generated the first time a clock sequence is +// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) +func ClockSequence() int { + defer timeMu.Unlock() + timeMu.Lock() + return clockSequence() +} + +func clockSequence() int { + if clockSeq == 0 { + setClockSequence(-1) + } + return int(clockSeq & 0x3fff) +} + +// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to +// -1 causes a new sequence to be generated. +func SetClockSequence(seq int) { + defer timeMu.Unlock() + timeMu.Lock() + setClockSequence(seq) +} + +func setClockSequence(seq int) { + if seq == -1 { + var b [2]byte + randomBits(b[:]) // clock sequence + seq = int(b[0])<<8 | int(b[1]) + } + oldSeq := clockSeq + clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant + if oldSeq != clockSeq { + lasttime = 0 + } +} + +// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in +// uuid. The time is only defined for version 1 and 2 UUIDs. +func (uuid UUID) Time() Time { + time := int64(binary.BigEndian.Uint32(uuid[0:4])) + time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 + time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 + return Time(time) +} + +// ClockSequence returns the clock sequence encoded in uuid. +// The clock sequence is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) ClockSequence() int { + return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff +} diff --git a/vendor/github.com/google/uuid/util.go b/vendor/github.com/google/uuid/util.go new file mode 100644 index 00000000..5ea6c737 --- /dev/null +++ b/vendor/github.com/google/uuid/util.go @@ -0,0 +1,43 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "io" +) + +// randomBits completely fills slice b with random data. +func randomBits(b []byte) { + if _, err := io.ReadFull(rander, b); err != nil { + panic(err.Error()) // rand should never fail + } +} + +// xvalues returns the value of a byte as a hexadecimal digit or 255. 
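For illustration, a sketch of recovering the timestamp and clock sequence that time.go encodes into Version 1 UUIDs; not part of the vendored code.

package main

import (
	"fmt"
	"time"

	"github.com/google/uuid"
)

func main() {
	id, err := uuid.NewUUID()
	if err != nil {
		panic(err)
	}
	// Time returns 100s of nanoseconds since 15 Oct 1582; UnixTime converts
	// that to the Unix epoch so it can be handed to the time package.
	sec, nsec := id.Time().UnixTime()
	fmt.Println(time.Unix(sec, nsec), id.ClockSequence())
}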
+var xvalues = [256]byte{ + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, +} + +// xtob converts hex characters x1 and x2 into a byte. +func xtob(x1, x2 byte) (byte, bool) { + b1 := xvalues[x1] + b2 := xvalues[x2] + return (b1 << 4) | b2, b1 != 255 && b2 != 255 +} diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go new file mode 100644 index 00000000..920dadf5 --- /dev/null +++ b/vendor/github.com/google/uuid/uuid.go @@ -0,0 +1,208 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "bytes" + "crypto/rand" + "encoding/hex" + "errors" + "fmt" + "io" + "strings" +) + +// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC +// 4122. +type UUID [16]byte + +// A Version represents a UUID's version. +type Version byte + +// A Variant represents a UUID's variant. +type Variant byte + +// Constants returned by Variant. +const ( + Invalid = Variant(iota) // Invalid UUID + RFC4122 // The variant specified in RFC4122 + Reserved // Reserved, NCS backward compatibility. + Microsoft // Reserved, Microsoft Corporation backward compatibility. + Future // Reserved for future definition. +) + +var rander = rand.Reader // random function + +// Parse decodes s into a UUID or returns an error. Both the UUID form of +// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded. +func Parse(s string) (UUID, error) { + var uuid UUID + if len(s) != 36 { + if len(s) != 36+9 { + return uuid, fmt.Errorf("invalid UUID length: %d", len(s)) + } + if strings.ToLower(s[:9]) != "urn:uuid:" { + return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) + } + s = s[9:] + } + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + v, ok := xtob(s[x], s[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + uuid[i] = v + } + return uuid, nil +} + +// ParseBytes is like Parse, except it parses a byte slice instead of a string. 
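A small sketch of the two input forms Parse accepts, using the well-known DNS namespace UUID from hash.go as the example value; illustrative only.

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	a, err := uuid.Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
	if err != nil {
		panic(err)
	}
	// The urn:uuid: prefixed spelling decodes to the same 16 bytes.
	b, err := uuid.Parse("urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8")
	if err != nil {
		panic(err)
	}
	fmt.Println(a == b) // true
}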
+func ParseBytes(b []byte) (UUID, error) { + var uuid UUID + if len(b) != 36 { + if len(b) != 36+9 { + return uuid, fmt.Errorf("invalid UUID length: %d", len(b)) + } + if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) { + return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) + } + b = b[9:] + } + if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + v, ok := xtob(b[x], b[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + uuid[i] = v + } + return uuid, nil +} + +// MustParse is like Parse but panics if the string cannot be parsed. +// It simplifies safe initialization of global variables holding compiled UUIDs. +func MustParse(s string) UUID { + uuid, err := Parse(s) + if err != nil { + panic(`uuid: Parse(` + s + `): ` + err.Error()) + } + return uuid +} + +// FromBytes creates a new UUID from a byte slice. Returns an error if the slice +// does not have a length of 16. The bytes are copied from the slice. +func FromBytes(b []byte) (uuid UUID, err error) { + err = uuid.UnmarshalBinary(b) + return uuid, err +} + +// Must returns uuid if err is nil and panics otherwise. +func Must(uuid UUID, err error) UUID { + if err != nil { + panic(err) + } + return uuid +} + +// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// , or "" if uuid is invalid. +func (uuid UUID) String() string { + var buf [36]byte + encodeHex(buf[:], uuid) + return string(buf[:]) +} + +// URN returns the RFC 2141 URN form of uuid, +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. +func (uuid UUID) URN() string { + var buf [36 + 9]byte + copy(buf[:], "urn:uuid:") + encodeHex(buf[9:], uuid) + return string(buf[:]) +} + +func encodeHex(dst []byte, uuid UUID) { + hex.Encode(dst, uuid[:4]) + dst[8] = '-' + hex.Encode(dst[9:13], uuid[4:6]) + dst[13] = '-' + hex.Encode(dst[14:18], uuid[6:8]) + dst[18] = '-' + hex.Encode(dst[19:23], uuid[8:10]) + dst[23] = '-' + hex.Encode(dst[24:], uuid[10:]) +} + +// Variant returns the variant encoded in uuid. +func (uuid UUID) Variant() Variant { + switch { + case (uuid[8] & 0xc0) == 0x80: + return RFC4122 + case (uuid[8] & 0xe0) == 0xc0: + return Microsoft + case (uuid[8] & 0xe0) == 0xe0: + return Future + default: + return Reserved + } +} + +// Version returns the version of uuid. +func (uuid UUID) Version() Version { + return Version(uuid[6] >> 4) +} + +func (v Version) String() string { + if v > 15 { + return fmt.Sprintf("BAD_VERSION_%d", v) + } + return fmt.Sprintf("VERSION_%d", v) +} + +func (v Variant) String() string { + switch v { + case RFC4122: + return "RFC4122" + case Reserved: + return "Reserved" + case Microsoft: + return "Microsoft" + case Future: + return "Future" + case Invalid: + return "Invalid" + } + return fmt.Sprintf("BadVariant%d", int(v)) +} + +// SetRand sets the random number generator to r, which implements io.Reader. +// If r.Read returns an error when the package requests random data then +// a panic will be issued. +// +// Calling SetRand with nil sets the random number generator to the default +// generator. 
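For illustration, a sketch of the inspection helpers defined above (MustParse, String, URN, Version, Variant); the input is again the DNS namespace UUID and the program is not part of the vendored files.

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// MustParse panics on bad input, which keeps package-level variables short.
	id := uuid.MustParse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
	fmt.Println(id.String())                // canonical form
	fmt.Println(id.URN())                   // urn:uuid: prefixed form
	fmt.Println(id.Version(), id.Variant()) // VERSION_1 RFC4122
}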
+func SetRand(r io.Reader) { + if r == nil { + rander = rand.Reader + return + } + rander = r +} diff --git a/vendor/github.com/google/uuid/version1.go b/vendor/github.com/google/uuid/version1.go new file mode 100644 index 00000000..199a1ac6 --- /dev/null +++ b/vendor/github.com/google/uuid/version1.go @@ -0,0 +1,44 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" +) + +// NewUUID returns a Version 1 UUID based on the current NodeID and clock +// sequence, and the current time. If the NodeID has not been set by SetNodeID +// or SetNodeInterface then it will be set automatically. If the NodeID cannot +// be set NewUUID returns nil. If clock sequence has not been set by +// SetClockSequence then it will be set automatically. If GetTime fails to +// return the current NewUUID returns nil and an error. +// +// In most cases, New should be used. +func NewUUID() (UUID, error) { + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + nodeMu.Unlock() + + var uuid UUID + now, seq, err := GetTime() + if err != nil { + return uuid, err + } + + timeLow := uint32(now & 0xffffffff) + timeMid := uint16((now >> 32) & 0xffff) + timeHi := uint16((now >> 48) & 0x0fff) + timeHi |= 0x1000 // Version 1 + + binary.BigEndian.PutUint32(uuid[0:], timeLow) + binary.BigEndian.PutUint16(uuid[4:], timeMid) + binary.BigEndian.PutUint16(uuid[6:], timeHi) + binary.BigEndian.PutUint16(uuid[8:], seq) + copy(uuid[10:], nodeID[:]) + + return uuid, nil +} diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go new file mode 100644 index 00000000..84af91c9 --- /dev/null +++ b/vendor/github.com/google/uuid/version4.go @@ -0,0 +1,38 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "io" + +// New creates a new random UUID or panics. New is equivalent to +// the expression +// +// uuid.Must(uuid.NewRandom()) +func New() UUID { + return Must(NewRandom()) +} + +// NewRandom returns a Random (Version 4) UUID. +// +// The strength of the UUIDs is based on the strength of the crypto/rand +// package. +// +// A note about uniqueness derived from the UUID Wikipedia entry: +// +// Randomly generated UUIDs have 122 random bits. One's annual risk of being +// hit by a meteorite is estimated to be one chance in 17 billion, that +// means the probability is about 0.00000000006 (6 × 10−11), +// equivalent to the odds of creating a few tens of trillions of UUIDs in a +// year and having one duplicate. +func NewRandom() (UUID, error) { + var uuid UUID + _, err := io.ReadFull(rander, uuid[:]) + if err != nil { + return Nil, err + } + uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 + uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 + return uuid, nil +}
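Finally, a hedged sketch contrasting the two constructors added in version1.go and version4.go; illustrative only.

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// New wraps Must(NewRandom()): a random Version 4 UUID that panics only
	// if the system random source fails.
	v4 := uuid.New()

	// NewUUID is the time- and node-based Version 1 constructor and reports
	// an error instead of panicking.
	v1, err := uuid.NewUUID()
	if err != nil {
		panic(err)
	}
	fmt.Println(v4.Version(), v1.Version()) // VERSION_4 VERSION_1
}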